Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/3w-9xxx.c | 3
-rw-r--r--  drivers/scsi/3w-xxxx.c | 7
-rw-r--r--  drivers/scsi/Kconfig | 22
-rw-r--r--  drivers/scsi/Makefile | 5
-rw-r--r--  drivers/scsi/NCR5380.c | 2
-rw-r--r--  drivers/scsi/a100u2w.c | 2
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 2
-rw-r--r--  drivers/scsi/aacraid/commctrl.c | 14
-rw-r--r--  drivers/scsi/aacraid/comminit.c | 2
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 2
-rw-r--r--  drivers/scsi/aacraid/dpcsup.c | 2
-rw-r--r--  drivers/scsi/aacraid/linit.c | 10
-rw-r--r--  drivers/scsi/aacraid/rkt.c | 2
-rw-r--r--  drivers/scsi/aacraid/rx.c | 2
-rw-r--r--  drivers/scsi/aacraid/sa.c | 2
-rw-r--r--  drivers/scsi/advansys.c | 7
-rw-r--r--  drivers/scsi/aha152x.c | 2
-rw-r--r--  drivers/scsi/aha1740.c | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx.reg | 185
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_core.c | 15
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_pci.c | 12
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_reg.h_shipped | 567
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped | 1723
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx.reg | 124
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_core.c | 7
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped | 875
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped | 1165
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm_gram.y | 10
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm_scan.l | 1
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c | 3
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h | 3
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_attr.c | 3
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c | 8
-rw-r--r--  drivers/scsi/atp870u.c | 4
-rw-r--r--  drivers/scsi/ch.c | 8
-rw-r--r--  drivers/scsi/constants.c | 3
-rw-r--r--  drivers/scsi/cxgb3i/Kbuild | 4
-rw-r--r--  drivers/scsi/cxgb3i/Kconfig | 7
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i.h | 139
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_ddp.c | 770
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_ddp.h | 306
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_init.c | 107
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_iscsi.c | 951
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_offload.c | 1810
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_offload.h | 231
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_pdu.c | 402
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_pdu.h | 59
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 3
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_emc.c | 3
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_hp_sw.c | 18
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 24
-rw-r--r--  drivers/scsi/dpt_i2o.c | 6
-rw-r--r--  drivers/scsi/eata.c | 15
-rw-r--r--  drivers/scsi/eata_pio.c | 4
-rw-r--r--  drivers/scsi/esp_scsi.c | 6
-rw-r--r--  drivers/scsi/fcoe/Makefile | 8
-rw-r--r--  drivers/scsi/fcoe/fc_transport_fcoe.c | 446
-rw-r--r--  drivers/scsi/fcoe/fcoe_sw.c | 494
-rw-r--r--  drivers/scsi/fcoe/libfcoe.c | 1510
-rw-r--r--  drivers/scsi/fdomain.c | 4
-rw-r--r--  drivers/scsi/gdth.c | 12
-rw-r--r--  drivers/scsi/ibmmca.c | 2
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 295
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.h | 32
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 6
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvstgt.c | 16
-rw-r--r--  drivers/scsi/ide-scsi.c | 80
-rw-r--r--  drivers/scsi/in2000.c | 2
-rw-r--r--  drivers/scsi/initio.c | 2
-rw-r--r--  drivers/scsi/initio.h | 2
-rw-r--r--  drivers/scsi/ipr.c | 23
-rw-r--r--  drivers/scsi/ipr.h | 2
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 1657
-rw-r--r--  drivers/scsi/iscsi_tcp.h | 88
-rw-r--r--  drivers/scsi/libfc/Makefile | 12
-rw-r--r--  drivers/scsi/libfc/fc_disc.c | 845
-rw-r--r--  drivers/scsi/libfc/fc_elsct.c | 71
-rw-r--r--  drivers/scsi/libfc/fc_exch.c | 1970
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c | 2131
-rw-r--r--  drivers/scsi/libfc/fc_frame.c | 89
-rw-r--r--  drivers/scsi/libfc/fc_lport.c | 1604
-rw-r--r--  drivers/scsi/libfc/fc_rport.c | 1291
-rw-r--r--  drivers/scsi/libiscsi.c | 430
-rw-r--r--  drivers/scsi/libiscsi_tcp.c | 1163
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 118
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 1530
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 66
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 37
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 564
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 23
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 1834
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 302
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 432
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 1485
-rw-r--r--  drivers/scsi/lpfc/lpfc_logmsg.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 660
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 116
-rw-r--r--  drivers/scsi/lpfc/lpfc_nl.h | 179
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 26
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 1671
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h | 7
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 1820
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 196
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.h | 4
-rw-r--r--  drivers/scsi/mac_esp.c | 100
-rw-r--r--  drivers/scsi/mac_scsi.c | 1
-rw-r--r--  drivers/scsi/megaraid.c | 11
-rw-r--r--  drivers/scsi/megaraid.h | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.c | 15
-rw-r--r--  drivers/scsi/nsp32.c | 3
-rw-r--r--  drivers/scsi/osst.c | 3
-rw-r--r--  drivers/scsi/qla1280.c | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 329
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 96
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h | 15
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 585
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c | 21
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 14
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 56
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 481
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 1275
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 45
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 358
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 854
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 886
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 516
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 1501
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 573
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 4
-rw-r--r--  drivers/scsi/qla4xxx/ql4_isr.c | 4
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 7
-rw-r--r--  drivers/scsi/qlogicfas408.c | 2
-rw-r--r--  drivers/scsi/scsi.c | 10
-rw-r--r--  drivers/scsi/scsi_error.c | 82
-rw-r--r--  drivers/scsi/scsi_ioctl.c | 12
-rw-r--r--  drivers/scsi/scsi_lib.c | 339
-rw-r--r--  drivers/scsi/scsi_netlink.c | 9
-rw-r--r--  drivers/scsi/scsi_priv.h | 1
-rw-r--r--  drivers/scsi/scsi_scan.c | 18
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 60
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 19
-rw-r--r--  drivers/scsi/scsi_transport_spi.c | 7
-rw-r--r--  drivers/scsi/sd.c | 84
-rw-r--r--  drivers/scsi/sd.h | 21
-rw-r--r--  drivers/scsi/sd_dif.c | 42
-rw-r--r--  drivers/scsi/ses.c | 9
-rw-r--r--  drivers/scsi/sg.c | 13
-rw-r--r--  drivers/scsi/sr.c | 44
-rw-r--r--  drivers/scsi/sr_ioctl.c | 2
-rw-r--r--  drivers/scsi/sr_vendor.c | 12
-rw-r--r--  drivers/scsi/st.c | 263
-rw-r--r--  drivers/scsi/stex.c | 5
-rw-r--r--  drivers/scsi/sun3x_esp.c | 4
-rw-r--r--  drivers/scsi/sym53c416.c | 2
-rw-r--r--  drivers/scsi/tmscsim.c | 3
-rw-r--r--  drivers/scsi/u14-34f.c | 3
-rw-r--r--  drivers/scsi/wd7000.c | 4
160 files changed, 33076 insertions, 10790 deletions
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index b92c19bb6876..5311317c2e4c 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1924,12 +1924,9 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re
(cmd->sc_data_direction == DMA_FROM_DEVICE ||
cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
if (scsi_sg_count(cmd) == 1) {
- unsigned long flags;
void *buf = tw_dev->generic_buffer_virt[request_id];
- local_irq_save(flags);
scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
- local_irq_restore(flags);
}
}
} /* End twa_scsiop_execute_scsi_complete() */
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index a0537f09aa21..c03f1d2c9e2e 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1466,12 +1466,7 @@ static int tw_scsiop_inquiry(TW_Device_Extension *tw_dev, int request_id)
static void tw_transfer_internal(TW_Device_Extension *tw_dev, int request_id,
void *data, unsigned int len)
{
- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
- unsigned long flags;
-
- local_irq_save(flags);
- scsi_sg_copy_from_buffer(cmd, data, len);
- local_irq_restore(flags);
+ scsi_sg_copy_from_buffer(tw_dev->srb[request_id], data, len);
}
/* This function is called by the isr to complete an inquiry command */
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 403ecad48d4b..152d4aa9354f 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -352,6 +352,8 @@ config ISCSI_TCP
http://open-iscsi.org
+source "drivers/scsi/cxgb3i/Kconfig"
+
config SGIWD93_SCSI
tristate "SGI WD93C93 SCSI Driver"
depends on SGI_HAS_WD93 && SCSI
@@ -603,6 +605,19 @@ config SCSI_FLASHPOINT
substantial, so users of MultiMaster Host Adapters may not
wish to include it.
+config LIBFC
+ tristate "LibFC module"
+ select SCSI_FC_ATTRS
+ ---help---
+ Fibre Channel library module
+
+config FCOE
+ tristate "FCoE module"
+ depends on PCI
+ select LIBFC
+ ---help---
+ Fibre Channel over Ethernet module
+
config SCSI_DMX3191D
tristate "DMX3191D SCSI support"
depends on PCI && SCSI
@@ -1357,6 +1372,13 @@ config SCSI_LPFC
This lpfc driver supports the Emulex LightPulse
Family of Fibre Channel PCI host adapters.
+config SCSI_LPFC_DEBUG_FS
+ bool "Emulex LightPulse Fibre Channel debugfs Support"
+ depends on SCSI_LPFC && DEBUG_FS
+ help
+ This makes debugging information from the lpfc driver
+ available via the debugfs filesystem.
+
config SCSI_SIM710
tristate "Simple 53c710 SCSI support (Compaq, NCR machines)"
depends on (EISA || MCA) && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 72fd5043cfa1..1410697257cb 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -36,7 +36,9 @@ obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas/
obj-$(CONFIG_SCSI_SRP_ATTRS) += scsi_transport_srp.o
obj-$(CONFIG_SCSI_DH) += device_handler/
-obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o
+obj-$(CONFIG_LIBFC) += libfc/
+obj-$(CONFIG_FCOE) += fcoe/
+obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o
obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
obj-$(CONFIG_SCSI_A4000T) += 53c700.o a4000t.o
obj-$(CONFIG_SCSI_ZORRO7XX) += 53c700.o zorro7xx.o
@@ -124,6 +126,7 @@ obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
obj-$(CONFIG_SCSI_STEX) += stex.o
obj-$(CONFIG_SCSI_MVSAS) += mvsas.o
obj-$(CONFIG_PS3_ROM) += ps3rom.o
+obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
obj-$(CONFIG_ARM) += arm/
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index eeddbd19eba5..f92da9fd5f20 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -30,7 +30,7 @@
* $Log: NCR5380.c,v $
* Revision 1.10 1998/9/2 Alan Cox
- * (alan@redhat.com)
+ * (alan@lxorguk.ukuu.org.uk)
* Fixed up the timer lockups reported so far. Things still suck. Looking
* forward to 2.3 and per device request queues. Then it'll be possible to
* SMP thread this beast and improve life no end.
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 84bb61628372..3c298c7253ee 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -54,7 +54,7 @@
* 9/28/04 Christoph Hellwig <hch@lst.de>
* - merge the two source files
* - remove internal queueing code
- * 14/06/07 Alan Cox <alan@redhat.com>
+ * 14/06/07 Alan Cox <alan@lxorguk.ukuu.org.uk>
* - Grand cleanup and Linuxisation
*/
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 8abfd06b5a72..90d1d0878cb8 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1,6 +1,6 @@
/*
* Adaptec AAC series RAID controller driver
- * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index a7355260cfcf..0391d759dfdb 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -1,6 +1,6 @@
/*
* Adaptec AAC series RAID controller driver
- * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
@@ -90,14 +90,24 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
if (size < le16_to_cpu(kfib->header.SenderSize))
size = le16_to_cpu(kfib->header.SenderSize);
if (size > dev->max_fib_size) {
+ dma_addr_t daddr;
+
if (size > 2048) {
retval = -EINVAL;
goto cleanup;
}
+
+ kfib = pci_alloc_consistent(dev->pdev, size, &daddr);
+ if (!kfib) {
+ retval = -ENOMEM;
+ goto cleanup;
+ }
+
/* Highjack the hw_fib */
hw_fib = fibptr->hw_fib_va;
hw_fib_pa = fibptr->hw_fib_pa;
- fibptr->hw_fib_va = kfib = pci_alloc_consistent(dev->pdev, size, &fibptr->hw_fib_pa);
+ fibptr->hw_fib_va = kfib;
+ fibptr->hw_fib_pa = daddr;
memset(((char *)kfib) + dev->max_fib_size, 0, size - dev->max_fib_size);
memcpy(kfib, hw_fib, dev->max_fib_size);
}
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index cbac06355107..16310443b55a 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -1,6 +1,6 @@
/*
* Adaptec AAC series RAID controller driver
- * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 289304aab690..d24c2670040b 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1,6 +1,6 @@
/*
* Adaptec AAC series RAID controller driver
- * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index 933f208eedba..abc9ef5d1b10 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -1,6 +1,6 @@
/*
* Adaptec AAC series RAID controller driver
- * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 9aa301c1ed07..36d8aab97efe 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1,6 +1,6 @@
/*
* Adaptec AAC series RAID controller driver
- * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
@@ -175,8 +175,8 @@ static struct aac_driver_ident aac_drivers[] = {
{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Boxster/PERC3DiB) */
{ aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* catapult */
{ aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* tomcat */
- { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2120S (Crusader) */
- { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2120S (Crusader) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan) */
{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan-2m) */
{ aac_rx_init, "aacraid", "Legend ", "Legend S220 ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S220 (Legend Crusader) */
{ aac_rx_init, "aacraid", "Legend ", "Legend S230 ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S230 (Legend Vulcan) */
@@ -427,8 +427,8 @@ static int aac_slave_configure(struct scsi_device *sdev)
* Firmware has an individual device recovery time typically
* of 35 seconds, give us a margin.
*/
- if (sdev->timeout < (45 * HZ))
- sdev->timeout = 45 * HZ;
+ if (sdev->request_queue->rq_timeout < (45 * HZ))
+ blk_queue_rq_timeout(sdev->request_queue, 45*HZ);
for (cid = 0; cid < aac->maximum_num_containers; ++cid)
if (aac->fsa_dev[cid].valid)
++num_lsu;
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
index 8cd6588a83e3..16d8db550027 100644
--- a/drivers/scsi/aacraid/rkt.c
+++ b/drivers/scsi/aacraid/rkt.c
@@ -1,6 +1,6 @@
/*
* Adaptec AAC series RAID controller driver
- * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 073208b0f622..f70d9f8e79e5 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -1,6 +1,6 @@
/*
* Adaptec AAC series RAID controller driver
- * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index fc1a55796a89..b6a3c5c187b6 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -1,6 +1,6 @@
/*
* Adaptec AAC series RAID controller driver
- * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 218777bfc143..2f602720193e 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -13425,8 +13425,7 @@ static int __devinit advansys_board_found(struct Scsi_Host *shost,
}
boardp->asc_n_io_port = pci_resource_len(pdev, 1);
- boardp->ioremap_addr = ioremap(pci_resource_start(pdev, 1),
- boardp->asc_n_io_port);
+ boardp->ioremap_addr = pci_ioremap_bar(pdev, 1);
if (!boardp->ioremap_addr) {
shost_printk(KERN_ERR, shost, "ioremap(%lx, %d) "
"returned NULL\n",
@@ -13872,8 +13871,10 @@ static int __devinit advansys_board_found(struct Scsi_Host *shost,
advansys_wide_free_mem(boardp);
free_irq(boardp->irq, shost);
err_free_dma:
+#ifdef CONFIG_ISA
if (shost->dma_channel != NO_ISA_DMA)
free_dma(shost->dma_channel);
+#endif
err_free_proc:
kfree(boardp->prtbuf);
err_unmap:
@@ -13894,10 +13895,12 @@ static int advansys_release(struct Scsi_Host *shost)
ASC_DBG(1, "begin\n");
scsi_remove_host(shost);
free_irq(board->irq, shost);
+#ifdef CONFIG_ISA
if (shost->dma_channel != NO_ISA_DMA) {
ASC_DBG(1, "free_dma()\n");
free_dma(shost->dma_channel);
}
+#endif
if (ASC_NARROW_BOARD(board)) {
dma_unmap_single(board->dev,
board->dvc_var.asc_dvc_var.overrun_dma,
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index b5a868d85eb4..1e5478abd90e 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -337,7 +337,7 @@ CMD_INC_RESID(struct scsi_cmnd *cmd, int inc)
#else
#define IRQ_MIN 9
#if defined(__PPC)
-#define IRQ_MAX (NR_IRQS-1)
+#define IRQ_MAX (nr_irqs-1)
#else
#define IRQ_MAX 12
#endif
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index 7c45d88a205b..ed0e3e55652a 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -22,7 +22,7 @@
* aha1740_makecode may still need even more work
* if it doesn't work for your devices, take a look.
*
- * Reworked for new_eh and new locking by Alan Cox <alan@redhat.com>
+ * Reworked for new_eh and new locking by Alan Cox <alan@lxorguk.ukuu.org.uk>
*
* Converted to EISA and generic DMA APIs by Marc Zyngier
* <maz@wild-wind.fr.eu.org>, 4/2003.
diff --git a/drivers/scsi/aic7xxx/aic79xx.reg b/drivers/scsi/aic7xxx/aic79xx.reg
index cca16fc5b4ad..0666c22ab55b 100644
--- a/drivers/scsi/aic7xxx/aic79xx.reg
+++ b/drivers/scsi/aic7xxx/aic79xx.reg
@@ -80,6 +80,17 @@ VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $"
}
/*
+ * Registers marked "dont_generate_debug_code" are not (yet) referenced
+ * from the driver code; this keyword inhibits the generation of debug
+ * code for them.
+ *
+ * The REG_PRETTY_PRINT config will complain if dont_generate_debug_code
+ * is added to a register that is referenced in the driver. An
+ * unreferenced register without dont_generate_debug_code will result
+ * in dead code. No warning is issued.
+ */
+
+/*
* Mode Pointer
* Controls which of the 5, 512byte, address spaces should be used
* as the source and destination of any register accesses in our
@@ -91,6 +102,7 @@ register MODE_PTR {
field DST_MODE 0x70
field SRC_MODE 0x07
mode_pointer
+ dont_generate_debug_code
}
const SRC_MODE_SHIFT 0
@@ -190,6 +202,7 @@ register SEQINTCODE {
SAW_HWERR,
BAD_SCB_STATUS
}
+ dont_generate_debug_code
}
/*
@@ -207,6 +220,7 @@ register CLRINT {
field CLRSEQINT 0x04
field CLRCMDINT 0x02
field CLRSPLTINT 0x01
+ dont_generate_debug_code
}
/*
@@ -222,6 +236,7 @@ register ERROR {
field SQPARERR 0x08
field ILLOPCODE 0x04
field DSCTMOUT 0x02
+ dont_generate_debug_code
}
/*
@@ -255,6 +270,7 @@ register HCNTRL {
field INTEN 0x02
field CHIPRST 0x01
field CHIPRSTACK 0x01
+ dont_generate_debug_code
}
/*
@@ -265,6 +281,7 @@ register HNSCB_QOFF {
access_mode RW
size 2
count 2
+ dont_generate_debug_code
}
/*
@@ -274,6 +291,7 @@ register HESCB_QOFF {
address 0x008
access_mode RW
count 2
+ dont_generate_debug_code
}
/*
@@ -311,6 +329,7 @@ register CLRSEQINTSTAT {
field CLRSEQ_SCSIINT 0x04
field CLRSEQ_PCIINT 0x02
field CLRSEQ_SPLTINT 0x01
+ dont_generate_debug_code
}
/*
@@ -320,6 +339,7 @@ register SWTIMER {
address 0x00E
access_mode RW
size 2
+ dont_generate_debug_code
}
/*
@@ -330,6 +350,7 @@ register SNSCB_QOFF {
access_mode RW
size 2
modes M_CCHAN
+ dont_generate_debug_code
}
/*
@@ -340,6 +361,7 @@ register SESCB_QOFF {
count 2
access_mode RW
modes M_CCHAN
+ dont_generate_debug_code
}
/*
@@ -350,6 +372,7 @@ register SDSCB_QOFF {
access_mode RW
modes M_CCHAN
size 2
+ dont_generate_debug_code
}
/*
@@ -378,6 +401,7 @@ register QOFF_CTLSTA {
SCB_QSIZE_8192,
SCB_QSIZE_16384
}
+ dont_generate_debug_code
}
/*
@@ -431,6 +455,7 @@ register DSCOMMAND0 {
field EXTREQLCK 0x10 /* External Request Lock */
field DISABLE_TWATE 0x02 /* Rev B or greater */
field CIOPARCKEN 0x01 /* Internal bus parity error enable */
+ dont_generate_debug_code
}
/*
@@ -459,6 +484,7 @@ register SG_CACHE_PRE {
field SG_ADDR_MASK 0xf8
field ODD_SEG 0x04
field LAST_SEG 0x02
+ dont_generate_debug_code
}
register SG_CACHE_SHADOW {
@@ -491,6 +517,7 @@ register HADDR {
access_mode RW
size 8
modes M_DFF0, M_DFF1
+ dont_generate_debug_code
}
/*
@@ -522,6 +549,7 @@ register HCNT {
access_mode RW
size 3
modes M_DFF0, M_DFF1
+ dont_generate_debug_code
}
/*
@@ -551,6 +579,7 @@ register SGHADDR {
access_mode RW
size 8
modes M_DFF0, M_DFF1
+ dont_generate_debug_code
}
/*
@@ -561,6 +590,7 @@ register SCBHADDR {
access_mode RW
size 8
modes M_CCHAN
+ dont_generate_debug_code
}
/*
@@ -570,6 +600,7 @@ register SGHCNT {
address 0x084
access_mode RW
modes M_DFF0, M_DFF1
+ dont_generate_debug_code
}
/*
@@ -579,6 +610,7 @@ register SCBHCNT {
address 0x084
access_mode RW
modes M_CCHAN
+ dont_generate_debug_code
}
/*
@@ -609,6 +641,7 @@ register DFF_THRSH {
RD_DFTHRSH_90,
RD_DFTHRSH_MAX
}
+ dont_generate_debug_code
}
/*
@@ -817,6 +850,7 @@ register PCIXCTL {
field SRSPDPEEN 0x04
field TSCSERREN 0x02
field CMPABCDIS 0x01
+ dont_generate_debug_code
}
/*
@@ -863,6 +897,7 @@ register DCHSPLTSTAT0 {
field RXOVRUN 0x04
field RXSCEMSG 0x02
field RXSPLTRSP 0x01
+ dont_generate_debug_code
}
/*
@@ -908,6 +943,7 @@ register DCHSPLTSTAT1 {
modes M_DFF0, M_DFF1
count 2
field RXDATABUCKET 0x01
+ dont_generate_debug_code
}
/*
@@ -1069,6 +1105,7 @@ register SGSPLTSTAT0 {
field RXOVRUN 0x04
field RXSCEMSG 0x02
field RXSPLTRSP 0x01
+ dont_generate_debug_code
}
/*
@@ -1080,6 +1117,7 @@ register SGSPLTSTAT1 {
modes M_DFF0, M_DFF1
count 2
field RXDATABUCKET 0x01
+ dont_generate_debug_code
}
/*
@@ -1091,6 +1129,7 @@ register SFUNCT {
modes M_CFG
field TEST_GROUP 0xF0
field TEST_NUM 0x0F
+ dont_generate_debug_code
}
/*
@@ -1109,6 +1148,7 @@ register DF0PCISTAT {
field RDPERR 0x04
field TWATERR 0x02
field DPR 0x01
+ dont_generate_debug_code
}
/*
@@ -1204,6 +1244,7 @@ register TARGPCISTAT {
field SSE 0x40
field STA 0x08
field TWATERR 0x02
+ dont_generate_debug_code
}
/*
@@ -1216,6 +1257,7 @@ register LQIN {
size 20
count 2
modes M_DFF0, M_DFF1, M_SCSI
+ dont_generate_debug_code
}
/*
@@ -1247,6 +1289,7 @@ register LUNPTR {
access_mode RW
modes M_CFG
count 2
+ dont_generate_debug_code
}
/*
@@ -1278,6 +1321,7 @@ register CMDLENPTR {
access_mode RW
modes M_CFG
count 1
+ dont_generate_debug_code
}
/*
@@ -1290,6 +1334,7 @@ register ATTRPTR {
access_mode RW
modes M_CFG
count 1
+ dont_generate_debug_code
}
/*
@@ -1302,6 +1347,7 @@ register FLAGPTR {
access_mode RW
modes M_CFG
count 1
+ dont_generate_debug_code
}
/*
@@ -1313,6 +1359,7 @@ register CMDPTR {
access_mode RW
modes M_CFG
count 1
+ dont_generate_debug_code
}
/*
@@ -1324,6 +1371,7 @@ register QNEXTPTR {
access_mode RW
modes M_CFG
count 1
+ dont_generate_debug_code
}
/*
@@ -1347,6 +1395,7 @@ register ABRTBYTEPTR {
access_mode RW
modes M_CFG
count 1
+ dont_generate_debug_code
}
/*
@@ -1358,6 +1407,7 @@ register ABRTBITPTR {
access_mode RW
modes M_CFG
count 1
+ dont_generate_debug_code
}
/*
@@ -1398,6 +1448,7 @@ register LUNLEN {
count 2
mask ILUNLEN 0x0F
mask TLUNLEN 0xF0
+ dont_generate_debug_code
}
const LUNLEN_SINGLE_LEVEL_LUN 0xF
@@ -1410,6 +1461,7 @@ register CDBLIMIT {
access_mode RW
modes M_CFG
count 1
+ dont_generate_debug_code
}
/*
@@ -1422,6 +1474,7 @@ register MAXCMD {
access_mode RW
modes M_CFG
count 9
+ dont_generate_debug_code
}
/*
@@ -1432,6 +1485,7 @@ register MAXCMDCNT {
address 0x033
access_mode RW
modes M_CFG
+ dont_generate_debug_code
}
/*
@@ -1490,6 +1544,7 @@ register LQCTL1 {
field PCI2PCI 0x04
field SINGLECMD 0x02
field ABORTPENDING 0x01
+ dont_generate_debug_code
}
/*
@@ -1508,6 +1563,7 @@ register LQCTL2 {
field LQOCONTINUE 0x04
field LQOTOIDLE 0x02
field LQOPAUSE 0x01
+ dont_generate_debug_code
}
/*
@@ -1578,6 +1634,7 @@ register SXFRCTL0 {
field DFPEXP 0x40
field BIOSCANCELEN 0x10
field SPIOEN 0x08
+ dont_generate_debug_code
}
/*
@@ -1594,6 +1651,7 @@ register SXFRCTL1 {
field ENSTIMER 0x04
field ACTNEGEN 0x02
field STPWEN 0x01
+ dont_generate_debug_code
}
/*
@@ -1696,6 +1754,7 @@ register SCSISIGO {
P_STATUS CDO|IOO,
P_MESGIN CDO|IOO|MSGO
}
+ dont_generate_debug_code
}
/*
@@ -1738,6 +1797,7 @@ register MULTARGID {
modes M_CFG
size 2
count 2
+ dont_generate_debug_code
}
/*
@@ -1774,6 +1834,7 @@ register SCSIDAT {
access_mode RW
modes M_DFF0, M_DFF1, M_SCSI
size 2
+ dont_generate_debug_code
}
/*
@@ -1796,6 +1857,7 @@ register TARGIDIN {
count 2
field CLKOUT 0x80
field TARGID 0x0F
+ dont_generate_debug_code
}
/*
@@ -1825,6 +1887,7 @@ register SBLKCTL {
field ENAB40 0x08 /* LVD transceiver active */
field ENAB20 0x04 /* SE/HVD transceiver active */
field SELWIDE 0x02
+ dont_generate_debug_code
}
/*
@@ -1842,6 +1905,7 @@ register OPTIONMODE {
field ENDGFORMCHK 0x04
field AUTO_MSGOUT_DE 0x02
mask OPTIONMODE_DEFAULTS AUTO_MSGOUT_DE
+ dont_generate_debug_code
}
/*
@@ -1876,6 +1940,7 @@ register CLRSINT0 {
field CLROVERRUN 0x04
field CLRSPIORDY 0x02
field CLRARBDO 0x01
+ dont_generate_debug_code
}
/*
@@ -1929,6 +1994,7 @@ register CLRSINT1 {
field CLRSCSIPERR 0x04
field CLRSTRB2FAST 0x02
field CLRREQINIT 0x01
+ dont_generate_debug_code
}
/*
@@ -1962,6 +2028,7 @@ register CLRSINT2 {
field CLRWIDE_RES 0x04 /* Modes 0 and 1 only */
field CLRSDONE 0x02 /* Modes 0 and 1 only */
field CLRDMADONE 0x01 /* Modes 0 and 1 only */
+ dont_generate_debug_code
}
/*
@@ -2002,6 +2069,7 @@ register LQISTATE {
access_mode RO
modes M_CFG
count 6
+ dont_generate_debug_code
}
/*
@@ -2022,6 +2090,7 @@ register LQOSTATE {
access_mode RO
modes M_CFG
count 2
+ dont_generate_debug_code
}
/*
@@ -2054,6 +2123,7 @@ register CLRLQIINT0 {
field CLRLQIBADLQT 0x04
field CLRLQIATNLQ 0x02
field CLRLQIATNCMD 0x01
+ dont_generate_debug_code
}
/*
@@ -2070,6 +2140,7 @@ register LQIMODE0 {
field ENLQIBADLQT 0x04
field ENLQIATNLQ 0x02
field ENLQIATNCMD 0x01
+ dont_generate_debug_code
}
/*
@@ -2106,6 +2177,7 @@ register CLRLQIINT1 {
field CLRLQIBADLQI 0x04
field CLRLQIOVERI_LQ 0x02
field CLRLQIOVERI_NLQ 0x01
+ dont_generate_debug_code
}
/*
@@ -2124,6 +2196,7 @@ register LQIMODE1 {
field ENLQIBADLQI 0x04
field ENLQIOVERI_LQ 0x02 /* LQIOVERI1 */
field ENLQIOVERI_NLQ 0x01 /* LQIOVERI2 */
+ dont_generate_debug_code
}
/*
@@ -2165,6 +2238,7 @@ register CLRSINT3 {
count 3
field CLRNTRAMPERR 0x02
field CLROSRAMPERR 0x01
+ dont_generate_debug_code
}
/*
@@ -2177,6 +2251,7 @@ register SIMODE3 {
count 4
field ENNTRAMPERR 0x02
field ENOSRAMPERR 0x01
+ dont_generate_debug_code
}
/*
@@ -2207,6 +2282,7 @@ register CLRLQOINT0 {
field CLRLQOATNLQ 0x04
field CLRLQOATNPKT 0x02
field CLRLQOTCRC 0x01
+ dont_generate_debug_code
}
/*
@@ -2222,6 +2298,7 @@ register LQOMODE0 {
field ENLQOATNLQ 0x04
field ENLQOATNPKT 0x02
field ENLQOTCRC 0x01
+ dont_generate_debug_code
}
/*
@@ -2251,6 +2328,7 @@ register CLRLQOINT1 {
field CLRLQOBADQAS 0x04
field CLRLQOBUSFREE 0x02
field CLRLQOPHACHGINPKT 0x01
+ dont_generate_debug_code
}
/*
@@ -2266,6 +2344,7 @@ register LQOMODE1 {
field ENLQOBADQAS 0x04
field ENLQOBUSFREE 0x02
field ENLQOPHACHGINPKT 0x01
+ dont_generate_debug_code
}
/*
@@ -2289,6 +2368,7 @@ register OS_SPACE_CNT {
access_mode RO
modes M_CFG
count 2
+ dont_generate_debug_code
}
/*
@@ -2318,6 +2398,7 @@ register GSFIFO {
access_mode RO
size 2
modes M_DFF0, M_DFF1, M_SCSI
+ dont_generate_debug_code
}
/*
@@ -2341,6 +2422,7 @@ register NEXTSCB {
access_mode RW
size 2
modes M_SCSI
+ dont_generate_debug_code
}
/*
@@ -2357,6 +2439,7 @@ register LQOSCSCTL {
field LQOBUSETDLY 0x40
field LQONOHOLDLACK 0x02
field LQONOCHKOVER 0x01
+ dont_generate_debug_code
}
/*
@@ -2389,6 +2472,7 @@ register CLRSEQINTSRC {
field CLRCFG4TSTAT 0x04
field CLRCFG4ICMD 0x02
field CLRCFG4TCMD 0x01
+ dont_generate_debug_code
}
/*
@@ -2415,6 +2499,7 @@ register CURRSCB {
access_mode RW
size 2
modes M_SCSI
+ dont_generate_debug_code
}
/*
@@ -2472,6 +2557,7 @@ register LASTSCB {
access_mode RW
size 2
modes M_SCSI
+ dont_generate_debug_code
}
/*
@@ -2494,6 +2580,7 @@ register SHADDR {
access_mode RO
size 8
modes M_DFF0, M_DFF1
+ dont_generate_debug_code
}
/*
@@ -2513,6 +2600,7 @@ register NEGOADDR {
address 0x060
access_mode RW
modes M_SCSI
+ dont_generate_debug_code
}
/*
@@ -2523,6 +2611,7 @@ register NEGPERIOD {
access_mode RW
modes M_SCSI
count 1
+ dont_generate_debug_code
}
/*
@@ -2543,6 +2632,7 @@ register NEGOFFSET {
access_mode RW
modes M_SCSI
count 1
+ dont_generate_debug_code
}
/*
@@ -2557,6 +2647,7 @@ register NEGPPROPTS {
field PPROPT_QAS 0x04
field PPROPT_DT 0x02
field PPROPT_IUT 0x01
+ dont_generate_debug_code
}
/*
@@ -2573,6 +2664,7 @@ register NEGCONOPTS {
field ENAUTOATNI 0x04
field ENAUTOATNO 0x02
field WIDEXFER 0x01
+ dont_generate_debug_code
}
/*
@@ -2583,6 +2675,7 @@ register ANNEXCOL {
access_mode RW
modes M_SCSI
count 7
+ dont_generate_debug_code
}
/*
@@ -2602,6 +2695,7 @@ register SCSCHKN {
field DFFACTCLR 0x04
field SHVALIDSTDIS 0x02
field LSTSGCLRDIS 0x01
+ dont_generate_debug_code
}
const AHD_ANNEXCOL_PER_DEV0 4
@@ -2635,6 +2729,7 @@ register ANNEXDAT {
access_mode RW
modes M_SCSI
count 3
+ dont_generate_debug_code
}
/*
@@ -2645,6 +2740,7 @@ register IOWNID {
address 0x067
access_mode RW
modes M_SCSI
+ dont_generate_debug_code
}
/*
@@ -2671,6 +2767,7 @@ register TOWNID {
access_mode RW
modes M_SCSI
count 2
+ dont_generate_debug_code
}
/*
@@ -2702,6 +2799,7 @@ register SHCNT {
access_mode RW
size 3
modes M_DFF0, M_DFF1
+ dont_generate_debug_code
}
/*
@@ -2789,6 +2887,7 @@ register SCBPTR {
access_mode RW
size 2
modes M_DFF0, M_DFF1, M_CCHAN, M_SCSI
+ dont_generate_debug_code
}
/*
@@ -2816,6 +2915,7 @@ register SCBAUTOPTR {
field AUSCBPTR_EN 0x80
field SCBPTR_ADDR 0x38
field SCBPTR_OFF 0x07
+ dont_generate_debug_code
}
/*
@@ -2825,6 +2925,7 @@ register CCSGADDR {
address 0x0AC
access_mode RW
modes M_DFF0, M_DFF1
+ dont_generate_debug_code
}
/*
@@ -2834,6 +2935,7 @@ register CCSCBADDR {
address 0x0AC
access_mode RW
modes M_CCHAN
+ dont_generate_debug_code
}
/*
@@ -2899,6 +3001,7 @@ register CCSGRAM {
address 0x0B0
access_mode RW
modes M_DFF0, M_DFF1
+ dont_generate_debug_code
}
/*
@@ -2908,6 +3011,7 @@ register CCSCBRAM {
address 0x0B0
access_mode RW
modes M_CCHAN
+ dont_generate_debug_code
}
/*
@@ -2958,6 +3062,7 @@ register BRDDAT {
access_mode RW
modes M_SCSI
count 2
+ dont_generate_debug_code
}
/*
@@ -2974,6 +3079,7 @@ register BRDCTL {
field BRDEN 0x04
field BRDRW 0x02
field BRDSTB 0x01
+ dont_generate_debug_code
}
/*
@@ -2984,6 +3090,7 @@ register SEEADR {
access_mode RW
modes M_SCSI
count 4
+ dont_generate_debug_code
}
/*
@@ -2995,6 +3102,7 @@ register SEEDAT {
size 2
modes M_SCSI
count 4
+ dont_generate_debug_code
}
/*
@@ -3011,6 +3119,7 @@ register SEESTAT {
field SEEARBACK 0x04
field SEEBUSY 0x02
field SEESTART 0x01
+ dont_generate_debug_code
}
/*
@@ -3036,6 +3145,7 @@ register SEECTL {
mask SEEOP_EWDS 0x40
field SEERST 0x02
field SEESTART 0x01
+ dont_generate_debug_code
}
const SEEOP_ERAL_ADDR 0x80
@@ -3050,6 +3160,7 @@ register SCBCNT {
address 0x0BF
access_mode RW
modes M_SCSI
+ dont_generate_debug_code
}
/*
@@ -3061,6 +3172,7 @@ register DFWADDR {
access_mode RW
size 2
modes M_DFF0, M_DFF1
+ dont_generate_debug_code
}
/*
@@ -3087,6 +3199,7 @@ register DSPDATACTL {
field DESQDIS 0x10
field RCVROFFSTDIS 0x04
field XMITOFFSTDIS 0x02
+ dont_generate_debug_code
}
/*
@@ -3132,6 +3245,7 @@ register DFDAT {
address 0x0C4
access_mode RW
modes M_DFF0, M_DFF1
+ dont_generate_debug_code
}
/*
@@ -3144,6 +3258,7 @@ register DSPSELECT {
count 1
field AUTOINCEN 0x80
field DSPSEL 0x1F
+ dont_generate_debug_code
}
const NUMDSPS 0x14
@@ -3158,6 +3273,7 @@ register WRTBIASCTL {
count 3
field AUTOXBCDIS 0x80
field XMITMANVAL 0x3F
+ dont_generate_debug_code
}
/*
@@ -3316,6 +3432,7 @@ register FLAGS {
count 23
field ZERO 0x02
field CARRY 0x01
+ dont_generate_debug_code
}
/*
@@ -3344,6 +3461,7 @@ register SEQRAM {
address 0x0DA
access_mode RW
count 2
+ dont_generate_debug_code
}
/*
@@ -3355,6 +3473,7 @@ register PRGMCNT {
access_mode RW
size 2
count 5
+ dont_generate_debug_code
}
/*
@@ -3364,6 +3483,7 @@ register ACCUM {
address 0x0E0
access_mode RW
accumulator
+ dont_generate_debug_code
}
/*
@@ -3380,6 +3500,7 @@ register SINDEX {
access_mode RW
size 2
sindex
+ dont_generate_debug_code
}
/*
@@ -3390,6 +3511,7 @@ register DINDEX {
address 0x0E4
access_mode RW
size 2
+ dont_generate_debug_code
}
/*
@@ -3415,6 +3537,7 @@ register ALLONES {
address 0x0E8
access_mode RO
allones
+ dont_generate_debug_code
}
/*
@@ -3425,6 +3548,7 @@ register ALLZEROS {
address 0x0EA
access_mode RO
allzeros
+ dont_generate_debug_code
}
/*
@@ -3435,6 +3559,7 @@ register NONE {
address 0x0EA
access_mode WO
none
+ dont_generate_debug_code
}
/*
@@ -3445,6 +3570,7 @@ register NONE {
register SINDIR {
address 0x0EC
access_mode RO
+ dont_generate_debug_code
}
/*
@@ -3455,6 +3581,7 @@ register SINDIR {
register DINDIR {
address 0x0ED
access_mode WO
+ dont_generate_debug_code
}
/*
@@ -3479,6 +3606,7 @@ register FUNCTION1 {
register STACK {
address 0x0F2
access_mode RW
+ dont_generate_debug_code
}
/*
@@ -3491,6 +3619,7 @@ register INTVEC1_ADDR {
size 2
modes M_CFG
count 1
+ dont_generate_debug_code
}
/*
@@ -3503,6 +3632,7 @@ register CURADDR {
size 2
modes M_SCSI
count 2
+ dont_generate_debug_code
}
/*
@@ -3515,6 +3645,7 @@ register INTVEC2_ADDR {
size 2
modes M_CFG
count 1
+ dont_generate_debug_code
}
/*
@@ -3543,12 +3674,14 @@ scratch_ram {
modes 0, 1, 2, 3
REG0 {
size 2
+ dont_generate_debug_code
}
REG1 {
size 2
}
REG_ISR {
size 2
+ dont_generate_debug_code
}
SG_STATE {
size 1
@@ -3572,9 +3705,11 @@ scratch_ram {
modes 0, 1, 2, 3
LONGJMP_ADDR {
size 2
+ dont_generate_debug_code
}
ACCUM_SAVE {
size 1
+ dont_generate_debug_code
}
}
@@ -3591,18 +3726,22 @@ scratch_ram {
*/
WAITING_SCB_TAILS {
size 32
+ dont_generate_debug_code
}
WAITING_TID_HEAD {
size 2
+ dont_generate_debug_code
}
WAITING_TID_TAIL {
size 2
+ dont_generate_debug_code
}
/*
* SCBID of the next SCB in the new SCB queue.
*/
NEXT_QUEUED_SCB_ADDR {
size 4
+ dont_generate_debug_code
}
/*
* head of list of SCBs that have
@@ -3611,6 +3750,7 @@ scratch_ram {
*/
COMPLETE_SCB_HEAD {
size 2
+ dont_generate_debug_code
}
/*
* The list of completed SCBs in
@@ -3618,6 +3758,7 @@ scratch_ram {
*/
COMPLETE_SCB_DMAINPROG_HEAD {
size 2
+ dont_generate_debug_code
}
/*
* head of list of SCBs that have
@@ -3626,6 +3767,7 @@ scratch_ram {
*/
COMPLETE_DMA_SCB_HEAD {
size 2
+ dont_generate_debug_code
}
/*
* tail of list of SCBs that have
@@ -3634,6 +3776,7 @@ scratch_ram {
*/
COMPLETE_DMA_SCB_TAIL {
size 2
+ dont_generate_debug_code
}
/*
* head of list of SCBs that have
@@ -3643,6 +3786,7 @@ scratch_ram {
*/
COMPLETE_ON_QFREEZE_HEAD {
size 2
+ dont_generate_debug_code
}
/*
* Counting semaphore to prevent new select-outs
@@ -3667,6 +3811,7 @@ scratch_ram {
*/
MSG_OUT {
size 1
+ dont_generate_debug_code
}
/* Parameters for DMA Logic */
DMAPARAMS {
@@ -3682,6 +3827,7 @@ scratch_ram {
field DIRECTION 0x04 /* Set indicates PCI->SCSI */
field FIFOFLUSH 0x02
field FIFORESET 0x01
+ dont_generate_debug_code
}
SEQ_FLAGS {
size 1
@@ -3703,9 +3849,11 @@ scratch_ram {
*/
SAVED_SCSIID {
size 1
+ dont_generate_debug_code
}
SAVED_LUN {
size 1
+ dont_generate_debug_code
}
/*
* The last bus phase as seen by the sequencer.
@@ -3733,6 +3881,7 @@ scratch_ram {
*/
QOUTFIFO_ENTRY_VALID_TAG {
size 1
+ dont_generate_debug_code
}
/*
* Kernel and sequencer offsets into the queue of
@@ -3742,10 +3891,12 @@ scratch_ram {
KERNEL_TQINPOS {
size 1
count 1
+ dont_generate_debug_code
}
TQINPOS {
size 1
count 8
+ dont_generate_debug_code
}
/*
* Base address of our shared data with the kernel driver in host
@@ -3754,6 +3905,7 @@ scratch_ram {
*/
SHARED_DATA_ADDR {
size 4
+ dont_generate_debug_code
}
/*
* Pointer to location in host memory for next
@@ -3761,6 +3913,7 @@ scratch_ram {
*/
QOUTFIFO_NEXT_ADDR {
size 4
+ dont_generate_debug_code
}
ARG_1 {
size 1
@@ -3773,11 +3926,13 @@ scratch_ram {
mask CONT_MSG_LOOP_READ 0x03
mask CONT_MSG_LOOP_TARG 0x02
alias RETURN_1
+ dont_generate_debug_code
}
ARG_2 {
size 1
count 1
alias RETURN_2
+ dont_generate_debug_code
}
/*
@@ -3785,6 +3940,7 @@ scratch_ram {
*/
LAST_MSG {
size 1
+ dont_generate_debug_code
}
/*
@@ -3801,6 +3957,7 @@ scratch_ram {
field MANUALP 0x0C
field ENAUTOATNP 0x02
field ALTSTIM 0x01
+ dont_generate_debug_code
}
/*
@@ -3809,6 +3966,7 @@ scratch_ram {
INITIATOR_TAG {
size 1
count 1
+ dont_generate_debug_code
}
SEQ_FLAGS2 {
@@ -3820,6 +3978,7 @@ scratch_ram {
ALLOCFIFO_SCBPTR {
size 2
+ dont_generate_debug_code
}
/*
@@ -3829,6 +3988,7 @@ scratch_ram {
*/
INT_COALESCING_TIMER {
size 2
+ dont_generate_debug_code
}
/*
@@ -3838,6 +3998,7 @@ scratch_ram {
*/
INT_COALESCING_MAXCMDS {
size 1
+ dont_generate_debug_code
}
/*
@@ -3846,6 +4007,7 @@ scratch_ram {
*/
INT_COALESCING_MINCMDS {
size 1
+ dont_generate_debug_code
}
/*
@@ -3853,6 +4015,7 @@ scratch_ram {
*/
CMDS_PENDING {
size 2
+ dont_generate_debug_code
}
/*
@@ -3860,6 +4023,7 @@ scratch_ram {
*/
INT_COALESCING_CMDCOUNT {
size 1
+ dont_generate_debug_code
}
/*
@@ -3868,6 +4032,7 @@ scratch_ram {
*/
LOCAL_HS_MAILBOX {
size 1
+ dont_generate_debug_code
}
/*
* Target-mode CDB type to CDB length table used
@@ -3876,6 +4041,7 @@ scratch_ram {
CMDSIZE_TABLE {
size 8
count 8
+ dont_generate_debug_code
}
/*
* When an SCB with the MK_MESSAGE flag is
@@ -3908,25 +4074,31 @@ scb {
size 4
alias SCB_CDB_STORE
alias SCB_HOST_CDB_PTR
+ dont_generate_debug_code
}
SCB_RESIDUAL_SGPTR {
size 4
field SG_ADDR_MASK 0xf8 /* In the last byte */
field SG_OVERRUN_RESID 0x02 /* In the first byte */
field SG_LIST_NULL 0x01 /* In the first byte */
+ dont_generate_debug_code
}
SCB_SCSI_STATUS {
size 1
alias SCB_HOST_CDB_LEN
+ dont_generate_debug_code
}
SCB_TARGET_PHASES {
size 1
+ dont_generate_debug_code
}
SCB_TARGET_DATA_DIR {
size 1
+ dont_generate_debug_code
}
SCB_TARGET_ITAG {
size 1
+ dont_generate_debug_code
}
SCB_SENSE_BUSADDR {
/*
@@ -3936,10 +4108,12 @@ scb {
*/
size 4
alias SCB_NEXT_COMPLETE
+ dont_generate_debug_code
}
SCB_TAG {
alias SCB_FIFO_USE_COUNT
size 2
+ dont_generate_debug_code
}
SCB_CONTROL {
size 1
@@ -3959,6 +4133,7 @@ scb {
SCB_LUN {
size 1
field LID 0xff
+ dont_generate_debug_code
}
SCB_TASK_ATTRIBUTE {
size 1
@@ -3967,16 +4142,20 @@ scb {
* ignore wide residue message handling.
*/
field SCB_XFERLEN_ODD 0x01
+ dont_generate_debug_code
}
SCB_CDB_LEN {
size 1
field SCB_CDB_LEN_PTR 0x80 /* CDB in host memory */
+ dont_generate_debug_code
}
SCB_TASK_MANAGEMENT {
size 1
+ dont_generate_debug_code
}
SCB_DATAPTR {
size 8
+ dont_generate_debug_code
}
SCB_DATACNT {
/*
@@ -3986,22 +4165,27 @@ scb {
size 4
field SG_LAST_SEG 0x80 /* In the fourth byte */
field SG_HIGH_ADDR_BITS 0x7F /* In the fourth byte */
+ dont_generate_debug_code
}
SCB_SGPTR {
size 4
field SG_STATUS_VALID 0x04 /* In the first byte */
field SG_FULL_RESID 0x02 /* In the first byte */
field SG_LIST_NULL 0x01 /* In the first byte */
+ dont_generate_debug_code
}
SCB_BUSADDR {
size 4
+ dont_generate_debug_code
}
SCB_NEXT {
alias SCB_NEXT_SCB_BUSADDR
size 2
+ dont_generate_debug_code
}
SCB_NEXT2 {
size 2
+ dont_generate_debug_code
}
SCB_SPARE {
size 8
@@ -4009,6 +4193,7 @@ scb {
}
SCB_DISCONNECTED_LISTS {
size 8
+ dont_generate_debug_code
}
}
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 55508b0fcec4..bdad54ec088c 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -2472,8 +2472,6 @@ ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0)
ahd_outb(ahd, CLRLQOINT1, 0);
} else if ((status & SELTO) != 0) {
- u_int scbid;
-
/* Stop the selection */
ahd_outb(ahd, SCSISEQ0, 0);
@@ -2583,9 +2581,6 @@ ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
case BUSFREE_DFF0:
case BUSFREE_DFF1:
{
- u_int scbid;
- struct scb *scb;
-
mode = busfreetime == BUSFREE_DFF0
? AHD_MODE_DFF0 : AHD_MODE_DFF1;
ahd_set_modes(ahd, mode, mode);
@@ -3689,7 +3684,7 @@ ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force)
* by the capabilities of the bus connectivity of and sync settings for
* the target.
*/
-void
+static void
ahd_devlimited_syncrate(struct ahd_softc *ahd,
struct ahd_initiator_tinfo *tinfo,
u_int *period, u_int *ppr_options, role_t role)
@@ -4136,7 +4131,7 @@ ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
/*
* Harpoon2A assumed that there would be a
- * fallback rate between 160MHz and 80Mhz,
+ * fallback rate between 160MHz and 80MHz,
* so 7 is used as the period factor rather
* than 8 for 160MHz.
*/
@@ -8708,7 +8703,7 @@ ahd_reset_current_bus(struct ahd_softc *ahd)
int
ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
{
- struct ahd_devinfo devinfo;
+ struct ahd_devinfo caminfo;
u_int initiator;
u_int target;
u_int max_scsiid;
@@ -8729,7 +8724,7 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
ahd->pending_device = NULL;
- ahd_compile_devinfo(&devinfo,
+ ahd_compile_devinfo(&caminfo,
CAM_TARGET_WILDCARD,
CAM_TARGET_WILDCARD,
CAM_LUN_WILDCARD,
@@ -8868,7 +8863,7 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
}
/* Notify the XPT that a bus reset occurred */
- ahd_send_async(ahd, devinfo.channel, CAM_TARGET_WILDCARD,
+ ahd_send_async(ahd, caminfo.channel, CAM_TARGET_WILDCARD,
CAM_LUN_WILDCARD, AC_BUS_RESET);
ahd_restart(ahd);
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index c25b6adffbf9..a734d77e880e 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -223,10 +223,10 @@ static const char *pci_bus_modes[] =
"PCI bus mode unknown",
"PCI bus mode unknown",
"PCI bus mode unknown",
- "PCI-X 101-133Mhz",
- "PCI-X 67-100Mhz",
- "PCI-X 50-66Mhz",
- "PCI 33 or 66Mhz"
+ "PCI-X 101-133MHz",
+ "PCI-X 67-100MHz",
+ "PCI-X 50-66MHz",
+ "PCI 33 or 66MHz"
};
#define TESTMODE 0x00000800ul
@@ -337,8 +337,6 @@ ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
* 64bit bus (PCI64BIT set in devconfig).
*/
if ((ahd->flags & (AHD_39BIT_ADDRESSING|AHD_64BIT_ADDRESSING)) != 0) {
- uint32_t devconfig;
-
if (bootverbose)
printf("%s: Enabling 39Bit Addressing\n",
ahd_name(ahd));
@@ -483,8 +481,6 @@ ahd_pci_test_register_access(struct ahd_softc *ahd)
goto fail;
if ((ahd_inb(ahd, INTSTAT) & PCIINT) != 0) {
- u_int targpcistat;
-
ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
targpcistat = ahd_inb(ahd, TARGPCISTAT);
if ((targpcistat & STA) != 0)
diff --git a/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped b/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
index c21ceab8e913..cdcead071ef6 100644
--- a/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
+++ b/drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
@@ -34,13 +34,6 @@ ahd_reg_print_t ahd_seqintcode_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrint_print;
-#else
-#define ahd_clrint_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CLRINT", 0x03, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_error_print;
#else
#define ahd_error_print(regvalue, cur_col, wrap) \
@@ -48,20 +41,6 @@ ahd_reg_print_t ahd_error_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_hcntrl_print;
-#else
-#define ahd_hcntrl_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "HCNTRL", 0x05, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_hnscb_qoff_print;
-#else
-#define ahd_hnscb_qoff_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "HNSCB_QOFF", 0x06, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_hescb_qoff_print;
#else
#define ahd_hescb_qoff_print(regvalue, cur_col, wrap) \
@@ -97,13 +76,6 @@ ahd_reg_print_t ahd_swtimer_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_snscb_qoff_print;
-#else
-#define ahd_snscb_qoff_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SNSCB_QOFF", 0x10, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_sescb_qoff_print;
#else
#define ahd_sescb_qoff_print(regvalue, cur_col, wrap) \
@@ -111,20 +83,6 @@ ahd_reg_print_t ahd_sescb_qoff_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sdscb_qoff_print;
-#else
-#define ahd_sdscb_qoff_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SDSCB_QOFF", 0x14, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_qoff_ctlsta_print;
-#else
-#define ahd_qoff_ctlsta_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "QOFF_CTLSTA", 0x16, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_intctl_print;
#else
#define ahd_intctl_print(regvalue, cur_col, wrap) \
@@ -139,13 +97,6 @@ ahd_reg_print_t ahd_dfcntrl_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dscommand0_print;
-#else
-#define ahd_dscommand0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DSCOMMAND0", 0x19, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_dfstatus_print;
#else
#define ahd_dfstatus_print(regvalue, cur_col, wrap) \
@@ -160,13 +111,6 @@ ahd_reg_print_t ahd_sg_cache_shadow_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sg_cache_pre_print;
-#else
-#define ahd_sg_cache_pre_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SG_CACHE_PRE", 0x1b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_lqin_print;
#else
#define ahd_lqin_print(regvalue, cur_col, wrap) \
@@ -293,13 +237,6 @@ ahd_reg_print_t ahd_sxfrctl0_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sxfrctl1_print;
-#else
-#define ahd_sxfrctl1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SXFRCTL1", 0x3d, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_dffstat_print;
#else
#define ahd_dffstat_print(regvalue, cur_col, wrap) \
@@ -314,13 +251,6 @@ ahd_reg_print_t ahd_multargid_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scsisigo_print;
-#else
-#define ahd_scsisigo_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCSISIGO", 0x40, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_scsisigi_print;
#else
#define ahd_scsisigi_print(regvalue, cur_col, wrap) \
@@ -363,13 +293,6 @@ ahd_reg_print_t ahd_selid_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_optionmode_print;
-#else
-#define ahd_optionmode_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "OPTIONMODE", 0x4a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_sblkctl_print;
#else
#define ahd_sblkctl_print(regvalue, cur_col, wrap) \
@@ -391,13 +314,6 @@ ahd_reg_print_t ahd_simode0_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrsint0_print;
-#else
-#define ahd_clrsint0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CLRSINT0", 0x4b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_sstat1_print;
#else
#define ahd_sstat1_print(regvalue, cur_col, wrap) \
@@ -405,13 +321,6 @@ ahd_reg_print_t ahd_sstat1_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrsint1_print;
-#else
-#define ahd_clrsint1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CLRSINT1", 0x4c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_sstat2_print;
#else
#define ahd_sstat2_print(regvalue, cur_col, wrap) \
@@ -461,17 +370,17 @@ ahd_reg_print_t ahd_lqistat0_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqimode0_print;
+ahd_reg_print_t ahd_clrlqiint0_print;
#else
-#define ahd_lqimode0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "LQIMODE0", 0x50, regvalue, cur_col, wrap)
+#define ahd_clrlqiint0_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CLRLQIINT0", 0x50, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrlqiint0_print;
+ahd_reg_print_t ahd_lqimode0_print;
#else
-#define ahd_clrlqiint0_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CLRLQIINT0", 0x50, regvalue, cur_col, wrap)
+#define ahd_lqimode0_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "LQIMODE0", 0x50, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
@@ -629,17 +538,17 @@ ahd_reg_print_t ahd_seqintsrc_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_seqimode_print;
+ahd_reg_print_t ahd_currscb_print;
#else
-#define ahd_seqimode_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SEQIMODE", 0x5c, regvalue, cur_col, wrap)
+#define ahd_currscb_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "CURRSCB", 0x5c, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_currscb_print;
+ahd_reg_print_t ahd_seqimode_print;
#else
-#define ahd_currscb_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CURRSCB", 0x5c, regvalue, cur_col, wrap)
+#define ahd_seqimode_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SEQIMODE", 0x5c, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
@@ -657,13 +566,6 @@ ahd_reg_print_t ahd_lastscb_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_shaddr_print;
-#else
-#define ahd_shaddr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SHADDR", 0x60, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_negoaddr_print;
#else
#define ahd_negoaddr_print(regvalue, cur_col, wrap) \
@@ -748,27 +650,6 @@ ahd_reg_print_t ahd_seloid_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_haddr_print;
-#else
-#define ahd_haddr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "HADDR", 0x70, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_hcnt_print;
-#else
-#define ahd_hcnt_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "HCNT", 0x78, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sghaddr_print;
-#else
-#define ahd_sghaddr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SGHADDR", 0x7c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_scbhaddr_print;
#else
#define ahd_scbhaddr_print(regvalue, cur_col, wrap) \
@@ -776,10 +657,10 @@ ahd_reg_print_t ahd_scbhaddr_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sghcnt_print;
+ahd_reg_print_t ahd_sghaddr_print;
#else
-#define ahd_sghcnt_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SGHCNT", 0x84, regvalue, cur_col, wrap)
+#define ahd_sghaddr_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SGHADDR", 0x7c, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
@@ -790,10 +671,10 @@ ahd_reg_print_t ahd_scbhcnt_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dff_thrsh_print;
+ahd_reg_print_t ahd_sghcnt_print;
#else
-#define ahd_dff_thrsh_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DFF_THRSH", 0x88, regvalue, cur_col, wrap)
+#define ahd_sghcnt_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SGHCNT", 0x84, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
@@ -867,13 +748,6 @@ ahd_reg_print_t ahd_targpcistat_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scbptr_print;
-#else
-#define ahd_scbptr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCBPTR", 0xa8, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_scbautoptr_print;
#else
#define ahd_scbautoptr_print(regvalue, cur_col, wrap) \
@@ -881,13 +755,6 @@ ahd_reg_print_t ahd_scbautoptr_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ccsgaddr_print;
-#else
-#define ahd_ccsgaddr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CCSGADDR", 0xac, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_ccscbaddr_print;
#else
#define ahd_ccscbaddr_print(regvalue, cur_col, wrap) \
@@ -909,13 +776,6 @@ ahd_reg_print_t ahd_ccsgctl_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ccsgram_print;
-#else
-#define ahd_ccsgram_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "CCSGRAM", 0xb0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_ccscbram_print;
#else
#define ahd_ccscbram_print(regvalue, cur_col, wrap) \
@@ -930,13 +790,6 @@ ahd_reg_print_t ahd_brddat_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_brdctl_print;
-#else
-#define ahd_brdctl_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "BRDCTL", 0xb9, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_seeadr_print;
#else
#define ahd_seeadr_print(regvalue, cur_col, wrap) \
@@ -972,13 +825,6 @@ ahd_reg_print_t ahd_dspdatactl_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dfdat_print;
-#else
-#define ahd_dfdat_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DFDAT", 0xc4, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_dspselect_print;
#else
#define ahd_dspselect_print(regvalue, cur_col, wrap) \
@@ -1000,13 +846,6 @@ ahd_reg_print_t ahd_seqctl0_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_flags_print;
-#else
-#define ahd_flags_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "FLAGS", 0xd8, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_seqintctl_print;
#else
#define ahd_seqintctl_print(regvalue, cur_col, wrap) \
@@ -1014,13 +853,6 @@ ahd_reg_print_t ahd_seqintctl_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_seqram_print;
-#else
-#define ahd_seqram_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SEQRAM", 0xda, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_prgmcnt_print;
#else
#define ahd_prgmcnt_print(regvalue, cur_col, wrap) \
@@ -1028,41 +860,6 @@ ahd_reg_print_t ahd_prgmcnt_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_accum_print;
-#else
-#define ahd_accum_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "ACCUM", 0xe0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sindex_print;
-#else
-#define ahd_sindex_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SINDEX", 0xe2, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dindex_print;
-#else
-#define ahd_dindex_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DINDEX", 0xe4, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_allones_print;
-#else
-#define ahd_allones_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "ALLONES", 0xe8, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_allzeros_print;
-#else
-#define ahd_allzeros_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "ALLZEROS", 0xea, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_none_print;
#else
#define ahd_none_print(regvalue, cur_col, wrap) \
@@ -1070,27 +867,6 @@ ahd_reg_print_t ahd_none_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sindir_print;
-#else
-#define ahd_sindir_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SINDIR", 0xec, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dindir_print;
-#else
-#define ahd_dindir_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DINDIR", 0xed, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_stack_print;
-#else
-#define ahd_stack_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "STACK", 0xf2, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_intvec1_addr_print;
#else
#define ahd_intvec1_addr_print(regvalue, cur_col, wrap) \
@@ -1126,17 +902,17 @@ ahd_reg_print_t ahd_accum_save_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sram_base_print;
+ahd_reg_print_t ahd_waiting_scb_tails_print;
#else
-#define ahd_sram_base_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SRAM_BASE", 0x100, regvalue, cur_col, wrap)
+#define ahd_waiting_scb_tails_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "WAITING_SCB_TAILS", 0x100, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_waiting_scb_tails_print;
+ahd_reg_print_t ahd_sram_base_print;
#else
-#define ahd_waiting_scb_tails_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "WAITING_SCB_TAILS", 0x100, regvalue, cur_col, wrap)
+#define ahd_sram_base_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SRAM_BASE", 0x100, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
@@ -1224,13 +1000,6 @@ ahd_reg_print_t ahd_msg_out_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dmaparams_print;
-#else
-#define ahd_dmaparams_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "DMAPARAMS", 0x138, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_seq_flags_print;
#else
#define ahd_seq_flags_print(regvalue, cur_col, wrap) \
@@ -1238,20 +1007,6 @@ ahd_reg_print_t ahd_seq_flags_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_saved_scsiid_print;
-#else
-#define ahd_saved_scsiid_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SAVED_SCSIID", 0x13a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_saved_lun_print;
-#else
-#define ahd_saved_lun_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SAVED_LUN", 0x13b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_lastphase_print;
#else
#define ahd_lastphase_print(regvalue, cur_col, wrap) \
@@ -1273,20 +1028,6 @@ ahd_reg_print_t ahd_kernel_tqinpos_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_tqinpos_print;
-#else
-#define ahd_tqinpos_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "TQINPOS", 0x13f, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_shared_data_addr_print;
-#else
-#define ahd_shared_data_addr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SHARED_DATA_ADDR", 0x140, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_qoutfifo_next_addr_print;
#else
#define ahd_qoutfifo_next_addr_print(regvalue, cur_col, wrap) \
@@ -1294,20 +1035,6 @@ ahd_reg_print_t ahd_qoutfifo_next_addr_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_arg_1_print;
-#else
-#define ahd_arg_1_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "ARG_1", 0x148, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_arg_2_print;
-#else
-#define ahd_arg_2_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "ARG_2", 0x149, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_last_msg_print;
#else
#define ahd_last_msg_print(regvalue, cur_col, wrap) \
@@ -1406,13 +1133,6 @@ ahd_reg_print_t ahd_mk_message_scsiid_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_residual_datacnt_print;
-#else
-#define ahd_scb_residual_datacnt_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT", 0x180, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_scb_base_print;
#else
#define ahd_scb_base_print(regvalue, cur_col, wrap) \
@@ -1420,17 +1140,10 @@ ahd_reg_print_t ahd_scb_base_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_residual_sgptr_print;
-#else
-#define ahd_scb_residual_sgptr_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCB_RESIDUAL_SGPTR", 0x184, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_scsi_status_print;
+ahd_reg_print_t ahd_scb_residual_datacnt_print;
#else
-#define ahd_scb_scsi_status_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCB_SCSI_STATUS", 0x188, regvalue, cur_col, wrap)
+#define ahd_scb_residual_datacnt_print(regvalue, cur_col, wrap) \
+ ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT", 0x180, regvalue, cur_col, wrap)
#endif
#if AIC_DEBUG_REGISTERS
@@ -1476,13 +1189,6 @@ ahd_reg_print_t ahd_scb_task_attribute_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_cdb_len_print;
-#else
-#define ahd_scb_cdb_len_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCB_CDB_LEN", 0x196, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_scb_task_management_print;
#else
#define ahd_scb_task_management_print(regvalue, cur_col, wrap) \
@@ -1518,13 +1224,6 @@ ahd_reg_print_t ahd_scb_busaddr_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_next_print;
-#else
-#define ahd_scb_next_print(regvalue, cur_col, wrap) \
- ahd_print_register(NULL, 0, "SCB_NEXT", 0x1ac, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahd_reg_print_t ahd_scb_next2_print;
#else
#define ahd_scb_next2_print(regvalue, cur_col, wrap) \
@@ -1717,10 +1416,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define SG_CACHE_PRE 0x1b
-#define TYPEPTR 0x20
-
#define LQIN 0x20
+#define TYPEPTR 0x20
+
#define TAGPTR 0x21
#define LUNPTR 0x22
@@ -1780,6 +1479,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define SINGLECMD 0x02
#define ABORTPENDING 0x01
+#define SCSBIST0 0x39
+#define GSBISTERR 0x40
+#define GSBISTDONE 0x20
+#define GSBISTRUN 0x10
+#define OSBISTERR 0x04
+#define OSBISTDONE 0x02
+#define OSBISTRUN 0x01
+
#define LQCTL2 0x39
#define LQIRETRY 0x80
#define LQICONTINUE 0x40
@@ -1790,13 +1497,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define LQOTOIDLE 0x02
#define LQOPAUSE 0x01
-#define SCSBIST0 0x39
-#define GSBISTERR 0x40
-#define GSBISTDONE 0x20
-#define GSBISTRUN 0x10
-#define OSBISTERR 0x04
-#define OSBISTDONE 0x02
-#define OSBISTRUN 0x01
+#define SCSBIST1 0x3a
+#define NTBISTERR 0x04
+#define NTBISTDONE 0x02
+#define NTBISTRUN 0x01
#define SCSISEQ0 0x3a
#define TEMODEO 0x80
@@ -1805,15 +1509,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define FORCEBUSFREE 0x10
#define SCSIRSTO 0x01
-#define SCSBIST1 0x3a
-#define NTBISTERR 0x04
-#define NTBISTDONE 0x02
-#define NTBISTRUN 0x01
-
#define SCSISEQ1 0x3b
-#define BUSINITID 0x3c
-
#define SXFRCTL0 0x3c
#define DFON 0x80
#define DFPEXP 0x40
@@ -1822,6 +1519,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define DLCOUNT 0x3c
+#define BUSINITID 0x3c
+
#define SXFRCTL1 0x3d
#define BITBUCKET 0x80
#define ENSACHK 0x40
@@ -1846,8 +1545,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define CURRFIFO_1 0x01
#define CURRFIFO_0 0x00
-#define MULTARGID 0x40
-
#define SCSISIGO 0x40
#define CDO 0x80
#define IOO 0x40
@@ -1858,6 +1555,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define REQO 0x02
#define ACKO 0x01
+#define MULTARGID 0x40
+
#define SCSISIGI 0x41
#define ATNI 0x10
#define SELI 0x08
@@ -1904,6 +1603,15 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define ENAB20 0x04
#define SELWIDE 0x02
+#define CLRSINT0 0x4b
+#define CLRSELDO 0x40
+#define CLRSELDI 0x20
+#define CLRSELINGO 0x10
+#define CLRIOERR 0x08
+#define CLROVERRUN 0x04
+#define CLRSPIORDY 0x02
+#define CLRARBDO 0x01
+
#define SSTAT0 0x4b
#define TARGET 0x80
#define SELDO 0x40
@@ -1923,14 +1631,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define ENSPIORDY 0x02
#define ENARBDO 0x01
-#define CLRSINT0 0x4b
-#define CLRSELDO 0x40
-#define CLRSELDI 0x20
-#define CLRSELINGO 0x10
-#define CLRIOERR 0x08
-#define CLROVERRUN 0x04
-#define CLRSPIORDY 0x02
-#define CLRARBDO 0x01
+#define CLRSINT1 0x4c
+#define CLRSELTIMEO 0x80
+#define CLRATNO 0x40
+#define CLRSCSIRSTI 0x20
+#define CLRBUSFREE 0x08
+#define CLRSCSIPERR 0x04
+#define CLRSTRB2FAST 0x02
+#define CLRREQINIT 0x01
#define SSTAT1 0x4c
#define SELTO 0x80
@@ -1942,15 +1650,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define STRB2FAST 0x02
#define REQINIT 0x01
-#define CLRSINT1 0x4c
-#define CLRSELTIMEO 0x80
-#define CLRATNO 0x40
-#define CLRSCSIRSTI 0x20
-#define CLRBUSFREE 0x08
-#define CLRSCSIPERR 0x04
-#define CLRSTRB2FAST 0x02
-#define CLRREQINIT 0x01
-
#define SSTAT2 0x4d
#define BUSFREETIME 0xc0
#define NONPACKREQ 0x20
@@ -1998,14 +1697,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define LQIATNLQ 0x02
#define LQIATNCMD 0x01
-#define LQIMODE0 0x50
-#define ENLQIATNQASK 0x20
-#define ENLQICRCT1 0x10
-#define ENLQICRCT2 0x08
-#define ENLQIBADLQT 0x04
-#define ENLQIATNLQ 0x02
-#define ENLQIATNCMD 0x01
-
#define CLRLQIINT0 0x50
#define CLRLQIATNQAS 0x20
#define CLRLQICRCT1 0x10
@@ -2014,6 +1705,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define CLRLQIATNLQ 0x02
#define CLRLQIATNCMD 0x01
+#define LQIMODE0 0x50
+#define ENLQIATNQASK 0x20
+#define ENLQICRCT1 0x10
+#define ENLQICRCT2 0x08
+#define ENLQIBADLQT 0x04
+#define ENLQIATNLQ 0x02
+#define ENLQIATNCMD 0x01
+
#define LQIMODE1 0x51
#define ENLQIPHASE_LQ 0x80
#define ENLQIPHASE_NLQ 0x40
@@ -2160,6 +1859,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define CFG4ICMD 0x02
#define CFG4TCMD 0x01
+#define CURRSCB 0x5c
+
#define SEQIMODE 0x5c
#define ENCTXTDONE 0x40
#define ENSAVEPTRS 0x20
@@ -2169,8 +1870,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define ENCFG4ICMD 0x02
#define ENCFG4TCMD 0x01
-#define CURRSCB 0x5c
-
#define MDFFSTAT 0x5d
#define SHCNTNEGATIVE 0x40
#define SHCNTMINUS1 0x20
@@ -2185,29 +1884,29 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define DFFTAG 0x5e
+#define LASTSCB 0x5e
+
#define SCSITEST 0x5e
#define CNTRTEST 0x08
#define SEL_TXPLL_DEBUG 0x04
-#define LASTSCB 0x5e
-
#define IOPDNCTL 0x5f
#define DISABLE_OE 0x80
#define PDN_IDIST 0x04
#define PDN_DIFFSENSE 0x01
-#define DGRPCRCI 0x60
-
#define SHADDR 0x60
#define NEGOADDR 0x60
-#define NEGPERIOD 0x61
+#define DGRPCRCI 0x60

-#define NEGOFFSET 0x62
+#define NEGPERIOD 0x61
#define PACKCRCI 0x62
+#define NEGOFFSET 0x62
+
#define NEGPPROPTS 0x63
#define PPROPT_PACE 0x08
#define PPROPT_QAS 0x04
@@ -2253,8 +1952,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define SELOID 0x6b
-#define FAIRNESS 0x6c
-
#define PLL400CTL0 0x6c
#define PLL_VCOSEL 0x80
#define PLL_PWDN 0x40
@@ -2264,6 +1961,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define PLL_DLPF 0x02
#define PLL_ENFBM 0x01
+#define FAIRNESS 0x6c
+
#define PLL400CTL1 0x6d
#define PLL_CNTEN 0x80
#define PLL_CNTCLR 0x40
@@ -2275,25 +1974,25 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define HADDR 0x70
-#define HODMAADR 0x70
-
#define PLLDELAY 0x70
#define SPLIT_DROP_REQ 0x80
-#define HCNT 0x78
+#define HODMAADR 0x70
#define HODMACNT 0x78
-#define HODMAEN 0x7a
+#define HCNT 0x78

-#define SGHADDR 0x7c
+#define HODMAEN 0x7a
#define SCBHADDR 0x7c
-#define SGHCNT 0x84
+#define SGHADDR 0x7c
#define SCBHCNT 0x84
+#define SGHCNT 0x84
+
#define DFF_THRSH 0x88
#define WR_DFTHRSH 0x70
#define RD_DFTHRSH 0x07
@@ -2326,10 +2025,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define CMCRXMSG0 0x90
-#define OVLYRXMSG0 0x90
-
-#define DCHRXMSG0 0x90
-
#define ROENABLE 0x90
#define MSIROEN 0x20
#define OVLYROEN 0x10
@@ -2338,11 +2033,11 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define DCH1ROEN 0x02
#define DCH0ROEN 0x01
-#define OVLYRXMSG1 0x91
+#define OVLYRXMSG0 0x90

-#define CMCRXMSG1 0x91
+#define DCHRXMSG0 0x90

-#define DCHRXMSG1 0x91
+#define OVLYRXMSG1 0x91
#define NSENABLE 0x91
#define MSINSEN 0x20
@@ -2352,6 +2047,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define DCH1NSEN 0x02
#define DCH0NSEN 0x01
+#define CMCRXMSG1 0x91
+
+#define DCHRXMSG1 0x91
+
#define DCHRXMSG2 0x92
#define CMCRXMSG2 0x92
@@ -2375,24 +2074,24 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define TSCSERREN 0x02
#define CMPABCDIS 0x01
-#define CMCSEQBCNT 0x94
-
#define OVLYSEQBCNT 0x94
#define DCHSEQBCNT 0x94
+#define CMCSEQBCNT 0x94
+
+#define CMCSPLTSTAT0 0x96
+
#define DCHSPLTSTAT0 0x96
#define OVLYSPLTSTAT0 0x96
-#define CMCSPLTSTAT0 0x96
+#define CMCSPLTSTAT1 0x97
#define OVLYSPLTSTAT1 0x97
#define DCHSPLTSTAT1 0x97
-#define CMCSPLTSTAT1 0x97
-
#define SGRXMSG0 0x98
#define CDNUM 0xf8
#define CFNUM 0x07
@@ -2420,15 +2119,18 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define TAG_NUM 0x1f
#define RLXORD 0x10
+#define SGSEQBCNT 0x9c
+
#define SLVSPLTOUTATTR0 0x9c
#define LOWER_BCNT 0xff
-#define SGSEQBCNT 0x9c
-
#define SLVSPLTOUTATTR1 0x9d
#define CMPLT_DNUM 0xf8
#define CMPLT_FNUM 0x07
+#define SLVSPLTOUTATTR2 0x9e
+#define CMPLT_BNUM 0xff
+
#define SGSPLTSTAT0 0x9e
#define STAETERM 0x80
#define SCBCERR 0x40
@@ -2439,9 +2141,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define RXSCEMSG 0x02
#define RXSPLTRSP 0x01
-#define SLVSPLTOUTATTR2 0x9e
-#define CMPLT_BNUM 0xff
-
#define SGSPLTSTAT1 0x9f
#define RXDATABUCKET 0x01
@@ -2497,10 +2196,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define CCSGADDR 0xac
-#define CCSCBADDR 0xac
-
#define CCSCBADR_BK 0xac
+#define CCSCBADDR 0xac
+
#define CMC_RAMBIST 0xad
#define SG_ELEMENT_SIZE 0x80
#define SCBRAMBIST_FAIL 0x40
@@ -2554,9 +2253,9 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define SEEDAT 0xbc
#define SEECTL 0xbe
-#define SEEOP_EWDS 0x40
#define SEEOP_WALL 0x40
#define SEEOP_EWEN 0x40
+#define SEEOP_EWDS 0x40
#define SEEOPCODE 0x70
#define SEERST 0x02
#define SEESTART 0x01
@@ -2573,25 +2272,25 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define SCBCNT 0xbf
+#define DFWADDR 0xc0
+
#define DSPFLTRCTL 0xc0
#define FLTRDISABLE 0x20
#define EDGESENSE 0x10
#define DSPFCNTSEL 0x0f
-#define DFWADDR 0xc0
-
#define DSPDATACTL 0xc1
#define BYPASSENAB 0x80
#define DESQDIS 0x10
#define RCVROFFSTDIS 0x04
#define XMITOFFSTDIS 0x02
+#define DFRADDR 0xc2
+
#define DSPREQCTL 0xc2
#define MANREQCTL 0xc0
#define MANREQDLY 0x3f
-#define DFRADDR 0xc2
-
#define DSPACKCTL 0xc3
#define MANACKCTL 0xc0
#define MANACKDLY 0x3f
@@ -2612,14 +2311,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define WRTBIASCALC 0xc7
-#define DFPTRS 0xc8
-
#define RCVRBIASCALC 0xc8
-#define DFBKPTR 0xc9
+#define DFPTRS 0xc8
#define SKEWCALC 0xc9
+#define DFBKPTR 0xc9
+
#define DFDBCTL 0xcb
#define DFF_CIO_WR_RDY 0x20
#define DFF_CIO_RD_RDY 0x10
@@ -2704,12 +2403,12 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define ACCUM_SAVE 0xfa
+#define WAITING_SCB_TAILS 0x100
+
#define AHD_PCI_CONFIG_BASE 0x100
#define SRAM_BASE 0x100
-#define WAITING_SCB_TAILS 0x100
-
#define WAITING_TID_HEAD 0x120
#define WAITING_TID_TAIL 0x122
@@ -2738,8 +2437,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define PRELOADEN 0x80
#define WIDEODD 0x40
#define SCSIEN 0x20
-#define SDMAENACK 0x10
#define SDMAEN 0x10
+#define SDMAENACK 0x10
#define HDMAEN 0x08
#define HDMAENACK 0x08
#define DIRECTION 0x04
@@ -2837,12 +2536,12 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define MK_MESSAGE_SCSIID 0x162
+#define SCB_BASE 0x180
+
#define SCB_RESIDUAL_DATACNT 0x180
#define SCB_CDB_STORE 0x180
#define SCB_HOST_CDB_PTR 0x180
-#define SCB_BASE 0x180
-
#define SCB_RESIDUAL_SGPTR 0x184
#define SG_ADDR_MASK 0xf8
#define SG_OVERRUN_RESID 0x02
@@ -2910,17 +2609,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define SCB_DISCONNECTED_LISTS 0x1b8
-#define CMD_GROUP_CODE_SHIFT 0x05
-#define STIMESEL_MIN 0x18
-#define STIMESEL_SHIFT 0x03
-#define INVALID_ADDR 0x80
-#define AHD_PRECOMP_MASK 0x07
-#define TARGET_DATA_IN 0x01
-#define CCSCBADDR_MAX 0x80
-#define NUMDSPS 0x14
-#define SEEOP_EWEN_ADDR 0xc0
-#define AHD_ANNEXCOL_PER_DEV0 0x04
-#define DST_MODE_SHIFT 0x04
#define AHD_TIMER_MAX_US 0x18ffe7
#define AHD_TIMER_MAX_TICKS 0xffff
#define AHD_SENSE_BUFSIZE 0x100
@@ -2955,32 +2643,43 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
#define LUNLEN_SINGLE_LEVEL_LUN 0x0f
#define NVRAM_SCB_OFFSET 0x2c
#define STATUS_PKT_SENSE 0xff
+#define CMD_GROUP_CODE_SHIFT 0x05
#define MAX_OFFSET_PACED_BUG 0x7f
#define STIMESEL_BUG_ADJ 0x08
+#define STIMESEL_MIN 0x18
+#define STIMESEL_SHIFT 0x03
#define CCSGRAM_MAXSEGS 0x10
+#define INVALID_ADDR 0x80
#define SEEOP_ERAL_ADDR 0x80
#define AHD_SLEWRATE_DEF_REVB 0x08
#define AHD_PRECOMP_CUTBACK_17 0x04
+#define AHD_PRECOMP_MASK 0x07
#define SRC_MODE_SHIFT 0x00
#define PKT_OVERRUN_BUFSIZE 0x200
#define SCB_TRANSFER_SIZE_1BYTE_LUN 0x30
+#define TARGET_DATA_IN 0x01
#define HOST_MSG 0xff
#define MAX_OFFSET 0xfe
#define BUS_16_BIT 0x01
+#define CCSCBADDR_MAX 0x80
+#define NUMDSPS 0x14
+#define SEEOP_EWEN_ADDR 0xc0
+#define AHD_ANNEXCOL_PER_DEV0 0x04
+#define DST_MODE_SHIFT 0x04
/* Downloaded Constant Definitions */
-#define SG_SIZEOF 0x04
-#define SG_PREFETCH_ALIGN_MASK 0x02
-#define SG_PREFETCH_CNT_LIMIT 0x01
#define CACHELINE_MASK 0x07
#define SCB_TRANSFER_SIZE 0x06
#define PKT_OVERRUN_BUFOFFSET 0x05
+#define SG_SIZEOF 0x04
#define SG_PREFETCH_ADDR_MASK 0x03
+#define SG_PREFETCH_ALIGN_MASK 0x02
+#define SG_PREFETCH_CNT_LIMIT 0x01
#define SG_PREFETCH_CNT 0x00
#define DOWNLOAD_CONST_COUNT 0x08
/* Exported Labels */
-#define LABEL_timer_isr 0x28b
#define LABEL_seq_isr 0x28f
+#define LABEL_timer_isr 0x28b
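The churn above is largely mechanical: in the generated aic79xx_reg.h_shipped, every register is exposed either as a prototype for a table-driven ahd_*_print() helper (when AIC_DEBUG_REGISTERS is set) or as a macro that falls back to ahd_print_register() with a NULL parse table, and this hunk sequence deletes helpers and reorders same-address aliases without changing any register offsets. As a rough illustration of the { name, value, mask } decode driven by the parse tables that the companion aic79xx_reg_print.c_shipped diff below removes, here is a minimal, self-contained sketch. It is not the driver's ahd_print_register() (that helper lives elsewhere in the driver), only a simplified stand-in; the FLAGS table and its 0xd8 address are copied verbatim from the removed lines.

#include <stdio.h>

typedef unsigned int u_int;

typedef struct {
	const char *name;
	u_int value;	/* bit pattern the symbolic name stands for */
	u_int mask;	/* register bits compared against value */
} ahd_reg_parse_entry_t;

/* FLAGS (0xd8) parse table, copied from the lines this diff removes. */
static const ahd_reg_parse_entry_t FLAGS_parse_table[] = {
	{ "CARRY", 0x01, 0x01 },
	{ "ZERO",  0x02, 0x02 }
};

/*
 * Simplified stand-in for the driver's ahd_print_register(): print the
 * register name, its raw value, and every table entry whose masked bits
 * match.  The real helper additionally tracks output columns via the
 * cur_col/wrap arguments seen in the signatures throughout this diff.
 */
static void demo_print_register(const ahd_reg_parse_entry_t *table,
				u_int num_entries, const char *name,
				u_int address, u_int regvalue)
{
	u_int i;

	printf("%s[0x%02x] == 0x%02x", name, address, regvalue);
	for (i = 0; i < num_entries; i++)
		if ((regvalue & table[i].mask) == table[i].value)
			printf(" %s", table[i].name);
	printf("\n");
}

int main(void)
{
	/* 0x03 has both the CARRY and ZERO bits set. */
	demo_print_register(FLAGS_parse_table, 2, "FLAGS", 0xd8, 0x03);
	return 0;
}

An entry matches when the masked register bits equal the entry's value, which is how a single table can mix single-bit flags with multi-bit fields such as SEEOPCODE (mask 0x70) in the SEECTL table below.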
diff --git a/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped b/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
index c4c8a96bf5a3..f5ea715d6ac3 100644
--- a/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
+++ b/drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
@@ -8,18 +8,6 @@
#include "aic79xx_osm.h"
-static const ahd_reg_parse_entry_t MODE_PTR_parse_table[] = {
- { "SRC_MODE", 0x07, 0x07 },
- { "DST_MODE", 0x70, 0x70 }
-};
-
-int
-ahd_mode_ptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(MODE_PTR_parse_table, 2, "MODE_PTR",
- 0x00, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t INTSTAT_parse_table[] = {
{ "SPLTINT", 0x01, 0x01 },
{ "CMDCMPLT", 0x02, 0x02 },
@@ -39,110 +27,6 @@ ahd_intstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x01, regvalue, cur_col, wrap));
}
-static const ahd_reg_parse_entry_t SEQINTCODE_parse_table[] = {
- { "NO_SEQINT", 0x00, 0xff },
- { "BAD_PHASE", 0x01, 0xff },
- { "SEND_REJECT", 0x02, 0xff },
- { "PROTO_VIOLATION", 0x03, 0xff },
- { "NO_MATCH", 0x04, 0xff },
- { "IGN_WIDE_RES", 0x05, 0xff },
- { "PDATA_REINIT", 0x06, 0xff },
- { "HOST_MSG_LOOP", 0x07, 0xff },
- { "BAD_STATUS", 0x08, 0xff },
- { "DATA_OVERRUN", 0x09, 0xff },
- { "MKMSG_FAILED", 0x0a, 0xff },
- { "MISSED_BUSFREE", 0x0b, 0xff },
- { "DUMP_CARD_STATE", 0x0c, 0xff },
- { "ILLEGAL_PHASE", 0x0d, 0xff },
- { "INVALID_SEQINT", 0x0e, 0xff },
- { "CFG4ISTAT_INTR", 0x0f, 0xff },
- { "STATUS_OVERRUN", 0x10, 0xff },
- { "CFG4OVERRUN", 0x11, 0xff },
- { "ENTERING_NONPACK", 0x12, 0xff },
- { "TASKMGMT_FUNC_COMPLETE",0x13, 0xff },
- { "TASKMGMT_CMD_CMPLT_OKAY",0x14, 0xff },
- { "TRACEPOINT0", 0x15, 0xff },
- { "TRACEPOINT1", 0x16, 0xff },
- { "TRACEPOINT2", 0x17, 0xff },
- { "TRACEPOINT3", 0x18, 0xff },
- { "SAW_HWERR", 0x19, 0xff },
- { "BAD_SCB_STATUS", 0x1a, 0xff }
-};
-
-int
-ahd_seqintcode_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SEQINTCODE_parse_table, 27, "SEQINTCODE",
- 0x02, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t CLRINT_parse_table[] = {
- { "CLRSPLTINT", 0x01, 0x01 },
- { "CLRCMDINT", 0x02, 0x02 },
- { "CLRSEQINT", 0x04, 0x04 },
- { "CLRSCSIINT", 0x08, 0x08 },
- { "CLRPCIINT", 0x10, 0x10 },
- { "CLRSWTMINT", 0x20, 0x20 },
- { "CLRBRKADRINT", 0x40, 0x40 },
- { "CLRHWERRINT", 0x80, 0x80 }
-};
-
-int
-ahd_clrint_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CLRINT_parse_table, 8, "CLRINT",
- 0x03, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t ERROR_parse_table[] = {
- { "DSCTMOUT", 0x02, 0x02 },
- { "ILLOPCODE", 0x04, 0x04 },
- { "SQPARERR", 0x08, 0x08 },
- { "DPARERR", 0x10, 0x10 },
- { "MPARERR", 0x20, 0x20 },
- { "CIOACCESFAIL", 0x40, 0x40 },
- { "CIOPARERR", 0x80, 0x80 }
-};
-
-int
-ahd_error_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(ERROR_parse_table, 7, "ERROR",
- 0x04, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t HCNTRL_parse_table[] = {
- { "CHIPRST", 0x01, 0x01 },
- { "CHIPRSTACK", 0x01, 0x01 },
- { "INTEN", 0x02, 0x02 },
- { "PAUSE", 0x04, 0x04 },
- { "SWTIMER_START_B", 0x08, 0x08 },
- { "SWINT", 0x10, 0x10 },
- { "POWRDN", 0x40, 0x40 },
- { "SEQ_RESET", 0x80, 0x80 }
-};
-
-int
-ahd_hcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(HCNTRL_parse_table, 8, "HCNTRL",
- 0x05, regvalue, cur_col, wrap));
-}
-
-int
-ahd_hnscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "HNSCB_QOFF",
- 0x06, regvalue, cur_col, wrap));
-}
-
-int
-ahd_hescb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "HESCB_QOFF",
- 0x08, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t HS_MAILBOX_parse_table[] = {
{ "ENINT_COALESCE", 0x40, 0x40 },
{ "HOST_TQINPOS", 0x80, 0x80 }
@@ -170,77 +54,6 @@ ahd_seqintstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x0c, regvalue, cur_col, wrap));
}
-static const ahd_reg_parse_entry_t CLRSEQINTSTAT_parse_table[] = {
- { "CLRSEQ_SPLTINT", 0x01, 0x01 },
- { "CLRSEQ_PCIINT", 0x02, 0x02 },
- { "CLRSEQ_SCSIINT", 0x04, 0x04 },
- { "CLRSEQ_SEQINT", 0x08, 0x08 },
- { "CLRSEQ_SWTMRTO", 0x10, 0x10 }
-};
-
-int
-ahd_clrseqintstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CLRSEQINTSTAT_parse_table, 5, "CLRSEQINTSTAT",
- 0x0c, regvalue, cur_col, wrap));
-}
-
-int
-ahd_swtimer_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SWTIMER",
- 0x0e, regvalue, cur_col, wrap));
-}
-
-int
-ahd_snscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SNSCB_QOFF",
- 0x10, regvalue, cur_col, wrap));
-}
-
-int
-ahd_sescb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SESCB_QOFF",
- 0x12, regvalue, cur_col, wrap));
-}
-
-int
-ahd_sdscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SDSCB_QOFF",
- 0x14, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = {
- { "SCB_QSIZE_4", 0x00, 0x0f },
- { "SCB_QSIZE_8", 0x01, 0x0f },
- { "SCB_QSIZE_16", 0x02, 0x0f },
- { "SCB_QSIZE_32", 0x03, 0x0f },
- { "SCB_QSIZE_64", 0x04, 0x0f },
- { "SCB_QSIZE_128", 0x05, 0x0f },
- { "SCB_QSIZE_256", 0x06, 0x0f },
- { "SCB_QSIZE_512", 0x07, 0x0f },
- { "SCB_QSIZE_1024", 0x08, 0x0f },
- { "SCB_QSIZE_2048", 0x09, 0x0f },
- { "SCB_QSIZE_4096", 0x0a, 0x0f },
- { "SCB_QSIZE_8192", 0x0b, 0x0f },
- { "SCB_QSIZE_16384", 0x0c, 0x0f },
- { "SCB_QSIZE", 0x0f, 0x0f },
- { "HS_MAILBOX_ACT", 0x10, 0x10 },
- { "SDSCB_ROLLOVR", 0x20, 0x20 },
- { "NEW_SCB_AVAIL", 0x40, 0x40 },
- { "EMPTY_SCB_AVAIL", 0x80, 0x80 }
-};
-
-int
-ahd_qoff_ctlsta_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(QOFF_CTLSTA_parse_table, 18, "QOFF_CTLSTA",
- 0x16, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t INTCTL_parse_table[] = {
{ "SPLTINTEN", 0x01, 0x01 },
{ "SEQINTEN", 0x02, 0x02 },
@@ -280,22 +93,6 @@ ahd_dfcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x19, regvalue, cur_col, wrap));
}
-static const ahd_reg_parse_entry_t DSCOMMAND0_parse_table[] = {
- { "CIOPARCKEN", 0x01, 0x01 },
- { "DISABLE_TWATE", 0x02, 0x02 },
- { "EXTREQLCK", 0x10, 0x10 },
- { "MPARCKEN", 0x20, 0x20 },
- { "DPARCKEN", 0x40, 0x40 },
- { "CACHETHEN", 0x80, 0x80 }
-};
-
-int
-ahd_dscommand0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DSCOMMAND0_parse_table, 6, "DSCOMMAND0",
- 0x19, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t DFSTATUS_parse_table[] = {
{ "FIFOEMP", 0x01, 0x01 },
{ "FIFOFULL", 0x02, 0x02 },
@@ -327,146 +124,6 @@ ahd_sg_cache_shadow_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x1b, regvalue, cur_col, wrap));
}
-static const ahd_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = {
- { "LAST_SEG", 0x02, 0x02 },
- { "ODD_SEG", 0x04, 0x04 },
- { "SG_ADDR_MASK", 0xf8, 0xf8 }
-};
-
-int
-ahd_sg_cache_pre_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SG_CACHE_PRE_parse_table, 3, "SG_CACHE_PRE",
- 0x1b, regvalue, cur_col, wrap));
-}
-
-int
-ahd_lqin_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "LQIN",
- 0x20, regvalue, cur_col, wrap));
-}
-
-int
-ahd_lunptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "LUNPTR",
- 0x22, regvalue, cur_col, wrap));
-}
-
-int
-ahd_cmdlenptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "CMDLENPTR",
- 0x25, regvalue, cur_col, wrap));
-}
-
-int
-ahd_attrptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "ATTRPTR",
- 0x26, regvalue, cur_col, wrap));
-}
-
-int
-ahd_flagptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "FLAGPTR",
- 0x27, regvalue, cur_col, wrap));
-}
-
-int
-ahd_cmdptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "CMDPTR",
- 0x28, regvalue, cur_col, wrap));
-}
-
-int
-ahd_qnextptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "QNEXTPTR",
- 0x29, regvalue, cur_col, wrap));
-}
-
-int
-ahd_abrtbyteptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "ABRTBYTEPTR",
- 0x2b, regvalue, cur_col, wrap));
-}
-
-int
-ahd_abrtbitptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "ABRTBITPTR",
- 0x2c, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t LUNLEN_parse_table[] = {
- { "ILUNLEN", 0x0f, 0x0f },
- { "TLUNLEN", 0xf0, 0xf0 }
-};
-
-int
-ahd_lunlen_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(LUNLEN_parse_table, 2, "LUNLEN",
- 0x30, regvalue, cur_col, wrap));
-}
-
-int
-ahd_cdblimit_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "CDBLIMIT",
- 0x31, regvalue, cur_col, wrap));
-}
-
-int
-ahd_maxcmd_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "MAXCMD",
- 0x32, regvalue, cur_col, wrap));
-}
-
-int
-ahd_maxcmdcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "MAXCMDCNT",
- 0x33, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t LQCTL1_parse_table[] = {
- { "ABORTPENDING", 0x01, 0x01 },
- { "SINGLECMD", 0x02, 0x02 },
- { "PCI2PCI", 0x04, 0x04 }
-};
-
-int
-ahd_lqctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(LQCTL1_parse_table, 3, "LQCTL1",
- 0x38, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t LQCTL2_parse_table[] = {
- { "LQOPAUSE", 0x01, 0x01 },
- { "LQOTOIDLE", 0x02, 0x02 },
- { "LQOCONTINUE", 0x04, 0x04 },
- { "LQORETRY", 0x08, 0x08 },
- { "LQIPAUSE", 0x10, 0x10 },
- { "LQITOIDLE", 0x20, 0x20 },
- { "LQICONTINUE", 0x40, 0x40 },
- { "LQIRETRY", 0x80, 0x80 }
-};
-
-int
-ahd_lqctl2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(LQCTL2_parse_table, 8, "LQCTL2",
- 0x39, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t SCSISEQ0_parse_table[] = {
{ "SCSIRSTO", 0x01, 0x01 },
{ "FORCEBUSFREE", 0x10, 0x10 },
@@ -498,37 +155,6 @@ ahd_scsiseq1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x3b, regvalue, cur_col, wrap));
}
-static const ahd_reg_parse_entry_t SXFRCTL0_parse_table[] = {
- { "SPIOEN", 0x08, 0x08 },
- { "BIOSCANCELEN", 0x10, 0x10 },
- { "DFPEXP", 0x40, 0x40 },
- { "DFON", 0x80, 0x80 }
-};
-
-int
-ahd_sxfrctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SXFRCTL0_parse_table, 4, "SXFRCTL0",
- 0x3c, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t SXFRCTL1_parse_table[] = {
- { "STPWEN", 0x01, 0x01 },
- { "ACTNEGEN", 0x02, 0x02 },
- { "ENSTIMER", 0x04, 0x04 },
- { "STIMESEL", 0x18, 0x18 },
- { "ENSPCHK", 0x20, 0x20 },
- { "ENSACHK", 0x40, 0x40 },
- { "BITBUCKET", 0x80, 0x80 }
-};
-
-int
-ahd_sxfrctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SXFRCTL1_parse_table, 7, "SXFRCTL1",
- 0x3d, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t DFFSTAT_parse_table[] = {
{ "CURRFIFO_0", 0x00, 0x03 },
{ "CURRFIFO_1", 0x01, 0x03 },
@@ -545,40 +171,6 @@ ahd_dffstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x3f, regvalue, cur_col, wrap));
}
-int
-ahd_multargid_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "MULTARGID",
- 0x40, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t SCSISIGO_parse_table[] = {
- { "P_DATAOUT", 0x00, 0xe0 },
- { "P_DATAOUT_DT", 0x20, 0xe0 },
- { "P_DATAIN", 0x40, 0xe0 },
- { "P_DATAIN_DT", 0x60, 0xe0 },
- { "P_COMMAND", 0x80, 0xe0 },
- { "P_MESGOUT", 0xa0, 0xe0 },
- { "P_STATUS", 0xc0, 0xe0 },
- { "P_MESGIN", 0xe0, 0xe0 },
- { "ACKO", 0x01, 0x01 },
- { "REQO", 0x02, 0x02 },
- { "BSYO", 0x04, 0x04 },
- { "SELO", 0x08, 0x08 },
- { "ATNO", 0x10, 0x10 },
- { "MSGO", 0x20, 0x20 },
- { "IOO", 0x40, 0x40 },
- { "CDO", 0x80, 0x80 },
- { "PHASE_MASK", 0xe0, 0xe0 }
-};
-
-int
-ahd_scsisigo_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SCSISIGO_parse_table, 17, "SCSISIGO",
- 0x40, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t SCSISIGI_parse_table[] = {
{ "P_DATAOUT", 0x00, 0xe0 },
{ "P_DATAOUT_DT", 0x20, 0xe0 },
@@ -624,31 +216,12 @@ ahd_scsiphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_scsidat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SCSIDAT",
- 0x44, regvalue, cur_col, wrap));
-}
-
-int
ahd_scsibus_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "SCSIBUS",
0x46, regvalue, cur_col, wrap));
}
-static const ahd_reg_parse_entry_t TARGIDIN_parse_table[] = {
- { "TARGID", 0x0f, 0x0f },
- { "CLKOUT", 0x80, 0x80 }
-};
-
-int
-ahd_targidin_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(TARGIDIN_parse_table, 2, "TARGIDIN",
- 0x48, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t SELID_parse_table[] = {
{ "ONEBIT", 0x08, 0x08 },
{ "SELID_MASK", 0xf0, 0xf0 }
@@ -661,38 +234,6 @@ ahd_selid_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x49, regvalue, cur_col, wrap));
}
-static const ahd_reg_parse_entry_t OPTIONMODE_parse_table[] = {
- { "AUTO_MSGOUT_DE", 0x02, 0x02 },
- { "ENDGFORMCHK", 0x04, 0x04 },
- { "BUSFREEREV", 0x10, 0x10 },
- { "BIASCANCTL", 0x20, 0x20 },
- { "AUTOACKEN", 0x40, 0x40 },
- { "BIOSCANCTL", 0x80, 0x80 },
- { "OPTIONMODE_DEFAULTS",0x02, 0x02 }
-};
-
-int
-ahd_optionmode_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(OPTIONMODE_parse_table, 7, "OPTIONMODE",
- 0x4a, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t SBLKCTL_parse_table[] = {
- { "SELWIDE", 0x02, 0x02 },
- { "ENAB20", 0x04, 0x04 },
- { "ENAB40", 0x08, 0x08 },
- { "DIAGLEDON", 0x40, 0x40 },
- { "DIAGLEDEN", 0x80, 0x80 }
-};
-
-int
-ahd_sblkctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SBLKCTL_parse_table, 5, "SBLKCTL",
- 0x4a, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t SSTAT0_parse_table[] = {
{ "ARBDO", 0x01, 0x01 },
{ "SPIORDY", 0x02, 0x02 },
@@ -728,23 +269,6 @@ ahd_simode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x4b, regvalue, cur_col, wrap));
}
-static const ahd_reg_parse_entry_t CLRSINT0_parse_table[] = {
- { "CLRARBDO", 0x01, 0x01 },
- { "CLRSPIORDY", 0x02, 0x02 },
- { "CLROVERRUN", 0x04, 0x04 },
- { "CLRIOERR", 0x08, 0x08 },
- { "CLRSELINGO", 0x10, 0x10 },
- { "CLRSELDI", 0x20, 0x20 },
- { "CLRSELDO", 0x40, 0x40 }
-};
-
-int
-ahd_clrsint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CLRSINT0_parse_table, 7, "CLRSINT0",
- 0x4b, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t SSTAT1_parse_table[] = {
{ "REQINIT", 0x01, 0x01 },
{ "STRB2FAST", 0x02, 0x02 },
@@ -763,23 +287,6 @@ ahd_sstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x4c, regvalue, cur_col, wrap));
}
-static const ahd_reg_parse_entry_t CLRSINT1_parse_table[] = {
- { "CLRREQINIT", 0x01, 0x01 },
- { "CLRSTRB2FAST", 0x02, 0x02 },
- { "CLRSCSIPERR", 0x04, 0x04 },
- { "CLRBUSFREE", 0x08, 0x08 },
- { "CLRSCSIRSTI", 0x20, 0x20 },
- { "CLRATNO", 0x40, 0x40 },
- { "CLRSELTIMEO", 0x80, 0x80 }
-};
-
-int
-ahd_clrsint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CLRSINT1_parse_table, 7, "CLRSINT1",
- 0x4c, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t SSTAT2_parse_table[] = {
{ "BUSFREE_LQO", 0x40, 0xc0 },
{ "BUSFREE_DFF0", 0x80, 0xc0 },
@@ -800,20 +307,6 @@ ahd_sstat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x4d, regvalue, cur_col, wrap));
}
-static const ahd_reg_parse_entry_t CLRSINT2_parse_table[] = {
- { "CLRDMADONE", 0x01, 0x01 },
- { "CLRSDONE", 0x02, 0x02 },
- { "CLRWIDE_RES", 0x04, 0x04 },
- { "CLRNONPACKREQ", 0x20, 0x20 }
-};
-
-int
-ahd_clrsint2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CLRSINT2_parse_table, 4, "CLRSINT2",
- 0x4d, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t PERRDIAG_parse_table[] = {
{ "DTERR", 0x01, 0x01 },
{ "DGFORMERR", 0x02, 0x02 },
@@ -833,26 +326,12 @@ ahd_perrdiag_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_lqistate_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "LQISTATE",
- 0x4e, regvalue, cur_col, wrap));
-}
-
-int
ahd_soffcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "SOFFCNT",
0x4f, regvalue, cur_col, wrap));
}
-int
-ahd_lqostate_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "LQOSTATE",
- 0x4f, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t LQISTAT0_parse_table[] = {
{ "LQIATNCMD", 0x01, 0x01 },
{ "LQIATNLQ", 0x02, 0x02 },
@@ -869,56 +348,6 @@ ahd_lqistat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x50, regvalue, cur_col, wrap));
}
-static const ahd_reg_parse_entry_t LQIMODE0_parse_table[] = {
- { "ENLQIATNCMD", 0x01, 0x01 },
- { "ENLQIATNLQ", 0x02, 0x02 },
- { "ENLQIBADLQT", 0x04, 0x04 },
- { "ENLQICRCT2", 0x08, 0x08 },
- { "ENLQICRCT1", 0x10, 0x10 },
- { "ENLQIATNQASK", 0x20, 0x20 }
-};
-
-int
-ahd_lqimode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(LQIMODE0_parse_table, 6, "LQIMODE0",
- 0x50, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t CLRLQIINT0_parse_table[] = {
- { "CLRLQIATNCMD", 0x01, 0x01 },
- { "CLRLQIATNLQ", 0x02, 0x02 },
- { "CLRLQIBADLQT", 0x04, 0x04 },
- { "CLRLQICRCT2", 0x08, 0x08 },
- { "CLRLQICRCT1", 0x10, 0x10 },
- { "CLRLQIATNQAS", 0x20, 0x20 }
-};
-
-int
-ahd_clrlqiint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CLRLQIINT0_parse_table, 6, "CLRLQIINT0",
- 0x50, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t LQIMODE1_parse_table[] = {
- { "ENLQIOVERI_NLQ", 0x01, 0x01 },
- { "ENLQIOVERI_LQ", 0x02, 0x02 },
- { "ENLQIBADLQI", 0x04, 0x04 },
- { "ENLQICRCI_NLQ", 0x08, 0x08 },
- { "ENLQICRCI_LQ", 0x10, 0x10 },
- { "ENLIQABORT", 0x20, 0x20 },
- { "ENLQIPHASE_NLQ", 0x40, 0x40 },
- { "ENLQIPHASE_LQ", 0x80, 0x80 }
-};
-
-int
-ahd_lqimode1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(LQIMODE1_parse_table, 8, "LQIMODE1",
- 0x51, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t LQISTAT1_parse_table[] = {
{ "LQIOVERI_NLQ", 0x01, 0x01 },
{ "LQIOVERI_LQ", 0x02, 0x02 },
@@ -937,24 +366,6 @@ ahd_lqistat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x51, regvalue, cur_col, wrap));
}
-static const ahd_reg_parse_entry_t CLRLQIINT1_parse_table[] = {
- { "CLRLQIOVERI_NLQ", 0x01, 0x01 },
- { "CLRLQIOVERI_LQ", 0x02, 0x02 },
- { "CLRLQIBADLQI", 0x04, 0x04 },
- { "CLRLQICRCI_NLQ", 0x08, 0x08 },
- { "CLRLQICRCI_LQ", 0x10, 0x10 },
- { "CLRLIQABORT", 0x20, 0x20 },
- { "CLRLQIPHASE_NLQ", 0x40, 0x40 },
- { "CLRLQIPHASE_LQ", 0x80, 0x80 }
-};
-
-int
-ahd_clrlqiint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CLRLQIINT1_parse_table, 8, "CLRLQIINT1",
- 0x51, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t LQISTAT2_parse_table[] = {
{ "LQIGSAVAIL", 0x01, 0x01 },
{ "LQISTOPCMD", 0x02, 0x02 },
@@ -985,30 +396,6 @@ ahd_sstat3_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x53, regvalue, cur_col, wrap));
}
-static const ahd_reg_parse_entry_t SIMODE3_parse_table[] = {
- { "ENOSRAMPERR", 0x01, 0x01 },
- { "ENNTRAMPERR", 0x02, 0x02 }
-};
-
-int
-ahd_simode3_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SIMODE3_parse_table, 2, "SIMODE3",
- 0x53, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t CLRSINT3_parse_table[] = {
- { "CLROSRAMPERR", 0x01, 0x01 },
- { "CLRNTRAMPERR", 0x02, 0x02 }
-};
-
-int
-ahd_clrsint3_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CLRSINT3_parse_table, 2, "CLRSINT3",
- 0x53, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t LQOSTAT0_parse_table[] = {
{ "LQOTCRC", 0x01, 0x01 },
{ "LQOATNPKT", 0x02, 0x02 },
@@ -1024,51 +411,6 @@ ahd_lqostat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x54, regvalue, cur_col, wrap));
}
-static const ahd_reg_parse_entry_t CLRLQOINT0_parse_table[] = {
- { "CLRLQOTCRC", 0x01, 0x01 },
- { "CLRLQOATNPKT", 0x02, 0x02 },
- { "CLRLQOATNLQ", 0x04, 0x04 },
- { "CLRLQOSTOPT2", 0x08, 0x08 },
- { "CLRLQOTARGSCBPERR", 0x10, 0x10 }
-};
-
-int
-ahd_clrlqoint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CLRLQOINT0_parse_table, 5, "CLRLQOINT0",
- 0x54, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t LQOMODE0_parse_table[] = {
- { "ENLQOTCRC", 0x01, 0x01 },
- { "ENLQOATNPKT", 0x02, 0x02 },
- { "ENLQOATNLQ", 0x04, 0x04 },
- { "ENLQOSTOPT2", 0x08, 0x08 },
- { "ENLQOTARGSCBPERR", 0x10, 0x10 }
-};
-
-int
-ahd_lqomode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(LQOMODE0_parse_table, 5, "LQOMODE0",
- 0x54, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t LQOMODE1_parse_table[] = {
- { "ENLQOPHACHGINPKT", 0x01, 0x01 },
- { "ENLQOBUSFREE", 0x02, 0x02 },
- { "ENLQOBADQAS", 0x04, 0x04 },
- { "ENLQOSTOPI2", 0x08, 0x08 },
- { "ENLQOINITSCBPERR", 0x10, 0x10 }
-};
-
-int
-ahd_lqomode1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(LQOMODE1_parse_table, 5, "LQOMODE1",
- 0x55, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t LQOSTAT1_parse_table[] = {
{ "LQOPHACHGINPKT", 0x01, 0x01 },
{ "LQOBUSFREE", 0x02, 0x02 },
@@ -1084,21 +426,6 @@ ahd_lqostat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x55, regvalue, cur_col, wrap));
}
-static const ahd_reg_parse_entry_t CLRLQOINT1_parse_table[] = {
- { "CLRLQOPHACHGINPKT", 0x01, 0x01 },
- { "CLRLQOBUSFREE", 0x02, 0x02 },
- { "CLRLQOBADQAS", 0x04, 0x04 },
- { "CLRLQOSTOPI2", 0x08, 0x08 },
- { "CLRLQOINITSCBPERR", 0x10, 0x10 }
-};
-
-int
-ahd_clrlqoint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CLRLQOINT1_parse_table, 5, "CLRLQOINT1",
- 0x55, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t LQOSTAT2_parse_table[] = {
{ "LQOSTOP0", 0x01, 0x01 },
{ "LQOPHACHGOUTPKT", 0x02, 0x02 },
@@ -1113,13 +440,6 @@ ahd_lqostat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x56, regvalue, cur_col, wrap));
}
-int
-ahd_os_space_cnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "OS_SPACE_CNT",
- 0x56, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t SIMODE1_parse_table[] = {
{ "ENREQINIT", 0x01, 0x01 },
{ "ENSTRB2FAST", 0x02, 0x02 },
@@ -1138,13 +458,6 @@ ahd_simode1_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x57, regvalue, cur_col, wrap));
}
-int
-ahd_gsfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "GSFIFO",
- 0x58, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t DFFSXFRCTL_parse_table[] = {
{ "RSTCHN", 0x01, 0x01 },
{ "CLRCHN", 0x02, 0x02 },
@@ -1159,44 +472,6 @@ ahd_dffsxfrctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x5a, regvalue, cur_col, wrap));
}
-static const ahd_reg_parse_entry_t LQOSCSCTL_parse_table[] = {
- { "LQONOCHKOVER", 0x01, 0x01 },
- { "LQONOHOLDLACK", 0x02, 0x02 },
- { "LQOBUSETDLY", 0x40, 0x40 },
- { "LQOH2A_VERSION", 0x80, 0x80 }
-};
-
-int
-ahd_lqoscsctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(LQOSCSCTL_parse_table, 4, "LQOSCSCTL",
- 0x5a, regvalue, cur_col, wrap));
-}
-
-int
-ahd_nextscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "NEXTSCB",
- 0x5a, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t CLRSEQINTSRC_parse_table[] = {
- { "CLRCFG4TCMD", 0x01, 0x01 },
- { "CLRCFG4ICMD", 0x02, 0x02 },
- { "CLRCFG4TSTAT", 0x04, 0x04 },
- { "CLRCFG4ISTAT", 0x08, 0x08 },
- { "CLRCFG4DATA", 0x10, 0x10 },
- { "CLRSAVEPTRS", 0x20, 0x20 },
- { "CLRCTXTDONE", 0x40, 0x40 }
-};
-
-int
-ahd_clrseqintsrc_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(CLRSEQINTSRC_parse_table, 7, "CLRSEQINTSRC",
- 0x5b, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t SEQINTSRC_parse_table[] = {
{ "CFG4TCMD", 0x01, 0x01 },
{ "CFG4ICMD", 0x02, 0x02 },
@@ -1231,13 +506,6 @@ ahd_seqimode_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x5c, regvalue, cur_col, wrap));
}
-int
-ahd_currscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "CURRSCB",
- 0x5c, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t MDFFSTAT_parse_table[] = {
{ "FIFOFREE", 0x01, 0x01 },
{ "DATAINFIFO", 0x02, 0x02 },
@@ -1256,308 +524,12 @@ ahd_mdffstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_lastscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "LASTSCB",
- 0x5e, regvalue, cur_col, wrap));
-}
-
-int
-ahd_shaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SHADDR",
- 0x60, regvalue, cur_col, wrap));
-}
-
-int
-ahd_negoaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "NEGOADDR",
- 0x60, regvalue, cur_col, wrap));
-}
-
-int
-ahd_negperiod_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "NEGPERIOD",
- 0x61, regvalue, cur_col, wrap));
-}
-
-int
-ahd_negoffset_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "NEGOFFSET",
- 0x62, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t NEGPPROPTS_parse_table[] = {
- { "PPROPT_IUT", 0x01, 0x01 },
- { "PPROPT_DT", 0x02, 0x02 },
- { "PPROPT_QAS", 0x04, 0x04 },
- { "PPROPT_PACE", 0x08, 0x08 }
-};
-
-int
-ahd_negppropts_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NEGPPROPTS_parse_table, 4, "NEGPPROPTS",
- 0x63, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t NEGCONOPTS_parse_table[] = {
- { "WIDEXFER", 0x01, 0x01 },
- { "ENAUTOATNO", 0x02, 0x02 },
- { "ENAUTOATNI", 0x04, 0x04 },
- { "ENSLOWCRC", 0x08, 0x08 },
- { "RTI_OVRDTRN", 0x10, 0x10 },
- { "RTI_WRTDIS", 0x20, 0x20 },
- { "ENSNAPSHOT", 0x40, 0x40 }
-};
-
-int
-ahd_negconopts_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NEGCONOPTS_parse_table, 7, "NEGCONOPTS",
- 0x64, regvalue, cur_col, wrap));
-}
-
-int
-ahd_annexcol_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "ANNEXCOL",
- 0x65, regvalue, cur_col, wrap));
-}
-
-int
-ahd_annexdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "ANNEXDAT",
- 0x66, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t SCSCHKN_parse_table[] = {
- { "LSTSGCLRDIS", 0x01, 0x01 },
- { "SHVALIDSTDIS", 0x02, 0x02 },
- { "DFFACTCLR", 0x04, 0x04 },
- { "SDONEMSKDIS", 0x08, 0x08 },
- { "WIDERESEN", 0x10, 0x10 },
- { "CURRFIFODEF", 0x20, 0x20 },
- { "STSELSKIDDIS", 0x40, 0x40 },
- { "BIDICHKDIS", 0x80, 0x80 }
-};
-
-int
-ahd_scschkn_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SCSCHKN_parse_table, 8, "SCSCHKN",
- 0x66, regvalue, cur_col, wrap));
-}
-
-int
-ahd_iownid_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "IOWNID",
- 0x67, regvalue, cur_col, wrap));
-}
-
-int
-ahd_shcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SHCNT",
- 0x68, regvalue, cur_col, wrap));
-}
-
-int
-ahd_townid_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "TOWNID",
- 0x69, regvalue, cur_col, wrap));
-}
-
-int
ahd_seloid_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "SELOID",
0x6b, regvalue, cur_col, wrap));
}
-int
-ahd_haddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "HADDR",
- 0x70, regvalue, cur_col, wrap));
-}
-
-int
-ahd_hcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "HCNT",
- 0x78, regvalue, cur_col, wrap));
-}
-
-int
-ahd_sghaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SGHADDR",
- 0x7c, regvalue, cur_col, wrap));
-}
-
-int
-ahd_scbhaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SCBHADDR",
- 0x7c, regvalue, cur_col, wrap));
-}
-
-int
-ahd_sghcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SGHCNT",
- 0x84, regvalue, cur_col, wrap));
-}
-
-int
-ahd_scbhcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SCBHCNT",
- 0x84, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t DFF_THRSH_parse_table[] = {
- { "WR_DFTHRSH_MIN", 0x00, 0x70 },
- { "RD_DFTHRSH_MIN", 0x00, 0x07 },
- { "RD_DFTHRSH_25", 0x01, 0x07 },
- { "RD_DFTHRSH_50", 0x02, 0x07 },
- { "RD_DFTHRSH_63", 0x03, 0x07 },
- { "RD_DFTHRSH_75", 0x04, 0x07 },
- { "RD_DFTHRSH_85", 0x05, 0x07 },
- { "RD_DFTHRSH_90", 0x06, 0x07 },
- { "RD_DFTHRSH_MAX", 0x07, 0x07 },
- { "WR_DFTHRSH_25", 0x10, 0x70 },
- { "WR_DFTHRSH_50", 0x20, 0x70 },
- { "WR_DFTHRSH_63", 0x30, 0x70 },
- { "WR_DFTHRSH_75", 0x40, 0x70 },
- { "WR_DFTHRSH_85", 0x50, 0x70 },
- { "WR_DFTHRSH_90", 0x60, 0x70 },
- { "WR_DFTHRSH_MAX", 0x70, 0x70 },
- { "RD_DFTHRSH", 0x07, 0x07 },
- { "WR_DFTHRSH", 0x70, 0x70 }
-};
-
-int
-ahd_dff_thrsh_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DFF_THRSH_parse_table, 18, "DFF_THRSH",
- 0x88, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t PCIXCTL_parse_table[] = {
- { "CMPABCDIS", 0x01, 0x01 },
- { "TSCSERREN", 0x02, 0x02 },
- { "SRSPDPEEN", 0x04, 0x04 },
- { "SPLTSTADIS", 0x08, 0x08 },
- { "SPLTSMADIS", 0x10, 0x10 },
- { "UNEXPSCIEN", 0x20, 0x20 },
- { "SERRPULSE", 0x80, 0x80 }
-};
-
-int
-ahd_pcixctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(PCIXCTL_parse_table, 7, "PCIXCTL",
- 0x93, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t DCHSPLTSTAT0_parse_table[] = {
- { "RXSPLTRSP", 0x01, 0x01 },
- { "RXSCEMSG", 0x02, 0x02 },
- { "RXOVRUN", 0x04, 0x04 },
- { "CNTNOTCMPLT", 0x08, 0x08 },
- { "SCDATBUCKET", 0x10, 0x10 },
- { "SCADERR", 0x20, 0x20 },
- { "SCBCERR", 0x40, 0x40 },
- { "STAETERM", 0x80, 0x80 }
-};
-
-int
-ahd_dchspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DCHSPLTSTAT0_parse_table, 8, "DCHSPLTSTAT0",
- 0x96, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t DCHSPLTSTAT1_parse_table[] = {
- { "RXDATABUCKET", 0x01, 0x01 }
-};
-
-int
-ahd_dchspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DCHSPLTSTAT1_parse_table, 1, "DCHSPLTSTAT1",
- 0x97, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t SGSPLTSTAT0_parse_table[] = {
- { "RXSPLTRSP", 0x01, 0x01 },
- { "RXSCEMSG", 0x02, 0x02 },
- { "RXOVRUN", 0x04, 0x04 },
- { "CNTNOTCMPLT", 0x08, 0x08 },
- { "SCDATBUCKET", 0x10, 0x10 },
- { "SCADERR", 0x20, 0x20 },
- { "SCBCERR", 0x40, 0x40 },
- { "STAETERM", 0x80, 0x80 }
-};
-
-int
-ahd_sgspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SGSPLTSTAT0_parse_table, 8, "SGSPLTSTAT0",
- 0x9e, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t SGSPLTSTAT1_parse_table[] = {
- { "RXDATABUCKET", 0x01, 0x01 }
-};
-
-int
-ahd_sgspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SGSPLTSTAT1_parse_table, 1, "SGSPLTSTAT1",
- 0x9f, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t DF0PCISTAT_parse_table[] = {
- { "DPR", 0x01, 0x01 },
- { "TWATERR", 0x02, 0x02 },
- { "RDPERR", 0x04, 0x04 },
- { "SCAAPERR", 0x08, 0x08 },
- { "RTA", 0x10, 0x10 },
- { "RMA", 0x20, 0x20 },
- { "SSE", 0x40, 0x40 },
- { "DPE", 0x80, 0x80 }
-};
-
-int
-ahd_df0pcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DF0PCISTAT_parse_table, 8, "DF0PCISTAT",
- 0xa0, regvalue, cur_col, wrap));
-}
-
-int
-ahd_reg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "REG0",
- 0xa0, regvalue, cur_col, wrap));
-}
-
-int
-ahd_reg_isr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "REG_ISR",
- 0xa4, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t SG_STATE_parse_table[] = {
{ "SEGS_AVAIL", 0x01, 0x01 },
{ "LOADING_NEEDED", 0x02, 0x02 },
@@ -1571,54 +543,6 @@ ahd_sg_state_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xa6, regvalue, cur_col, wrap));
}
-static const ahd_reg_parse_entry_t TARGPCISTAT_parse_table[] = {
- { "TWATERR", 0x02, 0x02 },
- { "STA", 0x08, 0x08 },
- { "SSE", 0x40, 0x40 },
- { "DPE", 0x80, 0x80 }
-};
-
-int
-ahd_targpcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(TARGPCISTAT_parse_table, 4, "TARGPCISTAT",
- 0xa7, regvalue, cur_col, wrap));
-}
-
-int
-ahd_scbptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SCBPTR",
- 0xa8, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t SCBAUTOPTR_parse_table[] = {
- { "SCBPTR_OFF", 0x07, 0x07 },
- { "SCBPTR_ADDR", 0x38, 0x38 },
- { "AUSCBPTR_EN", 0x80, 0x80 }
-};
-
-int
-ahd_scbautoptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SCBAUTOPTR_parse_table, 3, "SCBAUTOPTR",
- 0xab, regvalue, cur_col, wrap));
-}
-
-int
-ahd_ccsgaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "CCSGADDR",
- 0xac, regvalue, cur_col, wrap));
-}
-
-int
-ahd_ccscbaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "CCSCBADDR",
- 0xac, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t CCSCBCTL_parse_table[] = {
{ "CCSCBRESET", 0x01, 0x01 },
{ "CCSCBDIR", 0x04, 0x04 },
@@ -1651,138 +575,6 @@ ahd_ccsgctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xad, regvalue, cur_col, wrap));
}
-int
-ahd_ccsgram_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "CCSGRAM",
- 0xb0, regvalue, cur_col, wrap));
-}
-
-int
-ahd_ccscbram_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "CCSCBRAM",
- 0xb0, regvalue, cur_col, wrap));
-}
-
-int
-ahd_brddat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "BRDDAT",
- 0xb8, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t BRDCTL_parse_table[] = {
- { "BRDSTB", 0x01, 0x01 },
- { "BRDRW", 0x02, 0x02 },
- { "BRDEN", 0x04, 0x04 },
- { "BRDADDR", 0x38, 0x38 },
- { "FLXARBREQ", 0x40, 0x40 },
- { "FLXARBACK", 0x80, 0x80 }
-};
-
-int
-ahd_brdctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(BRDCTL_parse_table, 6, "BRDCTL",
- 0xb9, regvalue, cur_col, wrap));
-}
-
-int
-ahd_seeadr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SEEADR",
- 0xba, regvalue, cur_col, wrap));
-}
-
-int
-ahd_seedat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SEEDAT",
- 0xbc, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t SEECTL_parse_table[] = {
- { "SEEOP_ERAL", 0x40, 0x70 },
- { "SEEOP_WRITE", 0x50, 0x70 },
- { "SEEOP_READ", 0x60, 0x70 },
- { "SEEOP_ERASE", 0x70, 0x70 },
- { "SEESTART", 0x01, 0x01 },
- { "SEERST", 0x02, 0x02 },
- { "SEEOPCODE", 0x70, 0x70 },
- { "SEEOP_EWEN", 0x40, 0x40 },
- { "SEEOP_WALL", 0x40, 0x40 },
- { "SEEOP_EWDS", 0x40, 0x40 }
-};
-
-int
-ahd_seectl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SEECTL_parse_table, 10, "SEECTL",
- 0xbe, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t SEESTAT_parse_table[] = {
- { "SEESTART", 0x01, 0x01 },
- { "SEEBUSY", 0x02, 0x02 },
- { "SEEARBACK", 0x04, 0x04 },
- { "LDALTID_L", 0x08, 0x08 },
- { "SEEOPCODE", 0x70, 0x70 },
- { "INIT_DONE", 0x80, 0x80 }
-};
-
-int
-ahd_seestat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SEESTAT_parse_table, 6, "SEESTAT",
- 0xbe, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t DSPDATACTL_parse_table[] = {
- { "XMITOFFSTDIS", 0x02, 0x02 },
- { "RCVROFFSTDIS", 0x04, 0x04 },
- { "DESQDIS", 0x10, 0x10 },
- { "BYPASSENAB", 0x80, 0x80 }
-};
-
-int
-ahd_dspdatactl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DSPDATACTL_parse_table, 4, "DSPDATACTL",
- 0xc1, regvalue, cur_col, wrap));
-}
-
-int
-ahd_dfdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DFDAT",
- 0xc4, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t DSPSELECT_parse_table[] = {
- { "DSPSEL", 0x1f, 0x1f },
- { "AUTOINCEN", 0x80, 0x80 }
-};
-
-int
-ahd_dspselect_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DSPSELECT_parse_table, 2, "DSPSELECT",
- 0xc4, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t WRTBIASCTL_parse_table[] = {
- { "XMITMANVAL", 0x3f, 0x3f },
- { "AUTOXBCDIS", 0x80, 0x80 }
-};
-
-int
-ahd_wrtbiasctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(WRTBIASCTL_parse_table, 2, "WRTBIASCTL",
- 0xc5, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t SEQCTL0_parse_table[] = {
{ "LOADRAM", 0x01, 0x01 },
{ "SEQRESET", 0x02, 0x02 },
@@ -1801,18 +593,6 @@ ahd_seqctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xd6, regvalue, cur_col, wrap));
}
-static const ahd_reg_parse_entry_t FLAGS_parse_table[] = {
- { "CARRY", 0x01, 0x01 },
- { "ZERO", 0x02, 0x02 }
-};
-
-int
-ahd_flags_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(FLAGS_parse_table, 2, "FLAGS",
- 0xd8, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t SEQINTCTL_parse_table[] = {
{ "IRET", 0x01, 0x01 },
{ "INTMASK1", 0x02, 0x02 },
@@ -1831,118 +611,6 @@ ahd_seqintctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_seqram_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SEQRAM",
- 0xda, regvalue, cur_col, wrap));
-}
-
-int
-ahd_prgmcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "PRGMCNT",
- 0xde, regvalue, cur_col, wrap));
-}
-
-int
-ahd_accum_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "ACCUM",
- 0xe0, regvalue, cur_col, wrap));
-}
-
-int
-ahd_sindex_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SINDEX",
- 0xe2, regvalue, cur_col, wrap));
-}
-
-int
-ahd_dindex_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DINDEX",
- 0xe4, regvalue, cur_col, wrap));
-}
-
-int
-ahd_allones_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "ALLONES",
- 0xe8, regvalue, cur_col, wrap));
-}
-
-int
-ahd_allzeros_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "ALLZEROS",
- 0xea, regvalue, cur_col, wrap));
-}
-
-int
-ahd_none_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "NONE",
- 0xea, regvalue, cur_col, wrap));
-}
-
-int
-ahd_sindir_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SINDIR",
- 0xec, regvalue, cur_col, wrap));
-}
-
-int
-ahd_dindir_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "DINDIR",
- 0xed, regvalue, cur_col, wrap));
-}
-
-int
-ahd_stack_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "STACK",
- 0xf2, regvalue, cur_col, wrap));
-}
-
-int
-ahd_intvec1_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "INTVEC1_ADDR",
- 0xf4, regvalue, cur_col, wrap));
-}
-
-int
-ahd_curaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "CURADDR",
- 0xf4, regvalue, cur_col, wrap));
-}
-
-int
-ahd_intvec2_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "INTVEC2_ADDR",
- 0xf6, regvalue, cur_col, wrap));
-}
-
-int
-ahd_longjmp_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "LONGJMP_ADDR",
- 0xf8, regvalue, cur_col, wrap));
-}
-
-int
-ahd_accum_save_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "ACCUM_SAVE",
- 0xfa, regvalue, cur_col, wrap));
-}
-
-int
ahd_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "SRAM_BASE",
@@ -1950,69 +618,6 @@ ahd_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_waiting_scb_tails_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "WAITING_SCB_TAILS",
- 0x100, regvalue, cur_col, wrap));
-}
-
-int
-ahd_waiting_tid_head_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "WAITING_TID_HEAD",
- 0x120, regvalue, cur_col, wrap));
-}
-
-int
-ahd_waiting_tid_tail_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "WAITING_TID_TAIL",
- 0x122, regvalue, cur_col, wrap));
-}
-
-int
-ahd_next_queued_scb_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "NEXT_QUEUED_SCB_ADDR",
- 0x124, regvalue, cur_col, wrap));
-}
-
-int
-ahd_complete_scb_head_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "COMPLETE_SCB_HEAD",
- 0x128, regvalue, cur_col, wrap));
-}
-
-int
-ahd_complete_scb_dmainprog_head_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "COMPLETE_SCB_DMAINPROG_HEAD",
- 0x12a, regvalue, cur_col, wrap));
-}
-
-int
-ahd_complete_dma_scb_head_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "COMPLETE_DMA_SCB_HEAD",
- 0x12c, regvalue, cur_col, wrap));
-}
-
-int
-ahd_complete_dma_scb_tail_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "COMPLETE_DMA_SCB_TAIL",
- 0x12e, regvalue, cur_col, wrap));
-}
-
-int
-ahd_complete_on_qfreeze_head_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "COMPLETE_ON_QFREEZE_HEAD",
- 0x130, regvalue, cur_col, wrap));
-}
-
-int
ahd_qfreeze_count_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "QFREEZE_COUNT",
@@ -2033,33 +638,6 @@ ahd_saved_mode_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x136, regvalue, cur_col, wrap));
}
-int
-ahd_msg_out_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "MSG_OUT",
- 0x137, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t DMAPARAMS_parse_table[] = {
- { "FIFORESET", 0x01, 0x01 },
- { "FIFOFLUSH", 0x02, 0x02 },
- { "DIRECTION", 0x04, 0x04 },
- { "HDMAEN", 0x08, 0x08 },
- { "HDMAENACK", 0x08, 0x08 },
- { "SDMAEN", 0x10, 0x10 },
- { "SDMAENACK", 0x10, 0x10 },
- { "SCSIEN", 0x20, 0x20 },
- { "WIDEODD", 0x40, 0x40 },
- { "PRELOADEN", 0x80, 0x80 }
-};
-
-int
-ahd_dmaparams_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(DMAPARAMS_parse_table, 10, "DMAPARAMS",
- 0x138, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t SEQ_FLAGS_parse_table[] = {
{ "NO_DISCONNECT", 0x01, 0x01 },
{ "SPHASE_PENDING", 0x02, 0x02 },
@@ -2079,20 +657,6 @@ ahd_seq_flags_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x139, regvalue, cur_col, wrap));
}
-int
-ahd_saved_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SAVED_SCSIID",
- 0x13a, regvalue, cur_col, wrap));
-}
-
-int
-ahd_saved_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SAVED_LUN",
- 0x13b, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t LASTPHASE_parse_table[] = {
{ "P_DATAOUT", 0x00, 0xe0 },
{ "P_DATAOUT_DT", 0x20, 0xe0 },
@@ -2116,96 +680,6 @@ ahd_lastphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x13c, regvalue, cur_col, wrap));
}
-int
-ahd_qoutfifo_entry_valid_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "QOUTFIFO_ENTRY_VALID_TAG",
- 0x13d, regvalue, cur_col, wrap));
-}
-
-int
-ahd_kernel_tqinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "KERNEL_TQINPOS",
- 0x13e, regvalue, cur_col, wrap));
-}
-
-int
-ahd_tqinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "TQINPOS",
- 0x13f, regvalue, cur_col, wrap));
-}
-
-int
-ahd_shared_data_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SHARED_DATA_ADDR",
- 0x140, regvalue, cur_col, wrap));
-}
-
-int
-ahd_qoutfifo_next_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "QOUTFIFO_NEXT_ADDR",
- 0x144, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t ARG_1_parse_table[] = {
- { "CONT_MSG_LOOP_TARG", 0x02, 0x02 },
- { "CONT_MSG_LOOP_READ", 0x03, 0x03 },
- { "CONT_MSG_LOOP_WRITE",0x04, 0x04 },
- { "EXIT_MSG_LOOP", 0x08, 0x08 },
- { "MSGOUT_PHASEMIS", 0x10, 0x10 },
- { "SEND_REJ", 0x20, 0x20 },
- { "SEND_SENSE", 0x40, 0x40 },
- { "SEND_MSG", 0x80, 0x80 }
-};
-
-int
-ahd_arg_1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(ARG_1_parse_table, 8, "ARG_1",
- 0x148, regvalue, cur_col, wrap));
-}
-
-int
-ahd_arg_2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "ARG_2",
- 0x149, regvalue, cur_col, wrap));
-}
-
-int
-ahd_last_msg_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "LAST_MSG",
- 0x14a, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = {
- { "ALTSTIM", 0x01, 0x01 },
- { "ENAUTOATNP", 0x02, 0x02 },
- { "MANUALP", 0x0c, 0x0c },
- { "ENRSELI", 0x10, 0x10 },
- { "ENSELI", 0x20, 0x20 },
- { "MANUALCTL", 0x40, 0x40 }
-};
-
-int
-ahd_scsiseq_template_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SCSISEQ_TEMPLATE_parse_table, 6, "SCSISEQ_TEMPLATE",
- 0x14b, regvalue, cur_col, wrap));
-}
-
-int
-ahd_initiator_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "INITIATOR_TAG",
- 0x14c, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = {
{ "PENDING_MK_MESSAGE", 0x01, 0x01 },
{ "TARGET_MSG_PENDING", 0x02, 0x02 },
@@ -2220,62 +694,6 @@ ahd_seq_flags2_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_allocfifo_scbptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "ALLOCFIFO_SCBPTR",
- 0x14e, regvalue, cur_col, wrap));
-}
-
-int
-ahd_int_coalescing_timer_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "INT_COALESCING_TIMER",
- 0x150, regvalue, cur_col, wrap));
-}
-
-int
-ahd_int_coalescing_maxcmds_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "INT_COALESCING_MAXCMDS",
- 0x152, regvalue, cur_col, wrap));
-}
-
-int
-ahd_int_coalescing_mincmds_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "INT_COALESCING_MINCMDS",
- 0x153, regvalue, cur_col, wrap));
-}
-
-int
-ahd_cmds_pending_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "CMDS_PENDING",
- 0x154, regvalue, cur_col, wrap));
-}
-
-int
-ahd_int_coalescing_cmdcount_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "INT_COALESCING_CMDCOUNT",
- 0x156, regvalue, cur_col, wrap));
-}
-
-int
-ahd_local_hs_mailbox_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "LOCAL_HS_MAILBOX",
- 0x157, regvalue, cur_col, wrap));
-}
-
-int
-ahd_cmdsize_table_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "CMDSIZE_TABLE",
- 0x158, regvalue, cur_col, wrap));
-}
-
-int
ahd_mk_message_scb_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "MK_MESSAGE_SCB",
@@ -2290,53 +708,12 @@ ahd_mk_message_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahd_scb_residual_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT",
- 0x180, regvalue, cur_col, wrap));
-}
-
-int
ahd_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahd_print_register(NULL, 0, "SCB_BASE",
0x180, regvalue, cur_col, wrap));
}
-static const ahd_reg_parse_entry_t SCB_RESIDUAL_SGPTR_parse_table[] = {
- { "SG_LIST_NULL", 0x01, 0x01 },
- { "SG_OVERRUN_RESID", 0x02, 0x02 },
- { "SG_ADDR_MASK", 0xf8, 0xf8 }
-};
-
-int
-ahd_scb_residual_sgptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SCB_RESIDUAL_SGPTR_parse_table, 3, "SCB_RESIDUAL_SGPTR",
- 0x184, regvalue, cur_col, wrap));
-}
-
-int
-ahd_scb_scsi_status_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SCB_SCSI_STATUS",
- 0x188, regvalue, cur_col, wrap));
-}
-
-int
-ahd_scb_sense_busaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SCB_SENSE_BUSADDR",
- 0x18c, regvalue, cur_col, wrap));
-}
-
-int
-ahd_scb_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SCB_TAG",
- 0x190, regvalue, cur_col, wrap));
-}
-
static const ahd_reg_parse_entry_t SCB_CONTROL_parse_table[] = {
{ "SCB_TAG_TYPE", 0x03, 0x03 },
{ "DISCONNECTED", 0x04, 0x04 },
@@ -2366,103 +743,3 @@ ahd_scb_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x193, regvalue, cur_col, wrap));
}
-static const ahd_reg_parse_entry_t SCB_LUN_parse_table[] = {
- { "LID", 0xff, 0xff }
-};
-
-int
-ahd_scb_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SCB_LUN_parse_table, 1, "SCB_LUN",
- 0x194, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t SCB_TASK_ATTRIBUTE_parse_table[] = {
- { "SCB_XFERLEN_ODD", 0x01, 0x01 }
-};
-
-int
-ahd_scb_task_attribute_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SCB_TASK_ATTRIBUTE_parse_table, 1, "SCB_TASK_ATTRIBUTE",
- 0x195, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t SCB_CDB_LEN_parse_table[] = {
- { "SCB_CDB_LEN_PTR", 0x80, 0x80 }
-};
-
-int
-ahd_scb_cdb_len_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SCB_CDB_LEN_parse_table, 1, "SCB_CDB_LEN",
- 0x196, regvalue, cur_col, wrap));
-}
-
-int
-ahd_scb_task_management_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SCB_TASK_MANAGEMENT",
- 0x197, regvalue, cur_col, wrap));
-}
-
-int
-ahd_scb_dataptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SCB_DATAPTR",
- 0x198, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t SCB_DATACNT_parse_table[] = {
- { "SG_HIGH_ADDR_BITS", 0x7f, 0x7f },
- { "SG_LAST_SEG", 0x80, 0x80 }
-};
-
-int
-ahd_scb_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SCB_DATACNT_parse_table, 2, "SCB_DATACNT",
- 0x1a0, regvalue, cur_col, wrap));
-}
-
-static const ahd_reg_parse_entry_t SCB_SGPTR_parse_table[] = {
- { "SG_LIST_NULL", 0x01, 0x01 },
- { "SG_FULL_RESID", 0x02, 0x02 },
- { "SG_STATUS_VALID", 0x04, 0x04 }
-};
-
-int
-ahd_scb_sgptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(SCB_SGPTR_parse_table, 3, "SCB_SGPTR",
- 0x1a4, regvalue, cur_col, wrap));
-}
-
-int
-ahd_scb_busaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SCB_BUSADDR",
- 0x1a8, regvalue, cur_col, wrap));
-}
-
-int
-ahd_scb_next_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SCB_NEXT",
- 0x1ac, regvalue, cur_col, wrap));
-}
-
-int
-ahd_scb_next2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SCB_NEXT2",
- 0x1ae, regvalue, cur_col, wrap));
-}
-
-int
-ahd_scb_disconnected_lists_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahd_print_register(NULL, 0, "SCB_DISCONNECTED_LISTS",
- 0x1b8, regvalue, cur_col, wrap));
-}
-
diff --git a/drivers/scsi/aic7xxx/aic7xxx.reg b/drivers/scsi/aic7xxx/aic7xxx.reg
index 0d2f763c3427..9a96e55da39a 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.reg
+++ b/drivers/scsi/aic7xxx/aic7xxx.reg
@@ -51,6 +51,17 @@ VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $"
*/
/*
+ * Registers marked "dont_generate_debug_code" are not (yet) referenced
+ * from the driver code, and this keyword inhibits the generation of
+ * debug code for them.
+ *
+ * The REG_PRETTY_PRINT config will complain if dont_generate_debug_code
+ * is added to a register that is referenced in the driver.
+ * An unreferenced register without dont_generate_debug_code will result
+ * in dead code; no warning is issued.
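+ *
+ * A minimal sketch of where the keyword goes, using a hypothetical
+ * register FOO purely for illustration (FOO is not defined in this
+ * file):
+ *
+ *	register FOO {
+ *		address		0x0ff
+ *		access_mode	RO
+ *		dont_generate_debug_code
+ *	}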
+ */
+
+/*
* SCSI Sequence Control (p. 3-11).
* Each bit, when set starts a specific SCSI sequence on the bus
*/
@@ -97,6 +108,7 @@ register SXFRCTL1 {
field ENSTIMER 0x04
field ACTNEGEN 0x02
field STPWEN 0x01 /* Powered Termination */
+ dont_generate_debug_code
}
/*
@@ -155,6 +167,7 @@ register SCSISIGO {
mask P_MESGOUT CDI|MSGI
mask P_STATUS CDI|IOI
mask P_MESGIN CDI|IOI|MSGI
+ dont_generate_debug_code
}
/*
@@ -194,6 +207,7 @@ register SCSIID {
*/
alias SCSIOFFSET
mask SOFS_ULTRA2 0x7f /* Sync offset U2 chips */
+ dont_generate_debug_code
}
/*
@@ -205,6 +219,7 @@ register SCSIID {
register SCSIDATL {
address 0x006
access_mode RW
+ dont_generate_debug_code
}
register SCSIDATH {
@@ -223,6 +238,7 @@ register STCNT {
address 0x008
size 3
access_mode RW
+ dont_generate_debug_code
}
/* ALT_MODE registers (Ultra2 and Ultra160 chips) */
@@ -248,6 +264,7 @@ register OPTIONMODE {
field AUTO_MSGOUT_DE 0x02
field DIS_MSGIN_DUALEDGE 0x01
mask OPTIONMODE_DEFAULTS AUTO_MSGOUT_DE|DIS_MSGIN_DUALEDGE
+ dont_generate_debug_code
}
/* ALT_MODE register on Ultra160 chips */
@@ -256,6 +273,7 @@ register TARGCRCCNT {
size 2
access_mode RW
count 2
+ dont_generate_debug_code
}
/*
@@ -271,6 +289,7 @@ register CLRSINT0 {
field CLRSWRAP 0x08
field CLRIOERR 0x08 /* Ultra2 Only */
field CLRSPIORDY 0x02
+ dont_generate_debug_code
}
/*
@@ -306,6 +325,7 @@ register CLRSINT1 {
field CLRSCSIPERR 0x04
field CLRPHASECHG 0x02
field CLRREQINIT 0x01
+ dont_generate_debug_code
}
/*
@@ -360,6 +380,7 @@ register SCSIID_ULTRA2 {
access_mode RW
mask TID 0xf0 /* Target ID mask */
mask OID 0x0f /* Our ID mask */
+ dont_generate_debug_code
}
/*
@@ -425,6 +446,7 @@ register SHADDR {
address 0x014
size 4
access_mode RO
+ dont_generate_debug_code
}
/*
@@ -441,6 +463,7 @@ register SELTIMER {
field STAGE2 0x02
field STAGE1 0x01
alias TARGIDIN
+ dont_generate_debug_code
}
/*
@@ -453,6 +476,7 @@ register SELID {
access_mode RW
mask SELID_MASK 0xf0
field ONEBIT 0x08
+ dont_generate_debug_code
}
register SCAMCTL {
@@ -473,6 +497,7 @@ register TARGID {
size 2
access_mode RW
count 14
+ dont_generate_debug_code
}
/*
@@ -495,6 +520,7 @@ register SPIOCAP {
field EEPROM 0x04 /* Writable external BIOS ROM */
field ROM 0x02 /* Logic for accessing external ROM */
field SSPIOCPS 0x01 /* Termination and cable detection */
+ dont_generate_debug_code
}
register BRDCTL {
@@ -514,6 +540,7 @@ register BRDCTL {
field BRDDAT2 0x04
field BRDRW_ULTRA2 0x02
field BRDSTB_ULTRA2 0x01
+ dont_generate_debug_code
}
/*
@@ -551,6 +578,7 @@ register SEECTL {
field SEECK 0x04
field SEEDO 0x02
field SEEDI 0x01
+ dont_generate_debug_code
}
/*
* SCSI Block Control (p. 3-32)
@@ -601,6 +629,7 @@ register SEQRAM {
address 0x061
access_mode RW
count 2
+ dont_generate_debug_code
}
/*
@@ -610,6 +639,7 @@ register SEQRAM {
register SEQADDR0 {
address 0x062
access_mode RW
+ dont_generate_debug_code
}
register SEQADDR1 {
@@ -617,6 +647,7 @@ register SEQADDR1 {
access_mode RW
count 8
mask SEQADDR1_MASK 0x01
+ dont_generate_debug_code
}
/*
@@ -627,35 +658,41 @@ register ACCUM {
address 0x064
access_mode RW
accumulator
+ dont_generate_debug_code
}
register SINDEX {
address 0x065
access_mode RW
sindex
+ dont_generate_debug_code
}
register DINDEX {
address 0x066
access_mode RW
+ dont_generate_debug_code
}
register ALLONES {
address 0x069
access_mode RO
allones
+ dont_generate_debug_code
}
register ALLZEROS {
address 0x06a
access_mode RO
allzeros
+ dont_generate_debug_code
}
register NONE {
address 0x06a
access_mode WO
none
+ dont_generate_debug_code
}
register FLAGS {
@@ -664,16 +701,19 @@ register FLAGS {
count 18
field ZERO 0x02
field CARRY 0x01
+ dont_generate_debug_code
}
register SINDIR {
address 0x06c
access_mode RO
+ dont_generate_debug_code
}
register DINDIR {
address 0x06d
access_mode WO
+ dont_generate_debug_code
}
register FUNCTION1 {
@@ -685,6 +725,7 @@ register STACK {
address 0x06f
access_mode RO
count 5
+ dont_generate_debug_code
}
const STACK_SIZE 4
@@ -716,6 +757,7 @@ register DSCOMMAND0 {
field RAMPS 0x04 /* External SCB RAM Present */
field USCBSIZE32 0x02 /* Use 32byte SCB Page Size */
field CIOPARCKEN 0x01 /* Internal bus parity error enable */
+ dont_generate_debug_code
}
register DSCOMMAND1 {
@@ -724,6 +766,7 @@ register DSCOMMAND1 {
mask DSLATT 0xfc /* PCI latency timer (non-ultra2) */
field HADDLDSEL1 0x02 /* Host Address Load Select Bits */
field HADDLDSEL0 0x01
+ dont_generate_debug_code
}
/*
@@ -735,6 +778,7 @@ register BUSTIME {
count 2
mask BOFF 0xf0
mask BON 0x0f
+ dont_generate_debug_code
}
/*
@@ -749,6 +793,7 @@ register BUSSPD {
mask STBON 0x07
mask DFTHRSH_100 0xc0
mask DFTHRSH_75 0x80
+ dont_generate_debug_code
}
/* aic7850/55/60/70/80/95 only */
@@ -756,6 +801,7 @@ register DSPCISTATUS {
address 0x086
count 4
mask DFTHRSH_100 0xc0
+ dont_generate_debug_code
}
/* aic7890/91/96/97 only */
@@ -764,6 +810,7 @@ register HS_MAILBOX {
mask HOST_MAILBOX 0xF0
mask SEQ_MAILBOX 0x0F
mask HOST_TQINPOS 0x80 /* Boundary at either 0 or 128 */
+ dont_generate_debug_code
}
const HOST_MAILBOX_SHIFT 4
@@ -784,6 +831,7 @@ register HCNTRL {
field INTEN 0x02
field CHIPRST 0x01
field CHIPRSTACK 0x01
+ dont_generate_debug_code
}
/*
@@ -795,12 +843,14 @@ register HADDR {
address 0x088
size 4
access_mode RW
+ dont_generate_debug_code
}
register HCNT {
address 0x08c
size 3
access_mode RW
+ dont_generate_debug_code
}
/*
@@ -810,6 +860,7 @@ register HCNT {
register SCBPTR {
address 0x090
access_mode RW
+ dont_generate_debug_code
}
/*
@@ -878,6 +929,7 @@ register INTSTAT {
mask SEQINT_MASK 0xf0|SEQINT /* SEQINT Status Codes */
mask INT_PEND (BRKADRINT|SEQINT|SCSIINT|CMDCMPLT)
+ dont_generate_debug_code
}
/*
@@ -911,6 +963,7 @@ register CLRINT {
field CLRSCSIINT 0x04
field CLRCMDINT 0x02
field CLRSEQINT 0x01
+ dont_generate_debug_code
}
register DFCNTRL {
@@ -944,6 +997,7 @@ register DFSTATUS {
register DFWADDR {
address 0x95
access_mode RW
+ dont_generate_debug_code
}
register DFRADDR {
@@ -954,6 +1008,7 @@ register DFRADDR {
register DFDAT {
address 0x099
access_mode RW
+ dont_generate_debug_code
}
/*
@@ -967,6 +1022,7 @@ register SCBCNT {
count 1
field SCBAUTO 0x80
mask SCBCNT_MASK 0x1f
+ dont_generate_debug_code
}
/*
@@ -977,6 +1033,7 @@ register QINFIFO {
address 0x09b
access_mode RW
count 12
+ dont_generate_debug_code
}
/*
@@ -996,6 +1053,7 @@ register QOUTFIFO {
address 0x09d
access_mode WO
count 7
+ dont_generate_debug_code
}
register CRCCONTROL1 {
@@ -1008,6 +1066,7 @@ register CRCCONTROL1 {
field CRCREQCHKEN 0x10
field TARGCRCENDEN 0x08
field TARGCRCCNTEN 0x04
+ dont_generate_debug_code
}
@@ -1040,6 +1099,7 @@ register SFUNCT {
access_mode RW
count 4
field ALT_MODE 0x80
+ dont_generate_debug_code
}
/*
@@ -1053,24 +1113,31 @@ scb {
size 4
alias SCB_RESIDUAL_DATACNT
alias SCB_CDB_STORE
+ dont_generate_debug_code
}
SCB_RESIDUAL_SGPTR {
size 4
+ dont_generate_debug_code
}
SCB_SCSI_STATUS {
size 1
+ dont_generate_debug_code
}
SCB_TARGET_PHASES {
size 1
+ dont_generate_debug_code
}
SCB_TARGET_DATA_DIR {
size 1
+ dont_generate_debug_code
}
SCB_TARGET_ITAG {
size 1
+ dont_generate_debug_code
}
SCB_DATAPTR {
size 4
+ dont_generate_debug_code
}
SCB_DATACNT {
/*
@@ -1080,12 +1147,14 @@ scb {
size 4
field SG_LAST_SEG 0x80 /* In the fourth byte */
mask SG_HIGH_ADDR_BITS 0x7F /* In the fourth byte */
+ dont_generate_debug_code
}
SCB_SGPTR {
size 4
field SG_RESID_VALID 0x04 /* In the first byte */
field SG_FULL_RESID 0x02 /* In the first byte */
field SG_LIST_NULL 0x01 /* In the first byte */
+ dont_generate_debug_code
}
SCB_CONTROL {
size 1
@@ -1115,22 +1184,27 @@ scb {
}
SCB_CDB_LEN {
size 1
+ dont_generate_debug_code
}
SCB_SCSIRATE {
size 1
+ dont_generate_debug_code
}
SCB_SCSIOFFSET {
size 1
count 1
+ dont_generate_debug_code
}
SCB_NEXT {
size 1
+ dont_generate_debug_code
}
SCB_64_SPARE {
size 16
}
SCB_64_BTT {
size 16
+ dont_generate_debug_code
}
}
@@ -1149,6 +1223,7 @@ register SEECTL_2840 {
field CS_2840 0x04
field CK_2840 0x02
field DO_2840 0x01
+ dont_generate_debug_code
}
register STATUS_2840 {
@@ -1159,6 +1234,7 @@ register STATUS_2840 {
mask BIOS_SEL 0x60
mask ADSEL 0x1e
field DI_2840 0x01
+ dont_generate_debug_code
}
/* --------------------- AIC-7870-only definitions -------------------- */
@@ -1166,18 +1242,22 @@ register STATUS_2840 {
register CCHADDR {
address 0x0E0
size 8
+ dont_generate_debug_code
}
register CCHCNT {
address 0x0E8
+ dont_generate_debug_code
}
register CCSGRAM {
address 0x0E9
+ dont_generate_debug_code
}
register CCSGADDR {
address 0x0EA
+ dont_generate_debug_code
}
register CCSGCTL {
@@ -1186,11 +1266,13 @@ register CCSGCTL {
field CCSGEN 0x08
field SG_FETCH_NEEDED 0x02 /* Bit used for software state */
field CCSGRESET 0x01
+ dont_generate_debug_code
}
register CCSCBCNT {
address 0xEF
count 1
+ dont_generate_debug_code
}
register CCSCBCTL {
@@ -1201,14 +1283,17 @@ register CCSCBCTL {
field CCSCBEN 0x08
field CCSCBDIR 0x04
field CCSCBRESET 0x01
+ dont_generate_debug_code
}
register CCSCBADDR {
address 0x0ED
+ dont_generate_debug_code
}
register CCSCBRAM {
address 0xEC
+ dont_generate_debug_code
}
/*
@@ -1218,23 +1303,28 @@ register SCBBADDR {
address 0x0F0
access_mode RW
count 3
+ dont_generate_debug_code
}
register CCSCBPTR {
address 0x0F1
+ dont_generate_debug_code
}
register HNSCB_QOFF {
address 0x0F4
count 4
+ dont_generate_debug_code
}
register SNSCB_QOFF {
address 0x0F6
+ dont_generate_debug_code
}
register SDSCB_QOFF {
address 0x0F8
+ dont_generate_debug_code
}
register QOFF_CTLSTA {
@@ -1244,6 +1334,7 @@ register QOFF_CTLSTA {
field SDSCB_ROLLOVER 0x10
mask SCB_QSIZE 0x07
mask SCB_QSIZE_256 0x06
+ dont_generate_debug_code
}
register DFF_THRSH {
@@ -1267,6 +1358,7 @@ register DFF_THRSH {
mask WR_DFTHRSH_90 0x60
mask WR_DFTHRSH_MAX 0x70
count 4
+ dont_generate_debug_code
}
register SG_CACHE_PRE {
@@ -1275,6 +1367,7 @@ register SG_CACHE_PRE {
mask SG_ADDR_MASK 0xf8
field LAST_SEG 0x02
field LAST_SEG_DONE 0x01
+ dont_generate_debug_code
}
register SG_CACHE_SHADOW {
@@ -1283,6 +1376,7 @@ register SG_CACHE_SHADOW {
mask SG_ADDR_MASK 0xf8
field LAST_SEG 0x02
field LAST_SEG_DONE 0x01
+ dont_generate_debug_code
}
/* ---------------------- Scratch RAM Offsets ------------------------- */
/* These offsets are either to values that are initialized by the board's
@@ -1309,6 +1403,7 @@ scratch_ram {
BUSY_TARGETS {
alias TARG_SCSIRATE
size 16
+ dont_generate_debug_code
}
/*
* Bit vector of targets that have ULTRA enabled as set by
@@ -1321,6 +1416,7 @@ scratch_ram {
alias CMDSIZE_TABLE
size 2
count 2
+ dont_generate_debug_code
}
/*
* Bit vector of targets that have disconnection disabled as set by
@@ -1331,6 +1427,7 @@ scratch_ram {
DISC_DSB {
size 2
count 6
+ dont_generate_debug_code
}
CMDSIZE_TABLE_TAIL {
size 4
@@ -1341,12 +1438,14 @@ scratch_ram {
*/
MWI_RESIDUAL {
size 1
+ dont_generate_debug_code
}
/*
* SCBID of the next SCB to be started by the controller.
*/
NEXT_QUEUED_SCB {
size 1
+ dont_generate_debug_code
}
/*
* Single byte buffer used to designate the type or message
@@ -1354,6 +1453,7 @@ scratch_ram {
*/
MSG_OUT {
size 1
+ dont_generate_debug_code
}
/* Parameters for DMA Logic */
DMAPARAMS {
@@ -1369,6 +1469,7 @@ scratch_ram {
field DIRECTION 0x04 /* Set indicates PCI->SCSI */
field FIFOFLUSH 0x02
field FIFORESET 0x01
+ dont_generate_debug_code
}
SEQ_FLAGS {
size 1
@@ -1390,9 +1491,11 @@ scratch_ram {
*/
SAVED_SCSIID {
size 1
+ dont_generate_debug_code
}
SAVED_LUN {
size 1
+ dont_generate_debug_code
}
/*
* The last bus phase as seen by the sequencer.
@@ -1417,6 +1520,7 @@ scratch_ram {
*/
WAITING_SCBH {
size 1
+ dont_generate_debug_code
}
/*
* head of list of SCBs that are
@@ -1425,6 +1529,7 @@ scratch_ram {
*/
DISCONNECTED_SCBH {
size 1
+ dont_generate_debug_code
}
/*
* head of list of SCBs that are
@@ -1432,6 +1537,7 @@ scratch_ram {
*/
FREE_SCBH {
size 1
+ dont_generate_debug_code
}
/*
* head of list of SCBs that have
@@ -1446,6 +1552,7 @@ scratch_ram {
*/
HSCB_ADDR {
size 4
+ dont_generate_debug_code
}
/*
* Base address of our shared data with the kernel driver in host
@@ -1454,15 +1561,19 @@ scratch_ram {
*/
SHARED_DATA_ADDR {
size 4
+ dont_generate_debug_code
}
KERNEL_QINPOS {
size 1
+ dont_generate_debug_code
}
QINPOS {
size 1
+ dont_generate_debug_code
}
QOUTPOS {
size 1
+ dont_generate_debug_code
}
/*
* Kernel and sequencer offsets into the queue of
@@ -1471,9 +1582,11 @@ scratch_ram {
*/
KERNEL_TQINPOS {
size 1
+ dont_generate_debug_code
}
TQINPOS {
size 1
+ dont_generate_debug_code
}
ARG_1 {
size 1
@@ -1486,10 +1599,12 @@ scratch_ram {
mask CONT_MSG_LOOP 0x04
mask CONT_TARG_SESSION 0x02
alias RETURN_1
+ dont_generate_debug_code
}
ARG_2 {
size 1
alias RETURN_2
+ dont_generate_debug_code
}
/*
@@ -1498,6 +1613,7 @@ scratch_ram {
LAST_MSG {
size 1
alias TARG_IMMEDIATE_SCB
+ dont_generate_debug_code
}
/*
@@ -1513,6 +1629,7 @@ scratch_ram {
field ENAUTOATNO 0x08
field ENAUTOATNI 0x04
field ENAUTOATNP 0x02
+ dont_generate_debug_code
}
}
@@ -1533,12 +1650,14 @@ scratch_ram {
field HA_274_EXTENDED_TRANS 0x01
alias INITIATOR_TAG
count 1
+ dont_generate_debug_code
}
SEQ_FLAGS2 {
size 1
field SCB_DMA 0x01
field TARGET_MSG_PENDING 0x02
+ dont_generate_debug_code
}
}
@@ -1562,6 +1681,7 @@ scratch_ram {
field ENSPCHK 0x20
mask HSCSIID 0x07 /* our SCSI ID */
mask HWSCSIID 0x0f /* our SCSI ID if Wide Bus */
+ dont_generate_debug_code
}
INTDEF {
address 0x05c
@@ -1569,11 +1689,13 @@ scratch_ram {
count 1
field EDGE_TRIG 0x80
mask VECTOR 0x0f
+ dont_generate_debug_code
}
HOSTCONF {
address 0x05d
size 1
count 1
+ dont_generate_debug_code
}
HA_274_BIOSCTRL {
address 0x05f
@@ -1582,6 +1704,7 @@ scratch_ram {
mask BIOSMODE 0x30
mask BIOSDISABLED 0x30
field CHANNEL_B_PRIMARY 0x08
+ dont_generate_debug_code
}
}
@@ -1595,6 +1718,7 @@ scratch_ram {
TARG_OFFSET {
size 16
count 1
+ dont_generate_debug_code
}
}
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 0ae2b4605d09..e6f2bb7365e6 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -814,6 +814,7 @@ ahc_intr(struct ahc_softc *ahc)
static void
ahc_restart(struct ahc_softc *ahc)
{
+ uint8_t sblkctl;
ahc_pause(ahc);
@@ -868,6 +869,12 @@ ahc_restart(struct ahc_softc *ahc)
ahc_outb(ahc, SEQADDR0, 0);
ahc_outb(ahc, SEQADDR1, 0);
+ /*
+ * Take the LED out of diagnostic mode on PM resume, too
+ */
+ sblkctl = ahc_inb(ahc, SBLKCTL);
+ ahc_outb(ahc, SBLKCTL, (sblkctl & ~(DIAGLEDEN|DIAGLEDON)));
+
ahc_unpause(ahc);
}
diff --git a/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped b/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped
index 2ce1febca207..e821082a4f47 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped
+++ b/drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped
@@ -27,20 +27,6 @@ ahc_reg_print_t ahc_sxfrctl0_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_sxfrctl1_print;
-#else
-#define ahc_sxfrctl1_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SXFRCTL1", 0x02, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_scsisigo_print;
-#else
-#define ahc_scsisigo_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SCSISIGO", 0x03, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahc_reg_print_t ahc_scsisigi_print;
#else
#define ahc_scsisigi_print(regvalue, cur_col, wrap) \
@@ -55,55 +41,6 @@ ahc_reg_print_t ahc_scsirate_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_scsiid_print;
-#else
-#define ahc_scsiid_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SCSIID", 0x05, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_scsidatl_print;
-#else
-#define ahc_scsidatl_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SCSIDATL", 0x06, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_scsidath_print;
-#else
-#define ahc_scsidath_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SCSIDATH", 0x07, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_stcnt_print;
-#else
-#define ahc_stcnt_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "STCNT", 0x08, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_optionmode_print;
-#else
-#define ahc_optionmode_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "OPTIONMODE", 0x08, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_targcrccnt_print;
-#else
-#define ahc_targcrccnt_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "TARGCRCCNT", 0x0a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_clrsint0_print;
-#else
-#define ahc_clrsint0_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "CLRSINT0", 0x0b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahc_reg_print_t ahc_sstat0_print;
#else
#define ahc_sstat0_print(regvalue, cur_col, wrap) \
@@ -111,13 +48,6 @@ ahc_reg_print_t ahc_sstat0_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_clrsint1_print;
-#else
-#define ahc_clrsint1_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "CLRSINT1", 0x0c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahc_reg_print_t ahc_sstat1_print;
#else
#define ahc_sstat1_print(regvalue, cur_col, wrap) \
@@ -139,13 +69,6 @@ ahc_reg_print_t ahc_sstat3_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_scsiid_ultra2_print;
-#else
-#define ahc_scsiid_ultra2_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SCSIID_ULTRA2", 0x0f, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahc_reg_print_t ahc_simode0_print;
#else
#define ahc_simode0_print(regvalue, cur_col, wrap) \
@@ -167,76 +90,6 @@ ahc_reg_print_t ahc_scsibusl_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_scsibush_print;
-#else
-#define ahc_scsibush_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SCSIBUSH", 0x13, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_sxfrctl2_print;
-#else
-#define ahc_sxfrctl2_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SXFRCTL2", 0x13, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_shaddr_print;
-#else
-#define ahc_shaddr_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SHADDR", 0x14, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_seltimer_print;
-#else
-#define ahc_seltimer_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SELTIMER", 0x18, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_selid_print;
-#else
-#define ahc_selid_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SELID", 0x19, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_scamctl_print;
-#else
-#define ahc_scamctl_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SCAMCTL", 0x1a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_targid_print;
-#else
-#define ahc_targid_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "TARGID", 0x1b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_spiocap_print;
-#else
-#define ahc_spiocap_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SPIOCAP", 0x1b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_brdctl_print;
-#else
-#define ahc_brdctl_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "BRDCTL", 0x1d, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_seectl_print;
-#else
-#define ahc_seectl_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SEECTL", 0x1e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahc_reg_print_t ahc_sblkctl_print;
#else
#define ahc_sblkctl_print(regvalue, cur_col, wrap) \
@@ -244,62 +97,6 @@ ahc_reg_print_t ahc_sblkctl_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_busy_targets_print;
-#else
-#define ahc_busy_targets_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "BUSY_TARGETS", 0x20, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_ultra_enb_print;
-#else
-#define ahc_ultra_enb_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "ULTRA_ENB", 0x30, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_disc_dsb_print;
-#else
-#define ahc_disc_dsb_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "DISC_DSB", 0x32, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_cmdsize_table_tail_print;
-#else
-#define ahc_cmdsize_table_tail_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "CMDSIZE_TABLE_TAIL", 0x34, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_mwi_residual_print;
-#else
-#define ahc_mwi_residual_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "MWI_RESIDUAL", 0x38, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_next_queued_scb_print;
-#else
-#define ahc_next_queued_scb_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "NEXT_QUEUED_SCB", 0x39, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_msg_out_print;
-#else
-#define ahc_msg_out_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "MSG_OUT", 0x3a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_dmaparams_print;
-#else
-#define ahc_dmaparams_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "DMAPARAMS", 0x3b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahc_reg_print_t ahc_seq_flags_print;
#else
#define ahc_seq_flags_print(regvalue, cur_col, wrap) \
@@ -307,20 +104,6 @@ ahc_reg_print_t ahc_seq_flags_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_saved_scsiid_print;
-#else
-#define ahc_saved_scsiid_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SAVED_SCSIID", 0x3d, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_saved_lun_print;
-#else
-#define ahc_saved_lun_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SAVED_LUN", 0x3e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahc_reg_print_t ahc_lastphase_print;
#else
#define ahc_lastphase_print(regvalue, cur_col, wrap) \
@@ -328,153 +111,6 @@ ahc_reg_print_t ahc_lastphase_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_waiting_scbh_print;
-#else
-#define ahc_waiting_scbh_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "WAITING_SCBH", 0x40, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_disconnected_scbh_print;
-#else
-#define ahc_disconnected_scbh_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "DISCONNECTED_SCBH", 0x41, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_free_scbh_print;
-#else
-#define ahc_free_scbh_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "FREE_SCBH", 0x42, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_complete_scbh_print;
-#else
-#define ahc_complete_scbh_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "COMPLETE_SCBH", 0x43, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_hscb_addr_print;
-#else
-#define ahc_hscb_addr_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "HSCB_ADDR", 0x44, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_shared_data_addr_print;
-#else
-#define ahc_shared_data_addr_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SHARED_DATA_ADDR", 0x48, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_kernel_qinpos_print;
-#else
-#define ahc_kernel_qinpos_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "KERNEL_QINPOS", 0x4c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_qinpos_print;
-#else
-#define ahc_qinpos_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "QINPOS", 0x4d, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_qoutpos_print;
-#else
-#define ahc_qoutpos_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "QOUTPOS", 0x4e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_kernel_tqinpos_print;
-#else
-#define ahc_kernel_tqinpos_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "KERNEL_TQINPOS", 0x4f, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_tqinpos_print;
-#else
-#define ahc_tqinpos_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "TQINPOS", 0x50, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_arg_1_print;
-#else
-#define ahc_arg_1_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "ARG_1", 0x51, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_arg_2_print;
-#else
-#define ahc_arg_2_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "ARG_2", 0x52, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_last_msg_print;
-#else
-#define ahc_last_msg_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "LAST_MSG", 0x53, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_scsiseq_template_print;
-#else
-#define ahc_scsiseq_template_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SCSISEQ_TEMPLATE", 0x54, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_ha_274_biosglobal_print;
-#else
-#define ahc_ha_274_biosglobal_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "HA_274_BIOSGLOBAL", 0x56, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_seq_flags2_print;
-#else
-#define ahc_seq_flags2_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SEQ_FLAGS2", 0x57, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_scsiconf_print;
-#else
-#define ahc_scsiconf_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SCSICONF", 0x5a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_intdef_print;
-#else
-#define ahc_intdef_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "INTDEF", 0x5c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_hostconf_print;
-#else
-#define ahc_hostconf_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "HOSTCONF", 0x5d, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_ha_274_biosctrl_print;
-#else
-#define ahc_ha_274_biosctrl_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "HA_274_BIOSCTRL", 0x5f, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahc_reg_print_t ahc_seqctl_print;
#else
#define ahc_seqctl_print(regvalue, cur_col, wrap) \
@@ -482,111 +118,6 @@ ahc_reg_print_t ahc_seqctl_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_seqram_print;
-#else
-#define ahc_seqram_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SEQRAM", 0x61, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_seqaddr0_print;
-#else
-#define ahc_seqaddr0_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SEQADDR0", 0x62, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_seqaddr1_print;
-#else
-#define ahc_seqaddr1_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SEQADDR1", 0x63, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_accum_print;
-#else
-#define ahc_accum_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "ACCUM", 0x64, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_sindex_print;
-#else
-#define ahc_sindex_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SINDEX", 0x65, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_dindex_print;
-#else
-#define ahc_dindex_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "DINDEX", 0x66, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_allones_print;
-#else
-#define ahc_allones_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "ALLONES", 0x69, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_allzeros_print;
-#else
-#define ahc_allzeros_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "ALLZEROS", 0x6a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_none_print;
-#else
-#define ahc_none_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "NONE", 0x6a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_flags_print;
-#else
-#define ahc_flags_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "FLAGS", 0x6b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_sindir_print;
-#else
-#define ahc_sindir_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SINDIR", 0x6c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_dindir_print;
-#else
-#define ahc_dindir_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "DINDIR", 0x6d, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_function1_print;
-#else
-#define ahc_function1_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "FUNCTION1", 0x6e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_stack_print;
-#else
-#define ahc_stack_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "STACK", 0x6f, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_targ_offset_print;
-#else
-#define ahc_targ_offset_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "TARG_OFFSET", 0x70, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahc_reg_print_t ahc_sram_base_print;
#else
#define ahc_sram_base_print(regvalue, cur_col, wrap) \
@@ -594,97 +125,6 @@ ahc_reg_print_t ahc_sram_base_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_bctl_print;
-#else
-#define ahc_bctl_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "BCTL", 0x84, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_dscommand0_print;
-#else
-#define ahc_dscommand0_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "DSCOMMAND0", 0x84, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_bustime_print;
-#else
-#define ahc_bustime_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "BUSTIME", 0x85, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_dscommand1_print;
-#else
-#define ahc_dscommand1_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "DSCOMMAND1", 0x85, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_busspd_print;
-#else
-#define ahc_busspd_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "BUSSPD", 0x86, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_hs_mailbox_print;
-#else
-#define ahc_hs_mailbox_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "HS_MAILBOX", 0x86, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_dspcistatus_print;
-#else
-#define ahc_dspcistatus_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "DSPCISTATUS", 0x86, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_hcntrl_print;
-#else
-#define ahc_hcntrl_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "HCNTRL", 0x87, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_haddr_print;
-#else
-#define ahc_haddr_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "HADDR", 0x88, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_hcnt_print;
-#else
-#define ahc_hcnt_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "HCNT", 0x8c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_scbptr_print;
-#else
-#define ahc_scbptr_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SCBPTR", 0x90, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_intstat_print;
-#else
-#define ahc_intstat_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "INTSTAT", 0x91, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_clrint_print;
-#else
-#define ahc_clrint_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "CLRINT", 0x92, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahc_reg_print_t ahc_error_print;
#else
#define ahc_error_print(regvalue, cur_col, wrap) \
@@ -706,69 +146,6 @@ ahc_reg_print_t ahc_dfstatus_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_dfwaddr_print;
-#else
-#define ahc_dfwaddr_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "DFWADDR", 0x95, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_dfraddr_print;
-#else
-#define ahc_dfraddr_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "DFRADDR", 0x97, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_dfdat_print;
-#else
-#define ahc_dfdat_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "DFDAT", 0x99, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_scbcnt_print;
-#else
-#define ahc_scbcnt_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SCBCNT", 0x9a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_qinfifo_print;
-#else
-#define ahc_qinfifo_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "QINFIFO", 0x9b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_qincnt_print;
-#else
-#define ahc_qincnt_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "QINCNT", 0x9c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_qoutfifo_print;
-#else
-#define ahc_qoutfifo_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "QOUTFIFO", 0x9d, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_crccontrol1_print;
-#else
-#define ahc_crccontrol1_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "CRCCONTROL1", 0x9d, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_qoutcnt_print;
-#else
-#define ahc_qoutcnt_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "QOUTCNT", 0x9e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahc_reg_print_t ahc_scsiphase_print;
#else
#define ahc_scsiphase_print(regvalue, cur_col, wrap) \
@@ -776,13 +153,6 @@ ahc_reg_print_t ahc_scsiphase_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_sfunct_print;
-#else
-#define ahc_sfunct_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SFUNCT", 0x9f, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahc_reg_print_t ahc_scb_base_print;
#else
#define ahc_scb_base_print(regvalue, cur_col, wrap) \
@@ -790,69 +160,6 @@ ahc_reg_print_t ahc_scb_base_print;
#endif
#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_scb_cdb_ptr_print;
-#else
-#define ahc_scb_cdb_ptr_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SCB_CDB_PTR", 0xa0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_scb_residual_sgptr_print;
-#else
-#define ahc_scb_residual_sgptr_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SCB_RESIDUAL_SGPTR", 0xa4, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_scb_scsi_status_print;
-#else
-#define ahc_scb_scsi_status_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SCB_SCSI_STATUS", 0xa8, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_scb_target_phases_print;
-#else
-#define ahc_scb_target_phases_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SCB_TARGET_PHASES", 0xa9, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_scb_target_data_dir_print;
-#else
-#define ahc_scb_target_data_dir_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SCB_TARGET_DATA_DIR", 0xaa, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_scb_target_itag_print;
-#else
-#define ahc_scb_target_itag_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SCB_TARGET_ITAG", 0xab, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_scb_dataptr_print;
-#else
-#define ahc_scb_dataptr_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SCB_DATAPTR", 0xac, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_scb_datacnt_print;
-#else
-#define ahc_scb_datacnt_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SCB_DATACNT", 0xb0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_scb_sgptr_print;
-#else
-#define ahc_scb_sgptr_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SCB_SGPTR", 0xb4, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
ahc_reg_print_t ahc_scb_control_print;
#else
#define ahc_scb_control_print(regvalue, cur_col, wrap) \
@@ -880,188 +187,6 @@ ahc_reg_print_t ahc_scb_tag_print;
ahc_print_register(NULL, 0, "SCB_TAG", 0xbb, regvalue, cur_col, wrap)
#endif
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_scb_cdb_len_print;
-#else
-#define ahc_scb_cdb_len_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SCB_CDB_LEN", 0xbc, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_scb_scsirate_print;
-#else
-#define ahc_scb_scsirate_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SCB_SCSIRATE", 0xbd, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_scb_scsioffset_print;
-#else
-#define ahc_scb_scsioffset_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SCB_SCSIOFFSET", 0xbe, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_scb_next_print;
-#else
-#define ahc_scb_next_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SCB_NEXT", 0xbf, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_scb_64_spare_print;
-#else
-#define ahc_scb_64_spare_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SCB_64_SPARE", 0xc0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_seectl_2840_print;
-#else
-#define ahc_seectl_2840_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SEECTL_2840", 0xc0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_status_2840_print;
-#else
-#define ahc_status_2840_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "STATUS_2840", 0xc1, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_scb_64_btt_print;
-#else
-#define ahc_scb_64_btt_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SCB_64_BTT", 0xd0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_cchaddr_print;
-#else
-#define ahc_cchaddr_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "CCHADDR", 0xe0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_cchcnt_print;
-#else
-#define ahc_cchcnt_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "CCHCNT", 0xe8, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_ccsgram_print;
-#else
-#define ahc_ccsgram_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "CCSGRAM", 0xe9, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_ccsgaddr_print;
-#else
-#define ahc_ccsgaddr_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "CCSGADDR", 0xea, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_ccsgctl_print;
-#else
-#define ahc_ccsgctl_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "CCSGCTL", 0xeb, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_ccscbram_print;
-#else
-#define ahc_ccscbram_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "CCSCBRAM", 0xec, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_ccscbaddr_print;
-#else
-#define ahc_ccscbaddr_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "CCSCBADDR", 0xed, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_ccscbctl_print;
-#else
-#define ahc_ccscbctl_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "CCSCBCTL", 0xee, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_ccscbcnt_print;
-#else
-#define ahc_ccscbcnt_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "CCSCBCNT", 0xef, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_scbbaddr_print;
-#else
-#define ahc_scbbaddr_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SCBBADDR", 0xf0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_ccscbptr_print;
-#else
-#define ahc_ccscbptr_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "CCSCBPTR", 0xf1, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_hnscb_qoff_print;
-#else
-#define ahc_hnscb_qoff_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "HNSCB_QOFF", 0xf4, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_snscb_qoff_print;
-#else
-#define ahc_snscb_qoff_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SNSCB_QOFF", 0xf6, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_sdscb_qoff_print;
-#else
-#define ahc_sdscb_qoff_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SDSCB_QOFF", 0xf8, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_qoff_ctlsta_print;
-#else
-#define ahc_qoff_ctlsta_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "QOFF_CTLSTA", 0xfa, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_dff_thrsh_print;
-#else
-#define ahc_dff_thrsh_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "DFF_THRSH", 0xfb, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_sg_cache_shadow_print;
-#else
-#define ahc_sg_cache_shadow_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SG_CACHE_SHADOW", 0xfc, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahc_reg_print_t ahc_sg_cache_pre_print;
-#else
-#define ahc_sg_cache_pre_print(regvalue, cur_col, wrap) \
- ahc_print_register(NULL, 0, "SG_CACHE_PRE", 0xfc, regvalue, cur_col, wrap)
-#endif
-
#define SCSISEQ 0x00
#define TEMODE 0x80
diff --git a/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped b/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
index 309a562b009e..9f9b88047d0c 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
+++ b/drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
@@ -43,48 +43,6 @@ ahc_sxfrctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x01, regvalue, cur_col, wrap));
}
-static const ahc_reg_parse_entry_t SXFRCTL1_parse_table[] = {
- { "STPWEN", 0x01, 0x01 },
- { "ACTNEGEN", 0x02, 0x02 },
- { "ENSTIMER", 0x04, 0x04 },
- { "ENSPCHK", 0x20, 0x20 },
- { "SWRAPEN", 0x40, 0x40 },
- { "BITBUCKET", 0x80, 0x80 },
- { "STIMESEL", 0x18, 0x18 }
-};
-
-int
-ahc_sxfrctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(SXFRCTL1_parse_table, 7, "SXFRCTL1",
- 0x02, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t SCSISIGO_parse_table[] = {
- { "ACKO", 0x01, 0x01 },
- { "REQO", 0x02, 0x02 },
- { "BSYO", 0x04, 0x04 },
- { "SELO", 0x08, 0x08 },
- { "ATNO", 0x10, 0x10 },
- { "MSGO", 0x20, 0x20 },
- { "IOO", 0x40, 0x40 },
- { "CDO", 0x80, 0x80 },
- { "P_DATAOUT", 0x00, 0x00 },
- { "P_DATAIN", 0x40, 0x40 },
- { "P_COMMAND", 0x80, 0x80 },
- { "P_MESGOUT", 0xa0, 0xa0 },
- { "P_STATUS", 0xc0, 0xc0 },
- { "PHASE_MASK", 0xe0, 0xe0 },
- { "P_MESGIN", 0xe0, 0xe0 }
-};
-
-int
-ahc_scsisigo_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(SCSISIGO_parse_table, 15, "SCSISIGO",
- 0x03, regvalue, cur_col, wrap));
-}
-
static const ahc_reg_parse_entry_t SCSISIGI_parse_table[] = {
{ "ACKI", 0x01, 0x01 },
{ "REQI", 0x02, 0x02 },
@@ -128,77 +86,6 @@ ahc_scsirate_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x04, regvalue, cur_col, wrap));
}
-static const ahc_reg_parse_entry_t SCSIID_parse_table[] = {
- { "TWIN_CHNLB", 0x80, 0x80 },
- { "OID", 0x0f, 0x0f },
- { "TWIN_TID", 0x70, 0x70 },
- { "SOFS_ULTRA2", 0x7f, 0x7f },
- { "TID", 0xf0, 0xf0 }
-};
-
-int
-ahc_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(SCSIID_parse_table, 5, "SCSIID",
- 0x05, regvalue, cur_col, wrap));
-}
-
-int
-ahc_scsidatl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SCSIDATL",
- 0x06, regvalue, cur_col, wrap));
-}
-
-int
-ahc_stcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "STCNT",
- 0x08, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t OPTIONMODE_parse_table[] = {
- { "DIS_MSGIN_DUALEDGE", 0x01, 0x01 },
- { "AUTO_MSGOUT_DE", 0x02, 0x02 },
- { "SCSIDATL_IMGEN", 0x04, 0x04 },
- { "EXPPHASEDIS", 0x08, 0x08 },
- { "BUSFREEREV", 0x10, 0x10 },
- { "ATNMGMNTEN", 0x20, 0x20 },
- { "AUTOACKEN", 0x40, 0x40 },
- { "AUTORATEEN", 0x80, 0x80 },
- { "OPTIONMODE_DEFAULTS",0x03, 0x03 }
-};
-
-int
-ahc_optionmode_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(OPTIONMODE_parse_table, 9, "OPTIONMODE",
- 0x08, regvalue, cur_col, wrap));
-}
-
-int
-ahc_targcrccnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "TARGCRCCNT",
- 0x0a, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t CLRSINT0_parse_table[] = {
- { "CLRSPIORDY", 0x02, 0x02 },
- { "CLRSWRAP", 0x08, 0x08 },
- { "CLRIOERR", 0x08, 0x08 },
- { "CLRSELINGO", 0x10, 0x10 },
- { "CLRSELDI", 0x20, 0x20 },
- { "CLRSELDO", 0x40, 0x40 }
-};
-
-int
-ahc_clrsint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(CLRSINT0_parse_table, 6, "CLRSINT0",
- 0x0b, regvalue, cur_col, wrap));
-}
-
static const ahc_reg_parse_entry_t SSTAT0_parse_table[] = {
{ "DMADONE", 0x01, 0x01 },
{ "SPIORDY", 0x02, 0x02 },
@@ -218,23 +105,6 @@ ahc_sstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x0b, regvalue, cur_col, wrap));
}
-static const ahc_reg_parse_entry_t CLRSINT1_parse_table[] = {
- { "CLRREQINIT", 0x01, 0x01 },
- { "CLRPHASECHG", 0x02, 0x02 },
- { "CLRSCSIPERR", 0x04, 0x04 },
- { "CLRBUSFREE", 0x08, 0x08 },
- { "CLRSCSIRSTI", 0x20, 0x20 },
- { "CLRATNO", 0x40, 0x40 },
- { "CLRSELTIMEO", 0x80, 0x80 }
-};
-
-int
-ahc_clrsint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(CLRSINT1_parse_table, 7, "CLRSINT1",
- 0x0c, regvalue, cur_col, wrap));
-}
-
static const ahc_reg_parse_entry_t SSTAT1_parse_table[] = {
{ "REQINIT", 0x01, 0x01 },
{ "PHASECHG", 0x02, 0x02 },
@@ -284,18 +154,6 @@ ahc_sstat3_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x0e, regvalue, cur_col, wrap));
}
-static const ahc_reg_parse_entry_t SCSIID_ULTRA2_parse_table[] = {
- { "OID", 0x0f, 0x0f },
- { "TID", 0xf0, 0xf0 }
-};
-
-int
-ahc_scsiid_ultra2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(SCSIID_ULTRA2_parse_table, 2, "SCSIID_ULTRA2",
- 0x0f, regvalue, cur_col, wrap));
-}
-
static const ahc_reg_parse_entry_t SIMODE0_parse_table[] = {
{ "ENDMADONE", 0x01, 0x01 },
{ "ENSPIORDY", 0x02, 0x02 },
@@ -339,107 +197,6 @@ ahc_scsibusl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x12, regvalue, cur_col, wrap));
}
-int
-ahc_shaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SHADDR",
- 0x14, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t SELTIMER_parse_table[] = {
- { "STAGE1", 0x01, 0x01 },
- { "STAGE2", 0x02, 0x02 },
- { "STAGE3", 0x04, 0x04 },
- { "STAGE4", 0x08, 0x08 },
- { "STAGE5", 0x10, 0x10 },
- { "STAGE6", 0x20, 0x20 }
-};
-
-int
-ahc_seltimer_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(SELTIMER_parse_table, 6, "SELTIMER",
- 0x18, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t SELID_parse_table[] = {
- { "ONEBIT", 0x08, 0x08 },
- { "SELID_MASK", 0xf0, 0xf0 }
-};
-
-int
-ahc_selid_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(SELID_parse_table, 2, "SELID",
- 0x19, regvalue, cur_col, wrap));
-}
-
-int
-ahc_targid_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "TARGID",
- 0x1b, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t SPIOCAP_parse_table[] = {
- { "SSPIOCPS", 0x01, 0x01 },
- { "ROM", 0x02, 0x02 },
- { "EEPROM", 0x04, 0x04 },
- { "SEEPROM", 0x08, 0x08 },
- { "EXT_BRDCTL", 0x10, 0x10 },
- { "SOFTCMDEN", 0x20, 0x20 },
- { "SOFT0", 0x40, 0x40 },
- { "SOFT1", 0x80, 0x80 }
-};
-
-int
-ahc_spiocap_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(SPIOCAP_parse_table, 8, "SPIOCAP",
- 0x1b, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t BRDCTL_parse_table[] = {
- { "BRDCTL0", 0x01, 0x01 },
- { "BRDSTB_ULTRA2", 0x01, 0x01 },
- { "BRDCTL1", 0x02, 0x02 },
- { "BRDRW_ULTRA2", 0x02, 0x02 },
- { "BRDRW", 0x04, 0x04 },
- { "BRDDAT2", 0x04, 0x04 },
- { "BRDCS", 0x08, 0x08 },
- { "BRDDAT3", 0x08, 0x08 },
- { "BRDSTB", 0x10, 0x10 },
- { "BRDDAT4", 0x10, 0x10 },
- { "BRDDAT5", 0x20, 0x20 },
- { "BRDDAT6", 0x40, 0x40 },
- { "BRDDAT7", 0x80, 0x80 }
-};
-
-int
-ahc_brdctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(BRDCTL_parse_table, 13, "BRDCTL",
- 0x1d, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t SEECTL_parse_table[] = {
- { "SEEDI", 0x01, 0x01 },
- { "SEEDO", 0x02, 0x02 },
- { "SEECK", 0x04, 0x04 },
- { "SEECS", 0x08, 0x08 },
- { "SEERDY", 0x10, 0x10 },
- { "SEEMS", 0x20, 0x20 },
- { "EXTARBREQ", 0x40, 0x40 },
- { "EXTARBACK", 0x80, 0x80 }
-};
-
-int
-ahc_seectl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(SEECTL_parse_table, 8, "SEECTL",
- 0x1e, regvalue, cur_col, wrap));
-}
-
static const ahc_reg_parse_entry_t SBLKCTL_parse_table[] = {
{ "XCVR", 0x01, 0x01 },
{ "SELWIDE", 0x02, 0x02 },
@@ -458,68 +215,6 @@ ahc_sblkctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x1f, regvalue, cur_col, wrap));
}
-int
-ahc_busy_targets_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "BUSY_TARGETS",
- 0x20, regvalue, cur_col, wrap));
-}
-
-int
-ahc_ultra_enb_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "ULTRA_ENB",
- 0x30, regvalue, cur_col, wrap));
-}
-
-int
-ahc_disc_dsb_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "DISC_DSB",
- 0x32, regvalue, cur_col, wrap));
-}
-
-int
-ahc_mwi_residual_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "MWI_RESIDUAL",
- 0x38, regvalue, cur_col, wrap));
-}
-
-int
-ahc_next_queued_scb_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "NEXT_QUEUED_SCB",
- 0x39, regvalue, cur_col, wrap));
-}
-
-int
-ahc_msg_out_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "MSG_OUT",
- 0x3a, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t DMAPARAMS_parse_table[] = {
- { "FIFORESET", 0x01, 0x01 },
- { "FIFOFLUSH", 0x02, 0x02 },
- { "DIRECTION", 0x04, 0x04 },
- { "HDMAEN", 0x08, 0x08 },
- { "HDMAENACK", 0x08, 0x08 },
- { "SDMAEN", 0x10, 0x10 },
- { "SDMAENACK", 0x10, 0x10 },
- { "SCSIEN", 0x20, 0x20 },
- { "WIDEODD", 0x40, 0x40 },
- { "PRELOADEN", 0x80, 0x80 }
-};
-
-int
-ahc_dmaparams_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(DMAPARAMS_parse_table, 10, "DMAPARAMS",
- 0x3b, regvalue, cur_col, wrap));
-}
-
static const ahc_reg_parse_entry_t SEQ_FLAGS_parse_table[] = {
{ "NO_DISCONNECT", 0x01, 0x01 },
{ "SPHASE_PENDING", 0x02, 0x02 },
@@ -539,20 +234,6 @@ ahc_seq_flags_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x3c, regvalue, cur_col, wrap));
}
-int
-ahc_saved_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SAVED_SCSIID",
- 0x3d, regvalue, cur_col, wrap));
-}
-
-int
-ahc_saved_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SAVED_LUN",
- 0x3e, regvalue, cur_col, wrap));
-}
-
static const ahc_reg_parse_entry_t LASTPHASE_parse_table[] = {
{ "MSGI", 0x20, 0x20 },
{ "IOI", 0x40, 0x40 },
@@ -574,193 +255,6 @@ ahc_lastphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x3f, regvalue, cur_col, wrap));
}
-int
-ahc_waiting_scbh_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "WAITING_SCBH",
- 0x40, regvalue, cur_col, wrap));
-}
-
-int
-ahc_disconnected_scbh_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "DISCONNECTED_SCBH",
- 0x41, regvalue, cur_col, wrap));
-}
-
-int
-ahc_free_scbh_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "FREE_SCBH",
- 0x42, regvalue, cur_col, wrap));
-}
-
-int
-ahc_hscb_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "HSCB_ADDR",
- 0x44, regvalue, cur_col, wrap));
-}
-
-int
-ahc_shared_data_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SHARED_DATA_ADDR",
- 0x48, regvalue, cur_col, wrap));
-}
-
-int
-ahc_kernel_qinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "KERNEL_QINPOS",
- 0x4c, regvalue, cur_col, wrap));
-}
-
-int
-ahc_qinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "QINPOS",
- 0x4d, regvalue, cur_col, wrap));
-}
-
-int
-ahc_qoutpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "QOUTPOS",
- 0x4e, regvalue, cur_col, wrap));
-}
-
-int
-ahc_kernel_tqinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "KERNEL_TQINPOS",
- 0x4f, regvalue, cur_col, wrap));
-}
-
-int
-ahc_tqinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "TQINPOS",
- 0x50, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t ARG_1_parse_table[] = {
- { "CONT_TARG_SESSION", 0x02, 0x02 },
- { "CONT_MSG_LOOP", 0x04, 0x04 },
- { "EXIT_MSG_LOOP", 0x08, 0x08 },
- { "MSGOUT_PHASEMIS", 0x10, 0x10 },
- { "SEND_REJ", 0x20, 0x20 },
- { "SEND_SENSE", 0x40, 0x40 },
- { "SEND_MSG", 0x80, 0x80 }
-};
-
-int
-ahc_arg_1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(ARG_1_parse_table, 7, "ARG_1",
- 0x51, regvalue, cur_col, wrap));
-}
-
-int
-ahc_arg_2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "ARG_2",
- 0x52, regvalue, cur_col, wrap));
-}
-
-int
-ahc_last_msg_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "LAST_MSG",
- 0x53, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = {
- { "ENAUTOATNP", 0x02, 0x02 },
- { "ENAUTOATNI", 0x04, 0x04 },
- { "ENAUTOATNO", 0x08, 0x08 },
- { "ENRSELI", 0x10, 0x10 },
- { "ENSELI", 0x20, 0x20 },
- { "ENSELO", 0x40, 0x40 }
-};
-
-int
-ahc_scsiseq_template_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(SCSISEQ_TEMPLATE_parse_table, 6, "SCSISEQ_TEMPLATE",
- 0x54, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t HA_274_BIOSGLOBAL_parse_table[] = {
- { "HA_274_EXTENDED_TRANS",0x01, 0x01 }
-};
-
-int
-ahc_ha_274_biosglobal_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(HA_274_BIOSGLOBAL_parse_table, 1, "HA_274_BIOSGLOBAL",
- 0x56, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = {
- { "SCB_DMA", 0x01, 0x01 },
- { "TARGET_MSG_PENDING", 0x02, 0x02 }
-};
-
-int
-ahc_seq_flags2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(SEQ_FLAGS2_parse_table, 2, "SEQ_FLAGS2",
- 0x57, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t SCSICONF_parse_table[] = {
- { "ENSPCHK", 0x20, 0x20 },
- { "RESET_SCSI", 0x40, 0x40 },
- { "TERM_ENB", 0x80, 0x80 },
- { "HSCSIID", 0x07, 0x07 },
- { "HWSCSIID", 0x0f, 0x0f }
-};
-
-int
-ahc_scsiconf_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(SCSICONF_parse_table, 5, "SCSICONF",
- 0x5a, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t INTDEF_parse_table[] = {
- { "EDGE_TRIG", 0x80, 0x80 },
- { "VECTOR", 0x0f, 0x0f }
-};
-
-int
-ahc_intdef_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(INTDEF_parse_table, 2, "INTDEF",
- 0x5c, regvalue, cur_col, wrap));
-}
-
-int
-ahc_hostconf_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "HOSTCONF",
- 0x5d, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t HA_274_BIOSCTRL_parse_table[] = {
- { "CHANNEL_B_PRIMARY", 0x08, 0x08 },
- { "BIOSMODE", 0x30, 0x30 },
- { "BIOSDISABLED", 0x30, 0x30 }
-};
-
-int
-ahc_ha_274_biosctrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(HA_274_BIOSCTRL_parse_table, 3, "HA_274_BIOSCTRL",
- 0x5f, regvalue, cur_col, wrap));
-}
-
static const ahc_reg_parse_entry_t SEQCTL_parse_table[] = {
{ "LOADRAM", 0x01, 0x01 },
{ "SEQRESET", 0x02, 0x02 },
@@ -780,285 +274,12 @@ ahc_seqctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
}
int
-ahc_seqram_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SEQRAM",
- 0x61, regvalue, cur_col, wrap));
-}
-
-int
-ahc_seqaddr0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SEQADDR0",
- 0x62, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t SEQADDR1_parse_table[] = {
- { "SEQADDR1_MASK", 0x01, 0x01 }
-};
-
-int
-ahc_seqaddr1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(SEQADDR1_parse_table, 1, "SEQADDR1",
- 0x63, regvalue, cur_col, wrap));
-}
-
-int
-ahc_accum_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "ACCUM",
- 0x64, regvalue, cur_col, wrap));
-}
-
-int
-ahc_sindex_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SINDEX",
- 0x65, regvalue, cur_col, wrap));
-}
-
-int
-ahc_dindex_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "DINDEX",
- 0x66, regvalue, cur_col, wrap));
-}
-
-int
-ahc_allones_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "ALLONES",
- 0x69, regvalue, cur_col, wrap));
-}
-
-int
-ahc_allzeros_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "ALLZEROS",
- 0x6a, regvalue, cur_col, wrap));
-}
-
-int
-ahc_none_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "NONE",
- 0x6a, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t FLAGS_parse_table[] = {
- { "CARRY", 0x01, 0x01 },
- { "ZERO", 0x02, 0x02 }
-};
-
-int
-ahc_flags_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(FLAGS_parse_table, 2, "FLAGS",
- 0x6b, regvalue, cur_col, wrap));
-}
-
-int
-ahc_sindir_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SINDIR",
- 0x6c, regvalue, cur_col, wrap));
-}
-
-int
-ahc_dindir_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "DINDIR",
- 0x6d, regvalue, cur_col, wrap));
-}
-
-int
-ahc_stack_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "STACK",
- 0x6f, regvalue, cur_col, wrap));
-}
-
-int
-ahc_targ_offset_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "TARG_OFFSET",
- 0x70, regvalue, cur_col, wrap));
-}
-
-int
ahc_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
return (ahc_print_register(NULL, 0, "SRAM_BASE",
0x70, regvalue, cur_col, wrap));
}
-static const ahc_reg_parse_entry_t DSCOMMAND0_parse_table[] = {
- { "CIOPARCKEN", 0x01, 0x01 },
- { "USCBSIZE32", 0x02, 0x02 },
- { "RAMPS", 0x04, 0x04 },
- { "INTSCBRAMSEL", 0x08, 0x08 },
- { "EXTREQLCK", 0x10, 0x10 },
- { "MPARCKEN", 0x20, 0x20 },
- { "DPARCKEN", 0x40, 0x40 },
- { "CACHETHEN", 0x80, 0x80 }
-};
-
-int
-ahc_dscommand0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(DSCOMMAND0_parse_table, 8, "DSCOMMAND0",
- 0x84, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t BUSTIME_parse_table[] = {
- { "BON", 0x0f, 0x0f },
- { "BOFF", 0xf0, 0xf0 }
-};
-
-int
-ahc_bustime_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(BUSTIME_parse_table, 2, "BUSTIME",
- 0x85, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t DSCOMMAND1_parse_table[] = {
- { "HADDLDSEL0", 0x01, 0x01 },
- { "HADDLDSEL1", 0x02, 0x02 },
- { "DSLATT", 0xfc, 0xfc }
-};
-
-int
-ahc_dscommand1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(DSCOMMAND1_parse_table, 3, "DSCOMMAND1",
- 0x85, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t BUSSPD_parse_table[] = {
- { "STBON", 0x07, 0x07 },
- { "STBOFF", 0x38, 0x38 },
- { "DFTHRSH_75", 0x80, 0x80 },
- { "DFTHRSH", 0xc0, 0xc0 },
- { "DFTHRSH_100", 0xc0, 0xc0 }
-};
-
-int
-ahc_busspd_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(BUSSPD_parse_table, 5, "BUSSPD",
- 0x86, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t HS_MAILBOX_parse_table[] = {
- { "SEQ_MAILBOX", 0x0f, 0x0f },
- { "HOST_TQINPOS", 0x80, 0x80 },
- { "HOST_MAILBOX", 0xf0, 0xf0 }
-};
-
-int
-ahc_hs_mailbox_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(HS_MAILBOX_parse_table, 3, "HS_MAILBOX",
- 0x86, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t DSPCISTATUS_parse_table[] = {
- { "DFTHRSH_100", 0xc0, 0xc0 }
-};
-
-int
-ahc_dspcistatus_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(DSPCISTATUS_parse_table, 1, "DSPCISTATUS",
- 0x86, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t HCNTRL_parse_table[] = {
- { "CHIPRST", 0x01, 0x01 },
- { "CHIPRSTACK", 0x01, 0x01 },
- { "INTEN", 0x02, 0x02 },
- { "PAUSE", 0x04, 0x04 },
- { "IRQMS", 0x08, 0x08 },
- { "SWINT", 0x10, 0x10 },
- { "POWRDN", 0x40, 0x40 }
-};
-
-int
-ahc_hcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(HCNTRL_parse_table, 7, "HCNTRL",
- 0x87, regvalue, cur_col, wrap));
-}
-
-int
-ahc_haddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "HADDR",
- 0x88, regvalue, cur_col, wrap));
-}
-
-int
-ahc_hcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "HCNT",
- 0x8c, regvalue, cur_col, wrap));
-}
-
-int
-ahc_scbptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SCBPTR",
- 0x90, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t INTSTAT_parse_table[] = {
- { "SEQINT", 0x01, 0x01 },
- { "CMDCMPLT", 0x02, 0x02 },
- { "SCSIINT", 0x04, 0x04 },
- { "BRKADRINT", 0x08, 0x08 },
- { "BAD_PHASE", 0x01, 0x01 },
- { "INT_PEND", 0x0f, 0x0f },
- { "SEND_REJECT", 0x11, 0x11 },
- { "PROTO_VIOLATION", 0x21, 0x21 },
- { "NO_MATCH", 0x31, 0x31 },
- { "IGN_WIDE_RES", 0x41, 0x41 },
- { "PDATA_REINIT", 0x51, 0x51 },
- { "HOST_MSG_LOOP", 0x61, 0x61 },
- { "BAD_STATUS", 0x71, 0x71 },
- { "PERR_DETECTED", 0x81, 0x81 },
- { "DATA_OVERRUN", 0x91, 0x91 },
- { "MKMSG_FAILED", 0xa1, 0xa1 },
- { "MISSED_BUSFREE", 0xb1, 0xb1 },
- { "SCB_MISMATCH", 0xc1, 0xc1 },
- { "NO_FREE_SCB", 0xd1, 0xd1 },
- { "OUT_OF_RANGE", 0xe1, 0xe1 },
- { "SEQINT_MASK", 0xf1, 0xf1 }
-};
-
-int
-ahc_intstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(INTSTAT_parse_table, 21, "INTSTAT",
- 0x91, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t CLRINT_parse_table[] = {
- { "CLRSEQINT", 0x01, 0x01 },
- { "CLRCMDINT", 0x02, 0x02 },
- { "CLRSCSIINT", 0x04, 0x04 },
- { "CLRBRKADRINT", 0x08, 0x08 },
- { "CLRPARERR", 0x10, 0x10 }
-};
-
-int
-ahc_clrint_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(CLRINT_parse_table, 5, "CLRINT",
- 0x92, regvalue, cur_col, wrap));
-}
-
static const ahc_reg_parse_entry_t ERROR_parse_table[] = {
{ "ILLHADDR", 0x01, 0x01 },
{ "ILLSADDR", 0x02, 0x02 },
@@ -1115,62 +336,6 @@ ahc_dfstatus_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x94, regvalue, cur_col, wrap));
}
-int
-ahc_dfwaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "DFWADDR",
- 0x95, regvalue, cur_col, wrap));
-}
-
-int
-ahc_dfdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "DFDAT",
- 0x99, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t SCBCNT_parse_table[] = {
- { "SCBAUTO", 0x80, 0x80 },
- { "SCBCNT_MASK", 0x1f, 0x1f }
-};
-
-int
-ahc_scbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(SCBCNT_parse_table, 2, "SCBCNT",
- 0x9a, regvalue, cur_col, wrap));
-}
-
-int
-ahc_qinfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "QINFIFO",
- 0x9b, regvalue, cur_col, wrap));
-}
-
-int
-ahc_qoutfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "QOUTFIFO",
- 0x9d, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t CRCCONTROL1_parse_table[] = {
- { "TARGCRCCNTEN", 0x04, 0x04 },
- { "TARGCRCENDEN", 0x08, 0x08 },
- { "CRCREQCHKEN", 0x10, 0x10 },
- { "CRCENDCHKEN", 0x20, 0x20 },
- { "CRCVALCHKEN", 0x40, 0x40 },
- { "CRCONSEEN", 0x80, 0x80 }
-};
-
-int
-ahc_crccontrol1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(CRCCONTROL1_parse_table, 6, "CRCCONTROL1",
- 0x9d, regvalue, cur_col, wrap));
-}
-
static const ahc_reg_parse_entry_t SCSIPHASE_parse_table[] = {
{ "DATA_OUT_PHASE", 0x01, 0x01 },
{ "DATA_IN_PHASE", 0x02, 0x02 },
@@ -1188,17 +353,6 @@ ahc_scsiphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
0x9e, regvalue, cur_col, wrap));
}
-static const ahc_reg_parse_entry_t SFUNCT_parse_table[] = {
- { "ALT_MODE", 0x80, 0x80 }
-};
-
-int
-ahc_sfunct_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(SFUNCT_parse_table, 1, "SFUNCT",
- 0x9f, regvalue, cur_col, wrap));
-}
-
int
ahc_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
{
@@ -1206,80 +360,6 @@ ahc_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xa0, regvalue, cur_col, wrap));
}
-int
-ahc_scb_cdb_ptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SCB_CDB_PTR",
- 0xa0, regvalue, cur_col, wrap));
-}
-
-int
-ahc_scb_residual_sgptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SCB_RESIDUAL_SGPTR",
- 0xa4, regvalue, cur_col, wrap));
-}
-
-int
-ahc_scb_scsi_status_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SCB_SCSI_STATUS",
- 0xa8, regvalue, cur_col, wrap));
-}
-
-int
-ahc_scb_target_phases_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SCB_TARGET_PHASES",
- 0xa9, regvalue, cur_col, wrap));
-}
-
-int
-ahc_scb_target_data_dir_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SCB_TARGET_DATA_DIR",
- 0xaa, regvalue, cur_col, wrap));
-}
-
-int
-ahc_scb_target_itag_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SCB_TARGET_ITAG",
- 0xab, regvalue, cur_col, wrap));
-}
-
-int
-ahc_scb_dataptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SCB_DATAPTR",
- 0xac, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t SCB_DATACNT_parse_table[] = {
- { "SG_LAST_SEG", 0x80, 0x80 },
- { "SG_HIGH_ADDR_BITS", 0x7f, 0x7f }
-};
-
-int
-ahc_scb_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(SCB_DATACNT_parse_table, 2, "SCB_DATACNT",
- 0xb0, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t SCB_SGPTR_parse_table[] = {
- { "SG_LIST_NULL", 0x01, 0x01 },
- { "SG_FULL_RESID", 0x02, 0x02 },
- { "SG_RESID_VALID", 0x04, 0x04 }
-};
-
-int
-ahc_scb_sgptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(SCB_SGPTR_parse_table, 3, "SCB_SGPTR",
- 0xb4, regvalue, cur_col, wrap));
-}
-
static const ahc_reg_parse_entry_t SCB_CONTROL_parse_table[] = {
{ "DISCONNECTED", 0x04, 0x04 },
{ "ULTRAENB", 0x08, 0x08 },
@@ -1331,248 +411,3 @@ ahc_scb_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
0xbb, regvalue, cur_col, wrap));
}
-int
-ahc_scb_cdb_len_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SCB_CDB_LEN",
- 0xbc, regvalue, cur_col, wrap));
-}
-
-int
-ahc_scb_scsirate_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SCB_SCSIRATE",
- 0xbd, regvalue, cur_col, wrap));
-}
-
-int
-ahc_scb_scsioffset_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SCB_SCSIOFFSET",
- 0xbe, regvalue, cur_col, wrap));
-}
-
-int
-ahc_scb_next_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SCB_NEXT",
- 0xbf, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t SEECTL_2840_parse_table[] = {
- { "DO_2840", 0x01, 0x01 },
- { "CK_2840", 0x02, 0x02 },
- { "CS_2840", 0x04, 0x04 }
-};
-
-int
-ahc_seectl_2840_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(SEECTL_2840_parse_table, 3, "SEECTL_2840",
- 0xc0, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t STATUS_2840_parse_table[] = {
- { "DI_2840", 0x01, 0x01 },
- { "EEPROM_TF", 0x80, 0x80 },
- { "ADSEL", 0x1e, 0x1e },
- { "BIOS_SEL", 0x60, 0x60 }
-};
-
-int
-ahc_status_2840_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(STATUS_2840_parse_table, 4, "STATUS_2840",
- 0xc1, regvalue, cur_col, wrap));
-}
-
-int
-ahc_scb_64_btt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SCB_64_BTT",
- 0xd0, regvalue, cur_col, wrap));
-}
-
-int
-ahc_cchaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "CCHADDR",
- 0xe0, regvalue, cur_col, wrap));
-}
-
-int
-ahc_cchcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "CCHCNT",
- 0xe8, regvalue, cur_col, wrap));
-}
-
-int
-ahc_ccsgram_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "CCSGRAM",
- 0xe9, regvalue, cur_col, wrap));
-}
-
-int
-ahc_ccsgaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "CCSGADDR",
- 0xea, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t CCSGCTL_parse_table[] = {
- { "CCSGRESET", 0x01, 0x01 },
- { "SG_FETCH_NEEDED", 0x02, 0x02 },
- { "CCSGEN", 0x08, 0x08 },
- { "CCSGDONE", 0x80, 0x80 }
-};
-
-int
-ahc_ccsgctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(CCSGCTL_parse_table, 4, "CCSGCTL",
- 0xeb, regvalue, cur_col, wrap));
-}
-
-int
-ahc_ccscbram_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "CCSCBRAM",
- 0xec, regvalue, cur_col, wrap));
-}
-
-int
-ahc_ccscbaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "CCSCBADDR",
- 0xed, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t CCSCBCTL_parse_table[] = {
- { "CCSCBRESET", 0x01, 0x01 },
- { "CCSCBDIR", 0x04, 0x04 },
- { "CCSCBEN", 0x08, 0x08 },
- { "CCARREN", 0x10, 0x10 },
- { "ARRDONE", 0x40, 0x40 },
- { "CCSCBDONE", 0x80, 0x80 }
-};
-
-int
-ahc_ccscbctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(CCSCBCTL_parse_table, 6, "CCSCBCTL",
- 0xee, regvalue, cur_col, wrap));
-}
-
-int
-ahc_ccscbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "CCSCBCNT",
- 0xef, regvalue, cur_col, wrap));
-}
-
-int
-ahc_scbbaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SCBBADDR",
- 0xf0, regvalue, cur_col, wrap));
-}
-
-int
-ahc_ccscbptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "CCSCBPTR",
- 0xf1, regvalue, cur_col, wrap));
-}
-
-int
-ahc_hnscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "HNSCB_QOFF",
- 0xf4, regvalue, cur_col, wrap));
-}
-
-int
-ahc_snscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SNSCB_QOFF",
- 0xf6, regvalue, cur_col, wrap));
-}
-
-int
-ahc_sdscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(NULL, 0, "SDSCB_QOFF",
- 0xf8, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = {
- { "SDSCB_ROLLOVER", 0x10, 0x10 },
- { "SNSCB_ROLLOVER", 0x20, 0x20 },
- { "SCB_AVAIL", 0x40, 0x40 },
- { "SCB_QSIZE_256", 0x06, 0x06 },
- { "SCB_QSIZE", 0x07, 0x07 }
-};
-
-int
-ahc_qoff_ctlsta_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(QOFF_CTLSTA_parse_table, 5, "QOFF_CTLSTA",
- 0xfa, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t DFF_THRSH_parse_table[] = {
- { "RD_DFTHRSH_MIN", 0x00, 0x00 },
- { "WR_DFTHRSH_MIN", 0x00, 0x00 },
- { "RD_DFTHRSH_25", 0x01, 0x01 },
- { "RD_DFTHRSH_50", 0x02, 0x02 },
- { "RD_DFTHRSH_63", 0x03, 0x03 },
- { "RD_DFTHRSH_75", 0x04, 0x04 },
- { "RD_DFTHRSH_85", 0x05, 0x05 },
- { "RD_DFTHRSH_90", 0x06, 0x06 },
- { "RD_DFTHRSH", 0x07, 0x07 },
- { "RD_DFTHRSH_MAX", 0x07, 0x07 },
- { "WR_DFTHRSH_25", 0x10, 0x10 },
- { "WR_DFTHRSH_50", 0x20, 0x20 },
- { "WR_DFTHRSH_63", 0x30, 0x30 },
- { "WR_DFTHRSH_75", 0x40, 0x40 },
- { "WR_DFTHRSH_85", 0x50, 0x50 },
- { "WR_DFTHRSH_90", 0x60, 0x60 },
- { "WR_DFTHRSH", 0x70, 0x70 },
- { "WR_DFTHRSH_MAX", 0x70, 0x70 }
-};
-
-int
-ahc_dff_thrsh_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(DFF_THRSH_parse_table, 18, "DFF_THRSH",
- 0xfb, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t SG_CACHE_SHADOW_parse_table[] = {
- { "LAST_SEG_DONE", 0x01, 0x01 },
- { "LAST_SEG", 0x02, 0x02 },
- { "SG_ADDR_MASK", 0xf8, 0xf8 }
-};
-
-int
-ahc_sg_cache_shadow_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(SG_CACHE_SHADOW_parse_table, 3, "SG_CACHE_SHADOW",
- 0xfc, regvalue, cur_col, wrap));
-}
-
-static const ahc_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = {
- { "LAST_SEG_DONE", 0x01, 0x01 },
- { "LAST_SEG", 0x02, 0x02 },
- { "SG_ADDR_MASK", 0xf8, 0xf8 }
-};
-
-int
-ahc_sg_cache_pre_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
- return (ahc_print_register(SG_CACHE_PRE_parse_table, 3, "SG_CACHE_PRE",
- 0xfc, regvalue, cur_col, wrap));
-}
-
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
index 81be6a261cc8..e4064433842e 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
@@ -147,6 +147,8 @@ void yyerror(const char *string);
%token T_ACCESS_MODE
+%token T_DONT_GENERATE_DEBUG_CODE
+
%token T_MODES
%token T_DEFINE
@@ -357,6 +359,7 @@ reg_attribute:
| size
| count
| access_mode
+| dont_generate_debug_code
| modes
| field_defn
| enum_defn
@@ -410,6 +413,13 @@ access_mode:
}
;
+dont_generate_debug_code:
+ T_DONT_GENERATE_DEBUG_CODE
+ {
+ cur_symbol->dont_generate_debug_code = 1;
+ }
+;
+
modes:
T_MODES mode_list
{
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
index 2c7f02daf88d..93c8667cd704 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
@@ -164,6 +164,7 @@ download { return T_DOWNLOAD; }
address { return T_ADDRESS; }
count { return T_COUNT; }
access_mode { return T_ACCESS_MODE; }
+dont_generate_debug_code { return T_DONT_GENERATE_DEBUG_CODE; }
modes { return T_MODES; }
RW|RO|WO {
if (strcmp(yytext, "RW") == 0)
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
index fcd357872b43..078ed600f47a 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
@@ -539,6 +539,9 @@ symtable_dump(FILE *ofile, FILE *dfile)
aic_print_include(dfile, stock_include_file);
SLIST_FOREACH(curnode, &registers, links) {
+ if (curnode->symbol->dont_generate_debug_code)
+ continue;
+
switch(curnode->symbol->type) {
case REGISTER:
case SCBLOC:
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
index 05190c1a2fb7..2ba73ae7c777 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
@@ -137,7 +137,8 @@ typedef struct symbol {
struct label_info *linfo;
struct cond_info *condinfo;
struct macro_info *macroinfo;
- }info;
+ } info;
+ int dont_generate_debug_code;
} symbol_t;
typedef struct symbol_ref {
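
The three aicasm changes above work together: a register definition that
carries the new attribute is skipped by symtable_dump(), so no ahc_*_print
debug helper is emitted for it into the _shipped files, which is what the
large deletions earlier in this diff reflect. A sketch of a .reg entry
using the keyword (illustrative only; the register and address shown are
an example):

	register SEQRAM {
		address		0x061
		access_mode	RW
		dont_generate_debug_code
	}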
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index 69f8346aa288..5877f29a6005 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -189,7 +189,6 @@ static struct bin_attribute arcmsr_sysfs_message_read_attr = {
.attr = {
.name = "mu_read",
.mode = S_IRUSR ,
- .owner = THIS_MODULE,
},
.size = 1032,
.read = arcmsr_sysfs_iop_message_read,
@@ -199,7 +198,6 @@ static struct bin_attribute arcmsr_sysfs_message_write_attr = {
.attr = {
.name = "mu_write",
.mode = S_IWUSR,
- .owner = THIS_MODULE,
},
.size = 1032,
.write = arcmsr_sysfs_iop_message_write,
@@ -209,7 +207,6 @@ static struct bin_attribute arcmsr_sysfs_message_clear_attr = {
.attr = {
.name = "mu_clear",
.mode = S_IWUSR,
- .owner = THIS_MODULE,
},
.size = 1,
.write = arcmsr_sysfs_iop_message_clear,
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index f91f79c8007d..106c04d2d793 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -235,7 +235,7 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
uint32_t intmask_org;
int i, j;
- acb->pmuA = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
+ acb->pmuA = pci_ioremap_bar(pdev, 0);
if (!acb->pmuA) {
printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n",
acb->host->host_no);
@@ -329,13 +329,11 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
reg = (struct MessageUnit_B *)(dma_coherent +
ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
acb->pmuB = reg;
- mem_base0 = ioremap(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
+ mem_base0 = pci_ioremap_bar(pdev, 0);
if (!mem_base0)
goto out;
- mem_base1 = ioremap(pci_resource_start(pdev, 2),
- pci_resource_len(pdev, 2));
+ mem_base1 = pci_ioremap_bar(pdev, 2);
if (!mem_base1) {
iounmap(mem_base0);
goto out;
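
(A note on the conversions above: pci_ioremap_bar(pdev, bar) is roughly
shorthand for the open-coded pair it replaces,

	void __iomem *p = ioremap(pci_resource_start(pdev, bar),
				  pci_resource_len(pdev, bar));

with an added sanity check inside the helper that the BAR is a memory
resource.)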
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 7d311541c76c..20ca0a6374b5 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -1,8 +1,8 @@
/*
* Copyright (C) 1997 Wu Ching Chen
* 2.1.x update (C) 1998 Krzysztof G. Baranowski
- * 2.5.x update (C) 2002 Red Hat <alan@redhat.com>
- * 2.6.x update (C) 2004 Red Hat <alan@redhat.com>
+ * 2.5.x update (C) 2002 Red Hat
+ * 2.6.x update (C) 2004 Red Hat
*
* Marcelo Tosatti <marcelo@conectiva.com.br> : SMP fixes
*
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 3c257fe0893e..af9725409f43 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -190,7 +190,7 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd,
result = scsi_execute_req(ch->device, cmd, direction, buffer,
buflength, &sshdr, timeout * HZ,
- MAX_RETRIES);
+ MAX_RETRIES, NULL);
dprintk("result: 0x%x\n",result);
if (driver_byte(result) & DRIVER_SENSE) {
@@ -914,9 +914,9 @@ static int ch_probe(struct device *dev)
ch->minor = minor;
sprintf(ch->name,"ch%d",ch->minor);
- class_dev = device_create_drvdata(ch_sysfs_class, dev,
- MKDEV(SCSI_CHANGER_MAJOR, ch->minor),
- ch, "s%s", ch->name);
+ class_dev = device_create(ch_sysfs_class, dev,
+ MKDEV(SCSI_CHANGER_MAJOR, ch->minor), ch,
+ "s%s", ch->name);
if (IS_ERR(class_dev)) {
printk(KERN_WARNING "ch%d: device_create failed\n",
ch->minor);
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index 9785d7384199..4003deefb7d8 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -1364,7 +1364,8 @@ EXPORT_SYMBOL(scsi_print_sense);
static const char * const hostbyte_table[]={
"DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET",
"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",
-"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE"};
+"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE",
+"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST" };
#define NUM_HOSTBYTE_STRS ARRAY_SIZE(hostbyte_table)
static const char * const driverbyte_table[]={
diff --git a/drivers/scsi/cxgb3i/Kbuild b/drivers/scsi/cxgb3i/Kbuild
new file mode 100644
index 000000000000..ee7d6d2f9c3b
--- /dev/null
+++ b/drivers/scsi/cxgb3i/Kbuild
@@ -0,0 +1,4 @@
+EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/cxgb3
+
+cxgb3i-y := cxgb3i_init.o cxgb3i_iscsi.o cxgb3i_pdu.o cxgb3i_offload.o
+obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i_ddp.o cxgb3i.o
diff --git a/drivers/scsi/cxgb3i/Kconfig b/drivers/scsi/cxgb3i/Kconfig
new file mode 100644
index 000000000000..bfdcaf5c9c57
--- /dev/null
+++ b/drivers/scsi/cxgb3i/Kconfig
@@ -0,0 +1,7 @@
+config SCSI_CXGB3_ISCSI
+ tristate "Chelsio S3xx iSCSI support"
+ depends on CHELSIO_T3_DEPENDS
+ select CHELSIO_T3
+ select SCSI_ISCSI_ATTRS
+ ---help---
+ This driver supports iSCSI offload for the Chelsio S3 series devices.
diff --git a/drivers/scsi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgb3i/cxgb3i.h
new file mode 100644
index 000000000000..fde6e4c634e7
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i.h
@@ -0,0 +1,139 @@
+/*
+ * cxgb3i.h: Chelsio S3xx iSCSI driver.
+ *
+ * Copyright (c) 2008 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ */
+
+#ifndef __CXGB3I_H__
+#define __CXGB3I_H__
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/scatterlist.h>
+#include <scsi/libiscsi_tcp.h>
+
+/* from cxgb3 LLD */
+#include "common.h"
+#include "t3_cpl.h"
+#include "t3cdev.h"
+#include "cxgb3_ctl_defs.h"
+#include "cxgb3_offload.h"
+#include "firmware_exports.h"
+
+#include "cxgb3i_offload.h"
+#include "cxgb3i_ddp.h"
+
+#define CXGB3I_SCSI_QDEPTH_DFLT 128
+#define CXGB3I_MAX_TARGET CXGB3I_MAX_CONN
+#define CXGB3I_MAX_LUN 512
+#define ISCSI_PDU_NONPAYLOAD_MAX \
+ (sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE + 2*ISCSI_DIGEST_SIZE)
+
+struct cxgb3i_adapter;
+struct cxgb3i_hba;
+struct cxgb3i_endpoint;
+
+/**
+ * struct cxgb3i_hba - cxgb3i iscsi structure (per port)
+ *
+ * @snic: cxgb3i adapter containing this port
+ * @ndev: pointer to netdev structure
+ * @shost: pointer to scsi host structure
+ */
+struct cxgb3i_hba {
+ struct cxgb3i_adapter *snic;
+ struct net_device *ndev;
+ struct Scsi_Host *shost;
+};
+
+/**
+ * struct cxgb3i_adapter - cxgb3i adapter structure (per pci)
+ *
+ * @list_head: list head to link elements
+ * @lock: lock for this structure
+ * @tdev: pointer to t3cdev used by cxgb3 driver
+ * @pdev: pointer to pci dev
+ * @hba_cnt: # of hbas (the same as # of ports)
+ * @hba: all the hbas on this adapter
+ * @tx_max_size: max. tx packet size supported
+ * @rx_max_size: max. rx packet size supported
+ * @tag_format: ddp tag format settings
+ */
+struct cxgb3i_adapter {
+ struct list_head list_head;
+ spinlock_t lock;
+ struct t3cdev *tdev;
+ struct pci_dev *pdev;
+ unsigned char hba_cnt;
+ struct cxgb3i_hba *hba[MAX_NPORTS];
+
+ unsigned int tx_max_size;
+ unsigned int rx_max_size;
+
+ struct cxgb3i_tag_format tag_format;
+};
+
+/**
+ * struct cxgb3i_conn - cxgb3i iscsi connection
+ *
+ * @list_head: list head to link elements
+ * @cep: pointer to iscsi_endpoint structure
+ * @conn: pointer to iscsi_conn structure
+ * @hba: pointer to the hba this conn. is going through
+ * @task_idx_bits: # of bits needed for session->cmds_max
+ */
+struct cxgb3i_conn {
+ struct list_head list_head;
+ struct cxgb3i_endpoint *cep;
+ struct iscsi_conn *conn;
+ struct cxgb3i_hba *hba;
+ unsigned int task_idx_bits;
+};
+
+/**
+ * struct cxgb3i_endpoint - iscsi tcp endpoint
+ *
+ * @c3cn: the h/w tcp connection representation
+ * @hba: pointer to the hba this conn. is going through
+ * @cconn: pointer to the associated cxgb3i iscsi connection
+ */
+struct cxgb3i_endpoint {
+ struct s3_conn *c3cn;
+ struct cxgb3i_hba *hba;
+ struct cxgb3i_conn *cconn;
+};
+
+int cxgb3i_iscsi_init(void);
+void cxgb3i_iscsi_cleanup(void);
+
+struct cxgb3i_adapter *cxgb3i_adapter_add(struct t3cdev *);
+void cxgb3i_adapter_remove(struct t3cdev *);
+int cxgb3i_adapter_ulp_init(struct cxgb3i_adapter *);
+void cxgb3i_adapter_ulp_cleanup(struct cxgb3i_adapter *);
+
+struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *);
+struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *,
+ struct net_device *);
+void cxgb3i_hba_host_remove(struct cxgb3i_hba *);
+
+int cxgb3i_pdu_init(void);
+void cxgb3i_pdu_cleanup(void);
+void cxgb3i_conn_cleanup_task(struct iscsi_task *);
+int cxgb3i_conn_alloc_pdu(struct iscsi_task *, u8);
+int cxgb3i_conn_init_pdu(struct iscsi_task *, unsigned int, unsigned int);
+int cxgb3i_conn_xmit_pdu(struct iscsi_task *);
+
+void cxgb3i_release_itt(struct iscsi_task *task, itt_t hdr_itt);
+int cxgb3i_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt);
+
+#endif
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.c b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
new file mode 100644
index 000000000000..1a41f04264f7
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
@@ -0,0 +1,770 @@
+/*
+ * cxgb3i_ddp.c: Chelsio S3xx iSCSI DDP Manager.
+ *
+ * Copyright (c) 2008 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ */
+
+#include <linux/skbuff.h>
+
+/* from cxgb3 LLD */
+#include "common.h"
+#include "t3_cpl.h"
+#include "t3cdev.h"
+#include "cxgb3_ctl_defs.h"
+#include "cxgb3_offload.h"
+#include "firmware_exports.h"
+
+#include "cxgb3i_ddp.h"
+
+#define DRV_MODULE_NAME "cxgb3i_ddp"
+#define DRV_MODULE_VERSION "1.0.0"
+#define DRV_MODULE_RELDATE "Dec. 1, 2008"
+
+static char version[] =
+ "Chelsio S3xx iSCSI DDP " DRV_MODULE_NAME
+ " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Karen Xie <kxie@chelsio.com>");
+MODULE_DESCRIPTION("cxgb3i ddp pagepod manager");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define ddp_log_error(fmt...) printk(KERN_ERR "cxgb3i_ddp: ERR! " fmt)
+#define ddp_log_warn(fmt...) printk(KERN_WARNING "cxgb3i_ddp: WARN! " fmt)
+#define ddp_log_info(fmt...) printk(KERN_INFO "cxgb3i_ddp: " fmt)
+
+#ifdef __DEBUG_CXGB3I_DDP__
+#define ddp_log_debug(fmt, args...) \
+ printk(KERN_INFO "cxgb3i_ddp: %s - " fmt, __func__ , ## args)
+#else
+#define ddp_log_debug(fmt...)
+#endif
+
+/*
+ * iSCSI Direct Data Placement
+ *
+ * T3 h/w can directly place the iSCSI Data-In or Data-Out PDU's payload into
+ * pre-posted final destination host-memory buffers based on the Initiator
+ * Task Tag (ITT) in Data-In or Target Task Tag (TTT) in Data-Out PDUs.
+ *
+ * The host memory address is programmed into h/w in the format of pagepod
+ * entries.
+ * The location of the pagepod entry is encoded into the ddp tag, which is
+ * then used as, or as the base of, the ITT/TTT.
+ */
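+
+/*
+ * Illustrative sketch (not driver logic): the tag encode and decode used
+ * by the reserve/release paths below boil down to
+ *
+ *	tag = cxgb3i_ddp_tag_base(tformat, sw_tag) | (idx << PPOD_IDX_SHIFT);
+ *	idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
+ */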
+
+#define DDP_PGIDX_MAX 4
+#define DDP_THRESHOLD 2048
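+/* the supported ddp page sizes: orders {0, 1, 2, 4} over a 4KB base page,
+ * matching the shifts {12, 13, 14, 16}, i.e. 4KB, 8KB, 16KB and 64KB
+ */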
+static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
+static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};
+static unsigned char page_idx = DDP_PGIDX_MAX;
+
+static LIST_HEAD(cxgb3i_ddp_list);
+static DEFINE_RWLOCK(cxgb3i_ddp_rwlock);
+
+/*
+ * functions to program the pagepod in h/w
+ */
+static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr)
+{
+ struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head;
+
+ req->wr.wr_lo = 0;
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
+ req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) |
+ V_ULPTX_CMD(ULP_MEM_WRITE));
+ req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) |
+ V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1));
+}
+
+static int set_ddp_map(struct cxgb3i_ddp_info *ddp, struct pagepod_hdr *hdr,
+ unsigned int idx, unsigned int npods,
+ struct cxgb3i_gather_list *gl)
+{
+ unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
+ int i;
+
+ for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
+ struct sk_buff *skb = ddp->gl_skb[idx];
+ struct pagepod *ppod;
+ int j, pidx;
+
+ /* hold on to the skb until we clear the ddp mapping */
+ skb_get(skb);
+
+ ulp_mem_io_set_hdr(skb, pm_addr);
+ ppod = (struct pagepod *)
+ (skb->head + sizeof(struct ulp_mem_io));
+ memcpy(&(ppod->hdr), hdr, sizeof(struct pagepod_hdr));
+ for (pidx = 4 * i, j = 0; j < 5; ++j, ++pidx)
+ ppod->addr[j] = pidx < gl->nelem ?
+ cpu_to_be64(gl->phys_addr[pidx]) : 0UL;
+
+ skb->priority = CPL_PRIORITY_CONTROL;
+ cxgb3_ofld_send(ddp->tdev, skb);
+ }
+ return 0;
+}
+
+static int clear_ddp_map(struct cxgb3i_ddp_info *ddp, unsigned int idx,
+ unsigned int npods)
+{
+ unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
+ int i;
+
+ for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
+ struct sk_buff *skb = ddp->gl_skb[idx];
+
+ ddp->gl_skb[idx] = NULL;
+ memset((skb->head + sizeof(struct ulp_mem_io)), 0, PPOD_SIZE);
+ ulp_mem_io_set_hdr(skb, pm_addr);
+ skb->priority = CPL_PRIORITY_CONTROL;
+ cxgb3_ofld_send(ddp->tdev, skb);
+ }
+ return 0;
+}
+
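+/* first-fit search for "count" consecutive free pagepod slots in
+ * [start, max]; on success the slots are claimed for "gl" under map_lock
+ * and the first index is returned, otherwise -EBUSY
+ */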
+static inline int ddp_find_unused_entries(struct cxgb3i_ddp_info *ddp,
+ int start, int max, int count,
+ struct cxgb3i_gather_list *gl)
+{
+ unsigned int i, j;
+
+ spin_lock(&ddp->map_lock);
+ for (i = start; i <= max;) {
+ for (j = 0; j < count; j++) {
+ if (ddp->gl_map[i + j])
+ break;
+ }
+ if (j == count) {
+ for (j = 0; j < count; j++)
+ ddp->gl_map[i + j] = gl;
+ spin_unlock(&ddp->map_lock);
+ return i;
+ }
+ i += j + 1;
+ }
+ spin_unlock(&ddp->map_lock);
+ return -EBUSY;
+}
+
+static inline void ddp_unmark_entries(struct cxgb3i_ddp_info *ddp,
+ int start, int count)
+{
+ spin_lock(&ddp->map_lock);
+ memset(&ddp->gl_map[start], 0,
+ count * sizeof(struct cxgb3i_gather_list *));
+ spin_unlock(&ddp->map_lock);
+}
+
+static inline void ddp_free_gl_skb(struct cxgb3i_ddp_info *ddp,
+ int idx, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++, idx++)
+ if (ddp->gl_skb[idx]) {
+ kfree_skb(ddp->gl_skb[idx]);
+ ddp->gl_skb[idx] = NULL;
+ }
+}
+
+static inline int ddp_alloc_gl_skb(struct cxgb3i_ddp_info *ddp, int idx,
+ int count, gfp_t gfp)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ struct sk_buff *skb = alloc_skb(sizeof(struct ulp_mem_io) +
+ PPOD_SIZE, gfp);
+ if (skb) {
+ ddp->gl_skb[idx + i] = skb;
+ skb_put(skb, sizeof(struct ulp_mem_io) + PPOD_SIZE);
+ } else {
+ ddp_free_gl_skb(ddp, idx, i);
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+/**
+ * cxgb3i_ddp_find_page_index - return ddp page index for a given page size.
+ * @pgsz: page size
+ * Returns the ddp page index; if no match is found, returns DDP_PGIDX_MAX.
+ */
+int cxgb3i_ddp_find_page_index(unsigned long pgsz)
+{
+ int i;
+
+ for (i = 0; i < DDP_PGIDX_MAX; i++) {
+ if (pgsz == (1UL << ddp_page_shift[i]))
+ return i;
+ }
+ ddp_log_debug("ddp page size 0x%lx not supported.\n", pgsz);
+ return DDP_PGIDX_MAX;
+}
+EXPORT_SYMBOL_GPL(cxgb3i_ddp_find_page_index);
+
+static inline void ddp_gl_unmap(struct pci_dev *pdev,
+ struct cxgb3i_gather_list *gl)
+{
+ int i;
+
+ for (i = 0; i < gl->nelem; i++)
+ pci_unmap_page(pdev, gl->phys_addr[i], PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+}
+
+static inline int ddp_gl_map(struct pci_dev *pdev,
+ struct cxgb3i_gather_list *gl)
+{
+ int i;
+
+ for (i = 0; i < gl->nelem; i++) {
+ gl->phys_addr[i] = pci_map_page(pdev, gl->pages[i], 0,
+ PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(pci_dma_mapping_error(pdev, gl->phys_addr[i])))
+ goto unmap;
+ }
+
+ return i;
+
+unmap:
+ if (i) {
+ unsigned int nelem = gl->nelem;
+
+ gl->nelem = i;
+ ddp_gl_unmap(pdev, gl);
+ gl->nelem = nelem;
+ }
+ return -ENOMEM;
+}
+
+/**
+ * cxgb3i_ddp_make_gl - build ddp page buffer list
+ * @xferlen: total buffer length
+ * @sgl: page buffer scatter-gather list
+ * @sgcnt: # of page buffers
+ * @pdev: pci_dev, used for pci map
+ * @gfp: allocation mode
+ *
+ * Construct a ddp page buffer list from the scsi scatter-gather list,
+ * coalescing buffers as much as possible and obtaining a dma address for
+ * each page.
+ *
+ * Return the cxgb3i_gather_list constructed from the page buffers if the
+ * memory can be used for ddp. Return NULL otherwise.
+ */
+struct cxgb3i_gather_list *cxgb3i_ddp_make_gl(unsigned int xferlen,
+ struct scatterlist *sgl,
+ unsigned int sgcnt,
+ struct pci_dev *pdev,
+ gfp_t gfp)
+{
+ struct cxgb3i_gather_list *gl;
+ struct scatterlist *sg = sgl;
+ struct page *sgpage = sg_page(sg);
+ unsigned int sglen = sg->length;
+ unsigned int sgoffset = sg->offset;
+ unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >>
+ PAGE_SHIFT;
+ int i = 1, j = 0;
+
+ if (xferlen < DDP_THRESHOLD) {
+ ddp_log_debug("xfer %u < threshold %u, no ddp.\n",
+ xferlen, DDP_THRESHOLD);
+ return NULL;
+ }
+
+ gl = kzalloc(sizeof(struct cxgb3i_gather_list) +
+ npages * (sizeof(dma_addr_t) + sizeof(struct page *)),
+ gfp);
+ if (!gl)
+ return NULL;
+
+ gl->pages = (struct page **)&gl->phys_addr[npages];
+ gl->length = xferlen;
+ gl->offset = sgoffset;
+ gl->pages[0] = sgpage;
+
+ sg = sg_next(sg);
+ while (sg) {
+ struct page *page = sg_page(sg);
+
+ if (sgpage == page && sg->offset == sgoffset + sglen)
+ sglen += sg->length;
+ else {
+ /* make sure the sgl is suitable for ddp:
+ * only the first chunk may start at a nonzero page
+ * offset, and every chunk but the last must end on
+ * a page boundary
+ */
+ if ((j && sgoffset) ||
+ ((i != sgcnt - 1) &&
+ ((sglen + sgoffset) & ~PAGE_MASK)))
+ goto error_out;
+
+ j++;
+ if (j == gl->nelem || sg->offset)
+ goto error_out;
+ gl->pages[j] = page;
+ sglen = sg->length;
+ sgoffset = sg->offset;
+ sgpage = page;
+ }
+ i++;
+ sg = sg_next(sg);
+ }
+ gl->nelem = ++j;
+
+ if (ddp_gl_map(pdev, gl) < 0)
+ goto error_out;
+
+ return gl;
+
+error_out:
+ kfree(gl);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(cxgb3i_ddp_make_gl);
+
+/**
+ * cxgb3i_ddp_release_gl - release a page buffer list
+ * @gl: a ddp page buffer list
+ * @pdev: pci_dev used for pci_unmap
+ * Free a ddp page buffer list built by cxgb3i_ddp_make_gl().
+ */
+void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl,
+ struct pci_dev *pdev)
+{
+ ddp_gl_unmap(pdev, gl);
+ kfree(gl);
+}
+EXPORT_SYMBOL_GPL(cxgb3i_ddp_release_gl);
+
+/**
+ * cxgb3i_ddp_tag_reserve - set up ddp for a data transfer
+ * @tdev: t3cdev adapter
+ * @tid: connection id
+ * @tformat: tag format
+ * @tagp: the s/w tag; if ddp setup succeeds, it is updated with the
+ * ddp/h/w tag
+ * @gl: the page memory list
+ * @gfp: allocation mode
+ *
+ * Set up ddp for the given page buffer list and construct the ddp tag.
+ * Returns 0 on success, < 0 otherwise.
+ */
+int cxgb3i_ddp_tag_reserve(struct t3cdev *tdev, unsigned int tid,
+ struct cxgb3i_tag_format *tformat, u32 *tagp,
+ struct cxgb3i_gather_list *gl, gfp_t gfp)
+{
+ struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
+ struct pagepod_hdr hdr;
+ unsigned int npods;
+ int idx = -1, idx_max;
+ int err = -ENOMEM;
+ u32 sw_tag = *tagp;
+ u32 tag;
+
+ if (page_idx >= DDP_PGIDX_MAX || !ddp || !gl || !gl->nelem ||
+ gl->length < DDP_THRESHOLD) {
+ ddp_log_debug("pgidx %u, xfer %u/%u, NO ddp.\n",
+ page_idx, gl->length, DDP_THRESHOLD);
+ return -EINVAL;
+ }
+
+ npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
+ idx_max = ddp->nppods - npods + 1;
+
+ if (ddp->idx_last == ddp->nppods)
+ idx = ddp_find_unused_entries(ddp, 0, idx_max, npods, gl);
+ else {
+ idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1,
+ idx_max, npods, gl);
+ if (idx < 0 && ddp->idx_last >= npods)
+ idx = ddp_find_unused_entries(ddp, 0,
+ ddp->idx_last - npods + 1,
+ npods, gl);
+ }
+ if (idx < 0) {
+ ddp_log_debug("xferlen %u, gl %u, npods %u NO DDP.\n",
+ gl->length, gl->nelem, npods);
+ return idx;
+ }
+
+ err = ddp_alloc_gl_skb(ddp, idx, npods, gfp);
+ if (err < 0)
+ goto unmark_entries;
+
+ tag = cxgb3i_ddp_tag_base(tformat, sw_tag);
+ tag |= idx << PPOD_IDX_SHIFT;
+
+ hdr.rsvd = 0;
+ hdr.vld_tid = htonl(F_PPOD_VALID | V_PPOD_TID(tid));
+ hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask);
+ hdr.maxoffset = htonl(gl->length);
+ hdr.pgoffset = htonl(gl->offset);
+
+ err = set_ddp_map(ddp, &hdr, idx, npods, gl);
+ if (err < 0)
+ goto free_gl_skb;
+
+ ddp->idx_last = idx;
+ ddp_log_debug("xfer %u, gl %u,%u, tid 0x%x, 0x%x -> 0x%x(%u,%u).\n",
+ gl->length, gl->nelem, gl->offset, tid, sw_tag, tag,
+ idx, npods);
+ *tagp = tag;
+ return 0;
+
+free_gl_skb:
+ ddp_free_gl_skb(ddp, idx, npods);
+unmark_entries:
+ ddp_unmark_entries(ddp, idx, npods);
+ return err;
+}
+EXPORT_SYMBOL_GPL(cxgb3i_ddp_tag_reserve);
+
+/**
+ * cxgb3i_ddp_tag_release - release a ddp tag
+ * @tdev: t3cdev adapter
+ * @tag: ddp tag
+ *
+ * ddp cleanup for a given ddp tag, releasing all the resources held
+ */
+void cxgb3i_ddp_tag_release(struct t3cdev *tdev, u32 tag)
+{
+ struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
+ u32 idx;
+
+ if (!ddp) {
+ ddp_log_error("release ddp tag 0x%x, ddp NULL.\n", tag);
+ return;
+ }
+
+ idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
+ if (idx < ddp->nppods) {
+ struct cxgb3i_gather_list *gl = ddp->gl_map[idx];
+ unsigned int npods;
+
+ if (!gl) {
+ ddp_log_error("release ddp 0x%x, idx 0x%x, gl NULL.\n",
+ tag, idx);
+ return;
+ }
+ npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
+ ddp_log_debug("ddp tag 0x%x, release idx 0x%x, npods %u.\n",
+ tag, idx, npods);
+ clear_ddp_map(ddp, idx, npods);
+ ddp_unmark_entries(ddp, idx, npods);
+ cxgb3i_ddp_release_gl(gl, ddp->pdev);
+ } else
+ ddp_log_error("ddp tag 0x%x, idx 0x%x > max 0x%x.\n",
+ tag, idx, ddp->nppods);
+}
+EXPORT_SYMBOL_GPL(cxgb3i_ddp_tag_release);
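+
+/*
+ * Typical usage of the tag calls above (an illustrative sketch only;
+ * tid, sgl, sgcnt, xferlen and sw_tag are caller-supplied -- see
+ * cxgb3i_reserve_itt()/cxgb3i_release_itt() in cxgb3i_iscsi.c):
+ *
+ *	gl = cxgb3i_ddp_make_gl(xferlen, sgl, sgcnt, pdev, GFP_ATOMIC);
+ *	if (gl) {
+ *		tag = sw_tag;
+ *		if (cxgb3i_ddp_tag_reserve(tdev, tid, tformat, &tag,
+ *					   gl, GFP_ATOMIC) < 0)
+ *			cxgb3i_ddp_release_gl(gl, pdev);
+ *	}
+ *	...
+ *	cxgb3i_ddp_tag_release(tdev, tag);	(this also frees gl)
+ */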
+
+static int setup_conn_pgidx(struct t3cdev *tdev, unsigned int tid, int pg_idx,
+ int reply)
+{
+ struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
+ GFP_KERNEL);
+ struct cpl_set_tcb_field *req;
+ u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;
+
+ if (!skb)
+ return -ENOMEM;
+
+ /* set up the ddp page size */
+ req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
+ req->reply = V_NO_REPLY(reply ? 0 : 1);
+ req->cpu_idx = 0;
+ req->word = htons(31);
+ req->mask = cpu_to_be64(0xF0000000);
+ req->val = cpu_to_be64(val << 28);
+ skb->priority = CPL_PRIORITY_CONTROL;
+
+ cxgb3_ofld_send(tdev, skb);
+ return 0;
+}
+
+/**
+ * cxgb3i_setup_conn_host_pagesize - setup the conn.'s ddp page size
+ * @tdev: t3cdev adapter
+ * @tid: connection id
+ * @reply: request reply from h/w
+ * set up the ddp page size based on the host PAGE_SIZE for a connection
+ * identified by tid
+ */
+int cxgb3i_setup_conn_host_pagesize(struct t3cdev *tdev, unsigned int tid,
+ int reply)
+{
+ return setup_conn_pgidx(tdev, tid, page_idx, reply);
+}
+EXPORT_SYMBOL_GPL(cxgb3i_setup_conn_host_pagesize);
+
+/**
+ * cxgb3i_setup_conn_pagesize - setup the conn.'s ddp page size
+ * @tdev: t3cdev adapter
+ * @tid: connection id
+ * @reply: request reply from h/w
+ * @pgsz: ddp page size
+ * set up the ddp page size for a connection identified by tid
+ */
+int cxgb3i_setup_conn_pagesize(struct t3cdev *tdev, unsigned int tid,
+ int reply, unsigned long pgsz)
+{
+ int pgidx = cxgb3i_ddp_find_page_index(pgsz);
+
+ return setup_conn_pgidx(tdev, tid, pgidx, reply);
+}
+EXPORT_SYMBOL_GPL(cxgb3i_setup_conn_pagesize);
+
+/**
+ * cxgb3i_setup_conn_digest - setup conn. digest setting
+ * @tdev: t3cdev adapter
+ * @tid: connection id
+ * @hcrc: header digest enabled
+ * @dcrc: data digest enabled
+ * @reply: request reply from h/w
+ * set up the iscsi digest settings for a connection identified by tid
+ */
+int cxgb3i_setup_conn_digest(struct t3cdev *tdev, unsigned int tid,
+ int hcrc, int dcrc, int reply)
+{
+ struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
+ GFP_KERNEL);
+ struct cpl_set_tcb_field *req;
+ u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);
+
+ if (!skb)
+ return -ENOMEM;
+
+ /* set up ulp submode: the hcrc/dcrc digest enables */
+ req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
+ req->reply = V_NO_REPLY(reply ? 0 : 1);
+ req->cpu_idx = 0;
+ req->word = htons(31);
+ req->mask = cpu_to_be64(0x0F000000);
+ req->val = cpu_to_be64(val << 24);
+ skb->priority = CPL_PRIORITY_CONTROL;
+
+ cxgb3_ofld_send(tdev, skb);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxgb3i_setup_conn_digest);
+
+static int ddp_init(struct t3cdev *tdev)
+{
+ struct cxgb3i_ddp_info *ddp;
+ struct ulp_iscsi_info uinfo;
+ unsigned int ppmax, bits;
+ int i, err;
+ static int vers_printed;
+
+ if (!vers_printed) {
+ printk(KERN_INFO "%s", version);
+ vers_printed = 1;
+ }
+
+ err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
+ if (err < 0) {
+ ddp_log_error("%s, failed to get iscsi param err=%d.\n",
+ tdev->name, err);
+ return err;
+ }
+
+ ppmax = (uinfo.ulimit - uinfo.llimit + 1) >> PPOD_SIZE_SHIFT;
+ bits = __ilog2_u32(ppmax) + 1;
+ if (bits > PPOD_IDX_MAX_SIZE)
+ bits = PPOD_IDX_MAX_SIZE;
+ ppmax = (1 << (bits - 1)) - 1;
+
+ ddp = cxgb3i_alloc_big_mem(sizeof(struct cxgb3i_ddp_info) +
+ ppmax *
+ (sizeof(struct cxgb3i_gather_list *) +
+ sizeof(struct sk_buff *)),
+ GFP_KERNEL);
+ if (!ddp) {
+ ddp_log_warn("%s unable to alloc ddp 0x%d, ddp disabled.\n",
+ tdev->name, ppmax);
+ return 0;
+ }
+ ddp->gl_map = (struct cxgb3i_gather_list **)(ddp + 1);
+ ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) +
+ ppmax *
+ sizeof(struct cxgb3i_gather_list *));
+ spin_lock_init(&ddp->map_lock);
+
+ ddp->tdev = tdev;
+ ddp->pdev = uinfo.pdev;
+ ddp->max_txsz = min_t(unsigned int, uinfo.max_txsz, ULP2_MAX_PKT_SIZE);
+ ddp->max_rxsz = min_t(unsigned int, uinfo.max_rxsz, ULP2_MAX_PKT_SIZE);
+ ddp->llimit = uinfo.llimit;
+ ddp->ulimit = uinfo.ulimit;
+ ddp->nppods = ppmax;
+ ddp->idx_last = ppmax;
+ ddp->idx_bits = bits;
+ ddp->idx_mask = (1 << bits) - 1;
+ ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1;
+
+ uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
+ for (i = 0; i < DDP_PGIDX_MAX; i++)
+ uinfo.pgsz_factor[i] = ddp_page_order[i];
+ uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT);
+
+ err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
+ if (err < 0) {
+ ddp_log_warn("%s unable to set iscsi param err=%d, "
+ "ddp disabled.\n", tdev->name, err);
+ goto free_ddp_map;
+ }
+
+ tdev->ulp_iscsi = ddp;
+
+ /* add to the list */
+ write_lock(&cxgb3i_ddp_rwlock);
+ list_add_tail(&ddp->list, &cxgb3i_ddp_list);
+ write_unlock(&cxgb3i_ddp_rwlock);
+
+ ddp_log_info("nppods %u (0x%x ~ 0x%x), bits %u, mask 0x%x,0x%x "
+ "pkt %u,%u.\n",
+ ppmax, ddp->llimit, ddp->ulimit, ddp->idx_bits,
+ ddp->idx_mask, ddp->rsvd_tag_mask,
+ ddp->max_txsz, ddp->max_rxsz);
+ return 0;
+
+free_ddp_map:
+ cxgb3i_free_big_mem(ddp);
+ return err;
+}
+
+/**
+ * cxgb3i_adapter_ddp_init - initialize the adapter's ddp resource
+ * @tdev: t3cdev adapter
+ * @tformat: tag format
+ * @txsz: max tx pkt size, filled in by this func.
+ * @rxsz: max rx pkt size, filled in by this func.
+ * initialize the ddp pagepod manager for a given adapter if needed and
+ * setup the tag format for a given iscsi entity
+ */
+int cxgb3i_adapter_ddp_init(struct t3cdev *tdev,
+ struct cxgb3i_tag_format *tformat,
+ unsigned int *txsz, unsigned int *rxsz)
+{
+ struct cxgb3i_ddp_info *ddp;
+
+ if (!tformat)
+ return -EINVAL;
+
+ if (!tdev->ulp_iscsi) {
+ int err = ddp_init(tdev);
+ if (err < 0)
+ return err;
+ }
+ ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi;
+
+ tformat->rsvd_bits = ddp->idx_bits;
+ tformat->rsvd_shift = PPOD_IDX_SHIFT;
+ tformat->rsvd_mask = (1 << tformat->rsvd_bits) - 1;
+
+ ddp_log_info("tag format: sw %u, rsvd %u,%u, mask 0x%x.\n",
+ tformat->sw_bits, tformat->rsvd_bits,
+ tformat->rsvd_shift, tformat->rsvd_mask);
+
+ *txsz = ddp->max_txsz;
+ *rxsz = ddp->max_rxsz;
+ ddp_log_info("ddp max pkt size: %u, %u.\n",
+ ddp->max_txsz, ddp->max_rxsz);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxgb3i_adapter_ddp_init);
+
+static void ddp_release(struct cxgb3i_ddp_info *ddp)
+{
+ int i = 0;
+ struct t3cdev *tdev = ddp->tdev;
+
+ tdev->ulp_iscsi = NULL;
+ while (i < ddp->nppods) {
+ struct cxgb3i_gather_list *gl = ddp->gl_map[i];
+ if (gl) {
+ int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
+ >> PPOD_PAGES_SHIFT;
+
+ kfree(gl);
+ ddp_free_gl_skb(ddp, i, npods);
+ /* advance past all the entries covered by this gl,
+ * otherwise the loop never terminates
+ */
+ i += npods;
+ } else
+ i++;
+ }
+ cxgb3i_free_big_mem(ddp);
+}
+
+/**
+ * cxgb3i_adapter_ddp_cleanup - release the adapter's ddp resource
+ * @tdev: t3cdev adapter
+ * release all the resource held by the ddp pagepod manager for a given
+ * adapter if needed
+ */
+void cxgb3i_adapter_ddp_cleanup(struct t3cdev *tdev)
+{
+ struct cxgb3i_ddp_info *ddp, *found = NULL;
+
+ /* remove from the list; track the match explicitly, since the
+ * list cursor is not NULL when no entry matches
+ */
+ write_lock(&cxgb3i_ddp_rwlock);
+ list_for_each_entry(ddp, &cxgb3i_ddp_list, list) {
+ if (ddp->tdev == tdev) {
+ list_del(&ddp->list);
+ found = ddp;
+ break;
+ }
+ }
+ write_unlock(&cxgb3i_ddp_rwlock);
+
+ if (found)
+ ddp_release(found);
+}
+EXPORT_SYMBOL_GPL(cxgb3i_adapter_ddp_cleanup);
+
+/**
+ * cxgb3i_ddp_init_module - module init entry point
+ * initialize any driver wide global data structures
+ */
+static int __init cxgb3i_ddp_init_module(void)
+{
+ page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE);
+ ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n",
+ PAGE_SIZE, page_idx);
+ return 0;
+}
+
+/**
+ * cxgb3i_ddp_exit_module - module cleanup/exit entry point
+ * go through the ddp list and release any resource held.
+ */
+static void __exit cxgb3i_ddp_exit_module(void)
+{
+ struct cxgb3i_ddp_info *ddp, *tmp;
+
+ /* release all the ddp managers, if there are any; use the _safe
+ * iterator since ddp_release() frees the entry being walked
+ */
+ write_lock(&cxgb3i_ddp_rwlock);
+ list_for_each_entry_safe(ddp, tmp, &cxgb3i_ddp_list, list) {
+ list_del(&ddp->list);
+ ddp_release(ddp);
+ }
+ write_unlock(&cxgb3i_ddp_rwlock);
+}
+
+module_init(cxgb3i_ddp_init_module);
+module_exit(cxgb3i_ddp_exit_module);
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.h b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
new file mode 100644
index 000000000000..5c7c4d95c493
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
@@ -0,0 +1,306 @@
+/*
+ * cxgb3i_ddp.h: Chelsio S3xx iSCSI DDP Manager.
+ *
+ * Copyright (c) 2008 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ */
+
+#ifndef __CXGB3I_ULP2_DDP_H__
+#define __CXGB3I_ULP2_DDP_H__
+
+/**
+ * struct cxgb3i_tag_format - cxgb3i ulp tag format for an iscsi entity
+ *
+ * @sw_bits: # of bits used by iscsi software layer
+ * @rsvd_bits: # of bits used by h/w
+ * @rsvd_shift: h/w bits shift left
+ * @rsvd_mask: reserved bit mask
+ */
+struct cxgb3i_tag_format {
+ unsigned char sw_bits;
+ unsigned char rsvd_bits;
+ unsigned char rsvd_shift;
+ unsigned char filler[1];
+ u32 rsvd_mask;
+};
+
+/**
+ * struct cxgb3i_gather_list - cxgb3i direct data placement memory
+ *
+ * @tag: ddp tag
+ * @length: total data buffer length
+ * @offset: initial offset to the 1st page
+ * @nelem: # of pages
+ * @pages: page pointers
+ * @phys_addr: physical address
+ */
+struct cxgb3i_gather_list {
+ u32 tag;
+ unsigned int length;
+ unsigned int offset;
+ unsigned int nelem;
+ struct page **pages;
+ dma_addr_t phys_addr[0];
+};
+
+/**
+ * struct cxgb3i_ddp_info - cxgb3i direct data placement for pdu payload
+ *
+ * @list: list head to link elements
+ * @tdev: pointer to t3cdev used by cxgb3 driver
+ * @max_txsz: max tx packet size for ddp
+ * @max_rxsz: max rx packet size for ddp
+ * @llimit: lower bound of the page pod memory
+ * @ulimit: upper bound of the page pod memory
+ * @nppods: # of page pod entries
+ * @idx_last: page pod entry last used
+ * @idx_bits: # of bits the pagepod index would take
+ * @idx_mask: pagepod index mask
+ * @rsvd_tag_mask: tag mask
+ * @map_lock: lock to synchronize access to the page pod map
+ * @gl_map: ddp memory gather list
+ * @gl_skb: skb used to program the pagepod
+ */
+struct cxgb3i_ddp_info {
+ struct list_head list;
+ struct t3cdev *tdev;
+ struct pci_dev *pdev;
+ unsigned int max_txsz;
+ unsigned int max_rxsz;
+ unsigned int llimit;
+ unsigned int ulimit;
+ unsigned int nppods;
+ unsigned int idx_last;
+ unsigned char idx_bits;
+ unsigned char filler[3];
+ u32 idx_mask;
+ u32 rsvd_tag_mask;
+ spinlock_t map_lock;
+ struct cxgb3i_gather_list **gl_map;
+ struct sk_buff **gl_skb;
+};
+
+#define ULP2_MAX_PKT_SIZE 16224
+#define ULP2_MAX_PDU_PAYLOAD (ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_MAX)
+#define PPOD_PAGES_MAX 4
+#define PPOD_PAGES_SHIFT 2 /* 4 pages per pod */
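+
+/*
+ * Illustrative example: a gather list of 5 pages needs
+ * npods = (5 + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT = 2 pagepods,
+ * i.e. 2 * PPOD_SIZE = 128 bytes of pagepod memory.
+ */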
+
+/*
+ * struct pagepod_hdr, pagepod - pagepod format
+ */
+struct pagepod_hdr {
+ u32 vld_tid;
+ u32 pgsz_tag_clr;
+ u32 maxoffset;
+ u32 pgoffset;
+ u64 rsvd;
+};
+
+struct pagepod {
+ struct pagepod_hdr hdr;
+ u64 addr[PPOD_PAGES_MAX + 1];
+};
+
+#define PPOD_SIZE sizeof(struct pagepod) /* 64 */
+#define PPOD_SIZE_SHIFT 6
+
+#define PPOD_COLOR_SHIFT 0
+#define PPOD_COLOR_SIZE 6
+#define PPOD_COLOR_MASK ((1 << PPOD_COLOR_SIZE) - 1)
+
+#define PPOD_IDX_SHIFT PPOD_COLOR_SIZE
+#define PPOD_IDX_MAX_SIZE 24
+
+#define S_PPOD_TID 0
+#define M_PPOD_TID 0xFFFFFF
+#define V_PPOD_TID(x) ((x) << S_PPOD_TID)
+
+#define S_PPOD_VALID 24
+#define V_PPOD_VALID(x) ((x) << S_PPOD_VALID)
+#define F_PPOD_VALID V_PPOD_VALID(1U)
+
+#define S_PPOD_COLOR 0
+#define M_PPOD_COLOR 0x3F
+#define V_PPOD_COLOR(x) ((x) << S_PPOD_COLOR)
+
+#define S_PPOD_TAG 6
+#define M_PPOD_TAG 0xFFFFFF
+#define V_PPOD_TAG(x) ((x) << S_PPOD_TAG)
+
+#define S_PPOD_PGSZ 30
+#define M_PPOD_PGSZ 0x3
+#define V_PPOD_PGSZ(x) ((x) << S_PPOD_PGSZ)
+
+/*
+ * large memory chunk allocation/release
+ * use vmalloc() if kmalloc() fails
+ */
+static inline void *cxgb3i_alloc_big_mem(unsigned int size,
+ gfp_t gfp)
+{
+ void *p = kmalloc(size, gfp);
+ if (!p)
+ p = vmalloc(size);
+ if (p)
+ memset(p, 0, size);
+ return p;
+}
+
+static inline void cxgb3i_free_big_mem(void *addr)
+{
+ if (is_vmalloc_addr(addr))
+ vfree(addr);
+ else
+ kfree(addr);
+}
+
+/*
+ * cxgb3i ddp tags are 32 bits. A tag consists of reserved bits used by the
+ * h/w and non-reserved bits that can be used by the iscsi s/w.
+ * The reserved bits are identified by the rsvd_bits and rsvd_shift fields
+ * in struct cxgb3i_tag_format.
+ *
+ * The upper most reserved bit can be used to check if a tag is a ddp tag:
+ * if the bit is 0, the tag is a valid ddp tag
+ */
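+
+/*
+ * Worked example (illustrative only -- rsvd_bits depends on the size of
+ * the pagepod memory, see ddp_init()): with rsvd_bits = 10 and
+ * rsvd_shift = PPOD_IDX_SHIFT = 6, a ddp tag is laid out as
+ *
+ *	bit 31 ............. 16 15 ......... 6 5 ..... 0
+ *	[ s/w tag upper bits ][  ppod index  ][ color ]
+ *
+ * where "color" is the lower 6 bits of the s/w tag. The upper most
+ * reserved bit (bit 15 here) stays 0 for a ddp tag because ddp_init()
+ * caps nppods below 1 << (idx_bits - 1); cxgb3i_set_non_ddp_tag() sets
+ * that bit to 1 instead.
+ */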
+
+/**
+ * cxgb3i_is_ddp_tag - check if a given tag is a hw/ddp tag
+ * @tformat: tag format information
+ * @tag: tag to be checked
+ *
+ * return true if the tag is a ddp tag, false otherwise.
+ */
+static inline int cxgb3i_is_ddp_tag(struct cxgb3i_tag_format *tformat, u32 tag)
+{
+ return !(tag & (1 << (tformat->rsvd_bits + tformat->rsvd_shift - 1)));
+}
+
+/**
+ * cxgb3i_sw_tag_usable - check if a given s/w tag has enough bits left for
+ * the reserved/hw bits
+ * @tformat: tag format information
+ * @sw_tag: s/w tag to be checked
+ *
+ * return true if the s/w tag leaves room for the reserved/hw bits,
+ * false otherwise.
+ */
+static inline int cxgb3i_sw_tag_usable(struct cxgb3i_tag_format *tformat,
+ u32 sw_tag)
+{
+ sw_tag >>= (32 - tformat->rsvd_bits);
+ return !sw_tag;
+}
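+
+/* e.g. with rsvd_bits = 10, any sw_tag below (1 << 22) is usable */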
+
+/**
+ * cxgb3i_set_non_ddp_tag - mark a given s/w tag as an invalid ddp tag
+ * @tformat: tag format information
+ * @sw_tag: s/w tag to be checked
+ *
+ * insert 1 at the upper most reserved bit to mark it as an invalid ddp tag.
+ */
+static inline u32 cxgb3i_set_non_ddp_tag(struct cxgb3i_tag_format *tformat,
+ u32 sw_tag)
+{
+ unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1;
+ u32 mask = (1 << shift) - 1;
+
+ if (sw_tag && (sw_tag & ~mask)) {
+ u32 v1 = sw_tag & ((1 << shift) - 1);
+ u32 v2 = (sw_tag >> (shift - 1)) << shift;
+
+ return v2 | v1 | 1 << shift;
+ }
+ return sw_tag | 1 << shift;
+}
+
+/**
+ * cxgb3i_ddp_tag_base - shift the s/w tag bits so that reserved bits are not
+ * used.
+ * @tformat: tag format information
+ * @sw_tag: s/w tag to be checked
+ */
+static inline u32 cxgb3i_ddp_tag_base(struct cxgb3i_tag_format *tformat,
+ u32 sw_tag)
+{
+ u32 mask = (1 << tformat->rsvd_shift) - 1;
+
+ if (sw_tag && (sw_tag & ~mask)) {
+ u32 v1 = sw_tag & mask;
+ u32 v2 = sw_tag >> tformat->rsvd_shift;
+
+ v2 <<= tformat->rsvd_shift + tformat->rsvd_bits;
+ return v2 | v1;
+ }
+ return sw_tag;
+}
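+
+/* e.g. (illustrative, rsvd_shift = 6, rsvd_bits = 10): sw_tag 0x50
+ * becomes 0x10010 -- the low 6 bits (0x10) stay put while the rest (0x1)
+ * moves above bit 15, leaving bits 6..15 free for the ppod index.
+ */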
+
+/**
+ * cxgb3i_tag_rsvd_bits - get the reserved bits used by the h/w
+ * @tformat: tag format information
+ * @tag: tag to be checked
+ *
+ * return the reserved bits in the tag
+ */
+static inline u32 cxgb3i_tag_rsvd_bits(struct cxgb3i_tag_format *tformat,
+ u32 tag)
+{
+ if (cxgb3i_is_ddp_tag(tformat, tag))
+ return (tag >> tformat->rsvd_shift) & tformat->rsvd_mask;
+ return 0;
+}
+
+/**
+ * cxgb3i_tag_nonrsvd_bits - get the non-reserved bits used by the s/w
+ * @tformat: tag format information
+ * @tag: tag to be checked
+ *
+ * return the non-reserved bits in the tag.
+ */
+static inline u32 cxgb3i_tag_nonrsvd_bits(struct cxgb3i_tag_format *tformat,
+ u32 tag)
+{
+ unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1;
+ u32 v1, v2;
+
+ if (cxgb3i_is_ddp_tag(tformat, tag)) {
+ v1 = tag & ((1 << tformat->rsvd_shift) - 1);
+ v2 = (tag >> (shift + 1)) << tformat->rsvd_shift;
+ } else {
+ u32 mask = (1 << shift) - 1;
+
+ tag &= ~(1 << shift);
+ v1 = tag & mask;
+ v2 = (tag >> 1) & ~mask;
+ }
+ return v1 | v2;
+}
+
+int cxgb3i_ddp_tag_reserve(struct t3cdev *, unsigned int tid,
+ struct cxgb3i_tag_format *, u32 *tag,
+ struct cxgb3i_gather_list *, gfp_t gfp);
+void cxgb3i_ddp_tag_release(struct t3cdev *, u32 tag);
+
+struct cxgb3i_gather_list *cxgb3i_ddp_make_gl(unsigned int xferlen,
+ struct scatterlist *sgl,
+ unsigned int sgcnt,
+ struct pci_dev *pdev,
+ gfp_t gfp);
+void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl,
+ struct pci_dev *pdev);
+
+int cxgb3i_setup_conn_host_pagesize(struct t3cdev *, unsigned int tid,
+ int reply);
+int cxgb3i_setup_conn_pagesize(struct t3cdev *, unsigned int tid, int reply,
+ unsigned long pgsz);
+int cxgb3i_setup_conn_digest(struct t3cdev *, unsigned int tid,
+ int hcrc, int dcrc, int reply);
+int cxgb3i_ddp_find_page_index(unsigned long pgsz);
+int cxgb3i_adapter_ddp_init(struct t3cdev *, struct cxgb3i_tag_format *,
+ unsigned int *txsz, unsigned int *rxsz);
+void cxgb3i_adapter_ddp_cleanup(struct t3cdev *);
+#endif
diff --git a/drivers/scsi/cxgb3i/cxgb3i_init.c b/drivers/scsi/cxgb3i/cxgb3i_init.c
new file mode 100644
index 000000000000..091ecb4d9f3d
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_init.c
@@ -0,0 +1,107 @@
+/* cxgb3i_init.c: Chelsio S3xx iSCSI driver.
+ *
+ * Copyright (c) 2008 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ */
+
+#include "cxgb3i.h"
+
+#define DRV_MODULE_NAME "cxgb3i"
+#define DRV_MODULE_VERSION "1.0.0"
+#define DRV_MODULE_RELDATE "Jun. 1, 2008"
+
+static char version[] =
+ "Chelsio S3xx iSCSI Driver " DRV_MODULE_NAME
+ " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Karen Xie <kxie@chelsio.com>");
+MODULE_DESCRIPTION("Chelsio S3xx iSCSI Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+static void open_s3_dev(struct t3cdev *);
+static void close_s3_dev(struct t3cdev *);
+
+static cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS];
+static struct cxgb3_client t3c_client = {
+ .name = "iscsi_cxgb3",
+ .handlers = cxgb3i_cpl_handlers,
+ .add = open_s3_dev,
+ .remove = close_s3_dev,
+};
+
+/**
+ * open_s3_dev - register with cxgb3 LLD
+ * @t3dev: cxgb3 adapter instance
+ */
+static void open_s3_dev(struct t3cdev *t3dev)
+{
+ static int vers_printed;
+
+ if (!vers_printed) {
+ printk(KERN_INFO "%s", version);
+ vers_printed = 1;
+ }
+
+ cxgb3i_sdev_add(t3dev, &t3c_client);
+ cxgb3i_adapter_add(t3dev);
+}
+
+/**
+ * close_s3_dev - de-register with cxgb3 LLD
+ * @t3dev: cxgb3 adapter instance
+ */
+static void close_s3_dev(struct t3cdev *t3dev)
+{
+ cxgb3i_adapter_remove(t3dev);
+ cxgb3i_sdev_remove(t3dev);
+}
+
+/**
+ * cxgb3i_init_module - module init entry point
+ *
+ * initialize any driver wide global data structures and register itself
+ * with the cxgb3 module
+ */
+static int __init cxgb3i_init_module(void)
+{
+ int err;
+
+ err = cxgb3i_sdev_init(cxgb3i_cpl_handlers);
+ if (err < 0)
+ return err;
+
+ err = cxgb3i_iscsi_init();
+ if (err < 0)
+ return err;
+
+ err = cxgb3i_pdu_init();
+ if (err < 0)
+ return err;
+
+ cxgb3_register_client(&t3c_client);
+
+ return 0;
+}
+
+/**
+ * cxgb3i_exit_module - module cleanup/exit entry point
+ *
+ * go through the driver hba list and, for each hba, release any resources
+ * held; then unregister the iscsi transport and de-register from the cxgb3
+ * module
+ */
+static void __exit cxgb3i_exit_module(void)
+{
+ cxgb3_unregister_client(&t3c_client);
+ cxgb3i_pdu_cleanup();
+ cxgb3i_iscsi_cleanup();
+ cxgb3i_sdev_cleanup();
+}
+
+module_init(cxgb3i_init_module);
+module_exit(cxgb3i_exit_module);
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
new file mode 100644
index 000000000000..d83464b9b3f9
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -0,0 +1,951 @@
+/* cxgb3i_iscsi.c: Chelsio S3xx iSCSI driver.
+ *
+ * Copyright (c) 2008 Chelsio Communications, Inc.
+ * Copyright (c) 2008 Mike Christie
+ * Copyright (c) 2008 Red Hat, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ */
+
+#include <linux/inet.h>
+#include <linux/crypto.h>
+#include <net/tcp.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi.h>
+#include <scsi/iscsi_proto.h>
+#include <scsi/libiscsi.h>
+#include <scsi/scsi_transport_iscsi.h>
+
+#include "cxgb3i.h"
+#include "cxgb3i_pdu.h"
+
+#ifdef __DEBUG_CXGB3I_TAG__
+#define cxgb3i_tag_debug cxgb3i_log_debug
+#else
+#define cxgb3i_tag_debug(fmt...)
+#endif
+
+#ifdef __DEBUG_CXGB3I_API__
+#define cxgb3i_api_debug cxgb3i_log_debug
+#else
+#define cxgb3i_api_debug(fmt...)
+#endif
+
+/*
+ * align pdu size to multiple of 512 for better performance
+ */
+#define align_pdu_size(n) do { n = (n) & (~511); } while (0)
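+/* e.g. align_pdu_size() rounds n = 13000 down to 12800 (25 * 512) */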
+
+static struct scsi_transport_template *cxgb3i_scsi_transport;
+static struct scsi_host_template cxgb3i_host_template;
+static struct iscsi_transport cxgb3i_iscsi_transport;
+static unsigned char sw_tag_idx_bits;
+static unsigned char sw_tag_age_bits;
+
+static LIST_HEAD(cxgb3i_snic_list);
+static DEFINE_RWLOCK(cxgb3i_snic_rwlock);
+
+/**
+ * cxgb3i_adapter_add - init a s3 adapter structure and any h/w settings
+ * @t3dev: t3cdev adapter
+ * return the resulting cxgb3i_adapter struct
+ */
+struct cxgb3i_adapter *cxgb3i_adapter_add(struct t3cdev *t3dev)
+{
+ struct cxgb3i_adapter *snic;
+ struct adapter *adapter = tdev2adap(t3dev);
+ int i;
+
+ snic = kzalloc(sizeof(*snic), GFP_KERNEL);
+ if (!snic) {
+ cxgb3i_api_debug("cxgb3 %s, OOM.\n", t3dev->name);
+ return NULL;
+ }
+ spin_lock_init(&snic->lock);
+
+ snic->tdev = t3dev;
+ snic->pdev = adapter->pdev;
+ snic->tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits;
+
+ if (cxgb3i_adapter_ddp_init(t3dev, &snic->tag_format,
+ &snic->tx_max_size,
+ &snic->rx_max_size) < 0)
+ goto free_snic;
+
+ for_each_port(adapter, i) {
+ snic->hba[i] = cxgb3i_hba_host_add(snic, adapter->port[i]);
+ if (!snic->hba[i])
+ goto ulp_cleanup;
+ }
+ snic->hba_cnt = adapter->params.nports;
+
+ /* add to the list */
+ write_lock(&cxgb3i_snic_rwlock);
+ list_add_tail(&snic->list_head, &cxgb3i_snic_list);
+ write_unlock(&cxgb3i_snic_rwlock);
+
+ return snic;
+
+ulp_cleanup:
+ cxgb3i_adapter_ddp_cleanup(t3dev);
+free_snic:
+ kfree(snic);
+ return NULL;
+}
+
+/**
+ * cxgb3i_adapter_remove - release all the resources held and cleanup any
+ * h/w settings
+ * @t3dev: t3cdev adapter
+ */
+void cxgb3i_adapter_remove(struct t3cdev *t3dev)
+{
+ int i;
+ struct cxgb3i_adapter *snic, *found = NULL;
+
+ /* remove from the list; track the match explicitly, since the
+ * list cursor is not NULL when no entry matches
+ */
+ write_lock(&cxgb3i_snic_rwlock);
+ list_for_each_entry(snic, &cxgb3i_snic_list, list_head) {
+ if (snic->tdev == t3dev) {
+ list_del(&snic->list_head);
+ found = snic;
+ break;
+ }
+ }
+ write_unlock(&cxgb3i_snic_rwlock);
+
+ if (found) {
+ for (i = 0; i < found->hba_cnt; i++) {
+ if (found->hba[i]) {
+ cxgb3i_hba_host_remove(found->hba[i]);
+ found->hba[i] = NULL;
+ }
+ }
+
+ /* release ddp resources */
+ cxgb3i_adapter_ddp_cleanup(found->tdev);
+ kfree(found);
+ }
+}
+
+/**
+ * cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure with a given
+ * net_device
+ * @ndev: the net_device to look up
+ */
+struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev)
+{
+ struct cxgb3i_adapter *snic;
+ int i;
+
+ read_lock(&cxgb3i_snic_rwlock);
+ list_for_each_entry(snic, &cxgb3i_snic_list, list_head) {
+ for (i = 0; i < snic->hba_cnt; i++) {
+ if (snic->hba[i]->ndev == ndev) {
+ read_unlock(&cxgb3i_snic_rwlock);
+ return snic->hba[i];
+ }
+ }
+ }
+ read_unlock(&cxgb3i_snic_rwlock);
+ return NULL;
+}
+
+/**
+ * cxgb3i_hba_host_add - register a new host with scsi/iscsi
+ * @snic: the cxgb3i adapter
+ * @ndev: associated net_device
+ */
+struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *snic,
+ struct net_device *ndev)
+{
+ struct cxgb3i_hba *hba;
+ struct Scsi_Host *shost;
+ int err;
+
+ shost = iscsi_host_alloc(&cxgb3i_host_template,
+ sizeof(struct cxgb3i_hba),
+ CXGB3I_SCSI_QDEPTH_DFLT);
+ if (!shost) {
+ cxgb3i_log_info("iscsi_host_alloc failed.\n");
+ return NULL;
+ }
+
+ shost->transportt = cxgb3i_scsi_transport;
+ shost->max_lun = CXGB3I_MAX_LUN;
+ shost->max_id = CXGB3I_MAX_TARGET;
+ shost->max_channel = 0;
+ shost->max_cmd_len = 16;
+
+ hba = iscsi_host_priv(shost);
+ hba->snic = snic;
+ hba->ndev = ndev;
+ hba->shost = shost;
+
+ pci_dev_get(snic->pdev);
+ err = iscsi_host_add(shost, &snic->pdev->dev);
+ if (err) {
+ cxgb3i_log_info("iscsi_host_add failed.\n");
+ goto pci_dev_put;
+ }
+
+ cxgb3i_api_debug("shost 0x%p, hba 0x%p, no %u.\n",
+ shost, hba, shost->host_no);
+
+ return hba;
+
+pci_dev_put:
+ pci_dev_put(snic->pdev);
+ scsi_host_put(shost);
+ return NULL;
+}
+
+/**
+ * cxgb3i_hba_host_remove - de-register the host with scsi/iscsi
+ * @hba: the cxgb3i hba
+ */
+void cxgb3i_hba_host_remove(struct cxgb3i_hba *hba)
+{
+ cxgb3i_api_debug("shost 0x%p, hba 0x%p, no %u.\n",
+ hba->shost, hba, hba->shost->host_no);
+ iscsi_host_remove(hba->shost);
+ pci_dev_put(hba->snic->pdev);
+ iscsi_host_free(hba->shost);
+}
+
+/**
+ * cxgb3i_ep_connect - establish TCP connection to target portal
+ * @dst_addr: target IP address
+ * @non_blocking: blocking or non-blocking call
+ *
+ * Initiates a TCP/IP connection to the dst_addr
+ */
+static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr,
+ int non_blocking)
+{
+ struct iscsi_endpoint *ep;
+ struct cxgb3i_endpoint *cep;
+ struct cxgb3i_hba *hba;
+ struct s3_conn *c3cn = NULL;
+ int err = 0;
+
+ c3cn = cxgb3i_c3cn_create();
+ if (!c3cn) {
+ cxgb3i_log_info("ep connect OOM.\n");
+ err = -ENOMEM;
+ goto release_conn;
+ }
+
+ err = cxgb3i_c3cn_connect(c3cn, (struct sockaddr_in *)dst_addr);
+ if (err < 0) {
+ cxgb3i_log_info("ep connect failed.\n");
+ goto release_conn;
+ }
+ hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev);
+ if (!hba) {
+ err = -ENOSPC;
+ cxgb3i_log_info("NOT going through cxgbi device.\n");
+ goto release_conn;
+ }
+ if (c3cn_is_closing(c3cn)) {
+ err = -ENOSPC;
+ cxgb3i_log_info("ep connect unable to connect.\n");
+ goto release_conn;
+ }
+
+ ep = iscsi_create_endpoint(sizeof(*cep));
+ if (!ep) {
+ err = -ENOMEM;
+ cxgb3i_log_info("iscsi alloc ep, OOM.\n");
+ goto release_conn;
+ }
+ cep = ep->dd_data;
+ cep->c3cn = c3cn;
+ cep->hba = hba;
+
+ cxgb3i_api_debug("ep 0x%p, 0x%p, c3cn 0x%p, hba 0x%p.\n",
+ ep, cep, c3cn, hba);
+ return ep;
+
+release_conn:
+ cxgb3i_api_debug("conn 0x%p failed, release.\n", c3cn);
+ if (c3cn)
+ cxgb3i_c3cn_release(c3cn);
+ return ERR_PTR(err);
+}
+
+/**
+ * cxgb3i_ep_poll - polls for TCP connection establishment
+ * @ep: TCP connection (endpoint) handle
+ * @timeout_ms: timeout value in milliseconds
+ *
+ * polls for TCP connect request to complete
+ */
+static int cxgb3i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+{
+ struct cxgb3i_endpoint *cep = ep->dd_data;
+ struct s3_conn *c3cn = cep->c3cn;
+
+ if (!c3cn_is_established(c3cn))
+ return 0;
+ cxgb3i_api_debug("ep 0x%p, c3cn 0x%p established.\n", ep, c3cn);
+ return 1;
+}
+
+/**
+ * cxgb3i_ep_disconnect - teardown TCP connection
+ * @ep: TCP connection (endpoint) handle
+ *
+ * teardown TCP connection
+ */
+static void cxgb3i_ep_disconnect(struct iscsi_endpoint *ep)
+{
+ struct cxgb3i_endpoint *cep = ep->dd_data;
+ struct cxgb3i_conn *cconn = cep->cconn;
+
+ cxgb3i_api_debug("ep 0x%p, cep 0x%p.\n", ep, cep);
+
+ if (cconn && cconn->conn) {
+ /*
+ * stop the xmit path so the xmit_pdu function is
+ * not being called
+ */
+ iscsi_suspend_tx(cconn->conn);
+
+ write_lock_bh(&cep->c3cn->callback_lock);
+ cep->c3cn->user_data = NULL;
+ cconn->cep = NULL;
+ write_unlock_bh(&cep->c3cn->callback_lock);
+ }
+
+ cxgb3i_api_debug("ep 0x%p, cep 0x%p, release c3cn 0x%p.\n",
+ ep, cep, cep->c3cn);
+ cxgb3i_c3cn_release(cep->c3cn);
+ iscsi_destroy_endpoint(ep);
+}
+
+/**
+ * cxgb3i_session_create - create a new iscsi session
+ * @ep: iscsi endpoint the session is bound to
+ * @cmds_max: max # of commands
+ * @qdepth: scsi queue depth
+ * @initial_cmdsn: initial iscsi CMDSN for this session
+ * @host_no: pointer to return host no
+ *
+ * Creates a new iSCSI session
+ */
+static struct iscsi_cls_session *
+cxgb3i_session_create(struct iscsi_endpoint *ep, u16 cmds_max, u16 qdepth,
+ u32 initial_cmdsn, u32 *host_no)
+{
+ struct cxgb3i_endpoint *cep;
+ struct cxgb3i_hba *hba;
+ struct Scsi_Host *shost;
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+
+ if (!ep) {
+ cxgb3i_log_error("%s, missing endpoint.\n", __func__);
+ return NULL;
+ }
+
+ cep = ep->dd_data;
+ hba = cep->hba;
+ shost = hba->shost;
+ cxgb3i_api_debug("ep 0x%p, cep 0x%p, hba 0x%p.\n", ep, cep, hba);
+ BUG_ON(hba != iscsi_host_priv(shost));
+
+ *host_no = shost->host_no;
+
+ cls_session = iscsi_session_setup(&cxgb3i_iscsi_transport, shost,
+ cmds_max,
+ sizeof(struct iscsi_tcp_task),
+ initial_cmdsn, ISCSI_MAX_TARGET);
+ if (!cls_session)
+ return NULL;
+ session = cls_session->dd_data;
+ if (iscsi_tcp_r2tpool_alloc(session))
+ goto remove_session;
+
+ return cls_session;
+
+remove_session:
+ iscsi_session_teardown(cls_session);
+ return NULL;
+}
+
+/**
+ * cxgb3i_session_destroy - destroys iscsi session
+ * @cls_session: pointer to iscsi cls session
+ *
+ * Destroys an iSCSI session instance and releases all the resources held
+ */
+static void cxgb3i_session_destroy(struct iscsi_cls_session *cls_session)
+{
+ cxgb3i_api_debug("sess 0x%p.\n", cls_session);
+ iscsi_tcp_r2tpool_free(cls_session->dd_data);
+ iscsi_session_teardown(cls_session);
+}
+
+/**
+ * cxgb3i_conn_max_xmit_dlength - check the max. xmit pdu segment size,
+ * reduce it to be within the hardware limit if needed
+ * @conn: iscsi connection
+ */
+static inline int cxgb3i_conn_max_xmit_dlength(struct iscsi_conn *conn)
+{
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct cxgb3i_conn *cconn = tcp_conn->dd_data;
+ unsigned int max = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
+ cconn->hba->snic->tx_max_size -
+ ISCSI_PDU_NONPAYLOAD_MAX);
+
+ if (conn->max_xmit_dlength)
+ conn->max_xmit_dlength = min_t(unsigned int,
+ conn->max_xmit_dlength, max);
+ else
+ conn->max_xmit_dlength = max;
+ align_pdu_size(conn->max_xmit_dlength);
+ cxgb3i_log_info("conn 0x%p, max xmit %u.\n",
+ conn, conn->max_xmit_dlength);
+ return 0;
+}
+
+/**
+ * cxgb3i_conn_max_recv_dlength - check the max. recv pdu segment size against
+ * the hardware limit
+ * @conn: iscsi connection
+ * return 0 if the value is valid, < 0 otherwise.
+ */
+static inline int cxgb3i_conn_max_recv_dlength(struct iscsi_conn *conn)
+{
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct cxgb3i_conn *cconn = tcp_conn->dd_data;
+ unsigned int max = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
+ cconn->hba->snic->rx_max_size -
+ ISCSI_PDU_NONPAYLOAD_MAX);
+
+ align_pdu_size(max);
+ if (conn->max_recv_dlength) {
+ if (conn->max_recv_dlength > max) {
+ cxgb3i_log_error("MaxRecvDataSegmentLength %u too big."
+ " Need to be <= %u.\n",
+ conn->max_recv_dlength, max);
+ return -EINVAL;
+ }
+ conn->max_recv_dlength = min_t(unsigned int,
+ conn->max_recv_dlength, max);
+ align_pdu_size(conn->max_recv_dlength);
+ } else
+ conn->max_recv_dlength = max;
+ cxgb3i_api_debug("conn 0x%p, max recv %u.\n",
+ conn, conn->max_recv_dlength);
+ return 0;
+}
+
+/**
+ * cxgb3i_conn_create - create iscsi connection instance
+ * @cls_session: pointer to iscsi cls session
+ * @cid: iscsi cid
+ *
+ * Creates a new iSCSI connection instance for a given session
+ */
+static struct iscsi_cls_conn *cxgb3i_conn_create(struct iscsi_cls_session
+ *cls_session, u32 cid)
+{
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_conn *conn;
+ struct iscsi_tcp_conn *tcp_conn;
+ struct cxgb3i_conn *cconn;
+
+ cxgb3i_api_debug("sess 0x%p, cid %u.\n", cls_session, cid);
+
+ cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+ tcp_conn = conn->dd_data;
+ cconn = tcp_conn->dd_data;
+
+ cconn->conn = conn;
+ return cls_conn;
+}
+
+/**
+ * cxgb3i_conn_bind - binds iscsi sess, conn and endpoint together
+ * @cls_session: pointer to iscsi cls session
+ * @cls_conn: pointer to iscsi cls conn
+ * @transport_eph: 64-bit EP handle
+ * @is_leading: leading connection on this session?
+ *
+ * Binds together an iSCSI session, an iSCSI connection and a
+ * TCP connection. This routine returns an error code if the TCP
+ * connection does not belong to the device the iSCSI sess/conn is bound to
+ */
+static int cxgb3i_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn,
+ u64 transport_eph, int is_leading)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct cxgb3i_conn *cconn = tcp_conn->dd_data;
+ struct cxgb3i_adapter *snic;
+ struct iscsi_endpoint *ep;
+ struct cxgb3i_endpoint *cep;
+ struct s3_conn *c3cn;
+ int err;
+
+ ep = iscsi_lookup_endpoint(transport_eph);
+ if (!ep)
+ return -EINVAL;
+
+ /* setup ddp pagesize */
+ cep = ep->dd_data;
+ c3cn = cep->c3cn;
+ snic = cep->hba->snic;
+ err = cxgb3i_setup_conn_host_pagesize(snic->tdev, c3cn->tid, 0);
+ if (err < 0)
+ return err;
+
+ cxgb3i_api_debug("ep 0x%p, cls sess 0x%p, cls conn 0x%p.\n",
+ ep, cls_session, cls_conn);
+
+ err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
+ if (err)
+ return -EINVAL;
+
+ /* calculate the tag idx bits needed for this conn based on cmds_max */
+ cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;
+ cxgb3i_api_debug("session cmds_max 0x%x, bits %u.\n",
+ conn->session->cmds_max, cconn->task_idx_bits);
+
+ read_lock(&c3cn->callback_lock);
+ c3cn->user_data = conn;
+ cconn->hba = cep->hba;
+ cconn->cep = cep;
+ cep->cconn = cconn;
+ read_unlock(&c3cn->callback_lock);
+
+ cxgb3i_conn_max_xmit_dlength(conn);
+ cxgb3i_conn_max_recv_dlength(conn);
+
+ spin_lock_bh(&conn->session->lock);
+ sprintf(conn->portal_address, NIPQUAD_FMT,
+ NIPQUAD(c3cn->daddr.sin_addr.s_addr));
+ conn->portal_port = ntohs(c3cn->daddr.sin_port);
+ spin_unlock_bh(&conn->session->lock);
+
+ /* init recv engine */
+ iscsi_tcp_hdr_recv_prep(tcp_conn);
+
+ return 0;
+}
+
+/**
+ * cxgb3i_conn_get_param - return iscsi connection parameter to caller
+ * @cls_conn: pointer to iscsi cls conn
+ * @param: parameter type identifier
+ * @buf: buffer pointer
+ *
+ * returns iSCSI connection parameters
+ */
+static int cxgb3i_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ int len;
+
+ cxgb3i_api_debug("cls_conn 0x%p, param %d.\n", cls_conn, param);
+
+ switch (param) {
+ case ISCSI_PARAM_CONN_PORT:
+ spin_lock_bh(&conn->session->lock);
+ len = sprintf(buf, "%hu\n", conn->portal_port);
+ spin_unlock_bh(&conn->session->lock);
+ break;
+ case ISCSI_PARAM_CONN_ADDRESS:
+ spin_lock_bh(&conn->session->lock);
+ len = sprintf(buf, "%s\n", conn->portal_address);
+ spin_unlock_bh(&conn->session->lock);
+ break;
+ default:
+ return iscsi_conn_get_param(cls_conn, param, buf);
+ }
+
+ return len;
+}
+
+/**
+ * cxgb3i_conn_set_param - set iscsi connection parameter
+ * @cls_conn: pointer to iscsi cls conn
+ * @param: parameter type identifier
+ * @buf: buffer pointer
+ * @buflen: buffer length
+ *
+ * set iSCSI connection parameters
+ */
+static int cxgb3i_conn_set_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf, int buflen)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct cxgb3i_conn *cconn = tcp_conn->dd_data;
+ struct cxgb3i_adapter *snic = cconn->hba->snic;
+ struct s3_conn *c3cn = cconn->cep->c3cn;
+ int value, err = 0;
+
+ switch (param) {
+ case ISCSI_PARAM_HDRDGST_EN:
+ err = iscsi_set_param(cls_conn, param, buf, buflen);
+ if (!err && conn->hdrdgst_en)
+ err = cxgb3i_setup_conn_digest(snic->tdev, c3cn->tid,
+ conn->hdrdgst_en,
+ conn->datadgst_en, 0);
+ break;
+ case ISCSI_PARAM_DATADGST_EN:
+ err = iscsi_set_param(cls_conn, param, buf, buflen);
+ if (!err && conn->datadgst_en)
+ err = cxgb3i_setup_conn_digest(snic->tdev, c3cn->tid,
+ conn->hdrdgst_en,
+ conn->datadgst_en, 0);
+ break;
+ case ISCSI_PARAM_MAX_R2T:
+ sscanf(buf, "%d", &value);
+ if (value <= 0 || !is_power_of_2(value))
+ return -EINVAL;
+ if (session->max_r2t == value)
+ break;
+ iscsi_tcp_r2tpool_free(session);
+ err = iscsi_set_param(cls_conn, param, buf, buflen);
+ if (!err && iscsi_tcp_r2tpool_alloc(session))
+ return -ENOMEM;
+ break;
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ err = iscsi_set_param(cls_conn, param, buf, buflen);
+ if (!err)
+ err = cxgb3i_conn_max_recv_dlength(conn);
+ break;
+ case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+ err = iscsi_set_param(cls_conn, param, buf, buflen);
+ if (!err)
+ err = cxgb3i_conn_max_xmit_dlength(conn);
+ break;
+ default:
+ return iscsi_set_param(cls_conn, param, buf, buflen);
+ }
+ return err;
+}
+
+/**
+ * cxgb3i_host_set_param - configure host (adapter) related parameters
+ * @shost: scsi host pointer
+ * @param: parameter type identifier
+ * @buf: buffer pointer
+ */
+static int cxgb3i_host_set_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param,
+ char *buf, int buflen)
+{
+ struct cxgb3i_hba *hba = iscsi_host_priv(shost);
+
+ cxgb3i_api_debug("param %d, buf %s.\n", param, buf);
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ {
+ __be32 addr = in_aton(buf);
+ cxgb3i_set_private_ipv4addr(hba->ndev, addr);
+ return 0;
+ }
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ /* ignore */
+ return 0;
+ default:
+ return iscsi_host_set_param(shost, param, buf, buflen);
+ }
+}
+
+/**
+ * cxgb3i_host_get_param - returns host (adapter) related parameters
+ * @shost: scsi host pointer
+ * @param: parameter type identifier
+ * @buf: buffer pointer
+ */
+static int cxgb3i_host_get_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf)
+{
+ struct cxgb3i_hba *hba = iscsi_host_priv(shost);
+ int len = 0;
+
+ cxgb3i_api_debug("hba %s, param %d.\n", hba->ndev->name, param);
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_HWADDRESS:
+ len = sysfs_format_mac(buf, hba->ndev->dev_addr, 6);
+ break;
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+ len = sprintf(buf, "%s\n", hba->ndev->name);
+ break;
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ {
+ __be32 addr;
+
+ addr = cxgb3i_get_private_ipv4addr(hba->ndev);
+ len = sprintf(buf, NIPQUAD_FMT, NIPQUAD(addr));
+ break;
+ }
+ default:
+ return iscsi_host_get_param(shost, param, buf);
+ }
+ return len;
+}
+
+/**
+ * cxgb3i_conn_get_stats - returns iSCSI stats
+ * @cls_conn: pointer to iscsi cls conn
+ * @stats: pointer to iscsi statistic struct
+ */
+static void cxgb3i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+ struct iscsi_stats *stats)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+
+ stats->txdata_octets = conn->txdata_octets;
+ stats->rxdata_octets = conn->rxdata_octets;
+ stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
+ stats->dataout_pdus = conn->dataout_pdus_cnt;
+ stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
+ stats->datain_pdus = conn->datain_pdus_cnt;
+ stats->r2t_pdus = conn->r2t_pdus_cnt;
+ stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
+ stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
+ stats->digest_err = 0;
+ stats->timeout_err = 0;
+ stats->custom_length = 1;
+ strcpy(stats->custom[0].desc, "eh_abort_cnt");
+ stats->custom[0].value = conn->eh_abort_cnt;
+}
+
+/**
+ * cxgb3i_parse_itt - get the idx and age bits from a given tag
+ * @conn: iscsi connection
+ * @itt: itt tag
+ * @idx: task index, filled in by this function
+ * @age: session age, filled in by this function
+ */
+static void cxgb3i_parse_itt(struct iscsi_conn *conn, itt_t itt,
+ int *idx, int *age)
+{
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct cxgb3i_conn *cconn = tcp_conn->dd_data;
+ struct cxgb3i_adapter *snic = cconn->hba->snic;
+ u32 tag = ntohl((__force u32) itt);
+ u32 sw_bits;
+
+ sw_bits = cxgb3i_tag_nonrsvd_bits(&snic->tag_format, tag);
+ if (idx)
+ *idx = sw_bits & ((1 << cconn->task_idx_bits) - 1);
+ if (age)
+ *age = (sw_bits >> cconn->task_idx_bits) & ISCSI_AGE_MASK;
+
+ cxgb3i_tag_debug("parse tag 0x%x/0x%x, sw 0x%x, itt 0x%x, age 0x%x.\n",
+ tag, itt, sw_bits, idx ? *idx : 0xFFFFF,
+ age ? *age : 0xFF);
+}
+
+/**
+ * cxgb3i_reserve_itt - generate a tag for a given task
+ * @task: iscsi task
+ * @hdr_itt: tag, filled in by this function
+ *
+ * try to set up ddp for a scsi read task
+ */
+int cxgb3i_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
+{
+ struct scsi_cmnd *sc = task->sc;
+ struct iscsi_conn *conn = task->conn;
+ struct iscsi_session *sess = conn->session;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct cxgb3i_conn *cconn = tcp_conn->dd_data;
+ struct cxgb3i_adapter *snic = cconn->hba->snic;
+ struct cxgb3i_tag_format *tformat = &snic->tag_format;
+ u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt;
+ u32 tag;
+ int err = -EINVAL;
+
+ if (sc &&
+ (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
+ cxgb3i_sw_tag_usable(tformat, sw_tag)) {
+ struct s3_conn *c3cn = cconn->cep->c3cn;
+ struct cxgb3i_gather_list *gl;
+
+ gl = cxgb3i_ddp_make_gl(scsi_in(sc)->length,
+ scsi_in(sc)->table.sgl,
+ scsi_in(sc)->table.nents,
+ snic->pdev,
+ GFP_ATOMIC);
+ if (gl) {
+ tag = sw_tag;
+ err = cxgb3i_ddp_tag_reserve(snic->tdev, c3cn->tid,
+ tformat, &tag,
+ gl, GFP_ATOMIC);
+ if (err < 0)
+ cxgb3i_ddp_release_gl(gl, snic->pdev);
+ }
+ }
+
+ if (err < 0)
+ tag = cxgb3i_set_non_ddp_tag(tformat, sw_tag);
+ /* the itt needs to be sent in big-endian order */
+ *hdr_itt = (__force itt_t)htonl(tag);
+
+ cxgb3i_tag_debug("new tag 0x%x/0x%x (itt 0x%x, age 0x%x).\n",
+ tag, *hdr_itt, task->itt, sess->age);
+ return 0;
+}
+
+/**
+ * cxgb3i_release_itt - release the tag for a given task
+ * @task: iscsi task
+ * @hdr_itt: tag
+ *
+ * if the tag is a ddp tag, release the ddp setup
+ */
+void cxgb3i_release_itt(struct iscsi_task *task, itt_t hdr_itt)
+{
+ struct scsi_cmnd *sc = task->sc;
+ struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
+ struct cxgb3i_conn *cconn = tcp_conn->dd_data;
+ struct cxgb3i_adapter *snic = cconn->hba->snic;
+ struct cxgb3i_tag_format *tformat = &snic->tag_format;
+ u32 tag = ntohl((__force u32)hdr_itt);
+
+ cxgb3i_tag_debug("release tag 0x%x.\n", tag);
+
+ if (sc &&
+ (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
+ cxgb3i_is_ddp_tag(tformat, tag))
+ cxgb3i_ddp_tag_release(snic->tdev, tag);
+}
+
+/**
+ * cxgb3i_host_template - scsi_host_template structure
+ * used when registering with the scsi mid-layer
+ */
+static struct scsi_host_template cxgb3i_host_template = {
+ .module = THIS_MODULE,
+ .name = "Chelsio S3xx iSCSI Initiator",
+ .proc_name = "cxgb3i",
+ .queuecommand = iscsi_queuecommand,
+ .change_queue_depth = iscsi_change_queue_depth,
+ .can_queue = 128 * (ISCSI_DEF_XMIT_CMDS_MAX - 1),
+ .sg_tablesize = SG_ALL,
+ .max_sectors = 0xFFFF,
+ .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
+ .eh_abort_handler = iscsi_eh_abort,
+ .eh_device_reset_handler = iscsi_eh_device_reset,
+ .eh_target_reset_handler = iscsi_eh_target_reset,
+ .use_clustering = DISABLE_CLUSTERING,
+ .this_id = -1,
+};
+
+static struct iscsi_transport cxgb3i_iscsi_transport = {
+ .owner = THIS_MODULE,
+ .name = "cxgb3i",
+ .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
+ | CAP_DATADGST | CAP_DIGEST_OFFLOAD |
+ CAP_PADDING_OFFLOAD,
+ .param_mask = ISCSI_MAX_RECV_DLENGTH |
+ ISCSI_MAX_XMIT_DLENGTH |
+ ISCSI_HDRDGST_EN |
+ ISCSI_DATADGST_EN |
+ ISCSI_INITIAL_R2T_EN |
+ ISCSI_MAX_R2T |
+ ISCSI_IMM_DATA_EN |
+ ISCSI_FIRST_BURST |
+ ISCSI_MAX_BURST |
+ ISCSI_PDU_INORDER_EN |
+ ISCSI_DATASEQ_INORDER_EN |
+ ISCSI_ERL |
+ ISCSI_CONN_PORT |
+ ISCSI_CONN_ADDRESS |
+ ISCSI_EXP_STATSN |
+ ISCSI_PERSISTENT_PORT |
+ ISCSI_PERSISTENT_ADDRESS |
+ ISCSI_TARGET_NAME | ISCSI_TPGT |
+ ISCSI_USERNAME | ISCSI_PASSWORD |
+ ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+ ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+ ISCSI_LU_RESET_TMO |
+ ISCSI_PING_TMO | ISCSI_RECV_TMO |
+ ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
+ .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
+ ISCSI_HOST_INITIATOR_NAME | ISCSI_HOST_NETDEV_NAME,
+ .get_host_param = cxgb3i_host_get_param,
+ .set_host_param = cxgb3i_host_set_param,
+ /* session management */
+ .create_session = cxgb3i_session_create,
+ .destroy_session = cxgb3i_session_destroy,
+ .get_session_param = iscsi_session_get_param,
+ /* connection management */
+ .create_conn = cxgb3i_conn_create,
+ .bind_conn = cxgb3i_conn_bind,
+ .destroy_conn = iscsi_tcp_conn_teardown,
+ .start_conn = iscsi_conn_start,
+ .stop_conn = iscsi_conn_stop,
+ .get_conn_param = cxgb3i_conn_get_param,
+ .set_param = cxgb3i_conn_set_param,
+ .get_stats = cxgb3i_conn_get_stats,
+ /* pdu xmit req. from user space */
+ .send_pdu = iscsi_conn_send_pdu,
+ /* task */
+ .init_task = iscsi_tcp_task_init,
+ .xmit_task = iscsi_tcp_task_xmit,
+ .cleanup_task = cxgb3i_conn_cleanup_task,
+
+ /* pdu */
+ .alloc_pdu = cxgb3i_conn_alloc_pdu,
+ .init_pdu = cxgb3i_conn_init_pdu,
+ .xmit_pdu = cxgb3i_conn_xmit_pdu,
+ .parse_pdu_itt = cxgb3i_parse_itt,
+
+ /* TCP connect/disconnect */
+ .ep_connect = cxgb3i_ep_connect,
+ .ep_poll = cxgb3i_ep_poll,
+ .ep_disconnect = cxgb3i_ep_disconnect,
+ /* Error recovery timeout call */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+};
+
+int cxgb3i_iscsi_init(void)
+{
+ sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1;
+ sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1;
+ cxgb3i_log_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n",
+ ISCSI_ITT_MASK, sw_tag_idx_bits,
+ ISCSI_AGE_MASK, sw_tag_age_bits);
+
+ cxgb3i_scsi_transport =
+ iscsi_register_transport(&cxgb3i_iscsi_transport);
+ if (!cxgb3i_scsi_transport) {
+ cxgb3i_log_error("Could not register cxgb3i transport.\n");
+ return -ENODEV;
+ }
+ cxgb3i_api_debug("cxgb3i transport 0x%p.\n", cxgb3i_scsi_transport);
+ return 0;
+}
+
+void cxgb3i_iscsi_cleanup(void)
+{
+ if (cxgb3i_scsi_transport) {
+ cxgb3i_api_debug("cxgb3i transport 0x%p.\n",
+ cxgb3i_scsi_transport);
+ iscsi_unregister_transport(&cxgb3i_iscsi_transport);
+ }
+}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
new file mode 100644
index 000000000000..a865f1fefe8b
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -0,0 +1,1810 @@
+/*
+ * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management
+ *
+ * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
+ *
+ * Written by: Dimitris Michailidis (dm@chelsio.com)
+ * Karen Xie (kxie@chelsio.com)
+ */
+
+#include <linux/if_vlan.h>
+#include <linux/version.h>
+
+#include "cxgb3_defs.h"
+#include "cxgb3_ctl_defs.h"
+#include "firmware_exports.h"
+#include "cxgb3i_offload.h"
+#include "cxgb3i_pdu.h"
+#include "cxgb3i_ddp.h"
+
+#ifdef __DEBUG_C3CN_CONN__
+#define c3cn_conn_debug cxgb3i_log_info
+#else
+#define c3cn_conn_debug(fmt...)
+#endif
+
+#ifdef __DEBUG_C3CN_TX__
+#define c3cn_tx_debug cxgb3i_log_debug
+#else
+#define c3cn_tx_debug(fmt...)
+#endif
+
+#ifdef __DEBUG_C3CN_RX__
+#define c3cn_rx_debug cxgb3i_log_debug
+#else
+#define c3cn_rx_debug(fmt...)
+#endif
+
+/*
+ * module parameters related to offloaded iscsi connections
+ */
+static int cxgb3_rcv_win = 256 * 1024;
+module_param(cxgb3_rcv_win, int, 0644);
+MODULE_PARM_DESC(cxgb3_rcv_win, "TCP receive window in bytes (default=256KB)");
+
+static int cxgb3_snd_win = 64 * 1024;
+module_param(cxgb3_snd_win, int, 0644);
+MODULE_PARM_DESC(cxgb3_snd_win, "TCP send window in bytes (default=64KB)");
+
+static int cxgb3_rx_credit_thres = 10 * 1024;
+module_param(cxgb3_rx_credit_thres, int, 0644);
+MODULE_PARM_DESC(cxgb3_rx_credit_thres,
+ "RX credits return threshold in bytes (default=10KB)");
+
+static unsigned int cxgb3_max_connect = 8 * 1024;
+module_param(cxgb3_max_connect, uint, 0644);
+MODULE_PARM_DESC(cxgb3_max_connect, "Max. # of connections (default=8192)");
+
+static unsigned int cxgb3_sport_base = 20000;
+module_param(cxgb3_sport_base, uint, 0644);
+MODULE_PARM_DESC(cxgb3_sport_base, "starting port number (default=20000)");
+
+/*
+ * cxgb3i tcp connection data (per adapter) list
+ */
+static LIST_HEAD(cdata_list);
+static DEFINE_RWLOCK(cdata_rwlock);
+
+static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion);
+static void c3cn_release_offload_resources(struct s3_conn *c3cn);
+
+/*
+ * iscsi source port management
+ *
+ * Find a free source port in the port allocation map. We use a very simple
+ * rotor scheme to look for the next free port.
+ *
+ * If a source port has been specified make sure that it doesn't collide with
+ * our normal source port allocation map. If it's outside the range of our
+ * allocation/deallocation scheme just let them use it.
+ *
+ * If the source port is outside our allocation range, the caller is
+ * responsible for keeping track of their port usage.
+ */
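+/*
+ * With the default module parameters the allocation map covers source
+ * ports cxgb3_sport_base .. cxgb3_sport_base + cxgb3_max_connect - 1,
+ * i.e. 20000..28191.
+ */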
+static int c3cn_get_port(struct s3_conn *c3cn, struct cxgb3i_sdev_data *cdata)
+{
+ unsigned int start;
+ int idx;
+
+ if (!cdata)
+ goto error_out;
+
+ if (c3cn->saddr.sin_port != 0) {
+ idx = ntohs(c3cn->saddr.sin_port) - cxgb3_sport_base;
+ if (idx < 0 || idx >= cxgb3_max_connect)
+ return 0;
+ /* test_and_set_bit() returns the old bit value: non-zero
+ * means the port was already taken
+ */
+ if (test_and_set_bit(idx, cdata->sport_map))
+ return -EADDRINUSE;
+ return 0;
+ }
+
+ /* the sport_map_next may not be accurate but that is okay;
+ * sport_map should be
+ */
+ start = idx = cdata->sport_map_next;
+ do {
+ if (++idx >= cxgb3_max_connect)
+ idx = 0;
+ if (!(test_and_set_bit(idx, cdata->sport_map))) {
+ c3cn->saddr.sin_port = htons(cxgb3_sport_base + idx);
+ cdata->sport_map_next = idx;
+ c3cn_conn_debug("%s reserve port %u.\n",
+ cdata->cdev->name,
+ cxgb3_sport_base + idx);
+ return 0;
+ }
+ } while (idx != start);
+
+error_out:
+ return -EADDRNOTAVAIL;
+}
+
+static void c3cn_put_port(struct s3_conn *c3cn)
+{
+ struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(c3cn->cdev);
+
+ if (c3cn->saddr.sin_port) {
+ int idx = ntohs(c3cn->saddr.sin_port) - cxgb3_sport_base;
+
+ c3cn->saddr.sin_port = 0;
+ if (idx < 0 || idx >= cxgb3_max_connect)
+ return;
+ clear_bit(idx, cdata->sport_map);
+ c3cn_conn_debug("%s, release port %u.\n",
+ cdata->cdev->name, cxgb3_sport_base + idx);
+ }
+}
+
+static inline void c3cn_set_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
+{
+ __set_bit(flag, &c3cn->flags);
+ c3cn_conn_debug("c3cn 0x%p, set %d, s %u, f 0x%lx.\n",
+ c3cn, flag, c3cn->state, c3cn->flags);
+}
+
+static inline void c3cn_clear_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
+{
+ __clear_bit(flag, &c3cn->flags);
+ c3cn_conn_debug("c3cn 0x%p, clear %d, s %u, f 0x%lx.\n",
+ c3cn, flag, c3cn->state, c3cn->flags);
+}
+
+static inline int c3cn_flag(struct s3_conn *c3cn, enum c3cn_flags flag)
+{
+ if (c3cn == NULL)
+ return 0;
+ return test_bit(flag, &c3cn->flags);
+}
+
+static void c3cn_set_state(struct s3_conn *c3cn, int state)
+{
+ c3cn_conn_debug("c3cn 0x%p state -> %u.\n", c3cn, state);
+ c3cn->state = state;
+}
+
+static inline void c3cn_hold(struct s3_conn *c3cn)
+{
+ atomic_inc(&c3cn->refcnt);
+}
+
+static inline void c3cn_put(struct s3_conn *c3cn)
+{
+ if (atomic_dec_and_test(&c3cn->refcnt)) {
+ c3cn_conn_debug("free c3cn 0x%p, s %u, f 0x%lx.\n",
+ c3cn, c3cn->state, c3cn->flags);
+ kfree(c3cn);
+ }
+}
+
+static void c3cn_closed(struct s3_conn *c3cn)
+{
+ c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
+ c3cn, c3cn->state, c3cn->flags);
+
+ c3cn_put_port(c3cn);
+ c3cn_release_offload_resources(c3cn);
+ c3cn_set_state(c3cn, C3CN_STATE_CLOSED);
+ cxgb3i_conn_closing(c3cn);
+}
+
+/*
+ * CPL (Chelsio Protocol Language) defines a message passing interface between
+ * the host driver and the T3 asic.
+ * The section below implements the CPLs related to iscsi tcp connection
+ * open/close/abort and data send/receive.
+ */
+
+/*
+ * CPL connection active open request: host ->
+ */
+static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
+{
+ int i = 0;
+
+ while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
+ ++i;
+ return i;
+}
+
+static unsigned int select_mss(struct s3_conn *c3cn, unsigned int pmtu)
+{
+ unsigned int idx;
+ struct dst_entry *dst = c3cn->dst_cache;
+ struct t3cdev *cdev = c3cn->cdev;
+ const struct t3c_data *td = T3C_DATA(cdev);
+ u16 advmss = dst_metric(dst, RTAX_ADVMSS);
+
+ if (advmss > pmtu - 40)
+ advmss = pmtu - 40;
+ if (advmss < td->mtus[0] - 40)
+ advmss = td->mtus[0] - 40;
+ idx = find_best_mtu(td, advmss + 40);
+ return idx;
+}
+
+static inline int compute_wscale(int win)
+{
+ int wscale = 0;
+ while (wscale < 14 && (65535<<wscale) < win)
+ wscale++;
+ return wscale;
+}
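+
+/* e.g. the default 256KB cxgb3_rcv_win (262144 bytes) yields wscale = 3,
+ * since 65535 << 2 = 262140 is still smaller than 262144
+ */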
+
+static inline unsigned int calc_opt0h(struct s3_conn *c3cn)
+{
+ int wscale = compute_wscale(cxgb3_rcv_win);
+ return V_KEEP_ALIVE(1) |
+ F_TCAM_BYPASS |
+ V_WND_SCALE(wscale) |
+ V_MSS_IDX(c3cn->mss_idx);
+}
+
+static inline unsigned int calc_opt0l(struct s3_conn *c3cn)
+{
+ return V_ULP_MODE(ULP_MODE_ISCSI) |
+ V_RCV_BUFSIZ(cxgb3_rcv_win>>10);
+}
+
+static void make_act_open_req(struct s3_conn *c3cn, struct sk_buff *skb,
+ unsigned int atid, const struct l2t_entry *e)
+{
+ struct cpl_act_open_req *req;
+
+ c3cn_conn_debug("c3cn 0x%p, atid 0x%x.\n", c3cn, atid);
+
+ skb->priority = CPL_PRIORITY_SETUP;
+ req = (struct cpl_act_open_req *)__skb_put(skb, sizeof(*req));
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, atid));
+ req->local_port = c3cn->saddr.sin_port;
+ req->peer_port = c3cn->daddr.sin_port;
+ req->local_ip = c3cn->saddr.sin_addr.s_addr;
+ req->peer_ip = c3cn->daddr.sin_addr.s_addr;
+ req->opt0h = htonl(calc_opt0h(c3cn) | V_L2T_IDX(e->idx) |
+ V_TX_CHANNEL(e->smt_idx));
+ req->opt0l = htonl(calc_opt0l(c3cn));
+ req->params = 0;
+}
+
+static void fail_act_open(struct s3_conn *c3cn, int errno)
+{
+ c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
+ c3cn, c3cn->state, c3cn->flags);
+ c3cn->err = errno;
+ c3cn_closed(c3cn);
+}
+
+static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
+{
+ struct s3_conn *c3cn = (struct s3_conn *)skb->sk;
+
+ c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state);
+
+ c3cn_hold(c3cn);
+ spin_lock_bh(&c3cn->lock);
+ if (c3cn->state == C3CN_STATE_CONNECTING)
+ fail_act_open(c3cn, EHOSTUNREACH);
+ spin_unlock_bh(&c3cn->lock);
+ c3cn_put(c3cn);
+ __kfree_skb(skb);
+}
+
+/*
+ * CPL connection close request: host ->
+ *
+ * Close a connection by sending a CPL_CLOSE_CON_REQ message and queue it to
+ * the write queue (i.e., after any unsent tx data).
+ */
+static void skb_entail(struct s3_conn *c3cn, struct sk_buff *skb,
+ int flags)
+{
+ CXGB3_SKB_CB(skb)->seq = c3cn->write_seq;
+ CXGB3_SKB_CB(skb)->flags = flags;
+ __skb_queue_tail(&c3cn->write_queue, skb);
+}
+
+static void send_close_req(struct s3_conn *c3cn)
+{
+ struct sk_buff *skb = c3cn->cpl_close;
+ struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
+ unsigned int tid = c3cn->tid;
+
+ c3cn_conn_debug("c3cn 0x%p, state 0x%x, flag 0x%lx.\n",
+ c3cn, c3cn->state, c3cn->flags);
+
+ c3cn->cpl_close = NULL;
+
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
+ req->wr.wr_lo = htonl(V_WR_TID(tid));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
+ req->rsvd = htonl(c3cn->write_seq);
+
+ skb_entail(c3cn, skb, C3CB_FLAG_NO_APPEND);
+ if (c3cn->state != C3CN_STATE_CONNECTING)
+ c3cn_push_tx_frames(c3cn, 1);
+}
+
+/*
+ * CPL connection abort request: host ->
+ *
+ * Send an ABORT_REQ message. Makes sure we do not send multiple ABORT_REQs
+ * for the same connection and also that we do not try to send a message
+ * after the connection has closed.
+ */
+static void abort_arp_failure(struct t3cdev *cdev, struct sk_buff *skb)
+{
+ struct cpl_abort_req *req = cplhdr(skb);
+
+ c3cn_conn_debug("tdev 0x%p.\n", cdev);
+
+ req->cmd = CPL_ABORT_NO_RST;
+ cxgb3_ofld_send(cdev, skb);
+}
+
+static inline void c3cn_purge_write_queue(struct s3_conn *c3cn)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(&c3cn->write_queue)))
+ __kfree_skb(skb);
+}
+
+static void send_abort_req(struct s3_conn *c3cn)
+{
+ struct sk_buff *skb = c3cn->cpl_abort_req;
+ struct cpl_abort_req *req;
+ unsigned int tid = c3cn->tid;
+
+ if (unlikely(c3cn->state == C3CN_STATE_ABORTING) || !skb ||
+ !c3cn->cdev)
+ return;
+
+ c3cn_set_state(c3cn, C3CN_STATE_ABORTING);
+
+ c3cn_conn_debug("c3cn 0x%p, flag ABORT_RPL + ABORT_SHUT.\n", c3cn);
+
+ c3cn_set_flag(c3cn, C3CN_ABORT_RPL_PENDING);
+
+ /* Purge the send queue so we don't send anything after an abort. */
+ c3cn_purge_write_queue(c3cn);
+
+ c3cn->cpl_abort_req = NULL;
+ req = (struct cpl_abort_req *)skb->head;
+
+ skb->priority = CPL_PRIORITY_DATA;
+ set_arp_failure_handler(skb, abort_arp_failure);
+
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
+ req->wr.wr_lo = htonl(V_WR_TID(tid));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
+ req->rsvd0 = htonl(c3cn->snd_nxt);
+ req->rsvd1 = !c3cn_flag(c3cn, C3CN_TX_DATA_SENT);
+ req->cmd = CPL_ABORT_SEND_RST;
+
+ l2t_send(c3cn->cdev, skb, c3cn->l2t);
+}
+
+/*
+ * CPL connection abort reply: host ->
+ *
+ * Send an ABORT_RPL message in response of the ABORT_REQ received.
+ */
+static void send_abort_rpl(struct s3_conn *c3cn, int rst_status)
+{
+ struct sk_buff *skb = c3cn->cpl_abort_rpl;
+ struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;
+
+ c3cn->cpl_abort_rpl = NULL;
+
+ skb->priority = CPL_PRIORITY_DATA;
+ rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
+ rpl->wr.wr_lo = htonl(V_WR_TID(c3cn->tid));
+ OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, c3cn->tid));
+ rpl->cmd = rst_status;
+
+ cxgb3_ofld_send(c3cn->cdev, skb);
+}
+
+/*
+ * CPL connection rx data ack: host ->
+ * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
+ * credits sent.
+ */
+static u32 send_rx_credits(struct s3_conn *c3cn, u32 credits, u32 dack)
+{
+ struct sk_buff *skb;
+ struct cpl_rx_data_ack *req;
+
+ skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
+ if (!skb)
+ return 0;
+
+ req = (struct cpl_rx_data_ack *)__skb_put(skb, sizeof(*req));
+ req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, c3cn->tid));
+ req->credit_dack = htonl(dack | V_RX_CREDITS(credits));
+ skb->priority = CPL_PRIORITY_ACK;
+ cxgb3_ofld_send(c3cn->cdev, skb);
+ return credits;
+}
+
+/*
+ * CPL connection tx data: host ->
+ *
+ * Send iscsi PDUs via TX_DATA CPL messages.
+ * Each TX_DATA consumes work request credits (WRs), so we need to keep track
+ * of how many we've used so far and how many are still pending (i.e., not yet
+ * acked by T3).
+ */
+
+/*
+ * For ULP connections the HW may insert digest bytes into the pdu. Those digest
+ * bytes are not sent by the host but are part of the TCP payload and therefore
+ * consume TCP sequence space.
+ */
+static const unsigned int cxgb3_ulp_extra_len[] = { 0, 4, 4, 8 };
+static inline unsigned int ulp_extra_len(const struct sk_buff *skb)
+{
+ return cxgb3_ulp_extra_len[skb_ulp_mode(skb) & 3];
+}
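+
+/*
+ * Worked example for the table above: the low two bits of the ULP submode
+ * select no digests (0 extra bytes), header digest only (4), data digest
+ * only (4), or both (8). A PDU sent with both digests enabled therefore
+ * advances the TCP sequence space by skb->len + 8, even though the host
+ * never queues those 8 CRC bytes itself.
+ */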
+
+static unsigned int wrlen __read_mostly;
+
+/*
+ * The number of WRs needed for an skb depends on the number of fragments
+ * in the skb and whether it has any payload in its main body. This maps the
+ * length of the gather list represented by an skb into the # of necessary WRs.
+ *
+ * The max. length of an skb is controlled by the max pdu size, which is ~16K.
+ * Also, assume the min. fragment length is the sector size (512), then add
+ * extra fragment counts for the iscsi bhs and payload padding.
+ */
+#define SKB_WR_LIST_SIZE (16384/512 + 3)
+static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly;
+
+static void s3_init_wr_tab(unsigned int wr_len)
+{
+ int i;
+
+ if (skb_wrs[1]) /* already initialized */
+ return;
+
+ for (i = 1; i < SKB_WR_LIST_SIZE; i++) {
+ int sgl_len = (3 * i) / 2 + (i & 1);
+
+ sgl_len += 3;
+ skb_wrs[i] = (sgl_len <= wr_len
+ ? 1 : 1 + (sgl_len - 2) / (wr_len - 1));
+ }
+
+ wrlen = wr_len * 8;
+}
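+
+/*
+ * Worked example for s3_init_wr_tab(), with hypothetical numbers: for an
+ * skb gather list of i = 4 entries, sgl_len = (3 * 4) / 2 + (4 & 1) + 3 = 9.
+ * If the firmware reported wr_len = 8, that does not fit in one WR, so
+ * skb_wrs[4] = 1 + (9 - 2) / (8 - 1) = 2 work requests.
+ */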
+
+static inline void reset_wr_list(struct s3_conn *c3cn)
+{
+ c3cn->wr_pending_head = NULL;
+}
+
+/*
+ * Add a WR to a connection's list of pending WRs. This is a singly-linked
+ * list of sk_buffs operating as a FIFO. The head is kept in wr_pending_head
+ * and the tail in wr_pending_tail.
+ */
+static inline void enqueue_wr(struct s3_conn *c3cn,
+ struct sk_buff *skb)
+{
+ skb_wr_data(skb) = NULL;
+
+ /*
+ * We want to take an extra reference since both us and the driver
+ * need to free the packet before it's really freed. We know there's
+ * just one user currently so we use atomic_set rather than skb_get
+ * to avoid the atomic op.
+ */
+ atomic_set(&skb->users, 2);
+
+ if (!c3cn->wr_pending_head)
+ c3cn->wr_pending_head = skb;
+ else
+ skb_wr_data(c3cn->wr_pending_tail) = skb; /* link after current tail */
+ c3cn->wr_pending_tail = skb;
+}
+
+static inline struct sk_buff *peek_wr(const struct s3_conn *c3cn)
+{
+ return c3cn->wr_pending_head;
+}
+
+static inline void free_wr_skb(struct sk_buff *skb)
+{
+ kfree_skb(skb);
+}
+
+static inline struct sk_buff *dequeue_wr(struct s3_conn *c3cn)
+{
+ struct sk_buff *skb = c3cn->wr_pending_head;
+
+ if (likely(skb)) {
+ /* Don't bother clearing the tail */
+ c3cn->wr_pending_head = skb_wr_data(skb);
+ skb_wr_data(skb) = NULL;
+ }
+ return skb;
+}
+
+static void purge_wr_queue(struct s3_conn *c3cn)
+{
+ struct sk_buff *skb;
+ while ((skb = dequeue_wr(c3cn)) != NULL)
+ free_wr_skb(skb);
+}
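+
+/*
+ * Usage note for the pending-WR helpers above: enqueue_wr() leaves each
+ * skb with two references -- one dropped by free_wr_skb() when the WR_ACK
+ * arrives, the other by the lower layers once the DMA is done. The list
+ * is only ever walked from the head, which is why dequeue_wr() can leave
+ * wr_pending_tail stale.
+ */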
+
+static inline void make_tx_data_wr(struct s3_conn *c3cn, struct sk_buff *skb,
+ int len)
+{
+ struct tx_data_wr *req;
+
+ skb_reset_transport_header(skb);
+ req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req));
+ req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
+ req->wr_lo = htonl(V_WR_TID(c3cn->tid));
+ req->sndseq = htonl(c3cn->snd_nxt);
+ /* len includes the length of any HW ULP additions */
+ req->len = htonl(len);
+ req->param = htonl(V_TX_PORT(c3cn->l2t->smt_idx));
+ /* V_TX_ULP_SUBMODE sets both the mode and submode */
+ req->flags = htonl(V_TX_ULP_SUBMODE(skb_ulp_mode(skb)) |
+ V_TX_SHOVE((skb_peek(&c3cn->write_queue) ? 0 : 1)));
+
+ if (!c3cn_flag(c3cn, C3CN_TX_DATA_SENT)) {
+ req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT |
+ V_TX_CPU_IDX(c3cn->qset));
+ /* Sendbuffer is in units of 32KB. */
+ req->param |= htonl(V_TX_SNDBUF(cxgb3_snd_win >> 15));
+ c3cn_set_flag(c3cn, C3CN_TX_DATA_SENT);
+ }
+}
+
+static void arp_failure_discard(struct t3cdev *cdev, struct sk_buff *skb)
+{
+ kfree_skb(skb);
+}
+
+/**
+ * c3cn_push_tx_frames - start transmit
+ * @c3cn: the offloaded connection
+ * @req_completion: request wr_ack or not
+ *
+ * Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a
+ * connection's send queue and sends them on to T3. Must be called with the
+ * connection's lock held. Returns the amount of send buffer space that was
+ * freed as a result of sending queued data to T3.
+ */
+static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion)
+{
+ int total_size = 0;
+ struct sk_buff *skb;
+ struct t3cdev *cdev;
+ struct cxgb3i_sdev_data *cdata;
+
+ if (unlikely(c3cn->state == C3CN_STATE_CONNECTING ||
+ c3cn->state == C3CN_STATE_CLOSE_WAIT_1 ||
+ c3cn->state == C3CN_STATE_ABORTING)) {
+ c3cn_tx_debug("c3cn 0x%p, in closing state %u.\n",
+ c3cn, c3cn->state);
+ return 0;
+ }
+
+ cdev = c3cn->cdev;
+ cdata = CXGB3_SDEV_DATA(cdev);
+
+ while (c3cn->wr_avail
+ && (skb = skb_peek(&c3cn->write_queue)) != NULL) {
+ int len = skb->len; /* length before skb_push */
+ int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len);
+ int wrs_needed = skb_wrs[frags];
+
+ if (wrs_needed > 1 && len + sizeof(struct tx_data_wr) <= wrlen)
+ wrs_needed = 1;
+
+ WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1);
+
+ if (c3cn->wr_avail < wrs_needed) {
+ c3cn_tx_debug("c3cn 0x%p, skb len %u/%u, frag %u, "
+ "wr %d < %u.\n",
+ c3cn, skb->len, skb->data_len, frags,
+ wrs_needed, c3cn->wr_avail);
+ break;
+ }
+
+ __skb_unlink(skb, &c3cn->write_queue);
+ skb->priority = CPL_PRIORITY_DATA;
+ skb->csum = wrs_needed; /* remember this until the WR_ACK */
+ c3cn->wr_avail -= wrs_needed;
+ c3cn->wr_unacked += wrs_needed;
+ enqueue_wr(c3cn, skb);
+
+ if (likely(CXGB3_SKB_CB(skb)->flags & C3CB_FLAG_NEED_HDR)) {
+ len += ulp_extra_len(skb);
+ make_tx_data_wr(c3cn, skb, len);
+ c3cn->snd_nxt += len;
+ if ((req_completion
+ && c3cn->wr_unacked == wrs_needed)
+ || (CXGB3_SKB_CB(skb)->flags & C3CB_FLAG_COMPL)
+ || c3cn->wr_unacked >= c3cn->wr_max / 2) {
+ struct work_request_hdr *wr = cplhdr(skb);
+
+ wr->wr_hi |= htonl(F_WR_COMPL);
+ c3cn->wr_unacked = 0;
+ }
+ CXGB3_SKB_CB(skb)->flags &= ~C3CB_FLAG_NEED_HDR;
+ }
+
+ total_size += skb->truesize;
+ set_arp_failure_handler(skb, arp_failure_discard);
+ l2t_send(cdev, skb, c3cn->l2t);
+ }
+ return total_size;
+}
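+
+/*
+ * Example of the credit accounting above, with hypothetical numbers: an
+ * skb with payload in its linear area plus 3 page fragments counts
+ * frags = 4, so it is charged wrs_needed = skb_wrs[4] credits -- unless
+ * the payload plus the tx_data_wr header fits within wrlen bytes, in
+ * which case it collapses to a single WR. Credits debited here are
+ * returned by process_wr_ack() below.
+ */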
+
+/*
+ * process_cpl_msg: -> host
+ * Top-level CPL message processing used by most CPL messages that
+ * pertain to connections.
+ */
+static inline void process_cpl_msg(void (*fn)(struct s3_conn *,
+ struct sk_buff *),
+ struct s3_conn *c3cn,
+ struct sk_buff *skb)
+{
+ spin_lock_bh(&c3cn->lock);
+ fn(c3cn, skb);
+ spin_unlock_bh(&c3cn->lock);
+}
+
+/*
+ * process_cpl_msg_ref: -> host
+ * Similar to process_cpl_msg() but takes an extra connection reference around
+ * the call to the handler. Should be used if the handler may drop a
+ * connection reference.
+ */
+static inline void process_cpl_msg_ref(void (*fn) (struct s3_conn *,
+ struct sk_buff *),
+ struct s3_conn *c3cn,
+ struct sk_buff *skb)
+{
+ c3cn_hold(c3cn);
+ process_cpl_msg(fn, c3cn, skb);
+ c3cn_put(c3cn);
+}
+
+/*
+ * Process a CPL_ACT_ESTABLISH message: -> host
+ * Updates connection state from an active establish CPL message. Runs with
+ * the connection lock held.
+ */
+
+static inline void s3_free_atid(struct t3cdev *cdev, unsigned int tid)
+{
+ struct s3_conn *c3cn = cxgb3_free_atid(cdev, tid);
+ if (c3cn)
+ c3cn_put(c3cn);
+}
+
+static void c3cn_established(struct s3_conn *c3cn, u32 snd_isn,
+ unsigned int opt)
+{
+ c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state);
+
+ c3cn->write_seq = c3cn->snd_nxt = c3cn->snd_una = snd_isn;
+
+ /*
+ * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
+ * pass through opt0.
+ */
+ if (cxgb3_rcv_win > (M_RCV_BUFSIZ << 10))
+ c3cn->rcv_wup -= cxgb3_rcv_win - (M_RCV_BUFSIZ << 10);
+
+ dst_confirm(c3cn->dst_cache);
+
+ smp_mb();
+
+ c3cn_set_state(c3cn, C3CN_STATE_ESTABLISHED);
+}
+
+static void process_act_establish(struct s3_conn *c3cn, struct sk_buff *skb)
+{
+ struct cpl_act_establish *req = cplhdr(skb);
+ u32 rcv_isn = ntohl(req->rcv_isn); /* real RCV_ISN + 1 */
+
+ c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
+ c3cn, c3cn->state, c3cn->flags);
+
+ if (unlikely(c3cn->state != C3CN_STATE_CONNECTING))
+ cxgb3i_log_error("TID %u expected SYN_SENT, got EST., s %u\n",
+ c3cn->tid, c3cn->state);
+
+ c3cn->copied_seq = c3cn->rcv_wup = c3cn->rcv_nxt = rcv_isn;
+ c3cn_established(c3cn, ntohl(req->snd_isn), ntohs(req->tcp_opt));
+
+ __kfree_skb(skb);
+
+ if (unlikely(c3cn_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED)))
+ /* upper layer has requested closing */
+ send_abort_req(c3cn);
+ else if (c3cn_push_tx_frames(c3cn, 1))
+ cxgb3i_conn_tx_open(c3cn);
+}
+
+static int do_act_establish(struct t3cdev *cdev, struct sk_buff *skb,
+ void *ctx)
+{
+ struct cpl_act_establish *req = cplhdr(skb);
+ unsigned int tid = GET_TID(req);
+ unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
+ struct s3_conn *c3cn = ctx;
+ struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(cdev);
+
+ c3cn_conn_debug("rcv, tid 0x%x, c3cn 0x%p, s %u, f 0x%lx.\n",
+ tid, c3cn, c3cn->state, c3cn->flags);
+
+ c3cn->tid = tid;
+ c3cn_hold(c3cn);
+ cxgb3_insert_tid(cdata->cdev, cdata->client, c3cn, tid);
+ s3_free_atid(cdev, atid);
+
+ c3cn->qset = G_QNUM(ntohl(skb->csum));
+
+ process_cpl_msg(process_act_establish, c3cn, skb);
+ return 0;
+}
+
+/*
+ * Process a CPL_ACT_OPEN_RPL message: -> host
+ * Handle active open failures.
+ */
+static int act_open_rpl_status_to_errno(int status)
+{
+ switch (status) {
+ case CPL_ERR_CONN_RESET:
+ return ECONNREFUSED;
+ case CPL_ERR_ARP_MISS:
+ return EHOSTUNREACH;
+ case CPL_ERR_CONN_TIMEDOUT:
+ return ETIMEDOUT;
+ case CPL_ERR_TCAM_FULL:
+ return ENOMEM;
+ case CPL_ERR_CONN_EXIST:
+ cxgb3i_log_error("ACTIVE_OPEN_RPL: 4-tuple in use\n");
+ return EADDRINUSE;
+ default:
+ return EIO;
+ }
+}
+
+static void act_open_retry_timer(unsigned long data)
+{
+ struct sk_buff *skb;
+ struct s3_conn *c3cn = (struct s3_conn *)data;
+
+ c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state);
+
+ spin_lock_bh(&c3cn->lock);
+ skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_ATOMIC);
+ if (!skb)
+ fail_act_open(c3cn, ENOMEM);
+ else {
+ skb->sk = (struct sock *)c3cn;
+ set_arp_failure_handler(skb, act_open_req_arp_failure);
+ make_act_open_req(c3cn, skb, c3cn->tid, c3cn->l2t);
+ l2t_send(c3cn->cdev, skb, c3cn->l2t);
+ }
+ spin_unlock_bh(&c3cn->lock);
+ c3cn_put(c3cn);
+}
+
+static void process_act_open_rpl(struct s3_conn *c3cn, struct sk_buff *skb)
+{
+ struct cpl_act_open_rpl *rpl = cplhdr(skb);
+
+ c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
+ c3cn, c3cn->state, c3cn->flags);
+
+ if (rpl->status == CPL_ERR_CONN_EXIST &&
+ c3cn->retry_timer.function != act_open_retry_timer) {
+ c3cn->retry_timer.function = act_open_retry_timer;
+ if (!mod_timer(&c3cn->retry_timer, jiffies + HZ / 2))
+ c3cn_hold(c3cn);
+ } else
+ fail_act_open(c3cn, act_open_rpl_status_to_errno(rpl->status));
+ __kfree_skb(skb);
+}
+
+static int do_act_open_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
+{
+ struct s3_conn *c3cn = ctx;
+ struct cpl_act_open_rpl *rpl = cplhdr(skb);
+
+ c3cn_conn_debug("rcv, status 0x%x, c3cn 0x%p, s %u, f 0x%lx.\n",
+ rpl->status, c3cn, c3cn->state, c3cn->flags);
+
+ if (rpl->status != CPL_ERR_TCAM_FULL &&
+ rpl->status != CPL_ERR_CONN_EXIST &&
+ rpl->status != CPL_ERR_ARP_MISS)
+ cxgb3_queue_tid_release(cdev, GET_TID(rpl));
+
+ process_cpl_msg_ref(process_act_open_rpl, c3cn, skb);
+ return 0;
+}
+
+/*
+ * Process PEER_CLOSE CPL messages: -> host
+ * Handle peer FIN.
+ */
+static void process_peer_close(struct s3_conn *c3cn, struct sk_buff *skb)
+{
+ c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
+ c3cn, c3cn->state, c3cn->flags);
+
+ if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING))
+ goto out;
+
+ switch (c3cn->state) {
+ case C3CN_STATE_ESTABLISHED:
+ c3cn_set_state(c3cn, C3CN_STATE_PASSIVE_CLOSE);
+ break;
+ case C3CN_STATE_ACTIVE_CLOSE:
+ c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_2);
+ break;
+ case C3CN_STATE_CLOSE_WAIT_1:
+ c3cn_closed(c3cn);
+ break;
+ case C3CN_STATE_ABORTING:
+ break;
+ default:
+ cxgb3i_log_error("%s: peer close, TID %u in bad state %u\n",
+ c3cn->cdev->name, c3cn->tid, c3cn->state);
+ }
+
+ cxgb3i_conn_closing(c3cn);
+out:
+ __kfree_skb(skb);
+}
+
+static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
+{
+ struct s3_conn *c3cn = ctx;
+
+ c3cn_conn_debug("rcv, c3cn 0x%p, s %u, f 0x%lx.\n",
+ c3cn, c3cn->state, c3cn->flags);
+ process_cpl_msg_ref(process_peer_close, c3cn, skb);
+ return 0;
+}
+
+/*
+ * Process CLOSE_CONN_RPL CPL message: -> host
+ * Process a peer ACK to our FIN.
+ */
+static void process_close_con_rpl(struct s3_conn *c3cn, struct sk_buff *skb)
+{
+ struct cpl_close_con_rpl *rpl = cplhdr(skb);
+
+ c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
+ c3cn, c3cn->state, c3cn->flags);
+
+ c3cn->snd_una = ntohl(rpl->snd_nxt) - 1; /* exclude FIN */
+
+ if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING))
+ goto out;
+
+ switch (c3cn->state) {
+ case C3CN_STATE_ACTIVE_CLOSE:
+ c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_1);
+ break;
+ case C3CN_STATE_CLOSE_WAIT_1:
+ case C3CN_STATE_CLOSE_WAIT_2:
+ c3cn_closed(c3cn);
+ break;
+ case C3CN_STATE_ABORTING:
+ break;
+ default:
+ cxgb3i_log_error("%s: close_rpl, TID %u in bad state %u\n",
+ c3cn->cdev->name, c3cn->tid, c3cn->state);
+ }
+
+out:
+ kfree_skb(skb);
+}
+
+static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb,
+ void *ctx)
+{
+ struct s3_conn *c3cn = ctx;
+
+ c3cn_conn_debug("rcv, c3cn 0x%p, s %u, f 0x%lx.\n",
+ c3cn, c3cn->state, c3cn->flags);
+
+ process_cpl_msg_ref(process_close_con_rpl, c3cn, skb);
+ return 0;
+}
+
+/*
+ * Process ABORT_REQ_RSS CPL message: -> host
+ * Process abort requests. If we are waiting for an ABORT_RPL we ignore this
+ * request except that we need to reply to it.
+ */
+
+static int abort_status_to_errno(struct s3_conn *c3cn, int abort_reason,
+ int *need_rst)
+{
+ switch (abort_reason) {
+ case CPL_ERR_BAD_SYN: /* fall through */
+ case CPL_ERR_CONN_RESET:
+ return c3cn->state > C3CN_STATE_ESTABLISHED ?
+ EPIPE : ECONNRESET;
+ case CPL_ERR_XMIT_TIMEDOUT:
+ case CPL_ERR_PERSIST_TIMEDOUT:
+ case CPL_ERR_FINWAIT2_TIMEDOUT:
+ case CPL_ERR_KEEPALIVE_TIMEDOUT:
+ return ETIMEDOUT;
+ default:
+ return EIO;
+ }
+}
+
+static void process_abort_req(struct s3_conn *c3cn, struct sk_buff *skb)
+{
+ int rst_status = CPL_ABORT_NO_RST;
+ const struct cpl_abort_req_rss *req = cplhdr(skb);
+
+ c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
+ c3cn, c3cn->state, c3cn->flags);
+
+ if (!c3cn_flag(c3cn, C3CN_ABORT_REQ_RCVD)) {
+ c3cn_set_flag(c3cn, C3CN_ABORT_REQ_RCVD);
+ c3cn_set_state(c3cn, C3CN_STATE_ABORTING);
+ __kfree_skb(skb);
+ return;
+ }
+
+ c3cn_clear_flag(c3cn, C3CN_ABORT_REQ_RCVD);
+ send_abort_rpl(c3cn, rst_status);
+
+ if (!c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING)) {
+ c3cn->err =
+ abort_status_to_errno(c3cn, req->status, &rst_status);
+ c3cn_closed(c3cn);
+ }
+}
+
+static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
+{
+ const struct cpl_abort_req_rss *req = cplhdr(skb);
+ struct s3_conn *c3cn = ctx;
+
+ c3cn_conn_debug("rcv, c3cn 0x%p, s 0x%x, f 0x%lx.\n",
+ c3cn, c3cn->state, c3cn->flags);
+
+ if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
+ req->status == CPL_ERR_PERSIST_NEG_ADVICE) {
+ __kfree_skb(skb);
+ return 0;
+ }
+
+ process_cpl_msg_ref(process_abort_req, c3cn, skb);
+ return 0;
+}
+
+/*
+ * Process ABORT_RPL_RSS CPL message: -> host
+ * Process abort replies. We only process these messages if we anticipate
+ * them, as the coordination between SW and HW in this area is somewhat
+ * lacking and sometimes we get ABORT_RPLs after we are done with the
+ * connection that originated the ABORT_REQ.
+ */
+static void process_abort_rpl(struct s3_conn *c3cn, struct sk_buff *skb)
+{
+ c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
+ c3cn, c3cn->state, c3cn->flags);
+
+ if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING)) {
+ if (!c3cn_flag(c3cn, C3CN_ABORT_RPL_RCVD))
+ c3cn_set_flag(c3cn, C3CN_ABORT_RPL_RCVD);
+ else {
+ c3cn_clear_flag(c3cn, C3CN_ABORT_RPL_RCVD);
+ c3cn_clear_flag(c3cn, C3CN_ABORT_RPL_PENDING);
+ if (c3cn_flag(c3cn, C3CN_ABORT_REQ_RCVD))
+ cxgb3i_log_error("%s tid %u, ABORT_RPL_RSS\n",
+ c3cn->cdev->name, c3cn->tid);
+ c3cn_closed(c3cn);
+ }
+ }
+ __kfree_skb(skb);
+}
+
+static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
+{
+ struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
+ struct s3_conn *c3cn = ctx;
+
+ c3cn_conn_debug("rcv, status 0x%x, c3cn 0x%p, s %u, 0x%lx.\n",
+ rpl->status, c3cn, c3cn ? c3cn->state : 0,
+ c3cn ? c3cn->flags : 0UL);
+
+ /*
+ * Ignore replies to post-close aborts indicating that the abort was
+ * requested too late. These connections are terminated when we get
+ * PEER_CLOSE or CLOSE_CON_RPL and by the time the abort_rpl_rss
+ * arrives the TID is either no longer used or it has been recycled.
+ */
+ if (rpl->status == CPL_ERR_ABORT_FAILED)
+ goto discard;
+
+ /*
+ * Sometimes we've already closed the connection, e.g., a post-close
+ * abort races with ABORT_REQ_RSS, the latter frees the connection
+ * expecting the ABORT_REQ will fail with CPL_ERR_ABORT_FAILED,
+ * but FW turns the ABORT_REQ into a regular one and so we get
+ * ABORT_RPL_RSS with status 0 and no connection.
+ */
+ if (!c3cn)
+ goto discard;
+
+ process_cpl_msg_ref(process_abort_rpl, c3cn, skb);
+ return 0;
+
+discard:
+ __kfree_skb(skb);
+ return 0;
+}
+
+/*
+ * Process RX_ISCSI_HDR CPL message: -> host
+ * Handle received PDUs: the payload could be DDP'ed. If not, the payload
+ * follows after the bhs.
+ */
+static void process_rx_iscsi_hdr(struct s3_conn *c3cn, struct sk_buff *skb)
+{
+ struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb);
+ struct cpl_iscsi_hdr_norss data_cpl;
+ struct cpl_rx_data_ddp_norss ddp_cpl;
+ unsigned int hdr_len, data_len, status;
+ unsigned int len;
+ int err;
+
+ if (unlikely(c3cn->state >= C3CN_STATE_PASSIVE_CLOSE)) {
+ if (c3cn->state != C3CN_STATE_ABORTING)
+ send_abort_req(c3cn);
+ __kfree_skb(skb);
+ return;
+ }
+
+ CXGB3_SKB_CB(skb)->seq = ntohl(hdr_cpl->seq);
+ CXGB3_SKB_CB(skb)->flags = 0;
+
+ skb_reset_transport_header(skb);
+ __skb_pull(skb, sizeof(struct cpl_iscsi_hdr));
+
+ len = hdr_len = ntohs(hdr_cpl->len);
+ /* msg coalesce is off or not enough data received */
+ if (skb->len <= hdr_len) {
+ cxgb3i_log_error("%s: TID %u, ISCSI_HDR, skb len %u < %u.\n",
+ c3cn->cdev->name, c3cn->tid,
+ skb->len, hdr_len);
+ goto abort_conn;
+ }
+
+ err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl,
+ sizeof(ddp_cpl));
+ if (err < 0)
+ goto abort_conn;
+
+ skb_ulp_mode(skb) = ULP2_FLAG_DATA_READY;
+ skb_ulp_pdulen(skb) = ntohs(ddp_cpl.len);
+ skb_ulp_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
+ status = ntohl(ddp_cpl.ddp_status);
+
+ c3cn_rx_debug("rx skb 0x%p, len %u, pdulen %u, ddp status 0x%x.\n",
+ skb, skb->len, skb_ulp_pdulen(skb), status);
+
+ if (status & (1 << RX_DDP_STATUS_HCRC_SHIFT))
+ skb_ulp_mode(skb) |= ULP2_FLAG_HCRC_ERROR;
+ if (status & (1 << RX_DDP_STATUS_DCRC_SHIFT))
+ skb_ulp_mode(skb) |= ULP2_FLAG_DCRC_ERROR;
+ if (status & (1 << RX_DDP_STATUS_PAD_SHIFT))
+ skb_ulp_mode(skb) |= ULP2_FLAG_PAD_ERROR;
+
+ if (skb->len > (hdr_len + sizeof(ddp_cpl))) {
+ err = skb_copy_bits(skb, hdr_len, &data_cpl, sizeof(data_cpl));
+ if (err < 0)
+ goto abort_conn;
+ data_len = ntohs(data_cpl.len);
+ len += sizeof(data_cpl) + data_len;
+ } else if (status & (1 << RX_DDP_STATUS_DDP_SHIFT))
+ skb_ulp_mode(skb) |= ULP2_FLAG_DATA_DDPED;
+
+ c3cn->rcv_nxt = ntohl(ddp_cpl.seq) + skb_ulp_pdulen(skb);
+ __pskb_trim(skb, len);
+ __skb_queue_tail(&c3cn->receive_queue, skb);
+ cxgb3i_conn_pdu_ready(c3cn);
+
+ return;
+
+abort_conn:
+ send_abort_req(c3cn);
+ __kfree_skb(skb);
+}
+
+static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx)
+{
+ struct s3_conn *c3cn = ctx;
+
+ process_cpl_msg(process_rx_iscsi_hdr, c3cn, skb);
+ return 0;
+}
+
+/*
+ * Process TX_DATA_ACK CPL messages: -> host
+ * Process an acknowledgment of WR completion. Advance snd_una and send the
+ * next batch of work requests from the write queue.
+ */
+static void process_wr_ack(struct s3_conn *c3cn, struct sk_buff *skb)
+{
+ struct cpl_wr_ack *hdr = cplhdr(skb);
+ unsigned int credits = ntohs(hdr->credits);
+ u32 snd_una = ntohl(hdr->snd_una);
+
+ c3cn->wr_avail += credits;
+ if (c3cn->wr_unacked > c3cn->wr_max - c3cn->wr_avail)
+ c3cn->wr_unacked = c3cn->wr_max - c3cn->wr_avail;
+
+ while (credits) {
+ struct sk_buff *p = peek_wr(c3cn);
+
+ if (unlikely(!p)) {
+ cxgb3i_log_error("%u WR_ACK credits for TID %u with "
+ "nothing pending, state %u\n",
+ credits, c3cn->tid, c3cn->state);
+ break;
+ }
+ if (unlikely(credits < p->csum)) {
+ p->csum -= credits;
+ break;
+ } else {
+ dequeue_wr(c3cn);
+ credits -= p->csum;
+ free_wr_skb(p);
+ }
+ }
+
+ if (unlikely(before(snd_una, c3cn->snd_una)))
+ goto out_free;
+
+ if (c3cn->snd_una != snd_una) {
+ c3cn->snd_una = snd_una;
+ dst_confirm(c3cn->dst_cache);
+ }
+
+ if (skb_queue_len(&c3cn->write_queue) && c3cn_push_tx_frames(c3cn, 0))
+ cxgb3i_conn_tx_open(c3cn);
+out_free:
+ __kfree_skb(skb);
+}
+
+static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
+{
+ struct s3_conn *c3cn = ctx;
+
+ process_cpl_msg(process_wr_ack, c3cn, skb);
+ return 0;
+}
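+
+/*
+ * Worked example for the ack path above (hypothetical numbers): with two
+ * pending skbs charged p->csum = 2 and 3 WRs and a WR_ACK carrying
+ * credits = 4, the first skb is dequeued and freed (leaving 2 credits)
+ * while the second only has its csum reduced to 1; its buffer is released
+ * by a later ack.
+ */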
+
+/*
+ * For each connection, pre-allocate the skbs needed for close/abort requests,
+ * so that we can service these requests right away.
+ */
+static void c3cn_free_cpl_skbs(struct s3_conn *c3cn)
+{
+ if (c3cn->cpl_close)
+ kfree_skb(c3cn->cpl_close);
+ if (c3cn->cpl_abort_req)
+ kfree_skb(c3cn->cpl_abort_req);
+ if (c3cn->cpl_abort_rpl)
+ kfree_skb(c3cn->cpl_abort_rpl);
+}
+
+static int c3cn_alloc_cpl_skbs(struct s3_conn *c3cn)
+{
+ c3cn->cpl_close = alloc_skb(sizeof(struct cpl_close_con_req),
+ GFP_KERNEL);
+ if (!c3cn->cpl_close)
+ return -ENOMEM;
+ skb_put(c3cn->cpl_close, sizeof(struct cpl_close_con_req));
+
+ c3cn->cpl_abort_req = alloc_skb(sizeof(struct cpl_abort_req),
+ GFP_KERNEL);
+ if (!c3cn->cpl_abort_req)
+ goto free_cpl_skbs;
+ skb_put(c3cn->cpl_abort_req, sizeof(struct cpl_abort_req));
+
+ c3cn->cpl_abort_rpl = alloc_skb(sizeof(struct cpl_abort_rpl),
+ GFP_KERNEL);
+ if (!c3cn->cpl_abort_rpl)
+ goto free_cpl_skbs;
+ skb_put(c3cn->cpl_abort_rpl, sizeof(struct cpl_abort_rpl));
+
+ return 0;
+
+free_cpl_skbs:
+ c3cn_free_cpl_skbs(c3cn);
+ return -ENOMEM;
+}
+
+/**
+ * c3cn_release_offload_resources - release offload resources
+ * @c3cn: the offloaded iscsi tcp connection.
+ *
+ * Release resources held by an offload connection (TID, L2T entry, etc.)
+ */
+static void c3cn_release_offload_resources(struct s3_conn *c3cn)
+{
+ struct t3cdev *cdev = c3cn->cdev;
+ unsigned int tid = c3cn->tid;
+
+ if (!cdev)
+ return;
+
+ c3cn->qset = 0;
+
+ c3cn_free_cpl_skbs(c3cn);
+
+ if (c3cn->wr_avail != c3cn->wr_max) {
+ purge_wr_queue(c3cn);
+ reset_wr_list(c3cn);
+ }
+
+ if (c3cn->l2t) {
+ l2t_release(L2DATA(cdev), c3cn->l2t);
+ c3cn->l2t = NULL;
+ }
+
+ if (c3cn->state == C3CN_STATE_CONNECTING) /* we have ATID */
+ s3_free_atid(cdev, tid);
+ else { /* we have TID */
+ cxgb3_remove_tid(cdev, (void *)c3cn, tid);
+ c3cn_put(c3cn);
+ }
+
+ c3cn->cdev = NULL;
+}
+
+/**
+ * cxgb3i_c3cn_create - allocate and initialize an s3_conn structure
+ *
+ * Returns the s3_conn structure allocated, or NULL on failure.
+ */
+struct s3_conn *cxgb3i_c3cn_create(void)
+{
+ struct s3_conn *c3cn;
+
+ c3cn = kzalloc(sizeof(*c3cn), GFP_KERNEL);
+ if (!c3cn)
+ return NULL;
+
+ /*
+ * pre-allocate close/abort cpl, so we don't need to wait for memory
+ * when close/abort is requested.
+ */
+ if (c3cn_alloc_cpl_skbs(c3cn) < 0)
+ goto free_c3cn;
+
+ c3cn_conn_debug("alloc c3cn 0x%p.\n", c3cn);
+
+ c3cn->flags = 0;
+ spin_lock_init(&c3cn->lock);
+ atomic_set(&c3cn->refcnt, 1);
+ skb_queue_head_init(&c3cn->receive_queue);
+ skb_queue_head_init(&c3cn->write_queue);
+ setup_timer(&c3cn->retry_timer, NULL, (unsigned long)c3cn);
+ rwlock_init(&c3cn->callback_lock);
+
+ return c3cn;
+
+free_c3cn:
+ kfree(c3cn);
+ return NULL;
+}
+
+static void c3cn_active_close(struct s3_conn *c3cn)
+{
+ int data_lost;
+ int close_req = 0;
+
+ c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
+ c3cn, c3cn->state, c3cn->flags);
+
+ dst_confirm(c3cn->dst_cache);
+
+ c3cn_hold(c3cn);
+ spin_lock_bh(&c3cn->lock);
+
+ data_lost = skb_queue_len(&c3cn->receive_queue);
+ __skb_queue_purge(&c3cn->receive_queue);
+
+ switch (c3cn->state) {
+ case C3CN_STATE_CLOSED:
+ case C3CN_STATE_ACTIVE_CLOSE:
+ case C3CN_STATE_CLOSE_WAIT_1:
+ case C3CN_STATE_CLOSE_WAIT_2:
+ case C3CN_STATE_ABORTING:
+ /* nothing needs to be done */
+ break;
+ case C3CN_STATE_CONNECTING:
+ /* defer until cpl_act_open_rpl or cpl_act_establish */
+ c3cn_set_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED);
+ break;
+ case C3CN_STATE_ESTABLISHED:
+ close_req = 1;
+ c3cn_set_state(c3cn, C3CN_STATE_ACTIVE_CLOSE);
+ break;
+ case C3CN_STATE_PASSIVE_CLOSE:
+ close_req = 1;
+ c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_2);
+ break;
+ }
+
+ if (close_req) {
+ if (data_lost)
+ /* Unread data was tossed, zap the connection. */
+ send_abort_req(c3cn);
+ else
+ send_close_req(c3cn);
+ }
+
+ spin_unlock_bh(&c3cn->lock);
+ c3cn_put(c3cn);
+}
+
+/**
+ * cxgb3i_c3cn_release - close and release an iscsi tcp connection and any
+ * resources held
+ * @c3cn: the iscsi tcp connection
+ */
+void cxgb3i_c3cn_release(struct s3_conn *c3cn)
+{
+ c3cn_conn_debug("c3cn 0x%p, s %u, f 0x%lx.\n",
+ c3cn, c3cn->state, c3cn->flags);
+ if (likely(c3cn->state != C3CN_STATE_CONNECTING))
+ c3cn_active_close(c3cn);
+ else
+ c3cn_set_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED);
+ c3cn_put(c3cn);
+}
+
+static int is_cxgb3_dev(struct net_device *dev)
+{
+ struct cxgb3i_sdev_data *cdata;
+
+ write_lock(&cdata_rwlock);
+ list_for_each_entry(cdata, &cdata_list, list) {
+ struct adap_ports *ports = &cdata->ports;
+ int i;
+
+ for (i = 0; i < ports->nports; i++)
+ if (dev == ports->lldevs[i]) {
+ write_unlock(&cdata_rwlock);
+ return 1;
+ }
+ }
+ write_unlock(&cdata_rwlock);
+ return 0;
+}
+
+/**
+ * cxgb3_egress_dev - return the cxgb3 egress device
+ * @root_dev: the root device anchoring the search
+ * @c3cn: the connection used to determine egress port in bonding mode
+ * @context: in bonding mode, indicates a connection set up or failover
+ *
+ * Return egress device or NULL if the egress device isn't one of our ports.
+ */
+static struct net_device *cxgb3_egress_dev(struct net_device *root_dev,
+ struct s3_conn *c3cn,
+ int context)
+{
+ while (root_dev) {
+ if (root_dev->priv_flags & IFF_802_1Q_VLAN)
+ root_dev = vlan_dev_real_dev(root_dev);
+ else if (is_cxgb3_dev(root_dev))
+ return root_dev;
+ else
+ return NULL;
+ }
+ return NULL;
+}
+
+static struct rtable *find_route(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport)
+{
+ struct rtable *rt;
+ struct flowi fl = {
+ .oif = 0,
+ .nl_u = {
+ .ip4_u = {
+ .daddr = daddr,
+ .saddr = saddr,
+ .tos = 0 } },
+ .proto = IPPROTO_TCP,
+ .uli_u = {
+ .ports = {
+ .sport = sport,
+ .dport = dport } } };
+
+ if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
+ return NULL;
+ return rt;
+}
+
+/*
+ * Assign offload parameters to some connection fields.
+ */
+static void init_offload_conn(struct s3_conn *c3cn,
+ struct t3cdev *cdev,
+ struct dst_entry *dst)
+{
+ BUG_ON(c3cn->cdev != cdev);
+ c3cn->wr_max = c3cn->wr_avail = T3C_DATA(cdev)->max_wrs;
+ c3cn->wr_unacked = 0;
+ c3cn->mss_idx = select_mss(c3cn, dst_mtu(dst));
+
+ reset_wr_list(c3cn);
+}
+
+static int initiate_act_open(struct s3_conn *c3cn, struct net_device *dev)
+{
+ struct cxgb3i_sdev_data *cdata = NDEV2CDATA(dev);
+ struct t3cdev *cdev = cdata->cdev;
+ struct dst_entry *dst = c3cn->dst_cache;
+ struct sk_buff *skb;
+
+ c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n",
+ c3cn, c3cn->state, c3cn->flags);
+ /*
+ * Initialize connection data. Note that the flags and ULP mode are
+ * initialized higher up ...
+ */
+ c3cn->dev = dev;
+ c3cn->cdev = cdev;
+ c3cn->tid = cxgb3_alloc_atid(cdev, cdata->client, c3cn);
+ if (c3cn->tid < 0)
+ goto out_err;
+
+ c3cn->qset = 0;
+ c3cn->l2t = t3_l2t_get(cdev, dst->neighbour, dev);
+ if (!c3cn->l2t)
+ goto free_tid;
+
+ skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_KERNEL);
+ if (!skb)
+ goto free_l2t;
+
+ skb->sk = (struct sock *)c3cn;
+ set_arp_failure_handler(skb, act_open_req_arp_failure);
+
+ c3cn_hold(c3cn);
+
+ init_offload_conn(c3cn, cdev, dst);
+ c3cn->err = 0;
+
+ make_act_open_req(c3cn, skb, c3cn->tid, c3cn->l2t);
+ l2t_send(cdev, skb, c3cn->l2t);
+ return 0;
+
+free_l2t:
+ l2t_release(L2DATA(cdev), c3cn->l2t);
+free_tid:
+ s3_free_atid(cdev, c3cn->tid);
+ c3cn->tid = 0;
+out_err:
+ return -1;
+}
+
+
+/**
+ * cxgb3i_c3cn_connect - initiates an iscsi tcp connection to a given address
+ * @c3cn: the iscsi tcp connection
+ * @usin: destination address
+ *
+ * Returns 0 if the active open request is sent, < 0 otherwise.
+ */
+int cxgb3i_c3cn_connect(struct s3_conn *c3cn, struct sockaddr_in *usin)
+{
+ struct rtable *rt;
+ struct net_device *dev;
+ struct cxgb3i_sdev_data *cdata;
+ struct t3cdev *cdev;
+ __be32 sipv4;
+ int err;
+
+ if (usin->sin_family != AF_INET)
+ return -EAFNOSUPPORT;
+
+ c3cn->daddr.sin_port = usin->sin_port;
+ c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr;
+
+ rt = find_route(c3cn->saddr.sin_addr.s_addr,
+ c3cn->daddr.sin_addr.s_addr,
+ c3cn->saddr.sin_port,
+ c3cn->daddr.sin_port);
+ if (rt == NULL) {
+ c3cn_conn_debug("NO route to 0x%x, port %u.\n",
+ c3cn->daddr.sin_addr.s_addr,
+ ntohs(c3cn->daddr.sin_port));
+ return -ENETUNREACH;
+ }
+
+ if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
+ c3cn_conn_debug("multi-cast route to 0x%x, port %u.\n",
+ c3cn->daddr.sin_addr.s_addr,
+ ntohs(c3cn->daddr.sin_port));
+ ip_rt_put(rt);
+ return -ENETUNREACH;
+ }
+
+ if (!c3cn->saddr.sin_addr.s_addr)
+ c3cn->saddr.sin_addr.s_addr = rt->rt_src;
+
+ /* now commit destination to connection */
+ c3cn->dst_cache = &rt->u.dst;
+
+ /* try to establish an offloaded connection */
+ dev = cxgb3_egress_dev(c3cn->dst_cache->dev, c3cn, 0);
+ if (dev == NULL) {
+ c3cn_conn_debug("c3cn 0x%p, egress dev NULL.\n", c3cn);
+ return -ENETUNREACH;
+ }
+ cdata = NDEV2CDATA(dev);
+ cdev = cdata->cdev;
+
+ /* get a source port if one hasn't been provided */
+ err = c3cn_get_port(c3cn, cdata);
+ if (err)
+ return err;
+
+ c3cn_conn_debug("c3cn 0x%p get port %u.\n",
+ c3cn, ntohs(c3cn->saddr.sin_port));
+
+ sipv4 = cxgb3i_get_private_ipv4addr(dev);
+ if (!sipv4) {
+ c3cn_conn_debug("c3cn 0x%p, iscsi ip not configured.\n", c3cn);
+ sipv4 = c3cn->saddr.sin_addr.s_addr;
+ cxgb3i_set_private_ipv4addr(dev, sipv4);
+ } else
+ c3cn->saddr.sin_addr.s_addr = sipv4;
+
+ c3cn_conn_debug("c3cn 0x%p, %u.%u.%u.%u,%u-%u.%u.%u.%u,%u SYN_SENT.\n",
+ c3cn, NIPQUAD(c3cn->saddr.sin_addr.s_addr),
+ ntohs(c3cn->saddr.sin_port),
+ NIPQUAD(c3cn->daddr.sin_addr.s_addr),
+ ntohs(c3cn->daddr.sin_port));
+
+ c3cn_set_state(c3cn, C3CN_STATE_CONNECTING);
+ if (!initiate_act_open(c3cn, dev))
+ return 0;
+
+ /*
+ * If we get here, we don't have an offload connection so simply
+ * return a failure.
+ */
+ err = -ENOTSUPP;
+
+ /*
+ * This trashes the connection and releases the local port,
+ * if necessary.
+ */
+ c3cn_conn_debug("c3cn 0x%p -> CLOSED.\n", c3cn);
+ c3cn_set_state(c3cn, C3CN_STATE_CLOSED);
+ ip_rt_put(rt);
+ c3cn_put_port(c3cn);
+ c3cn->daddr.sin_port = 0;
+ return err;
+}
+
+/**
+ * cxgb3i_c3cn_rx_credits - ack received tcp data.
+ * @c3cn: iscsi tcp connection
+ * @copied: # of bytes processed
+ *
+ * Called after some received data has been read. It returns RX credits
+ * to the HW for the amount of data processed.
+ */
+void cxgb3i_c3cn_rx_credits(struct s3_conn *c3cn, int copied)
+{
+ struct t3cdev *cdev;
+ int must_send;
+ u32 credits, dack = 0;
+
+ if (c3cn->state != C3CN_STATE_ESTABLISHED)
+ return;
+
+ credits = c3cn->copied_seq - c3cn->rcv_wup;
+ if (unlikely(!credits))
+ return;
+
+ cdev = c3cn->cdev;
+
+ if (unlikely(cxgb3_rx_credit_thres == 0))
+ return;
+
+ dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);
+
+ /*
+ * For coalescing to work effectively ensure the receive window has
+ * at least 16KB left.
+ */
+ must_send = credits + 16384 >= cxgb3_rcv_win;
+
+ if (must_send || credits >= cxgb3_rx_credit_thres)
+ c3cn->rcv_wup += send_rx_credits(c3cn, credits, dack);
+}
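+
+/*
+ * Example, assuming a 256KB receive window and a 64KB credit threshold
+ * (both configured elsewhere in this driver): credits accumulate as
+ * copied_seq advances past rcv_wup; an RX_DATA_ACK goes out once 64KB has
+ * been consumed, or sooner if the outstanding credits come within 16KB of
+ * the full window, so the HW never stalls waiting for window updates.
+ */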
+
+/**
+ * cxgb3i_c3cn_send_pdus - send the skbs containing iscsi pdus
+ * @c3cn: iscsi tcp connection
+ * @skb: skb contains the iscsi pdu
+ *
+ * Add a list of skbs to a connection send queue. The skbs must comply with
+ * the max size limit of the device and have a headroom of at least
+ * TX_HEADER_LEN bytes.
+ * Return # of bytes queued.
+ */
+int cxgb3i_c3cn_send_pdus(struct s3_conn *c3cn, struct sk_buff *skb)
+{
+ struct sk_buff *next;
+ int err, copied = 0;
+
+ spin_lock_bh(&c3cn->lock);
+
+ if (c3cn->state != C3CN_STATE_ESTABLISHED) {
+ c3cn_tx_debug("c3cn 0x%p, not in est. state %u.\n",
+ c3cn, c3cn->state);
+ err = -EAGAIN;
+ goto out_err;
+ }
+
+ err = -EPIPE;
+ if (c3cn->err) {
+ c3cn_tx_debug("c3cn 0x%p, err %d.\n", c3cn, c3cn->err);
+ goto out_err;
+ }
+
+ while (skb) {
+ int frags = skb_shinfo(skb)->nr_frags +
+ (skb->len != skb->data_len);
+
+ if (unlikely(skb_headroom(skb) < TX_HEADER_LEN)) {
+ c3cn_tx_debug("c3cn 0x%p, skb head.\n", c3cn);
+ err = -EINVAL;
+ goto out_err;
+ }
+
+ if (frags >= SKB_WR_LIST_SIZE) {
+ cxgb3i_log_error("c3cn 0x%p, tx frags %d, len %u,%u.\n",
+ c3cn, skb_shinfo(skb)->nr_frags,
+ skb->len, skb->data_len);
+ err = -EINVAL;
+ goto out_err;
+ }
+
+ next = skb->next;
+ skb->next = NULL;
+ skb_entail(c3cn, skb, C3CB_FLAG_NO_APPEND | C3CB_FLAG_NEED_HDR);
+ copied += skb->len;
+ c3cn->write_seq += skb->len + ulp_extra_len(skb);
+ skb = next;
+ }
+done:
+ if (likely(skb_queue_len(&c3cn->write_queue)))
+ c3cn_push_tx_frames(c3cn, 1);
+ spin_unlock_bh(&c3cn->lock);
+ return copied;
+
+out_err:
+ if (copied == 0 && err == -EPIPE)
+ copied = c3cn->err ? c3cn->err : -EPIPE;
+ goto done;
+}
+
+static void sdev_data_cleanup(struct cxgb3i_sdev_data *cdata)
+{
+ struct adap_ports *ports = &cdata->ports;
+ int i;
+
+ for (i = 0; i < ports->nports; i++)
+ NDEV2CDATA(ports->lldevs[i]) = NULL;
+ cxgb3i_free_big_mem(cdata);
+}
+
+void cxgb3i_sdev_cleanup(void)
+{
+ struct cxgb3i_sdev_data *cdata;
+
+ write_lock(&cdata_rwlock);
+ list_for_each_entry(cdata, &cdata_list, list) {
+ list_del(&cdata->list);
+ sdev_data_cleanup(cdata);
+ }
+ write_unlock(&cdata_rwlock);
+}
+
+int cxgb3i_sdev_init(cxgb3_cpl_handler_func *cpl_handlers)
+{
+ cpl_handlers[CPL_ACT_ESTABLISH] = do_act_establish;
+ cpl_handlers[CPL_ACT_OPEN_RPL] = do_act_open_rpl;
+ cpl_handlers[CPL_PEER_CLOSE] = do_peer_close;
+ cpl_handlers[CPL_ABORT_REQ_RSS] = do_abort_req;
+ cpl_handlers[CPL_ABORT_RPL_RSS] = do_abort_rpl;
+ cpl_handlers[CPL_CLOSE_CON_RPL] = do_close_con_rpl;
+ cpl_handlers[CPL_TX_DMA_ACK] = do_wr_ack;
+ cpl_handlers[CPL_ISCSI_HDR] = do_iscsi_hdr;
+
+ if (cxgb3_max_connect > CXGB3I_MAX_CONN)
+ cxgb3_max_connect = CXGB3I_MAX_CONN;
+ return 0;
+}
+
+/**
+ * cxgb3i_sdev_add - allocate and initialize resources for each adapter found
+ * @cdev: t3cdev adapter
+ * @client: cxgb3 driver client
+ */
+void cxgb3i_sdev_add(struct t3cdev *cdev, struct cxgb3_client *client)
+{
+ struct cxgb3i_sdev_data *cdata;
+ struct ofld_page_info rx_page_info;
+ unsigned int wr_len;
+ int mapsize = DIV_ROUND_UP(cxgb3_max_connect,
+ 8 * sizeof(unsigned long));
+ int i;
+
+ cdata = cxgb3i_alloc_big_mem(sizeof(*cdata) + mapsize, GFP_KERNEL);
+ if (!cdata)
+ return;
+
+ if (cdev->ctl(cdev, GET_WR_LEN, &wr_len) < 0 ||
+ cdev->ctl(cdev, GET_PORTS, &cdata->ports) < 0 ||
+ cdev->ctl(cdev, GET_RX_PAGE_INFO, &rx_page_info) < 0)
+ goto free_cdata;
+
+ s3_init_wr_tab(wr_len);
+
+ INIT_LIST_HEAD(&cdata->list);
+ cdata->cdev = cdev;
+ cdata->client = client;
+
+ for (i = 0; i < cdata->ports.nports; i++)
+ NDEV2CDATA(cdata->ports.lldevs[i]) = cdata;
+
+ write_lock(&cdata_rwlock);
+ list_add_tail(&cdata->list, &cdata_list);
+ write_unlock(&cdata_rwlock);
+
+ return;
+
+free_cdata:
+ cxgb3i_free_big_mem(cdata);
+}
+
+/**
+ * cxgb3i_sdev_remove - free the allocated resources for the adapter
+ * @cdev: t3cdev adapter
+ */
+void cxgb3i_sdev_remove(struct t3cdev *cdev)
+{
+ struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(cdev);
+
+ write_lock(&cdata_rwlock);
+ list_del(&cdata->list);
+ write_unlock(&cdata_rwlock);
+
+ sdev_data_cleanup(cdata);
+}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.h b/drivers/scsi/cxgb3i/cxgb3i_offload.h
new file mode 100644
index 000000000000..d23156907ffd
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.h
@@ -0,0 +1,231 @@
+/*
+ * cxgb3i_offload.h: Chelsio S3xx iscsi offloaded tcp connection management
+ *
+ * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
+ *
+ * Written by: Dimitris Michailidis (dm@chelsio.com)
+ * Karen Xie (kxie@chelsio.com)
+ */
+
+#ifndef _CXGB3I_OFFLOAD_H
+#define _CXGB3I_OFFLOAD_H
+
+#include <linux/skbuff.h>
+#include <net/tcp.h>
+
+#include "common.h"
+#include "adapter.h"
+#include "t3cdev.h"
+#include "cxgb3_offload.h"
+
+#define cxgb3i_log_error(fmt...) printk(KERN_ERR "cxgb3i: ERR! " fmt)
+#define cxgb3i_log_warn(fmt...) printk(KERN_WARNING "cxgb3i: WARN! " fmt)
+#define cxgb3i_log_info(fmt...) printk(KERN_INFO "cxgb3i: " fmt)
+#define cxgb3i_log_debug(fmt, args...) \
+ printk(KERN_INFO "cxgb3i: %s - " fmt, __func__ , ## args)
+
+/**
+ * struct s3_conn - an iscsi tcp connection structure
+ *
+ * @dev: net device associated with the connection
+ * @cdev: adapter t3cdev for net device
+ * @flags: see c3cn_flags below
+ * @tid: connection id assigned by the h/w
+ * @qset: queue set used by connection
+ * @mss_idx: Maximum Segment Size table index
+ * @l2t: ARP resolution entry for offload packets
+ * @wr_max: maximum in-flight writes
+ * @wr_avail: number of writes available
+ * @wr_unacked: writes since last request for completion notification
+ * @wr_pending_head: head of pending write queue
+ * @wr_pending_tail: tail of pending write queue
+ * @cpl_close: skb for cpl_close_req
+ * @cpl_abort_req: skb for cpl_abort_req
+ * @cpl_abort_rpl: skb for cpl_abort_rpl
+ * @lock: connection status lock
+ * @refcnt: reference count on connection
+ * @state: connection state
+ * @saddr: source ip/port address
+ * @daddr: destination ip/port address
+ * @dst_cache: reference to destination route
+ * @receive_queue: received PDUs
+ * @write_queue: un-pushed pending writes
+ * @retry_timer: retry timer for various operations
+ * @err: connection error status
+ * @callback_lock: lock for opaque user context
+ * @user_data: opaque user context
+ * @rcv_nxt: next receive seq. #
+ * @copied_seq: head of yet unread data
+ * @rcv_wup: rcv_nxt on last window update sent
+ * @snd_nxt: next sequence we send
+ * @snd_una: first byte we want an ack for
+ * @write_seq: tail+1 of data held in send buffer
+ */
+struct s3_conn {
+ struct net_device *dev;
+ struct t3cdev *cdev;
+ unsigned long flags;
+ int tid;
+ int qset;
+ int mss_idx;
+ struct l2t_entry *l2t;
+ int wr_max;
+ int wr_avail;
+ int wr_unacked;
+ struct sk_buff *wr_pending_head;
+ struct sk_buff *wr_pending_tail;
+ struct sk_buff *cpl_close;
+ struct sk_buff *cpl_abort_req;
+ struct sk_buff *cpl_abort_rpl;
+ spinlock_t lock;
+ atomic_t refcnt;
+ volatile unsigned int state;
+ struct sockaddr_in saddr;
+ struct sockaddr_in daddr;
+ struct dst_entry *dst_cache;
+ struct sk_buff_head receive_queue;
+ struct sk_buff_head write_queue;
+ struct timer_list retry_timer;
+ int err;
+ rwlock_t callback_lock;
+ void *user_data;
+
+ u32 rcv_nxt;
+ u32 copied_seq;
+ u32 rcv_wup;
+ u32 snd_nxt;
+ u32 snd_una;
+ u32 write_seq;
+};
+
+/*
+ * connection state
+ */
+enum conn_states {
+ C3CN_STATE_CONNECTING = 1,
+ C3CN_STATE_ESTABLISHED,
+ C3CN_STATE_ACTIVE_CLOSE,
+ C3CN_STATE_PASSIVE_CLOSE,
+ C3CN_STATE_CLOSE_WAIT_1,
+ C3CN_STATE_CLOSE_WAIT_2,
+ C3CN_STATE_ABORTING,
+ C3CN_STATE_CLOSED,
+};
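+
+/*
+ * A sketch of the transitions driven by the CPL handlers in
+ * cxgb3i_offload.c (a summary of that code, not a full TCP state machine):
+ *
+ * CONNECTING --act_establish--> ESTABLISHED
+ * ESTABLISHED --active close--> ACTIVE_CLOSE
+ * ESTABLISHED --peer FIN--> PASSIVE_CLOSE
+ * ACTIVE_CLOSE --close_con_rpl--> CLOSE_WAIT_1
+ * ACTIVE_CLOSE --peer FIN--> CLOSE_WAIT_2
+ * CLOSE_WAIT_1/CLOSE_WAIT_2 --final FIN or rpl--> CLOSED
+ * any state --abort--> ABORTING --abort_rpl--> CLOSED
+ */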
+
+static inline unsigned int c3cn_is_closing(const struct s3_conn *c3cn)
+{
+ return c3cn->state >= C3CN_STATE_ACTIVE_CLOSE;
+}
+static inline unsigned int c3cn_is_established(const struct s3_conn *c3cn)
+{
+ return c3cn->state == C3CN_STATE_ESTABLISHED;
+}
+
+/*
+ * Connection flags -- many to track some close related events.
+ */
+enum c3cn_flags {
+ C3CN_ABORT_RPL_RCVD, /* received one ABORT_RPL_RSS message */
+ C3CN_ABORT_REQ_RCVD, /* received one ABORT_REQ_RSS message */
+ C3CN_ABORT_RPL_PENDING, /* expecting an abort reply */
+ C3CN_TX_DATA_SENT, /* already sent a TX_DATA WR */
+ C3CN_ACTIVE_CLOSE_NEEDED, /* need to be closed */
+};
+
+/**
+ * cxgb3i_sdev_data - Per adapter data.
+ * Linked off each Ethernet device port on the adapter.
+ * Also available via the t3cdev structure since we have pointers to our port
+ * net_device's there ...
+ *
+ * @list: list head to link elements
+ * @cdev: t3cdev adapter
+ * @client: CPL client pointer
+ * @ports: array of adapter ports
+ * @sport_map_next: next index into the port map
+ * @sport_map: source port map
+ */
+struct cxgb3i_sdev_data {
+ struct list_head list;
+ struct t3cdev *cdev;
+ struct cxgb3_client *client;
+ struct adap_ports ports;
+ unsigned int sport_map_next;
+ unsigned long sport_map[0];
+};
+#define NDEV2CDATA(ndev) (*(struct cxgb3i_sdev_data **)&(ndev)->ec_ptr)
+#define CXGB3_SDEV_DATA(cdev) NDEV2CDATA((cdev)->lldev)
+
+void cxgb3i_sdev_cleanup(void);
+int cxgb3i_sdev_init(cxgb3_cpl_handler_func *);
+void cxgb3i_sdev_add(struct t3cdev *, struct cxgb3_client *);
+void cxgb3i_sdev_remove(struct t3cdev *);
+
+struct s3_conn *cxgb3i_c3cn_create(void);
+int cxgb3i_c3cn_connect(struct s3_conn *, struct sockaddr_in *);
+void cxgb3i_c3cn_rx_credits(struct s3_conn *, int);
+int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *);
+void cxgb3i_c3cn_release(struct s3_conn *);
+
+/**
+ * cxgb3_skb_cb - control block for received pdu state and ULP mode management.
+ *
+ * @flags: see C3CB_FLAG_* below
+ * @ulp_mode: ULP mode/submode of sk_buff
+ * @seq: tcp sequence number
+ * @ddigest: pdu data digest
+ * @pdulen: recovered pdu length
+ * @wr_data: scratch area for tx wr
+ */
+struct cxgb3_skb_cb {
+ __u8 flags;
+ __u8 ulp_mode;
+ __u32 seq;
+ __u32 ddigest;
+ __u32 pdulen;
+ struct sk_buff *wr_data;
+};
+
+#define CXGB3_SKB_CB(skb) ((struct cxgb3_skb_cb *)&((skb)->cb[0]))
+
+#define skb_ulp_mode(skb) (CXGB3_SKB_CB(skb)->ulp_mode)
+#define skb_ulp_ddigest(skb) (CXGB3_SKB_CB(skb)->ddigest)
+#define skb_ulp_pdulen(skb) (CXGB3_SKB_CB(skb)->pdulen)
+#define skb_wr_data(skb) (CXGB3_SKB_CB(skb)->wr_data)
+
+enum c3cb_flags {
+ C3CB_FLAG_NEED_HDR = 1 << 0, /* packet needs a TX_DATA_WR header */
+ C3CB_FLAG_NO_APPEND = 1 << 1, /* don't grow this skb */
+ C3CB_FLAG_COMPL = 1 << 2, /* request WR completion */
+};
+
+/**
+ * sge_opaque_hdr -
+ * Opaque version of the structure the SGE stores at skb->head of TX_DATA packets
+ * and for which we must reserve space.
+ */
+struct sge_opaque_hdr {
+ void *dev;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+};
+
+/* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
+#define TX_HEADER_LEN \
+ (sizeof(struct tx_data_wr) + sizeof(struct sge_opaque_hdr))
+
+/*
+ * get and set private ip for iscsi traffic
+ */
+#define cxgb3i_get_private_ipv4addr(ndev) \
+ (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr)
+#define cxgb3i_set_private_ipv4addr(ndev, addr) \
+ (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr) = addr
+
+/* max. connections per adapter */
+#define CXGB3I_MAX_CONN 16384
+#endif /* _CXGB3I_OFFLOAD_H */
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.c b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
new file mode 100644
index 000000000000..ce7ce8c6094c
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
@@ -0,0 +1,402 @@
+/*
+ * cxgb3i_pdu.c: Chelsio S3xx iSCSI driver.
+ *
+ * Copyright (c) 2008 Chelsio Communications, Inc.
+ * Copyright (c) 2008 Mike Christie
+ * Copyright (c) 2008 Red Hat, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ */
+
+#include <linux/skbuff.h>
+#include <linux/crypto.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+
+#include "cxgb3i.h"
+#include "cxgb3i_pdu.h"
+
+#ifdef __DEBUG_CXGB3I_RX__
+#define cxgb3i_rx_debug cxgb3i_log_debug
+#else
+#define cxgb3i_rx_debug(fmt...)
+#endif
+
+#ifdef __DEBUG_CXGB3I_TX__
+#define cxgb3i_tx_debug cxgb3i_log_debug
+#else
+#define cxgb3i_tx_debug(fmt...)
+#endif
+
+static struct page *pad_page;
+
+/*
+ * pdu receive, interact with libiscsi_tcp
+ */
+static inline int read_pdu_skb(struct iscsi_conn *conn, struct sk_buff *skb,
+ unsigned int offset, int offloaded)
+{
+ int status = 0;
+ int bytes_read;
+
+ bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
+ switch (status) {
+ case ISCSI_TCP_CONN_ERR:
+ return -EIO;
+ case ISCSI_TCP_SUSPENDED:
+ /* no transfer - just have caller flush queue */
+ return bytes_read;
+ case ISCSI_TCP_SKB_DONE:
+ /*
+ * pdus should always fit in the skb and we should get a
+ * segment-done notification.
+ */
+ iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb.");
+ return -EFAULT;
+ case ISCSI_TCP_SEGMENT_DONE:
+ return bytes_read;
+ default:
+ iscsi_conn_printk(KERN_ERR, conn, "Invalid iscsi_tcp_recv_skb "
+ "status %d\n", status);
+ return -EINVAL;
+ }
+}
+
+static int cxgb3i_conn_read_pdu_skb(struct iscsi_conn *conn,
+ struct sk_buff *skb)
+{
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ bool offloaded = false;
+ unsigned int offset;
+ int rc;
+
+ cxgb3i_rx_debug("conn 0x%p, skb 0x%p, len %u, flag 0x%x.\n",
+ conn, skb, skb->len, skb_ulp_mode(skb));
+
+ if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) {
+ iscsi_conn_failure(conn, ISCSI_ERR_PROTO);
+ return -EIO;
+ }
+
+ if (conn->hdrdgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_HCRC_ERROR)) {
+ iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST);
+ return -EIO;
+ }
+
+ if (conn->datadgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_DCRC_ERROR)) {
+ iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
+ return -EIO;
+ }
+
+ /* iscsi hdr */
+ rc = read_pdu_skb(conn, skb, 0, 0);
+ if (rc <= 0)
+ return rc;
+
+ if (iscsi_tcp_recv_segment_is_hdr(tcp_conn))
+ return 0;
+
+ offset = rc;
+ if (conn->hdrdgst_en)
+ offset += ISCSI_DIGEST_SIZE;
+
+ /* iscsi data */
+ if (skb_ulp_mode(skb) & ULP2_FLAG_DATA_DDPED) {
+ cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, ddp'ed, "
+ "itt 0x%x.\n",
+ skb,
+ tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
+ tcp_conn->in.datalen,
+ ntohl(tcp_conn->in.hdr->itt));
+ offloaded = 1;
+ } else {
+ cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, NOT ddp'ed, "
+ "itt 0x%x.\n",
+ skb,
+ tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
+ tcp_conn->in.datalen,
+ ntohl(tcp_conn->in.hdr->itt));
+ offset += sizeof(struct cpl_iscsi_hdr_norss);
+ }
+
+ rc = read_pdu_skb(conn, skb, offset, offloaded);
+ if (rc < 0)
+ return rc;
+ else
+ return 0;
+}
+
+/*
+ * pdu transmit, interact with libiscsi_tcp
+ */
+static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
+{
+ u8 submode = 0;
+
+ if (hcrc)
+ submode |= 1;
+ if (dcrc)
+ submode |= 2;
+ skb_ulp_mode(skb) = (ULP_MODE_ISCSI << 4) | submode;
+}
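+
+/*
+ * Example: with both digests enabled submode = 3, and assuming
+ * ULP_MODE_ISCSI is 2 in the cxgb3 CPL definitions, skb_ulp_mode(skb)
+ * becomes (2 << 4) | 3 = 0x23. ulp_extra_len() on the offload side then
+ * accounts for the 8 digest bytes the HW will insert.
+ */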
+
+void cxgb3i_conn_cleanup_task(struct iscsi_task *task)
+{
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+
+ /* never reached the xmit task callout */
+ if (tcp_task->dd_data)
+ kfree_skb(tcp_task->dd_data);
+ tcp_task->dd_data = NULL;
+
+ /* MNC - Do we need a check in case this is called but
+ * cxgb3i_conn_alloc_pdu has never been called on the task? */
+ cxgb3i_release_itt(task, task->hdr_itt);
+ iscsi_tcp_cleanup_task(task);
+}
+
+/*
+ * We do not support ahs yet
+ */
+int cxgb3i_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
+{
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+ struct sk_buff *skb;
+
+ task->hdr = NULL;
+ /* always allocate room for the AHS */
+ skb = alloc_skb(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE +
+ TX_HEADER_LEN, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ cxgb3i_tx_debug("task 0x%p, opcode 0x%x, skb 0x%p.\n",
+ task, opcode, skb);
+
+ tcp_task->dd_data = skb;
+ skb_reserve(skb, TX_HEADER_LEN);
+ task->hdr = (struct iscsi_hdr *)skb->data;
+ task->hdr_max = sizeof(struct iscsi_hdr);
+
+ /* data_out uses scsi_cmd's itt */
+ if (opcode != ISCSI_OP_SCSI_DATA_OUT)
+ cxgb3i_reserve_itt(task, &task->hdr->itt);
+
+ return 0;
+}
+
+int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
+ unsigned int count)
+{
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+ struct sk_buff *skb = tcp_task->dd_data;
+ struct iscsi_conn *conn = task->conn;
+ struct page *pg;
+ unsigned int datalen = count;
+ int i, padlen = iscsi_padding(count);
+ skb_frag_t *frag;
+
+ cxgb3i_tx_debug("task 0x%p,0x%p, offset %u, count %u, skb 0x%p.\n",
+ task, task->sc, offset, count, skb);
+
+ skb_put(skb, task->hdr_len);
+ tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
+ if (!count)
+ return 0;
+
+ if (task->sc) {
+ struct scatterlist *sg;
+ struct scsi_data_buffer *sdb;
+ unsigned int sgoffset = offset;
+ struct page *sgpg;
+ unsigned int sglen;
+
+ sdb = scsi_out(task->sc);
+ sg = sdb->table.sgl;
+
+ for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
+ cxgb3i_tx_debug("sg %d, page 0x%p, len %u offset %u\n",
+ i, sg_page(sg), sg->length, sg->offset);
+
+ if (sgoffset < sg->length)
+ break;
+ sgoffset -= sg->length;
+ }
+ sgpg = sg_page(sg);
+ sglen = sg->length - sgoffset;
+
+ do {
+ int j = skb_shinfo(skb)->nr_frags;
+ unsigned int copy;
+
+ if (!sglen) {
+ sg = sg_next(sg);
+ sgpg = sg_page(sg);
+ sgoffset = 0;
+ sglen = sg->length;
+ ++i;
+ }
+ copy = min(sglen, datalen);
+ if (j && skb_can_coalesce(skb, j, sgpg,
+ sg->offset + sgoffset)) {
+ skb_shinfo(skb)->frags[j - 1].size += copy;
+ } else {
+ get_page(sgpg);
+ skb_fill_page_desc(skb, j, sgpg,
+ sg->offset + sgoffset, copy);
+ }
+ sgoffset += copy;
+ sglen -= copy;
+ datalen -= copy;
+ } while (datalen);
+ } else {
+ pg = virt_to_page(task->data);
+
+ while (datalen) {
+ i = skb_shinfo(skb)->nr_frags;
+ frag = &skb_shinfo(skb)->frags[i];
+
+ get_page(pg);
+ frag->page = pg;
+ frag->page_offset = 0;
+ frag->size = min((unsigned int)PAGE_SIZE, datalen);
+
+ skb_shinfo(skb)->nr_frags++;
+ datalen -= frag->size;
+ pg++;
+ }
+ }
+
+ if (padlen) {
+ i = skb_shinfo(skb)->nr_frags;
+ frag = &skb_shinfo(skb)->frags[i];
+ frag->page = pad_page;
+ frag->page_offset = 0;
+ frag->size = padlen;
+ skb_shinfo(skb)->nr_frags++;
+ }
+
+ datalen = count + padlen;
+ skb->data_len += datalen;
+ skb->truesize += datalen;
+ skb->len += datalen;
+ return 0;
+}
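+
+/*
+ * Example of the padding path above: a 1001-byte data segment yields
+ * padlen = iscsi_padding(1001) = 3, so a 3-byte fragment pointing at the
+ * shared zeroed pad_page is appended and the wire length becomes 1004
+ * bytes, keeping the PDU 4-byte aligned as iSCSI requires.
+ */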
+
+int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
+{
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+ struct sk_buff *skb = tcp_task->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
+ struct cxgb3i_conn *cconn = tcp_conn->dd_data;
+ unsigned int datalen;
+ int err;
+
+ if (!skb)
+ return 0;
+
+ datalen = skb->data_len;
+ tcp_task->dd_data = NULL;
+ err = cxgb3i_c3cn_send_pdus(cconn->cep->c3cn, skb);
+ cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
+ task, skb, skb->len, skb->data_len, err);
+ if (err > 0) {
+ int pdulen = err;
+
+ if (task->conn->hdrdgst_en)
+ pdulen += ISCSI_DIGEST_SIZE;
+ if (datalen && task->conn->datadgst_en)
+ pdulen += ISCSI_DIGEST_SIZE;
+
+ task->conn->txdata_octets += pdulen;
+ return 0;
+ }
+
+ if (err < 0 && err != -EAGAIN) {
+ kfree_skb(skb);
+ cxgb3i_tx_debug("itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
+ task->itt, skb, skb->len, skb->data_len, err);
+ iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
+ iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
+ return err;
+ }
+ /* reset skb to send when we are called again */
+ tcp_task->dd_data = skb;
+ return -EAGAIN;
+}
+
+int cxgb3i_pdu_init(void)
+{
+ pad_page = alloc_page(GFP_KERNEL);
+ if (!pad_page)
+ return -ENOMEM;
+ memset(page_address(pad_page), 0, PAGE_SIZE);
+ return 0;
+}
+
+void cxgb3i_pdu_cleanup(void)
+{
+ if (pad_page) {
+ __free_page(pad_page);
+ pad_page = NULL;
+ }
+}
+
+void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn)
+{
+ struct sk_buff *skb;
+ unsigned int read = 0;
+ struct iscsi_conn *conn = c3cn->user_data;
+ int err = 0;
+
+ cxgb3i_rx_debug("cn 0x%p.\n", c3cn);
+
+ read_lock(&c3cn->callback_lock);
+ if (unlikely(!conn || conn->suspend_rx)) {
+ cxgb3i_rx_debug("conn 0x%p, id %d, suspend_rx %lu!\n",
+ conn, conn ? conn->id : 0xFF,
+ conn ? conn->suspend_rx : 0xFF);
+ read_unlock(&c3cn->callback_lock);
+ return;
+ }
+ skb = skb_peek(&c3cn->receive_queue);
+ while (!err && skb) {
+ __skb_unlink(skb, &c3cn->receive_queue);
+ read += skb_ulp_pdulen(skb);
+ err = cxgb3i_conn_read_pdu_skb(conn, skb);
+ __kfree_skb(skb);
+ skb = skb_peek(&c3cn->receive_queue);
+ }
+ read_unlock(&c3cn->callback_lock);
+ if (c3cn) {
+ c3cn->copied_seq += read;
+ cxgb3i_c3cn_rx_credits(c3cn, read);
+ }
+ conn->rxdata_octets += read;
+}
+
+void cxgb3i_conn_tx_open(struct s3_conn *c3cn)
+{
+ struct iscsi_conn *conn = c3cn->user_data;
+
+ cxgb3i_tx_debug("cn 0x%p.\n", c3cn);
+ if (conn) {
+ cxgb3i_tx_debug("cn 0x%p, cid %d.\n", c3cn, conn->id);
+ scsi_queue_work(conn->session->host, &conn->xmitwork);
+ }
+}
+
+void cxgb3i_conn_closing(struct s3_conn *c3cn)
+{
+ struct iscsi_conn *conn;
+
+ read_lock(&c3cn->callback_lock);
+ conn = c3cn->user_data;
+ if (conn && c3cn->state != C3CN_STATE_ESTABLISHED)
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ read_unlock(&c3cn->callback_lock);
+}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.h b/drivers/scsi/cxgb3i/cxgb3i_pdu.h
new file mode 100644
index 000000000000..a3f685cc2362
--- /dev/null
+++ b/drivers/scsi/cxgb3i/cxgb3i_pdu.h
@@ -0,0 +1,59 @@
+/*
+ * cxgb3i_pdu.h: Chelsio S3xx iSCSI driver.
+ *
+ * Copyright (c) 2008 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ */
+
+#ifndef __CXGB3I_ULP2_PDU_H__
+#define __CXGB3I_ULP2_PDU_H__
+
+struct cpl_iscsi_hdr_norss {
+ union opcode_tid ot;
+ u16 pdu_len_ddp;
+ u16 len;
+ u32 seq;
+ u16 urg;
+ u8 rsvd;
+ u8 status;
+};
+
+struct cpl_rx_data_ddp_norss {
+ union opcode_tid ot;
+ u16 urg;
+ u16 len;
+ u32 seq;
+ u32 nxt_seq;
+ u32 ulp_crc;
+ u32 ddp_status;
+};
+
+#define RX_DDP_STATUS_IPP_SHIFT 27 /* invalid pagepod */
+#define RX_DDP_STATUS_TID_SHIFT 26 /* tid mismatch */
+#define RX_DDP_STATUS_COLOR_SHIFT 25 /* color mismatch */
+#define RX_DDP_STATUS_OFFSET_SHIFT 24 /* offset mismatch */
+#define RX_DDP_STATUS_ULIMIT_SHIFT 23 /* ulimit error */
+#define RX_DDP_STATUS_TAG_SHIFT 22 /* tag mismatch */
+#define RX_DDP_STATUS_DCRC_SHIFT 21 /* dcrc error */
+#define RX_DDP_STATUS_HCRC_SHIFT 20 /* hcrc error */
+#define RX_DDP_STATUS_PAD_SHIFT 19 /* pad error */
+#define RX_DDP_STATUS_PPP_SHIFT 18 /* pagepod parity error */
+#define RX_DDP_STATUS_LLIMIT_SHIFT 17 /* llimit error */
+#define RX_DDP_STATUS_DDP_SHIFT 16 /* ddp'able */
+#define RX_DDP_STATUS_PMM_SHIFT 15 /* pagepod mismatch */
+
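+/*
+ * Illustrative use of the status shifts above (hypothetical rx-path
+ * check, not part of this header): the ddp_status word from struct
+ * cpl_rx_data_ddp_norss would be tested bit by bit, e.g.
+ *
+ *	if (ddp_status & (1 << RX_DDP_STATUS_HCRC_SHIFT))
+ *		header digest error on this PDU;
+ *	if (ddp_status & (1 << RX_DDP_STATUS_DDP_SHIFT))
+ *		payload was placed directly (ddp'ed);
+ */
+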
+#define ULP2_FLAG_DATA_READY 0x1
+#define ULP2_FLAG_DATA_DDPED 0x2
+#define ULP2_FLAG_HCRC_ERROR 0x10
+#define ULP2_FLAG_DCRC_ERROR 0x20
+#define ULP2_FLAG_PAD_ERROR 0x40
+
+void cxgb3i_conn_closing(struct s3_conn *);
+void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn);
+void cxgb3i_conn_tx_open(struct s3_conn *c3cn);
+#endif
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 708e475896b9..e356b43753ff 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -109,7 +109,8 @@ static struct request *get_alua_req(struct scsi_device *sdev,
}
rq->cmd_type = REQ_TYPE_BLOCK_PC;
- rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
+ rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
rq->retries = ALUA_FAILOVER_RETRIES;
rq->timeout = ALUA_FAILOVER_TIMEOUT;
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 8f45570a8a01..0e572d2c5b0a 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -303,7 +303,8 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
rq->cmd[4] = len;
rq->cmd_type = REQ_TYPE_BLOCK_PC;
- rq->cmd_flags |= REQ_FAILFAST;
+ rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
rq->timeout = CLARIION_TIMEOUT;
rq->retries = CLARIION_RETRIES;
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 5e93c88ad66b..f7da7530875e 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -107,12 +107,14 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
struct request *req;
int ret;
+retry:
req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
if (!req)
return SCSI_DH_RES_TEMP_UNAVAIL;
req->cmd_type = REQ_TYPE_BLOCK_PC;
- req->cmd_flags |= REQ_FAILFAST;
+ req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
req->cmd[0] = TEST_UNIT_READY;
req->timeout = HP_SW_TIMEOUT;
@@ -120,7 +122,6 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
req->sense_len = 0;
-retry:
ret = blk_execute_rq(req->q, NULL, req, 1);
if (ret == -EIO) {
if (req->sense_len > 0) {
@@ -135,8 +136,10 @@ retry:
h->path_state = HP_SW_PATH_ACTIVE;
ret = SCSI_DH_OK;
}
- if (ret == SCSI_DH_IMM_RETRY)
+ if (ret == SCSI_DH_IMM_RETRY) {
+ blk_put_request(req);
goto retry;
+ }
if (ret == SCSI_DH_DEV_OFFLINED) {
h->path_state = HP_SW_PATH_PASSIVE;
ret = SCSI_DH_OK;
@@ -199,12 +202,14 @@ static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
struct request *req;
int ret, retry;
+retry:
req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
if (!req)
return SCSI_DH_RES_TEMP_UNAVAIL;
req->cmd_type = REQ_TYPE_BLOCK_PC;
- req->cmd_flags |= REQ_FAILFAST;
+ req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
req->cmd_len = COMMAND_SIZE(START_STOP);
req->cmd[0] = START_STOP;
req->cmd[4] = 1; /* Start spin cycle */
@@ -214,7 +219,6 @@ static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
req->sense_len = 0;
retry = h->retries;
-retry:
ret = blk_execute_rq(req->q, NULL, req, 1);
if (ret == -EIO) {
if (req->sense_len > 0) {
@@ -229,8 +233,10 @@ retry:
ret = SCSI_DH_OK;
if (ret == SCSI_DH_RETRY) {
- if (--retry)
+ if (--retry) {
+ blk_put_request(req);
goto retry;
+ }
ret = SCSI_DH_IO;
}
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 50bf95f3b5c4..53664765570a 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -24,6 +24,7 @@
#include <scsi/scsi_dh.h>
#define RDAC_NAME "rdac"
+#define RDAC_RETRY_COUNT 5
/*
* LSI mode page stuff
@@ -226,7 +227,8 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
}
rq->cmd_type = REQ_TYPE_BLOCK_PC;
- rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
+ rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
rq->retries = RDAC_RETRIES;
rq->timeout = RDAC_TIMEOUT;
@@ -385,6 +387,7 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
struct c9_inquiry *inqp;
h->lun_state = RDAC_LUN_UNOWNED;
+ h->state = RDAC_STATE_ACTIVE;
err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
if (err == SCSI_DH_OK) {
inqp = &h->inq.c9;
@@ -400,6 +403,9 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
}
}
+ if (h->lun_state == RDAC_LUN_UNOWNED)
+ h->state = RDAC_STATE_PASSIVE;
+
return err;
}
@@ -473,21 +479,27 @@ static int send_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
{
struct request *rq;
struct request_queue *q = sdev->request_queue;
- int err = SCSI_DH_RES_TEMP_UNAVAIL;
+ int err, retry_cnt = RDAC_RETRY_COUNT;
+retry:
+ err = SCSI_DH_RES_TEMP_UNAVAIL;
rq = rdac_failover_get(sdev, h);
if (!rq)
goto done;
- sdev_printk(KERN_INFO, sdev, "queueing MODE_SELECT command.\n");
+ sdev_printk(KERN_INFO, sdev, "%s MODE_SELECT command.\n",
+ (retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");
err = blk_execute_rq(q, NULL, rq, 1);
- if (err != SCSI_DH_OK)
+ blk_put_request(rq);
+ if (err != SCSI_DH_OK) {
err = mode_select_handle_sense(sdev, h->sense);
+ if (err == SCSI_DH_RETRY && retry_cnt--)
+ goto retry;
+ }
if (err == SCSI_DH_OK)
h->state = RDAC_STATE_ACTIVE;
- blk_put_request(rq);
done:
return err;
}
@@ -590,6 +602,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
{"SUN", "LCSM100_F"},
{"DELL", "MD3000"},
{"DELL", "MD3000i"},
+ {"LSI", "INF-01-00"},
+ {"ENGENIO", "INF-01-00"},
{NULL, NULL},
};
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 1fe0901e8119..6194ed5d02c4 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -271,7 +271,7 @@ rebuild_sys_tab:
pHba->initialized = TRUE;
pHba->state &= ~DPTI_STATE_RESET;
if (adpt_sysfs_class) {
- struct device *dev = device_create_drvdata(adpt_sysfs_class,
+ struct device *dev = device_create(adpt_sysfs_class,
NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
"dpti%d", pHba->unit);
if (IS_ERR(dev)) {
@@ -2445,7 +2445,7 @@ static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
hba_status = detailed_status >> 8;
// calculate resid for sg
- scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+5));
+ scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
pHba = (adpt_hba*) cmd->device->host->hostdata[0];
@@ -2456,7 +2456,7 @@ static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
case I2O_SCSI_DSC_SUCCESS:
cmd->result = (DID_OK << 16);
// handle underflow
- if(readl(reply+5) < cmd->underflow ) {
+ if (readl(reply+20) < cmd->underflow) {
cmd->result = (DID_ERROR <<16);
printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
}
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index a73a6bbb1b2b..976cdd5c94ef 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1626,8 +1626,15 @@ static void map_dma(unsigned int i, struct hostdata *ha)
cpp->sense_len = SCSI_SENSE_BUFFERSIZE;
- count = scsi_dma_map(SCpnt);
- BUG_ON(count < 0);
+ if (!scsi_sg_count(SCpnt)) {
+ cpp->data_len = 0;
+ return;
+ }
+
+ count = pci_map_sg(ha->pdev, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
+ pci_dir);
+ BUG_ON(!count);
+
scsi_for_each_sg(SCpnt, sg, count, k) {
cpp->sglist[k].address = H2DEV(sg_dma_address(sg));
cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(sg));
@@ -1655,7 +1662,9 @@ static void unmap_dma(unsigned int i, struct hostdata *ha)
pci_unmap_single(ha->pdev, DEV2H(cpp->sense_addr),
DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);
- scsi_dma_unmap(SCpnt);
+ if (scsi_sg_count(SCpnt))
+ pci_unmap_sg(ha->pdev, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
+ pci_dir);
if (!DEV2H(cpp->data_len))
pci_dir = PCI_DMA_BIDIRECTIONAL;
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index 952505c006df..152dd15db276 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -14,8 +14,8 @@
* neuffer@goofy.zdv.uni-mainz.de *
* a.arnold@kfa-juelich.de *
* *
- * Updated 2002 by Alan Cox <alan@redhat.com> for Linux *
- * 2.5.x and the newer locking and error handling *
+ * Updated 2002 by Alan Cox <alan@lxorguk.ukuu.org.uk> for *
+ * Linux 2.5.x and the newer locking and error handling *
* *
* This program is free software; you can redistribute it *
* and/or modify it under the terms of the GNU General *
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 62a4618530d0..a680e18b5f3b 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -1453,7 +1453,7 @@ static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
offset = 0;
if (offset) {
- int rounded_up, one_clock;
+ int one_clock;
if (period > esp->max_period) {
period = offset = 0;
@@ -1463,9 +1463,7 @@ static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
goto do_reject;
one_clock = esp->ccycle / 1000;
- rounded_up = (period << 2);
- rounded_up = (rounded_up + one_clock - 1) / one_clock;
- stp = rounded_up;
+ stp = DIV_ROUND_UP(period << 2, one_clock);
if (stp && esp->rev >= FAS236) {
if (stp >= 50)
stp--;
diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile
new file mode 100644
index 000000000000..b78da06d7c0e
--- /dev/null
+++ b/drivers/scsi/fcoe/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_FCOE) += fcoe.o
+
+fcoe-y := \
+ libfcoe.o \
+ fcoe_sw.o \
+ fc_transport_fcoe.o
diff --git a/drivers/scsi/fcoe/fc_transport_fcoe.c b/drivers/scsi/fcoe/fc_transport_fcoe.c
new file mode 100644
index 000000000000..bf7fe6fc0820
--- /dev/null
+++ b/drivers/scsi/fcoe/fc_transport_fcoe.c
@@ -0,0 +1,446 @@
+/*
+ * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/pci.h>
+#include <scsi/libfcoe.h>
+#include <scsi/fc_transport_fcoe.h>
+
+/* internal fcoe transport */
+struct fcoe_transport_internal {
+ struct fcoe_transport *t;
+ struct net_device *netdev;
+ struct list_head list;
+};
+
+/* fcoe transports list and its lock */
+static LIST_HEAD(fcoe_transports);
+static DEFINE_MUTEX(fcoe_transports_lock);
+
+/**
+ * fcoe_transport_default - returns ptr to the default transport fcoe_sw
+ **/
+struct fcoe_transport *fcoe_transport_default(void)
+{
+ return &fcoe_sw_transport;
+}
+
+/**
+ * fcoe_transport_pcidev - get the pci dev from a netdev
+ * @netdev: the netdev that the pci dev will be retrieved from
+ *
+ * Returns: NULL or the corresponding pci_dev
+ **/
+struct pci_dev *fcoe_transport_pcidev(const struct net_device *netdev)
+{
+ if (!netdev->dev.parent)
+ return NULL;
+ return to_pci_dev(netdev->dev.parent);
+}
+
+/**
+ * fcoe_transport_device_lookup - find out whether a netdev is managed
+ * by the given transport
+ * @t: the fcoe transport whose device list is searched
+ * @netdev: the netdev to be looked up
+ *
+ * This walks the transport's device list for an entry matching the netdev.
+ *
+ * Returns: the matching internal entry, or NULL if not found
+ **/
+static struct fcoe_transport_internal *fcoe_transport_device_lookup(
+ struct fcoe_transport *t, struct net_device *netdev)
+{
+ struct fcoe_transport_internal *ti;
+
+	/* check if the device is already on this transport's list */
+ mutex_lock(&t->devlock);
+ list_for_each_entry(ti, &t->devlist, list) {
+ if (ti->netdev == netdev) {
+ mutex_unlock(&t->devlock);
+ return ti;
+ }
+ }
+ mutex_unlock(&t->devlock);
+ return NULL;
+}
+
+/**
+ * fcoe_transport_device_add - assign a transport to a device
+ * @t: the fcoe transport the device is assigned to
+ * @netdev: the netdev the transport is to be attached to
+ *
+ * This adds the netdev to the transport's device list so that the
+ * transport manages it from now on.
+ *
+ * Returns: 0 for success
+ **/
+static int fcoe_transport_device_add(struct fcoe_transport *t,
+ struct net_device *netdev)
+{
+ struct fcoe_transport_internal *ti;
+
+ ti = fcoe_transport_device_lookup(t, netdev);
+ if (ti) {
+ printk(KERN_DEBUG "fcoe_transport_device_add:"
+ "device %s is already added to transport %s\n",
+ netdev->name, t->name);
+ return -EEXIST;
+ }
+ /* allocate an internal struct to host the netdev and the list */
+ ti = kzalloc(sizeof(*ti), GFP_KERNEL);
+ if (!ti)
+ return -ENOMEM;
+
+ ti->t = t;
+ ti->netdev = netdev;
+ INIT_LIST_HEAD(&ti->list);
+ dev_hold(ti->netdev);
+
+ mutex_lock(&t->devlock);
+ list_add(&ti->list, &t->devlist);
+ mutex_unlock(&t->devlock);
+
+ printk(KERN_DEBUG "fcoe_transport_device_add:"
+ "device %s added to transport %s\n",
+ netdev->name, t->name);
+
+ return 0;
+}
+
+/**
+ * fcoe_transport_device_remove - remove a device from its transport
+ * @t: the fcoe transport the device is removed from
+ * @netdev: the netdev to be detached from the transport
+ *
+ * this removes the device from the transport so the given transport will
+ * not manage this device any more
+ *
+ * Returns: 0 for success
+ **/
+static int fcoe_transport_device_remove(struct fcoe_transport *t,
+ struct net_device *netdev)
+{
+ struct fcoe_transport_internal *ti;
+
+ ti = fcoe_transport_device_lookup(t, netdev);
+ if (!ti) {
+ printk(KERN_DEBUG "fcoe_transport_device_remove:"
+ "device %s is not managed by transport %s\n",
+ netdev->name, t->name);
+ return -ENODEV;
+ }
+ mutex_lock(&t->devlock);
+ list_del(&ti->list);
+ mutex_unlock(&t->devlock);
+ printk(KERN_DEBUG "fcoe_transport_device_remove:"
+ "device %s removed from transport %s\n",
+ netdev->name, t->name);
+ dev_put(ti->netdev);
+ kfree(ti);
+ return 0;
+}
+
+/**
+ * fcoe_transport_device_remove_all - remove all from transport devlist
+ * @t: the fcoe transport whose devlist is to be emptied
+ *
+ * This empties the transport's device list so that the transport no
+ * longer manages any device.
+ *
+ * Returns: 0 for success
+ **/
+static void fcoe_transport_device_remove_all(struct fcoe_transport *t)
+{
+ struct fcoe_transport_internal *ti, *tmp;
+
+ mutex_lock(&t->devlock);
+ list_for_each_entry_safe(ti, tmp, &t->devlist, list) {
+ list_del(&ti->list);
+ kfree(ti);
+ }
+ mutex_unlock(&t->devlock);
+}
+
+/**
+ * fcoe_transport_match - check if a transport matches the given hardware
+ * @t: the fcoe transport
+ * @netdev: the netdev to be matched against the transport
+ *
+ * This function checks if the given transport wants to manage the
+ * input netdev. If the transport implements a match function, it is
+ * called; otherwise the pci vendor and device id are compared.
+ *
+ * Returns: true on a match
+ **/
+static bool fcoe_transport_match(struct fcoe_transport *t,
+ struct net_device *netdev)
+{
+ /* match transport by vendor and device id */
+ struct pci_dev *pci;
+
+ pci = fcoe_transport_pcidev(netdev);
+
+ if (pci) {
+ printk(KERN_DEBUG "fcoe_transport_match:"
+ "%s:%x:%x -- %s:%x:%x\n",
+ t->name, t->vendor, t->device,
+ netdev->name, pci->vendor, pci->device);
+
+ /* if transport supports match */
+ if (t->match)
+ return t->match(netdev);
+
+ /* else just compare the vendor and device id: pci only */
+ return (t->vendor == pci->vendor) && (t->device == pci->device);
+ }
+ return false;
+}
+
+/**
+ * fcoe_transport_lookup - find the transport managing the given netdev
+ * @netdev: the netdev a matching transport is looked up for
+ *
+ * This compares the parent device (pci) vendor and device id
+ *
+ * Returns: NULL if not found
+ *
+ * TODO - return default sw transport if no other transport is found
+ **/
+static struct fcoe_transport *fcoe_transport_lookup(
+ struct net_device *netdev)
+{
+ struct fcoe_transport *t;
+
+ mutex_lock(&fcoe_transports_lock);
+ list_for_each_entry(t, &fcoe_transports, list) {
+ if (fcoe_transport_match(t, netdev)) {
+ mutex_unlock(&fcoe_transports_lock);
+ return t;
+ }
+ }
+ mutex_unlock(&fcoe_transports_lock);
+
+ printk(KERN_DEBUG "fcoe_transport_lookup:"
+ "use default transport for %s\n", netdev->name);
+ return fcoe_transport_default();
+}
+
+/**
+ * fcoe_transport_register - adds a fcoe transport to the fcoe transports list
+ * @t: ptr to the fcoe transport to be added
+ *
+ * Returns: 0 for success
+ **/
+int fcoe_transport_register(struct fcoe_transport *t)
+{
+ struct fcoe_transport *tt;
+
+ /* TODO - add fcoe_transport specific initialization here */
+ mutex_lock(&fcoe_transports_lock);
+ list_for_each_entry(tt, &fcoe_transports, list) {
+ if (tt == t) {
+ mutex_unlock(&fcoe_transports_lock);
+ return -EEXIST;
+ }
+ }
+ list_add_tail(&t->list, &fcoe_transports);
+ mutex_unlock(&fcoe_transports_lock);
+
+ mutex_init(&t->devlock);
+ INIT_LIST_HEAD(&t->devlist);
+
+ printk(KERN_DEBUG "fcoe_transport_register:%s\n", t->name);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_transport_register);
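+
+/*
+ * Illustrative example (hypothetical vendor module, not part of this
+ * file): an offload driver fills in a struct fcoe_transport with its
+ * PCI ids (or a .match callback) and registers it:
+ *
+ *	static struct fcoe_transport example_transport = {
+ *		.name	= "example_fcoe",
+ *		.create	= example_create,
+ *		.destroy = example_destroy,
+ *		.vendor	= 0x1234,	hypothetical PCI vendor id
+ *		.device	= 0x5678,	hypothetical PCI device id
+ *	};
+ *
+ *	fcoe_transport_register(&example_transport);
+ */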
+
+/**
+ * fcoe_transport_unregister - remove the transport from the fcoe transports list
+ * @t: ptr to the fcoe transport to be removed
+ *
+ * Returns: 0 for success
+ **/
+int fcoe_transport_unregister(struct fcoe_transport *t)
+{
+ struct fcoe_transport *tt, *tmp;
+
+ mutex_lock(&fcoe_transports_lock);
+ list_for_each_entry_safe(tt, tmp, &fcoe_transports, list) {
+ if (tt == t) {
+ list_del(&t->list);
+ mutex_unlock(&fcoe_transports_lock);
+ fcoe_transport_device_remove_all(t);
+ printk(KERN_DEBUG "fcoe_transport_unregister:%s\n",
+ t->name);
+ return 0;
+ }
+ }
+ mutex_unlock(&fcoe_transports_lock);
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(fcoe_transport_unregister);
+
+/*
+ * fcoe_load_transport_driver - load an offload driver by alias name
+ * @netdev: the target net device
+ *
+ * Requests an offload driver module as the fcoe transport; if that fails,
+ * the SW HBA (fcoe_sw) is used as the transport instead.
+ *
+ * TODO -
+ * 1. supports only PCI devices
+ * 2. needs fix for VLAN and bonding
+ * 3. pure hw fcoe hba may not have netdev
+ *
+ * Returns: 0 for success
+ **/
+int fcoe_load_transport_driver(struct net_device *netdev)
+{
+ struct pci_dev *pci;
+ struct device *dev = netdev->dev.parent;
+
+ if (fcoe_transport_lookup(netdev)) {
+ /* load default transport */
+ printk(KERN_DEBUG "fcoe: already loaded transport for %s\n",
+ netdev->name);
+ return -EEXIST;
+ }
+
+	if (!dev || dev->bus != &pci_bus_type) {
+		printk(KERN_DEBUG "fcoe: only PCI devices are supported\n");
+		return -ENODEV;
+	}
+	pci = to_pci_dev(dev);
+ printk(KERN_DEBUG "fcoe: loading driver fcoe-pci-0x%04x-0x%04x\n",
+ pci->vendor, pci->device);
+
+ return request_module("fcoe-pci-0x%04x-0x%04x",
+ pci->vendor, pci->device);
+}
+EXPORT_SYMBOL_GPL(fcoe_load_transport_driver);
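+
+/*
+ * Illustrative note: to be loadable through the request_module()
+ * pattern above, an offload module would declare a matching alias
+ * (hypothetical ids shown):
+ *
+ *	MODULE_ALIAS("fcoe-pci-0x1234-0x5678");
+ */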
+
+/**
+ * fcoe_transport_attach - load transport to fcoe
+ * @netdev: the netdev the transport to be attached to
+ *
+ * This will look for existing offload driver, if not found, it falls back to
+ * the default sw hba (fcoe_sw) as its fcoe transport.
+ *
+ * Returns: 0 for success
+ **/
+int fcoe_transport_attach(struct net_device *netdev)
+{
+ struct fcoe_transport *t;
+
+ /* find the corresponding transport */
+ t = fcoe_transport_lookup(netdev);
+	if (!t) {
+		printk(KERN_DEBUG "fcoe_transport_attach"
+		       ":no transport found for %s\n",
+		       netdev->name);
+		return -ENODEV;
+	}
+ /* add to the transport */
+ if (fcoe_transport_device_add(t, netdev)) {
+ printk(KERN_DEBUG "fcoe_transport_attach"
+ ":failed to add %s to tramsport %s\n",
+ netdev->name, t->name);
+ return -EIO;
+ }
+ /* transport create function */
+ if (t->create)
+ t->create(netdev);
+
+ printk(KERN_DEBUG "fcoe_transport_attach:transport %s for %s\n",
+ t->name, netdev->name);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_transport_attach);
+
+/**
+ * fcoe_transport_release - unload transport from fcoe
+ * @netdev: the net device on which fcoe is to be released
+ *
+ * Returns: 0 for success
+ **/
+int fcoe_transport_release(struct net_device *netdev)
+{
+ struct fcoe_transport *t;
+
+ /* find the corresponding transport */
+ t = fcoe_transport_lookup(netdev);
+	if (!t) {
+		printk(KERN_DEBUG "fcoe_transport_release:"
+		       "no transport found for %s\n",
+		       netdev->name);
+		return -ENODEV;
+	}
+ /* remove the device from the transport */
+ if (fcoe_transport_device_remove(t, netdev)) {
+ printk(KERN_DEBUG "fcoe_transport_release:"
+ "failed to add %s to tramsport %s\n",
+ netdev->name, t->name);
+ return -EIO;
+ }
+ /* transport destroy function */
+ if (t->destroy)
+ t->destroy(netdev);
+
+ printk(KERN_DEBUG "fcoe_transport_release:"
+ "device %s dettached from transport %s\n",
+ netdev->name, t->name);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_transport_release);
+
+/**
+ * fcoe_transport_init - initializes fcoe transport layer
+ *
+ * This prepares the fcoe transport layer
+ *
+ * Returns: 0 for success
+ **/
+int __init fcoe_transport_init(void)
+{
+ INIT_LIST_HEAD(&fcoe_transports);
+ mutex_init(&fcoe_transports_lock);
+ return 0;
+}
+
+/**
+ * fcoe_transport_exit - cleans up the fcoe transport layer
+ *
+ * This cleans up the fcoe transport layer, removing every transport on
+ * the list; note that the transport destroy functions are not called
+ * here.
+ *
+ * Returns: 0 for success
+ **/
+int __exit fcoe_transport_exit(void)
+{
+ struct fcoe_transport *t, *tmp;
+
+ mutex_lock(&fcoe_transports_lock);
+ list_for_each_entry_safe(t, tmp, &fcoe_transports, list) {
+ list_del(&t->list);
+ mutex_unlock(&fcoe_transports_lock);
+ fcoe_transport_device_remove_all(t);
+ mutex_lock(&fcoe_transports_lock);
+ }
+ mutex_unlock(&fcoe_transports_lock);
+ return 0;
+}
diff --git a/drivers/scsi/fcoe/fcoe_sw.c b/drivers/scsi/fcoe/fcoe_sw.c
new file mode 100644
index 000000000000..dc4cd5e25760
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoe_sw.c
@@ -0,0 +1,494 @@
+/*
+ * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <net/rtnetlink.h>
+
+#include <scsi/fc/fc_els.h>
+#include <scsi/fc/fc_encaps.h>
+#include <scsi/fc/fc_fs.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include <scsi/libfc.h>
+#include <scsi/libfcoe.h>
+#include <scsi/fc_transport_fcoe.h>
+
+#define FCOE_SW_VERSION "0.1"
+#define FCOE_SW_NAME "fcoesw"
+#define FCOE_SW_VENDOR "Open-FCoE.org"
+
+#define FCOE_MAX_LUN 255
+#define FCOE_MAX_FCP_TARGET 256
+
+#define FCOE_MAX_OUTSTANDING_COMMANDS 1024
+
+#define FCOE_MIN_XID 0x0001 /* the min xid supported by fcoe_sw */
+#define FCOE_MAX_XID 0x07ef /* the max xid supported by fcoe_sw */
+
+static struct scsi_transport_template *scsi_transport_fcoe_sw;
+
+struct fc_function_template fcoe_sw_transport_function = {
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_active_fc4s = 1,
+ .show_host_maxframe_size = 1,
+
+ .show_host_port_id = 1,
+ .show_host_supported_speeds = 1,
+ .get_host_speed = fc_get_host_speed,
+ .show_host_speed = 1,
+ .show_host_port_type = 1,
+ .get_host_port_state = fc_get_host_port_state,
+ .show_host_port_state = 1,
+ .show_host_symbolic_name = 1,
+
+ .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .show_host_fabric_name = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+ .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+ .get_fc_host_stats = fc_get_host_stats,
+ .issue_fc_host_lip = fcoe_reset,
+
+ .terminate_rport_io = fc_rport_terminate_io,
+};
+
+static struct scsi_host_template fcoe_sw_shost_template = {
+ .module = THIS_MODULE,
+ .name = "FCoE Driver",
+ .proc_name = FCOE_SW_NAME,
+ .queuecommand = fc_queuecommand,
+ .eh_abort_handler = fc_eh_abort,
+ .eh_device_reset_handler = fc_eh_device_reset,
+ .eh_host_reset_handler = fc_eh_host_reset,
+ .slave_alloc = fc_slave_alloc,
+ .change_queue_depth = fc_change_queue_depth,
+ .change_queue_type = fc_change_queue_type,
+ .this_id = -1,
+ .cmd_per_lun = 32,
+ .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
+ .use_clustering = ENABLE_CLUSTERING,
+ .sg_tablesize = SG_ALL,
+ .max_sectors = 0xffff,
+};
+
+/*
+ * fcoe_sw_lport_config - sets up the fc_lport
+ * @lp: ptr to the fc_lport to be configured
+ *
+ * Returns: 0 for success
+ *
+ */
+static int fcoe_sw_lport_config(struct fc_lport *lp)
+{
+ int i = 0;
+
+ lp->link_status = 0;
+ lp->max_retry_count = 3;
+ lp->e_d_tov = 2 * 1000; /* FC-FS default */
+ lp->r_a_tov = 2 * 2 * 1000;
+ lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
+ FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
+
+ /*
+ * allocate per cpu stats block
+ */
+ for_each_online_cpu(i)
+ lp->dev_stats[i] = kzalloc(sizeof(struct fcoe_dev_stats),
+ GFP_KERNEL);
+
+ /* lport fc_lport related configuration */
+ fc_lport_config(lp);
+
+ return 0;
+}
+
+/*
+ * fcoe_sw_netdev_config - sets up fcoe_softc for lport and network
+ * related properties
+ * @lp : ptr to the fc_lport
+ * @netdev : ptr to the associated netdevice struct
+ *
+ * Must be called after fcoe_sw_lport_config() as it will use lport mutex
+ *
+ * Returns : 0 for success
+ *
+ */
+static int fcoe_sw_netdev_config(struct fc_lport *lp, struct net_device *netdev)
+{
+ u32 mfs;
+ u64 wwnn, wwpn;
+ struct fcoe_softc *fc;
+ u8 flogi_maddr[ETH_ALEN];
+
+ /* Setup lport private data to point to fcoe softc */
+ fc = lport_priv(lp);
+ fc->lp = lp;
+ fc->real_dev = netdev;
+ fc->phys_dev = netdev;
+
+ /* Require support for get_pauseparam ethtool op. */
+ if (netdev->priv_flags & IFF_802_1Q_VLAN)
+ fc->phys_dev = vlan_dev_real_dev(netdev);
+
+	/* Bonding devices are not supported */
+ if ((fc->real_dev->priv_flags & IFF_MASTER_ALB) ||
+ (fc->real_dev->priv_flags & IFF_SLAVE_INACTIVE) ||
+ (fc->real_dev->priv_flags & IFF_MASTER_8023AD)) {
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * Determine max frame size based on underlying device and optional
+ * user-configured limit. If the MFS is too low, fcoe_link_ok()
+ * will return 0, so do this first.
+ */
+ mfs = fc->real_dev->mtu - (sizeof(struct fcoe_hdr) +
+ sizeof(struct fcoe_crc_eof));
+ if (fc_set_mfs(lp, mfs))
+ return -EINVAL;
+
+ lp->link_status = ~FC_PAUSE & ~FC_LINK_UP;
+ if (!fcoe_link_ok(lp))
+ lp->link_status |= FC_LINK_UP;
+
+ /* offload features support */
+ if (fc->real_dev->features & NETIF_F_SG)
+ lp->sg_supp = 1;
+
+ skb_queue_head_init(&fc->fcoe_pending_queue);
+
+ /* setup Source Mac Address */
+ memcpy(fc->ctl_src_addr, fc->real_dev->dev_addr,
+ fc->real_dev->addr_len);
+
+ wwnn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 1, 0);
+ fc_set_wwnn(lp, wwnn);
+ /* XXX - 3rd arg needs to be vlan id */
+ wwpn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 2, 0);
+ fc_set_wwpn(lp, wwpn);
+
+ /*
+ * Add FCoE MAC address as second unicast MAC address
+ * or enter promiscuous mode if not capable of listening
+ * for multiple unicast MACs.
+ */
+ rtnl_lock();
+ memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
+ dev_unicast_add(fc->real_dev, flogi_maddr, ETH_ALEN);
+ rtnl_unlock();
+
+ /*
+ * setup the receive function from ethernet driver
+ * on the ethertype for the given device
+ */
+ fc->fcoe_packet_type.func = fcoe_rcv;
+ fc->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
+ fc->fcoe_packet_type.dev = fc->real_dev;
+ dev_add_pack(&fc->fcoe_packet_type);
+
+ return 0;
+}
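+
+/*
+ * Illustrative note (assuming the usual fcoe_wwn_from_mac() scheme
+ * encoding, where the scheme number becomes the top nibble of the WWN):
+ * with a MAC of 00:1b:21:01:02:03, the calls above would yield roughly
+ *
+ *	wwnn = 0x1000001b21010203	(scheme 1)
+ *	wwpn = 0x2000001b21010203	(scheme 2, vlan id 0)
+ */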
+
+/*
+ * fcoe_sw_shost_config - sets up fc_lport->host
+ * @lp : ptr to the fc_lport
+ * @shost : ptr to the associated scsi host
+ * @dev : device associated to scsi host
+ *
+ * Must be called after fcoe_sw_lport_config() and fcoe_sw_netdev_config()
+ *
+ * Returns : 0 for success
+ *
+ */
+static int fcoe_sw_shost_config(struct fc_lport *lp, struct Scsi_Host *shost,
+ struct device *dev)
+{
+ int rc = 0;
+
+ /* lport scsi host config */
+ lp->host = shost;
+
+ lp->host->max_lun = FCOE_MAX_LUN;
+ lp->host->max_id = FCOE_MAX_FCP_TARGET;
+ lp->host->max_channel = 0;
+ lp->host->transportt = scsi_transport_fcoe_sw;
+
+ /* add the new host to the SCSI-ml */
+ rc = scsi_add_host(lp->host, dev);
+ if (rc) {
+ FC_DBG("fcoe_sw_shost_config:error on scsi_add_host\n");
+ return rc;
+ }
+ sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
+ FCOE_SW_NAME, FCOE_SW_VERSION,
+ fcoe_netdev(lp)->name);
+
+ return 0;
+}
+
+/*
+ * fcoe_sw_em_config - allocates em for this lport
+ * @lp: the lport that the exchange manager is allocated for
+ *
+ * Returns : 0 on success
+ */
+static inline int fcoe_sw_em_config(struct fc_lport *lp)
+{
+ BUG_ON(lp->emp);
+
+ lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
+ FCOE_MIN_XID, FCOE_MAX_XID);
+ if (!lp->emp)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/*
+ * fcoe_sw_destroy - FCoE software HBA tear-down function
+ * @netdev: ptr to the associated net_device
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_sw_destroy(struct net_device *netdev)
+{
+ int cpu;
+ struct fc_lport *lp = NULL;
+ struct fcoe_softc *fc;
+ u8 flogi_maddr[ETH_ALEN];
+
+ BUG_ON(!netdev);
+
+ printk(KERN_DEBUG "fcoe_sw_destroy:interface on %s\n",
+ netdev->name);
+
+ lp = fcoe_hostlist_lookup(netdev);
+ if (!lp)
+ return -ENODEV;
+
+ fc = fcoe_softc(lp);
+
+ /* Logout of the fabric */
+ fc_fabric_logoff(lp);
+
+ /* Remove the instance from fcoe's list */
+ fcoe_hostlist_remove(lp);
+
+ /* Don't listen for Ethernet packets anymore */
+ dev_remove_pack(&fc->fcoe_packet_type);
+
+ /* Cleanup the fc_lport */
+ fc_lport_destroy(lp);
+ fc_fcp_destroy(lp);
+
+ /* Detach from the scsi-ml */
+ fc_remove_host(lp->host);
+ scsi_remove_host(lp->host);
+
+ /* There are no more rports or I/O, free the EM */
+ if (lp->emp)
+ fc_exch_mgr_free(lp->emp);
+
+ /* Delete secondary MAC addresses */
+ rtnl_lock();
+ memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
+ dev_unicast_delete(fc->real_dev, flogi_maddr, ETH_ALEN);
+ if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
+ dev_unicast_delete(fc->real_dev, fc->data_src_addr, ETH_ALEN);
+ rtnl_unlock();
+
+	/* Free the per-CPU receive threads */
+ fcoe_percpu_clean(lp);
+
+ /* Free existing skbs */
+ fcoe_clean_pending_queue(lp);
+
+ /* Free memory used by statistical counters */
+ for_each_online_cpu(cpu)
+ kfree(lp->dev_stats[cpu]);
+
+ /* Release the net_device and Scsi_Host */
+ dev_put(fc->real_dev);
+ scsi_host_put(lp->host);
+
+ return 0;
+}
+
+static struct libfc_function_template fcoe_sw_libfc_fcn_templ = {
+ .frame_send = fcoe_xmit,
+};
+
+/*
+ * fcoe_sw_create - this function creates the fcoe interface
+ * @netdev: pointer to the associated netdevice
+ *
+ * Creates fc_lport struct and scsi_host for lport, configures lport
+ * and starts fabric login.
+ *
+ * Returns : 0 on success
+ */
+static int fcoe_sw_create(struct net_device *netdev)
+{
+ int rc;
+ struct fc_lport *lp = NULL;
+ struct fcoe_softc *fc;
+ struct Scsi_Host *shost;
+
+ BUG_ON(!netdev);
+
+ printk(KERN_DEBUG "fcoe_sw_create:interface on %s\n",
+ netdev->name);
+
+ lp = fcoe_hostlist_lookup(netdev);
+ if (lp)
+ return -EEXIST;
+
+ shost = fcoe_host_alloc(&fcoe_sw_shost_template,
+ sizeof(struct fcoe_softc));
+ if (!shost) {
+ FC_DBG("Could not allocate host structure\n");
+ return -ENOMEM;
+ }
+ lp = shost_priv(shost);
+ fc = lport_priv(lp);
+
+ /* configure fc_lport, e.g., em */
+ rc = fcoe_sw_lport_config(lp);
+ if (rc) {
+ FC_DBG("Could not configure lport\n");
+ goto out_host_put;
+ }
+
+ /* configure lport network properties */
+ rc = fcoe_sw_netdev_config(lp, netdev);
+ if (rc) {
+ FC_DBG("Could not configure netdev for lport\n");
+ goto out_host_put;
+ }
+
+ /* configure lport scsi host properties */
+ rc = fcoe_sw_shost_config(lp, shost, &netdev->dev);
+ if (rc) {
+ FC_DBG("Could not configure shost for lport\n");
+ goto out_host_put;
+ }
+
+ /* lport exch manager allocation */
+ rc = fcoe_sw_em_config(lp);
+ if (rc) {
+ FC_DBG("Could not configure em for lport\n");
+ goto out_host_put;
+ }
+
+ /* Initialize the library */
+ rc = fcoe_libfc_config(lp, &fcoe_sw_libfc_fcn_templ);
+ if (rc) {
+ FC_DBG("Could not configure libfc for lport!\n");
+ goto out_lp_destroy;
+ }
+
+ /* add to lports list */
+ fcoe_hostlist_add(lp);
+
+ lp->boot_time = jiffies;
+
+ fc_fabric_login(lp);
+
+ dev_hold(netdev);
+
+ return rc;
+
+out_lp_destroy:
+ fc_exch_mgr_free(lp->emp); /* Free the EM */
+out_host_put:
+ scsi_host_put(lp->host);
+ return rc;
+}
+
+/*
+ * fcoe_sw_match - the fcoe sw transport match function
+ *
+ * Returns : false always
+ */
+static bool fcoe_sw_match(struct net_device *netdev)
+{
+ /* FIXME - for sw transport, always return false */
+ return false;
+}
+
+/* the sw hba fcoe transport */
+struct fcoe_transport fcoe_sw_transport = {
+ .name = "fcoesw",
+ .create = fcoe_sw_create,
+ .destroy = fcoe_sw_destroy,
+ .match = fcoe_sw_match,
+ .vendor = 0x0,
+ .device = 0xffff,
+};
+
+/*
+ * fcoe_sw_init - registers fcoe_sw_transport
+ *
+ * Returns : 0 on success
+ */
+int __init fcoe_sw_init(void)
+{
+ /* attach to scsi transport */
+ scsi_transport_fcoe_sw =
+ fc_attach_transport(&fcoe_sw_transport_function);
+ if (!scsi_transport_fcoe_sw) {
+ printk(KERN_ERR "fcoe_sw_init:fc_attach_transport() failed\n");
+ return -ENODEV;
+ }
+ /* register sw transport */
+ fcoe_transport_register(&fcoe_sw_transport);
+ return 0;
+}
+
+/*
+ * fcoe_sw_exit - unregisters fcoe_sw_transport
+ *
+ * Returns : 0 on success
+ */
+int __exit fcoe_sw_exit(void)
+{
+	/* detach the transport */
+ fc_release_transport(scsi_transport_fcoe_sw);
+ fcoe_transport_unregister(&fcoe_sw_transport);
+ return 0;
+}
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
new file mode 100644
index 000000000000..e419f486cdb3
--- /dev/null
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -0,0 +1,1510 @@
+/*
+ * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/kthread.h>
+#include <linux/crc32.h>
+#include <linux/cpu.h>
+#include <linux/fs.h>
+#include <linux/sysfs.h>
+#include <linux/ctype.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsicam.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+#include <net/rtnetlink.h>
+
+#include <scsi/fc/fc_encaps.h>
+
+#include <scsi/libfc.h>
+#include <scsi/fc_frame.h>
+#include <scsi/libfcoe.h>
+#include <scsi/fc_transport_fcoe.h>
+
+static int debug_fcoe;
+
+#define FCOE_MAX_QUEUE_DEPTH 256
+
+/* destination address mode */
+#define FCOE_GW_ADDR_MODE 0x00
+#define FCOE_FCOUI_ADDR_MODE 0x01
+
+#define FCOE_WORD_TO_BYTE 4
+
+MODULE_AUTHOR("Open-FCoE.org");
+MODULE_DESCRIPTION("FCoE");
+MODULE_LICENSE("GPL");
+
+/* fcoe host list */
+LIST_HEAD(fcoe_hostlist);
+DEFINE_RWLOCK(fcoe_hostlist_lock);
+DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
+struct fcoe_percpu_s *fcoe_percpu[NR_CPUS];
+
+
+/* Function Prototypes */
+static int fcoe_check_wait_queue(struct fc_lport *);
+static void fcoe_insert_wait_queue_head(struct fc_lport *, struct sk_buff *);
+static void fcoe_insert_wait_queue(struct fc_lport *, struct sk_buff *);
+static void fcoe_recv_flogi(struct fcoe_softc *, struct fc_frame *, u8 *);
+#ifdef CONFIG_HOTPLUG_CPU
+static int fcoe_cpu_callback(struct notifier_block *, ulong, void *);
+#endif /* CONFIG_HOTPLUG_CPU */
+static int fcoe_device_notification(struct notifier_block *, ulong, void *);
+static void fcoe_dev_setup(void);
+static void fcoe_dev_cleanup(void);
+
+/* notification function from net device */
+static struct notifier_block fcoe_notifier = {
+ .notifier_call = fcoe_device_notification,
+};
+
+
+#ifdef CONFIG_HOTPLUG_CPU
+static struct notifier_block fcoe_cpu_notifier = {
+ .notifier_call = fcoe_cpu_callback,
+};
+
+/**
+ * fcoe_create_percpu_data - creates the associated cpu data
+ * @cpu: index for the cpu where fcoe cpu data will be created
+ *
+ * create percpu stats block, from cpu add notifier
+ *
+ * Returns: none
+ **/
+static void fcoe_create_percpu_data(int cpu)
+{
+ struct fc_lport *lp;
+ struct fcoe_softc *fc;
+
+ write_lock_bh(&fcoe_hostlist_lock);
+ list_for_each_entry(fc, &fcoe_hostlist, list) {
+ lp = fc->lp;
+ if (lp->dev_stats[cpu] == NULL)
+ lp->dev_stats[cpu] =
+ kzalloc(sizeof(struct fcoe_dev_stats),
+ GFP_KERNEL);
+ }
+ write_unlock_bh(&fcoe_hostlist_lock);
+}
+
+/**
+ * fcoe_destroy_percpu_data - destroys the associated cpu data
+ * @cpu: index for the cpu where fcoe cpu data will be destroyed
+ *
+ * destroy percpu stats block called by cpu add/remove notifier
+ *
+ * Returns: none
+ **/
+static void fcoe_destroy_percpu_data(int cpu)
+{
+ struct fc_lport *lp;
+ struct fcoe_softc *fc;
+
+ write_lock_bh(&fcoe_hostlist_lock);
+ list_for_each_entry(fc, &fcoe_hostlist, list) {
+ lp = fc->lp;
+ kfree(lp->dev_stats[cpu]);
+ lp->dev_stats[cpu] = NULL;
+ }
+ write_unlock_bh(&fcoe_hostlist_lock);
+}
+
+/**
+ * fcoe_cpu_callback - fcoe cpu hotplug event callback
+ * @nfb: callback data block
+ * @action: event triggering the callback
+ * @hcpu: index for the cpu of this event
+ *
+ * this creates or destroys per cpu data for fcoe
+ *
+ * Returns NOTIFY_OK always.
+ **/
+static int fcoe_cpu_callback(struct notifier_block *nfb, unsigned long action,
+ void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ fcoe_create_percpu_data(cpu);
+ break;
+ case CPU_DEAD:
+ fcoe_destroy_percpu_data(cpu);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/**
+ * fcoe_rcv - this is the fcoe receive function called by NET_RX_SOFTIRQ
+ * @skb: the receive skb
+ * @dev: associated net device
+ * @ptype: context
+ * @olddev: last device
+ *
+ * this function receives the packet, builds an fc frame and passes it up
+ *
+ * Returns: 0 for success
+ **/
+int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype, struct net_device *olddev)
+{
+ struct fc_lport *lp;
+ struct fcoe_rcv_info *fr;
+ struct fcoe_softc *fc;
+ struct fcoe_dev_stats *stats;
+ struct fc_frame_header *fh;
+ unsigned short oxid;
+ int cpu_idx;
+ struct fcoe_percpu_s *fps;
+
+ fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
+ lp = fc->lp;
+ if (unlikely(lp == NULL)) {
+ FC_DBG("cannot find hba structure");
+ goto err2;
+ }
+
+ if (unlikely(debug_fcoe)) {
+ FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p "
+ "end:%p sum:%d dev:%s", skb->len, skb->data_len,
+ skb->head, skb->data, skb_tail_pointer(skb),
+ skb_end_pointer(skb), skb->csum,
+ skb->dev ? skb->dev->name : "<NULL>");
+
+ }
+
+ /* check for FCOE packet type */
+ if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
+ FC_DBG("wrong FC type frame");
+ goto err;
+ }
+
+ /*
+ * Check for minimum frame length, and make sure required FCoE
+ * and FC headers are pulled into the linear data area.
+ */
+ if (unlikely((skb->len < FCOE_MIN_FRAME) ||
+ !pskb_may_pull(skb, FCOE_HEADER_LEN)))
+ goto err;
+
+ skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
+ fh = (struct fc_frame_header *) skb_transport_header(skb);
+
+ oxid = ntohs(fh->fh_ox_id);
+
+ fr = fcoe_dev_from_skb(skb);
+ fr->fr_dev = lp;
+ fr->ptype = ptype;
+ cpu_idx = 0;
+#ifdef CONFIG_SMP
+ /*
+	 * The incoming frame exchange id (oxid) is ANDed with the number
+	 * of online cpus minus one to get cpu_idx, which is then used to
+	 * select a per-cpu kernel thread from fcoe_percpu. If that cpu is
+	 * offline or has no kernel thread, cpu_idx falls back to the first
+	 * online cpu index.
+ */
+ cpu_idx = oxid & (num_online_cpus() - 1);
+ if (!fcoe_percpu[cpu_idx] || !cpu_online(cpu_idx))
+ cpu_idx = first_cpu(cpu_online_map);
+#endif
+ fps = fcoe_percpu[cpu_idx];
+
+ spin_lock_bh(&fps->fcoe_rx_list.lock);
+ __skb_queue_tail(&fps->fcoe_rx_list, skb);
+ if (fps->fcoe_rx_list.qlen == 1)
+ wake_up_process(fps->thread);
+
+ spin_unlock_bh(&fps->fcoe_rx_list.lock);
+
+ return 0;
+err:
+#ifdef CONFIG_SMP
+ stats = lp->dev_stats[smp_processor_id()];
+#else
+ stats = lp->dev_stats[0];
+#endif
+ if (stats)
+ stats->ErrorFrames++;
+
+err2:
+ kfree_skb(skb);
+ return -1;
+}
+EXPORT_SYMBOL_GPL(fcoe_rcv);
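+
+/*
+ * Illustrative note on the rx cpu selection above: with 4 online cpus,
+ * oxid & (4 - 1) maps exchange 0x10a1 to cpu 1, 0x10a2 to cpu 2, and so
+ * on. The mask only distributes evenly for power-of-two online cpu
+ * counts and can pick an offline or threadless index otherwise, which
+ * is why the code falls back to first_cpu(cpu_online_map).
+ */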
+
+/**
+ * fcoe_start_io - pass to netdev to start xmit for fcoe
+ * @skb: the skb to be xmitted
+ *
+ * Returns: 0 for success
+ **/
+static inline int fcoe_start_io(struct sk_buff *skb)
+{
+ int rc;
+
+ skb_get(skb);
+ rc = dev_queue_xmit(skb);
+ if (rc != 0)
+ return rc;
+ kfree_skb(skb);
+ return 0;
+}
+
+/**
+ * fcoe_get_paged_crc_eof - allocate a page fragment for the crc_eof trailer
+ * @skb: the skb to be xmitted
+ * @tlen: trailer length (sizeof(struct fcoe_crc_eof))
+ *
+ * Returns: 0 for success
+ **/
+static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
+{
+ struct fcoe_percpu_s *fps;
+ struct page *page;
+ int cpu_idx;
+
+ cpu_idx = get_cpu();
+ fps = fcoe_percpu[cpu_idx];
+ page = fps->crc_eof_page;
+ if (!page) {
+ page = alloc_page(GFP_ATOMIC);
+ if (!page) {
+ put_cpu();
+ return -ENOMEM;
+ }
+ fps->crc_eof_page = page;
+ WARN_ON(fps->crc_eof_offset != 0);
+ }
+
+ get_page(page);
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
+ fps->crc_eof_offset, tlen);
+ skb->len += tlen;
+ skb->data_len += tlen;
+ skb->truesize += tlen;
+ fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
+
+ if (fps->crc_eof_offset >= PAGE_SIZE) {
+ fps->crc_eof_page = NULL;
+ fps->crc_eof_offset = 0;
+ put_page(page);
+ }
+ put_cpu();
+ return 0;
+}
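+
+/*
+ * Illustrative note (assuming the 8-byte struct fcoe_crc_eof layout):
+ * the shared crc_eof page above holds PAGE_SIZE / 8 trailers (512 on
+ * 4K pages) before it is retired and a fresh page is allocated.
+ */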
+
+/**
+ * fcoe_fc_crc - calculates FC CRC in this fcoe skb
+ * @fp: the fc_frame containing data to be checksummed
+ *
+ * This uses crc32() to calculate the crc for fc frame
+ * Return : 32 bit crc
+ *
+ **/
+u32 fcoe_fc_crc(struct fc_frame *fp)
+{
+ struct sk_buff *skb = fp_skb(fp);
+ struct skb_frag_struct *frag;
+ unsigned char *data;
+ unsigned long off, len, clen;
+ u32 crc;
+ unsigned i;
+
+ crc = crc32(~0, skb->data, skb_headlen(skb));
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ off = frag->page_offset;
+ len = frag->size;
+ while (len > 0) {
+ clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
+ data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
+ KM_SKB_DATA_SOFTIRQ);
+ crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
+ kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
+ off += clen;
+ len -= clen;
+ }
+ }
+ return crc;
+}
+EXPORT_SYMBOL_GPL(fcoe_fc_crc);
+
+/**
+ * fcoe_xmit - FCoE frame transmit function
+ * @lp: the associated local port
+ * @fp: the fc_frame to be transmitted
+ *
+ * Return : 0 for success
+ *
+ **/
+int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
+{
+ int wlen, rc = 0;
+ u32 crc;
+ struct ethhdr *eh;
+ struct fcoe_crc_eof *cp;
+ struct sk_buff *skb;
+ struct fcoe_dev_stats *stats;
+ struct fc_frame_header *fh;
+ unsigned int hlen; /* header length implies the version */
+ unsigned int tlen; /* trailer length */
+ unsigned int elen; /* eth header, may include vlan */
+ int flogi_in_progress = 0;
+ struct fcoe_softc *fc;
+ u8 sof, eof;
+ struct fcoe_hdr *hp;
+
+ WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
+
+ fc = fcoe_softc(lp);
+ /*
+ * if it is a flogi then we need to learn gw-addr
+ * and my own fcid
+ */
+ fh = fc_frame_header_get(fp);
+ if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
+ if (fc_frame_payload_op(fp) == ELS_FLOGI) {
+ fc->flogi_oxid = ntohs(fh->fh_ox_id);
+ fc->address_mode = FCOE_FCOUI_ADDR_MODE;
+ fc->flogi_progress = 1;
+ flogi_in_progress = 1;
+ } else if (fc->flogi_progress && ntoh24(fh->fh_s_id) != 0) {
+ /*
+ * Here we must've gotten an SID by accepting an FLOGI
+ * from a point-to-point connection. Switch to using
+ * the source mac based on the SID. The destination
+ * MAC in this case would have been set by receving the
+ * FLOGI.
+ */
+ fc_fcoe_set_mac(fc->data_src_addr, fh->fh_s_id);
+ fc->flogi_progress = 0;
+ }
+ }
+
+ skb = fp_skb(fp);
+ sof = fr_sof(fp);
+ eof = fr_eof(fp);
+
+ elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ?
+ sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr);
+ hlen = sizeof(struct fcoe_hdr);
+ tlen = sizeof(struct fcoe_crc_eof);
+ wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
+
+ /* crc offload */
+ if (likely(lp->crc_offload)) {
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum_start = skb_headroom(skb);
+ skb->csum_offset = skb->len;
+ crc = 0;
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ crc = fcoe_fc_crc(fp);
+ }
+
+ /* copy fc crc and eof to the skb buff */
+ if (skb_is_nonlinear(skb)) {
+ skb_frag_t *frag;
+ if (fcoe_get_paged_crc_eof(skb, tlen)) {
+			kfree_skb(skb);
+ return -ENOMEM;
+ }
+ frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
+ cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
+ + frag->page_offset;
+ } else {
+ cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
+ }
+
+ memset(cp, 0, sizeof(*cp));
+ cp->fcoe_eof = eof;
+ cp->fcoe_crc32 = cpu_to_le32(~crc);
+
+ if (skb_is_nonlinear(skb)) {
+ kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
+ cp = NULL;
+ }
+
+	/* adjust skb network/transport offsets to match mac/fcoe/fc */
+ skb_push(skb, elen + hlen);
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb->mac_len = elen;
+ skb->protocol = htons(ETH_P_802_3);
+ skb->dev = fc->real_dev;
+
+ /* fill up mac and fcoe headers */
+ eh = eth_hdr(skb);
+ eh->h_proto = htons(ETH_P_FCOE);
+ if (fc->address_mode == FCOE_FCOUI_ADDR_MODE)
+ fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
+ else
+ /* insert GW address */
+ memcpy(eh->h_dest, fc->dest_addr, ETH_ALEN);
+
+ if (unlikely(flogi_in_progress))
+ memcpy(eh->h_source, fc->ctl_src_addr, ETH_ALEN);
+ else
+ memcpy(eh->h_source, fc->data_src_addr, ETH_ALEN);
+
+ hp = (struct fcoe_hdr *)(eh + 1);
+ memset(hp, 0, sizeof(*hp));
+ if (FC_FCOE_VER)
+ FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
+ hp->fcoe_sof = sof;
+
+ /* update tx stats: regardless if LLD fails */
+ stats = lp->dev_stats[smp_processor_id()];
+ if (stats) {
+ stats->TxFrames++;
+ stats->TxWords += wlen;
+ }
+
+ /* send down to lld */
+ fr_dev(fp) = lp;
+ if (fc->fcoe_pending_queue.qlen)
+ rc = fcoe_check_wait_queue(lp);
+
+ if (rc == 0)
+ rc = fcoe_start_io(skb);
+
+ if (rc) {
+ fcoe_insert_wait_queue(lp, skb);
+ if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
+ fc_pause(lp);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_xmit);
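+
+/*
+ * Illustrative note tying tx and rx together: fcoe_xmit() stores the
+ * complemented CRC (cpu_to_le32(~crc)) in the trailer, and the rx
+ * thread validates it with the matching check
+ *
+ *	le32_to_cpu(fr_crc(fp)) != ~crc32(~0, skb->data, fr_len)
+ *
+ * so both sides use the same crc32 seed and complement convention.
+ */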
+
+/*
+ * fcoe_percpu_receive_thread - recv thread per cpu
+ * @arg: ptr to the fcoe per cpu struct
+ *
+ * Return: 0 for success
+ *
+ */
+int fcoe_percpu_receive_thread(void *arg)
+{
+ struct fcoe_percpu_s *p = arg;
+ u32 fr_len;
+ struct fc_lport *lp;
+ struct fcoe_rcv_info *fr;
+ struct fcoe_dev_stats *stats;
+ struct fc_frame_header *fh;
+ struct sk_buff *skb;
+ struct fcoe_crc_eof crc_eof;
+ struct fc_frame *fp;
+ u8 *mac = NULL;
+ struct fcoe_softc *fc;
+ struct fcoe_hdr *hp;
+
+ set_user_nice(current, 19);
+
+ while (!kthread_should_stop()) {
+
+ spin_lock_bh(&p->fcoe_rx_list.lock);
+ while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_bh(&p->fcoe_rx_list.lock);
+ schedule();
+ set_current_state(TASK_RUNNING);
+ if (kthread_should_stop())
+ return 0;
+ spin_lock_bh(&p->fcoe_rx_list.lock);
+ }
+ spin_unlock_bh(&p->fcoe_rx_list.lock);
+ fr = fcoe_dev_from_skb(skb);
+ lp = fr->fr_dev;
+ if (unlikely(lp == NULL)) {
+ FC_DBG("invalid HBA Structure");
+ kfree_skb(skb);
+ continue;
+ }
+
+ stats = lp->dev_stats[smp_processor_id()];
+
+ if (unlikely(debug_fcoe)) {
+ FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p "
+ "tail:%p end:%p sum:%d dev:%s",
+ skb->len, skb->data_len,
+ skb->head, skb->data, skb_tail_pointer(skb),
+ skb_end_pointer(skb), skb->csum,
+ skb->dev ? skb->dev->name : "<NULL>");
+ }
+
+ /*
+ * Save source MAC address before discarding header.
+ */
+ fc = lport_priv(lp);
+ if (unlikely(fc->flogi_progress))
+ mac = eth_hdr(skb)->h_source;
+
+ if (skb_is_nonlinear(skb))
+ skb_linearize(skb); /* not ideal */
+
+ /*
+ * Frame length checks and setting up the header pointers
+ * was done in fcoe_rcv already.
+ */
+ hp = (struct fcoe_hdr *) skb_network_header(skb);
+ fh = (struct fc_frame_header *) skb_transport_header(skb);
+
+ if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
+ if (stats) {
+ if (stats->ErrorFrames < 5)
+ FC_DBG("unknown FCoE version %x",
+ FC_FCOE_DECAPS_VER(hp));
+ stats->ErrorFrames++;
+ }
+ kfree_skb(skb);
+ continue;
+ }
+
+ skb_pull(skb, sizeof(struct fcoe_hdr));
+ fr_len = skb->len - sizeof(struct fcoe_crc_eof);
+
+ if (stats) {
+ stats->RxFrames++;
+ stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+ }
+
+ fp = (struct fc_frame *)skb;
+ fc_frame_init(fp);
+ fr_dev(fp) = lp;
+ fr_sof(fp) = hp->fcoe_sof;
+
+ /* Copy out the CRC and EOF trailer for access */
+ if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
+ kfree_skb(skb);
+ continue;
+ }
+ fr_eof(fp) = crc_eof.fcoe_eof;
+ fr_crc(fp) = crc_eof.fcoe_crc32;
+ if (pskb_trim(skb, fr_len)) {
+ kfree_skb(skb);
+ continue;
+ }
+
+ /*
+ * We only check CRC if no offload is available and if it is
+ * it's solicited data, in which case, the FCP layer would
+ * check it during the copy.
+ */
+ if (lp->crc_offload)
+ fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+ else
+ fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
+
+ fh = fc_frame_header_get(fp);
+ if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
+ fh->fh_type == FC_TYPE_FCP) {
+ fc_exch_recv(lp, lp->emp, fp);
+ continue;
+ }
+ if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
+ if (le32_to_cpu(fr_crc(fp)) !=
+ ~crc32(~0, skb->data, fr_len)) {
+			if (debug_fcoe ||
+			    (stats && stats->InvalidCRCCount < 5))
+				printk(KERN_WARNING "fcoe: dropping "
+				       "frame with CRC error\n");
+			if (stats) {
+				stats->InvalidCRCCount++;
+				stats->ErrorFrames++;
+			}
+ fc_frame_free(fp);
+ continue;
+ }
+ fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+ }
+ /* non flogi and non data exchanges are handled here */
+ if (unlikely(fc->flogi_progress))
+ fcoe_recv_flogi(fc, fp, mac);
+ fc_exch_recv(lp, lp->emp, fp);
+ }
+ return 0;
+}
+
+/**
+ * fcoe_recv_flogi - flogi receive function
+ * @fc: associated fcoe_softc
+ * @fp: the received frame
+ * @sa: the source address of this flogi
+ *
+ * This parses the flogi response and sets the corresponding mac address
+ * for the initiator, either OUI based or GW based.
+ *
+ * Returns: none
+ **/
+static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa)
+{
+ struct fc_frame_header *fh;
+ u8 op;
+
+ fh = fc_frame_header_get(fp);
+ if (fh->fh_type != FC_TYPE_ELS)
+ return;
+ op = fc_frame_payload_op(fp);
+ if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP &&
+ fc->flogi_oxid == ntohs(fh->fh_ox_id)) {
+ /*
+ * FLOGI accepted.
+ * If the src mac addr is FC_OUI-based, then we mark the
+ * address_mode flag to use FC_OUI-based Ethernet DA.
+ * Otherwise we use the FCoE gateway addr
+ */
+ if (!compare_ether_addr(sa, (u8[6]) FC_FCOE_FLOGI_MAC)) {
+ fc->address_mode = FCOE_FCOUI_ADDR_MODE;
+ } else {
+ memcpy(fc->dest_addr, sa, ETH_ALEN);
+ fc->address_mode = FCOE_GW_ADDR_MODE;
+ }
+
+ /*
+ * Remove any previously-set unicast MAC filter.
+ * Add secondary FCoE MAC address filter for our OUI.
+ */
+ rtnl_lock();
+ if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
+ dev_unicast_delete(fc->real_dev, fc->data_src_addr,
+ ETH_ALEN);
+ fc_fcoe_set_mac(fc->data_src_addr, fh->fh_d_id);
+ dev_unicast_add(fc->real_dev, fc->data_src_addr, ETH_ALEN);
+ rtnl_unlock();
+
+ fc->flogi_progress = 0;
+ } else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) {
+ /*
+ * Save source MAC for point-to-point responses.
+ */
+ memcpy(fc->dest_addr, sa, ETH_ALEN);
+ fc->address_mode = FCOE_GW_ADDR_MODE;
+ }
+}
+
+/**
+ * fcoe_watchdog - fcoe timer callback
+ * @vp: unused timer callback argument
+ *
+ * This checks the pending queue length for fcoe and pauses fcoe if
+ * FCOE_MAX_QUEUE_DEPTH is reached. This is done for every fc_lport on
+ * the fcoe_hostlist.
+ *
+ * Returns: none
+ **/
+void fcoe_watchdog(ulong vp)
+{
+ struct fc_lport *lp;
+ struct fcoe_softc *fc;
+ int paused = 0;
+
+ read_lock(&fcoe_hostlist_lock);
+ list_for_each_entry(fc, &fcoe_hostlist, list) {
+ lp = fc->lp;
+ if (lp) {
+ if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
+ paused = 1;
+ if (fcoe_check_wait_queue(lp) < FCOE_MAX_QUEUE_DEPTH) {
+ if (paused)
+ fc_unpause(lp);
+ }
+ }
+ }
+ read_unlock(&fcoe_hostlist_lock);
+
+ fcoe_timer.expires = jiffies + (1 * HZ);
+ add_timer(&fcoe_timer);
+}
+
+
+/**
+ * fcoe_check_wait_queue - try to flush the fcoe pending xmit queue
+ * @lp: the fc_lport whose pending queue is to be flushed
+ *
+ * This dequeues each skb from the head of the pending queue and calls
+ * fcoe_start_io() on it; if a transmit fails, the skb is put back at
+ * the head of the queue to be retried later.
+ *
+ * The pending queue is used when an skb transmit fails: the skb goes
+ * into the pending queue, which is emptied by the timer function OR
+ * by the next skb transmit.
+ *
+ * Returns: the remaining pending queue length, or an error from
+ * fcoe_start_io()
+ **/
+static int fcoe_check_wait_queue(struct fc_lport *lp)
+{
+ int rc, unpause = 0;
+ int paused = 0;
+ struct sk_buff *skb;
+ struct fcoe_softc *fc;
+
+ fc = fcoe_softc(lp);
+ spin_lock_bh(&fc->fcoe_pending_queue.lock);
+
+ /*
+ * is this interface paused?
+ */
+ if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
+ paused = 1;
+ if (fc->fcoe_pending_queue.qlen) {
+ while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
+ spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+ rc = fcoe_start_io(skb);
+ if (rc) {
+ fcoe_insert_wait_queue_head(lp, skb);
+ return rc;
+ }
+ spin_lock_bh(&fc->fcoe_pending_queue.lock);
+ }
+ if (fc->fcoe_pending_queue.qlen < FCOE_MAX_QUEUE_DEPTH)
+ unpause = 1;
+ }
+ spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+ if ((unpause) && (paused))
+ fc_unpause(lp);
+ return fc->fcoe_pending_queue.qlen;
+}
+
+/**
+ * fcoe_insert_wait_queue_head - puts skb to fcoe pending queue head
+ * @lp: the fc_lport for this skb
+ * @skb: the skb to be queued
+ *
+ * Returns: none
+ **/
+static void fcoe_insert_wait_queue_head(struct fc_lport *lp,
+ struct sk_buff *skb)
+{
+ struct fcoe_softc *fc;
+
+ fc = fcoe_softc(lp);
+ spin_lock_bh(&fc->fcoe_pending_queue.lock);
+ __skb_queue_head(&fc->fcoe_pending_queue, skb);
+ spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+}
+
+/**
+ * fcoe_insert_wait_queue - put the skb into fcoe pending queue tail
+ * @lp: the fc_lport for this skb
+ * @skb: the skb to be queued
+ *
+ * Returns: none
+ **/
+static void fcoe_insert_wait_queue(struct fc_lport *lp,
+ struct sk_buff *skb)
+{
+ struct fcoe_softc *fc;
+
+ fc = fcoe_softc(lp);
+ spin_lock_bh(&fc->fcoe_pending_queue.lock);
+ __skb_queue_tail(&fc->fcoe_pending_queue, skb);
+ spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+}
+
+/**
+ * fcoe_dev_setup - setup link change notification interface
+ *
+ **/
+static void fcoe_dev_setup(void)
+{
+ /*
+	 * register a netdevice notifier so that link state
+	 * changes are reported to us
+ */
+ register_netdevice_notifier(&fcoe_notifier);
+}
+
+/**
+ * fcoe_dev_cleanup - cleanup link change notification interface
+ **/
+static void fcoe_dev_cleanup(void)
+{
+ unregister_netdevice_notifier(&fcoe_notifier);
+}
+
+/**
+ * fcoe_device_notification - netdev event notification callback
+ * @notifier: context of the notification
+ * @event: type of event
+ * @ptr: the net_device the event occurred on
+ *
+ * This function is called on network device events such as link changes.
+ *
+ * Returns: NOTIFY_OK if an lport is bound to the device, NOTIFY_DONE otherwise
+ **/
+static int fcoe_device_notification(struct notifier_block *notifier,
+ ulong event, void *ptr)
+{
+ struct fc_lport *lp = NULL;
+ struct net_device *real_dev = ptr;
+ struct fcoe_softc *fc;
+ struct fcoe_dev_stats *stats;
+ u16 new_status;
+ u32 mfs;
+ int rc = NOTIFY_OK;
+
+ read_lock(&fcoe_hostlist_lock);
+ list_for_each_entry(fc, &fcoe_hostlist, list) {
+ if (fc->real_dev == real_dev) {
+ lp = fc->lp;
+ break;
+ }
+ }
+ read_unlock(&fcoe_hostlist_lock);
+ if (lp == NULL) {
+ rc = NOTIFY_DONE;
+ goto out;
+ }
+
+ new_status = lp->link_status;
+ switch (event) {
+ case NETDEV_DOWN:
+ case NETDEV_GOING_DOWN:
+ new_status &= ~FC_LINK_UP;
+ break;
+ case NETDEV_UP:
+ case NETDEV_CHANGE:
+ new_status &= ~FC_LINK_UP;
+ if (!fcoe_link_ok(lp))
+ new_status |= FC_LINK_UP;
+ break;
+ case NETDEV_CHANGEMTU:
+ mfs = fc->real_dev->mtu -
+ (sizeof(struct fcoe_hdr) +
+ sizeof(struct fcoe_crc_eof));
+ if (mfs >= FC_MIN_MAX_FRAME)
+ fc_set_mfs(lp, mfs);
+ new_status &= ~FC_LINK_UP;
+ if (!fcoe_link_ok(lp))
+ new_status |= FC_LINK_UP;
+ break;
+ case NETDEV_REGISTER:
+ break;
+ default:
+ FC_DBG("unknown event %ld call", event);
+ }
+ if (lp->link_status != new_status) {
+ if ((new_status & FC_LINK_UP) == FC_LINK_UP)
+ fc_linkup(lp);
+ else {
+ stats = lp->dev_stats[smp_processor_id()];
+ if (stats)
+ stats->LinkFailureCount++;
+ fc_linkdown(lp);
+ fcoe_clean_pending_queue(lp);
+ }
+ }
+out:
+ return rc;
+}
+
+/**
+ * fcoe_if_to_netdev - parse an interface name and resolve it to a netdev
+ * @buffer: incoming buffer holding the interface name
+ *
+ * Returns: ptr to the net_device (with a reference held) or NULL
+ **/
+static struct net_device *fcoe_if_to_netdev(const char *buffer)
+{
+ char *cp;
+ char ifname[IFNAMSIZ + 2];
+
+ if (buffer) {
+ strlcpy(ifname, buffer, IFNAMSIZ);
+ cp = ifname + strlen(ifname);
+ while (--cp >= ifname && *cp == '\n')
+ *cp = '\0';
+ return dev_get_by_name(&init_net, ifname);
+ }
+ return NULL;
+}
+
+/**
+ * fcoe_netdev_to_module_owner - find the NIC driver module that owns the netdev
+ * @netdev: the target netdev
+ *
+ * Returns: ptr to the struct module, NULL for failure
+ **/
+static struct module *fcoe_netdev_to_module_owner(
+ const struct net_device *netdev)
+{
+ struct device *dev;
+
+ if (!netdev)
+ return NULL;
+
+ dev = netdev->dev.parent;
+ if (!dev)
+ return NULL;
+
+ if (!dev->driver)
+ return NULL;
+
+ return dev->driver->owner;
+}
+
+/**
+ * fcoe_ethdrv_get - hold the NIC driver module via try_module_get() for
+ *			the corresponding netdev.
+ * @netdev: the target netdev
+ *
+ * Returns: the try_module_get() result, or -ENODEV if no owner module found
+ **/
+static int fcoe_ethdrv_get(const struct net_device *netdev)
+{
+ struct module *owner;
+
+ owner = fcoe_netdev_to_module_owner(netdev);
+ if (owner) {
+ printk(KERN_DEBUG "fcoe:hold driver module %s for %s\n",
+ module_name(owner), netdev->name);
+ return try_module_get(owner);
+ }
+ return -ENODEV;
+}
+
+/**
+ * fcoe_ethdrv_put - release the NIC driver module via module_put() for
+ *			the corresponding netdev.
+ * @netdev: the target netdev
+ *
+ * Returns: 0 for success, -ENODEV if no owner module was found
+ **/
+static int fcoe_ethdrv_put(const struct net_device *netdev)
+{
+ struct module *owner;
+
+ owner = fcoe_netdev_to_module_owner(netdev);
+ if (owner) {
+ printk(KERN_DEBUG "fcoe:release driver module %s for %s\n",
+ module_name(owner), netdev->name);
+ module_put(owner);
+ return 0;
+ }
+ return -ENODEV;
+}
+
+/**
+ * fcoe_destroy - handles the destroy call from sysfs
+ * @buffer: expected to be an Ethernet interface name
+ * @kp: associated kernel param
+ *
+ * Returns: 0 for success
+ **/
+static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
+{
+ int rc;
+ struct net_device *netdev;
+
+ netdev = fcoe_if_to_netdev(buffer);
+ if (!netdev) {
+ rc = -ENODEV;
+ goto out_nodev;
+ }
+ /* look for existing lport */
+ if (!fcoe_hostlist_lookup(netdev)) {
+ rc = -ENODEV;
+ goto out_putdev;
+ }
+ /* pass to transport */
+ rc = fcoe_transport_release(netdev);
+ if (rc) {
+ printk(KERN_ERR "fcoe: fcoe_transport_release(%s) failed\n",
+ netdev->name);
+ rc = -EIO;
+ goto out_putdev;
+ }
+ fcoe_ethdrv_put(netdev);
+ rc = 0;
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ return rc;
+}
+
+/**
+ * fcoe_create - handles the create call from sysfs
+ * @buffer: expected to be an Ethernet interface name
+ * @kp: associated kernel param
+ *
+ * Returns: 0 for success
+ **/
+static int fcoe_create(const char *buffer, struct kernel_param *kp)
+{
+ int rc;
+ struct net_device *netdev;
+
+ netdev = fcoe_if_to_netdev(buffer);
+ if (!netdev) {
+ rc = -ENODEV;
+ goto out_nodev;
+ }
+ /* look for existing lport */
+ if (fcoe_hostlist_lookup(netdev)) {
+ rc = -EEXIST;
+ goto out_putdev;
+ }
+ fcoe_ethdrv_get(netdev);
+
+ /* pass to transport */
+ rc = fcoe_transport_attach(netdev);
+ if (rc) {
+ printk(KERN_ERR "fcoe: fcoe_transport_attach(%s) failed\n",
+ netdev->name);
+ fcoe_ethdrv_put(netdev);
+ rc = -EIO;
+ goto out_putdev;
+ }
+ rc = 0;
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ return rc;
+}
+
+module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(create, "string");
+MODULE_PARM_DESC(create, "Create fcoe port using net device passed in.");
+module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(destroy, "string");
+MODULE_PARM_DESC(destroy, "Destroy fcoe port");
+
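+/*
+ * Example usage from userspace, assuming the module is loaded as "fcoe"
+ * (the interface name is illustrative):
+ *   echo eth0 > /sys/module/fcoe/parameters/create
+ *   echo eth0 > /sys/module/fcoe/parameters/destroy
+ */
+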
+/*
+ * fcoe_link_ok - check if link is ok for the fc_lport
+ * @lp: ptr to the fc_lport
+ *
+ * Any permanently-disqualifying conditions have been previously checked.
+ * This also updates the speed setting, which may change with link for 100/1000.
+ *
+ * This function should probably be checking for PAUSE support at some point
+ * in the future. Currently Per-priority-pause is not determinable using
+ * ethtool, so we shouldn't be restrictive until that problem is resolved.
+ *
+ * Returns: 0 if link is OK for use by FCoE.
+ *
+ */
+int fcoe_link_ok(struct fc_lport *lp)
+{
+ struct fcoe_softc *fc = fcoe_softc(lp);
+ struct net_device *dev = fc->real_dev;
+ struct ethtool_cmd ecmd = { ETHTOOL_GSET };
+ int rc = 0;
+
+ if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) {
+ dev = fc->phys_dev;
+ if (dev->ethtool_ops->get_settings) {
+ dev->ethtool_ops->get_settings(dev, &ecmd);
+ lp->link_supported_speeds &=
+ ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
+ if (ecmd.supported & (SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full))
+ lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
+ if (ecmd.supported & SUPPORTED_10000baseT_Full)
+ lp->link_supported_speeds |=
+ FC_PORTSPEED_10GBIT;
+ if (ecmd.speed == SPEED_1000)
+ lp->link_speed = FC_PORTSPEED_1GBIT;
+ if (ecmd.speed == SPEED_10000)
+ lp->link_speed = FC_PORTSPEED_10GBIT;
+ }
+ } else
+ rc = -1;
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(fcoe_link_ok);
+
+/*
+ * fcoe_percpu_clean - frees any skbs queued for the given lport from each
+ * per-CPU receive queue.
+ * @lp: the fc_lport
+ */
+void fcoe_percpu_clean(struct fc_lport *lp)
+{
+ int idx;
+ struct fcoe_percpu_s *pp;
+ struct fcoe_rcv_info *fr;
+ struct sk_buff_head *list;
+ struct sk_buff *skb, *next;
+ struct sk_buff *head;
+
+ for (idx = 0; idx < NR_CPUS; idx++) {
+ if (fcoe_percpu[idx]) {
+ pp = fcoe_percpu[idx];
+ spin_lock_bh(&pp->fcoe_rx_list.lock);
+ list = &pp->fcoe_rx_list;
+ head = list->next;
+ for (skb = head; skb != (struct sk_buff *)list;
+ skb = next) {
+ next = skb->next;
+ fr = fcoe_dev_from_skb(skb);
+ if (fr->fr_dev == lp) {
+ __skb_unlink(skb, list);
+ kfree_skb(skb);
+ }
+ }
+ spin_unlock_bh(&pp->fcoe_rx_list.lock);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(fcoe_percpu_clean);
+
+/**
+ * fcoe_clean_pending_queue - dequeue skb and free it
+ * @lp: the corresponding fc_lport
+ *
+ * Returns: none
+ **/
+void fcoe_clean_pending_queue(struct fc_lport *lp)
+{
+ struct fcoe_softc *fc = lport_priv(lp);
+ struct sk_buff *skb;
+
+ spin_lock_bh(&fc->fcoe_pending_queue.lock);
+ while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
+ spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+ kfree_skb(skb);
+ spin_lock_bh(&fc->fcoe_pending_queue.lock);
+ }
+ spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+}
+EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue);
+
+/**
+ * libfc_host_alloc - allocate a Scsi_Host with room for the fc_lport
+ * @sht: ptr to the scsi host template
+ * @priv_size: size of private data after fc_lport
+ *
+ * Returns: ptr to Scsi_Host
+ * TODO - to libfc?
+ */
+static inline struct Scsi_Host *libfc_host_alloc(
+ struct scsi_host_template *sht, int priv_size)
+{
+ return scsi_host_alloc(sht, sizeof(struct fc_lport) + priv_size);
+}
+
+/**
+ * fcoe_host_alloc - allocate a Scsi_Host with room for the fcoe_softc
+ * @sht: ptr to the scsi host template
+ * @priv_size: size of private data after fc_lport
+ *
+ * Returns: ptr to Scsi_Host
+ */
+struct Scsi_Host *fcoe_host_alloc(struct scsi_host_template *sht, int priv_size)
+{
+ return libfc_host_alloc(sht, sizeof(struct fcoe_softc) + priv_size);
+}
+EXPORT_SYMBOL_GPL(fcoe_host_alloc);
+
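+/*
+ * Note on layout: the hostdata area of the resulting Scsi_Host is
+ * [ fc_lport | fcoe_softc | priv_size bytes ], so shost_priv() yields the
+ * fc_lport and lport_priv() yields the fcoe_softc.
+ */
+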
+/*
+ * fcoe_reset - resets the lport associated with the given shost
+ * @shost: the Scsi_Host the reset was requested on
+ *
+ * Returns: always 0
+ */
+int fcoe_reset(struct Scsi_Host *shost)
+{
+ struct fc_lport *lport = shost_priv(shost);
+ fc_lport_reset(lport);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_reset);
+
+/*
+ * fcoe_wwn_from_mac - converts 48-bit IEEE MAC address to 64-bit FC WWN.
+ * @mac: mac address
+ * @scheme: the WWN naming scheme to use (1 or 2)
+ * @port: port number, used only by naming scheme 2
+ *
+ * Returns: u64 fc world wide name
+ */
+u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
+ unsigned int scheme, unsigned int port)
+{
+ u64 wwn;
+ u64 host_mac;
+
+	/* The MAC is in network order, so build only the low 48 bits */
+ host_mac = ((u64) mac[0] << 40) |
+ ((u64) mac[1] << 32) |
+ ((u64) mac[2] << 24) |
+ ((u64) mac[3] << 16) |
+ ((u64) mac[4] << 8) |
+ (u64) mac[5];
+
+ WARN_ON(host_mac >= (1ULL << 48));
+ wwn = host_mac | ((u64) scheme << 60);
+ switch (scheme) {
+ case 1:
+ WARN_ON(port != 0);
+ break;
+ case 2:
+ WARN_ON(port >= 0xfff);
+ wwn |= (u64) port << 48;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return wwn;
+}
+EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
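+
+/*
+ * Worked example (values illustrative): for MAC 00:11:22:33:44:55,
+ * host_mac is 0x001122334455; scheme 1 gives WWN 0x1000001122334455 and
+ * scheme 2 with port 2 gives WWN 0x2002001122334455.
+ */
+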
+/*
+ * fcoe_hostlist_lookup_softc - find the corresponding softc by net_device
+ * @dev: ptr to the net_device to look up
+ *
+ * Returns: NULL or the located fcoe_softc
+ */
+static struct fcoe_softc *fcoe_hostlist_lookup_softc(
+ const struct net_device *dev)
+{
+ struct fcoe_softc *fc;
+
+ read_lock(&fcoe_hostlist_lock);
+ list_for_each_entry(fc, &fcoe_hostlist, list) {
+ if (fc->real_dev == dev) {
+ read_unlock(&fcoe_hostlist_lock);
+ return fc;
+ }
+ }
+ read_unlock(&fcoe_hostlist_lock);
+ return NULL;
+}
+
+/*
+ * fcoe_hostlist_lookup - find the corresponding lport by netdev
+ * @netdev: ptr to net_device
+ *
+ * Returns: the associated fc_lport, or NULL if none is found
+ */
+struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
+{
+ struct fcoe_softc *fc;
+
+ fc = fcoe_hostlist_lookup_softc(netdev);
+
+ return (fc) ? fc->lp : NULL;
+}
+EXPORT_SYMBOL_GPL(fcoe_hostlist_lookup);
+
+/*
+ * fcoe_hostlist_add - add a lport to lports list
+ * @lp: ptr to the fc_lport to be added
+ *
+ * Returns: 0 for success
+ */
+int fcoe_hostlist_add(const struct fc_lport *lp)
+{
+ struct fcoe_softc *fc;
+
+ fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
+ if (!fc) {
+ fc = fcoe_softc(lp);
+ write_lock_bh(&fcoe_hostlist_lock);
+ list_add_tail(&fc->list, &fcoe_hostlist);
+ write_unlock_bh(&fcoe_hostlist_lock);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_hostlist_add);
+
+/*
+ * fcoe_hostlist_remove - remove a lport from lports list
+ * @lp: ptr to the fc_lport to be removed
+ *
+ * Returns: 0 for success
+ */
+int fcoe_hostlist_remove(const struct fc_lport *lp)
+{
+ struct fcoe_softc *fc;
+
+ fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
+ BUG_ON(!fc);
+ write_lock_bh(&fcoe_hostlist_lock);
+ list_del(&fc->list);
+ write_unlock_bh(&fcoe_hostlist_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_hostlist_remove);
+
+/**
+ * fcoe_libfc_config - sets up libfc related properties for lport
+ * @lp: ptr to the fc_lport
+ * @tt: libfc function template
+ *
+ * Returns: 0 for success
+ **/
+int fcoe_libfc_config(struct fc_lport *lp, struct libfc_function_template *tt)
+{
+ /* Set the function pointers set by the LLDD */
+ memcpy(&lp->tt, tt, sizeof(*tt));
+ if (fc_fcp_init(lp))
+ return -ENOMEM;
+ fc_exch_init(lp);
+ fc_elsct_init(lp);
+ fc_lport_init(lp);
+ fc_rport_init(lp);
+ fc_disc_init(lp);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_libfc_config);
+
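+/*
+ * A transport (the sw transport in fcoe_sw.c, for instance) calls
+ * fcoe_libfc_config() during lport setup with its own
+ * libfc_function_template; template entries left NULL are filled in with
+ * the libfc defaults by the fc_*_init() calls above.
+ */
+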
+/**
+ * fcoe_init - fcoe module loading initialization
+ *
+ * Initialization routine:
+ * 1. creates the fc transport software structure
+ * 2. initializes the linked list of port information structures
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int __init fcoe_init(void)
+{
+ int cpu;
+ struct fcoe_percpu_s *p;
+
+ INIT_LIST_HEAD(&fcoe_hostlist);
+ rwlock_init(&fcoe_hostlist_lock);
+
+#ifdef CONFIG_HOTPLUG_CPU
+ register_cpu_notifier(&fcoe_cpu_notifier);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+ /*
+	 * initialize the per-CPU receive threads
+ */
+ for_each_online_cpu(cpu) {
+ p = kzalloc(sizeof(struct fcoe_percpu_s), GFP_KERNEL);
+ if (p) {
+ p->thread = kthread_create(fcoe_percpu_receive_thread,
+ (void *)p,
+ "fcoethread/%d", cpu);
+
+ /*
+			 * if thread creation succeeded, initialize the skb
+			 * queue head and bind the thread to the cpu
+ */
+ if (likely(!IS_ERR(p->thread))) {
+ p->cpu = cpu;
+ fcoe_percpu[cpu] = p;
+ skb_queue_head_init(&p->fcoe_rx_list);
+ kthread_bind(p->thread, cpu);
+ wake_up_process(p->thread);
+ } else {
+ fcoe_percpu[cpu] = NULL;
+ kfree(p);
+
+ }
+ }
+ }
+
+ /*
+ * setup link change notification
+ */
+ fcoe_dev_setup();
+
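+	/*
+	 * arm the watchdog: it first fires 10 seconds after load and
+	 * then re-arms itself once a second from fcoe_watchdog()
+	 */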
+ init_timer(&fcoe_timer);
+ fcoe_timer.data = 0;
+ fcoe_timer.function = fcoe_watchdog;
+ fcoe_timer.expires = (jiffies + (10 * HZ));
+ add_timer(&fcoe_timer);
+
+	/* initialize the fcoe transport */
+ fcoe_transport_init();
+
+ fcoe_sw_init();
+
+ return 0;
+}
+module_init(fcoe_init);
+
+/**
+ * fcoe_exit - fcoe module unloading cleanup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static void __exit fcoe_exit(void)
+{
+ u32 idx;
+ struct fcoe_softc *fc, *tmp;
+ struct fcoe_percpu_s *p;
+ struct sk_buff *skb;
+
+ /*
+	 * Stop all callback interfaces
+ */
+#ifdef CONFIG_HOTPLUG_CPU
+ unregister_cpu_notifier(&fcoe_cpu_notifier);
+#endif /* CONFIG_HOTPLUG_CPU */
+ fcoe_dev_cleanup();
+
+ /*
+ * stop timer
+ */
+ del_timer_sync(&fcoe_timer);
+
+	/* release the associated fcoe transport for each lport */
+ list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
+ fcoe_transport_release(fc->real_dev);
+
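+	/* stop the per-cpu receive threads and free any queued skbs */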
+ for (idx = 0; idx < NR_CPUS; idx++) {
+ if (fcoe_percpu[idx]) {
+ kthread_stop(fcoe_percpu[idx]->thread);
+ p = fcoe_percpu[idx];
+ spin_lock_bh(&p->fcoe_rx_list.lock);
+ while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
+ kfree_skb(skb);
+ spin_unlock_bh(&p->fcoe_rx_list.lock);
+ if (fcoe_percpu[idx]->crc_eof_page)
+ put_page(fcoe_percpu[idx]->crc_eof_page);
+ kfree(fcoe_percpu[idx]);
+ }
+ }
+
+	/* remove the sw transport */
+ fcoe_sw_exit();
+
+ /* detach the transport */
+ fcoe_transport_exit();
+}
+module_exit(fcoe_exit);
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index c33bcb284df7..32eef66114c7 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -3,7 +3,7 @@
* Revised: Mon Dec 28 21:59:02 1998 by faith@acm.org
* Author: Rickard E. Faith, faith@cs.unc.edu
* Copyright 1992-1996, 1998 Rickard E. Faith (faith@acm.org)
- * Shared IRQ supported added 7/7/2001 Alan Cox <alan@redhat.com>
+ * Shared IRQ supported added 7/7/2001 Alan Cox <alan@lxorguk.ukuu.org.uk>
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -290,9 +290,11 @@
#include <scsi/scsi_ioctl.h>
#include "fdomain.h"
+#ifndef PCMCIA
MODULE_AUTHOR("Rickard E. Faith");
MODULE_DESCRIPTION("Future domain SCSI driver");
MODULE_LICENSE("GPL");
+#endif
#define VERSION "$Revision: 5.51 $"
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index c387c15a2128..fb247fdfa2bd 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -588,7 +588,7 @@ static struct pci_driver gdth_pci_driver = {
.remove = gdth_pci_remove_one,
};
-static void gdth_pci_remove_one(struct pci_dev *pdev)
+static void __devexit gdth_pci_remove_one(struct pci_dev *pdev)
{
gdth_ha_str *ha = pci_get_drvdata(pdev);
@@ -600,7 +600,7 @@ static void gdth_pci_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static int gdth_pci_init_one(struct pci_dev *pdev,
+static int __devinit gdth_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
ushort vendor = pdev->vendor;
@@ -853,7 +853,7 @@ static int __init gdth_init_isa(ulong32 bios_adr,gdth_ha_str *ha)
#endif /* CONFIG_ISA */
#ifdef CONFIG_PCI
-static int gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
+static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
gdth_ha_str *ha)
{
register gdt6_dpram_str __iomem *dp6_ptr;
@@ -1237,7 +1237,7 @@ static int gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
/* controller protocol functions */
-static void __init gdth_enable_int(gdth_ha_str *ha)
+static void __devinit gdth_enable_int(gdth_ha_str *ha)
{
ulong flags;
gdt2_dpram_str __iomem *dp2_ptr;
@@ -1553,7 +1553,7 @@ static int gdth_internal_cmd(gdth_ha_str *ha, unchar service, ushort opcode,
/* search for devices */
-static int __init gdth_search_drives(gdth_ha_str *ha)
+static int __devinit gdth_search_drives(gdth_ha_str *ha)
{
ushort cdev_cnt, i;
int ok;
@@ -4935,7 +4935,7 @@ static int __init gdth_eisa_probe_one(ushort eisa_slot)
#endif /* CONFIG_EISA */
#ifdef CONFIG_PCI
-static int gdth_pci_probe_one(gdth_pci_str *pcistr,
+static int __devinit gdth_pci_probe_one(gdth_pci_str *pcistr,
gdth_ha_str **ha_out)
{
struct Scsi_Host *shp;
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 4d15a62914e9..9c1e6a5b5af0 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -10,7 +10,7 @@
See the WWW-page: http://www.uni-mainz.de/~langm000/linux.html for latest
updates, info and ADF-files for adapters supported by this driver.
- Alan Cox <alan@redhat.com>
+ Alan Cox <alan@lxorguk.ukuu.org.uk>
Updated for Linux 2.5.45 to use the new error handler, cleaned up the
lock macros and did a few unavoidable locking tweaks, plus one locking
fix in the irq and completion path.
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 4e0b7c8eb32e..44f202f33101 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -121,6 +121,7 @@ static const struct {
{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ABORT, 0, 1, "transaction cancelled" },
{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ABORT, 0, 1, "transaction cancelled implicit" },
{ IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
+ { IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
{ IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },
{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
@@ -278,13 +279,6 @@ static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
rsp->data.info.rsp_code))
return DID_ERROR << 16;
- if (!vfc_cmd->status) {
- if (rsp->flags & FCP_RESID_OVER)
- return rsp->scsi_status | (DID_ERROR << 16);
- else
- return rsp->scsi_status | (DID_OK << 16);
- }
-
err = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
if (err >= 0)
return rsp->scsi_status | (cmd_status[err].result << 16);
@@ -503,6 +497,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
case IBMVFC_HOST_ACTION_INIT:
case IBMVFC_HOST_ACTION_TGT_DEL:
case IBMVFC_HOST_ACTION_QUERY_TGTS:
+ case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
case IBMVFC_HOST_ACTION_TGT_ADD:
case IBMVFC_HOST_ACTION_NONE:
default:
@@ -566,7 +561,7 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin)
struct ibmvfc_target *tgt;
if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
- if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) {
+ if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
dev_err(vhost->dev,
"Host initialization retries exceeded. Taking adapter offline\n");
ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
@@ -765,6 +760,9 @@ static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
cmnd->scsi_done(cmnd);
}
+ if (evt->eh_comp)
+ complete(evt->eh_comp);
+
ibmvfc_free_event(evt);
}
@@ -847,11 +845,12 @@ static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
{
if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
- if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) {
+ vhost->delay_init = 1;
+ if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
dev_err(vhost->dev,
"Host initialization retries exceeded. Taking adapter offline\n");
ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
- } else if (vhost->init_retries == IBMVFC_MAX_INIT_RETRIES)
+ } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
__ibmvfc_reset_host(vhost);
else
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
@@ -1252,6 +1251,7 @@ static void ibmvfc_init_event(struct ibmvfc_event *evt,
evt->sync_iu = NULL;
evt->crq.format = format;
evt->done = done;
+ evt->eh_comp = NULL;
}
/**
@@ -1381,6 +1381,8 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
add_timer(&evt->timer);
}
+ mb();
+
if ((rc = ibmvfc_send_crq(vhost, crq_as_u64[0], crq_as_u64[1]))) {
list_del(&evt->queue);
del_timer(&evt->timer);
@@ -1477,6 +1479,11 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
+ if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED))
+ ibmvfc_reinit_host(evt->vhost);
+
+ if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
+ cmnd->result = (DID_ERROR << 16);
ibmvfc_log_error(evt);
}
@@ -1489,6 +1496,9 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
cmnd->scsi_done(cmnd);
}
+ if (evt->eh_comp)
+ complete(evt->eh_comp);
+
ibmvfc_free_event(evt);
}
@@ -1627,7 +1637,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
struct ibmvfc_host *vhost = shost_priv(sdev->host);
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
struct ibmvfc_cmd *tmf;
- struct ibmvfc_event *evt;
+ struct ibmvfc_event *evt = NULL;
union ibmvfc_iu rsp_iu;
struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
int rsp_rc = -EBUSY;
@@ -1789,7 +1799,8 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
{
struct ibmvfc_host *vhost = shost_priv(sdev->host);
- struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ struct scsi_target *starget = scsi_target(sdev);
+ struct fc_rport *rport = starget_to_rport(starget);
struct ibmvfc_tmf *tmf;
struct ibmvfc_event *evt, *found_evt;
union ibmvfc_iu rsp;
@@ -1827,7 +1838,7 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
int_to_scsilun(sdev->lun, &tmf->lun);
tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
tmf->cancel_key = (unsigned long)sdev->hostdata;
- tmf->my_cancel_key = (IBMVFC_TMF_CANCEL_KEY | (unsigned long)sdev->hostdata);
+ tmf->my_cancel_key = (unsigned long)starget->hostdata;
evt->sync_iu = &rsp;
init_completion(&evt->comp);
@@ -1859,6 +1870,91 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
}
/**
+ * ibmvfc_match_target - Match function for specified target
+ * @evt: ibmvfc event struct
+ * @device: device to match (starget)
+ *
+ * Returns:
+ * 1 if event matches starget / 0 if event does not match starget
+ **/
+static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
+{
+ if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
+ return 1;
+ return 0;
+}
+
+/**
+ * ibmvfc_match_lun - Match function for specified LUN
+ * @evt: ibmvfc event struct
+ * @device: device to match (sdev)
+ *
+ * Returns:
+ * 1 if event matches sdev / 0 if event does not match sdev
+ **/
+static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
+{
+ if (evt->cmnd && evt->cmnd->device == device)
+ return 1;
+ return 0;
+}
+
+/**
+ * ibmvfc_wait_for_ops - Wait for ops to complete
+ * @vhost: ibmvfc host struct
+ * @device: device to match (starget or sdev)
+ * @match: match function
+ *
+ * Returns:
+ * SUCCESS / FAILED
+ **/
+static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
+ int (*match) (struct ibmvfc_event *, void *))
+{
+ struct ibmvfc_event *evt;
+ DECLARE_COMPLETION_ONSTACK(comp);
+ int wait;
+ unsigned long flags;
+ signed long timeout = init_timeout * HZ;
+
+ ENTER;
+ do {
+ wait = 0;
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ list_for_each_entry(evt, &vhost->sent, queue) {
+ if (match(evt, device)) {
+ evt->eh_comp = &comp;
+ wait++;
+ }
+ }
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ if (wait) {
+ timeout = wait_for_completion_timeout(&comp, timeout);
+
+ if (!timeout) {
+ wait = 0;
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ list_for_each_entry(evt, &vhost->sent, queue) {
+ if (match(evt, device)) {
+ evt->eh_comp = NULL;
+ wait++;
+ }
+ }
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ if (wait)
+ dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
+ LEAVE;
+ return wait ? FAILED : SUCCESS;
+ }
+ }
+ } while (wait);
+
+ LEAVE;
+ return SUCCESS;
+}
+
+/**
* ibmvfc_eh_abort_handler - Abort a command
* @cmd: scsi command to abort
*
@@ -1867,29 +1963,21 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
**/
static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
{
- struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
- struct ibmvfc_event *evt, *pos;
+ struct scsi_device *sdev = cmd->device;
+ struct ibmvfc_host *vhost = shost_priv(sdev->host);
int cancel_rc, abort_rc;
- unsigned long flags;
+ int rc = FAILED;
ENTER;
ibmvfc_wait_while_resetting(vhost);
- cancel_rc = ibmvfc_cancel_all(cmd->device, IBMVFC_TMF_ABORT_TASK_SET);
- abort_rc = ibmvfc_abort_task_set(cmd->device);
+ cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
+ abort_rc = ibmvfc_abort_task_set(sdev);
- if (!cancel_rc && !abort_rc) {
- spin_lock_irqsave(vhost->host->host_lock, flags);
- list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
- if (evt->cmnd && evt->cmnd->device == cmd->device)
- ibmvfc_fail_request(evt, DID_ABORT);
- }
- spin_unlock_irqrestore(vhost->host->host_lock, flags);
- LEAVE;
- return SUCCESS;
- }
+ if (!cancel_rc && !abort_rc)
+ rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
LEAVE;
- return FAILED;
+ return rc;
}
/**
@@ -1901,29 +1989,21 @@ static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
**/
static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
- struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
- struct ibmvfc_event *evt, *pos;
+ struct scsi_device *sdev = cmd->device;
+ struct ibmvfc_host *vhost = shost_priv(sdev->host);
int cancel_rc, reset_rc;
- unsigned long flags;
+ int rc = FAILED;
ENTER;
ibmvfc_wait_while_resetting(vhost);
- cancel_rc = ibmvfc_cancel_all(cmd->device, IBMVFC_TMF_LUN_RESET);
- reset_rc = ibmvfc_reset_device(cmd->device, IBMVFC_LUN_RESET, "LUN");
+ cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
+ reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
- if (!cancel_rc && !reset_rc) {
- spin_lock_irqsave(vhost->host->host_lock, flags);
- list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
- if (evt->cmnd && evt->cmnd->device == cmd->device)
- ibmvfc_fail_request(evt, DID_ABORT);
- }
- spin_unlock_irqrestore(vhost->host->host_lock, flags);
- LEAVE;
- return SUCCESS;
- }
+ if (!cancel_rc && !reset_rc)
+ rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
LEAVE;
- return FAILED;
+ return rc;
}
/**
@@ -1959,31 +2039,23 @@ static void ibmvfc_dev_abort_all(struct scsi_device *sdev, void *data)
**/
static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
{
- struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
- struct scsi_target *starget = scsi_target(cmd->device);
- struct ibmvfc_event *evt, *pos;
+ struct scsi_device *sdev = cmd->device;
+ struct ibmvfc_host *vhost = shost_priv(sdev->host);
+ struct scsi_target *starget = scsi_target(sdev);
int reset_rc;
+ int rc = FAILED;
unsigned long cancel_rc = 0;
- unsigned long flags;
ENTER;
ibmvfc_wait_while_resetting(vhost);
starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
- reset_rc = ibmvfc_reset_device(cmd->device, IBMVFC_TARGET_RESET, "target");
+ reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
- if (!cancel_rc && !reset_rc) {
- spin_lock_irqsave(vhost->host->host_lock, flags);
- list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
- if (evt->cmnd && scsi_target(evt->cmnd->device) == starget)
- ibmvfc_fail_request(evt, DID_ABORT);
- }
- spin_unlock_irqrestore(vhost->host->host_lock, flags);
- LEAVE;
- return SUCCESS;
- }
+ if (!cancel_rc && !reset_rc)
+ rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
LEAVE;
- return FAILED;
+ return rc;
}
/**
@@ -2013,26 +2085,19 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
struct scsi_target *starget = to_scsi_target(&rport->dev);
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ibmvfc_host *vhost = shost_priv(shost);
- struct ibmvfc_event *evt, *pos;
unsigned long cancel_rc = 0;
unsigned long abort_rc = 0;
- unsigned long flags;
+ int rc = FAILED;
ENTER;
starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
starget_for_each_device(starget, &abort_rc, ibmvfc_dev_abort_all);
- if (!cancel_rc && !abort_rc) {
- spin_lock_irqsave(shost->host_lock, flags);
- list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
- if (evt->cmnd && scsi_target(evt->cmnd->device) == starget)
- ibmvfc_fail_request(evt, DID_ABORT);
- }
- spin_unlock_irqrestore(shost->host_lock, flags);
- } else
- ibmvfc_issue_fc_host_lip(shost);
+ if (!cancel_rc && !abort_rc)
+ rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
- scsi_target_unblock(&rport->dev);
+ if (rc == FAILED)
+ ibmvfc_issue_fc_host_lip(shost);
LEAVE;
}
@@ -2091,15 +2156,17 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
case IBMVFC_AE_LINK_UP:
case IBMVFC_AE_RESUME:
vhost->events_to_log |= IBMVFC_AE_LINKUP;
- ibmvfc_init_host(vhost, 1);
+ vhost->delay_init = 1;
+ __ibmvfc_reset_host(vhost);
break;
case IBMVFC_AE_SCN_FABRIC:
+ case IBMVFC_AE_SCN_DOMAIN:
vhost->events_to_log |= IBMVFC_AE_RSCN;
- ibmvfc_init_host(vhost, 1);
+ vhost->delay_init = 1;
+ __ibmvfc_reset_host(vhost);
break;
case IBMVFC_AE_SCN_NPORT:
case IBMVFC_AE_SCN_GROUP:
- case IBMVFC_AE_SCN_DOMAIN:
vhost->events_to_log |= IBMVFC_AE_RSCN;
case IBMVFC_AE_ELS_LOGO:
case IBMVFC_AE_ELS_PRLO:
@@ -2265,6 +2332,28 @@ static int ibmvfc_slave_alloc(struct scsi_device *sdev)
}
/**
+ * ibmvfc_target_alloc - Setup the target's task set value
+ * @starget: struct scsi_target
+ *
+ * Set the target's task set value so that error handling works as
+ * expected.
+ *
+ * Returns:
+ * 0 on success / -ENXIO if device does not exist
+ **/
+static int ibmvfc_target_alloc(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ starget->hostdata = (void *)(unsigned long)vhost->task_set++;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return 0;
+}
+
+/**
* ibmvfc_slave_configure - Configure the device
* @sdev: struct scsi_device device to configure
*
@@ -2543,6 +2632,7 @@ static struct scsi_host_template driver_template = {
.eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
.slave_alloc = ibmvfc_slave_alloc,
.slave_configure = ibmvfc_slave_configure,
+ .target_alloc = ibmvfc_target_alloc,
.scan_finished = ibmvfc_scan_finished,
.change_queue_depth = ibmvfc_change_queue_depth,
.change_queue_type = ibmvfc_change_queue_type,
@@ -2639,7 +2729,7 @@ static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
} else if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
vio_disable_interrupts(vdev);
ibmvfc_handle_async(async, vhost);
- crq->valid = 0;
+ async->valid = 0;
} else
done = 1;
}
@@ -2671,7 +2761,7 @@ static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
void (*job_step) (struct ibmvfc_target *))
{
- if (++tgt->init_retries > IBMVFC_MAX_INIT_RETRIES) {
+ if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
wake_up(&tgt->vhost->work_wait_q);
} else
@@ -2710,6 +2800,8 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
rsp->status, rsp->error, status);
if (ibmvfc_retry_cmd(rsp->status, rsp->error))
ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
+ else
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
break;
};
@@ -2804,6 +2896,8 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
if (ibmvfc_retry_cmd(rsp->status, rsp->error))
ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
+ else
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
break;
};
@@ -3095,6 +3189,8 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
+ else
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
break;
};
@@ -3425,6 +3521,7 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
case IBMVFC_HOST_ACTION_ALLOC_TGTS:
case IBMVFC_HOST_ACTION_TGT_ADD:
case IBMVFC_HOST_ACTION_TGT_DEL:
+ case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
case IBMVFC_HOST_ACTION_QUERY:
default:
break;
@@ -3521,7 +3618,13 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
break;
case IBMVFC_HOST_ACTION_INIT:
BUG_ON(vhost->state != IBMVFC_INITIALIZING);
- vhost->job_step(vhost);
+ if (vhost->delay_init) {
+ vhost->delay_init = 0;
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ ssleep(15);
+ return;
+ } else
+ vhost->job_step(vhost);
break;
case IBMVFC_HOST_ACTION_QUERY:
list_for_each_entry(tgt, &vhost->targets, queue)
@@ -3540,6 +3643,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
break;
case IBMVFC_HOST_ACTION_TGT_DEL:
+ case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
list_for_each_entry(tgt, &vhost->targets, queue) {
if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
tgt_dbg(tgt, "Deleting rport\n");
@@ -3555,8 +3659,17 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
}
if (vhost->state == IBMVFC_INITIALIZING) {
- ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
- vhost->job_step = ibmvfc_discover_targets;
+ if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
+ ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD);
+ vhost->init_retries = 0;
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ scsi_unblock_requests(vhost->host);
+ return;
+ } else {
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
+ vhost->job_step = ibmvfc_discover_targets;
+ }
} else {
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
@@ -3579,14 +3692,8 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
}
}
- if (!ibmvfc_dev_init_to_do(vhost)) {
- ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
- ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD);
- vhost->init_retries = 0;
- spin_unlock_irqrestore(vhost->host->host_lock, flags);
- scsi_unblock_requests(vhost->host);
- return;
- }
+ if (!ibmvfc_dev_init_to_do(vhost))
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
break;
case IBMVFC_HOST_ACTION_TGT_ADD:
list_for_each_entry(tgt, &vhost->targets, queue) {
@@ -3594,16 +3701,6 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
spin_unlock_irqrestore(vhost->host->host_lock, flags);
ibmvfc_tgt_add_rport(tgt);
return;
- } else if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
- tgt_dbg(tgt, "Deleting rport\n");
- rport = tgt->rport;
- tgt->rport = NULL;
- list_del(&tgt->queue);
- spin_unlock_irqrestore(vhost->host->host_lock, flags);
- if (rport)
- fc_remote_port_delete(rport);
- kref_put(&tgt->kref, ibmvfc_release_tgt);
- return;
}
}
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index fb3177ab6691..babdf3db59df 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,11 +29,11 @@
#include "viosrp.h"
#define IBMVFC_NAME "ibmvfc"
-#define IBMVFC_DRIVER_VERSION "1.0.2"
-#define IBMVFC_DRIVER_DATE "(August 14, 2008)"
+#define IBMVFC_DRIVER_VERSION "1.0.4"
+#define IBMVFC_DRIVER_DATE "(November 14, 2008)"
#define IBMVFC_DEFAULT_TIMEOUT 15
-#define IBMVFC_INIT_TIMEOUT 30
+#define IBMVFC_INIT_TIMEOUT 120
#define IBMVFC_MAX_REQUESTS_DEFAULT 100
#define IBMVFC_DEBUG 0
@@ -43,7 +43,8 @@
#define IBMVFC_MAX_DISC_THREADS 4
#define IBMVFC_TGT_MEMPOOL_SZ 64
#define IBMVFC_MAX_CMDS_PER_LUN 64
-#define IBMVFC_MAX_INIT_RETRIES 3
+#define IBMVFC_MAX_HOST_INIT_RETRIES 6
+#define IBMVFC_MAX_TGT_INIT_RETRIES 3
#define IBMVFC_DEV_LOSS_TMO (5 * 60)
#define IBMVFC_DEFAULT_LOG_LEVEL 2
#define IBMVFC_MAX_CDB_LEN 16
@@ -109,6 +110,7 @@ enum ibmvfc_vios_errors {
IBMVFC_TRANS_CANCELLED = 0x0006,
IBMVFC_TRANS_CANCELLED_IMPLICIT = 0x0007,
IBMVFC_INSUFFICIENT_RESOURCE = 0x0008,
+ IBMVFC_PLOGI_REQUIRED = 0x0010,
IBMVFC_COMMAND_FAILED = 0x8000,
};
@@ -337,7 +339,6 @@ struct ibmvfc_tmf {
#define IBMVFC_TMF_LUA_VALID 0x40
u32 cancel_key;
u32 my_cancel_key;
-#define IBMVFC_TMF_CANCEL_KEY 0x80000000
u32 pad;
u64 reserved[2];
}__attribute__((packed, aligned (8)));
@@ -524,10 +525,10 @@ enum ibmvfc_async_event {
};
struct ibmvfc_crq {
- u8 valid;
- u8 format;
+ volatile u8 valid;
+ volatile u8 format;
u8 reserved[6];
- u64 ioba;
+ volatile u64 ioba;
}__attribute__((packed, aligned (8)));
struct ibmvfc_crq_queue {
@@ -537,13 +538,13 @@ struct ibmvfc_crq_queue {
};
struct ibmvfc_async_crq {
- u8 valid;
+ volatile u8 valid;
u8 pad[3];
u32 pad2;
- u64 event;
- u64 scsi_id;
- u64 wwpn;
- u64 node_name;
+ volatile u64 event;
+ volatile u64 scsi_id;
+ volatile u64 wwpn;
+ volatile u64 node_name;
u64 reserved;
}__attribute__((packed, aligned (8)));
@@ -606,6 +607,7 @@ struct ibmvfc_event {
struct srp_direct_buf *ext_list;
dma_addr_t ext_list_token;
struct completion comp;
+ struct completion *eh_comp;
struct timer_list timer;
};
@@ -626,6 +628,7 @@ enum ibmvfc_host_action {
IBMVFC_HOST_ACTION_TGT_DEL,
IBMVFC_HOST_ACTION_ALLOC_TGTS,
IBMVFC_HOST_ACTION_TGT_INIT,
+ IBMVFC_HOST_ACTION_TGT_DEL_FAILED,
IBMVFC_HOST_ACTION_TGT_ADD,
};
@@ -671,6 +674,7 @@ struct ibmvfc_host {
int discovery_threads;
int client_migrated;
int reinit;
+ int delay_init;
int events_to_log;
#define IBMVFC_AE_LINKUP 0x0001
#define IBMVFC_AE_LINKDOWN 0x0002
@@ -700,7 +704,7 @@ struct ibmvfc_host {
#define ibmvfc_log(vhost, level, ...) \
do { \
- if (level >= (vhost)->log_level) \
+ if ((vhost)->log_level >= level) \
dev_err((vhost)->dev, ##__VA_ARGS__); \
} while (0)
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 87e09f35d3d4..868d35ea01bb 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -107,7 +107,7 @@ module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channel, "Largest channel value");
module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
-module_param_named(max_requests, max_requests, int, S_IRUGO | S_IWUSR);
+module_param_named(max_requests, max_requests, int, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
/* ------------------------------------------------------------
@@ -1442,7 +1442,7 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
spin_lock_irqsave(shost->host_lock, lock_flags);
if (sdev->type == TYPE_DISK) {
sdev->allow_restart = 1;
- sdev->timeout = 60 * HZ;
+ blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
}
scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
spin_unlock_irqrestore(shost->host_lock, lock_flags);
@@ -1657,7 +1657,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
vdev->dev.driver_data = NULL;
- driver_template.can_queue = max_requests;
+ driver_template.can_queue = max_requests - 2;
host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
if (!host) {
dev_err(&vdev->dev, "couldn't allocate host data\n");
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index 2a5b29d12172..e2dd6a45924a 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -864,21 +864,23 @@ static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id)
INIT_WORK(&vport->crq_work, handle_crq);
- err = crq_queue_create(&vport->crq_queue, target);
+ err = scsi_add_host(shost, target->dev);
if (err)
goto free_srp_target;
- err = scsi_add_host(shost, target->dev);
+ err = scsi_tgt_alloc_queue(shost);
if (err)
- goto destroy_queue;
+ goto remove_host;
- err = scsi_tgt_alloc_queue(shost);
+ err = crq_queue_create(&vport->crq_queue, target);
if (err)
- goto destroy_queue;
+ goto free_queue;
return 0;
-destroy_queue:
- crq_queue_destroy(target);
+free_queue:
+ scsi_tgt_free_queue(shost);
+remove_host:
+ scsi_remove_host(shost);
free_srp_target:
srp_target_free(target);
put_host:
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 740bad435995..c24140aff8e7 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -343,6 +343,11 @@ static ide_startstop_t idescsi_do_request (ide_drive_t *drive, struct request *r
}
#ifdef CONFIG_IDE_PROC_FS
+static ide_proc_entry_t idescsi_proc[] = {
+ { "capacity", S_IFREG|S_IRUGO, proc_ide_read_capacity, NULL },
+ { NULL, 0, NULL, NULL }
+};
+
#define ide_scsi_devset_get(name, field) \
static int get_##name(ide_drive_t *drive) \
{ \
@@ -378,6 +383,16 @@ static const struct ide_proc_devset idescsi_settings[] = {
IDE_PROC_DEVSET(transform, 0, 3),
{ 0 },
};
+
+static ide_proc_entry_t *ide_scsi_proc_entries(ide_drive_t *drive)
+{
+ return idescsi_proc;
+}
+
+static const struct ide_proc_devset *ide_scsi_proc_devsets(ide_drive_t *drive)
+{
+ return idescsi_settings;
+}
#endif
/*
@@ -419,13 +434,6 @@ static void ide_scsi_remove(ide_drive_t *drive)
static int ide_scsi_probe(ide_drive_t *);
-#ifdef CONFIG_IDE_PROC_FS
-static ide_proc_entry_t idescsi_proc[] = {
- { "capacity", S_IFREG|S_IRUGO, proc_ide_read_capacity, NULL },
- { NULL, 0, NULL, NULL }
-};
-#endif
-
static ide_driver_t idescsi_driver = {
.gen_driver = {
.owner = THIS_MODULE,
@@ -439,45 +447,39 @@ static ide_driver_t idescsi_driver = {
.end_request = idescsi_end_request,
.error = idescsi_atapi_error,
#ifdef CONFIG_IDE_PROC_FS
- .proc = idescsi_proc,
- .settings = idescsi_settings,
+ .proc_entries = ide_scsi_proc_entries,
+ .proc_devsets = ide_scsi_proc_devsets,
#endif
};
-static int idescsi_ide_open(struct inode *inode, struct file *filp)
+static int idescsi_ide_open(struct block_device *bdev, fmode_t mode)
{
- struct gendisk *disk = inode->i_bdev->bd_disk;
- struct ide_scsi_obj *scsi;
+ struct ide_scsi_obj *scsi = ide_scsi_get(bdev->bd_disk);
- if (!(scsi = ide_scsi_get(disk)))
+ if (!scsi)
return -ENXIO;
return 0;
}
-static int idescsi_ide_release(struct inode *inode, struct file *filp)
+static int idescsi_ide_release(struct gendisk *disk, fmode_t mode)
{
- struct gendisk *disk = inode->i_bdev->bd_disk;
- struct ide_scsi_obj *scsi = ide_scsi_g(disk);
-
- ide_scsi_put(scsi);
-
+ ide_scsi_put(ide_scsi_g(disk));
return 0;
}
-static int idescsi_ide_ioctl(struct inode *inode, struct file *file,
+static int idescsi_ide_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct block_device *bdev = inode->i_bdev;
struct ide_scsi_obj *scsi = ide_scsi_g(bdev->bd_disk);
- return generic_ide_ioctl(scsi->drive, file, bdev, cmd, arg);
+ return generic_ide_ioctl(scsi->drive, bdev, cmd, arg);
}
static struct block_device_operations idescsi_ops = {
.owner = THIS_MODULE,
.open = idescsi_ide_open,
.release = idescsi_ide_release,
- .ioctl = idescsi_ide_ioctl,
+ .locked_ioctl = idescsi_ide_ioctl,
};
static int idescsi_slave_configure(struct scsi_device * sdp)
@@ -576,6 +578,8 @@ static int idescsi_eh_abort (struct scsi_cmnd *cmd)
{
idescsi_scsi_t *scsi = scsihost_to_idescsi(cmd->device->host);
ide_drive_t *drive = scsi->drive;
+ ide_hwif_t *hwif;
+ ide_hwgroup_t *hwgroup;
int busy;
int ret = FAILED;
@@ -592,13 +596,16 @@ static int idescsi_eh_abort (struct scsi_cmnd *cmd)
goto no_drive;
}
- /* First give it some more time, how much is "right" is hard to say :-( */
+ hwif = drive->hwif;
+ hwgroup = hwif->hwgroup;
- busy = ide_wait_not_busy(HWIF(drive), 100); /* FIXME - uses mdelay which causes latency? */
+ /* First give it some more time, how much is "right" is hard to say :-(
+ FIXME - uses mdelay which causes latency? */
+ busy = ide_wait_not_busy(hwif, 100);
if (test_bit(IDESCSI_LOG_CMD, &scsi->log))
printk (KERN_WARNING "ide-scsi: drive did%s become ready\n", busy?" not":"");
- spin_lock_irq(&ide_lock);
+ spin_lock_irq(&hwgroup->lock);
/* If there is no pc running we're done (our interrupt took care of it) */
pc = drive->pc;
@@ -627,7 +634,7 @@ static int idescsi_eh_abort (struct scsi_cmnd *cmd)
}
ide_unlock:
- spin_unlock_irq(&ide_lock);
+ spin_unlock_irq(&hwgroup->lock);
no_drive:
if (test_bit(IDESCSI_LOG_CMD, &scsi->log))
printk (KERN_WARNING "ide-scsi: abort returns %s\n", ret == SUCCESS?"success":"failed");
@@ -640,6 +647,7 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
struct request *req;
idescsi_scsi_t *scsi = scsihost_to_idescsi(cmd->device->host);
ide_drive_t *drive = scsi->drive;
+ ide_hwgroup_t *hwgroup;
int ready = 0;
int ret = SUCCESS;
@@ -656,14 +664,18 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
return FAILED;
}
+ hwgroup = drive->hwif->hwgroup;
+
spin_lock_irq(cmd->device->host->host_lock);
- spin_lock(&ide_lock);
+ spin_lock(&hwgroup->lock);
pc = drive->pc;
+ if (pc)
+ req = pc->rq;
- if (pc == NULL || (req = pc->rq) != HWGROUP(drive)->rq || !HWGROUP(drive)->handler) {
+ if (pc == NULL || req != hwgroup->rq || hwgroup->handler == NULL) {
printk (KERN_WARNING "ide-scsi: No active request in idescsi_eh_reset\n");
- spin_unlock(&ide_lock);
+ spin_unlock(&hwgroup->lock);
spin_unlock_irq(cmd->device->host->host_lock);
return FAILED;
}
@@ -683,10 +695,10 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
BUG();
}
- HWGROUP(drive)->rq = NULL;
- HWGROUP(drive)->handler = NULL;
- HWGROUP(drive)->busy = 1; /* will set this to zero when ide reset finished */
- spin_unlock(&ide_lock);
+ hwgroup->rq = NULL;
+ hwgroup->handler = NULL;
+ hwgroup->busy = 1; /* will set this to zero when ide reset finished */
+ spin_unlock(&hwgroup->lock);
ide_do_reset(drive);
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c
index 8053b1e86ccb..52bdc6df6b92 100644
--- a/drivers/scsi/in2000.c
+++ b/drivers/scsi/in2000.c
@@ -107,7 +107,7 @@
* this thing into as good a shape as possible, and I'm positive
* there are lots of lurking bugs and "Stupid Places".
*
- * Updated for Linux 2.5 by Alan Cox <alan@redhat.com>
+ * Updated for Linux 2.5 by Alan Cox <alan@lxorguk.ukuu.org.uk>
* - Using new_eh handler
* - Hopefully got all the locking right again
* See "FIXME" notes for items that could do with more work
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index e3f739776bad..5529518ff2fa 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -4,7 +4,7 @@
* Copyright (c) 1994-1998 Initio Corporation
* Copyright (c) 1998 Bas Vermeulen <bvermeul@blackstar.xs4all.nl>
* Copyright (c) 2004 Christoph Hellwig <hch@lst.de>
- * Copyright (c) 2007 Red Hat <alan@redhat.com>
+ * Copyright (c) 2007 Red Hat
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/initio.h b/drivers/scsi/initio.h
index cb48efa81fe2..e58af9e95506 100644
--- a/drivers/scsi/initio.h
+++ b/drivers/scsi/initio.h
@@ -4,7 +4,7 @@
* Copyright (c) 1994-1998 Initio Corporation
* All rights reserved.
*
- * Cleanups (c) Copyright 2007 Red Hat <alan@redhat.com>
+ * Cleanups (c) Copyright 2007 Red Hat <alan@lxorguk.ukuu.org.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index d30eb7ba018e..0edfb1fa63ce 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -2456,20 +2456,14 @@ static ssize_t ipr_read_trace(struct kobject *kobj,
struct Scsi_Host *shost = class_to_shost(dev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
unsigned long lock_flags = 0;
- int size = IPR_TRACE_SIZE;
- char *src = (char *)ioa_cfg->trace;
-
- if (off > size)
- return 0;
- if (off + count > size) {
- size -= off;
- count = size;
- }
+ ssize_t ret;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- memcpy(buf, &src[off], count);
+ ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
+ IPR_TRACE_SIZE);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- return count;
+
+ return ret;
}
static struct bin_attribute ipr_trace_attr = {
@@ -5395,9 +5389,9 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
wake_up_all(&ioa_cfg->reset_wait_q);
- spin_unlock_irq(ioa_cfg->host->host_lock);
+ spin_unlock(ioa_cfg->host->host_lock);
scsi_unblock_requests(ioa_cfg->host);
- spin_lock_irq(ioa_cfg->host->host_lock);
+ spin_lock(ioa_cfg->host->host_lock);
if (!ioa_cfg->allow_cmds)
scsi_block_requests(ioa_cfg->host);
@@ -7479,7 +7473,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
goto out_scsi_host_put;
}
- ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
+ ipr_regs = pci_ioremap_bar(pdev, 0);
if (!ipr_regs) {
dev_err(&pdev->dev,
@@ -7859,7 +7853,6 @@ static struct pci_driver ipr_driver = {
.remove = ipr_remove,
.shutdown = ipr_shutdown,
.err_handler = &ipr_err_handler,
- .dynids.use_driver_data = 1
};
/**
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 4871dd1f2582..59459141b437 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -19,7 +19,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
- * Alan Cox <alan@redhat.com> - Removed several careless u32/dma_addr_t errors
+ * Alan Cox <alan@lxorguk.ukuu.org.uk> - Removed several careless u32/dma_addr_t errors
* that broke 64bit platforms.
*/
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 2a2f0094570f..23808dfe22ba 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -27,7 +27,6 @@
*/
#include <linux/types.h>
-#include <linux/list.h>
#include <linux/inet.h>
#include <linux/file.h>
#include <linux/blkdev.h>
@@ -44,12 +43,12 @@
#include "iscsi_tcp.h"
-MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, "
+MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
+ "Dmitry Yusupov <dmitry_yus@yahoo.com>, "
"Alex Aizman <itn780@yahoo.com>");
MODULE_DESCRIPTION("iSCSI/TCP data-path");
MODULE_LICENSE("GPL");
#undef DEBUG_TCP
-#define DEBUG_ASSERT
#ifdef DEBUG_TCP
#define debug_tcp(fmt...) printk(KERN_INFO "tcp: " fmt)
@@ -57,953 +56,41 @@ MODULE_LICENSE("GPL");
#define debug_tcp(fmt...)
#endif
-#ifndef DEBUG_ASSERT
-#ifdef BUG_ON
-#undef BUG_ON
-#endif
-#define BUG_ON(expr)
-#endif
-
-static struct scsi_transport_template *iscsi_tcp_scsi_transport;
-static struct scsi_host_template iscsi_sht;
-static struct iscsi_transport iscsi_tcp_transport;
+static struct scsi_transport_template *iscsi_sw_tcp_scsi_transport;
+static struct scsi_host_template iscsi_sw_tcp_sht;
+static struct iscsi_transport iscsi_sw_tcp_transport;
static unsigned int iscsi_max_lun = 512;
module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
-static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
- struct iscsi_segment *segment);
-
-/*
- * Scatterlist handling: inside the iscsi_segment, we
- * remember an index into the scatterlist, and set data/size
- * to the current scatterlist entry. For highmem pages, we
- * kmap as needed.
- *
- * Note that the page is unmapped when we return from
- * TCP's data_ready handler, so we may end up mapping and
- * unmapping the same page repeatedly. The whole reason
- * for this is that we shouldn't keep the page mapped
- * outside the softirq.
- */
-
-/**
- * iscsi_tcp_segment_init_sg - init indicated scatterlist entry
- * @segment: the buffer object
- * @sg: scatterlist
- * @offset: byte offset into that sg entry
- *
- * This function sets up the segment so that subsequent
- * data is copied to the indicated sg entry, at the given
- * offset.
- */
-static inline void
-iscsi_tcp_segment_init_sg(struct iscsi_segment *segment,
- struct scatterlist *sg, unsigned int offset)
-{
- segment->sg = sg;
- segment->sg_offset = offset;
- segment->size = min(sg->length - offset,
- segment->total_size - segment->total_copied);
- segment->data = NULL;
-}
-
-/**
- * iscsi_tcp_segment_map - map the current S/G page
- * @segment: iscsi_segment
- * @recv: 1 if called from recv path
- *
- * We only need to possibly kmap data if scatter lists are being used,
- * because the iscsi passthrough and internal IO paths will never use high
- * mem pages.
- */
-static inline void
-iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
-{
- struct scatterlist *sg;
-
- if (segment->data != NULL || !segment->sg)
- return;
-
- sg = segment->sg;
- BUG_ON(segment->sg_mapped);
- BUG_ON(sg->length == 0);
-
- /*
- * If the page count is greater than one it is ok to send
- * to the network layer's zero copy send path. If not we
- * have to go the slow sendmsg path. We always map for the
- * recv path.
- */
- if (page_count(sg_page(sg)) >= 1 && !recv)
- return;
-
- debug_tcp("iscsi_tcp_segment_map %s %p\n", recv ? "recv" : "xmit",
- segment);
- segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
- segment->data = segment->sg_mapped + sg->offset + segment->sg_offset;
-}
-
-static inline void
-iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
-{
- debug_tcp("iscsi_tcp_segment_unmap %p\n", segment);
-
- if (segment->sg_mapped) {
- debug_tcp("iscsi_tcp_segment_unmap valid\n");
- kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
- segment->sg_mapped = NULL;
- segment->data = NULL;
- }
-}
-
-/*
- * Splice the digest buffer into the buffer
- */
-static inline void
-iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest)
-{
- segment->data = digest;
- segment->digest_len = ISCSI_DIGEST_SIZE;
- segment->total_size += ISCSI_DIGEST_SIZE;
- segment->size = ISCSI_DIGEST_SIZE;
- segment->copied = 0;
- segment->sg = NULL;
- segment->hash = NULL;
-}
-
-/**
- * iscsi_tcp_segment_done - check whether the segment is complete
- * @segment: iscsi segment to check
- * @recv: set to one if this is called from the recv path
- * @copied: number of bytes copied
- *
- * Check if we're done receiving this segment. If the receive
- * buffer is full but we expect more data, move on to the
- * next entry in the scatterlist.
- *
- * If the amount of data we received isn't a multiple of 4,
- * we will transparently receive the pad bytes, too.
- *
- * This function must be re-entrant.
- */
-static inline int
-iscsi_tcp_segment_done(struct iscsi_segment *segment, int recv, unsigned copied)
-{
- static unsigned char padbuf[ISCSI_PAD_LEN];
- struct scatterlist sg;
- unsigned int pad;
-
- debug_tcp("copied %u %u size %u %s\n", segment->copied, copied,
- segment->size, recv ? "recv" : "xmit");
- if (segment->hash && copied) {
- /*
-		 * If a segment is kmapped we must unmap it before sending
- * to the crypto layer since that will try to kmap it again.
- */
- iscsi_tcp_segment_unmap(segment);
-
- if (!segment->data) {
- sg_init_table(&sg, 1);
- sg_set_page(&sg, sg_page(segment->sg), copied,
- segment->copied + segment->sg_offset +
- segment->sg->offset);
- } else
- sg_init_one(&sg, segment->data + segment->copied,
- copied);
- crypto_hash_update(segment->hash, &sg, copied);
- }
-
- segment->copied += copied;
- if (segment->copied < segment->size) {
- iscsi_tcp_segment_map(segment, recv);
- return 0;
- }
-
- segment->total_copied += segment->copied;
- segment->copied = 0;
- segment->size = 0;
-
- /* Unmap the current scatterlist page, if there is one. */
- iscsi_tcp_segment_unmap(segment);
-
- /* Do we have more scatterlist entries? */
- debug_tcp("total copied %u total size %u\n", segment->total_copied,
- segment->total_size);
- if (segment->total_copied < segment->total_size) {
- /* Proceed to the next entry in the scatterlist. */
- iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg),
- 0);
- iscsi_tcp_segment_map(segment, recv);
- BUG_ON(segment->size == 0);
- return 0;
- }
-
- /* Do we need to handle padding? */
- pad = iscsi_padding(segment->total_copied);
- if (pad != 0) {
- debug_tcp("consume %d pad bytes\n", pad);
- segment->total_size += pad;
- segment->size = pad;
- segment->data = padbuf;
- return 0;
- }
-
- /*
-	 * Set us up for transferring the data digest. The hdr digest
-	 * is handled completely in the hdr done function.
- */
- if (segment->hash) {
- crypto_hash_final(segment->hash, segment->digest);
- iscsi_tcp_segment_splice_digest(segment,
- recv ? segment->recv_digest : segment->digest);
- return 0;
- }
-
- return 1;
-}
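
The pad handling at the tail of iscsi_tcp_segment_done() is worth seeing in isolation: iSCSI segments are padded to a 4-byte boundary and the pad bytes are consumed transparently. A minimal userspace sketch, assuming iscsi_padding() matches the kernel's ISCSI_PAD_LEN=4 definition:

#include <assert.h>
#include <stdio.h>

#define ISCSI_PAD_LEN 4

/* Bytes of padding needed to round len up to a 4-byte boundary. */
static unsigned int iscsi_padding(unsigned int len)
{
    len &= ISCSI_PAD_LEN - 1;
    return len ? ISCSI_PAD_LEN - len : 0;
}

int main(void)
{
    assert(iscsi_padding(1027) == 1);   /* 1027 -> 1028 */
    assert(iscsi_padding(1024) == 0);   /* already aligned */
    printf("pad(1027)=%u pad(1024)=%u\n",
           iscsi_padding(1027), iscsi_padding(1024));
    return 0;
}
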
-
/**
- * iscsi_tcp_xmit_segment - transmit segment
- * @tcp_conn: the iSCSI TCP connection
- * @segment: the buffer to transmit
- *
- * This function transmits as much of the buffer as
- * the network layer will accept, and returns the number of
- * bytes transmitted.
- *
- * If CRC hashing is enabled, the function will compute the
- * hash as it goes. When the entire segment has been transmitted,
- * it will retrieve the hash value and send it as well.
- */
-static int
-iscsi_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
- struct iscsi_segment *segment)
-{
- struct socket *sk = tcp_conn->sock;
- unsigned int copied = 0;
- int r = 0;
-
- while (!iscsi_tcp_segment_done(segment, 0, r)) {
- struct scatterlist *sg;
- unsigned int offset, copy;
- int flags = 0;
-
- r = 0;
- offset = segment->copied;
- copy = segment->size - offset;
-
- if (segment->total_copied + segment->size < segment->total_size)
- flags |= MSG_MORE;
-
- /* Use sendpage if we can; else fall back to sendmsg */
- if (!segment->data) {
- sg = segment->sg;
- offset += segment->sg_offset + sg->offset;
- r = tcp_conn->sendpage(sk, sg_page(sg), offset, copy,
- flags);
- } else {
- struct msghdr msg = { .msg_flags = flags };
- struct kvec iov = {
- .iov_base = segment->data + offset,
- .iov_len = copy
- };
-
- r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
- }
-
- if (r < 0) {
- iscsi_tcp_segment_unmap(segment);
- if (copied || r == -EAGAIN)
- break;
- return r;
- }
- copied += r;
- }
- return copied;
-}
-
-/**
- * iscsi_tcp_segment_recv - copy data to segment
- * @tcp_conn: the iSCSI TCP connection
- * @segment: the buffer to copy to
- * @ptr: data pointer
- * @len: amount of data available
- *
- * This function copies up to @len bytes to the
- * given buffer, and returns the number of bytes
- * consumed, which can actually be less than @len.
- *
- * If hash digest is enabled, the function will update the
- * hash while copying.
- * Combining these two operations doesn't buy us a lot (yet),
- * but in the future we could implement combined copy+crc,
- * just the way we do for network layer checksums.
- */
-static int
-iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
- struct iscsi_segment *segment, const void *ptr,
- unsigned int len)
-{
- unsigned int copy = 0, copied = 0;
-
- while (!iscsi_tcp_segment_done(segment, 1, copy)) {
- if (copied == len) {
- debug_tcp("iscsi_tcp_segment_recv copied %d bytes\n",
- len);
- break;
- }
-
- copy = min(len - copied, segment->size - segment->copied);
- debug_tcp("iscsi_tcp_segment_recv copying %d\n", copy);
- memcpy(segment->data + segment->copied, ptr + copied, copy);
- copied += copy;
- }
- return copied;
-}
-
-static inline void
-iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen,
- unsigned char digest[ISCSI_DIGEST_SIZE])
-{
- struct scatterlist sg;
-
- sg_init_one(&sg, hdr, hdrlen);
- crypto_hash_digest(hash, &sg, hdrlen, digest);
-}
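
The crypto_hash_*("crc32c") usage above computes the CRC32C (Castagnoli) checksum that RFC 3720 specifies for iSCSI header and data digests. A bitwise userspace reference, assuming the usual reflected polynomial 0x82F63B78 with initial and final inversion (a real driver uses the crypto API or a table/instruction-based version):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
{
    const uint8_t *p = buf;

    crc = ~crc;
    while (len--) {
        crc ^= *p++;
        for (int k = 0; k < 8; k++)
            crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
    }
    return ~crc;
}

int main(void)
{
    /* Standard check value: CRC32C("123456789") == 0xe3069283. */
    printf("%08x\n", crc32c(0, "123456789", 9));
    return 0;
}
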
-
-static inline int
-iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
- struct iscsi_segment *segment)
-{
- if (!segment->digest_len)
- return 1;
-
- if (memcmp(segment->recv_digest, segment->digest,
- segment->digest_len)) {
- debug_scsi("digest mismatch\n");
- return 0;
- }
-
- return 1;
-}
-
-/*
- * Helper function to set up segment buffer
- */
-static inline void
-__iscsi_segment_init(struct iscsi_segment *segment, size_t size,
- iscsi_segment_done_fn_t *done, struct hash_desc *hash)
-{
- memset(segment, 0, sizeof(*segment));
- segment->total_size = size;
- segment->done = done;
-
- if (hash) {
- segment->hash = hash;
- crypto_hash_init(hash);
- }
-}
-
-static inline void
-iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
- size_t size, iscsi_segment_done_fn_t *done,
- struct hash_desc *hash)
-{
- __iscsi_segment_init(segment, size, done, hash);
- segment->data = data;
- segment->size = size;
-}
-
-static inline int
-iscsi_segment_seek_sg(struct iscsi_segment *segment,
- struct scatterlist *sg_list, unsigned int sg_count,
- unsigned int offset, size_t size,
- iscsi_segment_done_fn_t *done, struct hash_desc *hash)
-{
- struct scatterlist *sg;
- unsigned int i;
-
- debug_scsi("iscsi_segment_seek_sg offset %u size %llu\n",
- offset, size);
- __iscsi_segment_init(segment, size, done, hash);
- for_each_sg(sg_list, sg, sg_count, i) {
- debug_scsi("sg %d, len %u offset %u\n", i, sg->length,
- sg->offset);
- if (offset < sg->length) {
- iscsi_tcp_segment_init_sg(segment, sg, offset);
- return 0;
- }
- offset -= sg->length;
- }
-
- return ISCSI_ERR_DATA_OFFSET;
-}
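
What iscsi_segment_seek_sg() does reduces to a short walk: skip whole scatterlist entries until the requested byte offset lands inside one, then start there. A standalone model, with an array of entry lengths standing in for sg->length:

#include <stdio.h>

/* Returns the entry index and stores the offset within it; -1 maps to
 * the ISCSI_ERR_DATA_OFFSET case above. */
static int seek_sg(const unsigned int *len, unsigned int count,
                   unsigned int offset, unsigned int *off_in_entry)
{
    for (unsigned int i = 0; i < count; i++) {
        if (offset < len[i]) {
            *off_in_entry = offset;
            return (int)i;
        }
        offset -= len[i];
    }
    return -1;
}

int main(void)
{
    unsigned int sg[] = { 4096, 4096, 2048 }, off;
    int i = seek_sg(sg, 3, 5000, &off);

    printf("entry %d offset %u\n", i, off);  /* entry 1 offset 904 */
    return 0;
}
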
-
-/**
- * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception
- * @tcp_conn: iscsi connection to prep for
- *
- * This function always passes NULL for the hash argument, because when this
- * function is called we do not yet know the final size of the header and want
- * to delay the digest processing until we know that.
- */
-static void
-iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn)
-{
- debug_tcp("iscsi_tcp_hdr_recv_prep(%p%s)\n", tcp_conn,
- tcp_conn->iscsi_conn->hdrdgst_en ? ", digest enabled" : "");
- iscsi_segment_init_linear(&tcp_conn->in.segment,
- tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr),
- iscsi_tcp_hdr_recv_done, NULL);
-}
-
-/*
- * Handle incoming reply to any other type of command
- */
-static int
-iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn,
- struct iscsi_segment *segment)
-{
- struct iscsi_conn *conn = tcp_conn->iscsi_conn;
- int rc = 0;
-
- if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
- return ISCSI_ERR_DATA_DGST;
-
- rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr,
- conn->data, tcp_conn->in.datalen);
- if (rc)
- return rc;
-
- iscsi_tcp_hdr_recv_prep(tcp_conn);
- return 0;
-}
-
-static void
-iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
-{
- struct iscsi_conn *conn = tcp_conn->iscsi_conn;
- struct hash_desc *rx_hash = NULL;
-
- if (conn->datadgst_en)
- rx_hash = &tcp_conn->rx_hash;
-
- iscsi_segment_init_linear(&tcp_conn->in.segment,
- conn->data, tcp_conn->in.datalen,
- iscsi_tcp_data_recv_done, rx_hash);
-}
-
-/*
- * must be called with session lock
- */
-static void
-iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
-{
- struct iscsi_tcp_task *tcp_task = task->dd_data;
- struct iscsi_r2t_info *r2t;
-
- /* nothing to do for mgmt tasks */
- if (!task->sc)
- return;
-
- /* flush task's r2t queues */
- while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
- sizeof(void*));
- debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n");
- }
-
- r2t = tcp_task->r2t;
- if (r2t != NULL) {
- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
- sizeof(void*));
- tcp_task->r2t = NULL;
- }
-}
-
-/**
- * iscsi_data_rsp - SCSI Data-In Response processing
- * @conn: iscsi connection
- * @task: scsi command task
- **/
-static int
-iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
-{
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- struct iscsi_tcp_task *tcp_task = task->dd_data;
- struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
- struct iscsi_session *session = conn->session;
- struct scsi_cmnd *sc = task->sc;
- int datasn = be32_to_cpu(rhdr->datasn);
- unsigned total_in_length = scsi_in(sc)->length;
-
- iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
- if (tcp_conn->in.datalen == 0)
- return 0;
-
- if (tcp_task->exp_datasn != datasn) {
- debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n",
- __func__, tcp_task->exp_datasn, datasn);
- return ISCSI_ERR_DATASN;
- }
-
- tcp_task->exp_datasn++;
-
- tcp_task->data_offset = be32_to_cpu(rhdr->offset);
- if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
- debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
- __func__, tcp_task->data_offset,
- tcp_conn->in.datalen, total_in_length);
- return ISCSI_ERR_DATA_OFFSET;
- }
-
- if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
- sc->result = (DID_OK << 16) | rhdr->cmd_status;
- conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
- if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
- ISCSI_FLAG_DATA_OVERFLOW)) {
- int res_count = be32_to_cpu(rhdr->residual_count);
-
- if (res_count > 0 &&
- (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
- res_count <= total_in_length))
- scsi_in(sc)->resid = res_count;
- else
- sc->result = (DID_BAD_TARGET << 16) |
- rhdr->cmd_status;
- }
- }
-
- conn->datain_pdus_cnt++;
- return 0;
-}
-
-/**
- * iscsi_solicit_data_init - initialize first Data-Out
- * @conn: iscsi connection
- * @task: scsi command task
- * @r2t: R2T info
- *
- * Notes:
- *	Initialize the first Data-Out within this R2T sequence and find
- *	the proper data_offset within this SCSI command.
- *
- * This function is called with connection lock taken.
- **/
-static void
-iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
- struct iscsi_r2t_info *r2t)
-{
- struct iscsi_data *hdr;
-
- hdr = &r2t->dtask.hdr;
- memset(hdr, 0, sizeof(struct iscsi_data));
- hdr->ttt = r2t->ttt;
- hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
- r2t->solicit_datasn++;
- hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
- hdr->itt = task->hdr->itt;
- hdr->exp_statsn = r2t->exp_statsn;
- hdr->offset = cpu_to_be32(r2t->data_offset);
- if (r2t->data_length > conn->max_xmit_dlength) {
- hton24(hdr->dlength, conn->max_xmit_dlength);
- r2t->data_count = conn->max_xmit_dlength;
- hdr->flags = 0;
- } else {
- hton24(hdr->dlength, r2t->data_length);
- r2t->data_count = r2t->data_length;
- hdr->flags = ISCSI_FLAG_CMD_FINAL;
- }
- conn->dataout_pdus_cnt++;
-
- r2t->sent = 0;
-}
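
The dlength/FINAL logic above is the first step of splitting an R2T window into Data-Out PDUs of at most max_xmit_dlength bytes, with ISCSI_FLAG_CMD_FINAL on the last one. The whole sequence in miniature (a sketch; the variable names mirror the driver's fields):

#include <stdio.h>

int main(void)
{
    unsigned int data_length = 70000, max_xmit_dlength = 65536;
    unsigned int offset = 0, datasn = 0;

    while (offset < data_length) {
        unsigned int left = data_length - offset;
        unsigned int count = left < max_xmit_dlength ? left
                                                     : max_xmit_dlength;

        printf("Data-Out datasn=%u offset=%u dlength=%u%s\n",
               datasn++, offset, count, count == left ? " F" : "");
        offset += count;
    }
    return 0;
}
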
-
-/**
- * iscsi_r2t_rsp - iSCSI R2T Response processing
- * @conn: iscsi connection
- * @task: scsi command task
- **/
-static int
-iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
-{
- struct iscsi_r2t_info *r2t;
- struct iscsi_session *session = conn->session;
- struct iscsi_tcp_task *tcp_task = task->dd_data;
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
- int r2tsn = be32_to_cpu(rhdr->r2tsn);
- int rc;
-
- if (tcp_conn->in.datalen) {
- iscsi_conn_printk(KERN_ERR, conn,
- "invalid R2t with datalen %d\n",
- tcp_conn->in.datalen);
- return ISCSI_ERR_DATALEN;
- }
-
- if (tcp_task->exp_datasn != r2tsn){
- debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
- __func__, tcp_task->exp_datasn, r2tsn);
- return ISCSI_ERR_R2TSN;
- }
-
- /* fill-in new R2T associated with the task */
- iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
-
- if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
- iscsi_conn_printk(KERN_INFO, conn,
- "dropping R2T itt %d in recovery.\n",
- task->itt);
- return 0;
- }
-
- rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
- BUG_ON(!rc);
-
- r2t->exp_statsn = rhdr->statsn;
- r2t->data_length = be32_to_cpu(rhdr->data_length);
- if (r2t->data_length == 0) {
- iscsi_conn_printk(KERN_ERR, conn,
- "invalid R2T with zero data len\n");
- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
- sizeof(void*));
- return ISCSI_ERR_DATALEN;
- }
-
- if (r2t->data_length > session->max_burst)
- debug_scsi("invalid R2T with data len %u and max burst %u."
- "Attempting to execute request.\n",
- r2t->data_length, session->max_burst);
-
- r2t->data_offset = be32_to_cpu(rhdr->data_offset);
- if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
- iscsi_conn_printk(KERN_ERR, conn,
- "invalid R2T with data len %u at offset %u "
- "and total length %d\n", r2t->data_length,
- r2t->data_offset, scsi_out(task->sc)->length);
- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
- sizeof(void*));
- return ISCSI_ERR_DATALEN;
- }
-
- r2t->ttt = rhdr->ttt; /* no flip */
- r2t->solicit_datasn = 0;
-
- iscsi_solicit_data_init(conn, task, r2t);
-
- tcp_task->exp_datasn = r2tsn + 1;
- __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
- conn->r2t_pdus_cnt++;
-
- iscsi_requeue_task(task);
- return 0;
-}
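
The R2T sanity checks applied above (non-zero length, window within the write buffer) as one standalone predicate; the bound is written overflow-safely here, an editorial tweak rather than the driver's exact expression:

#include <stdio.h>

enum { R2T_OK, R2T_ERR_DATALEN, R2T_ERR_DATA_OFFSET };

static int check_r2t(unsigned int len, unsigned int off,
                     unsigned int cmd_len)
{
    if (len == 0)
        return R2T_ERR_DATALEN;           /* "zero data len" */
    if (off > cmd_len || cmd_len - off < len)
        return R2T_ERR_DATA_OFFSET;       /* window past buffer end */
    return R2T_OK;
}

int main(void)
{
    printf("%d %d %d\n",
           check_r2t(4096, 0, 8192),      /* 0: fits */
           check_r2t(0, 0, 8192),         /* 1: zero length */
           check_r2t(8192, 4096, 8192));  /* 2: past the end */
    return 0;
}
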
-
-/*
- * Handle incoming reply to DataIn command
- */
-static int
-iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn,
- struct iscsi_segment *segment)
-{
- struct iscsi_conn *conn = tcp_conn->iscsi_conn;
- struct iscsi_hdr *hdr = tcp_conn->in.hdr;
- int rc;
-
- if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
- return ISCSI_ERR_DATA_DGST;
-
- /* check for non-exceptional status */
- if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
- rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
- if (rc)
- return rc;
- }
-
- iscsi_tcp_hdr_recv_prep(tcp_conn);
- return 0;
-}
-
-/**
- * iscsi_tcp_hdr_dissect - process PDU header
- * @conn: iSCSI connection
- * @hdr: PDU header
- *
- * This function analyzes the header of the PDU received,
- * and performs several sanity checks. If the PDU is accompanied
- * by data, the receive buffer is set up to copy the incoming data
- * to the correct location.
- */
-static int
-iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
-{
- int rc = 0, opcode, ahslen;
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- struct iscsi_task *task;
-
- /* verify PDU length */
- tcp_conn->in.datalen = ntoh24(hdr->dlength);
- if (tcp_conn->in.datalen > conn->max_recv_dlength) {
- iscsi_conn_printk(KERN_ERR, conn,
- "iscsi_tcp: datalen %d > %d\n",
- tcp_conn->in.datalen, conn->max_recv_dlength);
- return ISCSI_ERR_DATALEN;
- }
-
- /* Additional header segments. So far, we don't
- * process additional headers.
- */
- ahslen = hdr->hlength << 2;
-
- opcode = hdr->opcode & ISCSI_OPCODE_MASK;
- /* verify itt (itt encoding: age+cid+itt) */
- rc = iscsi_verify_itt(conn, hdr->itt);
- if (rc)
- return rc;
-
- debug_tcp("opcode 0x%x ahslen %d datalen %d\n",
- opcode, ahslen, tcp_conn->in.datalen);
-
- switch(opcode) {
- case ISCSI_OP_SCSI_DATA_IN:
- spin_lock(&conn->session->lock);
- task = iscsi_itt_to_ctask(conn, hdr->itt);
- if (!task)
- rc = ISCSI_ERR_BAD_ITT;
- else
- rc = iscsi_data_rsp(conn, task);
- if (rc) {
- spin_unlock(&conn->session->lock);
- break;
- }
-
- if (tcp_conn->in.datalen) {
- struct iscsi_tcp_task *tcp_task = task->dd_data;
- struct hash_desc *rx_hash = NULL;
- struct scsi_data_buffer *sdb = scsi_in(task->sc);
-
- /*
- * Setup copy of Data-In into the Scsi_Cmnd
- * Scatterlist case:
- * We set up the iscsi_segment to point to the next
- * scatterlist entry to copy to. As we go along,
- * we move on to the next scatterlist entry and
- * update the digest per-entry.
- */
- if (conn->datadgst_en)
- rx_hash = &tcp_conn->rx_hash;
-
- debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
- "datalen=%d)\n", tcp_conn,
- tcp_task->data_offset,
- tcp_conn->in.datalen);
- rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
- sdb->table.sgl,
- sdb->table.nents,
- tcp_task->data_offset,
- tcp_conn->in.datalen,
- iscsi_tcp_process_data_in,
- rx_hash);
- spin_unlock(&conn->session->lock);
- return rc;
- }
- rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
- spin_unlock(&conn->session->lock);
- break;
- case ISCSI_OP_SCSI_CMD_RSP:
- if (tcp_conn->in.datalen) {
- iscsi_tcp_data_recv_prep(tcp_conn);
- return 0;
- }
- rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
- break;
- case ISCSI_OP_R2T:
- spin_lock(&conn->session->lock);
- task = iscsi_itt_to_ctask(conn, hdr->itt);
- if (!task)
- rc = ISCSI_ERR_BAD_ITT;
- else if (ahslen)
- rc = ISCSI_ERR_AHSLEN;
- else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
- rc = iscsi_r2t_rsp(conn, task);
- else
- rc = ISCSI_ERR_PROTO;
- spin_unlock(&conn->session->lock);
- break;
- case ISCSI_OP_LOGIN_RSP:
- case ISCSI_OP_TEXT_RSP:
- case ISCSI_OP_REJECT:
- case ISCSI_OP_ASYNC_EVENT:
- /*
- * It is possible that we could get a PDU with a buffer larger
- * than 8K, but there are no targets that currently do this.
- * For now we fail until we find a vendor that needs it
- */
- if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
- iscsi_conn_printk(KERN_ERR, conn,
- "iscsi_tcp: received buffer of "
- "len %u but conn buffer is only %u "
- "(opcode %0x)\n",
- tcp_conn->in.datalen,
- ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
- rc = ISCSI_ERR_PROTO;
- break;
- }
-
- /* If there's data coming in with the response,
- * receive it to the connection's buffer.
- */
- if (tcp_conn->in.datalen) {
- iscsi_tcp_data_recv_prep(tcp_conn);
- return 0;
- }
- /* fall through */
- case ISCSI_OP_LOGOUT_RSP:
- case ISCSI_OP_NOOP_IN:
- case ISCSI_OP_SCSI_TMFUNC_RSP:
- rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
- break;
- default:
- rc = ISCSI_ERR_BAD_OPCODE;
- break;
- }
-
- if (rc == 0) {
- /* Anything that comes with data should have
- * been handled above. */
- if (tcp_conn->in.datalen)
- return ISCSI_ERR_PROTO;
- iscsi_tcp_hdr_recv_prep(tcp_conn);
- }
-
- return rc;
-}
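
Two wire-format decodings this dissector leans on: dlength is a 24-bit big-endian field (ntoh24 in the iSCSI headers) and hlength counts AHS in 4-byte words, hence the << 2. In isolation:

#include <stdint.h>
#include <stdio.h>

static uint32_t ntoh24(const uint8_t p[3])
{
    return ((uint32_t)p[0] << 16) | ((uint32_t)p[1] << 8) | p[2];
}

int main(void)
{
    uint8_t dlength[3] = { 0x00, 0x02, 0x00 };  /* 512-byte segment */
    uint8_t hlength = 3;                        /* 3 words of AHS */

    printf("datalen=%u ahslen=%u\n", ntoh24(dlength), hlength << 2);
    return 0;
}
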
-
-/**
- * iscsi_tcp_hdr_recv_done - process PDU header
- *
- * This is the callback invoked when the PDU header has
- * been received. If the header is followed by additional
- * header segments, we go back for more data.
- */
-static int
-iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
- struct iscsi_segment *segment)
-{
- struct iscsi_conn *conn = tcp_conn->iscsi_conn;
- struct iscsi_hdr *hdr;
-
- /* Check if there are additional header segments
- * *prior* to computing the digest, because we
- * may need to go back to the caller for more.
- */
- hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf;
- if (segment->copied == sizeof(struct iscsi_hdr) && hdr->hlength) {
- /* Bump the header length - the caller will
- * just loop around and get the AHS for us, and
- * call again. */
- unsigned int ahslen = hdr->hlength << 2;
-
- /* Make sure we don't overflow */
- if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf))
- return ISCSI_ERR_AHSLEN;
-
- segment->total_size += ahslen;
- segment->size += ahslen;
- return 0;
- }
-
- /* We're done processing the header. See if we're doing
- * header digests; if so, set up the recv_digest buffer
- * and go back for more. */
- if (conn->hdrdgst_en) {
- if (segment->digest_len == 0) {
- iscsi_tcp_segment_splice_digest(segment,
- segment->recv_digest);
- return 0;
- }
- iscsi_tcp_dgst_header(&tcp_conn->rx_hash, hdr,
- segment->total_copied - ISCSI_DIGEST_SIZE,
- segment->digest);
-
- if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
- return ISCSI_ERR_HDR_DGST;
- }
-
- tcp_conn->in.hdr = hdr;
- return iscsi_tcp_hdr_dissect(conn, hdr);
-}
-
-/**
- * iscsi_tcp_recv - TCP receive in sendfile fashion
+ * iscsi_sw_tcp_recv - TCP receive in sendfile fashion
* @rd_desc: read descriptor
* @skb: socket buffer
* @offset: offset in skb
* @len: skb->len - offset
- **/
-static int
-iscsi_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
- unsigned int offset, size_t len)
+ */
+static int iscsi_sw_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
+ unsigned int offset, size_t len)
{
struct iscsi_conn *conn = rd_desc->arg.data;
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- struct iscsi_segment *segment = &tcp_conn->in.segment;
- struct skb_seq_state seq;
- unsigned int consumed = 0;
- int rc = 0;
+ unsigned int consumed, total_consumed = 0;
+ int status;
debug_tcp("in %d bytes\n", skb->len - offset);
- if (unlikely(conn->suspend_rx)) {
- debug_tcp("conn %d Rx suspended!\n", conn->id);
- return 0;
- }
-
- skb_prepare_seq_read(skb, offset, skb->len, &seq);
- while (1) {
- unsigned int avail;
- const u8 *ptr;
-
- avail = skb_seq_read(consumed, &ptr, &seq);
- if (avail == 0) {
- debug_tcp("no more data avail. Consumed %d\n",
- consumed);
- break;
- }
- BUG_ON(segment->copied >= segment->size);
-
- debug_tcp("skb %p ptr=%p avail=%u\n", skb, ptr, avail);
- rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail);
- BUG_ON(rc == 0);
- consumed += rc;
-
- if (segment->total_copied >= segment->total_size) {
- debug_tcp("segment done\n");
- rc = segment->done(tcp_conn, segment);
- if (rc != 0) {
- skb_abort_seq_read(&seq);
- goto error;
- }
-
-			/* The done() function sets up the
- * next segment. */
- }
- }
- skb_abort_seq_read(&seq);
- conn->rxdata_octets += consumed;
- return consumed;
+ do {
+ status = 0;
+ consumed = iscsi_tcp_recv_skb(conn, skb, offset, 0, &status);
+ offset += consumed;
+ total_consumed += consumed;
+ } while (consumed != 0 && status != ISCSI_TCP_SKB_DONE);
-error:
- debug_tcp("Error receiving PDU, errno=%d\n", rc);
- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
- return 0;
+ debug_tcp("read %d bytes status %d\n", skb->len - offset, status);
+ return total_consumed;
}
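
The new receive path is a classic consume loop: keep handing the skb to the library until it stops making progress or reports the buffer exhausted. The shape of it, with a toy consume() standing in for iscsi_tcp_recv_skb() and invented status values:

#include <stdio.h>
#include <string.h>

enum { MORE, DONE };

/* Pretend consumer: takes up to 5 bytes per call. */
static unsigned int consume(const char *buf, unsigned int off,
                            unsigned int len, int *status)
{
    unsigned int n = len - off < 5 ? len - off : 5;

    (void)buf;  /* a real consumer would parse these bytes */
    *status = (off + n == len) ? DONE : MORE;
    return n;
}

int main(void)
{
    const char *buf = "iSCSI PDU bytes...";
    unsigned int len = (unsigned int)strlen(buf);
    unsigned int off = 0, total = 0, consumed;
    int status;

    do {
        consumed = consume(buf, off, len, &status);
        off += consumed;
        total += consumed;
    } while (consumed != 0 && status != DONE);

    printf("consumed %u of %u\n", total, len);
    return 0;
}
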
-static void
-iscsi_tcp_data_ready(struct sock *sk, int flag)
+static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag)
{
struct iscsi_conn *conn = sk->sk_user_data;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
@@ -1019,7 +106,7 @@ iscsi_tcp_data_ready(struct sock *sk, int flag)
*/
rd_desc.arg.data = conn;
rd_desc.count = 1;
- tcp_read_sock(sk, &rd_desc, iscsi_tcp_recv);
+ tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);
read_unlock(&sk->sk_callback_lock);
@@ -1028,10 +115,10 @@ iscsi_tcp_data_ready(struct sock *sk, int flag)
iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
}
-static void
-iscsi_tcp_state_change(struct sock *sk)
+static void iscsi_sw_tcp_state_change(struct sock *sk)
{
struct iscsi_tcp_conn *tcp_conn;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn;
struct iscsi_conn *conn;
struct iscsi_session *session;
void (*old_state_change)(struct sock *);
@@ -1049,7 +136,8 @@ iscsi_tcp_state_change(struct sock *sk)
}
tcp_conn = conn->dd_data;
- old_state_change = tcp_conn->old_state_change;
+ tcp_sw_conn = tcp_conn->dd_data;
+ old_state_change = tcp_sw_conn->old_state_change;
read_unlock(&sk->sk_callback_lock);
@@ -1060,65 +148,127 @@ iscsi_tcp_state_change(struct sock *sk)
* iscsi_write_space - Called when more output buffer space is available
* @sk: socket space is available for
**/
-static void
-iscsi_write_space(struct sock *sk)
+static void iscsi_sw_tcp_write_space(struct sock *sk)
{
struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
- tcp_conn->old_write_space(sk);
+ tcp_sw_conn->old_write_space(sk);
debug_tcp("iscsi_write_space: cid %d\n", conn->id);
scsi_queue_work(conn->session->host, &conn->xmitwork);
}
-static void
-iscsi_conn_set_callbacks(struct iscsi_conn *conn)
+static void iscsi_sw_tcp_conn_set_callbacks(struct iscsi_conn *conn)
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- struct sock *sk = tcp_conn->sock->sk;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct sock *sk = tcp_sw_conn->sock->sk;
/* assign new callbacks */
write_lock_bh(&sk->sk_callback_lock);
sk->sk_user_data = conn;
- tcp_conn->old_data_ready = sk->sk_data_ready;
- tcp_conn->old_state_change = sk->sk_state_change;
- tcp_conn->old_write_space = sk->sk_write_space;
- sk->sk_data_ready = iscsi_tcp_data_ready;
- sk->sk_state_change = iscsi_tcp_state_change;
- sk->sk_write_space = iscsi_write_space;
+ tcp_sw_conn->old_data_ready = sk->sk_data_ready;
+ tcp_sw_conn->old_state_change = sk->sk_state_change;
+ tcp_sw_conn->old_write_space = sk->sk_write_space;
+ sk->sk_data_ready = iscsi_sw_tcp_data_ready;
+ sk->sk_state_change = iscsi_sw_tcp_state_change;
+ sk->sk_write_space = iscsi_sw_tcp_write_space;
write_unlock_bh(&sk->sk_callback_lock);
}
static void
-iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn)
+iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_sw_tcp_conn *tcp_sw_conn)
{
- struct sock *sk = tcp_conn->sock->sk;
+ struct sock *sk = tcp_sw_conn->sock->sk;
/* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
write_lock_bh(&sk->sk_callback_lock);
sk->sk_user_data = NULL;
- sk->sk_data_ready = tcp_conn->old_data_ready;
- sk->sk_state_change = tcp_conn->old_state_change;
- sk->sk_write_space = tcp_conn->old_write_space;
+ sk->sk_data_ready = tcp_sw_conn->old_data_ready;
+ sk->sk_state_change = tcp_sw_conn->old_state_change;
+ sk->sk_write_space = tcp_sw_conn->old_write_space;
sk->sk_no_check = 0;
write_unlock_bh(&sk->sk_callback_lock);
}
/**
- * iscsi_xmit - TCP transmit
+ * iscsi_sw_tcp_xmit_segment - transmit segment
+ * @tcp_conn: the iSCSI TCP connection
+ * @segment: the buffer to transmit
+ *
+ * This function transmits as much of the buffer as
+ * the network layer will accept, and returns the number of
+ * bytes transmitted.
+ *
+ * If CRC hashing is enabled, the function will compute the
+ * hash as it goes. When the entire segment has been transmitted,
+ * it will retrieve the hash value and send it as well.
+ */
+static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
+ struct iscsi_segment *segment)
+{
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct socket *sk = tcp_sw_conn->sock;
+ unsigned int copied = 0;
+ int r = 0;
+
+ while (!iscsi_tcp_segment_done(tcp_conn, segment, 0, r)) {
+ struct scatterlist *sg;
+ unsigned int offset, copy;
+ int flags = 0;
+
+ r = 0;
+ offset = segment->copied;
+ copy = segment->size - offset;
+
+ if (segment->total_copied + segment->size < segment->total_size)
+ flags |= MSG_MORE;
+
+ /* Use sendpage if we can; else fall back to sendmsg */
+ if (!segment->data) {
+ sg = segment->sg;
+ offset += segment->sg_offset + sg->offset;
+ r = tcp_sw_conn->sendpage(sk, sg_page(sg), offset,
+ copy, flags);
+ } else {
+ struct msghdr msg = { .msg_flags = flags };
+ struct kvec iov = {
+ .iov_base = segment->data + offset,
+ .iov_len = copy
+ };
+
+ r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
+ }
+
+ if (r < 0) {
+ iscsi_tcp_segment_unmap(segment);
+ if (copied || r == -EAGAIN)
+ break;
+ return r;
+ }
+ copied += r;
+ }
+ return copied;
+}
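
The kernel_sendmsg() leg above follows the usual partial-write discipline: a short write just advances the offset, and an -EAGAIN after some progress returns what was sent so far. A userspace analogue over a plain socket (send_all() is a hypothetical helper, not a driver function; MSG_MORE plays the role it does in the driver when more PDU bytes follow):

#include <errno.h>
#include <stddef.h>
#include <sys/types.h>
#include <sys/socket.h>

/* Returns bytes sent; -1 with errno set only if nothing went out. */
ssize_t send_all(int fd, const char *buf, size_t len, int more)
{
    size_t copied = 0;

    while (copied < len) {
        ssize_t r = send(fd, buf + copied, len - copied,
                         more ? MSG_MORE : 0);

        if (r < 0) {
            if (copied || errno == EAGAIN)
                break;          /* partial progress: report it */
            return -1;
        }
        copied += (size_t)r;
    }
    return (ssize_t)copied;
}

int main(void)
{
    int sv[2];

    if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv))
        return 1;
    return send_all(sv[0], "hello", 5, 0) == 5 ? 0 : 1;
}
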
+
+/**
+ * iscsi_sw_tcp_xmit - TCP transmit
**/
-static int
-iscsi_xmit(struct iscsi_conn *conn)
+static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn)
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- struct iscsi_segment *segment = &tcp_conn->out.segment;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct iscsi_segment *segment = &tcp_sw_conn->out.segment;
unsigned int consumed = 0;
int rc = 0;
while (1) {
- rc = iscsi_tcp_xmit_segment(tcp_conn, segment);
- if (rc < 0)
+ rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment);
+ if (rc < 0) {
+ rc = ISCSI_ERR_XMIT_FAILED;
goto error;
+ }
if (rc == 0)
break;
@@ -1127,7 +277,7 @@ iscsi_xmit(struct iscsi_conn *conn)
if (segment->total_copied >= segment->total_size) {
if (segment->done != NULL) {
rc = segment->done(tcp_conn, segment);
- if (rc < 0)
+ if (rc != 0)
goto error;
}
}
@@ -1142,29 +292,29 @@ error:
/* Transmit error. We could initiate error recovery
* here. */
debug_tcp("Error sending PDU, errno=%d\n", rc);
- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
- return rc;
+ iscsi_conn_failure(conn, rc);
+ return -EIO;
}
/**
* iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit
*/
-static inline int
-iscsi_tcp_xmit_qlen(struct iscsi_conn *conn)
+static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- struct iscsi_segment *segment = &tcp_conn->out.segment;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct iscsi_segment *segment = &tcp_sw_conn->out.segment;
return segment->total_copied - segment->total_size;
}
-static inline int
-iscsi_tcp_flush(struct iscsi_conn *conn)
+static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
{
+ struct iscsi_conn *conn = task->conn;
int rc;
- while (iscsi_tcp_xmit_qlen(conn)) {
- rc = iscsi_xmit(conn);
+ while (iscsi_sw_tcp_xmit_qlen(conn)) {
+ rc = iscsi_sw_tcp_xmit(conn);
if (rc == 0)
return -EAGAIN;
if (rc < 0)
@@ -1178,27 +328,31 @@ iscsi_tcp_flush(struct iscsi_conn *conn)
* This is called when we're done sending the header.
* Simply copy the data_segment to the send segment, and return.
*/
-static int
-iscsi_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
- struct iscsi_segment *segment)
+static int iscsi_sw_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
+ struct iscsi_segment *segment)
{
- tcp_conn->out.segment = tcp_conn->out.data_segment;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+
+ tcp_sw_conn->out.segment = tcp_sw_conn->out.data_segment;
debug_tcp("Header done. Next segment size %u total_size %u\n",
- tcp_conn->out.segment.size, tcp_conn->out.segment.total_size);
+ tcp_sw_conn->out.segment.size,
+ tcp_sw_conn->out.segment.total_size);
return 0;
}
-static void
-iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
+static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr,
+ size_t hdrlen)
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
debug_tcp("%s(%p%s)\n", __func__, tcp_conn,
conn->hdrdgst_en? ", digest enabled" : "");
/* Clear the data segment - needs to be filled in by the
* caller using iscsi_tcp_send_data_prep() */
- memset(&tcp_conn->out.data_segment, 0, sizeof(struct iscsi_segment));
+ memset(&tcp_sw_conn->out.data_segment, 0,
+ sizeof(struct iscsi_segment));
/* If header digest is enabled, compute the CRC and
* place the digest into the same buffer. We make
@@ -1206,7 +360,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
* sufficient room.
*/
if (conn->hdrdgst_en) {
- iscsi_tcp_dgst_header(&tcp_conn->tx_hash, hdr, hdrlen,
+ iscsi_tcp_dgst_header(&tcp_sw_conn->tx_hash, hdr, hdrlen,
hdr + hdrlen);
hdrlen += ISCSI_DIGEST_SIZE;
}
@@ -1214,10 +368,10 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
/* Remember header pointer for later, when we need
* to decide whether there's a payload to go along
* with the header. */
- tcp_conn->out.hdr = hdr;
+ tcp_sw_conn->out.hdr = hdr;
- iscsi_segment_init_linear(&tcp_conn->out.segment, hdr, hdrlen,
- iscsi_tcp_send_hdr_done, NULL);
+ iscsi_segment_init_linear(&tcp_sw_conn->out.segment, hdr, hdrlen,
+ iscsi_sw_tcp_send_hdr_done, NULL);
}
/*
@@ -1226,11 +380,12 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
* of by the iscsi_segment routines.
*/
static int
-iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
- unsigned int count, unsigned int offset,
- unsigned int len)
+iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
+ unsigned int count, unsigned int offset,
+ unsigned int len)
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
struct hash_desc *tx_hash = NULL;
unsigned int hdr_spec_len;
@@ -1240,22 +395,23 @@ iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
/* Make sure the datalen matches what the caller
said he would send. */
- hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength);
+ hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
if (conn->datadgst_en)
- tx_hash = &tcp_conn->tx_hash;
+ tx_hash = &tcp_sw_conn->tx_hash;
- return iscsi_segment_seek_sg(&tcp_conn->out.data_segment,
- sg, count, offset, len,
- NULL, tx_hash);
+ return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment,
+ sg, count, offset, len,
+ NULL, tx_hash);
}
static void
-iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
size_t len)
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
struct hash_desc *tx_hash = NULL;
unsigned int hdr_spec_len;
@@ -1264,341 +420,160 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
/* Make sure the datalen matches what the caller
said he would send. */
- hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength);
+ hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
if (conn->datadgst_en)
- tx_hash = &tcp_conn->tx_hash;
+ tx_hash = &tcp_sw_conn->tx_hash;
- iscsi_segment_init_linear(&tcp_conn->out.data_segment,
+ iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment,
data, len, NULL, tx_hash);
}
-/**
- * iscsi_solicit_data_cont - initialize next Data-Out
- * @conn: iscsi connection
- * @task: scsi command task
- * @r2t: R2T info
- * @left: bytes left to transfer
- *
- * Notes:
- *	Initialize the next Data-Out within this R2T sequence and continue
- *	processing the next Scatter-Gather element (if any) of this SCSI command.
- *
- * Called under connection lock.
- **/
-static int
-iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
- struct iscsi_r2t_info *r2t)
+static int iscsi_sw_tcp_pdu_init(struct iscsi_task *task,
+ unsigned int offset, unsigned int count)
{
- struct iscsi_data *hdr;
- int new_offset, left;
-
- BUG_ON(r2t->data_length - r2t->sent < 0);
- left = r2t->data_length - r2t->sent;
- if (left == 0)
- return 0;
-
- hdr = &r2t->dtask.hdr;
- memset(hdr, 0, sizeof(struct iscsi_data));
- hdr->ttt = r2t->ttt;
- hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
- r2t->solicit_datasn++;
- hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
- hdr->itt = task->hdr->itt;
- hdr->exp_statsn = r2t->exp_statsn;
- new_offset = r2t->data_offset + r2t->sent;
- hdr->offset = cpu_to_be32(new_offset);
- if (left > conn->max_xmit_dlength) {
- hton24(hdr->dlength, conn->max_xmit_dlength);
- r2t->data_count = conn->max_xmit_dlength;
- } else {
- hton24(hdr->dlength, left);
- r2t->data_count = left;
- hdr->flags = ISCSI_FLAG_CMD_FINAL;
- }
-
- conn->dataout_pdus_cnt++;
- return 1;
-}
-
-/**
- * iscsi_tcp_task - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
- * @conn: iscsi connection
- * @task: scsi command task
- * @sc: scsi command
- **/
-static int
-iscsi_tcp_task_init(struct iscsi_task *task)
-{
- struct iscsi_tcp_task *tcp_task = task->dd_data;
struct iscsi_conn *conn = task->conn;
- struct scsi_cmnd *sc = task->sc;
- int err;
+ int err = 0;
- if (!sc) {
- /*
- * mgmt tasks do not have a scatterlist since they come
- * in from the iscsi interface.
- */
- debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id,
- task->itt);
-
- /* Prepare PDU, optionally w/ immediate data */
- iscsi_tcp_send_hdr_prep(conn, task->hdr, sizeof(*task->hdr));
-
- /* If we have immediate data, attach a payload */
- if (task->data_count)
- iscsi_tcp_send_linear_data_prepare(conn, task->data,
- task->data_count);
- return 0;
- }
+ iscsi_sw_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
- BUG_ON(__kfifo_len(tcp_task->r2tqueue));
- tcp_task->sent = 0;
- tcp_task->exp_datasn = 0;
+ if (!count)
+ return 0;
- /* Prepare PDU, optionally w/ immediate data */
- debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n",
- conn->id, task->itt, task->imm_count,
- task->unsol_count);
- iscsi_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
+ if (!task->sc)
+ iscsi_sw_tcp_send_linear_data_prep(conn, task->data, count);
+ else {
+ struct scsi_data_buffer *sdb = scsi_out(task->sc);
- if (!task->imm_count)
- return 0;
+ err = iscsi_sw_tcp_send_data_prep(conn, sdb->table.sgl,
+ sdb->table.nents, offset,
+ count);
+ }
- /* If we have immediate data, attach a payload */
- err = iscsi_tcp_send_data_prep(conn, scsi_out(sc)->table.sgl,
- scsi_out(sc)->table.nents,
- 0, task->imm_count);
- if (err)
- return err;
- tcp_task->sent += task->imm_count;
- task->imm_count = 0;
+ if (err) {
+ iscsi_conn_failure(conn, err);
+ return -EIO;
+ }
return 0;
}
-/*
- * iscsi_tcp_task_xmit - xmit normal PDU task
- * @task: iscsi command task
- *
- * We're expected to return 0 when everything was transmitted successfully,
- * -EAGAIN if there's still data in the queue, or != 0 for any other kind
- * of error.
- */
-static int
-iscsi_tcp_task_xmit(struct iscsi_task *task)
+static int iscsi_sw_tcp_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
{
- struct iscsi_conn *conn = task->conn;
struct iscsi_tcp_task *tcp_task = task->dd_data;
- struct scsi_cmnd *sc = task->sc;
- struct scsi_data_buffer *sdb;
- int rc = 0;
-
-flush:
- /* Flush any pending data first. */
- rc = iscsi_tcp_flush(conn);
- if (rc < 0)
- return rc;
-
- /* mgmt command */
- if (!sc) {
- if (task->hdr->itt == RESERVED_ITT)
- iscsi_put_task(task);
- return 0;
- }
-
- /* Are we done already? */
- if (sc->sc_data_direction != DMA_TO_DEVICE)
- return 0;
- sdb = scsi_out(sc);
- if (task->unsol_count != 0) {
- struct iscsi_data *hdr = &tcp_task->unsol_dtask.hdr;
-
- /* Prepare a header for the unsolicited PDU.
- * The amount of data we want to send will be
- * in task->data_count.
- * FIXME: return the data count instead.
- */
- iscsi_prep_unsolicit_data_pdu(task, hdr);
-
- debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n",
- task->itt, tcp_task->sent, task->data_count);
-
- iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr));
- rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
- sdb->table.nents, tcp_task->sent,
- task->data_count);
- if (rc)
- goto fail;
- tcp_task->sent += task->data_count;
- task->unsol_count -= task->data_count;
- goto flush;
- } else {
- struct iscsi_session *session = conn->session;
- struct iscsi_r2t_info *r2t;
-
- /* All unsolicited PDUs sent. Check for solicited PDUs.
- */
- spin_lock_bh(&session->lock);
- r2t = tcp_task->r2t;
- if (r2t != NULL) {
- /* Continue with this R2T? */
- if (!iscsi_solicit_data_cont(conn, task, r2t)) {
- debug_scsi(" done with r2t %p\n", r2t);
-
- __kfifo_put(tcp_task->r2tpool.queue,
- (void*)&r2t, sizeof(void*));
- tcp_task->r2t = r2t = NULL;
- }
- }
-
- if (r2t == NULL) {
- __kfifo_get(tcp_task->r2tqueue, (void*)&tcp_task->r2t,
- sizeof(void*));
- r2t = tcp_task->r2t;
- }
- spin_unlock_bh(&session->lock);
-
- /* Waiting for more R2Ts to arrive. */
- if (r2t == NULL) {
- debug_tcp("no R2Ts yet\n");
- return 0;
- }
-
- debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
- r2t, r2t->solicit_datasn - 1, task->itt,
- r2t->data_offset + r2t->sent, r2t->data_count);
-
- iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr,
- sizeof(struct iscsi_hdr));
-
- rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
- sdb->table.nents,
- r2t->data_offset + r2t->sent,
- r2t->data_count);
- if (rc)
- goto fail;
- tcp_task->sent += r2t->data_count;
- r2t->sent += r2t->data_count;
- goto flush;
- }
+ task->hdr = task->dd_data + sizeof(*tcp_task);
+ task->hdr_max = sizeof(struct iscsi_sw_tcp_hdrbuf) - ISCSI_DIGEST_SIZE;
return 0;
-fail:
- iscsi_conn_failure(conn, rc);
- return -EIO;
}
static struct iscsi_cls_conn *
-iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
+ uint32_t conn_idx)
{
struct iscsi_conn *conn;
struct iscsi_cls_conn *cls_conn;
struct iscsi_tcp_conn *tcp_conn;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn;
- cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
+ cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn),
+ conn_idx);
if (!cls_conn)
return NULL;
conn = cls_conn->dd_data;
- /*
- * due to strange issues with iser these are not set
- * in iscsi_conn_setup
- */
- conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
-
tcp_conn = conn->dd_data;
- tcp_conn->iscsi_conn = conn;
+ tcp_sw_conn = tcp_conn->dd_data;
- tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
- CRYPTO_ALG_ASYNC);
- tcp_conn->tx_hash.flags = 0;
- if (IS_ERR(tcp_conn->tx_hash.tfm))
+ tcp_sw_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+ tcp_sw_conn->tx_hash.flags = 0;
+ if (IS_ERR(tcp_sw_conn->tx_hash.tfm))
goto free_conn;
- tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
- CRYPTO_ALG_ASYNC);
- tcp_conn->rx_hash.flags = 0;
- if (IS_ERR(tcp_conn->rx_hash.tfm))
+ tcp_sw_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+ tcp_sw_conn->rx_hash.flags = 0;
+ if (IS_ERR(tcp_sw_conn->rx_hash.tfm))
goto free_tx_tfm;
+ tcp_conn->rx_hash = &tcp_sw_conn->rx_hash;
return cls_conn;
free_tx_tfm:
- crypto_free_hash(tcp_conn->tx_hash.tfm);
+ crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
free_conn:
iscsi_conn_printk(KERN_ERR, conn,
"Could not create connection due to crc32c "
"loading error. Make sure the crc32c "
"module is built as a module or into the "
"kernel\n");
- iscsi_conn_teardown(cls_conn);
+ iscsi_tcp_conn_teardown(cls_conn);
return NULL;
}
-static void
-iscsi_tcp_release_conn(struct iscsi_conn *conn)
+static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
{
struct iscsi_session *session = conn->session;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- struct socket *sock = tcp_conn->sock;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct socket *sock = tcp_sw_conn->sock;
if (!sock)
return;
sock_hold(sock->sk);
- iscsi_conn_restore_callbacks(tcp_conn);
+ iscsi_sw_tcp_conn_restore_callbacks(tcp_sw_conn);
sock_put(sock->sk);
spin_lock_bh(&session->lock);
- tcp_conn->sock = NULL;
+ tcp_sw_conn->sock = NULL;
spin_unlock_bh(&session->lock);
sockfd_put(sock);
}
-static void
-iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
+static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
- iscsi_tcp_release_conn(conn);
+ iscsi_sw_tcp_release_conn(conn);
- if (tcp_conn->tx_hash.tfm)
- crypto_free_hash(tcp_conn->tx_hash.tfm);
- if (tcp_conn->rx_hash.tfm)
- crypto_free_hash(tcp_conn->rx_hash.tfm);
+ if (tcp_sw_conn->tx_hash.tfm)
+ crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
+ if (tcp_sw_conn->rx_hash.tfm)
+ crypto_free_hash(tcp_sw_conn->rx_hash.tfm);
- iscsi_conn_teardown(cls_conn);
+ iscsi_tcp_conn_teardown(cls_conn);
}
-static void
-iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
/* userspace may have goofed up and not bound us */
- if (!tcp_conn->sock)
+ if (!tcp_sw_conn->sock)
return;
/*
* Make sure our recv side is stopped.
* Older tools called conn stop before ep_disconnect
* so IO could still be coming in.
*/
- write_lock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+ write_lock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
- write_unlock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+ write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
iscsi_conn_stop(cls_conn, flag);
- iscsi_tcp_release_conn(conn);
+ iscsi_sw_tcp_release_conn(conn);
}
-static int iscsi_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
- char *buf, int *port,
- int (*getname)(struct socket *, struct sockaddr *,
- int *addrlen))
+static int iscsi_sw_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
+ char *buf, int *port,
+ int (*getname)(struct socket *,
+ struct sockaddr *,
+ int *addrlen))
{
struct sockaddr_storage *addr;
struct sockaddr_in6 *sin6;
@@ -1618,14 +593,14 @@ static int iscsi_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
case AF_INET:
sin = (struct sockaddr_in *)addr;
spin_lock_bh(&conn->session->lock);
- sprintf(buf, NIPQUAD_FMT, NIPQUAD(sin->sin_addr.s_addr));
+ sprintf(buf, "%pI4", &sin->sin_addr.s_addr);
*port = be16_to_cpu(sin->sin_port);
spin_unlock_bh(&conn->session->lock);
break;
case AF_INET6:
sin6 = (struct sockaddr_in6 *)addr;
spin_lock_bh(&conn->session->lock);
- sprintf(buf, NIP6_FMT, NIP6(sin6->sin6_addr));
+ sprintf(buf, "%pI6", &sin6->sin6_addr);
*port = be16_to_cpu(sin6->sin6_port);
spin_unlock_bh(&conn->session->lock);
break;
@@ -1636,14 +611,15 @@ free_addr:
}
static int
-iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
- struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
- int is_leading)
+iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
+ int is_leading)
{
struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
struct iscsi_host *ihost = shost_priv(shost);
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
struct sock *sk;
struct socket *sock;
int err;
@@ -1660,13 +636,13 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
* userspace may still want to query the values since we will
* be using them for the reconnect
*/
- err = iscsi_tcp_get_addr(conn, sock, conn->portal_address,
- &conn->portal_port, kernel_getpeername);
+ err = iscsi_sw_tcp_get_addr(conn, sock, conn->portal_address,
+ &conn->portal_port, kernel_getpeername);
if (err)
goto free_socket;
- err = iscsi_tcp_get_addr(conn, sock, ihost->local_address,
- &ihost->local_port, kernel_getsockname);
+ err = iscsi_sw_tcp_get_addr(conn, sock, ihost->local_address,
+ &ihost->local_port, kernel_getsockname);
if (err)
goto free_socket;
@@ -1675,7 +651,7 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
goto free_socket;
/* bind iSCSI connection and socket */
- tcp_conn->sock = sock;
+ tcp_sw_conn->sock = sock;
/* setup Socket parameters */
sk = sock->sk;
@@ -1683,8 +659,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
sk->sk_allocation = GFP_ATOMIC;
- iscsi_conn_set_callbacks(conn);
- tcp_conn->sendpage = tcp_conn->sock->ops->sendpage;
+ iscsi_sw_tcp_conn_set_callbacks(conn);
+ tcp_sw_conn->sendpage = tcp_sw_conn->sock->ops->sendpage;
/*
* set receive state machine into initial state
*/
@@ -1696,74 +672,14 @@ free_socket:
return err;
}
-static int
-iscsi_r2tpool_alloc(struct iscsi_session *session)
-{
- int i;
- int cmd_i;
-
- /*
- * initialize per-task: R2T pool and xmit queue
- */
- for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
- struct iscsi_task *task = session->cmds[cmd_i];
- struct iscsi_tcp_task *tcp_task = task->dd_data;
-
- /*
-		 * pre-allocate 4x as many r2ts to handle the race where the
-		 * target acks DataOut faster than our data_xmit() queues
-		 * could replenish the r2tqueue.
- */
-
- /* R2T pool */
- if (iscsi_pool_init(&tcp_task->r2tpool, session->max_r2t * 4, NULL,
- sizeof(struct iscsi_r2t_info))) {
- goto r2t_alloc_fail;
- }
-
- /* R2T xmit queue */
- tcp_task->r2tqueue = kfifo_alloc(
- session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
- if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
- iscsi_pool_free(&tcp_task->r2tpool);
- goto r2t_alloc_fail;
- }
- }
-
- return 0;
-
-r2t_alloc_fail:
- for (i = 0; i < cmd_i; i++) {
- struct iscsi_task *task = session->cmds[i];
- struct iscsi_tcp_task *tcp_task = task->dd_data;
-
- kfifo_free(tcp_task->r2tqueue);
- iscsi_pool_free(&tcp_task->r2tpool);
- }
- return -ENOMEM;
-}
-
-static void
-iscsi_r2tpool_free(struct iscsi_session *session)
-{
- int i;
-
- for (i = 0; i < session->cmds_max; i++) {
- struct iscsi_task *task = session->cmds[i];
- struct iscsi_tcp_task *tcp_task = task->dd_data;
-
- kfifo_free(tcp_task->r2tqueue);
- iscsi_pool_free(&tcp_task->r2tpool);
- }
-}
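
The r2tqueue being dropped here (it moves into libiscsi_tcp) is a kfifo of pointers. Its essential behaviour fits in a few lines: a power-of-two ring of void*, sketched from scratch rather than via the kernel kfifo API:

#include <stdio.h>

#define QSZ 8                   /* must be a power of two */

struct pfifo {
    void *slot[QSZ];
    unsigned int in, out;       /* free-running indices */
};

static int pfifo_put(struct pfifo *q, void *p)
{
    if (q->in - q->out == QSZ)
        return 0;               /* full */
    q->slot[q->in++ & (QSZ - 1)] = p;
    return 1;
}

static int pfifo_get(struct pfifo *q, void **p)
{
    if (q->in == q->out)
        return 0;               /* empty */
    *p = q->slot[q->out++ & (QSZ - 1)];
    return 1;
}

int main(void)
{
    struct pfifo q = { { 0 }, 0, 0 };
    int r2t = 42;
    void *p;

    pfifo_put(&q, &r2t);
    if (pfifo_get(&q, &p))
        printf("got %d\n", *(int *)p);
    return 0;
}
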
-
-static int
-iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
- char *buf, int buflen)
+static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf,
+ int buflen)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_session *session = conn->session;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
int value;
switch(param) {
@@ -1772,8 +688,8 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
break;
case ISCSI_PARAM_DATADGST_EN:
iscsi_set_param(cls_conn, param, buf, buflen);
- tcp_conn->sendpage = conn->datadgst_en ?
- sock_no_sendpage : tcp_conn->sock->ops->sendpage;
+ tcp_sw_conn->sendpage = conn->datadgst_en ?
+ sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
break;
case ISCSI_PARAM_MAX_R2T:
sscanf(buf, "%d", &value);
@@ -1781,9 +697,9 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
return -EINVAL;
if (session->max_r2t == value)
break;
- iscsi_r2tpool_free(session);
+ iscsi_tcp_r2tpool_free(session);
iscsi_set_param(cls_conn, param, buf, buflen);
- if (iscsi_r2tpool_alloc(session))
+ if (iscsi_tcp_r2tpool_alloc(session))
return -ENOMEM;
break;
default:
@@ -1793,9 +709,8 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
return 0;
}
-static int
-iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
- enum iscsi_param param, char *buf)
+static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf)
{
struct iscsi_conn *conn = cls_conn->dd_data;
int len;
@@ -1819,48 +734,42 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
}
static void
-iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
+iscsi_sw_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+ struct iscsi_stats *stats)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
- stats->txdata_octets = conn->txdata_octets;
- stats->rxdata_octets = conn->rxdata_octets;
- stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
- stats->dataout_pdus = conn->dataout_pdus_cnt;
- stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
- stats->datain_pdus = conn->datain_pdus_cnt;
- stats->r2t_pdus = conn->r2t_pdus_cnt;
- stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
- stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
stats->custom_length = 3;
strcpy(stats->custom[0].desc, "tx_sendpage_failures");
- stats->custom[0].value = tcp_conn->sendpage_failures_cnt;
+ stats->custom[0].value = tcp_sw_conn->sendpage_failures_cnt;
strcpy(stats->custom[1].desc, "rx_discontiguous_hdr");
- stats->custom[1].value = tcp_conn->discontiguous_hdr_cnt;
+ stats->custom[1].value = tcp_sw_conn->discontiguous_hdr_cnt;
strcpy(stats->custom[2].desc, "eh_abort_cnt");
stats->custom[2].value = conn->eh_abort_cnt;
+
+ iscsi_tcp_conn_get_stats(cls_conn, stats);
}
static struct iscsi_cls_session *
-iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
- uint16_t qdepth, uint32_t initial_cmdsn,
- uint32_t *hostno)
+iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
+ uint16_t qdepth, uint32_t initial_cmdsn,
+ uint32_t *hostno)
{
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
struct Scsi_Host *shost;
- int cmd_i;
if (ep) {
printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
return NULL;
}
- shost = iscsi_host_alloc(&iscsi_sht, 0, qdepth);
+ shost = iscsi_host_alloc(&iscsi_sw_tcp_sht, 0, qdepth);
if (!shost)
return NULL;
- shost->transportt = iscsi_tcp_scsi_transport;
+ shost->transportt = iscsi_sw_tcp_scsi_transport;
shost->max_lun = iscsi_max_lun;
shost->max_id = 0;
shost->max_channel = 0;
@@ -1870,23 +779,17 @@ iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
goto free_host;
*hostno = shost->host_no;
- cls_session = iscsi_session_setup(&iscsi_tcp_transport, shost, cmds_max,
- sizeof(struct iscsi_tcp_task),
+ cls_session = iscsi_session_setup(&iscsi_sw_tcp_transport, shost,
+ cmds_max,
+ sizeof(struct iscsi_tcp_task) +
+ sizeof(struct iscsi_sw_tcp_hdrbuf),
initial_cmdsn, 0);
if (!cls_session)
goto remove_host;
session = cls_session->dd_data;
shost->can_queue = session->scsi_cmds_max;
- for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
- struct iscsi_task *task = session->cmds[cmd_i];
- struct iscsi_tcp_task *tcp_task = task->dd_data;
-
- task->hdr = &tcp_task->hdr.cmd_hdr;
- task->hdr_max = sizeof(tcp_task->hdr) - ISCSI_DIGEST_SIZE;
- }
-
- if (iscsi_r2tpool_alloc(session))
+ if (iscsi_tcp_r2tpool_alloc(session))
goto remove_session;
return cls_session;
@@ -1899,24 +802,25 @@ free_host:
return NULL;
}
-static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
+static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
{
struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
- iscsi_r2tpool_free(cls_session->dd_data);
+ iscsi_tcp_r2tpool_free(cls_session->dd_data);
+ iscsi_session_teardown(cls_session);
iscsi_host_remove(shost);
iscsi_host_free(shost);
}
-static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
+static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev)
{
blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY);
blk_queue_dma_alignment(sdev->request_queue, 0);
return 0;
}
-static struct scsi_host_template iscsi_sht = {
+static struct scsi_host_template iscsi_sw_tcp_sht = {
.module = THIS_MODULE,
.name = "iSCSI Initiator over TCP/IP",
.queuecommand = iscsi_queuecommand,
@@ -1927,14 +831,14 @@ static struct scsi_host_template iscsi_sht = {
.cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
.eh_abort_handler = iscsi_eh_abort,
.eh_device_reset_handler= iscsi_eh_device_reset,
- .eh_host_reset_handler = iscsi_eh_host_reset,
+ .eh_target_reset_handler= iscsi_eh_target_reset,
.use_clustering = DISABLE_CLUSTERING,
- .slave_configure = iscsi_tcp_slave_configure,
+ .slave_configure = iscsi_sw_tcp_slave_configure,
.proc_name = "iscsi_tcp",
.this_id = -1,
};
-static struct iscsi_transport iscsi_tcp_transport = {
+static struct iscsi_transport iscsi_sw_tcp_transport = {
.owner = THIS_MODULE,
.name = "tcp",
.caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
@@ -1967,32 +871,36 @@ static struct iscsi_transport iscsi_tcp_transport = {
ISCSI_HOST_INITIATOR_NAME |
ISCSI_HOST_NETDEV_NAME,
/* session management */
- .create_session = iscsi_tcp_session_create,
- .destroy_session = iscsi_tcp_session_destroy,
+ .create_session = iscsi_sw_tcp_session_create,
+ .destroy_session = iscsi_sw_tcp_session_destroy,
/* connection management */
- .create_conn = iscsi_tcp_conn_create,
- .bind_conn = iscsi_tcp_conn_bind,
- .destroy_conn = iscsi_tcp_conn_destroy,
- .set_param = iscsi_conn_set_param,
- .get_conn_param = iscsi_tcp_conn_get_param,
+ .create_conn = iscsi_sw_tcp_conn_create,
+ .bind_conn = iscsi_sw_tcp_conn_bind,
+ .destroy_conn = iscsi_sw_tcp_conn_destroy,
+ .set_param = iscsi_sw_tcp_conn_set_param,
+ .get_conn_param = iscsi_sw_tcp_conn_get_param,
.get_session_param = iscsi_session_get_param,
.start_conn = iscsi_conn_start,
- .stop_conn = iscsi_tcp_conn_stop,
+ .stop_conn = iscsi_sw_tcp_conn_stop,
/* iscsi host params */
.get_host_param = iscsi_host_get_param,
.set_host_param = iscsi_host_set_param,
/* IO */
.send_pdu = iscsi_conn_send_pdu,
- .get_stats = iscsi_conn_get_stats,
+ .get_stats = iscsi_sw_tcp_conn_get_stats,
+ /* iscsi task/cmd helpers */
.init_task = iscsi_tcp_task_init,
.xmit_task = iscsi_tcp_task_xmit,
.cleanup_task = iscsi_tcp_cleanup_task,
+ /* low level pdu helpers */
+ .xmit_pdu = iscsi_sw_tcp_pdu_xmit,
+ .init_pdu = iscsi_sw_tcp_pdu_init,
+ .alloc_pdu = iscsi_sw_tcp_pdu_alloc,
/* recovery */
.session_recovery_timedout = iscsi_session_recovery_timedout,
};
-static int __init
-iscsi_tcp_init(void)
+static int __init iscsi_sw_tcp_init(void)
{
if (iscsi_max_lun < 1) {
printk(KERN_ERR "iscsi_tcp: Invalid max_lun value of %u\n",
@@ -2000,19 +908,18 @@ iscsi_tcp_init(void)
return -EINVAL;
}
- iscsi_tcp_scsi_transport = iscsi_register_transport(
- &iscsi_tcp_transport);
- if (!iscsi_tcp_scsi_transport)
+ iscsi_sw_tcp_scsi_transport = iscsi_register_transport(
+ &iscsi_sw_tcp_transport);
+ if (!iscsi_sw_tcp_scsi_transport)
return -ENODEV;
return 0;
}
-static void __exit
-iscsi_tcp_exit(void)
+static void __exit iscsi_sw_tcp_exit(void)
{
- iscsi_unregister_transport(&iscsi_tcp_transport);
+ iscsi_unregister_transport(&iscsi_sw_tcp_transport);
}
-module_init(iscsi_tcp_init);
-module_exit(iscsi_tcp_exit);
+module_init(iscsi_sw_tcp_init);
+module_exit(iscsi_sw_tcp_exit);
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 498d8ca39848..ca6b7bc64de0 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -19,67 +19,27 @@
* See the file COPYING included with this distribution for more details.
*/
-#ifndef ISCSI_TCP_H
-#define ISCSI_TCP_H
+#ifndef ISCSI_SW_TCP_H
+#define ISCSI_SW_TCP_H
#include <scsi/libiscsi.h>
+#include <scsi/libiscsi_tcp.h>
-struct crypto_hash;
struct socket;
struct iscsi_tcp_conn;
-struct iscsi_segment;
-
-typedef int iscsi_segment_done_fn_t(struct iscsi_tcp_conn *,
- struct iscsi_segment *);
-
-struct iscsi_segment {
- unsigned char *data;
- unsigned int size;
- unsigned int copied;
- unsigned int total_size;
- unsigned int total_copied;
-
- struct hash_desc *hash;
- unsigned char recv_digest[ISCSI_DIGEST_SIZE];
- unsigned char digest[ISCSI_DIGEST_SIZE];
- unsigned int digest_len;
-
- struct scatterlist *sg;
- void *sg_mapped;
- unsigned int sg_offset;
-
- iscsi_segment_done_fn_t *done;
-};
-
-/* Socket connection recieve helper */
-struct iscsi_tcp_recv {
- struct iscsi_hdr *hdr;
- struct iscsi_segment segment;
-
- /* Allocate buffer for BHS + AHS */
- uint32_t hdr_buf[64];
-
- /* copied and flipped values */
- int datalen;
-};
/* Socket connection send helper */
-struct iscsi_tcp_send {
+struct iscsi_sw_tcp_send {
struct iscsi_hdr *hdr;
struct iscsi_segment segment;
struct iscsi_segment data_segment;
};
-struct iscsi_tcp_conn {
+struct iscsi_sw_tcp_conn {
struct iscsi_conn *iscsi_conn;
struct socket *sock;
- int stop_stage; /* conn_stop() flag: *
- * stop to recover, *
- * stop to terminate */
- /* control data */
- struct iscsi_tcp_recv in; /* TCP receive context */
- struct iscsi_tcp_send out; /* TCP send context */
+ struct iscsi_sw_tcp_send out;
/* old values for socket callbacks */
void (*old_data_ready)(struct sock *, int);
void (*old_state_change)(struct sock *);
@@ -93,41 +53,13 @@ struct iscsi_tcp_conn {
uint32_t sendpage_failures_cnt;
uint32_t discontiguous_hdr_cnt;
- int error;
-
ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
};
-struct iscsi_data_task {
- struct iscsi_data hdr; /* PDU */
- char hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */
-};
-
-struct iscsi_r2t_info {
- __be32 ttt; /* copied from R2T */
- __be32 exp_statsn; /* copied from R2T */
- uint32_t data_length; /* copied from R2T */
- uint32_t data_offset; /* copied from R2T */
- int sent; /* R2T sequence progress */
- int data_count; /* DATA-Out payload progress */
- int solicit_datasn;
- struct iscsi_data_task dtask; /* Data-Out header buf */
-};
-
-struct iscsi_tcp_task {
- struct iscsi_hdr_buff {
- struct iscsi_cmd cmd_hdr;
- char hdrextbuf[ISCSI_MAX_AHS_SIZE +
+struct iscsi_sw_tcp_hdrbuf {
+ struct iscsi_hdr hdrbuf;
+ char hdrextbuf[ISCSI_MAX_AHS_SIZE +
ISCSI_DIGEST_SIZE];
- } hdr;
-
- int sent;
- uint32_t exp_datasn; /* expected target's R2TSN/DataSN */
- int data_offset;
- struct iscsi_r2t_info *r2t; /* in progress R2T */
- struct iscsi_pool r2tpool;
- struct kfifo *r2tqueue;
- struct iscsi_data_task unsol_dtask; /* Data-Out header buf */
};
-#endif /* ISCSI_H */
+#endif /* ISCSI_SW_TCP_H */
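The net effect of the header changes above: struct iscsi_tcp_task no longer embeds the command header, so this transport asks iscsi_session_setup() for sizeof(struct iscsi_tcp_task) + sizeof(struct iscsi_sw_tcp_hdrbuf) bytes of per-task data and carves the trailing header buffer out itself. A minimal user-space sketch of that trailing-buffer layout, with illustrative struct shapes and a hypothetical carving helper (not the kernel definitions):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative shapes only; the kernel structs carry much more state. */
struct tcp_task {
	int sent;                        /* generic per-task bookkeeping */
};

struct hdrbuf {
	unsigned char bhs[48];           /* basic header segment */
	unsigned char ext[256 + 4];      /* AHS + header digest  */
};

/* One allocation covers task + header buffer, as iscsi_session_setup()
 * does when handed the combined size. */
static struct tcp_task *task_alloc(void)
{
	return calloc(1, sizeof(struct tcp_task) + sizeof(struct hdrbuf));
}

/* alloc_pdu-style carve: the header buffer sits right after the task. */
static struct hdrbuf *task_hdrbuf(struct tcp_task *t)
{
	return (struct hdrbuf *)(t + 1);
}

int main(void)
{
	struct tcp_task *t = task_alloc();
	struct hdrbuf *h = task_hdrbuf(t);

	memset(h->bhs, 0, sizeof(h->bhs));
	printf("hdrbuf offset: %zu bytes\n",
	       (size_t)((char *)h - (char *)t));
	free(t);
	return 0;
}

Compiled as-is, this prints an offset equal to sizeof(struct tcp_task), which is the invariant the new alloc_pdu callout relies on.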
diff --git a/drivers/scsi/libfc/Makefile b/drivers/scsi/libfc/Makefile
new file mode 100644
index 000000000000..55f982de3a9a
--- /dev/null
+++ b/drivers/scsi/libfc/Makefile
@@ -0,0 +1,12 @@
+# $Id: Makefile
+
+obj-$(CONFIG_LIBFC) += libfc.o
+
+libfc-objs := \
+ fc_disc.o \
+ fc_exch.o \
+ fc_elsct.o \
+ fc_frame.o \
+ fc_lport.o \
+ fc_rport.o \
+ fc_fcp.o
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
new file mode 100644
index 000000000000..dd1564c9e04a
--- /dev/null
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -0,0 +1,845 @@
+/*
+ * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * Target Discovery
+ *
+ * This block discovers all FC-4 remote ports, including FCP initiators. It
+ * also handles RSCN events and re-discovery if necessary.
+ */
+
+/*
+ * DISC LOCKING
+ *
+ * The disc mutex can be locked when acquiring rport locks, but may not
+ * be held when acquiring the lport lock. Refer to fc_lport.c for more
+ * details.
+ */
+
+#include <linux/timer.h>
+#include <linux/err.h>
+#include <asm/unaligned.h>
+
+#include <scsi/fc/fc_gs.h>
+
+#include <scsi/libfc.h>
+
+#define FC_DISC_RETRY_LIMIT 3 /* max retries */
+#define FC_DISC_RETRY_DELAY 500UL /* (msecs) delay */
+
+#define FC_DISC_DELAY 3
+
+static int fc_disc_debug;
+
+#define FC_DEBUG_DISC(fmt...) \
+ do { \
+ if (fc_disc_debug) \
+ FC_DBG(fmt); \
+ } while (0)
+
+static void fc_disc_gpn_ft_req(struct fc_disc *);
+static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
+static int fc_disc_new_target(struct fc_disc *, struct fc_rport *,
+ struct fc_rport_identifiers *);
+static void fc_disc_del_target(struct fc_disc *, struct fc_rport *);
+static void fc_disc_done(struct fc_disc *);
+static void fc_disc_timeout(struct work_struct *);
+static void fc_disc_single(struct fc_disc *, struct fc_disc_port *);
+static void fc_disc_restart(struct fc_disc *);
+
+/**
+ * fc_disc_lookup_rport - lookup a remote port by port_id
+ * @lport: Fibre Channel host port instance
+ * @port_id: remote port port_id to match
+ */
+struct fc_rport *fc_disc_lookup_rport(const struct fc_lport *lport,
+ u32 port_id)
+{
+ const struct fc_disc *disc = &lport->disc;
+ struct fc_rport *rport, *found = NULL;
+ struct fc_rport_libfc_priv *rdata;
+
+ list_for_each_entry(rdata, &disc->rports, peers) {
+ rport = PRIV_TO_RPORT(rdata);
+ if (rport->port_id == port_id) {
+ found = rport;
+ break;
+ }
+ }
+
+ return found;
+}
+
+/**
+ * fc_disc_stop_rports - delete all the remote ports associated with the lport
+ * @disc: The discovery job to stop rports on
+ *
+ * Locking Note: This function expects that the lport mutex is locked before
+ * calling it.
+ */
+void fc_disc_stop_rports(struct fc_disc *disc)
+{
+ struct fc_lport *lport;
+ struct fc_rport *rport;
+ struct fc_rport_libfc_priv *rdata, *next;
+
+ lport = disc->lport;
+
+ mutex_lock(&disc->disc_mutex);
+ list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
+ rport = PRIV_TO_RPORT(rdata);
+ list_del(&rdata->peers);
+ lport->tt.rport_logoff(rport);
+ }
+
+ mutex_unlock(&disc->disc_mutex);
+}
+
+/**
+ * fc_disc_rport_callback - Event handler for rport events
+ * @lport: The lport which is receiving the event
+ * @rport: The rport the event occurred on
+ * @event: The event that occurred
+ *
+ * Locking Note: The rport lock should not be held when calling
+ * this function.
+ */
+static void fc_disc_rport_callback(struct fc_lport *lport,
+ struct fc_rport *rport,
+ enum fc_rport_event event)
+{
+ struct fc_rport_libfc_priv *rdata = rport->dd_data;
+ struct fc_disc *disc = &lport->disc;
+ int found = 0;
+
+ FC_DEBUG_DISC("Received a %d event for port (%6x)\n", event,
+ rport->port_id);
+
+ if (event == RPORT_EV_CREATED) {
+ if (disc) {
+ found = 1;
+ mutex_lock(&disc->disc_mutex);
+ list_add_tail(&rdata->peers, &disc->rports);
+ mutex_unlock(&disc->disc_mutex);
+ }
+ }
+
+ if (!found)
+ FC_DEBUG_DISC("The rport (%6x) is not maintained "
+ "by the discovery layer\n", rport->port_id);
+}
+
+/**
+ * fc_disc_recv_rscn_req - Handle Registered State Change Notification (RSCN)
+ * @sp: Current sequence of the RSCN exchange
+ * @fp: RSCN Frame
+ * @disc: FC discovery context
+ *
+ * Locking Note: This function expects that the disc_mutex is locked
+ * before it is called.
+ */
+static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
+ struct fc_disc *disc)
+{
+ struct fc_lport *lport;
+ struct fc_rport *rport;
+ struct fc_rport_libfc_priv *rdata;
+ struct fc_els_rscn *rp;
+ struct fc_els_rscn_page *pp;
+ struct fc_seq_els_data rjt_data;
+ unsigned int len;
+ int redisc = 0;
+ enum fc_els_rscn_ev_qual ev_qual;
+ enum fc_els_rscn_addr_fmt fmt;
+ LIST_HEAD(disc_ports);
+ struct fc_disc_port *dp, *next;
+
+ lport = disc->lport;
+
+ FC_DEBUG_DISC("Received an RSCN event on port (%6x)\n",
+ fc_host_port_id(lport->host));
+
+ /* make sure the frame contains an RSCN message */
+ rp = fc_frame_payload_get(fp, sizeof(*rp));
+ if (!rp)
+ goto reject;
+ /* make sure the page length is as expected (4 bytes) */
+ if (rp->rscn_page_len != sizeof(*pp))
+ goto reject;
+ /* get the RSCN payload length */
+ len = ntohs(rp->rscn_plen);
+ if (len < sizeof(*rp))
+ goto reject;
+ /* make sure the frame contains the expected payload */
+ rp = fc_frame_payload_get(fp, len);
+ if (!rp)
+ goto reject;
+ /* payload must be a multiple of the RSCN page size */
+ len -= sizeof(*rp);
+ if (len % sizeof(*pp))
+ goto reject;
+
+ for (pp = (void *)(rp + 1); len > 0; len -= sizeof(*pp), pp++) {
+ ev_qual = pp->rscn_page_flags >> ELS_RSCN_EV_QUAL_BIT;
+ ev_qual &= ELS_RSCN_EV_QUAL_MASK;
+ fmt = pp->rscn_page_flags >> ELS_RSCN_ADDR_FMT_BIT;
+ fmt &= ELS_RSCN_ADDR_FMT_MASK;
+ /*
+ * if we get an address format other than port
+ * (area, domain, fabric), then do a full discovery
+ */
+ switch (fmt) {
+ case ELS_ADDR_FMT_PORT:
+ FC_DEBUG_DISC("Port address format for port (%6x)\n",
+ ntoh24(pp->rscn_fid));
+ dp = kzalloc(sizeof(*dp), GFP_KERNEL);
+ if (!dp) {
+ redisc = 1;
+ break;
+ }
+ dp->lp = lport;
+ dp->ids.port_id = ntoh24(pp->rscn_fid);
+ dp->ids.port_name = -1;
+ dp->ids.node_name = -1;
+ dp->ids.roles = FC_RPORT_ROLE_UNKNOWN;
+ list_add_tail(&dp->peers, &disc_ports);
+ break;
+ case ELS_ADDR_FMT_AREA:
+ case ELS_ADDR_FMT_DOM:
+ case ELS_ADDR_FMT_FAB:
+ default:
+ FC_DEBUG_DISC("Address format is (%d)\n", fmt);
+ redisc = 1;
+ break;
+ }
+ }
+ lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
+ if (redisc) {
+ FC_DEBUG_DISC("RSCN received: rediscovering\n");
+ fc_disc_restart(disc);
+ } else {
+ FC_DEBUG_DISC("RSCN received: not rediscovering. "
+ "redisc %d state %d in_prog %d\n",
+ redisc, lport->state, disc->pending);
+ list_for_each_entry_safe(dp, next, &disc_ports, peers) {
+ list_del(&dp->peers);
+ rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
+ if (rport) {
+ rdata = RPORT_TO_PRIV(rport);
+ list_del(&rdata->peers);
+ lport->tt.rport_logoff(rport);
+ }
+ fc_disc_single(disc, dp);
+ }
+ }
+ fc_frame_free(fp);
+ return;
+reject:
+ FC_DEBUG_DISC("Received a bad RSCN frame\n");
+ rjt_data.fp = NULL;
+ rjt_data.reason = ELS_RJT_LOGIC;
+ rjt_data.explan = ELS_EXPL_NONE;
+ lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+ fc_frame_free(fp);
+}
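The reject path above fires on four independent length checks. A standalone sketch of just that arithmetic, with the 4-byte header and page sizes hard-coded (the kernel uses sizeof() on the real fc_els_rscn structs):

#include <stdio.h>
#include <stdint.h>

/* Wire sizes assumed here for illustration. */
#define RSCN_HDR_LEN  4
#define RSCN_PAGE_LEN 4

/* Mirror the reject conditions in fc_disc_recv_rscn_req(). */
static int rscn_payload_ok(uint8_t page_len, uint16_t plen, size_t frame_len)
{
	if (page_len != RSCN_PAGE_LEN)
		return 0;	/* unexpected page size */
	if (plen < RSCN_HDR_LEN)
		return 0;	/* shorter than the RSCN header */
	if (plen > frame_len)
		return 0;	/* frame doesn't hold the claimed payload */
	if ((plen - RSCN_HDR_LEN) % RSCN_PAGE_LEN)
		return 0;	/* ragged trailing page */
	return 1;
}

int main(void)
{
	printf("%d\n", rscn_payload_ok(4, 12, 12)); /* hdr + 2 pages -> 1 */
	printf("%d\n", rscn_payload_ok(4, 10, 12)); /* ragged        -> 0 */
	return 0;
}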
+
+/**
+ * fc_disc_recv_req - Handle incoming requests
+ * @sp: Current sequence of the request exchange
+ * @fp: The frame
+ * @lport: The FC local port
+ *
+ * Locking Note: This function is called from the EM and will lock
+ * the disc_mutex before calling the handler for the
+ * request.
+ */
+static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp,
+ struct fc_lport *lport)
+{
+ u8 op;
+ struct fc_disc *disc = &lport->disc;
+
+ op = fc_frame_payload_op(fp);
+ switch (op) {
+ case ELS_RSCN:
+ mutex_lock(&disc->disc_mutex);
+ fc_disc_recv_rscn_req(sp, fp, disc);
+ mutex_unlock(&disc->disc_mutex);
+ break;
+ default:
+ FC_DBG("Received an unsupported request. opcode (%x)\n", op);
+ break;
+ }
+}
+
+/**
+ * fc_disc_restart - Restart discovery
+ * @disc: FC discovery context
+ *
+ * Locking Note: This function expects that the disc mutex
+ * is already locked.
+ */
+static void fc_disc_restart(struct fc_disc *disc)
+{
+ struct fc_rport *rport;
+ struct fc_rport_libfc_priv *rdata, *next;
+ struct fc_lport *lport = disc->lport;
+
+ FC_DEBUG_DISC("Restarting discovery for port (%6x)\n",
+ fc_host_port_id(lport->host));
+
+ list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
+ rport = PRIV_TO_RPORT(rdata);
+ FC_DEBUG_DISC("list_del(%6x)\n", rport->port_id);
+ list_del(&rdata->peers);
+ lport->tt.rport_logoff(rport);
+ }
+
+ disc->requested = 1;
+ if (!disc->pending)
+ fc_disc_gpn_ft_req(disc);
+}
+
+/**
+ * fc_disc_start - Fibre Channel Target discovery
+ * @disc_callback: Callback invoked when discovery completes or fails
+ * @lport: FC local port
+ */
+static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
+ enum fc_disc_event),
+ struct fc_lport *lport)
+{
+ struct fc_rport *rport;
+ struct fc_rport_identifiers ids;
+ struct fc_disc *disc = &lport->disc;
+
+ /*
+ * At this point we may have a new disc job or an existing
+ * one. Either way, let's lock when we make changes to it
+ * and send the GPN_FT request.
+ */
+ mutex_lock(&disc->disc_mutex);
+
+ disc->disc_callback = disc_callback;
+
+ /*
+ * If not ready, or already running discovery, just set request flag.
+ */
+ disc->requested = 1;
+
+ if (disc->pending) {
+ mutex_unlock(&disc->disc_mutex);
+ return;
+ }
+
+ /*
+ * Handle point-to-point mode as a simple discovery
+ * of the remote port. Yucky, yucky, yuck, yuck!
+ */
+ rport = disc->lport->ptp_rp;
+ if (rport) {
+ ids.port_id = rport->port_id;
+ ids.port_name = rport->port_name;
+ ids.node_name = rport->node_name;
+ ids.roles = FC_RPORT_ROLE_UNKNOWN;
+ get_device(&rport->dev);
+
+ if (!fc_disc_new_target(disc, rport, &ids)) {
+ disc->event = DISC_EV_SUCCESS;
+ fc_disc_done(disc);
+ }
+ put_device(&rport->dev);
+ } else {
+ fc_disc_gpn_ft_req(disc); /* get ports by FC-4 type */
+ }
+
+ mutex_unlock(&disc->disc_mutex);
+}
+
+static struct fc_rport_operations fc_disc_rport_ops = {
+ .event_callback = fc_disc_rport_callback,
+};
+
+/**
+ * fc_disc_new_target - Handle new target found by discovery
+ * @disc: FC discovery context
+ * @rport: The previous FC remote port (NULL if new remote port)
+ * @ids: Identifiers for the new FC remote port
+ *
+ * Locking Note: This function expects that the disc_mutex is locked
+ * before it is called.
+ */
+static int fc_disc_new_target(struct fc_disc *disc,
+ struct fc_rport *rport,
+ struct fc_rport_identifiers *ids)
+{
+ struct fc_lport *lport = disc->lport;
+ struct fc_rport_libfc_priv *rp;
+ int error = 0;
+
+ if (rport && ids->port_name) {
+ if (rport->port_name == -1) {
+ /*
+ * Set WWN and fall through to notify of create.
+ */
+ fc_rport_set_name(rport, ids->port_name,
+ rport->node_name);
+ } else if (rport->port_name != ids->port_name) {
+ /*
+ * This is a new port with the same FCID as
+ * a previously-discovered port. Presumably the old
+ * port logged out and a new port logged in and was
+ * assigned the same FCID. This should be rare.
+ * Delete the old one and fall thru to re-create.
+ */
+ fc_disc_del_target(disc, rport);
+ rport = NULL;
+ }
+ }
+ if (((ids->port_name != -1) || (ids->port_id != -1)) &&
+ ids->port_id != fc_host_port_id(lport->host) &&
+ ids->port_name != lport->wwpn) {
+ if (!rport) {
+ rport = lport->tt.rport_lookup(lport, ids->port_id);
+ if (!rport) {
+ struct fc_disc_port dp;
+ dp.lp = lport;
+ dp.ids.port_id = ids->port_id;
+ dp.ids.port_name = ids->port_name;
+ dp.ids.node_name = ids->node_name;
+ dp.ids.roles = ids->roles;
+ rport = fc_rport_rogue_create(&dp);
+ }
+ if (!rport)
+ error = -ENOMEM;
+ }
+ if (rport) {
+ rp = rport->dd_data;
+ rp->ops = &fc_disc_rport_ops;
+ rp->rp_state = RPORT_ST_INIT;
+ lport->tt.rport_login(rport);
+ }
+ }
+ return error;
+}
+
+/**
+ * fc_disc_del_target - Delete a target
+ * @disc: FC discovery context
+ * @rport: The remote port to be removed
+ */
+static void fc_disc_del_target(struct fc_disc *disc, struct fc_rport *rport)
+{
+ struct fc_lport *lport = disc->lport;
+ struct fc_rport_libfc_priv *rdata = RPORT_TO_PRIV(rport);
+ list_del(&rdata->peers);
+ lport->tt.rport_logoff(rport);
+}
+
+/**
+ * fc_disc_done - Discovery has been completed
+ * @disc: FC discovery context
+ */
+static void fc_disc_done(struct fc_disc *disc)
+{
+ struct fc_lport *lport = disc->lport;
+
+ FC_DEBUG_DISC("Discovery complete for port (%6x)\n",
+ fc_host_port_id(lport->host));
+
+ disc->disc_callback(lport, disc->event);
+ disc->event = DISC_EV_NONE;
+
+ if (disc->requested)
+ fc_disc_gpn_ft_req(disc);
+ else
+ disc->pending = 0;
+}
+
+/**
+ * fc_disc_error - Handle error on dNS request
+ * @disc: FC discovery context
+ * @fp: The frame pointer
+ */
+static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
+{
+ struct fc_lport *lport = disc->lport;
+ unsigned long delay = 0;
+ if (fc_disc_debug)
+ FC_DBG("Error %ld, retries %d/%d\n",
+ PTR_ERR(fp), disc->retry_count,
+ FC_DISC_RETRY_LIMIT);
+
+ if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
+ /*
+ * Memory allocation failure, or the exchange timed out,
+ * retry after delay.
+ */
+ if (disc->retry_count < FC_DISC_RETRY_LIMIT) {
+ /* go ahead and retry */
+ if (!fp)
+ delay = msecs_to_jiffies(FC_DISC_RETRY_DELAY);
+ else {
+ delay = msecs_to_jiffies(lport->e_d_tov);
+
+ /* timeout faster first time */
+ if (!disc->retry_count)
+ delay /= 4;
+ }
+ disc->retry_count++;
+ schedule_delayed_work(&disc->disc_work, delay);
+ } else {
+ /* exceeded retries */
+ disc->event = DISC_EV_FAILED;
+ fc_disc_done(disc);
+ }
+ }
+}
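The delay selection in fc_disc_error() is compact but easy to misread: allocation failures retry on a fixed delay, exchange timeouts retry on e_d_tov, quartered for the very first retry. A user-space restatement of just that arithmetic (the retry constants mirror the #defines at the top of this file; the e_d_tov value is illustrative):

#include <stdio.h>

#define RETRY_LIMIT    3
#define RETRY_DELAY_MS 500UL

static unsigned long disc_retry_delay(int alloc_failed,
				      unsigned long e_d_tov_ms,
				      int retry_count)
{
	unsigned long delay;

	if (alloc_failed)
		return RETRY_DELAY_MS;	/* no frame: fixed retry delay */
	delay = e_d_tov_ms;
	if (retry_count == 0)
		delay /= 4;		/* time out faster the first time */
	return delay;
}

int main(void)
{
	printf("%lu\n", disc_retry_delay(1, 2000, 0)); /* 500  */
	printf("%lu\n", disc_retry_delay(0, 2000, 0)); /* 500  */
	printf("%lu\n", disc_retry_delay(0, 2000, 1)); /* 2000 */
	return 0;
}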
+
+/**
+ * fc_disc_gpn_ft_req - Send Get Port Names by FC-4 type (GPN_FT) request
+ * @disc: FC discovery context
+ *
+ * Locking Note: This function expects that the disc_mutex is locked
+ * before it is called.
+ */
+static void fc_disc_gpn_ft_req(struct fc_disc *disc)
+{
+ struct fc_frame *fp;
+ struct fc_lport *lport = disc->lport;
+
+ WARN_ON(!fc_lport_test_ready(lport));
+
+ disc->pending = 1;
+ disc->requested = 0;
+
+ disc->buf_len = 0;
+ disc->seq_count = 0;
+ fp = fc_frame_alloc(lport,
+ sizeof(struct fc_ct_hdr) +
+ sizeof(struct fc_ns_gid_ft));
+ if (!fp)
+ goto err;
+
+ if (lport->tt.elsct_send(lport, NULL, fp,
+ FC_NS_GPN_FT,
+ fc_disc_gpn_ft_resp,
+ disc, lport->e_d_tov))
+ return;
+err:
+ fc_disc_error(disc, fp);
+}
+
+/**
+ * fc_disc_gpn_ft_parse - Parse the list of IDs and names resulting from a request
+ * @disc: FC discovery context
+ * @buf: GPN_FT response buffer
+ * @len: size of response buffer
+ */
+static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
+{
+ struct fc_lport *lport;
+ struct fc_gpn_ft_resp *np;
+ char *bp;
+ size_t plen;
+ size_t tlen;
+ int error = 0;
+ struct fc_disc_port dp;
+ struct fc_rport *rport;
+ struct fc_rport_libfc_priv *rdata;
+
+ lport = disc->lport;
+
+ /*
+ * Handle partial name record left over from previous call.
+ */
+ bp = buf;
+ plen = len;
+ np = (struct fc_gpn_ft_resp *)bp;
+ tlen = disc->buf_len;
+ if (tlen) {
+ WARN_ON(tlen >= sizeof(*np));
+ plen = sizeof(*np) - tlen;
+ WARN_ON(plen <= 0);
+ WARN_ON(plen >= sizeof(*np));
+ if (plen > len)
+ plen = len;
+ np = &disc->partial_buf;
+ memcpy((char *)np + tlen, bp, plen);
+
+ /*
+ * Set bp so that the loop below will advance it to the
+ * first valid full name element.
+ */
+ bp -= tlen;
+ len += tlen;
+ plen += tlen;
+ disc->buf_len = (unsigned char) plen;
+ if (plen == sizeof(*np))
+ disc->buf_len = 0;
+ }
+
+ /*
+ * Handle full name records, including the one filled from above.
+ * Normally, np == bp and plen == len, but from the partial case above,
+ * bp, len describe the overall buffer, and np, plen describe the
+ * partial buffer, which would usually be full by now.
+ * After the first time through the loop, things return to "normal".
+ */
+ while (plen >= sizeof(*np)) {
+ dp.lp = lport;
+ dp.ids.port_id = ntoh24(np->fp_fid);
+ dp.ids.port_name = ntohll(np->fp_wwpn);
+ dp.ids.node_name = -1;
+ dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
+
+ if ((dp.ids.port_id != fc_host_port_id(lport->host)) &&
+ (dp.ids.port_name != lport->wwpn)) {
+ rport = fc_rport_rogue_create(&dp);
+ if (rport) {
+ rdata = rport->dd_data;
+ rdata->ops = &fc_disc_rport_ops;
+ rdata->local_port = lport;
+ lport->tt.rport_login(rport);
+ } else
+ FC_DBG("Failed to allocate memory for "
+ "the newly discovered port (%6x)\n",
+ dp.ids.port_id);
+ }
+
+ if (np->fp_flags & FC_NS_FID_LAST) {
+ disc->event = DISC_EV_SUCCESS;
+ fc_disc_done(disc);
+ len = 0;
+ break;
+ }
+ len -= sizeof(*np);
+ bp += sizeof(*np);
+ np = (struct fc_gpn_ft_resp *)bp;
+ plen = len;
+ }
+
+ /*
+ * Save any partial record at the end of the buffer for next time.
+ */
+ if (error == 0 && len > 0 && len < sizeof(*np)) {
+ if (np != &disc->partial_buf) {
+ FC_DEBUG_DISC("Partial buffer remains "
+ "for discovery by (%6x)\n",
+ fc_host_port_id(lport->host));
+ memcpy(&disc->partial_buf, np, len);
+ }
+ disc->buf_len = (unsigned char) len;
+ } else {
+ disc->buf_len = 0;
+ }
+ return error;
+}
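The partial-buffer dance above (rewinding bp by tlen so the main loop sees one contiguous record) is the subtlest part of this file. The same reassembly scheme, restated as a small runnable parser that keeps the held-over tail explicit instead of rewinding pointers (record size and names are illustrative, not the on-wire fc_gpn_ft_resp layout):

#include <stdio.h>
#include <string.h>

#define REC_SZ 16  /* fixed name-record size; value illustrative */

/* State carried between response frames, as fc_disc keeps
 * partial_buf/buf_len between calls to fc_disc_gpn_ft_parse(). */
struct parser {
	unsigned char partial[REC_SZ];
	size_t        buf_len;   /* bytes of a partial record held over */
	unsigned int  records;   /* complete records seen (demo only)   */
};

static void feed(struct parser *p, const unsigned char *buf, size_t len)
{
	/* Finish a record split across the previous buffer, if any. */
	if (p->buf_len) {
		size_t need = REC_SZ - p->buf_len;

		if (need > len)
			need = len;
		memcpy(p->partial + p->buf_len, buf, need);
		p->buf_len += need;
		buf += need;
		len -= need;
		if (p->buf_len == REC_SZ) {
			p->records++;	/* consume p->partial here */
			p->buf_len = 0;
		}
	}
	/* Whole records contained in this buffer. */
	while (len >= REC_SZ) {
		p->records++;		/* consume buf here */
		buf += REC_SZ;
		len -= REC_SZ;
	}
	/* Save any tail for the next frame. */
	memcpy(p->partial, buf, len);
	p->buf_len = len;
}

int main(void)
{
	unsigned char stream[3 * REC_SZ] = { 0 };
	struct parser p = { .buf_len = 0, .records = 0 };

	feed(&p, stream, 24);      /* 1 record + 8-byte tail */
	feed(&p, stream + 24, 24); /* finishes tail + 1 more */
	printf("records=%u held=%zu\n", p.records, p.buf_len); /* 3, 0 */
	return 0;
}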
+
+/*
+ * Handle retry of memory allocation for remote ports.
+ */
+static void fc_disc_timeout(struct work_struct *work)
+{
+ struct fc_disc *disc = container_of(work,
+ struct fc_disc,
+ disc_work.work);
+ mutex_lock(&disc->disc_mutex);
+ if (disc->requested && !disc->pending)
+ fc_disc_gpn_ft_req(disc);
+ mutex_unlock(&disc->disc_mutex);
+}
+
+/**
+ * fc_disc_gpn_ft_resp - Handle a response frame from Get Port Names (GPN_FT)
+ * @sp: Current sequence of GPN_FT exchange
+ * @fp: response frame
+ * @disc_arg: FC discovery context
+ *
+ * Locking Note: This function expects that the disc_mutex is locked
+ * before it is called.
+ */
+static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *disc_arg)
+{
+ struct fc_disc *disc = disc_arg;
+ struct fc_ct_hdr *cp;
+ struct fc_frame_header *fh;
+ unsigned int seq_cnt;
+ void *buf = NULL;
+ unsigned int len;
+ int error;
+
+ FC_DEBUG_DISC("Received a GPN_FT response on port (%6x)\n",
+ fc_host_port_id(disc->lport->host));
+
+ if (IS_ERR(fp)) {
+ fc_disc_error(disc, fp);
+ return;
+ }
+
+ WARN_ON(!fc_frame_is_linear(fp)); /* buffer must be contiguous */
+ fh = fc_frame_header_get(fp);
+ len = fr_len(fp) - sizeof(*fh);
+ seq_cnt = ntohs(fh->fh_seq_cnt);
+ if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 &&
+ disc->seq_count == 0) {
+ cp = fc_frame_payload_get(fp, sizeof(*cp));
+ if (!cp) {
+ FC_DBG("GPN_FT response too short, len %d\n",
+ fr_len(fp));
+ } else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
+
+ /*
+ * Accepted. Parse response.
+ */
+ buf = cp + 1;
+ len -= sizeof(*cp);
+ } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
+ FC_DBG("GPN_FT rejected reason %x exp %x "
+ "(check zoning)\n", cp->ct_reason,
+ cp->ct_explan);
+ disc->event = DISC_EV_FAILED;
+ fc_disc_done(disc);
+ } else {
+ FC_DBG("GPN_FT unexpected response code %x\n",
+ ntohs(cp->ct_cmd));
+ }
+ } else if (fr_sof(fp) == FC_SOF_N3 &&
+ seq_cnt == disc->seq_count) {
+ buf = fh + 1;
+ } else {
+ FC_DBG("GPN_FT unexpected frame - out of sequence? "
+ "seq_cnt %x expected %x sof %x eof %x\n",
+ seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
+ }
+ if (buf) {
+ error = fc_disc_gpn_ft_parse(disc, buf, len);
+ if (error)
+ fc_disc_error(disc, fp);
+ else
+ disc->seq_count++;
+ }
+ fc_frame_free(fp);
+}
+
+/**
+ * fc_disc_single - Discover the directory information for a single target
+ * @disc: FC discovery context
+ * @dp: The port to rediscover
+ *
+ * Locking Note: This function expects that the disc_mutex is locked
+ * before it is called.
+ */
+static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp)
+{
+ struct fc_lport *lport;
+ struct fc_rport *rport;
+ struct fc_rport *new_rport;
+ struct fc_rport_libfc_priv *rdata;
+
+ lport = disc->lport;
+
+ if (dp->ids.port_id == fc_host_port_id(lport->host))
+ goto out;
+
+ rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
+ if (rport)
+ fc_disc_del_target(disc, rport);
+
+ new_rport = fc_rport_rogue_create(dp);
+ if (new_rport) {
+ rdata = new_rport->dd_data;
+ rdata->ops = &fc_disc_rport_ops;
+ kfree(dp);
+ lport->tt.rport_login(new_rport);
+ }
+ return;
+out:
+ kfree(dp);
+}
+
+/**
+ * fc_disc_stop - Stop discovery for a given lport
+ * @lport: The lport that discovery should stop for
+ */
+void fc_disc_stop(struct fc_lport *lport)
+{
+ struct fc_disc *disc = &lport->disc;
+
+ if (disc) {
+ cancel_delayed_work_sync(&disc->disc_work);
+ fc_disc_stop_rports(disc);
+ }
+}
+
+/**
+ * fc_disc_stop_final - Stop discovery for a given lport
+ * @lport: The lport that discovery should stop for
+ *
+ * This function will block until discovery has been
+ * completely stopped and all rports have been deleted.
+ */
+void fc_disc_stop_final(struct fc_lport *lport)
+{
+ fc_disc_stop(lport);
+ lport->tt.rport_flush_queue();
+}
+
+/**
+ * fc_disc_init - Initialize the discovery block
+ * @lport: FC local port
+ */
+int fc_disc_init(struct fc_lport *lport)
+{
+ struct fc_disc *disc;
+
+ if (!lport->tt.disc_start)
+ lport->tt.disc_start = fc_disc_start;
+
+ if (!lport->tt.disc_stop)
+ lport->tt.disc_stop = fc_disc_stop;
+
+ if (!lport->tt.disc_stop_final)
+ lport->tt.disc_stop_final = fc_disc_stop_final;
+
+ if (!lport->tt.disc_recv_req)
+ lport->tt.disc_recv_req = fc_disc_recv_req;
+
+ if (!lport->tt.rport_lookup)
+ lport->tt.rport_lookup = fc_disc_lookup_rport;
+
+ disc = &lport->disc;
+ INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
+ mutex_init(&disc->disc_mutex);
+ INIT_LIST_HEAD(&disc->rports);
+
+ disc->lport = lport;
+ disc->delay = FC_DISC_DELAY;
+ disc->event = DISC_EV_NONE;
+
+ return 0;
+}
+EXPORT_SYMBOL(fc_disc_init);
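fc_disc_init() follows the libfc convention used throughout this series: the LLD may pre-fill any lport->tt entry point, and libfc installs its defaults only into slots left NULL. A reduced sketch of that pattern (names illustrative):

#include <stdio.h>

/* An ops table the LLD may pre-fill before handing it to the library. */
struct ops {
	void (*disc_start)(void);
	void (*disc_stop)(void);
};

static void default_start(void) { puts("default start"); }
static void default_stop(void)  { puts("default stop");  }
static void lld_stop(void)      { puts("LLD stop");      }

/* Fill in library defaults for anything the LLD left unset. */
static void ops_init(struct ops *tt)
{
	if (!tt->disc_start)
		tt->disc_start = default_start;
	if (!tt->disc_stop)
		tt->disc_stop = default_stop;
}

int main(void)
{
	struct ops tt = { .disc_stop = lld_stop }; /* LLD overrides stop */

	ops_init(&tt);
	tt.disc_start();	/* default start */
	tt.disc_stop();		/* LLD stop      */
	return 0;
}

The LLD overrides one slot and inherits the other, which mirrors how a driver layers on libfc.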
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
new file mode 100644
index 000000000000..dd47fe619d1e
--- /dev/null
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright(c) 2008 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * Provide interface to send ELS/CT FC frames
+ */
+
+#include <asm/unaligned.h>
+#include <scsi/fc/fc_gs.h>
+#include <scsi/fc/fc_ns.h>
+#include <scsi/fc/fc_els.h>
+#include <scsi/libfc.h>
+#include <scsi/fc_encode.h>
+
+/*
+ * fc_elsct_send - sends ELS/CT frame
+ */
+static struct fc_seq *fc_elsct_send(struct fc_lport *lport,
+ struct fc_rport *rport,
+ struct fc_frame *fp,
+ unsigned int op,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *fp,
+ void *arg),
+ void *arg, u32 timer_msec)
+{
+ enum fc_rctl r_ctl;
+ u32 did;
+ enum fc_fh_type fh_type;
+ int rc;
+
+ /* ELS requests */
+ if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS))
+ rc = fc_els_fill(lport, rport, fp, op, &r_ctl, &did, &fh_type);
+ else
+ /* CT requests */
+ rc = fc_ct_fill(lport, fp, op, &r_ctl, &did, &fh_type);
+
+ if (rc)
+ return NULL;
+
+ fc_fill_fc_hdr(fp, r_ctl, did, fc_host_port_id(lport->host), fh_type,
+ FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+
+ return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
+}
+
+int fc_elsct_init(struct fc_lport *lport)
+{
+ if (!lport->tt.elsct_send)
+ lport->tt.elsct_send = fc_elsct_send;
+
+ return 0;
+}
+EXPORT_SYMBOL(fc_elsct_init);
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
new file mode 100644
index 000000000000..66db08a5f27f
--- /dev/null
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -0,0 +1,1970 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 Red Hat, Inc. All rights reserved.
+ * Copyright(c) 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * Fibre Channel exchange and sequence handling.
+ */
+
+#include <linux/timer.h>
+#include <linux/gfp.h>
+#include <linux/err.h>
+
+#include <scsi/fc/fc_fc2.h>
+
+#include <scsi/libfc.h>
+#include <scsi/fc_encode.h>
+
+#define FC_DEF_R_A_TOV (10 * 1000) /* resource allocation timeout */
+
+/*
+ * fc_exch_debug can be set in debugger or at compile time to get more logs.
+ */
+static int fc_exch_debug;
+
+#define FC_DEBUG_EXCH(fmt...) \
+ do { \
+ if (fc_exch_debug) \
+ FC_DBG(fmt); \
+ } while (0)
+
+static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
+
+/*
+ * Structure and function definitions for managing Fibre Channel Exchanges
+ * and Sequences.
+ *
+ * The three primary structures used here are fc_exch_mgr, fc_exch, and fc_seq.
+ *
+ * fc_exch_mgr holds the exchange state for an N port
+ *
+ * fc_exch holds state for one exchange and links to its active sequence.
+ *
+ * fc_seq holds the state for an individual sequence.
+ */
+
+/*
+ * Exchange manager.
+ *
+ * This structure is the center for creating exchanges and sequences.
+ * It manages the allocation of exchange IDs.
+ */
+struct fc_exch_mgr {
+ enum fc_class class; /* default class for sequences */
+ spinlock_t em_lock; /* exchange manager lock,
+ must be taken before ex_lock */
+ u16 last_xid; /* last allocated exchange ID */
+ u16 min_xid; /* min exchange ID */
+ u16 max_xid; /* max exchange ID */
+ u16 max_read; /* max exchange ID for read */
+ u16 last_read; /* last xid allocated for read */
+ u32 total_exches; /* total allocated exchanges */
+ struct list_head ex_list; /* allocated exchanges list */
+ struct fc_lport *lp; /* fc device instance */
+ mempool_t *ep_pool; /* reserve ep's */
+
+ /*
+ * Currently exchange mgr stats are updated but not used.
+ * Either expose the stats via sysfs or remove them
+ * altogether if they remain unused. XXX
+ */
+ struct {
+ atomic_t no_free_exch;
+ atomic_t no_free_exch_xid;
+ atomic_t xid_not_found;
+ atomic_t xid_busy;
+ atomic_t seq_not_found;
+ atomic_t non_bls_resp;
+ } stats;
+ struct fc_exch **exches; /* for exch pointers indexed by xid */
+};
+#define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
+
+static void fc_exch_rrq(struct fc_exch *);
+static void fc_seq_ls_acc(struct fc_seq *);
+static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason,
+ enum fc_els_rjt_explan);
+static void fc_exch_els_rec(struct fc_seq *, struct fc_frame *);
+static void fc_exch_els_rrq(struct fc_seq *, struct fc_frame *);
+static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp);
+
+/*
+ * Internal implementation notes.
+ *
+ * There is one exchange manager by default in libfc, but an LLD may choose
+ * to have one per CPU. There is one sequence manager per exchange manager,
+ * and the two are currently never separated.
+ *
+ * Section 9.8 in FC-FS-2 specifies: "The SEQ_ID is a one-byte field
+ * assigned by the Sequence Initiator that shall be unique for a specific
+ * D_ID and S_ID pair while the Sequence is open." Note that it isn't
+ * qualified by exchange ID, which one might think it would be.
+ * In practice this limits the number of open sequences and exchanges to 256
+ * per session. For most targets we could treat this limit as per exchange.
+ *
+ * The exchange and its sequence are freed when the last sequence is received.
+ * It's possible for the remote port to leave an exchange open without
+ * sending any sequences.
+ *
+ * Notes on reference counts:
+ *
+ * Exchanges are reference counted and exchange gets freed when the reference
+ * count becomes zero.
+ *
+ * Timeouts:
+ * Sequences are timed out for E_D_TOV and R_A_TOV.
+ *
+ * Sequence event handling:
+ *
+ * The following events may occur on initiator sequences:
+ *
+ * Send.
+ * For now, the whole thing is sent.
+ * Receive ACK
+ * This applies only to class F.
+ * The sequence is marked complete.
+ * ULP completion.
+ * The upper layer calls fc_exch_done() when done
+ * with exchange and sequence tuple.
+ * RX-inferred completion.
+ * When we receive the next sequence on the same exchange, we can
+ * retire the previous sequence ID. (XXX not implemented).
+ * Timeout.
+ * R_A_TOV frees the sequence ID. If we're waiting for ACK,
+ * E_D_TOV causes abort and calls upper layer response handler
+ * with FC_EX_TIMEOUT error.
+ * Receive RJT
+ * XXX defer.
+ * Send ABTS
+ * On timeout.
+ *
+ * The following events may occur on recipient sequences:
+ *
+ * Receive
+ * Allocate sequence for first frame received.
+ * Hold during receive handler.
+ * Release when final frame received.
+ * Keep status of last N of these for the ELS RES command. XXX TBD.
+ * Receive ABTS
+ * Deallocate sequence
+ * Send RJT
+ * Deallocate
+ *
+ * For now, we neglect conditions where only part of a sequence was
+ * received or transmitted, or where out-of-order receipt is detected.
+ */
+
+/*
+ * Locking notes:
+ *
+ * The EM code runs in a per-CPU worker thread.
+ *
+ * To protect against concurrency between a worker thread code and timers,
+ * sequence allocation and deallocation must be locked.
+ * - the exchange refcount can be manipulated atomically, without locks.
+ * - sequence allocation must be locked by exch lock.
+ * - If the em_lock and ex_lock must be taken at the same time, then the
+ * em_lock must be taken before the ex_lock.
+ */
+
+/*
+ * opcode names for debugging.
+ */
+static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;
+
+#define FC_TABLE_SIZE(x) (sizeof(x) / sizeof(x[0]))
+
+static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
+ unsigned int max_index)
+{
+ const char *name = NULL;
+
+ if (op < max_index)
+ name = table[op];
+ if (!name)
+ name = "unknown";
+ return name;
+}
+
+static const char *fc_exch_rctl_name(unsigned int op)
+{
+ return fc_exch_name_lookup(op, fc_exch_rctl_names,
+ FC_TABLE_SIZE(fc_exch_rctl_names));
+}
+
+/*
+ * Hold an exchange - keep it from being freed.
+ */
+static void fc_exch_hold(struct fc_exch *ep)
+{
+ atomic_inc(&ep->ex_refcnt);
+}
+
+/*
+ * Set up the FC header by initializing a few more header fields and sof/eof.
+ * Fields initialized by this function:
+ * - fh_ox_id, fh_rx_id, fh_seq_id, fh_seq_cnt
+ * - sof and eof
+ */
+static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
+ u32 f_ctl)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ u16 fill;
+
+ fr_sof(fp) = ep->class;
+ if (ep->seq.cnt)
+ fr_sof(fp) = fc_sof_normal(ep->class);
+
+ if (f_ctl & FC_FC_END_SEQ) {
+ fr_eof(fp) = FC_EOF_T;
+ if (fc_sof_needs_ack(ep->class))
+ fr_eof(fp) = FC_EOF_N;
+ /*
+ * Form f_ctl.
+ * The number of fill bytes to make the length a 4-byte
+ * multiple is the low order 2-bits of the f_ctl.
+ * The fill itself will have been cleared by the frame
+ * allocation.
+ * After this, the length will be even, as expected by
+ * the transport.
+ */
+ fill = fr_len(fp) & 3;
+ if (fill) {
+ fill = 4 - fill;
+ /* TODO, this may be a problem with fragmented skb */
+ skb_put(fp_skb(fp), fill);
+ hton24(fh->fh_f_ctl, f_ctl | fill);
+ }
+ } else {
+ WARN_ON(fr_len(fp) % 4 != 0); /* no pad for non-last frames */
+ fr_eof(fp) = FC_EOF_N;
+ }
+
+ /*
+ * Initialize the remaining fh fields not set by fc_fill_fc_hdr().
+ */
+ fh->fh_ox_id = htons(ep->oxid);
+ fh->fh_rx_id = htons(ep->rxid);
+ fh->fh_seq_id = ep->seq.id;
+ fh->fh_seq_cnt = htons(ep->seq.cnt);
+}
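The fill computation above pads the last frame of a sequence to a 4-byte multiple and records the pad count in the low two bits of f_ctl. The arithmetic in isolation (a sketch; fr_len() and the skb handling are omitted):

#include <stdio.h>

/* Bytes needed to pad frame_len to a 4-byte multiple, as
 * fc_exch_setup_hdr() computes for EOF_T frames. */
static unsigned int fc_fill_bytes(unsigned int frame_len)
{
	unsigned int fill = frame_len & 3;

	return fill ? 4 - fill : 0;
}

int main(void)
{
	for (unsigned int len = 60; len <= 64; len++)
		printf("len %u -> fill %u -> padded %u\n",
		       len, fc_fill_bytes(len), len + fc_fill_bytes(len));
	return 0;
}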
+
+/*
+ * Release a reference to an exchange.
+ * If the refcnt goes to zero and the exchange is complete, it is freed.
+ */
+static void fc_exch_release(struct fc_exch *ep)
+{
+ struct fc_exch_mgr *mp;
+
+ if (atomic_dec_and_test(&ep->ex_refcnt)) {
+ mp = ep->em;
+ if (ep->destructor)
+ ep->destructor(&ep->seq, ep->arg);
+ if (ep->lp->tt.exch_put)
+ ep->lp->tt.exch_put(ep->lp, mp, ep->xid);
+ WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
+ mempool_free(ep, mp->ep_pool);
+ }
+}
+
+static int fc_exch_done_locked(struct fc_exch *ep)
+{
+ int rc = 1;
+
+ /*
+ * We must check for completion in case there are two threads
+ * trying to complete this. But the rrq code will reuse the
+ * ep, and in that case we only clear the resp and set it as
+ * complete, so it can be reused by the timer to send the rrq.
+ */
+ ep->resp = NULL;
+ if (ep->state & FC_EX_DONE)
+ return rc;
+ ep->esb_stat |= ESB_ST_COMPLETE;
+
+ if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
+ ep->state |= FC_EX_DONE;
+ if (cancel_delayed_work(&ep->timeout_work))
+ atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
+ rc = 0;
+ }
+ return rc;
+}
+
+static void fc_exch_mgr_delete_ep(struct fc_exch *ep)
+{
+ struct fc_exch_mgr *mp;
+
+ mp = ep->em;
+ spin_lock_bh(&mp->em_lock);
+ WARN_ON(mp->total_exches <= 0);
+ mp->total_exches--;
+ mp->exches[ep->xid - mp->min_xid] = NULL;
+ list_del(&ep->ex_list);
+ spin_unlock_bh(&mp->em_lock);
+ fc_exch_release(ep); /* drop hold for exch in mp */
+}
+
+/*
+ * Internal version of fc_exch_timer_set - used with lock held.
+ */
+static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
+ unsigned int timer_msec)
+{
+ if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
+ return;
+
+ FC_DEBUG_EXCH("Exchange (%4x) timed out, notifying the upper layer\n",
+ ep->xid);
+ if (schedule_delayed_work(&ep->timeout_work,
+ msecs_to_jiffies(timer_msec)))
+ fc_exch_hold(ep); /* hold for timer */
+}
+
+/*
+ * Set timer for an exchange.
+ * The time is a minimum delay in milliseconds until the timer fires.
+ * Used for upper level protocols to time out the exchange.
+ * The timer is cancelled when it fires or when the exchange completes.
+ */
+static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
+{
+ spin_lock_bh(&ep->ex_lock);
+ fc_exch_timer_set_locked(ep, timer_msec);
+ spin_unlock_bh(&ep->ex_lock);
+}
+
+int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
+{
+ struct fc_seq *sp;
+ struct fc_exch *ep;
+ struct fc_frame *fp;
+ int error;
+
+ ep = fc_seq_exch(req_sp);
+
+ spin_lock_bh(&ep->ex_lock);
+ if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
+ ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
+ spin_unlock_bh(&ep->ex_lock);
+ return -ENXIO;
+ }
+
+ /*
+ * Send the abort on a new sequence if possible.
+ */
+ sp = fc_seq_start_next_locked(&ep->seq);
+ if (!sp) {
+ spin_unlock_bh(&ep->ex_lock);
+ return -ENOMEM;
+ }
+
+ ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
+ if (timer_msec)
+ fc_exch_timer_set_locked(ep, timer_msec);
+ spin_unlock_bh(&ep->ex_lock);
+
+ /*
+ * If not logged into the fabric, don't send ABTS but leave
+ * sequence active until next timeout.
+ */
+ if (!ep->sid)
+ return 0;
+
+ /*
+ * Send an abort for the sequence that timed out.
+ */
+ fp = fc_frame_alloc(ep->lp, 0);
+ if (fp) {
+ fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
+ FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+ error = fc_seq_send(ep->lp, sp, fp);
+ } else
+ error = -ENOBUFS;
+ return error;
+}
+EXPORT_SYMBOL(fc_seq_exch_abort);
+
+/*
+ * Exchange timeout - handle exchange timer expiration.
+ * The timer will have been cancelled before this is called.
+ */
+static void fc_exch_timeout(struct work_struct *work)
+{
+ struct fc_exch *ep = container_of(work, struct fc_exch,
+ timeout_work.work);
+ struct fc_seq *sp = &ep->seq;
+ void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
+ void *arg;
+ u32 e_stat;
+ int rc = 1;
+
+ spin_lock_bh(&ep->ex_lock);
+ if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
+ goto unlock;
+
+ e_stat = ep->esb_stat;
+ if (e_stat & ESB_ST_COMPLETE) {
+ ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
+ if (e_stat & ESB_ST_REC_QUAL)
+ fc_exch_rrq(ep);
+ spin_unlock_bh(&ep->ex_lock);
+ goto done;
+ } else {
+ resp = ep->resp;
+ arg = ep->arg;
+ ep->resp = NULL;
+ if (e_stat & ESB_ST_ABNORMAL)
+ rc = fc_exch_done_locked(ep);
+ spin_unlock_bh(&ep->ex_lock);
+ if (!rc)
+ fc_exch_mgr_delete_ep(ep);
+ if (resp)
+ resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
+ fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
+ goto done;
+ }
+unlock:
+ spin_unlock_bh(&ep->ex_lock);
+done:
+ /*
+ * This release matches the hold taken when the timer was set.
+ */
+ fc_exch_release(ep);
+}
+
+/*
+ * Allocate a sequence.
+ *
+ * We don't support multiple originated sequences on the same exchange.
+ * By implication, any previously originated sequence on this exchange
+ * is complete, and we reallocate the same sequence.
+ */
+static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
+{
+ struct fc_seq *sp;
+
+ sp = &ep->seq;
+ sp->ssb_stat = 0;
+ sp->cnt = 0;
+ sp->id = seq_id;
+ return sp;
+}
+
+/*
+ * fc_em_alloc_xid - returns an xid based on request type
+ * @mp : ptr to the exchange manager
+ * @fp : ptr to the associated frame
+ *
+ * Check the associated frame for the SCSI command type and
+ * direction, which determine the range this exchange id
+ * will be allocated from.
+ *
+ * Returns : 0 on failure, otherwise a valid xid
+ */
+static u16 fc_em_alloc_xid(struct fc_exch_mgr *mp, const struct fc_frame *fp)
+{
+ u16 xid, min, max;
+ u16 *plast;
+ struct fc_exch *ep = NULL;
+
+ if (mp->max_read) {
+ if (fc_frame_is_read(fp)) {
+ min = mp->min_xid;
+ max = mp->max_read;
+ plast = &mp->last_read;
+ } else {
+ min = mp->max_read + 1;
+ max = mp->max_xid;
+ plast = &mp->last_xid;
+ }
+ } else {
+ min = mp->min_xid;
+ max = mp->max_xid;
+ plast = &mp->last_xid;
+ }
+ xid = *plast;
+ do {
+ xid = (xid == max) ? min : xid + 1;
+ ep = mp->exches[xid - mp->min_xid];
+ } while ((ep != NULL) && (xid != *plast));
+
+ if (unlikely(ep))
+ xid = 0;
+ else
+ *plast = xid;
+
+ return xid;
+}
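The allocator above is a circular first-fit search starting just past the last allocation, optionally confined to the read half of the xid space. A self-contained model of the search (tiny range, a flat array standing in for mp->exches; locking is omitted since the real caller holds em_lock):

#include <stdio.h>

#define MIN_XID 1
#define MAX_XID 8	/* tiny range for the demo */

static void *exches[MAX_XID - MIN_XID + 1];	/* xid -> exchange slot */
static unsigned short last_xid = MIN_XID;

/* Circular search from the last allocation point; 0 means exhausted. */
static unsigned short alloc_xid(void)
{
	unsigned short xid = last_xid;

	do {
		xid = (xid == MAX_XID) ? MIN_XID : xid + 1;
		if (!exches[xid - MIN_XID]) {
			exches[xid - MIN_XID] = (void *)1; /* claim slot */
			last_xid = xid;
			return xid;
		}
	} while (xid != last_xid);
	return 0;	/* every xid in the range is in use */
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		printf("xid %u\n", alloc_xid()); /* last two print 0 */
	return 0;
}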
+
+/*
+ * fc_exch_alloc - allocate an exchange.
+ * @mp : ptr to the exchange manager
+ * @xid: input xid
+ *
+ * If the supplied xid is zero, assign the next free exchange ID
+ * from the exchange manager; otherwise use the supplied xid.
+ * Returns with exch lock held.
+ */
+struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp,
+ struct fc_frame *fp, u16 xid)
+{
+ struct fc_exch *ep;
+
+ /* allocate memory for exchange */
+ ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
+ if (!ep) {
+ atomic_inc(&mp->stats.no_free_exch);
+ goto out;
+ }
+ memset(ep, 0, sizeof(*ep));
+
+ spin_lock_bh(&mp->em_lock);
+ /* alloc xid if input xid 0 */
+ if (!xid) {
+ /* alloc a new xid */
+ xid = fc_em_alloc_xid(mp, fp);
+ if (!xid) {
+ printk(KERN_ERR "fc_em_alloc_xid() failed\n");
+ goto err;
+ }
+ }
+
+ fc_exch_hold(ep); /* hold for exch in mp */
+ spin_lock_init(&ep->ex_lock);
+ /*
+ * Hold exch lock for caller to prevent fc_exch_reset()
+ * from releasing exch while fc_exch_alloc() caller is
+ * still working on exch.
+ */
+ spin_lock_bh(&ep->ex_lock);
+
+ mp->exches[xid - mp->min_xid] = ep;
+ list_add_tail(&ep->ex_list, &mp->ex_list);
+ fc_seq_alloc(ep, ep->seq_id++);
+ mp->total_exches++;
+ spin_unlock_bh(&mp->em_lock);
+
+ /*
+ * update exchange
+ */
+ ep->oxid = ep->xid = xid;
+ ep->em = mp;
+ ep->lp = mp->lp;
+ ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */
+ ep->rxid = FC_XID_UNKNOWN;
+ ep->class = mp->class;
+ INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
+out:
+ return ep;
+err:
+ spin_unlock_bh(&mp->em_lock);
+ atomic_inc(&mp->stats.no_free_exch_xid);
+ mempool_free(ep, mp->ep_pool);
+ return NULL;
+}
+EXPORT_SYMBOL(fc_exch_alloc);
+
+/*
+ * Lookup and hold an exchange.
+ */
+static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
+{
+ struct fc_exch *ep = NULL;
+
+ if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
+ spin_lock_bh(&mp->em_lock);
+ ep = mp->exches[xid - mp->min_xid];
+ if (ep) {
+ fc_exch_hold(ep);
+ WARN_ON(ep->xid != xid);
+ }
+ spin_unlock_bh(&mp->em_lock);
+ }
+ return ep;
+}
+
+void fc_exch_done(struct fc_seq *sp)
+{
+ struct fc_exch *ep = fc_seq_exch(sp);
+ int rc;
+
+ spin_lock_bh(&ep->ex_lock);
+ rc = fc_exch_done_locked(ep);
+ spin_unlock_bh(&ep->ex_lock);
+ if (!rc)
+ fc_exch_mgr_delete_ep(ep);
+}
+EXPORT_SYMBOL(fc_exch_done);
+
+/*
+ * Allocate a new exchange as responder.
+ * Sets the responder ID in the frame header.
+ */
+static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+{
+ struct fc_exch *ep;
+ struct fc_frame_header *fh;
+ u16 rxid;
+
+ ep = mp->lp->tt.exch_get(mp->lp, fp);
+ if (ep) {
+ ep->class = fc_frame_class(fp);
+
+ /*
+ * Set EX_CTX indicating we're responding on this exchange.
+ */
+ ep->f_ctl |= FC_FC_EX_CTX; /* we're responding */
+ ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not new */
+ fh = fc_frame_header_get(fp);
+ ep->sid = ntoh24(fh->fh_d_id);
+ ep->did = ntoh24(fh->fh_s_id);
+ ep->oid = ep->did;
+
+ /*
+ * Allocated exchange has placed the XID in the
+ * originator field. Move it to the responder field,
+ * and set the originator XID from the frame.
+ */
+ ep->rxid = ep->xid;
+ ep->oxid = ntohs(fh->fh_ox_id);
+ ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT;
+ if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
+ ep->esb_stat &= ~ESB_ST_SEQ_INIT;
+
+ /*
+ * Set the responder ID in the frame header.
+ * The old one should've been 0xffff.
+ * If it isn't, don't assign one.
+ * Incoming basic link service frames may specify
+ * a referenced RX_ID.
+ */
+ if (fh->fh_type != FC_TYPE_BLS) {
+ rxid = ntohs(fh->fh_rx_id);
+ WARN_ON(rxid != FC_XID_UNKNOWN);
+ fh->fh_rx_id = htons(ep->rxid);
+ }
+ fc_exch_hold(ep); /* hold for caller */
+ spin_unlock_bh(&ep->ex_lock); /* lock from exch_get */
+ }
+ return ep;
+}
+
+/*
+ * Find a sequence for receive where the other end is originating the sequence.
+ * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold
+ * on the ep that should be released by the caller.
+ */
+static enum fc_pf_rjt_reason
+fc_seq_lookup_recip(struct fc_exch_mgr *mp, struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ struct fc_exch *ep = NULL;
+ struct fc_seq *sp = NULL;
+ enum fc_pf_rjt_reason reject = FC_RJT_NONE;
+ u32 f_ctl;
+ u16 xid;
+
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0);
+
+ /*
+ * Lookup or create the exchange if we will be creating the sequence.
+ */
+ if (f_ctl & FC_FC_EX_CTX) {
+ xid = ntohs(fh->fh_ox_id); /* we originated exch */
+ ep = fc_exch_find(mp, xid);
+ if (!ep) {
+ atomic_inc(&mp->stats.xid_not_found);
+ reject = FC_RJT_OX_ID;
+ goto out;
+ }
+ if (ep->rxid == FC_XID_UNKNOWN)
+ ep->rxid = ntohs(fh->fh_rx_id);
+ else if (ep->rxid != ntohs(fh->fh_rx_id)) {
+ reject = FC_RJT_OX_ID;
+ goto rel;
+ }
+ } else {
+ xid = ntohs(fh->fh_rx_id); /* we are the responder */
+
+ /*
+ * Special case for MDS issuing an ELS TEST with a
+ * bad rxid of 0.
+ * XXX take this out once we do the proper reject.
+ */
+ if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
+ fc_frame_payload_op(fp) == ELS_TEST) {
+ fh->fh_rx_id = htons(FC_XID_UNKNOWN);
+ xid = FC_XID_UNKNOWN;
+ }
+
+ /*
+ * new sequence - find the exchange
+ */
+ ep = fc_exch_find(mp, xid);
+ if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
+ if (ep) {
+ atomic_inc(&mp->stats.xid_busy);
+ reject = FC_RJT_RX_ID;
+ goto rel;
+ }
+ ep = fc_exch_resp(mp, fp);
+ if (!ep) {
+ reject = FC_RJT_EXCH_EST; /* XXX */
+ goto out;
+ }
+ xid = ep->xid; /* get our XID */
+ } else if (!ep) {
+ atomic_inc(&mp->stats.xid_not_found);
+ reject = FC_RJT_RX_ID; /* XID not found */
+ goto out;
+ }
+ }
+
+ /*
+ * At this point, we have the exchange held.
+ * Find or create the sequence.
+ */
+ if (fc_sof_is_init(fr_sof(fp))) {
+ sp = fc_seq_start_next(&ep->seq);
+ if (!sp) {
+ reject = FC_RJT_SEQ_XS; /* exchange shortage */
+ goto rel;
+ }
+ sp->id = fh->fh_seq_id;
+ sp->ssb_stat |= SSB_ST_RESP;
+ } else {
+ sp = &ep->seq;
+ if (sp->id != fh->fh_seq_id) {
+ atomic_inc(&mp->stats.seq_not_found);
+ reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
+ goto rel;
+ }
+ }
+ WARN_ON(ep != fc_seq_exch(sp));
+
+ if (f_ctl & FC_FC_SEQ_INIT)
+ ep->esb_stat |= ESB_ST_SEQ_INIT;
+
+ fr_seq(fp) = sp;
+out:
+ return reject;
+rel:
+ fc_exch_done(&ep->seq);
+ fc_exch_release(ep); /* hold from fc_exch_find/fc_exch_resp */
+ return reject;
+}
+
+/*
+ * Find the sequence for a frame being received.
+ * We originated the sequence, so it should be found.
+ * We may or may not have originated the exchange.
+ * Does not hold the sequence for the caller.
+ */
+static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
+ struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ struct fc_exch *ep;
+ struct fc_seq *sp = NULL;
+ u32 f_ctl;
+ u16 xid;
+
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX);
+ xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id);
+ ep = fc_exch_find(mp, xid);
+ if (!ep)
+ return NULL;
+ if (ep->seq.id == fh->fh_seq_id) {
+ /*
+ * Save the RX_ID if we didn't previously know it.
+ */
+ sp = &ep->seq;
+ if ((f_ctl & FC_FC_EX_CTX) != 0 &&
+ ep->rxid == FC_XID_UNKNOWN) {
+ ep->rxid = ntohs(fh->fh_rx_id);
+ }
+ }
+ fc_exch_release(ep);
+ return sp;
+}
+
+/*
+ * Set addresses for an exchange.
+ * Note this must be done before the first sequence of the exchange is sent.
+ */
+static void fc_exch_set_addr(struct fc_exch *ep,
+ u32 orig_id, u32 resp_id)
+{
+ ep->oid = orig_id;
+ if (ep->esb_stat & ESB_ST_RESP) {
+ ep->sid = resp_id;
+ ep->did = orig_id;
+ } else {
+ ep->sid = orig_id;
+ ep->did = resp_id;
+ }
+}
+
+static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
+{
+ struct fc_exch *ep = fc_seq_exch(sp);
+
+ sp = fc_seq_alloc(ep, ep->seq_id++);
+ FC_DEBUG_EXCH("exch %4x f_ctl %6x seq %2x\n",
+ ep->xid, ep->f_ctl, sp->id);
+ return sp;
+}
+
+/*
+ * Allocate a new sequence on the same exchange as the supplied sequence.
+ * This will never return NULL.
+ */
+struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
+{
+ struct fc_exch *ep = fc_seq_exch(sp);
+
+ spin_lock_bh(&ep->ex_lock);
+ WARN_ON((ep->esb_stat & ESB_ST_COMPLETE) != 0);
+ sp = fc_seq_start_next_locked(sp);
+ spin_unlock_bh(&ep->ex_lock);
+
+ return sp;
+}
+EXPORT_SYMBOL(fc_seq_start_next);
+
+int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, struct fc_frame *fp)
+{
+ struct fc_exch *ep;
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ int error;
+ u32 f_ctl;
+
+ ep = fc_seq_exch(sp);
+ WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);
+
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ fc_exch_setup_hdr(ep, fp, f_ctl);
+
+ /*
+ * update sequence count if this frame is carrying
+ * multiple FC frames when sequence offload is enabled
+ * by LLD.
+ */
+ if (fr_max_payload(fp))
+ sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
+ fr_max_payload(fp));
+ else
+ sp->cnt++;
+
+ /*
+ * Send the frame.
+ */
+ error = lp->tt.frame_send(lp, fp);
+
+ /*
+ * Update the exchange and sequence flags,
+ * assuming all frames for the sequence have been sent.
+ * We can only be called to send once for each sequence.
+ */
+ spin_lock_bh(&ep->ex_lock);
+ ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */
+ if (f_ctl & (FC_FC_END_SEQ | FC_FC_SEQ_INIT))
+ ep->esb_stat &= ~ESB_ST_SEQ_INIT;
+ spin_unlock_bh(&ep->ex_lock);
+ return error;
+}
+EXPORT_SYMBOL(fc_seq_send);
+
+void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
+ struct fc_seq_els_data *els_data)
+{
+ switch (els_cmd) {
+ case ELS_LS_RJT:
+ fc_seq_ls_rjt(sp, els_data->reason, els_data->explan);
+ break;
+ case ELS_LS_ACC:
+ fc_seq_ls_acc(sp);
+ break;
+ case ELS_RRQ:
+ fc_exch_els_rrq(sp, els_data->fp);
+ break;
+ case ELS_REC:
+ fc_exch_els_rec(sp, els_data->fp);
+ break;
+ default:
+ FC_DBG("Invalid ELS CMD:%x\n", els_cmd);
+ }
+}
+EXPORT_SYMBOL(fc_seq_els_rsp_send);
+
+/*
+ * Send a sequence, which is also the last sequence in the exchange.
+ */
+static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
+ enum fc_rctl rctl, enum fc_fh_type fh_type)
+{
+ u32 f_ctl;
+ struct fc_exch *ep = fc_seq_exch(sp);
+
+ f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
+ f_ctl |= ep->f_ctl;
+ fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
+ fc_seq_send(ep->lp, sp, fp);
+}
+
+/*
+ * Send ACK_1 (or equiv.) indicating we received something.
+ * The frame we're acking is supplied.
+ */
+static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
+{
+ struct fc_frame *fp;
+ struct fc_frame_header *rx_fh;
+ struct fc_frame_header *fh;
+ struct fc_exch *ep = fc_seq_exch(sp);
+ struct fc_lport *lp = ep->lp;
+ unsigned int f_ctl;
+
+ /*
+ * Don't send ACKs for class 3.
+ */
+ if (fc_sof_needs_ack(fr_sof(rx_fp))) {
+ fp = fc_frame_alloc(lp, 0);
+ if (!fp)
+ return;
+
+ fh = fc_frame_header_get(fp);
+ fh->fh_r_ctl = FC_RCTL_ACK_1;
+ fh->fh_type = FC_TYPE_BLS;
+
+ /*
+ * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
+ * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
+ * Bits 9-8 are meaningful (retransmitted or unidirectional).
+ * Last ACK uses bits 7-6 (continue sequence),
+ * bits 5-4 are meaningful (what kind of ACK to use).
+ */
+ rx_fh = fc_frame_header_get(rx_fp);
+ f_ctl = ntoh24(rx_fh->fh_f_ctl);
+ f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
+ FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ |
+ FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT |
+ FC_FC_RETX_SEQ | FC_FC_UNI_TX;
+ f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
+ hton24(fh->fh_f_ctl, f_ctl);
+
+ fc_exch_setup_hdr(ep, fp, f_ctl);
+ fh->fh_seq_id = rx_fh->fh_seq_id;
+ fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
+ fh->fh_parm_offset = htonl(1); /* ack single frame */
+
+ fr_sof(fp) = fr_sof(rx_fp);
+ if (f_ctl & FC_FC_END_SEQ)
+ fr_eof(fp) = FC_EOF_T;
+ else
+ fr_eof(fp) = FC_EOF_N;
+
+ (void) lp->tt.frame_send(lp, fp);
+ }
+}
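The f_ctl manipulation above, in isolation: mask down to the bits that are echoed back, then flip EX_CTX and SEQ_CTX because the ACK flows in the opposite direction. A runnable sketch using the standard FC-FS bit positions (only the subset this path touches):

#include <stdio.h>

/* f_ctl bits, positions as in FC-FS (subset). */
#define F_EX_CTX    (1u << 23)
#define F_SEQ_CTX   (1u << 22)
#define F_FIRST_SEQ (1u << 21)
#define F_LAST_SEQ  (1u << 20)
#define F_END_SEQ   (1u << 19)

/* Build the ACK's f_ctl from the received frame's, as
 * fc_seq_send_ack() does: echo the bookkeeping bits, flip context. */
static unsigned int ack_f_ctl(unsigned int rx_f_ctl)
{
	unsigned int f_ctl;

	f_ctl = rx_f_ctl & (F_EX_CTX | F_SEQ_CTX | F_FIRST_SEQ |
			    F_LAST_SEQ | F_END_SEQ);
	f_ctl ^= F_EX_CTX | F_SEQ_CTX;	/* we answer as the other side */
	return f_ctl;
}

int main(void)
{
	unsigned int rx = F_FIRST_SEQ | F_END_SEQ; /* originator's frame */
	unsigned int tx = ack_f_ctl(rx);

	printf("rx %06x -> ack %06x\n", rx, tx);
	printf("EX_CTX set in ack: %s\n", (tx & F_EX_CTX) ? "yes" : "no");
	return 0;
}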
+
+/*
+ * Send BLS Reject.
+ * This is for rejecting BA_ABTS only.
+ */
+static void
+fc_exch_send_ba_rjt(struct fc_frame *rx_fp, enum fc_ba_rjt_reason reason,
+ enum fc_ba_rjt_explan explan)
+{
+ struct fc_frame *fp;
+ struct fc_frame_header *rx_fh;
+ struct fc_frame_header *fh;
+ struct fc_ba_rjt *rp;
+ struct fc_lport *lp;
+ unsigned int f_ctl;
+
+ lp = fr_dev(rx_fp);
+ fp = fc_frame_alloc(lp, sizeof(*rp));
+ if (!fp)
+ return;
+ fh = fc_frame_header_get(fp);
+ rx_fh = fc_frame_header_get(rx_fp);
+
+ memset(fh, 0, sizeof(*fh) + sizeof(*rp));
+
+ rp = fc_frame_payload_get(fp, sizeof(*rp));
+ rp->br_reason = reason;
+ rp->br_explan = explan;
+
+ /*
+ * seq_id, cs_ctl, df_ctl and param/offset are zero.
+ */
+ memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
+ memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
+ fh->fh_ox_id = rx_fh->fh_rx_id;
+ fh->fh_rx_id = rx_fh->fh_ox_id;
+ fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
+ fh->fh_r_ctl = FC_RCTL_BA_RJT;
+ fh->fh_type = FC_TYPE_BLS;
+
+ /*
+ * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
+ * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
+ * Bits 9-8 are meaningful (retransmitted or unidirectional).
+ * Last ACK uses bits 7-6 (continue sequence),
+ * bits 5-4 are meaningful (what kind of ACK to use).
+ * Always set LAST_SEQ, END_SEQ.
+ */
+ f_ctl = ntoh24(rx_fh->fh_f_ctl);
+ f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
+ FC_FC_END_CONN | FC_FC_SEQ_INIT |
+ FC_FC_RETX_SEQ | FC_FC_UNI_TX;
+ f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
+ f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+ f_ctl &= ~FC_FC_FIRST_SEQ;
+ hton24(fh->fh_f_ctl, f_ctl);
+
+ fr_sof(fp) = fc_sof_class(fr_sof(rx_fp));
+ fr_eof(fp) = FC_EOF_T;
+ if (fc_sof_needs_ack(fr_sof(fp)))
+ fr_eof(fp) = FC_EOF_N;
+
+ (void) lp->tt.frame_send(lp, fp);
+}
+
+/*
+ * Handle an incoming ABTS. This would be for target mode usually,
+ * but could be due to lost FCP transfer ready, confirm or RRQ.
+ * We always handle this as an exchange abort, ignoring the parameter.
+ */
+static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
+{
+ struct fc_frame *fp;
+ struct fc_ba_acc *ap;
+ struct fc_frame_header *fh;
+ struct fc_seq *sp;
+
+ if (!ep)
+ goto reject;
+ spin_lock_bh(&ep->ex_lock);
+ if (ep->esb_stat & ESB_ST_COMPLETE) {
+ spin_unlock_bh(&ep->ex_lock);
+ goto reject;
+ }
+ if (!(ep->esb_stat & ESB_ST_REC_QUAL))
+ fc_exch_hold(ep); /* hold for REC_QUAL */
+ ep->esb_stat |= ESB_ST_ABNORMAL | ESB_ST_REC_QUAL;
+ fc_exch_timer_set_locked(ep, ep->r_a_tov);
+
+ fp = fc_frame_alloc(ep->lp, sizeof(*ap));
+ if (!fp) {
+ spin_unlock_bh(&ep->ex_lock);
+ goto free;
+ }
+ fh = fc_frame_header_get(fp);
+ ap = fc_frame_payload_get(fp, sizeof(*ap));
+ memset(ap, 0, sizeof(*ap));
+ sp = &ep->seq;
+ ap->ba_high_seq_cnt = htons(0xffff);
+ if (sp->ssb_stat & SSB_ST_RESP) {
+ ap->ba_seq_id = sp->id;
+ ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL;
+ ap->ba_high_seq_cnt = fh->fh_seq_cnt;
+ ap->ba_low_seq_cnt = htons(sp->cnt);
+ }
+ sp = fc_seq_start_next(sp);
+ spin_unlock_bh(&ep->ex_lock);
+ fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
+ fc_frame_free(rx_fp);
+ return;
+
+reject:
+ fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
+free:
+ fc_frame_free(rx_fp);
+}
+
+/*
+ * Handle receive where the other end is originating the sequence.
+ */
+static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
+ struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ struct fc_seq *sp = NULL;
+ struct fc_exch *ep = NULL;
+ enum fc_sof sof;
+ enum fc_eof eof;
+ u32 f_ctl;
+ enum fc_pf_rjt_reason reject;
+
+ fr_seq(fp) = NULL;
+ reject = fc_seq_lookup_recip(mp, fp);
+ if (reject == FC_RJT_NONE) {
+ sp = fr_seq(fp); /* sequence will be held */
+ ep = fc_seq_exch(sp);
+ sof = fr_sof(fp);
+ eof = fr_eof(fp);
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ fc_seq_send_ack(sp, fp);
+
+ /*
+ * Call the receive function.
+ *
+ * The receive function may allocate a new sequence
+ * over the old one, so we shouldn't change the
+ * sequence after this.
+ *
+ * The frame will be freed by the receive function.
+ * If new exch resp handler is valid then call that
+ * first.
+ */
+ if (ep->resp)
+ ep->resp(sp, fp, ep->arg);
+ else
+ lp->tt.lport_recv(lp, sp, fp);
+ fc_exch_release(ep); /* release from lookup */
+ } else {
+ FC_DEBUG_EXCH("exch/seq lookup failed: reject %x\n", reject);
+ fc_frame_free(fp);
+ }
+}
+
+/*
+ * Handle receive where the other end is originating the sequence in
+ * response to our exchange.
+ */
+static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ struct fc_seq *sp;
+ struct fc_exch *ep;
+ enum fc_sof sof;
+ u32 f_ctl;
+ void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
+ void *ex_resp_arg;
+ int rc;
+
+ ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
+ if (!ep) {
+ atomic_inc(&mp->stats.xid_not_found);
+ goto out;
+ }
+ if (ep->rxid == FC_XID_UNKNOWN)
+ ep->rxid = ntohs(fh->fh_rx_id);
+ if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
+ atomic_inc(&mp->stats.xid_not_found);
+ goto rel;
+ }
+ if (ep->did != ntoh24(fh->fh_s_id) &&
+ ep->did != FC_FID_FLOGI) {
+ atomic_inc(&mp->stats.xid_not_found);
+ goto rel;
+ }
+ sof = fr_sof(fp);
+ if (fc_sof_is_init(sof)) {
+ sp = fc_seq_start_next(&ep->seq);
+ sp->id = fh->fh_seq_id;
+ sp->ssb_stat |= SSB_ST_RESP;
+ } else {
+ sp = &ep->seq;
+ if (sp->id != fh->fh_seq_id) {
+ atomic_inc(&mp->stats.seq_not_found);
+ goto rel;
+ }
+ }
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ fr_seq(fp) = sp;
+ if (f_ctl & FC_FC_SEQ_INIT)
+ ep->esb_stat |= ESB_ST_SEQ_INIT;
+
+ if (fc_sof_needs_ack(sof))
+ fc_seq_send_ack(sp, fp);
+ resp = ep->resp;
+ ex_resp_arg = ep->arg;
+
+ if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
+ (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
+ (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
+ spin_lock_bh(&ep->ex_lock);
+ rc = fc_exch_done_locked(ep);
+ WARN_ON(fc_seq_exch(sp) != ep);
+ spin_unlock_bh(&ep->ex_lock);
+ if (!rc)
+ fc_exch_mgr_delete_ep(ep);
+ }
+
+ /*
+ * Call the receive function.
+ * The sequence is held (has a refcnt) for us,
+ * but not for the receive function.
+ *
+ * The receive function may allocate a new sequence
+ * over the old one, so we shouldn't change the
+ * sequence after this.
+ *
+ * The frame will be freed by the receive function.
+ * If new exch resp handler is valid then call that
+ * first.
+ */
+ if (resp)
+ resp(sp, fp, ex_resp_arg);
+ else
+ fc_frame_free(fp);
+ fc_exch_release(ep);
+ return;
+rel:
+ fc_exch_release(ep);
+out:
+ fc_frame_free(fp);
+}
+
+/*
+ * Handle receive for a sequence where other end is responding to our sequence.
+ */
+static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+{
+ struct fc_seq *sp;
+
+ sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
+ if (!sp) {
+ atomic_inc(&mp->stats.xid_not_found);
+ FC_DEBUG_EXCH("seq lookup failed\n");
+ } else {
+ atomic_inc(&mp->stats.non_bls_resp);
+ FC_DEBUG_EXCH("non-BLS response to sequence");
+ }
+ fc_frame_free(fp);
+}
+
+/*
+ * Handle the response to an ABTS for exchange or sequence.
+ * This can be BA_ACC or BA_RJT.
+ */
+static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
+{
+ void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
+ void *ex_resp_arg;
+ struct fc_frame_header *fh;
+ struct fc_ba_acc *ap;
+ struct fc_seq *sp;
+ u16 low;
+ u16 high;
+ int rc = 1, has_rec = 0;
+
+ fh = fc_frame_header_get(fp);
+ FC_DEBUG_EXCH("exch: BLS rctl %x - %s\n",
+ fh->fh_r_ctl, fc_exch_rctl_name(fh->fh_r_ctl));
+
+ if (cancel_delayed_work_sync(&ep->timeout_work))
+ fc_exch_release(ep); /* release from pending timer hold */
+
+ spin_lock_bh(&ep->ex_lock);
+ switch (fh->fh_r_ctl) {
+ case FC_RCTL_BA_ACC:
+ ap = fc_frame_payload_get(fp, sizeof(*ap));
+ if (!ap)
+ break;
+
+ /*
+ * Decide whether to establish a Recovery Qualifier.
+ * We do this if there is a non-empty SEQ_CNT range and
+ * SEQ_ID is the same as the one we aborted.
+ */
+ low = ntohs(ap->ba_low_seq_cnt);
+ high = ntohs(ap->ba_high_seq_cnt);
+ if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 &&
+ (ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL ||
+ ap->ba_seq_id == ep->seq_id) && low != high) {
+ ep->esb_stat |= ESB_ST_REC_QUAL;
+ fc_exch_hold(ep); /* hold for recovery qualifier */
+ has_rec = 1;
+ }
+ break;
+ case FC_RCTL_BA_RJT:
+ break;
+ default:
+ break;
+ }
+
+ resp = ep->resp;
+ ex_resp_arg = ep->arg;
+
+ /* TODO: do we need additional checks here? Can we reuse more of
+ * fc_exch_recv_seq_resp()?
+ */
+ sp = &ep->seq;
+ /*
+ * do we want to check END_SEQ as well as LAST_SEQ here?
+ */
+ if (ep->fh_type != FC_TYPE_FCP &&
+ ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
+ rc = fc_exch_done_locked(ep);
+ spin_unlock_bh(&ep->ex_lock);
+ if (!rc)
+ fc_exch_mgr_delete_ep(ep);
+
+ if (resp)
+ resp(sp, fp, ex_resp_arg);
+ else
+ fc_frame_free(fp);
+
+ if (has_rec)
+ fc_exch_timer_set(ep, ep->r_a_tov);
+
+}
+
+/*
+ * Receive BLS sequence.
+ * This is always a sequence initiated by the remote side.
+ * We may be either the originator or recipient of the exchange.
+ */
+static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
+{
+ struct fc_frame_header *fh;
+ struct fc_exch *ep;
+ u32 f_ctl;
+
+ fh = fc_frame_header_get(fp);
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ fr_seq(fp) = NULL;
+
+ ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
+ ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id));
+ if (ep && (f_ctl & FC_FC_SEQ_INIT)) {
+ spin_lock_bh(&ep->ex_lock);
+ ep->esb_stat |= ESB_ST_SEQ_INIT;
+ spin_unlock_bh(&ep->ex_lock);
+ }
+ if (f_ctl & FC_FC_SEQ_CTX) {
+ /*
+ * A response to a sequence we initiated.
+ * This should only be ACKs for class 2 or F.
+ */
+ switch (fh->fh_r_ctl) {
+ case FC_RCTL_ACK_1:
+ case FC_RCTL_ACK_0:
+ break;
+ default:
+ FC_DEBUG_EXCH("BLS rctl %x - %s received",
+ fh->fh_r_ctl,
+ fc_exch_rctl_name(fh->fh_r_ctl));
+ break;
+ }
+ fc_frame_free(fp);
+ } else {
+ switch (fh->fh_r_ctl) {
+ case FC_RCTL_BA_RJT:
+ case FC_RCTL_BA_ACC:
+ if (ep)
+ fc_exch_abts_resp(ep, fp);
+ else
+ fc_frame_free(fp);
+ break;
+ case FC_RCTL_BA_ABTS:
+ fc_exch_recv_abts(ep, fp);
+ break;
+ default: /* ignore junk */
+ fc_frame_free(fp);
+ break;
+ }
+ }
+ if (ep)
+ fc_exch_release(ep); /* release hold taken by fc_exch_find */
+}
+
+/*
+ * Accept sequence with LS_ACC.
+ * If this fails due to allocation or transmit congestion, assume the
+ * originator will repeat the sequence.
+ */
+static void fc_seq_ls_acc(struct fc_seq *req_sp)
+{
+ struct fc_seq *sp;
+ struct fc_els_ls_acc *acc;
+ struct fc_frame *fp;
+
+ sp = fc_seq_start_next(req_sp);
+ fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
+ if (fp) {
+ acc = fc_frame_payload_get(fp, sizeof(*acc));
+ memset(acc, 0, sizeof(*acc));
+ acc->la_cmd = ELS_LS_ACC;
+ fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
+ }
+}
+
+/*
+ * Reject sequence with ELS LS_RJT.
+ * If this fails due to allocation or transmit congestion, assume the
+ * originator will repeat the sequence.
+ */
+static void fc_seq_ls_rjt(struct fc_seq *req_sp, enum fc_els_rjt_reason reason,
+ enum fc_els_rjt_explan explan)
+{
+ struct fc_seq *sp;
+ struct fc_els_ls_rjt *rjt;
+ struct fc_frame *fp;
+
+ sp = fc_seq_start_next(req_sp);
+ fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*rjt));
+ if (fp) {
+ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ memset(rjt, 0, sizeof(*rjt));
+ rjt->er_cmd = ELS_LS_RJT;
+ rjt->er_reason = reason;
+ rjt->er_explan = explan;
+ fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
+ }
+}
+
+static void fc_exch_reset(struct fc_exch *ep)
+{
+ struct fc_seq *sp;
+ void (*resp)(struct fc_seq *, struct fc_frame *, void *);
+ void *arg;
+ int rc = 1;
+
+ spin_lock_bh(&ep->ex_lock);
+ ep->state |= FC_EX_RST_CLEANUP;
+ /*
+ * we really want to call del_timer_sync, but cannot due
+ * to the lport calling with the lport lock held (some resp
+ * functions can also grab the lport lock which could cause
+ * a deadlock).
+ */
+ if (cancel_delayed_work(&ep->timeout_work))
+ atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
+ resp = ep->resp;
+ ep->resp = NULL;
+ if (ep->esb_stat & ESB_ST_REC_QUAL)
+ atomic_dec(&ep->ex_refcnt); /* drop hold for rec_qual */
+ ep->esb_stat &= ~ESB_ST_REC_QUAL;
+ arg = ep->arg;
+ sp = &ep->seq;
+ rc = fc_exch_done_locked(ep);
+ spin_unlock_bh(&ep->ex_lock);
+ if (!rc)
+ fc_exch_mgr_delete_ep(ep);
+
+ if (resp)
+ resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
+}
+
+/*
+ * Reset an exchange manager, releasing all sequences and exchanges.
+ * If sid is non-zero, reset only exchanges we source from that FID.
+ * If did is non-zero, reset only exchanges destined to that FID.
+ */
+void fc_exch_mgr_reset(struct fc_exch_mgr *mp, u32 sid, u32 did)
+{
+ struct fc_exch *ep;
+ struct fc_exch *next;
+
+ spin_lock_bh(&mp->em_lock);
+restart:
+ list_for_each_entry_safe(ep, next, &mp->ex_list, ex_list) {
+ if ((sid == 0 || sid == ep->sid) &&
+ (did == 0 || did == ep->did)) {
+ fc_exch_hold(ep);
+ spin_unlock_bh(&mp->em_lock);
+
+ fc_exch_reset(ep);
+
+ fc_exch_release(ep);
+ spin_lock_bh(&mp->em_lock);
+
+ /*
+ * must restart the loop: multiple eps may have
+ * been released while the lock was dropped.
+ */
+ goto restart;
+ }
+ }
+ spin_unlock_bh(&mp->em_lock);
+}
+EXPORT_SYMBOL(fc_exch_mgr_reset);
+
+/*
+ * Handle incoming ELS REC - Read Exchange Concise.
+ * Note that the requesting port may be different from the S_ID in the request.
+ */
+static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp)
+{
+ struct fc_frame *fp;
+ struct fc_exch *ep;
+ struct fc_exch_mgr *em;
+ struct fc_els_rec *rp;
+ struct fc_els_rec_acc *acc;
+ enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
+ enum fc_els_rjt_explan explan;
+ u32 sid;
+ u16 rxid;
+ u16 oxid;
+
+ rp = fc_frame_payload_get(rfp, sizeof(*rp));
+ explan = ELS_EXPL_INV_LEN;
+ if (!rp)
+ goto reject;
+ sid = ntoh24(rp->rec_s_id);
+ rxid = ntohs(rp->rec_rx_id);
+ oxid = ntohs(rp->rec_ox_id);
+
+ /*
+ * Currently it's hard to find the local S_ID from the exchange
+ * manager. This will eventually be fixed, but for now it's easier
+ * to look up the subject exchange twice, once as if we were
+ * the initiator, and then again if we weren't.
+ */
+ em = fc_seq_exch(sp)->em;
+ ep = fc_exch_find(em, oxid);
+ explan = ELS_EXPL_OXID_RXID;
+ if (ep && ep->oid == sid) {
+ if (ep->rxid != FC_XID_UNKNOWN &&
+ rxid != FC_XID_UNKNOWN &&
+ ep->rxid != rxid)
+ goto rel;
+ } else {
+ if (ep)
+ fc_exch_release(ep);
+ ep = NULL;
+ if (rxid != FC_XID_UNKNOWN)
+ ep = fc_exch_find(em, rxid);
+ if (!ep)
+ goto reject;
+ }
+
+ fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
+ if (!fp) {
+ fc_exch_done(sp);
+ goto out;
+ }
+ sp = fc_seq_start_next(sp);
+ acc = fc_frame_payload_get(fp, sizeof(*acc));
+ memset(acc, 0, sizeof(*acc));
+ acc->reca_cmd = ELS_LS_ACC;
+ acc->reca_ox_id = rp->rec_ox_id;
+ memcpy(acc->reca_ofid, rp->rec_s_id, 3);
+ acc->reca_rx_id = htons(ep->rxid);
+ if (ep->sid == ep->oid)
+ hton24(acc->reca_rfid, ep->did);
+ else
+ hton24(acc->reca_rfid, ep->sid);
+ acc->reca_fc4value = htonl(ep->seq.rec_data);
+ acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
+ ESB_ST_SEQ_INIT |
+ ESB_ST_COMPLETE));
+ sp = fc_seq_start_next(sp);
+ fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
+out:
+ fc_exch_release(ep);
+ fc_frame_free(rfp);
+ return;
+
+rel:
+ fc_exch_release(ep);
+reject:
+ fc_seq_ls_rjt(sp, reason, explan);
+ fc_frame_free(rfp);
+}
+
+/*
+ * Handle response from RRQ.
+ * Not much to do here, really.
+ * Should report errors.
+ *
+ * TODO: fix error handler.
+ */
+static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
+{
+ struct fc_exch *aborted_ep = arg;
+ unsigned int op;
+
+ if (IS_ERR(fp)) {
+ int err = PTR_ERR(fp);
+
+ if (err == -FC_EX_CLOSED)
+ goto cleanup;
+ FC_DBG("Cannot process RRQ, because of frame error %d\n", err);
+ return;
+ }
+
+ op = fc_frame_payload_op(fp);
+ fc_frame_free(fp);
+
+ switch (op) {
+ case ELS_LS_RJT:
+ FC_DBG("LS_RJT for RRQ");
+ /* fall through */
+ case ELS_LS_ACC:
+ goto cleanup;
+ default:
+ FC_DBG("unexpected response op %x for RRQ", op);
+ return;
+ }
+
+cleanup:
+ fc_exch_done(&aborted_ep->seq);
+ /* drop hold for rec qual */
+ fc_exch_release(aborted_ep);
+}
+
+/*
+ * Send ELS RRQ - Reinstate Recovery Qualifier.
+ * This tells the remote port to stop blocking the use of
+ * the exchange and the seq_cnt range.
+ */
+static void fc_exch_rrq(struct fc_exch *ep)
+{
+ struct fc_lport *lp;
+ struct fc_els_rrq *rrq;
+ struct fc_frame *fp;
+ struct fc_seq *rrq_sp;
+ u32 did;
+
+ lp = ep->lp;
+
+ fp = fc_frame_alloc(lp, sizeof(*rrq));
+ if (!fp)
+ return;
+ rrq = fc_frame_payload_get(fp, sizeof(*rrq));
+ memset(rrq, 0, sizeof(*rrq));
+ rrq->rrq_cmd = ELS_RRQ;
+ hton24(rrq->rrq_s_id, ep->sid);
+ rrq->rrq_ox_id = htons(ep->oxid);
+ rrq->rrq_rx_id = htons(ep->rxid);
+
+ did = ep->did;
+ if (ep->esb_stat & ESB_ST_RESP)
+ did = ep->sid;
+
+ fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
+ fc_host_port_id(lp->host), FC_TYPE_ELS,
+ FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+
+ rrq_sp = fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep,
+ lp->e_d_tov);
+ if (!rrq_sp) {
+ ep->esb_stat |= ESB_ST_REC_QUAL;
+ fc_exch_timer_set_locked(ep, ep->r_a_tov);
+ return;
+ }
+}
+
+
+/*
+ * Handle incoming ELS RRQ - Reinstate Recovery Qualifier.
+ */
+static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp)
+{
+ struct fc_exch *ep; /* request or subject exchange */
+ struct fc_els_rrq *rp;
+ u32 sid;
+ u16 xid;
+ enum fc_els_rjt_explan explan;
+
+ rp = fc_frame_payload_get(fp, sizeof(*rp));
+ explan = ELS_EXPL_INV_LEN;
+ if (!rp)
+ goto reject;
+
+ /*
+ * lookup subject exchange.
+ */
+ ep = fc_seq_exch(sp);
+ sid = ntoh24(rp->rrq_s_id); /* subject source */
+ xid = ep->did == sid ? ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
+ ep = fc_exch_find(ep->em, xid);
+
+ explan = ELS_EXPL_OXID_RXID;
+ if (!ep)
+ goto reject;
+ spin_lock_bh(&ep->ex_lock);
+ if (ep->oxid != ntohs(rp->rrq_ox_id))
+ goto unlock_reject;
+ if (ep->rxid != ntohs(rp->rrq_rx_id) &&
+ ep->rxid != FC_XID_UNKNOWN)
+ goto unlock_reject;
+ explan = ELS_EXPL_SID;
+ if (ep->sid != sid)
+ goto unlock_reject;
+
+ /*
+ * Clear Recovery Qualifier state, and cancel timer if complete.
+ */
+ if (ep->esb_stat & ESB_ST_REC_QUAL) {
+ ep->esb_stat &= ~ESB_ST_REC_QUAL;
+ atomic_dec(&ep->ex_refcnt); /* drop hold for rec qual */
+ }
+ if (ep->esb_stat & ESB_ST_COMPLETE) {
+ if (cancel_delayed_work(&ep->timeout_work))
+ atomic_dec(&ep->ex_refcnt); /* drop timer hold */
+ }
+
+ spin_unlock_bh(&ep->ex_lock);
+
+ /*
+ * Send LS_ACC.
+ */
+ fc_seq_ls_acc(sp);
+ fc_frame_free(fp);
+ return;
+
+unlock_reject:
+ spin_unlock_bh(&ep->ex_lock);
+ fc_exch_release(ep); /* drop hold from fc_exch_find */
+reject:
+ fc_seq_ls_rjt(sp, ELS_RJT_LOGIC, explan);
+ fc_frame_free(fp);
+}
+
+struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
+ enum fc_class class,
+ u16 min_xid, u16 max_xid)
+{
+ struct fc_exch_mgr *mp;
+ size_t len;
+
+ if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) {
+ FC_DBG("Invalid min_xid 0x:%x and max_xid 0x:%x\n",
+ min_xid, max_xid);
+ return NULL;
+ }
+
+ /*
+ * Memory needed for the EM
+ */
+#define xid_ok(i, m1, m2) (((i) >= (m1)) && ((i) <= (m2)))
+ len = (max_xid - min_xid + 1) * (sizeof(struct fc_exch *));
+ len += sizeof(struct fc_exch_mgr);
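+
+ /*
+ * Layout sketch (illustrative): a single allocation holds the
+ * fc_exch_mgr immediately followed by the array of
+ * (max_xid - min_xid + 1) exchange pointers, indexed by
+ * xid - min_xid.
+ */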
+
+ mp = kzalloc(len, GFP_ATOMIC);
+ if (!mp)
+ return NULL;
+
+ mp->class = class;
+ mp->total_exches = 0;
+ mp->exches = (struct fc_exch **)(mp + 1);
+ mp->lp = lp;
+ /* adjust em exch xid range for offload */
+ mp->min_xid = min_xid;
+ mp->max_xid = max_xid;
+ mp->last_xid = min_xid - 1;
+ mp->max_read = 0;
+ mp->last_read = 0;
+ if (lp->lro_enabled && xid_ok(lp->lro_xid, min_xid, max_xid)) {
+ mp->max_read = lp->lro_xid;
+ mp->last_read = min_xid - 1;
+ mp->last_xid = mp->max_read;
+ } else {
+ /* disable lro if no xid control over read */
+ lp->lro_enabled = 0;
+ }
+
+ INIT_LIST_HEAD(&mp->ex_list);
+ spin_lock_init(&mp->em_lock);
+
+ mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
+ if (!mp->ep_pool)
+ goto free_mp;
+
+ return mp;
+
+free_mp:
+ kfree(mp);
+ return NULL;
+}
+EXPORT_SYMBOL(fc_exch_mgr_alloc);
+
+void fc_exch_mgr_free(struct fc_exch_mgr *mp)
+{
+ WARN_ON(!mp);
+ /*
+ * The total exch count must be zero
+ * before freeing exchange manager.
+ */
+ WARN_ON(mp->total_exches != 0);
+ mempool_destroy(mp->ep_pool);
+ kfree(mp);
+}
+EXPORT_SYMBOL(fc_exch_mgr_free);
+
+struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp)
+{
+ if (!lp || !lp->emp)
+ return NULL;
+
+ return fc_exch_alloc(lp->emp, fp, 0);
+}
+EXPORT_SYMBOL(fc_exch_get);
+
+struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
+ struct fc_frame *fp,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *fp,
+ void *arg),
+ void (*destructor)(struct fc_seq *, void *),
+ void *arg, u32 timer_msec)
+{
+ struct fc_exch *ep;
+ struct fc_seq *sp = NULL;
+ struct fc_frame_header *fh;
+ int rc = 1;
+
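+ /*
+ * Note on the locking contract (sketch): exch_get() is expected
+ * to return the new exchange with its ex_lock held, which is why
+ * the paths below unlock without an explicit lock here.
+ */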
+ ep = lp->tt.exch_get(lp, fp);
+ if (!ep) {
+ fc_frame_free(fp);
+ return NULL;
+ }
+ ep->esb_stat |= ESB_ST_SEQ_INIT;
+ fh = fc_frame_header_get(fp);
+ fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
+ ep->resp = resp;
+ ep->destructor = destructor;
+ ep->arg = arg;
+ ep->r_a_tov = FC_DEF_R_A_TOV;
+ ep->lp = lp;
+ sp = &ep->seq;
+
+ ep->fh_type = fh->fh_type; /* save for possible timeout handling */
+ ep->f_ctl = ntoh24(fh->fh_f_ctl);
+ fc_exch_setup_hdr(ep, fp, ep->f_ctl);
+ sp->cnt++;
+
+ if (unlikely(lp->tt.frame_send(lp, fp)))
+ goto err;
+
+ if (timer_msec)
+ fc_exch_timer_set_locked(ep, timer_msec);
+ ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */
+
+ if (ep->f_ctl & FC_FC_SEQ_INIT)
+ ep->esb_stat &= ~ESB_ST_SEQ_INIT;
+ spin_unlock_bh(&ep->ex_lock);
+ return sp;
+err:
+ rc = fc_exch_done_locked(ep);
+ spin_unlock_bh(&ep->ex_lock);
+ if (!rc)
+ fc_exch_mgr_delete_ep(ep);
+ return NULL;
+}
+EXPORT_SYMBOL(fc_exch_seq_send);
+
+/*
+ * Receive a frame
+ */
+void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
+ struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ u32 f_ctl;
+
+ /* lport lock ? */
+ if (!lp || !mp || (lp->state == LPORT_ST_NONE)) {
+ FC_DBG("fc_lport or EM is not allocated and configured");
+ fc_frame_free(fp);
+ return;
+ }
+
+ /*
+ * If frame is marked invalid, just drop it.
+ */
+ f_ctl = ntoh24(fh->fh_f_ctl);
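+ /*
+ * Dispatch summary (illustrative): BLS frames are handled
+ * separately; otherwise EX_CTX set with SEQ_CTX clear means a
+ * new sequence responding to our exchange, SEQ_CTX set means a
+ * response to a sequence we initiated, and neither set means a
+ * new request from the remote port.
+ */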
+ switch (fr_eof(fp)) {
+ case FC_EOF_T:
+ if (f_ctl & FC_FC_END_SEQ)
+ skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
+ /* fall through */
+ case FC_EOF_N:
+ if (fh->fh_type == FC_TYPE_BLS)
+ fc_exch_recv_bls(mp, fp);
+ else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
+ FC_FC_EX_CTX)
+ fc_exch_recv_seq_resp(mp, fp);
+ else if (f_ctl & FC_FC_SEQ_CTX)
+ fc_exch_recv_resp(mp, fp);
+ else
+ fc_exch_recv_req(lp, mp, fp);
+ break;
+ default:
+ FC_DBG("dropping invalid frame (eof %x)", fr_eof(fp));
+ fc_frame_free(fp);
+ break;
+ }
+}
+EXPORT_SYMBOL(fc_exch_recv);
+
+int fc_exch_init(struct fc_lport *lp)
+{
+ if (!lp->tt.exch_get) {
+ /*
+ * exch_put() should be NULL if
+ * exch_get() is NULL
+ */
+ WARN_ON(lp->tt.exch_put);
+ lp->tt.exch_get = fc_exch_get;
+ }
+
+ if (!lp->tt.seq_start_next)
+ lp->tt.seq_start_next = fc_seq_start_next;
+
+ if (!lp->tt.exch_seq_send)
+ lp->tt.exch_seq_send = fc_exch_seq_send;
+
+ if (!lp->tt.seq_send)
+ lp->tt.seq_send = fc_seq_send;
+
+ if (!lp->tt.seq_els_rsp_send)
+ lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
+
+ if (!lp->tt.exch_done)
+ lp->tt.exch_done = fc_exch_done;
+
+ if (!lp->tt.exch_mgr_reset)
+ lp->tt.exch_mgr_reset = fc_exch_mgr_reset;
+
+ if (!lp->tt.seq_exch_abort)
+ lp->tt.seq_exch_abort = fc_seq_exch_abort;
+
+ return 0;
+}
+EXPORT_SYMBOL(fc_exch_init);
+
+int fc_setup_exch_mgr(void)
+{
+ fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!fc_em_cachep)
+ return -ENOMEM;
+ return 0;
+}
+
+void fc_destroy_exch_mgr(void)
+{
+ kmem_cache_destroy(fc_em_cachep);
+}
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
new file mode 100644
index 000000000000..404e63ff46b8
--- /dev/null
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -0,0 +1,2131 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 Red Hat, Inc. All rights reserved.
+ * Copyright(c) 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/scatterlist.h>
+#include <linux/err.h>
+#include <linux/crc32.h>
+
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <scsi/fc/fc_fc2.h>
+
+#include <scsi/libfc.h>
+#include <scsi/fc_encode.h>
+
+MODULE_AUTHOR("Open-FCoE.org");
+MODULE_DESCRIPTION("libfc");
+MODULE_LICENSE("GPL");
+
+static int fc_fcp_debug;
+
+#define FC_DEBUG_FCP(fmt...) \
+ do { \
+ if (fc_fcp_debug) \
+ FC_DBG(fmt); \
+ } while (0)
+
+static struct kmem_cache *scsi_pkt_cachep;
+
+/* SRB state definitions */
+#define FC_SRB_FREE 0 /* cmd is free */
+#define FC_SRB_CMD_SENT (1 << 0) /* cmd has been sent */
+#define FC_SRB_RCV_STATUS (1 << 1) /* response has arrived */
+#define FC_SRB_ABORT_PENDING (1 << 2) /* cmd abort sent to device */
+#define FC_SRB_ABORTED (1 << 3) /* abort acknowledged */
+#define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */
+#define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */
+#define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */
+#define FC_SRB_NOMEM (1 << 7) /* dropped to out of mem */
+
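+/* request flags kept in fc_fcp_pkt.req_flags, not in the state field */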
+#define FC_SRB_READ (1 << 1)
+#define FC_SRB_WRITE (1 << 0)
+
+/*
+ * The SCp.ptr should be tested and set under the host lock. NULL indicates
+ * that the command has been returned to the scsi layer.
+ */
+#define CMD_SP(Cmnd) ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
+#define CMD_ENTRY_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
+#define CMD_COMPL_STATUS(Cmnd) ((Cmnd)->SCp.this_residual)
+#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
+#define CMD_RESID_LEN(Cmnd) ((Cmnd)->SCp.buffers_residual)
+
+struct fc_fcp_internal {
+ mempool_t *scsi_pkt_pool;
+ struct list_head scsi_pkt_queue;
+ u8 throttled;
+};
+
+#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv)
+
+/*
+ * function prototypes
+ * FC scsi I/O related functions
+ */
+static void fc_fcp_recv_data(struct fc_fcp_pkt *, struct fc_frame *);
+static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *);
+static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
+static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
+static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
+static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp);
+static void fc_timeout_error(struct fc_fcp_pkt *);
+static void fc_fcp_timeout(unsigned long data);
+static void fc_fcp_rec(struct fc_fcp_pkt *);
+static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
+static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *);
+static void fc_io_compl(struct fc_fcp_pkt *);
+
+static void fc_fcp_srr(struct fc_fcp_pkt *, enum fc_rctl, u32);
+static void fc_fcp_srr_resp(struct fc_seq *, struct fc_frame *, void *);
+static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
+
+/*
+ * command status codes
+ */
+#define FC_COMPLETE 0
+#define FC_CMD_ABORTED 1
+#define FC_CMD_RESET 2
+#define FC_CMD_PLOGO 3
+#define FC_SNS_RCV 4
+#define FC_TRANS_ERR 5
+#define FC_DATA_OVRRUN 6
+#define FC_DATA_UNDRUN 7
+#define FC_ERROR 8
+#define FC_HRD_ERROR 9
+#define FC_CMD_TIME_OUT 10
+
+/*
+ * Error recovery timeout values.
+ */
+#define FC_SCSI_ER_TIMEOUT (10 * HZ)
+#define FC_SCSI_TM_TOV (10 * HZ)
+#define FC_SCSI_REC_TOV (2 * HZ)
+#define FC_HOST_RESET_TIMEOUT (30 * HZ)
+
+#define FC_MAX_ERROR_CNT 5
+#define FC_MAX_RECOV_RETRY 3
+
+#define FC_FCP_DFLT_QUEUE_DEPTH 32
+
+/**
+ * fc_fcp_pkt_alloc - allocation routine for scsi_pkt packet
+ * @lp: fc lport struct
+ * @gfp: gfp flags for allocation
+ *
+ * This is used by upper layer scsi driver.
+ * Return Value : scsi_pkt structure or NULL on allocation failure.
+ * Context : call from process context. no locking required.
+ */
+static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp)
+{
+ struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
+ struct fc_fcp_pkt *fsp;
+
+ fsp = mempool_alloc(si->scsi_pkt_pool, gfp);
+ if (fsp) {
+ memset(fsp, 0, sizeof(*fsp));
+ fsp->lp = lp;
+ atomic_set(&fsp->ref_cnt, 1);
+ init_timer(&fsp->timer);
+ INIT_LIST_HEAD(&fsp->list);
+ spin_lock_init(&fsp->scsi_pkt_lock);
+ }
+ return fsp;
+}
+
+/**
+ * fc_fcp_pkt_release - release hold on scsi_pkt packet
+ * @fsp: fcp packet struct
+ *
+ * This is used by upper layer scsi driver.
+ * Context : call from process and interrupt context.
+ * no locking required
+ */
+static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
+{
+ if (atomic_dec_and_test(&fsp->ref_cnt)) {
+ struct fc_fcp_internal *si = fc_get_scsi_internal(fsp->lp);
+
+ mempool_free(fsp, si->scsi_pkt_pool);
+ }
+}
+
+static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
+{
+ atomic_inc(&fsp->ref_cnt);
+}
+
+/**
+ * fc_fcp_pkt_destroy - release hold on scsi_pkt packet
+ *
+ * @seq: exchange sequence
+ * @fsp: fcp packet struct
+ *
+ * Release the hold taken to keep the scsi_pkt around until the
+ * EM layer exchange resource is freed.
+ * Context : called from the EM layer.
+ * no locking required
+ */
+static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp)
+{
+ fc_fcp_pkt_release(fsp);
+}
+
+/**
+ * fc_fcp_lock_pkt - lock a packet and get a ref to it.
+ * @fsp: fcp packet
+ *
+ * We should only return an error if we return a command to scsi-ml before
+ * getting a response. This can happen when we send an abort but do not
+ * wait for the response, and the abort and the command pass each other on
+ * the wire/network layer.
+ *
+ * Note: this function locks the packet and gets a reference to allow
+ * callers to call the completion function while the lock is held and
+ * not have to worry about the packets refcount.
+ *
+ * TODO: Maybe we should just have callers grab/release the lock and
+ * have a function that they call to verify the fsp and grab a ref if
+ * needed.
+ */
+static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp)
+{
+ spin_lock_bh(&fsp->scsi_pkt_lock);
+ if (fsp->state & FC_SRB_COMPL) {
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+ return -EPERM;
+ }
+
+ fc_fcp_pkt_hold(fsp);
+ return 0;
+}
+
+static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
+{
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+ fc_fcp_pkt_release(fsp);
+}
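+
+/*
+ * Typical usage of the pair above (illustrative sketch):
+ *
+ *   if (fc_fcp_lock_pkt(fsp))
+ *           return;                 (already completed; no reference taken)
+ *   ...work on fsp under the lock...
+ *   fc_fcp_unlock_pkt(fsp);         (drops the lock and the reference)
+ */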
+
+static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
+{
+ if (!(fsp->state & FC_SRB_COMPL))
+ mod_timer(&fsp->timer, jiffies + delay);
+}
+
+static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
+{
+ if (!fsp->seq_ptr)
+ return -EINVAL;
+
+ fsp->state |= FC_SRB_ABORT_PENDING;
+ return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
+}
+
+/*
+ * Retry command.
+ * An abort isn't needed.
+ */
+static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
+{
+ if (fsp->seq_ptr) {
+ fsp->lp->tt.exch_done(fsp->seq_ptr);
+ fsp->seq_ptr = NULL;
+ }
+
+ fsp->state &= ~FC_SRB_ABORT_PENDING;
+ fsp->io_status = SUGGEST_RETRY << 24;
+ fsp->status_code = FC_ERROR;
+ fc_fcp_complete_locked(fsp);
+}
+
+/*
+ * Receive SCSI data from target.
+ * Called after receiving solicited data.
+ */
+static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+ struct scsi_cmnd *sc = fsp->cmd;
+ struct fc_lport *lp = fsp->lp;
+ struct fcoe_dev_stats *stats;
+ struct fc_frame_header *fh;
+ size_t start_offset;
+ size_t offset;
+ u32 crc;
+ u32 copy_len = 0;
+ size_t len;
+ void *buf;
+ struct scatterlist *sg;
+ size_t remaining;
+
+ fh = fc_frame_header_get(fp);
+ offset = ntohl(fh->fh_parm_offset);
+ start_offset = offset;
+ len = fr_len(fp) - sizeof(*fh);
+ buf = fc_frame_payload_get(fp, 0);
+
+ if (offset + len > fsp->data_len) {
+ /*
+ * this should never happen
+ */
+ if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
+ fc_frame_crc_check(fp))
+ goto crc_err;
+ FC_DEBUG_FCP("data received past end. len %zx offset %zx "
+ "data_len %x\n", len, offset, fsp->data_len);
+ fc_fcp_retry_cmd(fsp);
+ return;
+ }
+ if (offset != fsp->xfer_len)
+ fsp->state |= FC_SRB_DISCONTIG;
+
+ crc = 0;
+ if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
+ crc = crc32(~0, (u8 *) fh, sizeof(*fh));
+
+ sg = scsi_sglist(sc);
+ remaining = len;
+
+ while (remaining > 0 && sg) {
+ size_t off;
+ void *page_addr;
+ size_t sg_bytes;
+
+ if (offset >= sg->length) {
+ offset -= sg->length;
+ sg = sg_next(sg);
+ continue;
+ }
+ sg_bytes = min(remaining, sg->length - offset);
+
+ /*
+ * The scatterlist item may be bigger than PAGE_SIZE,
+ * but we are limited to mapping PAGE_SIZE at a time.
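+ * E.g. (illustrative, 4 KB pages): off = 0x1ff0 limits this
+ * pass to 16 bytes; the next loop iteration maps the
+ * following page.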
+ */
+ off = offset + sg->offset;
+ sg_bytes = min(sg_bytes, (size_t)
+ (PAGE_SIZE - (off & ~PAGE_MASK)));
+ page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT),
+ KM_SOFTIRQ0);
+ if (!page_addr)
+ break; /* XXX panic? */
+
+ if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
+ crc = crc32(crc, buf, sg_bytes);
+ memcpy((char *)page_addr + (off & ~PAGE_MASK), buf,
+ sg_bytes);
+
+ kunmap_atomic(page_addr, KM_SOFTIRQ0);
+ buf += sg_bytes;
+ offset += sg_bytes;
+ remaining -= sg_bytes;
+ copy_len += sg_bytes;
+ }
+
+ if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
+ buf = fc_frame_payload_get(fp, 0);
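+ /*
+ * The FC CRC covers the fill bytes that pad the payload
+ * to a word boundary on the wire, so include them in the
+ * running CRC before comparing.
+ */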
+ if (len % 4) {
+ crc = crc32(crc, buf + len, 4 - (len % 4));
+ len += 4 - (len % 4);
+ }
+
+ if (~crc != le32_to_cpu(fr_crc(fp))) {
+crc_err:
+ stats = lp->dev_stats[smp_processor_id()];
+ stats->ErrorFrames++;
+ if (stats->InvalidCRCCount++ < 5)
+ FC_DBG("CRC error on data frame\n");
+ /*
+ * Assume the frame is total garbage.
+ * We may have copied it over the good part
+ * of the buffer.
+ * If so, we need to retry the entire operation.
+ * Otherwise, ignore it.
+ */
+ if (fsp->state & FC_SRB_DISCONTIG)
+ fc_fcp_retry_cmd(fsp);
+ return;
+ }
+ }
+
+ if (fsp->xfer_contig_end == start_offset)
+ fsp->xfer_contig_end += copy_len;
+ fsp->xfer_len += copy_len;
+
+ /*
+ * In the very rare event that this data arrived after the response
+ * and completes the transfer, call the completion handler.
+ */
+ if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
+ fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
+ fc_fcp_complete_locked(fsp);
+}
+
+/*
+ * fc_fcp_send_data - Send SCSI data to target.
+ * @fsp: ptr to fc_fcp_pkt
+ * @seq: ptr to this sequence
+ * @offset: starting offset for this data request
+ * @seq_blen: the burst length for this data request
+ *
+ * Called after receiving a Transfer Ready data descriptor.
+ * If the LLD is capable of sequence offload, send seq_blen bytes
+ * of data in a single frame; otherwise send multiple FC frames,
+ * each of the max FC frame payload supported by the target port.
+ *
+ * Returns : 0 for success.
+ */
+static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
+ size_t offset, size_t seq_blen)
+{
+ struct fc_exch *ep;
+ struct scsi_cmnd *sc;
+ struct scatterlist *sg;
+ struct fc_frame *fp = NULL;
+ struct fc_lport *lp = fsp->lp;
+ size_t remaining;
+ size_t t_blen;
+ size_t tlen;
+ size_t sg_bytes;
+ size_t frame_offset, fh_parm_offset;
+ int error;
+ void *data = NULL;
+ void *page_addr;
+ int using_sg = lp->sg_supp;
+ u32 f_ctl;
+
+ WARN_ON(seq_blen <= 0);
+ if (unlikely(offset + seq_blen > fsp->data_len)) {
+ /* this should never happen */
+ FC_DEBUG_FCP("xfer-ready past end. seq_blen %zx offset %zx\n",
+ seq_blen, offset);
+ fc_fcp_send_abort(fsp);
+ return 0;
+ } else if (offset != fsp->xfer_len) {
+ /* Out of Order Data Request - no problem, but unexpected. */
+ FC_DEBUG_FCP("xfer-ready non-contiguous. "
+ "seq_blen %zx offset %zx\n", seq_blen, offset);
+ }
+
+ /*
+ * If the LLD is capable of seq_offload, set the transport
+ * burst length (t_blen) to seq_blen; otherwise set t_blen
+ * to the max FC frame payload previously set in fsp->max_payload.
+ */
+ t_blen = lp->seq_offload ? seq_blen : fsp->max_payload;
+ WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD);
+ if (t_blen > 512)
+ t_blen &= ~(512 - 1); /* round down to block size */
+ WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD); /* won't go below 256 */
+ sc = fsp->cmd;
+
+ remaining = seq_blen;
+ fh_parm_offset = frame_offset = offset;
+ tlen = 0;
+ seq = lp->tt.seq_start_next(seq);
+ f_ctl = FC_FC_REL_OFF;
+ WARN_ON(!seq);
+
+ /*
+ * If a get_page()/put_page() will fail, don't use sg lists
+ * in the fc_frame structure.
+ *
+ * The put_page() may be long after the I/O has completed
+ * in the case of FCoE, since the network driver does it
+ * via free_skb(). See the test in free_pages_check().
+ *
+ * Test this case with 'dd </dev/zero >/dev/st0 bs=64k'.
+ */
+ if (using_sg) {
+ for (sg = scsi_sglist(sc); sg; sg = sg_next(sg)) {
+ if (page_count(sg_page(sg)) == 0 ||
+ (sg_page(sg)->flags & (1 << PG_lru |
+ 1 << PG_private |
+ 1 << PG_locked |
+ 1 << PG_active |
+ 1 << PG_slab |
+ 1 << PG_swapcache |
+ 1 << PG_writeback |
+ 1 << PG_reserved |
+ 1 << PG_buddy))) {
+ using_sg = 0;
+ break;
+ }
+ }
+ }
+ sg = scsi_sglist(sc);
+
+ while (remaining > 0 && sg) {
+ if (offset >= sg->length) {
+ offset -= sg->length;
+ sg = sg_next(sg);
+ continue;
+ }
+ if (!fp) {
+ tlen = min(t_blen, remaining);
+
+ /*
+ * TODO: temporary workaround. fc_seq_send() can't
+ * handle odd lengths in non-linear skbs; an odd
+ * length can only occur on the final fragment.
+ */
+ if (tlen % 4)
+ using_sg = 0;
+ if (using_sg) {
+ fp = _fc_frame_alloc(lp, 0);
+ if (!fp)
+ return -ENOMEM;
+ } else {
+ fp = fc_frame_alloc(lp, tlen);
+ if (!fp)
+ return -ENOMEM;
+
+ data = (void *)(fr_hdr(fp)) +
+ sizeof(struct fc_frame_header);
+ }
+ fh_parm_offset = frame_offset;
+ fr_max_payload(fp) = fsp->max_payload;
+ }
+ sg_bytes = min(tlen, sg->length - offset);
+ if (using_sg) {
+ WARN_ON(skb_shinfo(fp_skb(fp))->nr_frags >
+ FC_FRAME_SG_LEN);
+ get_page(sg_page(sg));
+ skb_fill_page_desc(fp_skb(fp),
+ skb_shinfo(fp_skb(fp))->nr_frags,
+ sg_page(sg), sg->offset + offset,
+ sg_bytes);
+ fp_skb(fp)->data_len += sg_bytes;
+ fr_len(fp) += sg_bytes;
+ fp_skb(fp)->truesize += PAGE_SIZE;
+ } else {
+ size_t off = offset + sg->offset;
+
+ /*
+ * The scatterlist item may be bigger than PAGE_SIZE,
+ * but we must not cross pages inside the kmap.
+ */
+ sg_bytes = min(sg_bytes, (size_t) (PAGE_SIZE -
+ (off & ~PAGE_MASK)));
+ page_addr = kmap_atomic(sg_page(sg) +
+ (off >> PAGE_SHIFT),
+ KM_SOFTIRQ0);
+ memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
+ sg_bytes);
+ kunmap_atomic(page_addr, KM_SOFTIRQ0);
+ data += sg_bytes;
+ }
+ offset += sg_bytes;
+ frame_offset += sg_bytes;
+ tlen -= sg_bytes;
+ remaining -= sg_bytes;
+
+ if (tlen)
+ continue;
+
+ /*
+ * Send sequence with transfer sequence initiative in case
+ * this is last FCP frame of the sequence.
+ */
+ if (remaining == 0)
+ f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ;
+
+ ep = fc_seq_exch(seq);
+ fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
+ FC_TYPE_FCP, f_ctl, fh_parm_offset);
+
+ /*
+ * Send this fragment of the sequence.
+ */
+ error = lp->tt.seq_send(lp, seq, fp);
+ if (error) {
+ WARN_ON(1); /* send error should be rare */
+ fc_fcp_retry_cmd(fsp);
+ return 0;
+ }
+ fp = NULL;
+ }
+ fsp->xfer_len += seq_blen; /* premature count? */
+ return 0;
+}
+
+static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+ int ba_done = 1;
+ struct fc_ba_rjt *brp;
+ struct fc_frame_header *fh;
+
+ fh = fc_frame_header_get(fp);
+ switch (fh->fh_r_ctl) {
+ case FC_RCTL_BA_ACC:
+ break;
+ case FC_RCTL_BA_RJT:
+ brp = fc_frame_payload_get(fp, sizeof(*brp));
+ if (brp && brp->br_reason == FC_BA_RJT_LOG_ERR)
+ break;
+ /* fall thru */
+ default:
+ /*
+ * We let the command time out and scsi-ml
+ * recover in this case, therefore the ba_done
+ * flag is cleared.
+ */
+ ba_done = 0;
+ }
+
+ if (ba_done) {
+ fsp->state |= FC_SRB_ABORTED;
+ fsp->state &= ~FC_SRB_ABORT_PENDING;
+
+ if (fsp->wait_for_comp)
+ complete(&fsp->tm_done);
+ else
+ fc_fcp_complete_locked(fsp);
+ }
+}
+
+/*
+ * fc_fcp_reduce_can_queue - drop can_queue
+ * @lp: lport to drop queueing for
+ *
+ * If we are getting memory allocation failures, then we may
+ * be trying to execute too many commands. We let the running
+ * commands complete or time out, then try again with a reduced
+ * can_queue. Eventually we will hit the point where we are
+ * running entirely on reserved structs.
+ */
+static void fc_fcp_reduce_can_queue(struct fc_lport *lp)
+{
+ struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
+ unsigned long flags;
+ int can_queue;
+
+ spin_lock_irqsave(lp->host->host_lock, flags);
+ if (si->throttled)
+ goto done;
+ si->throttled = 1;
+
+ can_queue = lp->host->can_queue;
+ can_queue >>= 1;
+ if (!can_queue)
+ can_queue = 1;
+ lp->host->can_queue = can_queue;
+ shost_printk(KERN_ERR, lp->host, "Could not allocate frame.\n"
+ "Reducing can_queue to %d.\n", can_queue);
+done:
+ spin_unlock_irqrestore(lp->host->host_lock, flags);
+}
+
+/*
+ * The exchange manager calls this routine to process
+ * SCSI exchanges.
+ *
+ * Return : None
+ * Context : called from soft-IRQ context;
+ * must not be called holding the list lock
+ */
+static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
+{
+ struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
+ struct fc_lport *lp;
+ struct fc_frame_header *fh;
+ struct fcp_txrdy *dd;
+ u8 r_ctl;
+ int rc = 0;
+
+ if (IS_ERR(fp))
+ goto errout;
+
+ fh = fc_frame_header_get(fp);
+ r_ctl = fh->fh_r_ctl;
+ lp = fsp->lp;
+
+ if (!(lp->state & LPORT_ST_READY))
+ goto out;
+ if (fc_fcp_lock_pkt(fsp))
+ goto out;
+ fsp->last_pkt_time = jiffies;
+
+ if (fh->fh_type == FC_TYPE_BLS) {
+ fc_fcp_abts_resp(fsp, fp);
+ goto unlock;
+ }
+
+ if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING))
+ goto unlock;
+
+ if (r_ctl == FC_RCTL_DD_DATA_DESC) {
+ /*
+ * received XFER RDY from the target
+ * need to send data to the target
+ */
+ WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
+ dd = fc_frame_payload_get(fp, sizeof(*dd));
+ WARN_ON(!dd);
+
+ rc = fc_fcp_send_data(fsp, seq,
+ (size_t) ntohl(dd->ft_data_ro),
+ (size_t) ntohl(dd->ft_burst_len));
+ if (!rc)
+ seq->rec_data = fsp->xfer_len;
+ else if (rc == -ENOMEM)
+ fsp->state |= FC_SRB_NOMEM;
+ } else if (r_ctl == FC_RCTL_DD_SOL_DATA) {
+ /*
+ * received a DATA frame
+ * next we will copy the data to the system buffer
+ */
+ WARN_ON(fr_len(fp) < sizeof(*fh)); /* len may be 0 */
+ fc_fcp_recv_data(fsp, fp);
+ seq->rec_data = fsp->xfer_contig_end;
+ } else if (r_ctl == FC_RCTL_DD_CMD_STATUS) {
+ WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
+
+ fc_fcp_resp(fsp, fp);
+ } else {
+ FC_DBG("unexpected frame. r_ctl %x\n", r_ctl);
+ }
+unlock:
+ fc_fcp_unlock_pkt(fsp);
+out:
+ fc_frame_free(fp);
+errout:
+ if (IS_ERR(fp))
+ fc_fcp_error(fsp, fp);
+ else if (rc == -ENOMEM)
+ fc_fcp_reduce_can_queue(lp);
+}
+
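+/*
+ * fc_fcp_resp - parse an FCP RSP frame and update the fcp packet.
+ * Wire layout (sketch): the FC frame header is followed by fcp_resp;
+ * when any flag bits are set, an fcp_resp_ext follows, optionally
+ * trailed by response-info and sense data whose lengths are given by
+ * the extension.
+ */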
+static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+ struct fc_frame_header *fh;
+ struct fcp_resp *fc_rp;
+ struct fcp_resp_ext *rp_ex;
+ struct fcp_resp_rsp_info *fc_rp_info;
+ u32 plen;
+ u32 expected_len;
+ u32 respl = 0;
+ u32 snsl = 0;
+ u8 flags = 0;
+
+ plen = fr_len(fp);
+ fh = (struct fc_frame_header *)fr_hdr(fp);
+ if (unlikely(plen < sizeof(*fh) + sizeof(*fc_rp)))
+ goto len_err;
+ plen -= sizeof(*fh);
+ fc_rp = (struct fcp_resp *)(fh + 1);
+ fsp->cdb_status = fc_rp->fr_status;
+ flags = fc_rp->fr_flags;
+ fsp->scsi_comp_flags = flags;
+ expected_len = fsp->data_len;
+
+ if (unlikely((flags & ~FCP_CONF_REQ) || fc_rp->fr_status)) {
+ rp_ex = (void *)(fc_rp + 1);
+ if (flags & (FCP_RSP_LEN_VAL | FCP_SNS_LEN_VAL)) {
+ if (plen < sizeof(*fc_rp) + sizeof(*rp_ex))
+ goto len_err;
+ fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1);
+ if (flags & FCP_RSP_LEN_VAL) {
+ respl = ntohl(rp_ex->fr_rsp_len);
+ if (respl != sizeof(*fc_rp_info))
+ goto len_err;
+ if (fsp->wait_for_comp) {
+ /* Abuse cdb_status for rsp code */
+ fsp->cdb_status = fc_rp_info->rsp_code;
+ complete(&fsp->tm_done);
+ /*
+ * tmfs will not have any scsi cmd so
+ * exit here
+ */
+ return;
+ } else
+ goto err;
+ }
+ if (flags & FCP_SNS_LEN_VAL) {
+ snsl = ntohl(rp_ex->fr_sns_len);
+ if (snsl > SCSI_SENSE_BUFFERSIZE)
+ snsl = SCSI_SENSE_BUFFERSIZE;
+ memcpy(fsp->cmd->sense_buffer,
+ (char *)fc_rp_info + respl, snsl);
+ }
+ }
+ if (flags & (FCP_RESID_UNDER | FCP_RESID_OVER)) {
+ if (plen < sizeof(*fc_rp) + sizeof(rp_ex->fr_resid))
+ goto len_err;
+ if (flags & FCP_RESID_UNDER) {
+ fsp->scsi_resid = ntohl(rp_ex->fr_resid);
+ /*
+ * The cmnd->underflow is the minimum number of
+ * bytes that must be transferred for this
+ * command. Provided a sense condition is not
+ * present, make sure the actual amount
+ * transferred is at least the underflow value
+ * or fail.
+ */
+ if (!(flags & FCP_SNS_LEN_VAL) &&
+ (fc_rp->fr_status == 0) &&
+ (scsi_bufflen(fsp->cmd) -
+ fsp->scsi_resid) < fsp->cmd->underflow)
+ goto err;
+ expected_len -= fsp->scsi_resid;
+ } else {
+ fsp->status_code = FC_ERROR;
+ }
+ }
+ }
+ fsp->state |= FC_SRB_RCV_STATUS;
+
+ /*
+ * Check for missing or extra data frames.
+ */
+ if (unlikely(fsp->xfer_len != expected_len)) {
+ if (fsp->xfer_len < expected_len) {
+ /*
+ * Some data may be queued locally;
+ * wait at least one jiffy to see if it is delivered.
+ * If this expires without data, we may do an SRR.
+ */
+ fc_fcp_timer_set(fsp, 2);
+ return;
+ }
+ fsp->status_code = FC_DATA_OVRRUN;
+ FC_DBG("tgt %6x xfer len %zx greater than expected len %x. "
+ "data len %x\n",
+ fsp->rport->port_id,
+ fsp->xfer_len, expected_len, fsp->data_len);
+ }
+ fc_fcp_complete_locked(fsp);
+ return;
+
+len_err:
+ FC_DBG("short FCP response. flags 0x%x len %u respl %u snsl %u\n",
+ flags, fr_len(fp), respl, snsl);
+err:
+ fsp->status_code = FC_ERROR;
+ fc_fcp_complete_locked(fsp);
+}
+
+/**
+ * fc_fcp_complete_locked - complete processing of a fcp packet
+ * @fsp: fcp packet
+ *
+ * This function may sleep if a timer is pending. The packet lock must be
+ * held, and the host lock must not be held.
+ */
+static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
+{
+ struct fc_lport *lp = fsp->lp;
+ struct fc_seq *seq;
+ struct fc_exch *ep;
+ u32 f_ctl;
+
+ if (fsp->state & FC_SRB_ABORT_PENDING)
+ return;
+
+ if (fsp->state & FC_SRB_ABORTED) {
+ if (!fsp->status_code)
+ fsp->status_code = FC_CMD_ABORTED;
+ } else {
+ /*
+ * Test for transport underrun, independent of response
+ * underrun status.
+ */
+ if (fsp->xfer_len < fsp->data_len && !fsp->io_status &&
+ (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
+ fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
+ fsp->status_code = FC_DATA_UNDRUN;
+ fsp->io_status = SUGGEST_RETRY << 24;
+ }
+ }
+
+ seq = fsp->seq_ptr;
+ if (seq) {
+ fsp->seq_ptr = NULL;
+ if (unlikely(fsp->scsi_comp_flags & FCP_CONF_REQ)) {
+ struct fc_frame *conf_frame;
+ struct fc_seq *csp;
+
+ csp = lp->tt.seq_start_next(seq);
+ conf_frame = fc_frame_alloc(fsp->lp, 0);
+ if (conf_frame) {
+ f_ctl = FC_FC_SEQ_INIT;
+ f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+ ep = fc_seq_exch(seq);
+ fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL,
+ ep->did, ep->sid,
+ FC_TYPE_FCP, f_ctl, 0);
+ lp->tt.seq_send(lp, csp, conf_frame);
+ }
+ }
+ lp->tt.exch_done(seq);
+ }
+ fc_io_compl(fsp);
+}
+
+static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error)
+{
+ struct fc_lport *lp = fsp->lp;
+
+ if (fsp->seq_ptr) {
+ lp->tt.exch_done(fsp->seq_ptr);
+ fsp->seq_ptr = NULL;
+ }
+ fsp->status_code = error;
+}
+
+/**
+ * fc_fcp_cleanup_each_cmd - run fn on each active command
+ * @lp: logical port
+ * @id: target id
+ * @lun: lun
+ * @error: fsp status code
+ *
+ * If lun or id is -1, they are ignored.
+ */
+static void fc_fcp_cleanup_each_cmd(struct fc_lport *lp, unsigned int id,
+ unsigned int lun, int error)
+{
+ struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
+ struct fc_fcp_pkt *fsp;
+ struct scsi_cmnd *sc_cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(lp->host->host_lock, flags);
+restart:
+ list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
+ sc_cmd = fsp->cmd;
+ if (id != -1 && scmd_id(sc_cmd) != id)
+ continue;
+
+ if (lun != -1 && sc_cmd->device->lun != lun)
+ continue;
+
+ fc_fcp_pkt_hold(fsp);
+ spin_unlock_irqrestore(lp->host->host_lock, flags);
+
+ if (!fc_fcp_lock_pkt(fsp)) {
+ fc_fcp_cleanup_cmd(fsp, error);
+ fc_io_compl(fsp);
+ fc_fcp_unlock_pkt(fsp);
+ }
+
+ fc_fcp_pkt_release(fsp);
+ spin_lock_irqsave(lp->host->host_lock, flags);
+ /*
+ * while we dropped the lock multiple pkts could
+ * have been released, so we have to start over.
+ */
+ goto restart;
+ }
+ spin_unlock_irqrestore(lp->host->host_lock, flags);
+}
+
+static void fc_fcp_abort_io(struct fc_lport *lp)
+{
+ fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_HRD_ERROR);
+}
+
+/**
+ * fc_fcp_pkt_send - send a fcp packet to the lower level.
+ * @lp: fc lport
+ * @fsp: fc packet.
+ *
+ * This is called by upper layer protocol.
+ * Return : zero for success and -1 for failure
+ * Context : called from queuecommand which can be called from process
+ * or scsi soft irq.
+ * Locks : called with the host lock and irqs disabled.
+ */
+static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
+{
+ struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
+ int rc;
+
+ fsp->cmd->SCp.ptr = (char *)fsp;
+ fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
+ fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK;
+
+ int_to_scsilun(fsp->cmd->device->lun,
+ (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
+ memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
+ list_add_tail(&fsp->list, &si->scsi_pkt_queue);
+
+ spin_unlock_irq(lp->host->host_lock);
+ rc = lp->tt.fcp_cmd_send(lp, fsp, fc_fcp_recv);
+ spin_lock_irq(lp->host->host_lock);
+ if (rc)
+ list_del(&fsp->list);
+
+ return rc;
+}
+
+static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *fp,
+ void *arg))
+{
+ struct fc_frame *fp;
+ struct fc_seq *seq;
+ struct fc_rport *rport;
+ struct fc_rport_libfc_priv *rp;
+ const size_t len = sizeof(fsp->cdb_cmd);
+ int rc = 0;
+
+ if (fc_fcp_lock_pkt(fsp))
+ return 0;
+
+ fp = fc_frame_alloc(lp, sizeof(fsp->cdb_cmd));
+ if (!fp) {
+ rc = -1;
+ goto unlock;
+ }
+
+ memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len);
+ fr_cmd(fp) = fsp->cmd;
+ rport = fsp->rport;
+ fsp->max_payload = rport->maxframe_size;
+ rp = rport->dd_data;
+
+ fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id,
+ fc_host_port_id(rp->local_port->host), FC_TYPE_FCP,
+ FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+
+ seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0);
+ if (!seq) {
+ fc_frame_free(fp);
+ rc = -1;
+ goto unlock;
+ }
+ fsp->last_pkt_time = jiffies;
+ fsp->seq_ptr = seq;
+ fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */
+
+ setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
+ fc_fcp_timer_set(fsp,
+ (fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ?
+ FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT);
+unlock:
+ fc_fcp_unlock_pkt(fsp);
+ return rc;
+}
+
+/*
+ * transport error handler
+ */
+static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+ int error = PTR_ERR(fp);
+
+ if (fc_fcp_lock_pkt(fsp))
+ return;
+
+ switch (error) {
+ case -FC_EX_CLOSED:
+ fc_fcp_retry_cmd(fsp);
+ goto unlock;
+ default:
+ FC_DBG("unknown error %ld\n", PTR_ERR(fp));
+ }
+ /*
+ * clear abort pending, because the lower layer
+ * decided to force completion.
+ */
+ fsp->state &= ~FC_SRB_ABORT_PENDING;
+ fsp->status_code = FC_CMD_PLOGO;
+ fc_fcp_complete_locked(fsp);
+unlock:
+ fc_fcp_unlock_pkt(fsp);
+}
+
+/*
+ * Scsi abort handler- calls to send an abort
+ * and then wait for abort completion
+ */
+static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
+{
+ int rc = FAILED;
+
+ if (fc_fcp_send_abort(fsp))
+ return FAILED;
+
+ init_completion(&fsp->tm_done);
+ fsp->wait_for_comp = 1;
+
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+ rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
+ spin_lock_bh(&fsp->scsi_pkt_lock);
+ fsp->wait_for_comp = 0;
+
+ if (!rc) {
+ FC_DBG("target abort cmd failed\n");
+ rc = FAILED;
+ } else if (fsp->state & FC_SRB_ABORTED) {
+ FC_DBG("target abort cmd passed\n");
+ rc = SUCCESS;
+ fc_fcp_complete_locked(fsp);
+ }
+
+ return rc;
+}
+
+/*
+ * Retry LUN reset after resource allocation failed.
+ */
+static void fc_lun_reset_send(unsigned long data)
+{
+ struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
+ struct fc_lport *lp = fsp->lp;
+ if (lp->tt.fcp_cmd_send(lp, fsp, fc_tm_done)) {
+ if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY)
+ return;
+ if (fc_fcp_lock_pkt(fsp))
+ return;
+ setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp);
+ fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
+ fc_fcp_unlock_pkt(fsp);
+ }
+}
+
+/*
+ * Scsi device reset handler- send a LUN RESET to the device
+ * and wait for reset reply
+ */
+static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
+ unsigned int id, unsigned int lun)
+{
+ int rc;
+
+ fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
+ fsp->cdb_cmd.fc_tm_flags = FCP_TMF_LUN_RESET;
+ int_to_scsilun(lun, (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
+
+ fsp->wait_for_comp = 1;
+ init_completion(&fsp->tm_done);
+
+ fc_lun_reset_send((unsigned long)fsp);
+
+ /*
+ * wait for completion of reset
+ * after that make sure all commands are terminated
+ */
+ rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
+
+ spin_lock_bh(&fsp->scsi_pkt_lock);
+ fsp->state |= FC_SRB_COMPL;
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+
+ del_timer_sync(&fsp->timer);
+
+ spin_lock_bh(&fsp->scsi_pkt_lock);
+ if (fsp->seq_ptr) {
+ lp->tt.exch_done(fsp->seq_ptr);
+ fsp->seq_ptr = NULL;
+ }
+ fsp->wait_for_comp = 0;
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+
+ if (!rc) {
+ FC_DBG("lun reset failed\n");
+ return FAILED;
+ }
+
+ /* cdb_status holds the tmf's rsp code */
+ if (fsp->cdb_status != FCP_TMF_CMPL)
+ return FAILED;
+
+ FC_DBG("lun reset to lun %u completed\n", lun);
+ fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED);
+ return SUCCESS;
+}
+
+/*
+ * Task Management response handler
+ */
+static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
+{
+ struct fc_fcp_pkt *fsp = arg;
+ struct fc_frame_header *fh;
+
+ if (IS_ERR(fp)) {
+ /*
+ * If there is an error just let it timeout or wait
+ * for TMF to be aborted if it timedout.
+ *
+ * scsi-eh will escalate for when either happens.
+ */
+ return;
+ }
+
+ if (fc_fcp_lock_pkt(fsp))
+ return;
+
+ /*
+ * raced with eh timeout handler.
+ */
+ if (!fsp->seq_ptr || !fsp->wait_for_comp) {
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+ return;
+ }
+
+ fh = fc_frame_header_get(fp);
+ if (fh->fh_type != FC_TYPE_BLS)
+ fc_fcp_resp(fsp, fp);
+ fsp->seq_ptr = NULL;
+ fsp->lp->tt.exch_done(seq);
+ fc_frame_free(fp);
+ fc_fcp_unlock_pkt(fsp);
+}
+
+static void fc_fcp_cleanup(struct fc_lport *lp)
+{
+ fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_ERROR);
+}
+
+/*
+ * fc_fcp_timeout: called by OS timer function.
+ *
+ * The timer has been inactivated and must be reactivated if desired
+ * using fc_fcp_timer_set().
+ *
+ * Algorithm:
+ *
+ * If REC is supported, just issue it, and return. The REC exchange will
+ * complete or time out, and recovery can continue at that point.
+ *
+ * Otherwise, if the response has been received but not all of the data,
+ * at least ER_TIMEOUT has elapsed since the response was received, and
+ * the command is completed.
+ *
+ * If the response has not been received, we check whether data was
+ * received recently. If it was, we continue waiting; otherwise, we
+ * abort the command.
+ */
+static void fc_fcp_timeout(unsigned long data)
+{
+ struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
+ struct fc_rport *rport = fsp->rport;
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+
+ if (fc_fcp_lock_pkt(fsp))
+ return;
+
+ if (fsp->cdb_cmd.fc_tm_flags)
+ goto unlock;
+
+ fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
+
+ if (rp->flags & FC_RP_FLAGS_REC_SUPPORTED)
+ fc_fcp_rec(fsp);
+ else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2),
+ jiffies))
+ fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
+ else if (fsp->state & FC_SRB_RCV_STATUS)
+ fc_fcp_complete_locked(fsp);
+ else
+ fc_timeout_error(fsp);
+ fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
+unlock:
+ fc_fcp_unlock_pkt(fsp);
+}
+
+/*
+ * Send a REC ELS request
+ */
+static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
+{
+ struct fc_lport *lp;
+ struct fc_frame *fp;
+ struct fc_rport *rport;
+ struct fc_rport_libfc_priv *rp;
+
+ lp = fsp->lp;
+ rport = fsp->rport;
+ rp = rport->dd_data;
+ if (!fsp->seq_ptr || rp->rp_state != RPORT_ST_READY) {
+ fsp->status_code = FC_HRD_ERROR;
+ fsp->io_status = SUGGEST_RETRY << 24;
+ fc_fcp_complete_locked(fsp);
+ return;
+ }
+ fp = fc_frame_alloc(lp, sizeof(struct fc_els_rec));
+ if (!fp)
+ goto retry;
+
+ fr_seq(fp) = fsp->seq_ptr;
+ fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id,
+ fc_host_port_id(rp->local_port->host), FC_TYPE_ELS,
+ FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+ if (lp->tt.elsct_send(lp, rport, fp, ELS_REC, fc_fcp_rec_resp,
+ fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) {
+ fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */
+ return;
+ }
+ fc_frame_free(fp);
+retry:
+ if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
+ fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
+ else
+ fc_timeout_error(fsp);
+}
+
+/*
+ * Receive handler for the REC ELS frame.
+ * If it is a reject, let the scsi layer handle the timeout.
+ * If it is an LS_ACC and the I/O was not completed, set the timeout
+ * and return; otherwise complete the exchange and tell the scsi
+ * layer to restart the I/O.
+ */
+static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
+{
+ struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
+ struct fc_els_rec_acc *recp;
+ struct fc_els_ls_rjt *rjt;
+ u32 e_stat;
+ u8 opcode;
+ u32 offset;
+ enum dma_data_direction data_dir;
+ enum fc_rctl r_ctl;
+ struct fc_rport_libfc_priv *rp;
+
+ if (IS_ERR(fp)) {
+ fc_fcp_rec_error(fsp, fp);
+ return;
+ }
+
+ if (fc_fcp_lock_pkt(fsp))
+ goto out;
+
+ fsp->recov_retry = 0;
+ opcode = fc_frame_payload_op(fp);
+ if (opcode == ELS_LS_RJT) {
+ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ switch (rjt->er_reason) {
+ default:
+ FC_DEBUG_FCP("device %x unexpected REC reject "
+ "reason %d expl %d\n",
+ fsp->rport->port_id, rjt->er_reason,
+ rjt->er_explan);
+ /* fall through */
+ case ELS_RJT_UNSUP:
+ FC_DEBUG_FCP("device does not support REC\n");
+ rp = fsp->rport->dd_data;
+ /*
+			 * If we do not support RECs or got some bogus
+			 * reason, then reset the timer so we check
+			 * whether we are making progress.
+ */
+ rp->flags &= ~FC_RP_FLAGS_REC_SUPPORTED;
+ fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
+ break;
+ case ELS_RJT_LOGIC:
+ case ELS_RJT_UNAB:
+ /*
+ * If no data transfer, the command frame got dropped
+ * so we just retry. If data was transferred, we
+ * lost the response but the target has no record,
+ * so we abort and retry.
+ */
+ if (rjt->er_explan == ELS_EXPL_OXID_RXID &&
+ fsp->xfer_len == 0) {
+ fc_fcp_retry_cmd(fsp);
+ break;
+ }
+ fc_timeout_error(fsp);
+ break;
+ }
+ } else if (opcode == ELS_LS_ACC) {
+ if (fsp->state & FC_SRB_ABORTED)
+ goto unlock_out;
+
+ data_dir = fsp->cmd->sc_data_direction;
+ recp = fc_frame_payload_get(fp, sizeof(*recp));
+ offset = ntohl(recp->reca_fc4value);
+ e_stat = ntohl(recp->reca_e_stat);
+
+ if (e_stat & ESB_ST_COMPLETE) {
+
+ /*
+ * The exchange is complete.
+ *
+ * For output, we must've lost the response.
+ * For input, all data must've been sent.
+			 * We may have lost the response
+ * (and a confirmation was requested) and maybe
+ * some data.
+ *
+ * If all data received, send SRR
+ * asking for response. If partial data received,
+ * or gaps, SRR requests data at start of gap.
+ * Recovery via SRR relies on in-order-delivery.
+ */
+ if (data_dir == DMA_TO_DEVICE) {
+ r_ctl = FC_RCTL_DD_CMD_STATUS;
+ } else if (fsp->xfer_contig_end == offset) {
+ r_ctl = FC_RCTL_DD_CMD_STATUS;
+ } else {
+ offset = fsp->xfer_contig_end;
+ r_ctl = FC_RCTL_DD_SOL_DATA;
+ }
+ fc_fcp_srr(fsp, r_ctl, offset);
+ } else if (e_stat & ESB_ST_SEQ_INIT) {
+
+ /*
+ * The remote port has the initiative, so just
+ * keep waiting for it to complete.
+ */
+ fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
+ } else {
+
+ /*
+ * The exchange is incomplete, we have seq. initiative.
+ * Lost response with requested confirmation,
+ * lost confirmation, lost transfer ready or
+ * lost write data.
+ *
+ * For output, if not all data was received, ask
+ * for transfer ready to be repeated.
+ *
+ * If we received or sent all the data, send SRR to
+ * request response.
+ *
+ * If we lost a response, we may have lost some read
+ * data as well.
+ */
+ r_ctl = FC_RCTL_DD_SOL_DATA;
+ if (data_dir == DMA_TO_DEVICE) {
+ r_ctl = FC_RCTL_DD_CMD_STATUS;
+ if (offset < fsp->data_len)
+ r_ctl = FC_RCTL_DD_DATA_DESC;
+ } else if (offset == fsp->xfer_contig_end) {
+ r_ctl = FC_RCTL_DD_CMD_STATUS;
+ } else if (fsp->xfer_contig_end < offset) {
+ offset = fsp->xfer_contig_end;
+ }
+ fc_fcp_srr(fsp, r_ctl, offset);
+ }
+ }
+unlock_out:
+ fc_fcp_unlock_pkt(fsp);
+out:
+ fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */
+ fc_frame_free(fp);
+}
+
+/*
+ * Handle error response or timeout for REC exchange.
+ */
+static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+ int error = PTR_ERR(fp);
+
+ if (fc_fcp_lock_pkt(fsp))
+ goto out;
+
+ switch (error) {
+ case -FC_EX_CLOSED:
+ fc_fcp_retry_cmd(fsp);
+ break;
+
+ default:
+ FC_DBG("REC %p fid %x error unexpected error %d\n",
+ fsp, fsp->rport->port_id, error);
+ fsp->status_code = FC_CMD_PLOGO;
+ /* fall through */
+
+ case -FC_EX_TIMEOUT:
+ /*
+ * Assume REC or LS_ACC was lost.
+ * The exchange manager will have aborted REC, so retry.
+ */
+ FC_DBG("REC fid %x error error %d retry %d/%d\n",
+ fsp->rport->port_id, error, fsp->recov_retry,
+ FC_MAX_RECOV_RETRY);
+ if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
+ fc_fcp_rec(fsp);
+ else
+ fc_timeout_error(fsp);
+ break;
+ }
+ fc_fcp_unlock_pkt(fsp);
+out:
+ fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */
+}
+
+/*
+ * Timeout error routine:
+ * aborts the I/O, closes the exchange and
+ * sends a completion notification to the scsi layer
+ */
+static void fc_timeout_error(struct fc_fcp_pkt *fsp)
+{
+ fsp->status_code = FC_CMD_TIME_OUT;
+ fsp->cdb_status = 0;
+ fsp->io_status = 0;
+ /*
+ * if this fails then we let the scsi command timer fire and
+ * scsi-ml escalate.
+ */
+ fc_fcp_send_abort(fsp);
+}
+
+/*
+ * Sequence retransmission request.
+ * This is called after receiving status but insufficient data, or
+ * when expecting status but the request has timed out.
+ */
+static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
+{
+ struct fc_lport *lp = fsp->lp;
+ struct fc_rport *rport;
+ struct fc_rport_libfc_priv *rp;
+ struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr);
+ struct fc_seq *seq;
+ struct fcp_srr *srr;
+ struct fc_frame *fp;
+ u8 cdb_op;
+
+ rport = fsp->rport;
+ rp = rport->dd_data;
+ cdb_op = fsp->cdb_cmd.fc_cdb[0];
+
+ if (!(rp->flags & FC_RP_FLAGS_RETRY) || rp->rp_state != RPORT_ST_READY)
+ goto retry; /* shouldn't happen */
+ fp = fc_frame_alloc(lp, sizeof(*srr));
+ if (!fp)
+ goto retry;
+
+ srr = fc_frame_payload_get(fp, sizeof(*srr));
+ memset(srr, 0, sizeof(*srr));
+ srr->srr_op = ELS_SRR;
+ srr->srr_ox_id = htons(ep->oxid);
+ srr->srr_rx_id = htons(ep->rxid);
+ srr->srr_r_ctl = r_ctl;
+ srr->srr_rel_off = htonl(offset);
+
+ fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id,
+ fc_host_port_id(rp->local_port->host), FC_TYPE_FCP,
+ FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+
+ seq = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL,
+ fsp, jiffies_to_msecs(FC_SCSI_REC_TOV));
+ if (!seq) {
+ fc_frame_free(fp);
+ goto retry;
+ }
+ fsp->recov_seq = seq;
+ fsp->xfer_len = offset;
+ fsp->xfer_contig_end = offset;
+ fsp->state &= ~FC_SRB_RCV_STATUS;
+ fc_fcp_pkt_hold(fsp); /* hold for outstanding SRR */
+ return;
+retry:
+ fc_fcp_retry_cmd(fsp);
+}
+
+/*
+ * Handle response from SRR.
+ */
+static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
+{
+ struct fc_fcp_pkt *fsp = arg;
+ struct fc_frame_header *fh;
+
+ if (IS_ERR(fp)) {
+ fc_fcp_srr_error(fsp, fp);
+ return;
+ }
+
+ if (fc_fcp_lock_pkt(fsp))
+ goto out;
+
+ fh = fc_frame_header_get(fp);
+ /*
+ * BUG? fc_fcp_srr_error calls exch_done which would release
+ * the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT,
+ * then fc_exch_timeout would be sending an abort. The exch_done
+ * call by fc_fcp_srr_error would prevent fc_exch.c from seeing
+ * an abort response though.
+ */
+ if (fh->fh_type == FC_TYPE_BLS) {
+ fc_fcp_unlock_pkt(fsp);
+ return;
+ }
+
+ fsp->recov_seq = NULL;
+ switch (fc_frame_payload_op(fp)) {
+ case ELS_LS_ACC:
+ fsp->recov_retry = 0;
+ fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
+ break;
+ case ELS_LS_RJT:
+ default:
+ fc_timeout_error(fsp);
+ break;
+ }
+ fc_fcp_unlock_pkt(fsp);
+ fsp->lp->tt.exch_done(seq);
+out:
+ fc_frame_free(fp);
+ fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
+}
+
+static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+ if (fc_fcp_lock_pkt(fsp))
+ goto out;
+ fsp->lp->tt.exch_done(fsp->recov_seq);
+ fsp->recov_seq = NULL;
+ switch (PTR_ERR(fp)) {
+ case -FC_EX_TIMEOUT:
+ if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
+ fc_fcp_rec(fsp);
+ else
+ fc_timeout_error(fsp);
+ break;
+ case -FC_EX_CLOSED: /* e.g., link failure */
+ /* fall through */
+ default:
+ fc_fcp_retry_cmd(fsp);
+ break;
+ }
+ fc_fcp_unlock_pkt(fsp);
+out:
+ fc_fcp_pkt_release(fsp); /* drop hold for outstanding SRR */
+}
+
+static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp)
+{
+ /* lock ? */
+ return (lp->state == LPORT_ST_READY) && (lp->link_status & FC_LINK_UP);
+}
+
+/**
+ * fc_queuecommand - The queuecommand function of the scsi template
+ * @cmd: struct scsi_cmnd to be executed
+ * @done: Callback function to be called when cmd is completed
+ *
+ * This is the I/O strategy routine, called by the scsi layer.
+ * It is called with the host_lock held.
+ */
+int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
+{
+ struct fc_lport *lp;
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+ struct fc_fcp_pkt *fsp;
+ struct fc_rport_libfc_priv *rp;
+ int rval;
+ int rc = 0;
+ struct fcoe_dev_stats *stats;
+
+ lp = shost_priv(sc_cmd->device->host);
+
+ rval = fc_remote_port_chkready(rport);
+ if (rval) {
+ sc_cmd->result = rval;
+ done(sc_cmd);
+ goto out;
+ }
+
+ if (!*(struct fc_remote_port **)rport->dd_data) {
+ /*
+ * rport is transitioning from blocked/deleted to
+ * online
+ */
+ sc_cmd->result = DID_IMM_RETRY << 16;
+ done(sc_cmd);
+ goto out;
+ }
+
+ rp = rport->dd_data;
+
+ if (!fc_fcp_lport_queue_ready(lp)) {
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+ fsp = fc_fcp_pkt_alloc(lp, GFP_ATOMIC);
+ if (fsp == NULL) {
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+ /*
+ * build the libfc request pkt
+ */
+ fsp->cmd = sc_cmd; /* save the cmd */
+ fsp->lp = lp; /* save the softc ptr */
+ fsp->rport = rport; /* set the remote port ptr */
+ sc_cmd->scsi_done = done;
+
+ /*
+ * set up the transfer length
+ */
+ fsp->data_len = scsi_bufflen(sc_cmd);
+ fsp->xfer_len = 0;
+
+ /*
+ * setup the data direction
+ */
+ stats = lp->dev_stats[smp_processor_id()];
+ if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ fsp->req_flags = FC_SRB_READ;
+ stats->InputRequests++;
+ stats->InputMegabytes = fsp->data_len;
+ } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
+ fsp->req_flags = FC_SRB_WRITE;
+ stats->OutputRequests++;
+ stats->OutputMegabytes = fsp->data_len;
+ } else {
+ fsp->req_flags = 0;
+ stats->ControlRequests++;
+ }
+
+ fsp->tgt_flags = rp->flags;
+
+ init_timer(&fsp->timer);
+ fsp->timer.data = (unsigned long)fsp;
+
+ /*
+	 * Send it to the lower layer.
+	 * If this returns non-zero, ask the midlayer to
+	 * requeue the command.
+ */
+ rval = fc_fcp_pkt_send(lp, fsp);
+ if (rval != 0) {
+ fsp->state = FC_SRB_FREE;
+ fc_fcp_pkt_release(fsp);
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ }
+out:
+ return rc;
+}
+EXPORT_SYMBOL(fc_queuecommand);
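+
+/*
+ * Usage sketch (illustrative only; the template name is hypothetical):
+ * a lower-level driver would typically plug the exported entry points
+ * into its scsi_host_template, e.g.
+ *
+ *	static struct scsi_host_template example_sht = {
+ *		.queuecommand		= fc_queuecommand,
+ *		.eh_abort_handler	= fc_eh_abort,
+ *		.eh_device_reset_handler = fc_eh_device_reset,
+ *		.eh_host_reset_handler	= fc_eh_host_reset,
+ *		.slave_alloc		= fc_slave_alloc,
+ *		.change_queue_depth	= fc_change_queue_depth,
+ *		.change_queue_type	= fc_change_queue_type,
+ *	};
+ */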
+
+/**
+ * fc_io_compl - Handle responses for completed commands
+ * @fsp: scsi packet
+ *
+ * Translates an error to a Linux SCSI error.
+ *
+ * The fcp packet lock must be held when calling.
+ */
+static void fc_io_compl(struct fc_fcp_pkt *fsp)
+{
+ struct fc_fcp_internal *si;
+ struct scsi_cmnd *sc_cmd;
+ struct fc_lport *lp;
+ unsigned long flags;
+
+ fsp->state |= FC_SRB_COMPL;
+ if (!(fsp->state & FC_SRB_FCP_PROCESSING_TMO)) {
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+ del_timer_sync(&fsp->timer);
+ spin_lock_bh(&fsp->scsi_pkt_lock);
+ }
+
+ lp = fsp->lp;
+ si = fc_get_scsi_internal(lp);
+ spin_lock_irqsave(lp->host->host_lock, flags);
+ if (!fsp->cmd) {
+ spin_unlock_irqrestore(lp->host->host_lock, flags);
+ return;
+ }
+
+ /*
+	 * If a command timed out while we had to try to throttle I/O
+	 * and it is now getting cleaned up, then we are about to
+	 * try again, so clear the throttled flag in case we get more
+	 * timeouts.
+ */
+ if (si->throttled && fsp->state & FC_SRB_NOMEM)
+ si->throttled = 0;
+
+ sc_cmd = fsp->cmd;
+ fsp->cmd = NULL;
+
+ if (!sc_cmd->SCp.ptr) {
+ spin_unlock_irqrestore(lp->host->host_lock, flags);
+ return;
+ }
+
+ CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
+ switch (fsp->status_code) {
+ case FC_COMPLETE:
+ if (fsp->cdb_status == 0) {
+ /*
+ * good I/O status
+ */
+ sc_cmd->result = DID_OK << 16;
+ if (fsp->scsi_resid)
+ CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
+ } else if (fsp->cdb_status == QUEUE_FULL) {
+ struct scsi_device *tmp_sdev;
+ struct scsi_device *sdev = sc_cmd->device;
+
+ shost_for_each_device(tmp_sdev, sdev->host) {
+ if (tmp_sdev->id != sdev->id)
+ continue;
+
+ if (tmp_sdev->queue_depth > 1) {
+ scsi_track_queue_full(tmp_sdev,
+ tmp_sdev->
+ queue_depth - 1);
+ }
+ }
+ sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
+ } else {
+ /*
+ * transport level I/O was ok but scsi
+ * has non zero status
+ */
+ sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
+ }
+ break;
+ case FC_ERROR:
+ sc_cmd->result = DID_ERROR << 16;
+ break;
+ case FC_DATA_UNDRUN:
+ if (fsp->cdb_status == 0) {
+ /*
+			 * scsi status is good but transport level
+			 * underrun. For a read, should this be an error?
+ */
+ sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
+ } else {
+ /*
+ * scsi got underrun, this is an error
+ */
+ CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
+ sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
+ }
+ break;
+ case FC_DATA_OVRRUN:
+ /*
+ * overrun is an error
+ */
+ sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
+ break;
+ case FC_CMD_ABORTED:
+ sc_cmd->result = (DID_ABORT << 16) | fsp->io_status;
+ break;
+ case FC_CMD_TIME_OUT:
+ sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
+ break;
+ case FC_CMD_RESET:
+ sc_cmd->result = (DID_RESET << 16);
+ break;
+ case FC_HRD_ERROR:
+ sc_cmd->result = (DID_NO_CONNECT << 16);
+ break;
+ default:
+ sc_cmd->result = (DID_ERROR << 16);
+ break;
+ }
+
+ list_del(&fsp->list);
+ sc_cmd->SCp.ptr = NULL;
+ sc_cmd->scsi_done(sc_cmd);
+ spin_unlock_irqrestore(lp->host->host_lock, flags);
+
+ /* release ref from initial allocation in queue command */
+ fc_fcp_pkt_release(fsp);
+}
+
+/**
+ * fc_fcp_complete - complete processing of a fcp packet
+ * @fsp: fcp packet
+ *
+ * This function may sleep if a fsp timer is pending.
+ * The host lock must not be held by caller.
+ */
+void fc_fcp_complete(struct fc_fcp_pkt *fsp)
+{
+ if (fc_fcp_lock_pkt(fsp))
+ return;
+
+ fc_fcp_complete_locked(fsp);
+ fc_fcp_unlock_pkt(fsp);
+}
+EXPORT_SYMBOL(fc_fcp_complete);
+
+/**
+ * fc_eh_abort - Abort a command from the scsi host template
+ * @sc_cmd: scsi command to abort
+ *
+ * Sends an ABTS to the target device and waits for the response.
+ * sc_cmd is the pointer to the command to be aborted.
+ */
+int fc_eh_abort(struct scsi_cmnd *sc_cmd)
+{
+ struct fc_fcp_pkt *fsp;
+ struct fc_lport *lp;
+ int rc = FAILED;
+ unsigned long flags;
+
+ lp = shost_priv(sc_cmd->device->host);
+ if (lp->state != LPORT_ST_READY)
+ return rc;
+ else if (!(lp->link_status & FC_LINK_UP))
+ return rc;
+
+ spin_lock_irqsave(lp->host->host_lock, flags);
+ fsp = CMD_SP(sc_cmd);
+ if (!fsp) {
+ /* command completed while scsi eh was setting up */
+ spin_unlock_irqrestore(lp->host->host_lock, flags);
+ return SUCCESS;
+ }
+	/* grab a ref so the fsp and sc_cmd cannot be released from under us */
+ fc_fcp_pkt_hold(fsp);
+ spin_unlock_irqrestore(lp->host->host_lock, flags);
+
+ if (fc_fcp_lock_pkt(fsp)) {
+ /* completed while we were waiting for timer to be deleted */
+ rc = SUCCESS;
+ goto release_pkt;
+ }
+
+ rc = fc_fcp_pkt_abort(lp, fsp);
+ fc_fcp_unlock_pkt(fsp);
+
+release_pkt:
+ fc_fcp_pkt_release(fsp);
+ return rc;
+}
+EXPORT_SYMBOL(fc_eh_abort);
+
+/**
+ * fc_eh_device_reset - Reset a single LUN
+ * @sc_cmd: scsi command
+ *
+ * Set from the scsi host template to send a TM command to the target and
+ * wait for the response.
+ */
+int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
+{
+ struct fc_lport *lp;
+ struct fc_fcp_pkt *fsp;
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+ int rc = FAILED;
+ struct fc_rport_libfc_priv *rp;
+ int rval;
+
+ rval = fc_remote_port_chkready(rport);
+ if (rval)
+ goto out;
+
+ rp = rport->dd_data;
+ lp = shost_priv(sc_cmd->device->host);
+
+ if (lp->state != LPORT_ST_READY)
+ return rc;
+
+ fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO);
+ if (fsp == NULL) {
+ FC_DBG("could not allocate scsi_pkt\n");
+ sc_cmd->result = DID_NO_CONNECT << 16;
+ goto out;
+ }
+
+ /*
+ * Build the libfc request pkt. Do not set the scsi cmnd, because
+ * the sc passed in is not setup for execution like when sent
+ * through the queuecommand callout.
+ */
+ fsp->lp = lp; /* save the softc ptr */
+ fsp->rport = rport; /* set the remote port ptr */
+
+ /*
+ * flush outstanding commands
+ */
+ rc = fc_lun_reset(lp, fsp, scmd_id(sc_cmd), sc_cmd->device->lun);
+ fsp->state = FC_SRB_FREE;
+ fc_fcp_pkt_release(fsp);
+
+out:
+ return rc;
+}
+EXPORT_SYMBOL(fc_eh_device_reset);
+
+/**
+ * fc_eh_host_reset - The reset function will reset the ports on the host.
+ * @sc_cmd: scsi command
+ */
+int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
+{
+ struct Scsi_Host *shost = sc_cmd->device->host;
+ struct fc_lport *lp = shost_priv(shost);
+ unsigned long wait_tmo;
+
+ lp->tt.lport_reset(lp);
+ wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
+ while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo))
+ msleep(1000);
+
+ if (fc_fcp_lport_queue_ready(lp)) {
+ shost_printk(KERN_INFO, shost, "Host reset succeeded.\n");
+ return SUCCESS;
+ } else {
+ shost_printk(KERN_INFO, shost, "Host reset failed. "
+ "lport not ready.\n");
+ return FAILED;
+ }
+}
+EXPORT_SYMBOL(fc_eh_host_reset);
+
+/**
+ * fc_slave_alloc - configure queue depth
+ * @sdev: scsi device
+ *
+ * Configures the queue depth based on the host's cmd_per_lun. If not set,
+ * then we use the libfc default.
+ */
+int fc_slave_alloc(struct scsi_device *sdev)
+{
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ int queue_depth;
+
+ if (!rport || fc_remote_port_chkready(rport))
+ return -ENXIO;
+
+ if (sdev->tagged_supported) {
+ if (sdev->host->hostt->cmd_per_lun)
+ queue_depth = sdev->host->hostt->cmd_per_lun;
+ else
+ queue_depth = FC_FCP_DFLT_QUEUE_DEPTH;
+ scsi_activate_tcq(sdev, queue_depth);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(fc_slave_alloc);
+
+int fc_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+ return sdev->queue_depth;
+}
+EXPORT_SYMBOL(fc_change_queue_depth);
+
+int fc_change_queue_type(struct scsi_device *sdev, int tag_type)
+{
+ if (sdev->tagged_supported) {
+ scsi_set_tag_type(sdev, tag_type);
+ if (tag_type)
+ scsi_activate_tcq(sdev, sdev->queue_depth);
+ else
+ scsi_deactivate_tcq(sdev, sdev->queue_depth);
+ } else
+ tag_type = 0;
+
+ return tag_type;
+}
+EXPORT_SYMBOL(fc_change_queue_type);
+
+void fc_fcp_destroy(struct fc_lport *lp)
+{
+ struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
+
+ if (!list_empty(&si->scsi_pkt_queue))
+ printk(KERN_ERR "Leaked scsi packets.\n");
+
+ mempool_destroy(si->scsi_pkt_pool);
+ kfree(si);
+ lp->scsi_priv = NULL;
+}
+EXPORT_SYMBOL(fc_fcp_destroy);
+
+int fc_fcp_init(struct fc_lport *lp)
+{
+ int rc;
+ struct fc_fcp_internal *si;
+
+ if (!lp->tt.fcp_cmd_send)
+ lp->tt.fcp_cmd_send = fc_fcp_cmd_send;
+
+ if (!lp->tt.fcp_cleanup)
+ lp->tt.fcp_cleanup = fc_fcp_cleanup;
+
+ if (!lp->tt.fcp_abort_io)
+ lp->tt.fcp_abort_io = fc_fcp_abort_io;
+
+ si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
+ if (!si)
+ return -ENOMEM;
+ lp->scsi_priv = si;
+ INIT_LIST_HEAD(&si->scsi_pkt_queue);
+
+ si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
+ if (!si->scsi_pkt_pool) {
+ rc = -ENOMEM;
+ goto free_internal;
+ }
+ return 0;
+
+free_internal:
+ kfree(si);
+ return rc;
+}
+EXPORT_SYMBOL(fc_fcp_init);
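+
+/*
+ * Usage sketch (illustrative only; the error label is hypothetical):
+ * fc_fcp_init() is expected to be called once per lport during setup,
+ * before any I/O is queued:
+ *
+ *	if (fc_fcp_init(lp))
+ *		goto out_free;
+ */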
+
+static int __init libfc_init(void)
+{
+ int rc;
+
+ scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt",
+ sizeof(struct fc_fcp_pkt),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (scsi_pkt_cachep == NULL) {
+ FC_DBG("Unable to allocate SRB cache...module load failed!");
+ return -ENOMEM;
+ }
+
+ rc = fc_setup_exch_mgr();
+ if (rc)
+ goto destroy_pkt_cache;
+
+ rc = fc_setup_rport();
+ if (rc)
+ goto destroy_em;
+
+ return rc;
+destroy_em:
+ fc_destroy_exch_mgr();
+destroy_pkt_cache:
+ kmem_cache_destroy(scsi_pkt_cachep);
+ return rc;
+}
+
+static void __exit libfc_exit(void)
+{
+ kmem_cache_destroy(scsi_pkt_cachep);
+ fc_destroy_exch_mgr();
+ fc_destroy_rport();
+}
+
+module_init(libfc_init);
+module_exit(libfc_exit);
diff --git a/drivers/scsi/libfc/fc_frame.c b/drivers/scsi/libfc/fc_frame.c
new file mode 100644
index 000000000000..63fe00cfe667
--- /dev/null
+++ b/drivers/scsi/libfc/fc_frame.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * Frame allocation.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/crc32.h>
+
+#include <scsi/fc_frame.h>
+
+/*
+ * Check the CRC in a frame.
+ */
+u32 fc_frame_crc_check(struct fc_frame *fp)
+{
+ u32 crc;
+ u32 error;
+ const u8 *bp;
+ unsigned int len;
+
+ WARN_ON(!fc_frame_is_linear(fp));
+ fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+ len = (fr_len(fp) + 3) & ~3; /* round up length to include fill */
+ bp = (const u8 *) fr_hdr(fp);
+ crc = ~crc32(~0, bp, len);
+ error = crc ^ fr_crc(fp);
+ return error;
+}
+EXPORT_SYMBOL(fc_frame_crc_check);
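+
+/*
+ * Usage sketch (illustrative only; the drop label is hypothetical):
+ * a receive path that defers CRC checking would validate a linear
+ * frame before handing it up:
+ *
+ *	if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
+ *	    fc_frame_crc_check(fp))
+ *		goto drop;
+ */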
+
+/*
+ * Allocate a frame intended to be sent via fcoe_xmit.
+ * Get an sk_buff for the frame and set the length.
+ */
+struct fc_frame *__fc_frame_alloc(size_t len)
+{
+ struct fc_frame *fp;
+ struct sk_buff *skb;
+
+ WARN_ON((len % sizeof(u32)) != 0);
+ len += sizeof(struct fc_frame_header);
+ skb = dev_alloc_skb(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM);
+ if (!skb)
+ return NULL;
+ fp = (struct fc_frame *) skb;
+ fc_frame_init(fp);
+ skb_reserve(skb, FC_FRAME_HEADROOM);
+ skb_put(skb, len);
+ return fp;
+}
+EXPORT_SYMBOL(__fc_frame_alloc);
+
+
+struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len)
+{
+ struct fc_frame *fp;
+ size_t fill;
+
+ fill = payload_len % 4;
+ if (fill != 0)
+ fill = 4 - fill;
+ fp = __fc_frame_alloc(payload_len + fill);
+ if (fp) {
+ memset((char *) fr_hdr(fp) + payload_len, 0, fill);
+ /* trim is OK, we just allocated it so there are no fragments */
+ skb_trim(fp_skb(fp),
+ payload_len + sizeof(struct fc_frame_header));
+ }
+ return fp;
+}
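+
+/*
+ * Usage sketch (illustrative only): callers normally go through the
+ * fc_frame_alloc() wrapper from fc_frame.h and then fill in the FC
+ * header, as done throughout libfc:
+ *
+ *	fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
+ *	if (fp)
+ *		fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did, sid,
+ *			       FC_TYPE_ELS, f_ctl, 0);
+ */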
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
new file mode 100644
index 000000000000..0b9bdb1fb807
--- /dev/null
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -0,0 +1,1604 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * PORT LOCKING NOTES
+ *
+ * These comments only apply to the 'port code' which consists of the lport,
+ * disc and rport blocks.
+ *
+ * MOTIVATION
+ *
+ * The lport, disc and rport blocks all have mutexes that are used to protect
+ * those objects. The main motivation for these locks is to prevent an
+ * lport from being reset just before we send a frame. In that scenario the
+ * lport's FID would get set to zero and then we'd send a frame with an
+ * invalid SID. We also need to ensure that states don't change unexpectedly
+ * while processing another state.
+ *
+ * HIERARCHY
+ *
+ * The following hierarchy defines the locking rules. A greater lock
+ * may be held before acquiring a lesser lock, but a lesser lock should never
+ * be held while attempting to acquire a greater lock. Here is the hierarchy:
+ *
+ * lport > disc, lport > rport, disc > rport
+ *
+ * CALLBACKS
+ *
+ * The callbacks cause complications with this scheme. There is a callback
+ * from the rport (to either lport or disc) and a callback from disc
+ * (to the lport).
+ *
+ * As rports exit the rport state machine a callback is made to the owner of
+ * the rport to notify success or failure. Since the callback is likely to
+ * cause the lport or disc to grab its lock we cannot hold the rport lock
+ * while making the callback. To ensure that the rport is not free'd while
+ * processing the callback the rport callbacks are serialized through a
+ * single-threaded workqueue. An rport would never be free'd while in a
+ * callback handler because no other rport work in this queue can be executed
+ * at the same time.
+ *
+ * When discovery succeeds or fails a callback is made to the lport as
+ * notification. Currently, successful discovery causes the lport to take no
+ * action. A failure will cause the lport to reset. There is likely a circular
+ * locking problem with this implementation.
+ */
+
+/*
+ * LPORT LOCKING
+ *
+ * The critical sections protected by the lport's mutex are quite broad and
+ * may be improved upon in the future. The lport code and its locking doesn't
+ * influence the I/O path, so excessive locking doesn't penalize I/O
+ * performance.
+ *
+ * The strategy is to lock whenever processing a request or response. Note
+ * that every _enter_* function corresponds to a state change. They generally
+ * change the lports state and then send a request out on the wire. We lock
+ * before calling any of these functions to protect that state change. This
+ * means that the entry points into the lport block manage the locks while
+ * the state machine can transition between states (i.e. _enter_* functions)
+ * while always staying protected.
+ *
+ * When handling responses we also hold the lport mutex broadly. When the
+ * lport receives the response frame it locks the mutex and then calls the
+ * appropriate handler for the particular response. Generally a response will
+ * trigger a state change and so the lock must already be held.
+ *
+ * Retries also have to consider the locking. The retries occur from a work
+ * context and the work function will lock the lport and then retry the state
+ * (i.e. _enter_* function).
+ */
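+
+/*
+ * For illustration only (the rport mutex field name is an assumption),
+ * the hierarchy above permits nesting such as:
+ *
+ *	mutex_lock(&lport->lp_mutex);
+ *	mutex_lock(&rdata->rp_mutex);
+ *	...
+ *	mutex_unlock(&rdata->rp_mutex);
+ *	mutex_unlock(&lport->lp_mutex);
+ *
+ * but never acquiring lp_mutex while rp_mutex is already held.
+ */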
+
+#include <linux/timer.h>
+#include <asm/unaligned.h>
+
+#include <scsi/fc/fc_gs.h>
+
+#include <scsi/libfc.h>
+#include <scsi/fc_encode.h>
+
+/* Fabric IDs to use for point-to-point mode, chosen on whims. */
+#define FC_LOCAL_PTP_FID_LO 0x010101
+#define FC_LOCAL_PTP_FID_HI 0x010102
+
+#define DNS_DELAY	3 /* Discovery delay after RSCN (in seconds) */
+
+static int fc_lport_debug;
+
+#define FC_DEBUG_LPORT(fmt...) \
+ do { \
+ if (fc_lport_debug) \
+ FC_DBG(fmt); \
+ } while (0)
+
+static void fc_lport_error(struct fc_lport *, struct fc_frame *);
+
+static void fc_lport_enter_reset(struct fc_lport *);
+static void fc_lport_enter_flogi(struct fc_lport *);
+static void fc_lport_enter_dns(struct fc_lport *);
+static void fc_lport_enter_rpn_id(struct fc_lport *);
+static void fc_lport_enter_rft_id(struct fc_lport *);
+static void fc_lport_enter_scr(struct fc_lport *);
+static void fc_lport_enter_ready(struct fc_lport *);
+static void fc_lport_enter_logo(struct fc_lport *);
+
+static const char *fc_lport_state_names[] = {
+ [LPORT_ST_NONE] = "none",
+ [LPORT_ST_FLOGI] = "FLOGI",
+ [LPORT_ST_DNS] = "dNS",
+ [LPORT_ST_RPN_ID] = "RPN_ID",
+ [LPORT_ST_RFT_ID] = "RFT_ID",
+ [LPORT_ST_SCR] = "SCR",
+ [LPORT_ST_READY] = "Ready",
+ [LPORT_ST_LOGO] = "LOGO",
+ [LPORT_ST_RESET] = "reset",
+};
+
+static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
+{
+ fc_frame_free(fp);
+ return 0;
+}
+
+/**
+ * fc_lport_rport_callback - Event handler for rport events
+ * @lport: The lport which is receiving the event
+ * @rport: The rport which the event has occurred on
+ * @event: The event that occurred
+ *
+ * Locking Note: The rport lock should not be held when calling
+ * this function.
+ */
+static void fc_lport_rport_callback(struct fc_lport *lport,
+ struct fc_rport *rport,
+ enum fc_rport_event event)
+{
+ FC_DEBUG_LPORT("Received a %d event for port (%6x)\n", event,
+ rport->port_id);
+
+ switch (event) {
+ case RPORT_EV_CREATED:
+ if (rport->port_id == FC_FID_DIR_SERV) {
+ mutex_lock(&lport->lp_mutex);
+ if (lport->state == LPORT_ST_DNS) {
+ lport->dns_rp = rport;
+ fc_lport_enter_rpn_id(lport);
+ } else {
+ FC_DEBUG_LPORT("Received an CREATED event on "
+ "port (%6x) for the directory "
+ "server, but the lport is not "
+ "in the DNS state, it's in the "
+ "%d state", rport->port_id,
+ lport->state);
+ lport->tt.rport_logoff(rport);
+ }
+ mutex_unlock(&lport->lp_mutex);
+ } else
+ FC_DEBUG_LPORT("Received an event for port (%6x) "
+ "which is not the directory server\n",
+ rport->port_id);
+ break;
+ case RPORT_EV_LOGO:
+ case RPORT_EV_FAILED:
+ case RPORT_EV_STOP:
+ if (rport->port_id == FC_FID_DIR_SERV) {
+ mutex_lock(&lport->lp_mutex);
+ lport->dns_rp = NULL;
+ mutex_unlock(&lport->lp_mutex);
+
+ } else
+ FC_DEBUG_LPORT("Received an event for port (%6x) "
+ "which is not the directory server\n",
+ rport->port_id);
+ break;
+ case RPORT_EV_NONE:
+ break;
+ }
+}
+
+/**
+ * fc_lport_state - Return a string which represents the lport's state
+ * @lport: The lport whose state is to be converted to a string
+ */
+static const char *fc_lport_state(struct fc_lport *lport)
+{
+ const char *cp;
+
+ cp = fc_lport_state_names[lport->state];
+ if (!cp)
+ cp = "unknown";
+ return cp;
+}
+
+/**
+ * fc_lport_ptp_setup - Create an rport for point-to-point mode
+ * @lport: The lport to attach the ptp rport to
+ * @remote_fid: The FID of the ptp rport
+ * @remote_wwpn: The WWPN of the ptp rport
+ * @remote_wwnn: The WWNN of the ptp rport
+ */
+static void fc_lport_ptp_setup(struct fc_lport *lport,
+ u32 remote_fid, u64 remote_wwpn,
+ u64 remote_wwnn)
+{
+ struct fc_disc_port dp;
+
+ dp.lp = lport;
+ dp.ids.port_id = remote_fid;
+ dp.ids.port_name = remote_wwpn;
+ dp.ids.node_name = remote_wwnn;
+ dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
+
+ if (lport->ptp_rp) {
+ lport->tt.rport_logoff(lport->ptp_rp);
+ lport->ptp_rp = NULL;
+ }
+
+ lport->ptp_rp = fc_rport_rogue_create(&dp);
+
+ lport->tt.rport_login(lport->ptp_rp);
+
+ fc_lport_enter_ready(lport);
+}
+
+void fc_get_host_port_type(struct Scsi_Host *shost)
+{
+ /* TODO - currently just NPORT */
+ fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+}
+EXPORT_SYMBOL(fc_get_host_port_type);
+
+void fc_get_host_port_state(struct Scsi_Host *shost)
+{
+ struct fc_lport *lp = shost_priv(shost);
+
+ if ((lp->link_status & FC_LINK_UP) == FC_LINK_UP)
+ fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ else
+ fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
+}
+EXPORT_SYMBOL(fc_get_host_port_state);
+
+void fc_get_host_speed(struct Scsi_Host *shost)
+{
+ struct fc_lport *lport = shost_priv(shost);
+
+ fc_host_speed(shost) = lport->link_speed;
+}
+EXPORT_SYMBOL(fc_get_host_speed);
+
+struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
+{
+ int i;
+ struct fc_host_statistics *fcoe_stats;
+ struct fc_lport *lp = shost_priv(shost);
+ struct timespec v0, v1;
+
+ fcoe_stats = &lp->host_stats;
+ memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
+
+ jiffies_to_timespec(jiffies, &v0);
+ jiffies_to_timespec(lp->boot_time, &v1);
+ fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
+
+ for_each_online_cpu(i) {
+ struct fcoe_dev_stats *stats = lp->dev_stats[i];
+ if (stats == NULL)
+ continue;
+ fcoe_stats->tx_frames += stats->TxFrames;
+ fcoe_stats->tx_words += stats->TxWords;
+ fcoe_stats->rx_frames += stats->RxFrames;
+ fcoe_stats->rx_words += stats->RxWords;
+ fcoe_stats->error_frames += stats->ErrorFrames;
+ fcoe_stats->invalid_crc_count += stats->InvalidCRCCount;
+ fcoe_stats->fcp_input_requests += stats->InputRequests;
+ fcoe_stats->fcp_output_requests += stats->OutputRequests;
+ fcoe_stats->fcp_control_requests += stats->ControlRequests;
+ fcoe_stats->fcp_input_megabytes += stats->InputMegabytes;
+ fcoe_stats->fcp_output_megabytes += stats->OutputMegabytes;
+ fcoe_stats->link_failure_count += stats->LinkFailureCount;
+ }
+ fcoe_stats->lip_count = -1;
+ fcoe_stats->nos_count = -1;
+ fcoe_stats->loss_of_sync_count = -1;
+ fcoe_stats->loss_of_signal_count = -1;
+ fcoe_stats->prim_seq_protocol_err_count = -1;
+ fcoe_stats->dumped_frames = -1;
+ return fcoe_stats;
+}
+EXPORT_SYMBOL(fc_get_host_stats);
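+
+/*
+ * Usage sketch (illustrative only; the template name is hypothetical):
+ * these helpers are intended to back the FC transport class callbacks:
+ *
+ *	static struct fc_function_template example_fct = {
+ *		.get_host_port_type	= fc_get_host_port_type,
+ *		.get_host_port_state	= fc_get_host_port_state,
+ *		.get_host_speed		= fc_get_host_speed,
+ *		.get_fc_host_stats	= fc_get_host_stats,
+ *	};
+ */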
+
+/*
+ * Fill in FLOGI command for request.
+ */
+static void
+fc_lport_flogi_fill(struct fc_lport *lport, struct fc_els_flogi *flogi,
+ unsigned int op)
+{
+ struct fc_els_csp *sp;
+ struct fc_els_cssp *cp;
+
+ memset(flogi, 0, sizeof(*flogi));
+ flogi->fl_cmd = (u8) op;
+ put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
+ put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
+ sp = &flogi->fl_csp;
+ sp->sp_hi_ver = 0x20;
+ sp->sp_lo_ver = 0x20;
+ sp->sp_bb_cred = htons(10); /* this gets set by gateway */
+ sp->sp_bb_data = htons((u16) lport->mfs);
+ cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */
+ cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
+ if (op != ELS_FLOGI) {
+ sp->sp_features = htons(FC_SP_FT_CIRO);
+ sp->sp_tot_seq = htons(255); /* seq. we accept */
+ sp->sp_rel_off = htons(0x1f);
+ sp->sp_e_d_tov = htonl(lport->e_d_tov);
+
+ cp->cp_rdfs = htons((u16) lport->mfs);
+ cp->cp_con_seq = htons(255);
+ cp->cp_open_seq = 1;
+ }
+}
+
+/*
+ * Add a supported FC-4 type.
+ */
+static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
+{
+ __be32 *mp;
+
+ mp = &lport->fcts.ff_type_map[type / FC_NS_BPW];
+ *mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
+}
+
+/**
+ * fc_lport_recv_rlir_req - Handle received Registered Link Incident Report.
+ * @lport: Fibre Channel local port receiving the RLIR
+ * @sp: current sequence in the RLIR exchange
+ * @fp: RLIR request frame
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
+ struct fc_lport *lport)
+{
+ FC_DEBUG_LPORT("Received RLIR request while in state %s\n",
+ fc_lport_state(lport));
+
+ lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
+ fc_frame_free(fp);
+}
+
+/**
+ * fc_lport_recv_echo_req - Handle received ECHO request
+ * @lport: Fibre Channel local port receiving the ECHO
+ * @sp: current sequence in the ECHO exchange
+ * @fp: ECHO request frame
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
+ struct fc_lport *lport)
+{
+ struct fc_frame *fp;
+ struct fc_exch *ep = fc_seq_exch(sp);
+ unsigned int len;
+ void *pp;
+ void *dp;
+ u32 f_ctl;
+
+ FC_DEBUG_LPORT("Received RLIR request while in state %s\n",
+ fc_lport_state(lport));
+
+ len = fr_len(in_fp) - sizeof(struct fc_frame_header);
+ pp = fc_frame_payload_get(in_fp, len);
+
+ if (len < sizeof(__be32))
+ len = sizeof(__be32);
+
+ fp = fc_frame_alloc(lport, len);
+ if (fp) {
+ dp = fc_frame_payload_get(fp, len);
+ memcpy(dp, pp, len);
+ *((u32 *)dp) = htonl(ELS_LS_ACC << 24);
+ sp = lport->tt.seq_start_next(sp);
+ f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+ fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
+ FC_TYPE_ELS, f_ctl, 0);
+ lport->tt.seq_send(lport, sp, fp);
+ }
+ fc_frame_free(in_fp);
+}
+
+/**
+ * fc_lport_recv_rnid_req - Handle received Request Node ID data request
+ * @lport: Fibre Channel local port receiving the RNID
+ * @sp: current sequence in the RNID exchange
+ * @fp: RNID request frame
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
+ struct fc_lport *lport)
+{
+ struct fc_frame *fp;
+ struct fc_exch *ep = fc_seq_exch(sp);
+ struct fc_els_rnid *req;
+ struct {
+ struct fc_els_rnid_resp rnid;
+ struct fc_els_rnid_cid cid;
+ struct fc_els_rnid_gen gen;
+ } *rp;
+ struct fc_seq_els_data rjt_data;
+ u8 fmt;
+ size_t len;
+ u32 f_ctl;
+
+ FC_DEBUG_LPORT("Received RNID request while in state %s\n",
+ fc_lport_state(lport));
+
+ req = fc_frame_payload_get(in_fp, sizeof(*req));
+ if (!req) {
+ rjt_data.fp = NULL;
+ rjt_data.reason = ELS_RJT_LOGIC;
+ rjt_data.explan = ELS_EXPL_NONE;
+ lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+ } else {
+ fmt = req->rnid_fmt;
+ len = sizeof(*rp);
+ if (fmt != ELS_RNIDF_GEN ||
+ ntohl(lport->rnid_gen.rnid_atype) == 0) {
+ fmt = ELS_RNIDF_NONE; /* nothing to provide */
+ len -= sizeof(rp->gen);
+ }
+ fp = fc_frame_alloc(lport, len);
+ if (fp) {
+ rp = fc_frame_payload_get(fp, len);
+ memset(rp, 0, len);
+ rp->rnid.rnid_cmd = ELS_LS_ACC;
+ rp->rnid.rnid_fmt = fmt;
+ rp->rnid.rnid_cid_len = sizeof(rp->cid);
+ rp->cid.rnid_wwpn = htonll(lport->wwpn);
+ rp->cid.rnid_wwnn = htonll(lport->wwnn);
+ if (fmt == ELS_RNIDF_GEN) {
+ rp->rnid.rnid_sid_len = sizeof(rp->gen);
+ memcpy(&rp->gen, &lport->rnid_gen,
+ sizeof(rp->gen));
+ }
+ sp = lport->tt.seq_start_next(sp);
+ f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
+ f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
+ fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
+ FC_TYPE_ELS, f_ctl, 0);
+ lport->tt.seq_send(lport, sp, fp);
+ }
+ }
+ fc_frame_free(in_fp);
+}
+
+/**
+ * fc_lport_recv_adisc_req - Handle received Address Discovery Request
+ * @lport: Fibre Channel local port receiving the ADISC
+ * @sp: current sequence in the ADISC exchange
+ * @fp: ADISC request frame
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_lport_recv_adisc_req(struct fc_seq *sp, struct fc_frame *in_fp,
+ struct fc_lport *lport)
+{
+ struct fc_frame *fp;
+ struct fc_exch *ep = fc_seq_exch(sp);
+ struct fc_els_adisc *req, *rp;
+ struct fc_seq_els_data rjt_data;
+ size_t len;
+ u32 f_ctl;
+
+ FC_DEBUG_LPORT("Received ADISC request while in state %s\n",
+ fc_lport_state(lport));
+
+ req = fc_frame_payload_get(in_fp, sizeof(*req));
+ if (!req) {
+ rjt_data.fp = NULL;
+ rjt_data.reason = ELS_RJT_LOGIC;
+ rjt_data.explan = ELS_EXPL_NONE;
+ lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+ } else {
+ len = sizeof(*rp);
+ fp = fc_frame_alloc(lport, len);
+ if (fp) {
+ rp = fc_frame_payload_get(fp, len);
+ memset(rp, 0, len);
+ rp->adisc_cmd = ELS_LS_ACC;
+ rp->adisc_wwpn = htonll(lport->wwpn);
+ rp->adisc_wwnn = htonll(lport->wwnn);
+ hton24(rp->adisc_port_id,
+ fc_host_port_id(lport->host));
+ sp = lport->tt.seq_start_next(sp);
+ f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
+ f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
+ fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
+ FC_TYPE_ELS, f_ctl, 0);
+ lport->tt.seq_send(lport, sp, fp);
+ }
+ }
+ fc_frame_free(in_fp);
+}
+
+/**
+ * fc_lport_recv_logo_req - Handle received fabric LOGO request
+ * @lport: Fibre Channel local port receiving the LOGO
+ * @sp: current sequence in the LOGO exchange
+ * @fp: LOGO request frame
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp,
+ struct fc_lport *lport)
+{
+ lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
+ fc_lport_enter_reset(lport);
+ fc_frame_free(fp);
+}
+
+/**
+ * fc_fabric_login - Start the lport state machine
+ * @lport: The lport that should log into the fabric
+ *
+ * Locking Note: This function should not be called
+ * with the lport lock held.
+ */
+int fc_fabric_login(struct fc_lport *lport)
+{
+ int rc = -1;
+
+ mutex_lock(&lport->lp_mutex);
+ if (lport->state == LPORT_ST_NONE) {
+ fc_lport_enter_reset(lport);
+ rc = 0;
+ }
+ mutex_unlock(&lport->lp_mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL(fc_fabric_login);
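+
+/*
+ * Usage sketch (illustrative only): a driver kicks off the lport state
+ * machine once the port is fully configured; FLOGI is then sent when
+ * the link comes up:
+ *
+ *	fc_fabric_login(lp);
+ *	fc_linkup(lp);
+ */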
+
+/**
+ * fc_linkup - Handler for transport linkup events
+ * @lport: The lport whose link is up
+ */
+void fc_linkup(struct fc_lport *lport)
+{
+ FC_DEBUG_LPORT("Link is up for port (%6x)\n",
+ fc_host_port_id(lport->host));
+
+ mutex_lock(&lport->lp_mutex);
+ if ((lport->link_status & FC_LINK_UP) != FC_LINK_UP) {
+ lport->link_status |= FC_LINK_UP;
+
+ if (lport->state == LPORT_ST_RESET)
+ fc_lport_enter_flogi(lport);
+ }
+ mutex_unlock(&lport->lp_mutex);
+}
+EXPORT_SYMBOL(fc_linkup);
+
+/**
+ * fc_linkdown - Handler for transport linkdown events
+ * @lport: The lport whose link is down
+ */
+void fc_linkdown(struct fc_lport *lport)
+{
+ mutex_lock(&lport->lp_mutex);
+ FC_DEBUG_LPORT("Link is down for port (%6x)\n",
+ fc_host_port_id(lport->host));
+
+ if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP) {
+ lport->link_status &= ~(FC_LINK_UP);
+ fc_lport_enter_reset(lport);
+ lport->tt.fcp_cleanup(lport);
+ }
+ mutex_unlock(&lport->lp_mutex);
+}
+EXPORT_SYMBOL(fc_linkdown);
+
+/**
+ * fc_pause - Pause the flow of frames
+ * @lport: The lport to be paused
+ */
+void fc_pause(struct fc_lport *lport)
+{
+ mutex_lock(&lport->lp_mutex);
+ lport->link_status |= FC_PAUSE;
+ mutex_unlock(&lport->lp_mutex);
+}
+EXPORT_SYMBOL(fc_pause);
+
+/**
+ * fc_unpause - Unpause the flow of frames
+ * @lport: The lport to be unpaused
+ */
+void fc_unpause(struct fc_lport *lport)
+{
+ mutex_lock(&lport->lp_mutex);
+ lport->link_status &= ~(FC_PAUSE);
+ mutex_unlock(&lport->lp_mutex);
+}
+EXPORT_SYMBOL(fc_unpause);
+
+/**
+ * fc_fabric_logoff - Logout of the fabric
+ * @lport: fc_lport pointer to logoff the fabric
+ *
+ * Return value:
+ * 0 for success, -1 for failure
+ **/
+int fc_fabric_logoff(struct fc_lport *lport)
+{
+ lport->tt.disc_stop_final(lport);
+ mutex_lock(&lport->lp_mutex);
+ fc_lport_enter_logo(lport);
+ mutex_unlock(&lport->lp_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(fc_fabric_logoff);
+
+/**
+ * fc_lport_destroy - unregister a fc_lport
+ * @lport: fc_lport pointer to unregister
+ *
+ * Return value:
+ *	0 for success
+ * Note:
+ *	Exit routine for an fc_lport instance:
+ *	cleans up all the allocated memory
+ *	and frees up other system resources.
+ *
+ **/
+int fc_lport_destroy(struct fc_lport *lport)
+{
+ lport->tt.frame_send = fc_frame_drop;
+ lport->tt.fcp_abort_io(lport);
+ lport->tt.exch_mgr_reset(lport->emp, 0, 0);
+ return 0;
+}
+EXPORT_SYMBOL(fc_lport_destroy);
+
+/**
+ * fc_set_mfs - sets up the mfs for the corresponding fc_lport
+ * @lport: fc_lport pointer to update
+ * @mfs: the new mfs for fc_lport
+ *
+ * Set mfs for the given fc_lport to the new mfs.
+ *
+ * Return: 0 for success
+ *
+ **/
+int fc_set_mfs(struct fc_lport *lport, u32 mfs)
+{
+ unsigned int old_mfs;
+ int rc = -EINVAL;
+
+ mutex_lock(&lport->lp_mutex);
+
+ old_mfs = lport->mfs;
+
+ if (mfs >= FC_MIN_MAX_FRAME) {
+ mfs &= ~3;
+ if (mfs > FC_MAX_FRAME)
+ mfs = FC_MAX_FRAME;
+ mfs -= sizeof(struct fc_frame_header);
+ lport->mfs = mfs;
+ rc = 0;
+ }
+
+ if (!rc && mfs < old_mfs)
+ fc_lport_enter_reset(lport);
+
+ mutex_unlock(&lport->lp_mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL(fc_set_mfs);
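+
+/*
+ * Usage sketch (illustrative only; the encapsulation structures are
+ * assumptions about the caller): an FCoE driver might derive the mfs
+ * from its netdev MTU less the FCoE framing overhead:
+ *
+ *	fc_set_mfs(lp, netdev->mtu -
+ *		       (sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof)));
+ */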
+
+/**
+ * fc_lport_disc_callback - Callback for discovery events
+ * @lport: FC local port
+ * @event: The discovery event
+ */
+void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
+{
+ switch (event) {
+ case DISC_EV_SUCCESS:
+ FC_DEBUG_LPORT("Got a SUCCESS event for port (%6x)\n",
+ fc_host_port_id(lport->host));
+ break;
+ case DISC_EV_FAILED:
+ FC_DEBUG_LPORT("Got a FAILED event for port (%6x)\n",
+ fc_host_port_id(lport->host));
+ mutex_lock(&lport->lp_mutex);
+ fc_lport_enter_reset(lport);
+ mutex_unlock(&lport->lp_mutex);
+ break;
+ case DISC_EV_NONE:
+ WARN_ON(1);
+ break;
+ }
+}
+
+/**
+ * fc_rport_enter_ready - Enter the ready state and start discovery
+ * @lport: Fibre Channel local port that is ready
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_ready(struct fc_lport *lport)
+{
+ FC_DEBUG_LPORT("Port (%6x) entered Ready from state %s\n",
+ fc_host_port_id(lport->host), fc_lport_state(lport));
+
+ fc_lport_state_enter(lport, LPORT_ST_READY);
+
+ lport->tt.disc_start(fc_lport_disc_callback, lport);
+}
+
+/**
+ * fc_lport_recv_flogi_req - Receive a FLOGI request
+ * @sp_in: The sequence the FLOGI is on
+ * @rx_fp: The frame the FLOGI is in
+ * @lport: The lport that received the request
+ *
+ * A received FLOGI request indicates a point-to-point connection.
+ * Accept it with the common service parameters indicating our N port.
+ * Set up to do a PLOGI if we have the higher-number WWPN.
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
+ struct fc_frame *rx_fp,
+ struct fc_lport *lport)
+{
+ struct fc_frame *fp;
+ struct fc_frame_header *fh;
+ struct fc_seq *sp;
+ struct fc_exch *ep;
+ struct fc_els_flogi *flp;
+ struct fc_els_flogi *new_flp;
+ u64 remote_wwpn;
+ u32 remote_fid;
+ u32 local_fid;
+ u32 f_ctl;
+
+ FC_DEBUG_LPORT("Received FLOGI request while in state %s\n",
+ fc_lport_state(lport));
+
+ fh = fc_frame_header_get(rx_fp);
+ remote_fid = ntoh24(fh->fh_s_id);
+ flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
+ if (!flp)
+ goto out;
+ remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
+ if (remote_wwpn == lport->wwpn) {
+ FC_DBG("FLOGI from port with same WWPN %llx "
+ "possible configuration error\n", remote_wwpn);
+ goto out;
+ }
+ FC_DBG("FLOGI from port WWPN %llx\n", remote_wwpn);
+
+ /*
+ * XXX what is the right thing to do for FIDs?
+ * The originator might expect our S_ID to be 0xfffffe.
+ * But if so, both of us could end up with the same FID.
+ */
+ local_fid = FC_LOCAL_PTP_FID_LO;
+ if (remote_wwpn < lport->wwpn) {
+ local_fid = FC_LOCAL_PTP_FID_HI;
+ if (!remote_fid || remote_fid == local_fid)
+ remote_fid = FC_LOCAL_PTP_FID_LO;
+ } else if (!remote_fid) {
+ remote_fid = FC_LOCAL_PTP_FID_HI;
+ }
+
+ fc_host_port_id(lport->host) = local_fid;
+
+ fp = fc_frame_alloc(lport, sizeof(*flp));
+ if (fp) {
+ sp = lport->tt.seq_start_next(fr_seq(rx_fp));
+ new_flp = fc_frame_payload_get(fp, sizeof(*flp));
+ fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
+ new_flp->fl_cmd = (u8) ELS_LS_ACC;
+
+ /*
+ * Send the response. If this fails, the originator should
+ * repeat the sequence.
+ */
+ f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+ ep = fc_seq_exch(sp);
+ fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
+ FC_TYPE_ELS, f_ctl, 0);
+ lport->tt.seq_send(lport, sp, fp);
+
+ } else {
+ fc_lport_error(lport, fp);
+ }
+ fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
+ get_unaligned_be64(&flp->fl_wwnn));
+
+ lport->tt.disc_start(fc_lport_disc_callback, lport);
+
+out:
+ sp = fr_seq(rx_fp);
+ fc_frame_free(rx_fp);
+}
+
+/**
+ * fc_lport_recv_req - The generic lport request handler
+ * @lport: The lport that received the request
+ * @sp: The sequence the request is on
+ * @fp: The frame the request is in
+ *
+ * This function will see if the lport handles the request or
+ * if an rport should handle the request.
+ *
+ * Locking Note: This function should not be called with the lport
+ * lock held because it will grab the lock.
+ */
+static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
+ struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *);
+ struct fc_rport *rport;
+ u32 s_id;
+ u32 d_id;
+ struct fc_seq_els_data rjt_data;
+
+ mutex_lock(&lport->lp_mutex);
+
+ /*
+ * Handle special ELS cases like FLOGI, LOGO, and
+ * RSCN here. These don't require a session.
+ * Even if we had a session, it might not be ready.
+ */
+ if (fh->fh_type == FC_TYPE_ELS && fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
+ /*
+ * Check opcode.
+ */
+ recv = NULL;
+ switch (fc_frame_payload_op(fp)) {
+ case ELS_FLOGI:
+ recv = fc_lport_recv_flogi_req;
+ break;
+ case ELS_LOGO:
+ fh = fc_frame_header_get(fp);
+ if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI)
+ recv = fc_lport_recv_logo_req;
+ break;
+ case ELS_RSCN:
+ recv = lport->tt.disc_recv_req;
+ break;
+ case ELS_ECHO:
+ recv = fc_lport_recv_echo_req;
+ break;
+ case ELS_RLIR:
+ recv = fc_lport_recv_rlir_req;
+ break;
+ case ELS_RNID:
+ recv = fc_lport_recv_rnid_req;
+ break;
+ case ELS_ADISC:
+ recv = fc_lport_recv_adisc_req;
+ break;
+ }
+
+ if (recv)
+ recv(sp, fp, lport);
+ else {
+ /*
+ * Find session.
+ * If this is a new incoming PLOGI, we won't find it.
+ */
+ s_id = ntoh24(fh->fh_s_id);
+ d_id = ntoh24(fh->fh_d_id);
+
+ rport = lport->tt.rport_lookup(lport, s_id);
+ if (rport)
+ lport->tt.rport_recv_req(sp, fp, rport);
+ else {
+ rjt_data.fp = NULL;
+ rjt_data.reason = ELS_RJT_UNAB;
+ rjt_data.explan = ELS_EXPL_NONE;
+ lport->tt.seq_els_rsp_send(sp,
+ ELS_LS_RJT,
+ &rjt_data);
+ fc_frame_free(fp);
+ }
+ }
+ } else {
+ FC_DBG("dropping invalid frame (eof %x)\n", fr_eof(fp));
+ fc_frame_free(fp);
+ }
+ mutex_unlock(&lport->lp_mutex);
+
+ /*
+	 * The common exch_done for all requests may not be good
+	 * if any request requires a longer hold on the exchange. XXX
+ */
+ lport->tt.exch_done(sp);
+}
+
+/**
+ * fc_lport_reset - Reset an lport
+ * @lport: The lport which should be reset
+ *
+ * Locking Note: This function should not be called with the
+ * lport lock held.
+ */
+int fc_lport_reset(struct fc_lport *lport)
+{
+ mutex_lock(&lport->lp_mutex);
+ fc_lport_enter_reset(lport);
+ mutex_unlock(&lport->lp_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(fc_lport_reset);
+
+/**
+ * fc_lport_enter_reset - Reset the local port
+ * @lport: Fibre Channel local port to be reset
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_reset(struct fc_lport *lport)
+{
+ FC_DEBUG_LPORT("Port (%6x) entered RESET state from %s state\n",
+ fc_host_port_id(lport->host), fc_lport_state(lport));
+
+ fc_lport_state_enter(lport, LPORT_ST_RESET);
+
+ if (lport->dns_rp)
+ lport->tt.rport_logoff(lport->dns_rp);
+
+ if (lport->ptp_rp) {
+ lport->tt.rport_logoff(lport->ptp_rp);
+ lport->ptp_rp = NULL;
+ }
+
+ lport->tt.disc_stop(lport);
+
+ lport->tt.exch_mgr_reset(lport->emp, 0, 0);
+ fc_host_fabric_name(lport->host) = 0;
+ fc_host_port_id(lport->host) = 0;
+
+ if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP)
+ fc_lport_enter_flogi(lport);
+}
+
+/**
+ * fc_lport_error - Handler for any errors
+ * @lport: The fc_lport object
+ * @fp: The frame pointer
+ *
+ * If the error was caused by a resource allocation failure
+ * then wait for half a second and retry, otherwise retry
+ * after the e_d_tov time.
+ */
+static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
+{
+ unsigned long delay = 0;
+ FC_DEBUG_LPORT("Error %ld in state %s, retries %d\n",
+ PTR_ERR(fp), fc_lport_state(lport),
+ lport->retry_count);
+
+ if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
+ /*
+ * Memory allocation failure, or the exchange timed out.
+ * Retry after delay
+ */
+ if (lport->retry_count < lport->max_retry_count) {
+ lport->retry_count++;
+ if (!fp)
+ delay = msecs_to_jiffies(500);
+ else
+ delay = msecs_to_jiffies(lport->e_d_tov);
+
+ schedule_delayed_work(&lport->retry_work, delay);
+ } else {
+ switch (lport->state) {
+ case LPORT_ST_NONE:
+ case LPORT_ST_READY:
+ case LPORT_ST_RESET:
+ case LPORT_ST_RPN_ID:
+ case LPORT_ST_RFT_ID:
+ case LPORT_ST_SCR:
+ case LPORT_ST_DNS:
+ case LPORT_ST_FLOGI:
+ case LPORT_ST_LOGO:
+ fc_lport_enter_reset(lport);
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * fc_lport_rft_id_resp - Handle response to Register Fibre
+ *			  Channel Types by ID (RFT_ID) request
+ * @sp: current sequence in RFT_ID exchange
+ * @fp: response frame
+ * @lp_arg: Fibre Channel host port instance
+ *
+ * Locking Note: This function will be called without the lport lock
+ * held, but it will lock, call an _enter_* function or fc_lport_error
+ * and then unlock the lport.
+ */
+static void fc_lport_rft_id_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *lp_arg)
+{
+ struct fc_lport *lport = lp_arg;
+ struct fc_frame_header *fh;
+ struct fc_ct_hdr *ct;
+
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ return;
+
+ mutex_lock(&lport->lp_mutex);
+
+ FC_DEBUG_LPORT("Received a RFT_ID response\n");
+
+ if (lport->state != LPORT_ST_RFT_ID) {
+ FC_DBG("Received a RFT_ID response, but in state %s\n",
+ fc_lport_state(lport));
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_lport_error(lport, fp);
+ goto err;
+ }
+
+ fh = fc_frame_header_get(fp);
+ ct = fc_frame_payload_get(fp, sizeof(*ct));
+
+ if (fh && ct && fh->fh_type == FC_TYPE_CT &&
+ ct->ct_fs_type == FC_FST_DIR &&
+ ct->ct_fs_subtype == FC_NS_SUBTYPE &&
+ ntohs(ct->ct_cmd) == FC_FS_ACC)
+ fc_lport_enter_scr(lport);
+ else
+ fc_lport_error(lport, fp);
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&lport->lp_mutex);
+}
+
+/**
+ * fc_lport_rpn_id_resp - Handle response to Register Port
+ * Name by ID (RPN_ID) request
+ * @sp: current sequence in RPN_ID exchange
+ * @fp: response frame
+ * @lp_arg: Fibre Channel host port instance
+ *
+ * Locking Note: This function will be called without the lport lock
+ * held, but it will lock, call an _enter_* function or fc_lport_error
+ * and then unlock the lport.
+ */
+static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *lp_arg)
+{
+ struct fc_lport *lport = lp_arg;
+ struct fc_frame_header *fh;
+ struct fc_ct_hdr *ct;
+
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ return;
+
+ mutex_lock(&lport->lp_mutex);
+
+ FC_DEBUG_LPORT("Received a RPN_ID response\n");
+
+ if (lport->state != LPORT_ST_RPN_ID) {
+ FC_DBG("Received a RPN_ID response, but in state %s\n",
+ fc_lport_state(lport));
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_lport_error(lport, fp);
+ goto err;
+ }
+
+ fh = fc_frame_header_get(fp);
+ ct = fc_frame_payload_get(fp, sizeof(*ct));
+ if (fh && ct && fh->fh_type == FC_TYPE_CT &&
+ ct->ct_fs_type == FC_FST_DIR &&
+ ct->ct_fs_subtype == FC_NS_SUBTYPE &&
+ ntohs(ct->ct_cmd) == FC_FS_ACC)
+ fc_lport_enter_rft_id(lport);
+ else
+ fc_lport_error(lport, fp);
+
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&lport->lp_mutex);
+}
+
+/**
+ * fc_lport_scr_resp - Handle response to State Change Register (SCR) request
+ * @sp: current sequence in SCR exchange
+ * @fp: response frame
+ * @lp_arg: Fibre Channel local port instance that sent the registration request
+ *
+ * Locking Note: This function will be called without the lport lock
+ * held, but it will lock, call an _enter_* function or fc_lport_error
+ * and then unlock the lport.
+ */
+static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *lp_arg)
+{
+ struct fc_lport *lport = lp_arg;
+ u8 op;
+
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ return;
+
+ mutex_lock(&lport->lp_mutex);
+
+ FC_DEBUG_LPORT("Received a SCR response\n");
+
+ if (lport->state != LPORT_ST_SCR) {
+ FC_DBG("Received a SCR response, but in state %s\n",
+ fc_lport_state(lport));
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_lport_error(lport, fp);
+ goto err;
+ }
+
+ op = fc_frame_payload_op(fp);
+ if (op == ELS_LS_ACC)
+ fc_lport_enter_ready(lport);
+ else
+ fc_lport_error(lport, fp);
+
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&lport->lp_mutex);
+}
+
+/**
+ * fc_lport_enter_scr - Send a State Change Register (SCR) request
+ * @lport: Fibre Channel local port to register for state changes
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_scr(struct fc_lport *lport)
+{
+ struct fc_frame *fp;
+
+ FC_DEBUG_LPORT("Port (%6x) entered SCR state from %s state\n",
+ fc_host_port_id(lport->host), fc_lport_state(lport));
+
+ fc_lport_state_enter(lport, LPORT_ST_SCR);
+
+ fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
+ if (!fp) {
+ fc_lport_error(lport, fp);
+ return;
+ }
+
+ if (!lport->tt.elsct_send(lport, NULL, fp, ELS_SCR,
+ fc_lport_scr_resp, lport, lport->e_d_tov))
+ fc_lport_error(lport, fp);
+}
+
+/**
+ * fc_lport_enter_rft_id - Register FC4-types with the name server
+ * @lport: Fibre Channel local port to register
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_rft_id(struct fc_lport *lport)
+{
+ struct fc_frame *fp;
+ struct fc_ns_fts *lps;
+ int i;
+
+ FC_DEBUG_LPORT("Port (%6x) entered RFT_ID state from %s state\n",
+ fc_host_port_id(lport->host), fc_lport_state(lport));
+
+ fc_lport_state_enter(lport, LPORT_ST_RFT_ID);
+
+ lps = &lport->fcts;
+ i = sizeof(lps->ff_type_map) / sizeof(lps->ff_type_map[0]);
+ while (--i >= 0)
+ if (ntohl(lps->ff_type_map[i]) != 0)
+ break;
+ if (i < 0) {
+ /* nothing to register, move on to SCR */
+ fc_lport_enter_scr(lport);
+ return;
+ }
+
+ fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
+ sizeof(struct fc_ns_rft));
+ if (!fp) {
+ fc_lport_error(lport, fp);
+ return;
+ }
+
+ if (!lport->tt.elsct_send(lport, NULL, fp, FC_NS_RFT_ID,
+ fc_lport_rft_id_resp,
+ lport, lport->e_d_tov))
+ fc_lport_error(lport, fp);
+}
+
+/**
+ * fc_lport_enter_rpn_id - Register port name (RPN_ID) with the name server
+ * @lport: Fibre Channel local port to register
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_rpn_id(struct fc_lport *lport)
+{
+ struct fc_frame *fp;
+
+ FC_DEBUG_LPORT("Port (%6x) entered RPN_ID state from %s state\n",
+ fc_host_port_id(lport->host), fc_lport_state(lport));
+
+ fc_lport_state_enter(lport, LPORT_ST_RPN_ID);
+
+ fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
+ sizeof(struct fc_ns_rn_id));
+ if (!fp) {
+ fc_lport_error(lport, fp);
+ return;
+ }
+
+ if (!lport->tt.elsct_send(lport, NULL, fp, FC_NS_RPN_ID,
+ fc_lport_rpn_id_resp,
+ lport, lport->e_d_tov))
+ fc_lport_error(lport, fp);
+}
+
+static struct fc_rport_operations fc_lport_rport_ops = {
+ .event_callback = fc_lport_rport_callback,
+};
+
+/**
+ * fc_lport_enter_dns - Create an rport for the name server
+ * @lport: Fibre Channel local port requesting a rport for the name server
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_dns(struct fc_lport *lport)
+{
+ struct fc_rport *rport;
+ struct fc_rport_libfc_priv *rdata;
+ struct fc_disc_port dp;
+
+ dp.ids.port_id = FC_FID_DIR_SERV;
+ dp.ids.port_name = -1;
+ dp.ids.node_name = -1;
+ dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
+ dp.lp = lport;
+
+ FC_DEBUG_LPORT("Port (%6x) entered DNS state from %s state\n",
+ fc_host_port_id(lport->host), fc_lport_state(lport));
+
+ fc_lport_state_enter(lport, LPORT_ST_DNS);
+
+ rport = fc_rport_rogue_create(&dp);
+ if (!rport)
+ goto err;
+
+ rdata = rport->dd_data;
+ rdata->ops = &fc_lport_rport_ops;
+ lport->tt.rport_login(rport);
+ return;
+
+err:
+ fc_lport_error(lport, NULL);
+}
+
+/**
+ * fc_lport_timeout - Handler for the retry_work timer.
+ * @work: The work struct of the fc_lport
+ */
+static void fc_lport_timeout(struct work_struct *work)
+{
+ struct fc_lport *lport =
+ container_of(work, struct fc_lport,
+ retry_work.work);
+
+ mutex_lock(&lport->lp_mutex);
+
+ switch (lport->state) {
+ case LPORT_ST_NONE:
+ case LPORT_ST_READY:
+ case LPORT_ST_RESET:
+ WARN_ON(1);
+ break;
+ case LPORT_ST_FLOGI:
+ fc_lport_enter_flogi(lport);
+ break;
+ case LPORT_ST_DNS:
+ fc_lport_enter_dns(lport);
+ break;
+ case LPORT_ST_RPN_ID:
+ fc_lport_enter_rpn_id(lport);
+ break;
+ case LPORT_ST_RFT_ID:
+ fc_lport_enter_rft_id(lport);
+ break;
+ case LPORT_ST_SCR:
+ fc_lport_enter_scr(lport);
+ break;
+ case LPORT_ST_LOGO:
+ fc_lport_enter_logo(lport);
+ break;
+ }
+
+ mutex_unlock(&lport->lp_mutex);
+}
+
+/**
+ * fc_lport_logo_resp - Handle response to LOGO request
+ * @sp: current sequence in LOGO exchange
+ * @fp: response frame
+ * @lp_arg: Fibre Channel local port instance that sent the LOGO request
+ *
+ * Locking Note: This function will be called without the lport lock
+ * held, but it will lock, call an _enter_* function or fc_lport_error
+ * and then unlock the lport.
+ */
+static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *lp_arg)
+{
+ struct fc_lport *lport = lp_arg;
+ u8 op;
+
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ return;
+
+ mutex_lock(&lport->lp_mutex);
+
+ FC_DEBUG_LPORT("Received a LOGO response\n");
+
+ if (lport->state != LPORT_ST_LOGO) {
+ FC_DBG("Received a LOGO response, but in state %s\n",
+ fc_lport_state(lport));
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_lport_error(lport, fp);
+ goto err;
+ }
+
+ op = fc_frame_payload_op(fp);
+ if (op == ELS_LS_ACC)
+ fc_lport_enter_reset(lport);
+ else
+ fc_lport_error(lport, fp);
+
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&lport->lp_mutex);
+}
+
+/**
+ * fc_lport_enter_logo - Logout of the fabric
+ * @lport: Fibre Channel local port to be logged out
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_logo(struct fc_lport *lport)
+{
+ struct fc_frame *fp;
+ struct fc_els_logo *logo;
+
+ FC_DEBUG_LPORT("Port (%6x) entered LOGO state from %s state\n",
+ fc_host_port_id(lport->host), fc_lport_state(lport));
+
+ fc_lport_state_enter(lport, LPORT_ST_LOGO);
+
+ /* DNS session should be closed so we can release it here */
+ if (lport->dns_rp)
+ lport->tt.rport_logoff(lport->dns_rp);
+
+ fp = fc_frame_alloc(lport, sizeof(*logo));
+ if (!fp) {
+ fc_lport_error(lport, fp);
+ return;
+ }
+
+ if (!lport->tt.elsct_send(lport, NULL, fp, ELS_LOGO, fc_lport_logo_resp,
+ lport, lport->e_d_tov))
+ fc_lport_error(lport, fp);
+}
+
+/**
+ * fc_lport_flogi_resp - Handle response to FLOGI request
+ * @sp: current sequence in FLOGI exchange
+ * @fp: response frame
+ * @lp_arg: Fibre Channel local port instance that sent the FLOGI request
+ *
+ * Locking Note: This function will be called without the lport lock
+ * held, but it will lock, call an _enter_* function or fc_lport_error
+ * and then unlock the lport.
+ */
+static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *lp_arg)
+{
+ struct fc_lport *lport = lp_arg;
+ struct fc_frame_header *fh;
+ struct fc_els_flogi *flp;
+ u32 did;
+ u16 csp_flags;
+ unsigned int r_a_tov;
+ unsigned int e_d_tov;
+ u16 mfs;
+
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ return;
+
+ mutex_lock(&lport->lp_mutex);
+
+ FC_DEBUG_LPORT("Received a FLOGI response\n");
+
+ if (lport->state != LPORT_ST_FLOGI) {
+ FC_DBG("Received a FLOGI response, but in state %s\n",
+ fc_lport_state(lport));
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_lport_error(lport, fp);
+ goto err;
+ }
+
+ fh = fc_frame_header_get(fp);
+ did = ntoh24(fh->fh_d_id);
+ if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {
+
+ FC_DEBUG_LPORT("Assigned fid %x\n", did);
+ fc_host_port_id(lport->host) = did;
+
+ flp = fc_frame_payload_get(fp, sizeof(*flp));
+ if (flp) {
+ mfs = ntohs(flp->fl_csp.sp_bb_data) &
+ FC_SP_BB_DATA_MASK;
+ if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
+ mfs < lport->mfs)
+ lport->mfs = mfs;
+ csp_flags = ntohs(flp->fl_csp.sp_features);
+ r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
+ e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
+ if (csp_flags & FC_SP_FT_EDTR)
+ e_d_tov /= 1000000;
+ if ((csp_flags & FC_SP_FT_FPORT) == 0) {
+ if (e_d_tov > lport->e_d_tov)
+ lport->e_d_tov = e_d_tov;
+ lport->r_a_tov = 2 * e_d_tov;
+ FC_DBG("Point-to-Point mode\n");
+ fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id),
+ get_unaligned_be64(
+ &flp->fl_wwpn),
+ get_unaligned_be64(
+ &flp->fl_wwnn));
+ } else {
+ lport->e_d_tov = e_d_tov;
+ lport->r_a_tov = r_a_tov;
+ fc_host_fabric_name(lport->host) =
+ get_unaligned_be64(&flp->fl_wwnn);
+ fc_lport_enter_dns(lport);
+ }
+ }
+
+ if (flp) {
+ csp_flags = ntohs(flp->fl_csp.sp_features);
+ if ((csp_flags & FC_SP_FT_FPORT) == 0) {
+ lport->tt.disc_start(fc_lport_disc_callback,
+ lport);
+ }
+ }
+ } else {
+ FC_DBG("bad FLOGI response\n");
+ }
+
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&lport->lp_mutex);
+}
+
+/**
+ * fc_lport_enter_flogi - Send a FLOGI request to the fabric manager
+ * @lport: Fibre Channel local port to be logged in to the fabric
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+void fc_lport_enter_flogi(struct fc_lport *lport)
+{
+ struct fc_frame *fp;
+
+ FC_DEBUG_LPORT("Processing FLOGI state\n");
+
+ fc_lport_state_enter(lport, LPORT_ST_FLOGI);
+
+ fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
+	if (!fp) {
+		fc_lport_error(lport, fp);
+		return;
+	}
+
+ if (!lport->tt.elsct_send(lport, NULL, fp, ELS_FLOGI,
+ fc_lport_flogi_resp, lport, lport->e_d_tov))
+ fc_lport_error(lport, fp);
+}
+
+/**
+ * fc_lport_config - Configure a local port
+ * @lport: The local port to be configured
+ */
+int fc_lport_config(struct fc_lport *lport)
+{
+ INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
+ mutex_init(&lport->lp_mutex);
+
+ fc_lport_state_enter(lport, LPORT_ST_NONE);
+
+ fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
+ fc_lport_add_fc4_type(lport, FC_TYPE_CT);
+
+ return 0;
+}
+EXPORT_SYMBOL(fc_lport_config);
+
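+/**
+ * fc_lport_init - Install libfc defaults and set up fc_host attributes
+ * @lport: The local port to initialize
+ */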
+int fc_lport_init(struct fc_lport *lport)
+{
+ if (!lport->tt.lport_recv)
+ lport->tt.lport_recv = fc_lport_recv_req;
+
+ if (!lport->tt.lport_reset)
+ lport->tt.lport_reset = fc_lport_reset;
+
+ fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
+ fc_host_node_name(lport->host) = lport->wwnn;
+ fc_host_port_name(lport->host) = lport->wwpn;
+ fc_host_supported_classes(lport->host) = FC_COS_CLASS3;
+ memset(fc_host_supported_fc4s(lport->host), 0,
+ sizeof(fc_host_supported_fc4s(lport->host)));
+ fc_host_supported_fc4s(lport->host)[2] = 1;
+ fc_host_supported_fc4s(lport->host)[7] = 1;
+
+ /* This value is also unchanging */
+ memset(fc_host_active_fc4s(lport->host), 0,
+ sizeof(fc_host_active_fc4s(lport->host)));
+ fc_host_active_fc4s(lport->host)[2] = 1;
+ fc_host_active_fc4s(lport->host)[7] = 1;
+ fc_host_maxframe_size(lport->host) = lport->mfs;
+ fc_host_supported_speeds(lport->host) = 0;
+ if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT)
+ fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
+ if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
+ fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
+
+ return 0;
+}
+EXPORT_SYMBOL(fc_lport_init);
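+
+/*
+ * Illustrative sketch only, not part of this patch: a low-level driver
+ * would typically bring up an lport by filling in any transport template
+ * handlers it overrides and then calling the per-layer init helpers. The
+ * helpers other than fc_lport_config() and fc_lport_init() are assumed
+ * from the companion libfc files in this series.
+ *
+ *	fc_lport_config(lport);
+ *	fc_exch_init(lport);
+ *	fc_elsct_init(lport);
+ *	fc_rport_init(lport);
+ *	fc_disc_init(lport);
+ *	fc_lport_init(lport);
+ */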
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
new file mode 100644
index 000000000000..e780d8caf70e
--- /dev/null
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -0,0 +1,1291 @@
+/*
+ * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * RPORT GENERAL INFO
+ *
+ * This file contains all processing regarding fc_rports. It contains the
+ * rport state machine and does all rport interaction with the transport class.
+ * There should be no other places in libfc that interact directly with the
+ * transport class in regards to adding and deleting rports.
+ *
+ * fc_rports represent N_Ports within the fabric.
+ */
+
+/*
+ * RPORT LOCKING
+ *
+ * The rport should never hold the rport mutex and then attempt to acquire
+ * either the lport or disc mutexes. The rport's mutex is considered lesser
+ * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
+ * more comments on the hierarchy.
+ *
+ * The locking strategy is similar to the lport's strategy. The lock protects
+ * the rport's states and is held and released by the entry points to the rport
+ * block. All _enter_* functions correspond to rport states and expect the rport
+ * mutex to be locked before calling them. This means that rports only handle
+ * one request or response at a time; since they're not critical for the I/O
+ * path, this potential over-use of the mutex is acceptable.
+ */
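+
+/*
+ * Illustrative sketch of the ordering described above (an example, not
+ * code from this patch): a path that needs both locks must take the
+ * lport mutex before the rport mutex, never the reverse, or an AB-BA
+ * deadlock becomes possible.
+ *
+ *	mutex_lock(&lport->lp_mutex);
+ *	mutex_lock(&rdata->rp_mutex);
+ *	...
+ *	mutex_unlock(&rdata->rp_mutex);
+ *	mutex_unlock(&lport->lp_mutex);
+ */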
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/rcupdate.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <asm/unaligned.h>
+
+#include <scsi/libfc.h>
+#include <scsi/fc_encode.h>
+
+static int fc_rport_debug;
+
+#define FC_DEBUG_RPORT(fmt...) \
+ do { \
+ if (fc_rport_debug) \
+ FC_DBG(fmt); \
+ } while (0)
+
+struct workqueue_struct *rport_event_queue;
+
+static void fc_rport_enter_plogi(struct fc_rport *);
+static void fc_rport_enter_prli(struct fc_rport *);
+static void fc_rport_enter_rtv(struct fc_rport *);
+static void fc_rport_enter_ready(struct fc_rport *);
+static void fc_rport_enter_logo(struct fc_rport *);
+
+static void fc_rport_recv_plogi_req(struct fc_rport *,
+ struct fc_seq *, struct fc_frame *);
+static void fc_rport_recv_prli_req(struct fc_rport *,
+ struct fc_seq *, struct fc_frame *);
+static void fc_rport_recv_prlo_req(struct fc_rport *,
+ struct fc_seq *, struct fc_frame *);
+static void fc_rport_recv_logo_req(struct fc_rport *,
+ struct fc_seq *, struct fc_frame *);
+static void fc_rport_timeout(struct work_struct *);
+static void fc_rport_error(struct fc_rport *, struct fc_frame *);
+static void fc_rport_work(struct work_struct *);
+
+static const char *fc_rport_state_names[] = {
+ [RPORT_ST_NONE] = "None",
+ [RPORT_ST_INIT] = "Init",
+ [RPORT_ST_PLOGI] = "PLOGI",
+ [RPORT_ST_PRLI] = "PRLI",
+ [RPORT_ST_RTV] = "RTV",
+ [RPORT_ST_READY] = "Ready",
+ [RPORT_ST_LOGO] = "LOGO",
+};
+
+static void fc_rport_rogue_destroy(struct device *dev)
+{
+ struct fc_rport *rport = dev_to_rport(dev);
+ FC_DEBUG_RPORT("Destroying rogue rport (%6x)\n", rport->port_id);
+ kfree(rport);
+}
+
+struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp)
+{
+ struct fc_rport *rport;
+ struct fc_rport_libfc_priv *rdata;
+ rport = kzalloc(sizeof(*rport) + sizeof(*rdata), GFP_KERNEL);
+
+ if (!rport)
+ return NULL;
+
+ rdata = RPORT_TO_PRIV(rport);
+
+ rport->dd_data = rdata;
+ rport->port_id = dp->ids.port_id;
+ rport->port_name = dp->ids.port_name;
+ rport->node_name = dp->ids.node_name;
+ rport->roles = dp->ids.roles;
+ rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
+ /*
+ * Note: all this libfc rogue rport code will be removed for
+ * upstream, so it's fine that this is really ugly and hacky right now.
+ */
+ device_initialize(&rport->dev);
+ rport->dev.release = fc_rport_rogue_destroy;
+
+ mutex_init(&rdata->rp_mutex);
+ rdata->local_port = dp->lp;
+ rdata->trans_state = FC_PORTSTATE_ROGUE;
+ rdata->rp_state = RPORT_ST_INIT;
+ rdata->event = RPORT_EV_NONE;
+ rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
+ rdata->ops = NULL;
+ rdata->e_d_tov = dp->lp->e_d_tov;
+ rdata->r_a_tov = dp->lp->r_a_tov;
+ INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
+ INIT_WORK(&rdata->event_work, fc_rport_work);
+ /*
+ * For good measure, but not necessary as we should only
+ * add REAL rports to the lport list.
+ */
+ INIT_LIST_HEAD(&rdata->peers);
+
+ return rport;
+}
+
+/**
+ * fc_rport_state - return a string for the state the rport is in
+ * @rport: The rport whose state we want to get a string for
+ */
+static const char *fc_rport_state(struct fc_rport *rport)
+{
+ const char *cp;
+ struct fc_rport_libfc_priv *rdata = rport->dd_data;
+
+ cp = fc_rport_state_names[rdata->rp_state];
+ if (!cp)
+ cp = "Unknown";
+ return cp;
+}
+
+/**
+ * fc_set_rport_loss_tmo - Set the remote port loss timeout in seconds.
+ * @rport: Pointer to Fibre Channel remote port structure
+ * @timeout: timeout in seconds
+ */
+void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
+{
+ if (timeout)
+ rport->dev_loss_tmo = timeout + 5;
+ else
+ rport->dev_loss_tmo = 30;
+}
+EXPORT_SYMBOL(fc_set_rport_loss_tmo);
+
+/**
+ * fc_plogi_get_maxframe - Get max payload from the common service parameters
+ * @flp: FLOGI payload structure
+ * @maxval: upper limit, may be less than what is in the service parameters
+ */
+static unsigned int
+fc_plogi_get_maxframe(struct fc_els_flogi *flp, unsigned int maxval)
+{
+ unsigned int mfs;
+
+ /*
+ * Get max payload from the common service parameters and the
+ * class 3 receive data field size.
+ */
+ mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
+ if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
+ maxval = mfs;
+ mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
+ if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
+ maxval = mfs;
+ return maxval;
+}
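+
+/*
+ * Worked example (illustrative): if the peer advertises a 2112-byte
+ * receive size in its common service parameters and a 1024-byte class 3
+ * receive data field size, and @maxval is a local mfs of 2048, the
+ * result is min(2048, 2112, 1024) = 1024. Values below
+ * FC_SP_MIN_MAX_PAYLOAD are ignored as invalid.
+ */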
+
+/**
+ * fc_rport_state_enter - Change the rport's state
+ * @rport: The rport whose state should change
+ * @new: The new state of the rport
+ *
+ * Locking Note: Called with the rport lock held
+ */
+static void fc_rport_state_enter(struct fc_rport *rport,
+ enum fc_rport_state new)
+{
+ struct fc_rport_libfc_priv *rdata = rport->dd_data;
+ if (rdata->rp_state != new)
+ rdata->retries = 0;
+ rdata->rp_state = new;
+}
+
+static void fc_rport_work(struct work_struct *work)
+{
+ struct fc_rport_libfc_priv *rdata =
+ container_of(work, struct fc_rport_libfc_priv, event_work);
+ enum fc_rport_event event;
+ enum fc_rport_trans_state trans_state;
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_rport_operations *rport_ops;
+ struct fc_rport *rport = PRIV_TO_RPORT(rdata);
+
+ mutex_lock(&rdata->rp_mutex);
+ event = rdata->event;
+ rport_ops = rdata->ops;
+
+ if (event == RPORT_EV_CREATED) {
+ struct fc_rport *new_rport;
+ struct fc_rport_libfc_priv *new_rdata;
+ struct fc_rport_identifiers ids;
+
+ ids.port_id = rport->port_id;
+ ids.roles = rport->roles;
+ ids.port_name = rport->port_name;
+ ids.node_name = rport->node_name;
+
+ mutex_unlock(&rdata->rp_mutex);
+
+ new_rport = fc_remote_port_add(lport->host, 0, &ids);
+ if (new_rport) {
+ /*
+ * Switch from the rogue rport to the rport
+ * returned by the FC class.
+ */
+ new_rport->maxframe_size = rport->maxframe_size;
+
+ new_rdata = new_rport->dd_data;
+ new_rdata->e_d_tov = rdata->e_d_tov;
+ new_rdata->r_a_tov = rdata->r_a_tov;
+ new_rdata->ops = rdata->ops;
+ new_rdata->local_port = rdata->local_port;
+ new_rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
+ new_rdata->trans_state = FC_PORTSTATE_REAL;
+ mutex_init(&new_rdata->rp_mutex);
+ INIT_DELAYED_WORK(&new_rdata->retry_work,
+ fc_rport_timeout);
+ INIT_LIST_HEAD(&new_rdata->peers);
+ INIT_WORK(&new_rdata->event_work, fc_rport_work);
+
+ fc_rport_state_enter(new_rport, RPORT_ST_READY);
+ } else {
+ FC_DBG("Failed to create the rport for port "
+ "(%6x).\n", ids.port_id);
+ event = RPORT_EV_FAILED;
+ }
+ put_device(&rport->dev);
+ rport = new_rport;
+ rdata = new_rport->dd_data;
+ if (rport_ops->event_callback)
+ rport_ops->event_callback(lport, rport, event);
+ } else if ((event == RPORT_EV_FAILED) ||
+ (event == RPORT_EV_LOGO) ||
+ (event == RPORT_EV_STOP)) {
+ trans_state = rdata->trans_state;
+ mutex_unlock(&rdata->rp_mutex);
+ if (rport_ops->event_callback)
+ rport_ops->event_callback(lport, rport, event);
+ if (trans_state == FC_PORTSTATE_ROGUE)
+ put_device(&rport->dev);
+ else
+ fc_remote_port_delete(rport);
+ } else
+ mutex_unlock(&rdata->rp_mutex);
+}
+
+/**
+ * fc_rport_login - Start the remote port login state machine
+ * @rport: Fibre Channel remote port
+ *
+ * Locking Note: Called without the rport lock held. This
+ * function will hold the rport lock, call an _enter_*
+ * function and then unlock the rport.
+ */
+int fc_rport_login(struct fc_rport *rport)
+{
+ struct fc_rport_libfc_priv *rdata = rport->dd_data;
+
+ mutex_lock(&rdata->rp_mutex);
+
+ FC_DEBUG_RPORT("Login to port (%6x)\n", rport->port_id);
+
+ fc_rport_enter_plogi(rport);
+
+ mutex_unlock(&rdata->rp_mutex);
+
+ return 0;
+}
+
+/**
+ * fc_rport_logoff - Logoff and remove an rport
+ * @rport: Fibre Channel remote port to be removed
+ *
+ * Locking Note: Called without the rport lock held. This
+ * function will hold the rport lock, call an _enter_*
+ * function and then unlock the rport.
+ */
+int fc_rport_logoff(struct fc_rport *rport)
+{
+ struct fc_rport_libfc_priv *rdata = rport->dd_data;
+
+ mutex_lock(&rdata->rp_mutex);
+
+ FC_DEBUG_RPORT("Remove port (%6x)\n", rport->port_id);
+
+ fc_rport_enter_logo(rport);
+
+ /*
+ * Change the state to NONE so that we discard
+ * the response.
+ */
+ fc_rport_state_enter(rport, RPORT_ST_NONE);
+
+ mutex_unlock(&rdata->rp_mutex);
+
+ cancel_delayed_work_sync(&rdata->retry_work);
+
+ mutex_lock(&rdata->rp_mutex);
+
+ rdata->event = RPORT_EV_STOP;
+ queue_work(rport_event_queue, &rdata->event_work);
+
+ mutex_unlock(&rdata->rp_mutex);
+
+ return 0;
+}
+
+/**
+ * fc_rport_enter_ready - The rport is ready
+ * @rport: Fibre Channel remote port that is ready
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_rport_enter_ready(struct fc_rport *rport)
+{
+ struct fc_rport_libfc_priv *rdata = rport->dd_data;
+
+ fc_rport_state_enter(rport, RPORT_ST_READY);
+
+ FC_DEBUG_RPORT("Port (%6x) is Ready\n", rport->port_id);
+
+ rdata->event = RPORT_EV_CREATED;
+ queue_work(rport_event_queue, &rdata->event_work);
+}
+
+/**
+ * fc_rport_timeout - Handler for the retry_work timer.
+ * @work: The work struct of the fc_rport_libfc_priv
+ *
+ * Locking Note: Called without the rport lock held. This
+ * function will hold the rport lock, call an _enter_*
+ * function and then unlock the rport.
+ */
+static void fc_rport_timeout(struct work_struct *work)
+{
+ struct fc_rport_libfc_priv *rdata =
+ container_of(work, struct fc_rport_libfc_priv, retry_work.work);
+ struct fc_rport *rport = PRIV_TO_RPORT(rdata);
+
+ mutex_lock(&rdata->rp_mutex);
+
+ switch (rdata->rp_state) {
+ case RPORT_ST_PLOGI:
+ fc_rport_enter_plogi(rport);
+ break;
+ case RPORT_ST_PRLI:
+ fc_rport_enter_prli(rport);
+ break;
+ case RPORT_ST_RTV:
+ fc_rport_enter_rtv(rport);
+ break;
+ case RPORT_ST_LOGO:
+ fc_rport_enter_logo(rport);
+ break;
+ case RPORT_ST_READY:
+ case RPORT_ST_INIT:
+ case RPORT_ST_NONE:
+ break;
+ }
+
+ mutex_unlock(&rdata->rp_mutex);
+ put_device(&rport->dev);
+}
+
+/**
+ * fc_rport_error - Handler for any errors
+ * @rport: The fc_rport object
+ * @fp: The frame pointer
+ *
+ * If the error was caused by a resource allocation failure
+ * then wait for half a second and retry, otherwise retry
+ * immediately.
+ *
+ * Locking Note: The rport lock is expected to be held before
+ * calling this routine
+ */
+static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp)
+{
+ struct fc_rport_libfc_priv *rdata = rport->dd_data;
+ unsigned long delay = 0;
+
+ FC_DEBUG_RPORT("Error %ld in state %s, retries %d\n",
+ PTR_ERR(fp), fc_rport_state(rport), rdata->retries);
+
+ if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
+ /*
+		 * Memory allocation failure, or the exchange timed out. Retry:
+		 * after a half-second delay for allocation failures,
+		 * immediately for timeouts.
+ */
+ if (rdata->retries < rdata->local_port->max_retry_count) {
+ rdata->retries++;
+ if (!fp)
+ delay = msecs_to_jiffies(500);
+ get_device(&rport->dev);
+ schedule_delayed_work(&rdata->retry_work, delay);
+ } else {
+ switch (rdata->rp_state) {
+ case RPORT_ST_PLOGI:
+ case RPORT_ST_PRLI:
+ case RPORT_ST_LOGO:
+ rdata->event = RPORT_EV_FAILED;
+ queue_work(rport_event_queue,
+ &rdata->event_work);
+ break;
+ case RPORT_ST_RTV:
+ fc_rport_enter_ready(rport);
+ break;
+ case RPORT_ST_NONE:
+ case RPORT_ST_READY:
+ case RPORT_ST_INIT:
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * fc_rport_plogi_resp - Handle incoming ELS PLOGI response
+ * @sp: current sequence in the PLOGI exchange
+ * @fp: response frame
+ * @rp_arg: Fibre Channel remote port
+ *
+ * Locking Note: This function will be called without the rport lock
+ * held, but it will lock, call an _enter_* function or fc_rport_error
+ * and then unlock the rport.
+ */
+static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *rp_arg)
+{
+ struct fc_rport *rport = rp_arg;
+ struct fc_rport_libfc_priv *rdata = rport->dd_data;
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_els_flogi *plp;
+ unsigned int tov;
+ u16 csp_seq;
+ u16 cssp_seq;
+ u8 op;
+
+ mutex_lock(&rdata->rp_mutex);
+
+ FC_DEBUG_RPORT("Received a PLOGI response from port (%6x)\n",
+ rport->port_id);
+
+ if (rdata->rp_state != RPORT_ST_PLOGI) {
+ FC_DBG("Received a PLOGI response, but in state %s\n",
+ fc_rport_state(rport));
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_rport_error(rport, fp);
+ goto err;
+ }
+
+ op = fc_frame_payload_op(fp);
+ if (op == ELS_LS_ACC &&
+ (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
+ rport->port_name = get_unaligned_be64(&plp->fl_wwpn);
+ rport->node_name = get_unaligned_be64(&plp->fl_wwnn);
+
+ tov = ntohl(plp->fl_csp.sp_e_d_tov);
+ if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
+			tov /= 1000000;	/* ns -> ms when the resolution bit is set */
+ if (tov > rdata->e_d_tov)
+ rdata->e_d_tov = tov;
+ csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
+ cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
+ if (cssp_seq < csp_seq)
+ csp_seq = cssp_seq;
+ rdata->max_seq = csp_seq;
+ rport->maxframe_size =
+ fc_plogi_get_maxframe(plp, lport->mfs);
+
+ /*
+ * If the rport is one of the well known addresses
+ * we skip PRLI and RTV and go straight to READY.
+ */
+ if (rport->port_id >= FC_FID_DOM_MGR)
+ fc_rport_enter_ready(rport);
+ else
+ fc_rport_enter_prli(rport);
+ } else
+ fc_rport_error(rport, fp);
+
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&rdata->rp_mutex);
+ put_device(&rport->dev);
+}
+
+/**
+ * fc_rport_enter_plogi - Send Port Login (PLOGI) request to peer
+ * @rport: Fibre Channel remote port to send PLOGI to
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_rport_enter_plogi(struct fc_rport *rport)
+{
+ struct fc_rport_libfc_priv *rdata = rport->dd_data;
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_frame *fp;
+
+ FC_DEBUG_RPORT("Port (%6x) entered PLOGI state from %s state\n",
+ rport->port_id, fc_rport_state(rport));
+
+ fc_rport_state_enter(rport, RPORT_ST_PLOGI);
+
+ rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
+ fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
+ if (!fp) {
+ fc_rport_error(rport, fp);
+ return;
+ }
+ rdata->e_d_tov = lport->e_d_tov;
+
+ if (!lport->tt.elsct_send(lport, rport, fp, ELS_PLOGI,
+ fc_rport_plogi_resp, rport, lport->e_d_tov))
+ fc_rport_error(rport, fp);
+ else
+ get_device(&rport->dev);
+}
+
+/**
+ * fc_rport_prli_resp - Process Login (PRLI) response handler
+ * @sp: current sequence in the PRLI exchange
+ * @fp: response frame
+ * @rp_arg: Fibre Channel remote port
+ *
+ * Locking Note: This function will be called without the rport lock
+ * held, but it will lock, call an _enter_* function or fc_rport_error
+ * and then unlock the rport.
+ */
+static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *rp_arg)
+{
+ struct fc_rport *rport = rp_arg;
+ struct fc_rport_libfc_priv *rdata = rport->dd_data;
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp spp;
+ } *pp;
+ u32 roles = FC_RPORT_ROLE_UNKNOWN;
+ u32 fcp_parm = 0;
+ u8 op;
+
+ mutex_lock(&rdata->rp_mutex);
+
+ FC_DEBUG_RPORT("Received a PRLI response from port (%6x)\n",
+ rport->port_id);
+
+ if (rdata->rp_state != RPORT_ST_PRLI) {
+ FC_DBG("Received a PRLI response, but in state %s\n",
+ fc_rport_state(rport));
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_rport_error(rport, fp);
+ goto err;
+ }
+
+ op = fc_frame_payload_op(fp);
+ if (op == ELS_LS_ACC) {
+ pp = fc_frame_payload_get(fp, sizeof(*pp));
+ if (pp && pp->prli.prli_spp_len >= sizeof(pp->spp)) {
+ fcp_parm = ntohl(pp->spp.spp_params);
+ if (fcp_parm & FCP_SPPF_RETRY)
+ rdata->flags |= FC_RP_FLAGS_RETRY;
+ }
+
+ rport->supported_classes = FC_COS_CLASS3;
+ if (fcp_parm & FCP_SPPF_INIT_FCN)
+ roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+ if (fcp_parm & FCP_SPPF_TARG_FCN)
+ roles |= FC_RPORT_ROLE_FCP_TARGET;
+
+ rport->roles = roles;
+ fc_rport_enter_rtv(rport);
+
+ } else {
+ FC_DBG("Bad ELS response\n");
+ rdata->event = RPORT_EV_FAILED;
+ queue_work(rport_event_queue, &rdata->event_work);
+ }
+
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&rdata->rp_mutex);
+ put_device(&rport->dev);
+}
+
+/**
+ * fc_rport_logo_resp - Logout (LOGO) response handler
+ * @sp: current sequence in the LOGO exchange
+ * @fp: response frame
+ * @rp_arg: Fibre Channel remote port
+ *
+ * Locking Note: This function will be called without the rport lock
+ * held, but it will lock, call an _enter_* function or fc_rport_error
+ * and then unlock the rport.
+ */
+static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *rp_arg)
+{
+ struct fc_rport *rport = rp_arg;
+ struct fc_rport_libfc_priv *rdata = rport->dd_data;
+ u8 op;
+
+ mutex_lock(&rdata->rp_mutex);
+
+ FC_DEBUG_RPORT("Received a LOGO response from port (%6x)\n",
+ rport->port_id);
+
+ if (IS_ERR(fp)) {
+ fc_rport_error(rport, fp);
+ goto err;
+ }
+
+ if (rdata->rp_state != RPORT_ST_LOGO) {
+ FC_DEBUG_RPORT("Received a LOGO response, but in state %s\n",
+ fc_rport_state(rport));
+ goto out;
+ }
+
+ op = fc_frame_payload_op(fp);
+ if (op == ELS_LS_ACC) {
+ fc_rport_enter_rtv(rport);
+ } else {
+ FC_DBG("Bad ELS response\n");
+ rdata->event = RPORT_EV_LOGO;
+ queue_work(rport_event_queue, &rdata->event_work);
+ }
+
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&rdata->rp_mutex);
+ put_device(&rport->dev);
+}
+
+/**
+ * fc_rport_enter_prli - Send Process Login (PRLI) request to peer
+ * @rport: Fibre Channel remote port to send PRLI to
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_rport_enter_prli(struct fc_rport *rport)
+{
+ struct fc_rport_libfc_priv *rdata = rport->dd_data;
+ struct fc_lport *lport = rdata->local_port;
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp spp;
+ } *pp;
+ struct fc_frame *fp;
+
+ FC_DEBUG_RPORT("Port (%6x) entered PRLI state from %s state\n",
+ rport->port_id, fc_rport_state(rport));
+
+ fc_rport_state_enter(rport, RPORT_ST_PRLI);
+
+ fp = fc_frame_alloc(lport, sizeof(*pp));
+ if (!fp) {
+ fc_rport_error(rport, fp);
+ return;
+ }
+
+ if (!lport->tt.elsct_send(lport, rport, fp, ELS_PRLI,
+ fc_rport_prli_resp, rport, lport->e_d_tov))
+ fc_rport_error(rport, fp);
+ else
+ get_device(&rport->dev);
+}
+
+/**
+ * fc_rport_rtv_resp - Request Timeout Value (RTV) response handler
+ * @sp: current sequence in the RTV exchange
+ * @fp: response frame
+ * @rp_arg: Fibre Channel remote port
+ *
+ * Many targets don't seem to support this.
+ *
+ * Locking Note: This function will be called without the rport lock
+ * held, but it will lock, call an _enter_* function or fc_rport_error
+ * and then unlock the rport.
+ */
+static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *rp_arg)
+{
+ struct fc_rport *rport = rp_arg;
+ struct fc_rport_libfc_priv *rdata = rport->dd_data;
+ u8 op;
+
+ mutex_lock(&rdata->rp_mutex);
+
+ FC_DEBUG_RPORT("Received a RTV response from port (%6x)\n",
+ rport->port_id);
+
+ if (rdata->rp_state != RPORT_ST_RTV) {
+ FC_DBG("Received a RTV response, but in state %s\n",
+ fc_rport_state(rport));
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_rport_error(rport, fp);
+ goto err;
+ }
+
+ op = fc_frame_payload_op(fp);
+ if (op == ELS_LS_ACC) {
+ struct fc_els_rtv_acc *rtv;
+ u32 toq;
+ u32 tov;
+
+ rtv = fc_frame_payload_get(fp, sizeof(*rtv));
+ if (rtv) {
+ toq = ntohl(rtv->rtv_toq);
+ tov = ntohl(rtv->rtv_r_a_tov);
+ if (tov == 0)
+ tov = 1;
+ rdata->r_a_tov = tov;
+ tov = ntohl(rtv->rtv_e_d_tov);
+ if (toq & FC_ELS_RTV_EDRES)
+ tov /= 1000000;
+ if (tov == 0)
+ tov = 1;
+ rdata->e_d_tov = tov;
+ }
+ }
+
+ fc_rport_enter_ready(rport);
+
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&rdata->rp_mutex);
+ put_device(&rport->dev);
+}
+
+/**
+ * fc_rport_enter_rtv - Send Request Timeout Value (RTV) request to peer
+ * @rport: Fibre Channel remote port to send RTV to
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_rport_enter_rtv(struct fc_rport *rport)
+{
+ struct fc_frame *fp;
+ struct fc_rport_libfc_priv *rdata = rport->dd_data;
+ struct fc_lport *lport = rdata->local_port;
+
+ FC_DEBUG_RPORT("Port (%6x) entered RTV state from %s state\n",
+ rport->port_id, fc_rport_state(rport));
+
+ fc_rport_state_enter(rport, RPORT_ST_RTV);
+
+ fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
+ if (!fp) {
+ fc_rport_error(rport, fp);
+ return;
+ }
+
+ if (!lport->tt.elsct_send(lport, rport, fp, ELS_RTV,
+ fc_rport_rtv_resp, rport, lport->e_d_tov))
+ fc_rport_error(rport, fp);
+ else
+ get_device(&rport->dev);
+}
+
+/**
+ * fc_rport_enter_logo - Send Logout (LOGO) request to peer
+ * @rport: Fibre Channel remote port to send LOGO to
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_rport_enter_logo(struct fc_rport *rport)
+{
+ struct fc_rport_libfc_priv *rdata = rport->dd_data;
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_frame *fp;
+
+ FC_DEBUG_RPORT("Port (%6x) entered LOGO state from %s state\n",
+ rport->port_id, fc_rport_state(rport));
+
+ fc_rport_state_enter(rport, RPORT_ST_LOGO);
+
+ fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
+ if (!fp) {
+ fc_rport_error(rport, fp);
+ return;
+ }
+
+ if (!lport->tt.elsct_send(lport, rport, fp, ELS_LOGO,
+ fc_rport_logo_resp, rport, lport->e_d_tov))
+ fc_rport_error(rport, fp);
+ else
+ get_device(&rport->dev);
+}
+
+
+/**
+ * fc_rport_recv_req - Receive a request from an rport
+ * @sp: current sequence in the request exchange
+ * @fp: request frame
+ * @rport: Fibre Channel remote port that sent the request
+ *
+ * Locking Note: Called without the rport lock held. This
+ * function will hold the rport lock, call an _enter_*
+ * function and then unlock the rport.
+ */
+void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
+ struct fc_rport *rport)
+{
+ struct fc_rport_libfc_priv *rdata = rport->dd_data;
+ struct fc_lport *lport = rdata->local_port;
+
+ struct fc_frame_header *fh;
+ struct fc_seq_els_data els_data;
+ u8 op;
+
+ mutex_lock(&rdata->rp_mutex);
+
+ els_data.fp = NULL;
+ els_data.explan = ELS_EXPL_NONE;
+ els_data.reason = ELS_RJT_NONE;
+
+ fh = fc_frame_header_get(fp);
+
+ if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && fh->fh_type == FC_TYPE_ELS) {
+ op = fc_frame_payload_op(fp);
+ switch (op) {
+ case ELS_PLOGI:
+ fc_rport_recv_plogi_req(rport, sp, fp);
+ break;
+ case ELS_PRLI:
+ fc_rport_recv_prli_req(rport, sp, fp);
+ break;
+ case ELS_PRLO:
+ fc_rport_recv_prlo_req(rport, sp, fp);
+ break;
+ case ELS_LOGO:
+ fc_rport_recv_logo_req(rport, sp, fp);
+ break;
+ case ELS_RRQ:
+ els_data.fp = fp;
+ lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
+ break;
+ case ELS_REC:
+ els_data.fp = fp;
+ lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
+ break;
+ default:
+ els_data.reason = ELS_RJT_UNSUP;
+ lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
+ break;
+ }
+ }
+
+ mutex_unlock(&rdata->rp_mutex);
+}
+
+/**
+ * fc_rport_recv_plogi_req - Handle incoming Port Login (PLOGI) request
+ * @rport: Fibre Channel remote port that initiated PLOGI
+ * @sp: current sequence in the PLOGI exchange
+ * @rx_fp: PLOGI request frame
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_rport_recv_plogi_req(struct fc_rport *rport,
+ struct fc_seq *sp, struct fc_frame *rx_fp)
+{
+ struct fc_rport_libfc_priv *rdata = rport->dd_data;
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_frame *fp = rx_fp;
+ struct fc_exch *ep;
+ struct fc_frame_header *fh;
+ struct fc_els_flogi *pl;
+ struct fc_seq_els_data rjt_data;
+ u32 sid;
+ u64 wwpn;
+ u64 wwnn;
+ enum fc_els_rjt_reason reject = 0;
+ u32 f_ctl;
+ rjt_data.fp = NULL;
+
+ fh = fc_frame_header_get(fp);
+
+ FC_DEBUG_RPORT("Received PLOGI request from port (%6x) "
+ "while in state %s\n", ntoh24(fh->fh_s_id),
+ fc_rport_state(rport));
+
+ sid = ntoh24(fh->fh_s_id);
+ pl = fc_frame_payload_get(fp, sizeof(*pl));
+ if (!pl) {
+ FC_DBG("incoming PLOGI from %x too short\n", sid);
+ WARN_ON(1);
+ /* XXX TBD: send reject? */
+ fc_frame_free(fp);
+ return;
+ }
+ wwpn = get_unaligned_be64(&pl->fl_wwpn);
+ wwnn = get_unaligned_be64(&pl->fl_wwnn);
+
+ /*
+ * If the session was just created, possibly due to the incoming PLOGI,
+ * set the state appropriately and accept the PLOGI.
+ *
+ * If we had also sent a PLOGI, and if the received PLOGI is from a
+ * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
+ * "command already in progress".
+ *
+ * XXX TBD: If the session was ready before, the PLOGI should result in
+ * all outstanding exchanges being reset.
+ */
+ switch (rdata->rp_state) {
+ case RPORT_ST_INIT:
+ FC_DEBUG_RPORT("incoming PLOGI from %6x wwpn %llx state INIT "
+ "- reject\n", sid, wwpn);
+ reject = ELS_RJT_UNSUP;
+ break;
+ case RPORT_ST_PLOGI:
+ FC_DEBUG_RPORT("incoming PLOGI from %x in PLOGI state %d\n",
+ sid, rdata->rp_state);
+ if (wwpn < lport->wwpn)
+ reject = ELS_RJT_INPROG;
+ break;
+ case RPORT_ST_PRLI:
+ case RPORT_ST_READY:
+ FC_DEBUG_RPORT("incoming PLOGI from %x in logged-in state %d "
+ "- ignored for now\n", sid, rdata->rp_state);
+ /* XXX TBD - should reset */
+ break;
+ case RPORT_ST_NONE:
+ default:
+ FC_DEBUG_RPORT("incoming PLOGI from %x in unexpected "
+ "state %d\n", sid, rdata->rp_state);
+ break;
+ }
+
+ if (reject) {
+ rjt_data.reason = reject;
+ rjt_data.explan = ELS_EXPL_NONE;
+ lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+ fc_frame_free(fp);
+ } else {
+ fp = fc_frame_alloc(lport, sizeof(*pl));
+ if (fp == NULL) {
+ fp = rx_fp;
+ rjt_data.reason = ELS_RJT_UNAB;
+ rjt_data.explan = ELS_EXPL_NONE;
+ lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+ fc_frame_free(fp);
+ } else {
+ sp = lport->tt.seq_start_next(sp);
+ WARN_ON(!sp);
+ fc_rport_set_name(rport, wwpn, wwnn);
+
+ /*
+ * Get session payload size from incoming PLOGI.
+ */
+ rport->maxframe_size =
+ fc_plogi_get_maxframe(pl, lport->mfs);
+ fc_frame_free(rx_fp);
+ fc_plogi_fill(lport, fp, ELS_LS_ACC);
+
+ /*
+ * Send LS_ACC. If this fails,
+ * the originator should retry.
+ */
+ f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
+ f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
+ ep = fc_seq_exch(sp);
+ fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
+ FC_TYPE_ELS, f_ctl, 0);
+ lport->tt.seq_send(lport, sp, fp);
+ if (rdata->rp_state == RPORT_ST_PLOGI)
+ fc_rport_enter_prli(rport);
+ }
+ }
+}
+
+/**
+ * fc_rport_recv_prli_req - Handle incoming Process Login (PRLI) request
+ * @rport: Fibre Channel remote port that initiated PRLI
+ * @sp: current sequence in the PRLI exchange
+ * @rx_fp: PRLI request frame
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_rport_recv_prli_req(struct fc_rport *rport,
+ struct fc_seq *sp, struct fc_frame *rx_fp)
+{
+ struct fc_rport_libfc_priv *rdata = rport->dd_data;
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_exch *ep;
+ struct fc_frame *fp;
+ struct fc_frame_header *fh;
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp spp;
+ } *pp;
+ struct fc_els_spp *rspp; /* request service param page */
+ struct fc_els_spp *spp; /* response spp */
+ unsigned int len;
+ unsigned int plen;
+ enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
+ enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
+ enum fc_els_spp_resp resp;
+ struct fc_seq_els_data rjt_data;
+ u32 f_ctl;
+ u32 fcp_parm;
+ u32 roles = FC_RPORT_ROLE_UNKNOWN;
+ rjt_data.fp = NULL;
+
+ fh = fc_frame_header_get(rx_fp);
+
+ FC_DEBUG_RPORT("Received PRLI request from port (%6x) "
+ "while in state %s\n", ntoh24(fh->fh_s_id),
+ fc_rport_state(rport));
+
+ switch (rdata->rp_state) {
+ case RPORT_ST_PRLI:
+ case RPORT_ST_READY:
+ reason = ELS_RJT_NONE;
+ break;
+ default:
+ break;
+ }
+ len = fr_len(rx_fp) - sizeof(*fh);
+ pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
+ if (pp == NULL) {
+ reason = ELS_RJT_PROT;
+ explan = ELS_EXPL_INV_LEN;
+ } else {
+ plen = ntohs(pp->prli.prli_len);
+ if ((plen % 4) != 0 || plen > len) {
+ reason = ELS_RJT_PROT;
+ explan = ELS_EXPL_INV_LEN;
+ } else if (plen < len) {
+ len = plen;
+ }
+ plen = pp->prli.prli_spp_len;
+ if ((plen % 4) != 0 || plen < sizeof(*spp) ||
+ plen > len || len < sizeof(*pp)) {
+ reason = ELS_RJT_PROT;
+ explan = ELS_EXPL_INV_LEN;
+ }
+ rspp = &pp->spp;
+ }
+ if (reason != ELS_RJT_NONE ||
+ (fp = fc_frame_alloc(lport, len)) == NULL) {
+ rjt_data.reason = reason;
+ rjt_data.explan = explan;
+ lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+ } else {
+ sp = lport->tt.seq_start_next(sp);
+ WARN_ON(!sp);
+ pp = fc_frame_payload_get(fp, len);
+ WARN_ON(!pp);
+ memset(pp, 0, len);
+ pp->prli.prli_cmd = ELS_LS_ACC;
+ pp->prli.prli_spp_len = plen;
+ pp->prli.prli_len = htons(len);
+ len -= sizeof(struct fc_els_prli);
+
+ /*
+ * Go through all the service parameter pages and build
+ * response. If plen indicates longer SPP than standard,
+ * use that. The entire response has been pre-cleared above.
+ */
+ spp = &pp->spp;
+ while (len >= plen) {
+ spp->spp_type = rspp->spp_type;
+ spp->spp_type_ext = rspp->spp_type_ext;
+ spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
+ resp = FC_SPP_RESP_ACK;
+ if (rspp->spp_flags & FC_SPP_RPA_VAL)
+ resp = FC_SPP_RESP_NO_PA;
+ switch (rspp->spp_type) {
+ case 0: /* common to all FC-4 types */
+ break;
+ case FC_TYPE_FCP:
+ fcp_parm = ntohl(rspp->spp_params);
+				if (fcp_parm & FCP_SPPF_RETRY)
+ rdata->flags |= FC_RP_FLAGS_RETRY;
+ rport->supported_classes = FC_COS_CLASS3;
+ if (fcp_parm & FCP_SPPF_INIT_FCN)
+ roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+ if (fcp_parm & FCP_SPPF_TARG_FCN)
+ roles |= FC_RPORT_ROLE_FCP_TARGET;
+ rport->roles = roles;
+
+ spp->spp_params =
+ htonl(lport->service_params);
+ break;
+ default:
+ resp = FC_SPP_RESP_INVL;
+ break;
+ }
+ spp->spp_flags |= resp;
+ len -= plen;
+ rspp = (struct fc_els_spp *)((char *)rspp + plen);
+ spp = (struct fc_els_spp *)((char *)spp + plen);
+ }
+
+ /*
+ * Send LS_ACC. If this fails, the originator should retry.
+ */
+ f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
+ f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
+ ep = fc_seq_exch(sp);
+ fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
+ FC_TYPE_ELS, f_ctl, 0);
+ lport->tt.seq_send(lport, sp, fp);
+
+ /*
+		 * Re-check the state; the rport lock is already held here.
+ */
+ switch (rdata->rp_state) {
+ case RPORT_ST_PRLI:
+ fc_rport_enter_ready(rport);
+ break;
+ case RPORT_ST_READY:
+ break;
+ default:
+ break;
+ }
+ }
+ fc_frame_free(rx_fp);
+}
+
+/**
+ * fc_rport_recv_prlo_req - Handle incoming Process Logout (PRLO) request
+ * @rport: Fibre Channel remote port that initiated PRLO
+ * @sp: current sequence in the PRLO exchange
+ * @fp: PRLO request frame
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp,
+ struct fc_frame *fp)
+{
+ struct fc_rport_libfc_priv *rdata = rport->dd_data;
+ struct fc_lport *lport = rdata->local_port;
+
+ struct fc_frame_header *fh;
+ struct fc_seq_els_data rjt_data;
+
+ fh = fc_frame_header_get(fp);
+
+ FC_DEBUG_RPORT("Received PRLO request from port (%6x) "
+ "while in state %s\n", ntoh24(fh->fh_s_id),
+ fc_rport_state(rport));
+
+ rjt_data.fp = NULL;
+ rjt_data.reason = ELS_RJT_UNAB;
+ rjt_data.explan = ELS_EXPL_NONE;
+ lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+ fc_frame_free(fp);
+}
+
+/**
+ * fc_rport_recv_logo_req - Handle incoming Logout (LOGO) request
+ * @rport: Fibre Channel remote port that initiated LOGO
+ * @sp: current sequence in the LOGO exchange
+ * @fp: LOGO request frame
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this function.
+ */
+static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp,
+ struct fc_frame *fp)
+{
+ struct fc_frame_header *fh;
+ struct fc_rport_libfc_priv *rdata = rport->dd_data;
+ struct fc_lport *lport = rdata->local_port;
+
+ fh = fc_frame_header_get(fp);
+
+ FC_DEBUG_RPORT("Received LOGO request from port (%6x) "
+ "while in state %s\n", ntoh24(fh->fh_s_id),
+ fc_rport_state(rport));
+
+ rdata->event = RPORT_EV_LOGO;
+ queue_work(rport_event_queue, &rdata->event_work);
+
+ lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
+ fc_frame_free(fp);
+}
+
+static void fc_rport_flush_queue(void)
+{
+ flush_workqueue(rport_event_queue);
+}
+
+
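+/**
+ * fc_rport_init - Fill in any unset rport handlers in the libfc template
+ * @lport: The local port whose transport template is being initialized
+ */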
+int fc_rport_init(struct fc_lport *lport)
+{
+ if (!lport->tt.rport_login)
+ lport->tt.rport_login = fc_rport_login;
+
+ if (!lport->tt.rport_logoff)
+ lport->tt.rport_logoff = fc_rport_logoff;
+
+ if (!lport->tt.rport_recv_req)
+ lport->tt.rport_recv_req = fc_rport_recv_req;
+
+ if (!lport->tt.rport_flush_queue)
+ lport->tt.rport_flush_queue = fc_rport_flush_queue;
+
+ return 0;
+}
+EXPORT_SYMBOL(fc_rport_init);
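+
+/*
+ * Illustrative sketch only: an LLD that needs to wrap rport logins can
+ * install its own handler before calling fc_rport_init(); any template
+ * slot left NULL gets the default above. The my_rport_login() name is
+ * hypothetical.
+ *
+ *	lport->tt.rport_login = my_rport_login;
+ *	fc_rport_init(lport);
+ */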
+
+int fc_setup_rport(void)
+{
+ rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
+ if (!rport_event_queue)
+ return -ENOMEM;
+ return 0;
+}
+EXPORT_SYMBOL(fc_setup_rport);
+
+void fc_destroy_rport(void)
+{
+ destroy_workqueue(rport_event_queue);
+}
+EXPORT_SYMBOL(fc_destroy_rport);
+
+void fc_rport_terminate_io(struct fc_rport *rport)
+{
+ struct fc_rport_libfc_priv *rdata = rport->dd_data;
+ struct fc_lport *lport = rdata->local_port;
+
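+	/*
+	 * Reset any exchanges outstanding with this remote port, in
+	 * both directions.
+	 */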
+ lport->tt.exch_mgr_reset(lport->emp, 0, rport->port_id);
+ lport->tt.exch_mgr_reset(lport->emp, rport->port_id, 0);
+}
+EXPORT_SYMBOL(fc_rport_terminate_io);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index da7b67d30d9a..7225b6e2029e 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -88,34 +88,47 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
}
EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
-void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *task,
- struct iscsi_data *hdr)
+/**
+ * iscsi_prep_data_out_pdu - initialize Data-Out
+ * @task: scsi command task
+ * @r2t: R2T info
+ * @hdr: iscsi Data-Out pdu to be initialized
+ *
+ * Notes:
+ *	Initializes a Data-Out PDU within this R2T sequence and finds
+ *	the proper data_offset within this SCSI command.
+ *
+ * This function is called with connection lock taken.
+ **/
+void iscsi_prep_data_out_pdu(struct iscsi_task *task, struct iscsi_r2t_info *r2t,
+ struct iscsi_data *hdr)
{
struct iscsi_conn *conn = task->conn;
+ unsigned int left = r2t->data_length - r2t->sent;
+
+ task->hdr_len = sizeof(struct iscsi_data);
memset(hdr, 0, sizeof(struct iscsi_data));
- hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
- hdr->datasn = cpu_to_be32(task->unsol_datasn);
- task->unsol_datasn++;
+ hdr->ttt = r2t->ttt;
+ hdr->datasn = cpu_to_be32(r2t->datasn);
+ r2t->datasn++;
hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
-
- hdr->itt = task->hdr->itt;
- hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
- hdr->offset = cpu_to_be32(task->unsol_offset);
-
- if (task->unsol_count > conn->max_xmit_dlength) {
+ memcpy(hdr->lun, task->lun, sizeof(hdr->lun));
+ hdr->itt = task->hdr_itt;
+ hdr->exp_statsn = r2t->exp_statsn;
+ hdr->offset = cpu_to_be32(r2t->data_offset + r2t->sent);
+ if (left > conn->max_xmit_dlength) {
hton24(hdr->dlength, conn->max_xmit_dlength);
- task->data_count = conn->max_xmit_dlength;
- task->unsol_offset += task->data_count;
+ r2t->data_count = conn->max_xmit_dlength;
hdr->flags = 0;
} else {
- hton24(hdr->dlength, task->unsol_count);
- task->data_count = task->unsol_count;
+ hton24(hdr->dlength, left);
+ r2t->data_count = left;
hdr->flags = ISCSI_FLAG_CMD_FINAL;
}
+ conn->dataout_pdus_cnt++;
}
-EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu);
+EXPORT_SYMBOL_GPL(iscsi_prep_data_out_pdu);
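+
+/*
+ * Illustrative sketch only (not from this patch): a transport's Data-Out
+ * transmit path would call iscsi_prep_data_out_pdu() once per PDU within
+ * an R2T sequence, sending r2t->data_count payload bytes and then
+ * advancing r2t->sent, until the R2T's data_length is satisfied. The hdr
+ * variable is a caller-provided struct iscsi_data.
+ *
+ *	while (r2t->sent < r2t->data_length) {
+ *		iscsi_prep_data_out_pdu(task, r2t, &hdr);
+ *		(send hdr plus r2t->data_count bytes of payload)
+ *		r2t->sent += r2t->data_count;
+ *	}
+ */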
static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
{
@@ -206,11 +219,24 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
{
struct iscsi_conn *conn = task->conn;
struct iscsi_session *session = conn->session;
- struct iscsi_cmd *hdr = task->hdr;
struct scsi_cmnd *sc = task->sc;
+ struct iscsi_cmd *hdr;
unsigned hdrlength, cmd_len;
+ itt_t itt;
int rc;
+ rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
+ if (rc)
+ return rc;
+ hdr = (struct iscsi_cmd *) task->hdr;
+ itt = hdr->itt;
+ memset(hdr, 0, sizeof(*hdr));
+
+ if (session->tt->parse_pdu_itt)
+ hdr->itt = task->hdr_itt = itt;
+ else
+ hdr->itt = task->hdr_itt = build_itt(task->itt,
+ task->conn->session->age);
task->hdr_len = 0;
rc = iscsi_add_hdr(task, sizeof(*hdr));
if (rc)
@@ -218,8 +244,8 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
hdr->opcode = ISCSI_OP_SCSI_CMD;
hdr->flags = ISCSI_ATTR_SIMPLE;
int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
- hdr->itt = build_itt(task->itt, session->age);
- hdr->cmdsn = cpu_to_be32(session->cmdsn);
+ memcpy(task->lun, hdr->lun, sizeof(task->lun));
+ hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
session->cmdsn++;
hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
cmd_len = sc->cmd_len;
@@ -242,6 +268,8 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
}
if (sc->sc_data_direction == DMA_TO_DEVICE) {
unsigned out_len = scsi_out(sc)->length;
+ struct iscsi_r2t_info *r2t = &task->unsol_r2t;
+
hdr->data_length = cpu_to_be32(out_len);
hdr->flags |= ISCSI_FLAG_CMD_WRITE;
/*
@@ -254,13 +282,11 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
* without R2T ack right after
* immediate data
*
- * r2t_data_count bytes to be sent via R2T ack's
+	 *	r2t data_length bytes to be sent via R2T acks
*
* pad_count bytes to be sent as zero-padding
*/
- task->unsol_count = 0;
- task->unsol_offset = 0;
- task->unsol_datasn = 0;
+ memset(r2t, 0, sizeof(*r2t));
if (session->imm_data_en) {
if (out_len >= session->first_burst)
@@ -274,12 +300,14 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
zero_data(hdr->dlength);
if (!session->initial_r2t_en) {
- task->unsol_count = min(session->first_burst, out_len)
- - task->imm_count;
- task->unsol_offset = task->imm_count;
+ r2t->data_length = min(session->first_burst, out_len) -
+ task->imm_count;
+ r2t->data_offset = task->imm_count;
+ r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
+ r2t->exp_statsn = cpu_to_be32(conn->exp_statsn);
}
- if (!task->unsol_count)
+ if (!task->unsol_r2t.data_length)
/* No unsolicit Data-Out's */
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
} else {
@@ -300,8 +328,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
WARN_ON(hdrlength >= 256);
hdr->hlength = hdrlength & 0xFF;
- if (conn->session->tt->init_task &&
- conn->session->tt->init_task(task))
+ if (session->tt->init_task && session->tt->init_task(task))
return -EIO;
task->state = ISCSI_TASK_RUNNING;
@@ -332,6 +359,7 @@ static void iscsi_complete_command(struct iscsi_task *task)
struct iscsi_session *session = conn->session;
struct scsi_cmnd *sc = task->sc;
+ session->tt->cleanup_task(task);
list_del_init(&task->running);
task->state = ISCSI_TASK_COMPLETED;
task->sc = NULL;
@@ -402,13 +430,6 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
* the cmd in the sequencing
*/
conn->session->queued_cmdsn--;
- else
- conn->session->tt->cleanup_task(conn, task);
- /*
- * Check if cleanup_task dropped the lock and the command completed,
- */
- if (!task->sc)
- return;
sc->result = err;
if (!scsi_bidi_cmnd(sc))
@@ -428,7 +449,7 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
struct iscsi_task *task)
{
struct iscsi_session *session = conn->session;
- struct iscsi_hdr *hdr = (struct iscsi_hdr *)task->hdr;
+ struct iscsi_hdr *hdr = task->hdr;
struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
@@ -442,7 +463,6 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
*/
nop->cmdsn = cpu_to_be32(session->cmdsn);
if (hdr->itt != RESERVED_ITT) {
- hdr->itt = build_itt(task->itt, session->age);
/*
* TODO: We always use immediate, so we never hit this.
* If we start to send tmfs or nops as non-immediate then
@@ -455,12 +475,13 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
}
}
- if (session->tt->init_task)
- session->tt->init_task(task);
+ if (session->tt->init_task && session->tt->init_task(task))
+ return -EIO;
if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
session->state = ISCSI_STATE_LOGGING_OUT;
+ task->state = ISCSI_TASK_RUNNING;
list_move_tail(&task->running, &conn->mgmt_run_list);
debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
@@ -474,6 +495,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
{
struct iscsi_session *session = conn->session;
struct iscsi_task *task;
+ itt_t itt;
if (session->state == ISCSI_STATE_TERMINATE)
return NULL;
@@ -494,12 +516,6 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
if (!__kfifo_get(session->cmdpool.queue,
(void*)&task, sizeof(void*)))
return NULL;
-
- if ((hdr->opcode == (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE)) &&
- hdr->ttt == RESERVED_ITT) {
- conn->ping_task = task;
- conn->last_ping = jiffies;
- }
}
/*
* released in complete pdu for task we expect a response for, and
@@ -516,23 +532,47 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
} else
task->data_count = 0;
+ if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
+ iscsi_conn_printk(KERN_ERR, conn, "Could not allocate "
+ "pdu for mgmt task.\n");
+ goto requeue_task;
+ }
+ itt = task->hdr->itt;
+ task->hdr_len = sizeof(struct iscsi_hdr);
memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
+
+ if (hdr->itt != RESERVED_ITT) {
+ if (session->tt->parse_pdu_itt)
+ task->hdr->itt = itt;
+ else
+ task->hdr->itt = build_itt(task->itt,
+ task->conn->session->age);
+ }
+
INIT_LIST_HEAD(&task->running);
list_add_tail(&task->running, &conn->mgmtqueue);
if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
- if (iscsi_prep_mgmt_task(conn, task)) {
- __iscsi_put_task(task);
- return NULL;
- }
+ if (iscsi_prep_mgmt_task(conn, task))
+ goto free_task;
if (session->tt->xmit_task(task))
- task = NULL;
+ goto free_task;
} else
scsi_queue_work(conn->session->host, &conn->xmitwork);
return task;
+
+free_task:
+ __iscsi_put_task(task);
+ return NULL;
+
+requeue_task:
+ if (task != conn->login_task)
+ __kfifo_put(session->cmdpool.queue, (void*)&task,
+ sizeof(void*));
+ return NULL;
}
int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
@@ -633,6 +673,40 @@ out:
__iscsi_put_task(task);
}
+/**
+ * iscsi_data_in_rsp - SCSI Data-In Response processing
+ * @conn: iscsi connection
+ * @hdr: iscsi pdu
+ * @task: scsi command task
+ **/
+static void
+iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ struct iscsi_task *task)
+{
+ struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)hdr;
+ struct scsi_cmnd *sc = task->sc;
+
+ if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
+ return;
+
+ sc->result = (DID_OK << 16) | rhdr->cmd_status;
+ conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
+ if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
+ ISCSI_FLAG_DATA_OVERFLOW)) {
+ int res_count = be32_to_cpu(rhdr->residual_count);
+
+ if (res_count > 0 &&
+ (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+ res_count <= scsi_in(sc)->length))
+ scsi_in(sc)->resid = res_count;
+ else
+ sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+ }
+
+ conn->scsirsp_pdus_cnt++;
+ __iscsi_put_task(task);
+}
+
static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
{
struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr;
@@ -674,6 +748,11 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
if (!task)
iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
+ else if (!rhdr) {
+ /* only track our nops */
+ conn->ping_task = task;
+ conn->last_ping = jiffies;
+ }
}
static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
@@ -681,7 +760,6 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
{
struct iscsi_reject *reject = (struct iscsi_reject *)hdr;
struct iscsi_hdr rejected_pdu;
- uint32_t itt;
conn->exp_statsn = be32_to_cpu(reject->statsn) + 1;
@@ -691,10 +769,9 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) {
memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
- itt = get_itt(rejected_pdu.itt);
iscsi_conn_printk(KERN_ERR, conn,
- "itt 0x%x had pdu (op 0x%x) rejected "
- "due to DataDigest error.\n", itt,
+ "pdu (op 0x%x) rejected "
+ "due to DataDigest error.\n",
rejected_pdu.opcode);
}
}
@@ -714,12 +791,15 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
{
struct iscsi_session *session = conn->session;
- uint32_t i;
+ int i;
if (itt == RESERVED_ITT)
return NULL;
- i = get_itt(itt);
+ if (session->tt->parse_pdu_itt)
+ session->tt->parse_pdu_itt(conn, itt, &i, NULL);
+ else
+ i = get_itt(itt);
if (i >= session->cmds_max)
return NULL;
@@ -818,12 +898,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
break;
case ISCSI_OP_SCSI_DATA_IN:
- if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
- conn->scsirsp_pdus_cnt++;
- iscsi_update_cmdsn(session,
- (struct iscsi_nopin*) hdr);
- __iscsi_put_task(task);
- }
+ iscsi_data_in_rsp(conn, hdr, task);
break;
case ISCSI_OP_LOGOUT_RSP:
iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
@@ -899,20 +974,25 @@ EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
{
struct iscsi_session *session = conn->session;
- uint32_t i;
+ int age = 0, i = 0;
if (itt == RESERVED_ITT)
return 0;
- if (((__force u32)itt & ISCSI_AGE_MASK) !=
- (session->age << ISCSI_AGE_SHIFT)) {
+ if (session->tt->parse_pdu_itt)
+ session->tt->parse_pdu_itt(conn, itt, &i, &age);
+ else {
+ i = get_itt(itt);
+ age = ((__force u32)itt >> ISCSI_AGE_SHIFT) & ISCSI_AGE_MASK;
+ }
+
+ if (age != session->age) {
iscsi_conn_printk(KERN_ERR, conn,
"received itt %x expected session age (%x)\n",
(__force u32)itt, session->age);
return ISCSI_ERR_BAD_ITT;
}
- i = get_itt(itt);
if (i >= session->cmds_max) {
iscsi_conn_printk(KERN_ERR, conn,
"received invalid itt index %u (max cmds "
@@ -954,6 +1034,38 @@ struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
}
EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
+void iscsi_session_failure(struct iscsi_cls_session *cls_session,
+ enum iscsi_err err)
+{
+ struct iscsi_session *session = cls_session->dd_data;
+ struct iscsi_conn *conn;
+ struct device *dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&session->lock, flags);
+ conn = session->leadconn;
+ if (session->state == ISCSI_STATE_TERMINATE || !conn) {
+ spin_unlock_irqrestore(&session->lock, flags);
+ return;
+ }
+
+ dev = get_device(&conn->cls_conn->dev);
+ spin_unlock_irqrestore(&session->lock, flags);
+ if (!dev)
+ return;
+ /*
+ * if the host is being removed bypass the connection
+ * recovery initialization because we are going to kill
+ * the session.
+ */
+ if (err == ISCSI_ERR_INVALID_HOST)
+ iscsi_conn_error_event(conn->cls_conn, err);
+ else
+ iscsi_conn_failure(conn, err);
+ put_device(dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_session_failure);
+
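
A transport driver would typically call iscsi_session_failure() from its hardware error handler; a hedged sketch follows (struct example_hba and its fields are illustrative, not any real driver's):

struct example_hba {			/* hypothetical driver state */
	struct iscsi_cls_session *cls_session;
	bool removing;
};

static void example_hba_error(struct example_hba *hba)
{
	/*
	 * ISCSI_ERR_INVALID_HOST skips recovery setup and only sends the
	 * error event, since the host is about to be torn down anyway.
	 */
	iscsi_session_failure(hba->cls_session,
			      hba->removing ? ISCSI_ERR_INVALID_HOST
					    : ISCSI_ERR_CONN_FAILED);
}
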
void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
{
struct iscsi_session *session = conn->session;
@@ -968,9 +1080,10 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
if (conn->stop_stage == 0)
session->state = ISCSI_STATE_FAILED;
spin_unlock_irqrestore(&session->lock, flags);
+
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
- iscsi_conn_error(conn->cls_conn, err);
+ iscsi_conn_error_event(conn->cls_conn, err);
}
EXPORT_SYMBOL_GPL(iscsi_conn_failure);
@@ -1080,8 +1193,13 @@ check_mgmt:
fail_command(conn, conn->task, DID_IMM_RETRY << 16);
continue;
}
- if (iscsi_prep_scsi_cmd_pdu(conn->task)) {
- fail_command(conn, conn->task, DID_ABORT << 16);
+ rc = iscsi_prep_scsi_cmd_pdu(conn->task);
+ if (rc) {
+ if (rc == -ENOMEM) {
+ conn->task = NULL;
+ goto again;
+ } else
+ fail_command(conn, conn->task, DID_ABORT << 16);
continue;
}
rc = iscsi_xmit_task(conn);
@@ -1139,6 +1257,26 @@ static void iscsi_xmitworker(struct work_struct *work)
} while (rc >= 0 || rc == -EAGAIN);
}
+static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn,
+ struct scsi_cmnd *sc)
+{
+ struct iscsi_task *task;
+
+ if (!__kfifo_get(conn->session->cmdpool.queue,
+ (void *) &task, sizeof(void *)))
+ return NULL;
+
+ sc->SCp.phase = conn->session->age;
+ sc->SCp.ptr = (char *) task;
+
+ atomic_set(&task->refcount, 1);
+ task->state = ISCSI_TASK_PENDING;
+ task->conn = conn;
+ task->sc = sc;
+ INIT_LIST_HEAD(&task->running);
+ return task;
+}
+
enum {
FAILURE_BAD_HOST = 1,
FAILURE_SESSION_FAILED,
@@ -1194,15 +1332,13 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
switch (session->state) {
case ISCSI_STATE_IN_RECOVERY:
reason = FAILURE_SESSION_IN_RECOVERY;
- sc->result = DID_IMM_RETRY << 16;
- break;
+ goto reject;
case ISCSI_STATE_LOGGING_OUT:
reason = FAILURE_SESSION_LOGGING_OUT;
- sc->result = DID_IMM_RETRY << 16;
- break;
+ goto reject;
case ISCSI_STATE_RECOVERY_FAILED:
reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
- sc->result = DID_NO_CONNECT << 16;
+ sc->result = DID_TRANSPORT_FAILFAST << 16;
break;
case ISCSI_STATE_TERMINATE:
reason = FAILURE_SESSION_TERMINATE;
@@ -1227,33 +1363,27 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
goto reject;
}
- if (!__kfifo_get(session->cmdpool.queue, (void*)&task,
- sizeof(void*))) {
+ task = iscsi_alloc_task(conn, sc);
+ if (!task) {
reason = FAILURE_OOM;
goto reject;
}
- sc->SCp.phase = session->age;
- sc->SCp.ptr = (char *)task;
-
- atomic_set(&task->refcount, 1);
- task->state = ISCSI_TASK_PENDING;
- task->conn = conn;
- task->sc = sc;
- INIT_LIST_HEAD(&task->running);
list_add_tail(&task->running, &conn->xmitqueue);
if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
- if (iscsi_prep_scsi_cmd_pdu(task)) {
- sc->result = DID_ABORT << 16;
- sc->scsi_done = NULL;
- iscsi_complete_command(task);
- goto fault;
+ reason = iscsi_prep_scsi_cmd_pdu(task);
+ if (reason) {
+ if (reason == -ENOMEM) {
+ reason = FAILURE_OOM;
+ goto prepd_reject;
+ } else {
+ sc->result = DID_ABORT << 16;
+ goto prepd_fault;
+ }
}
if (session->tt->xmit_task(task)) {
- sc->scsi_done = NULL;
- iscsi_complete_command(task);
reason = FAILURE_SESSION_NOT_READY;
- goto reject;
+ goto prepd_reject;
}
} else
scsi_queue_work(session->host, &conn->xmitwork);
@@ -1263,12 +1393,18 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
spin_lock(host->host_lock);
return 0;
+prepd_reject:
+ sc->scsi_done = NULL;
+ iscsi_complete_command(task);
reject:
spin_unlock(&session->lock);
debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason);
spin_lock(host->host_lock);
- return SCSI_MLQUEUE_HOST_BUSY;
+ return SCSI_MLQUEUE_TARGET_BUSY;
+prepd_fault:
+ sc->scsi_done = NULL;
+ iscsi_complete_command(task);
fault:
spin_unlock(&session->lock);
debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason);
@@ -1307,7 +1443,7 @@ void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
}
EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
-int iscsi_eh_host_reset(struct scsi_cmnd *sc)
+int iscsi_eh_target_reset(struct scsi_cmnd *sc)
{
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
@@ -1321,7 +1457,7 @@ int iscsi_eh_host_reset(struct scsi_cmnd *sc)
spin_lock_bh(&session->lock);
if (session->state == ISCSI_STATE_TERMINATE) {
failed:
- debug_scsi("failing host reset: session terminated "
+ debug_scsi("failing target reset: session terminated "
"[CID %d age %d]\n", conn->id, session->age);
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);
@@ -1336,7 +1472,7 @@ failed:
*/
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
- debug_scsi("iscsi_eh_host_reset wait for relogin\n");
+ debug_scsi("iscsi_eh_target_reset wait for relogin\n");
wait_event_interruptible(conn->ehwait,
session->state == ISCSI_STATE_TERMINATE ||
session->state == ISCSI_STATE_LOGGED_IN ||
@@ -1348,14 +1484,14 @@ failed:
spin_lock_bh(&session->lock);
if (session->state == ISCSI_STATE_LOGGED_IN)
iscsi_session_printk(KERN_INFO, session,
- "host reset succeeded\n");
+ "target reset succeeded\n");
else
goto failed;
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);
return SUCCESS;
}
-EXPORT_SYMBOL_GPL(iscsi_eh_host_reset);
+EXPORT_SYMBOL_GPL(iscsi_eh_target_reset);
static void iscsi_tmf_timedout(unsigned long data)
{
@@ -1580,9 +1716,9 @@ static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
- hdr->rtt = task->hdr->itt;
- hdr->refcmdsn = task->hdr->cmdsn;
+ memcpy(hdr->lun, task->lun, sizeof(hdr->lun));
+ hdr->rtt = task->hdr_itt;
+ hdr->refcmdsn = task->cmdsn;
}
int iscsi_eh_abort(struct scsi_cmnd *sc)
@@ -1769,10 +1905,10 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
iscsi_suspend_tx(conn);
- spin_lock(&session->lock);
+ spin_lock_bh(&session->lock);
fail_all_commands(conn, sc->device->lun, DID_ERROR);
conn->tmf_state = TMF_INITIAL;
- spin_unlock(&session->lock);
+ spin_unlock_bh(&session->lock);
iscsi_start_tx(conn);
goto done;
@@ -1878,6 +2014,7 @@ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
int dd_data_size, uint16_t qdepth)
{
struct Scsi_Host *shost;
+ struct iscsi_host *ihost;
shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
if (!shost)
@@ -1892,22 +2029,43 @@ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
qdepth = ISCSI_DEF_CMD_PER_LUN;
}
shost->cmd_per_lun = qdepth;
+
+ ihost = shost_priv(shost);
+ spin_lock_init(&ihost->lock);
+ ihost->state = ISCSI_HOST_SETUP;
+ ihost->num_sessions = 0;
+ init_waitqueue_head(&ihost->session_removal_wq);
return shost;
}
EXPORT_SYMBOL_GPL(iscsi_host_alloc);
+static void iscsi_notify_host_removed(struct iscsi_cls_session *cls_session)
+{
+ iscsi_session_failure(cls_session, ISCSI_ERR_INVALID_HOST);
+}
+
/**
* iscsi_host_remove - remove host and sessions
* @shost: scsi host
*
- * This will also remove any sessions attached to the host, but if userspace
- * is managing the session at the same time this will break. TODO: add
- * refcounting to the netlink iscsi interface so a rmmod or host hot unplug
- * does not remove the memory from under us.
+ * If there are any sessions left, this will initiate the removal and wait
+ * for the completion.
*/
void iscsi_host_remove(struct Scsi_Host *shost)
{
- iscsi_host_for_each_session(shost, iscsi_session_teardown);
+ struct iscsi_host *ihost = shost_priv(shost);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ihost->lock, flags);
+ ihost->state = ISCSI_HOST_REMOVED;
+ spin_unlock_irqrestore(&ihost->lock, flags);
+
+ iscsi_host_for_each_session(shost, iscsi_notify_host_removed);
+ wait_event_interruptible(ihost->session_removal_wq,
+ ihost->num_sessions == 0);
+ if (signal_pending(current))
+ flush_signals(current);
+
scsi_remove_host(shost);
}
EXPORT_SYMBOL_GPL(iscsi_host_remove);
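
For an LLD this turns host removal into a simple two-step teardown; a hedged sketch of a hypothetical PCI .remove callback (the function name is illustrative):

static void example_pci_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);

	iscsi_host_remove(shost);	/* fails sessions, waits for teardown */
	iscsi_host_free(shost);		/* host memory is now safe to free */
}
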
@@ -1923,6 +2081,27 @@ void iscsi_host_free(struct Scsi_Host *shost)
}
EXPORT_SYMBOL_GPL(iscsi_host_free);
+static void iscsi_host_dec_session_cnt(struct Scsi_Host *shost)
+{
+ struct iscsi_host *ihost = shost_priv(shost);
+ unsigned long flags;
+
+ shost = scsi_host_get(shost);
+ if (!shost) {
+ printk(KERN_ERR "Invalid state. Cannot notify host removal "
+ "of session teardown event because host already "
+ "removed.\n");
+ return;
+ }
+
+ spin_lock_irqsave(&ihost->lock, flags);
+ ihost->num_sessions--;
+ if (ihost->num_sessions == 0)
+ wake_up(&ihost->session_removal_wq);
+ spin_unlock_irqrestore(&ihost->lock, flags);
+ scsi_host_put(shost);
+}
+
/**
* iscsi_session_setup - create iscsi cls session and host and session
* @iscsit: iscsi transport template
@@ -1943,9 +2122,19 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
uint16_t cmds_max, int cmd_task_size,
uint32_t initial_cmdsn, unsigned int id)
{
+ struct iscsi_host *ihost = shost_priv(shost);
struct iscsi_session *session;
struct iscsi_cls_session *cls_session;
int cmd_i, scsi_cmds, total_cmds = cmds_max;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ihost->lock, flags);
+ if (ihost->state == ISCSI_HOST_REMOVED) {
+ spin_unlock_irqrestore(&ihost->lock, flags);
+ return NULL;
+ }
+ ihost->num_sessions++;
+ spin_unlock_irqrestore(&ihost->lock, flags);
if (!total_cmds)
total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
@@ -1958,7 +2147,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
"must be a power of two that is at least %d.\n",
total_cmds, ISCSI_TOTAL_CMDS_MIN);
- return NULL;
+ goto dec_session_count;
}
if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
@@ -1982,7 +2171,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
cls_session = iscsi_alloc_session(shost, iscsit,
sizeof(struct iscsi_session));
if (!cls_session)
- return NULL;
+ goto dec_session_count;
session = cls_session->dd_data;
session->cls_session = cls_session;
session->host = shost;
@@ -2021,6 +2210,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
if (iscsi_add_session(cls_session, id))
goto cls_session_fail;
+
return cls_session;
cls_session_fail:
@@ -2029,6 +2219,8 @@ module_get_fail:
iscsi_pool_free(&session->cmdpool);
cmdpool_alloc_fail:
iscsi_free_session(cls_session);
+dec_session_count:
+ iscsi_host_dec_session_cnt(shost);
return NULL;
}
EXPORT_SYMBOL_GPL(iscsi_session_setup);
@@ -2044,6 +2236,7 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
{
struct iscsi_session *session = cls_session->dd_data;
struct module *owner = cls_session->transport->owner;
+ struct Scsi_Host *shost = session->host;
iscsi_pool_free(&session->cmdpool);
@@ -2056,6 +2249,7 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
kfree(session->ifacename);
iscsi_destroy_session(cls_session);
+ iscsi_host_dec_session_cnt(shost);
module_put(owner);
}
EXPORT_SYMBOL_GPL(iscsi_session_teardown);
@@ -2111,7 +2305,8 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
}
spin_unlock_bh(&session->lock);
- data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
+ data = (char *) __get_free_pages(GFP_KERNEL,
+ get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
if (!data)
goto login_task_data_alloc_fail;
conn->login_task->data = conn->data = data;
@@ -2182,7 +2377,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
iscsi_suspend_tx(conn);
spin_lock_bh(&session->lock);
- kfree(conn->data);
+ free_pages((unsigned long) conn->data,
+ get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
kfree(conn->persistent_address);
__kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
sizeof(void*));
@@ -2335,8 +2531,10 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
* flush queues.
*/
spin_lock_bh(&session->lock);
- fail_all_commands(conn, -1,
- STOP_CONN_RECOVER ? DID_BUS_BUSY : DID_ERROR);
+ if (flag == STOP_CONN_RECOVER)
+ fail_all_commands(conn, -1, DID_TRANSPORT_DISRUPTED);
+ else
+ fail_all_commands(conn, -1, DID_ERROR);
flush_control_queues(session, conn);
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
new file mode 100644
index 000000000000..a745f91d2928
--- /dev/null
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -0,0 +1,1163 @@
+/*
+ * iSCSI over TCP/IP Data-Path lib
+ *
+ * Copyright (C) 2004 Dmitry Yusupov
+ * Copyright (C) 2004 Alex Aizman
+ * Copyright (C) 2005 - 2006 Mike Christie
+ * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
+ * maintained by open-iscsi@googlegroups.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * See the file COPYING included with this distribution for more details.
+ *
+ * Credits:
+ * Christoph Hellwig
+ * FUJITA Tomonori
+ * Arne Redlich
+ * Zhenyu Wang
+ */
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/inet.h>
+#include <linux/file.h>
+#include <linux/blkdev.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/kfifo.h>
+#include <linux/scatterlist.h>
+#include <net/tcp.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_transport_iscsi.h>
+
+#include "iscsi_tcp.h"
+
+MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
+ "Dmitry Yusupov <dmitry_yus@yahoo.com>, "
+ "Alex Aizman <itn780@yahoo.com>");
+MODULE_DESCRIPTION("iSCSI/TCP data-path");
+MODULE_LICENSE("GPL");
+#undef DEBUG_TCP
+
+#ifdef DEBUG_TCP
+#define debug_tcp(fmt...) printk(KERN_INFO "tcp: " fmt)
+#else
+#define debug_tcp(fmt...)
+#endif
+
+static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
+ struct iscsi_segment *segment);
+
+/*
+ * Scatterlist handling: inside the iscsi_segment, we
+ * remember an index into the scatterlist, and set data/size
+ * to the current scatterlist entry. For highmem pages, we
+ * kmap as needed.
+ *
+ * Note that the page is unmapped when we return from
+ * TCP's data_ready handler, so we may end up mapping and
+ * unmapping the same page repeatedly. The whole reason
+ * for this is that we shouldn't keep the page mapped
+ * outside the softirq.
+ */
+
+/**
+ * iscsi_tcp_segment_init_sg - init indicated scatterlist entry
+ * @segment: the buffer object
+ * @sg: scatterlist
+ * @offset: byte offset into that sg entry
+ *
+ * This function sets up the segment so that subsequent
+ * data is copied to the indicated sg entry, at the given
+ * offset.
+ */
+static inline void
+iscsi_tcp_segment_init_sg(struct iscsi_segment *segment,
+ struct scatterlist *sg, unsigned int offset)
+{
+ segment->sg = sg;
+ segment->sg_offset = offset;
+ segment->size = min(sg->length - offset,
+ segment->total_size - segment->total_copied);
+ segment->data = NULL;
+}
+
+/**
+ * iscsi_tcp_segment_map - map the current S/G page
+ * @segment: iscsi_segment
+ * @recv: 1 if called from recv path
+ *
+ * We only need to possibly kmap data if scatter lists are being used,
+ * because the iscsi passthrough and internal IO paths will never use high
+ * mem pages.
+ */
+static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
+{
+ struct scatterlist *sg;
+
+ if (segment->data != NULL || !segment->sg)
+ return;
+
+ sg = segment->sg;
+ BUG_ON(segment->sg_mapped);
+ BUG_ON(sg->length == 0);
+
+ /*
+ * If the page count is greater than one it is ok to send
+ * to the network layer's zero copy send path. If not we
+ * have to go the slow sendmsg path. We always map for the
+ * recv path.
+ */
+ if (page_count(sg_page(sg)) >= 1 && !recv)
+ return;
+
+ debug_tcp("iscsi_tcp_segment_map %s %p\n", recv ? "recv" : "xmit",
+ segment);
+ segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
+ segment->data = segment->sg_mapped + sg->offset + segment->sg_offset;
+}
+
+void iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
+{
+ debug_tcp("iscsi_tcp_segment_unmap %p\n", segment);
+
+ if (segment->sg_mapped) {
+ debug_tcp("iscsi_tcp_segment_unmap valid\n");
+ kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
+ segment->sg_mapped = NULL;
+ segment->data = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_segment_unmap);
+
+/*
+ * Splice the digest buffer into the segment so the digest bytes
+ * are transferred next
+ */
+static inline void
+iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest)
+{
+ segment->data = digest;
+ segment->digest_len = ISCSI_DIGEST_SIZE;
+ segment->total_size += ISCSI_DIGEST_SIZE;
+ segment->size = ISCSI_DIGEST_SIZE;
+ segment->copied = 0;
+ segment->sg = NULL;
+ segment->hash = NULL;
+}
+
+/**
+ * iscsi_tcp_segment_done - check whether the segment is complete
+ * @tcp_conn: iscsi tcp connection
+ * @segment: iscsi segment to check
+ * @recv: set to one if this is called from the recv path
+ * @copied: number of bytes copied
+ *
+ * Check if we're done receiving this segment. If the receive
+ * buffer is full but we expect more data, move on to the
+ * next entry in the scatterlist.
+ *
+ * If the amount of data we received isn't a multiple of 4,
+ * we will transparently receive the pad bytes, too.
+ *
+ * This function must be re-entrant.
+ */
+int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
+ struct iscsi_segment *segment, int recv,
+ unsigned copied)
+{
+ static unsigned char padbuf[ISCSI_PAD_LEN];
+ struct scatterlist sg;
+ unsigned int pad;
+
+ debug_tcp("copied %u %u size %u %s\n", segment->copied, copied,
+ segment->size, recv ? "recv" : "xmit");
+ if (segment->hash && copied) {
+ /*
+ * If a segment is kmapped we must unmap it before sending
+ * to the crypto layer since that will try to kmap it again.
+ */
+ iscsi_tcp_segment_unmap(segment);
+
+ if (!segment->data) {
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, sg_page(segment->sg), copied,
+ segment->copied + segment->sg_offset +
+ segment->sg->offset);
+ } else
+ sg_init_one(&sg, segment->data + segment->copied,
+ copied);
+ crypto_hash_update(segment->hash, &sg, copied);
+ }
+
+ segment->copied += copied;
+ if (segment->copied < segment->size) {
+ iscsi_tcp_segment_map(segment, recv);
+ return 0;
+ }
+
+ segment->total_copied += segment->copied;
+ segment->copied = 0;
+ segment->size = 0;
+
+ /* Unmap the current scatterlist page, if there is one. */
+ iscsi_tcp_segment_unmap(segment);
+
+ /* Do we have more scatterlist entries? */
+ debug_tcp("total copied %u total size %u\n", segment->total_copied,
+ segment->total_size);
+ if (segment->total_copied < segment->total_size) {
+ /* Proceed to the next entry in the scatterlist. */
+ iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg),
+ 0);
+ iscsi_tcp_segment_map(segment, recv);
+ BUG_ON(segment->size == 0);
+ return 0;
+ }
+
+ /* Do we need to handle padding? */
+ if (!(tcp_conn->iscsi_conn->session->tt->caps & CAP_PADDING_OFFLOAD)) {
+ pad = iscsi_padding(segment->total_copied);
+ if (pad != 0) {
+ debug_tcp("consume %d pad bytes\n", pad);
+ segment->total_size += pad;
+ segment->size = pad;
+ segment->data = padbuf;
+ return 0;
+ }
+ }
+
+ /*
+ * Set us up for transferring the data digest. hdr digest
+ * is completely handled in hdr done function.
+ */
+ if (segment->hash) {
+ crypto_hash_final(segment->hash, segment->digest);
+ iscsi_tcp_segment_splice_digest(segment,
+ recv ? segment->recv_digest : segment->digest);
+ return 0;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_segment_done);
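
The pad consumption above relies on iSCSI's 4-byte word alignment. A sketch of the pad math, mirroring what iscsi_padding() in iscsi_proto.h computes (ISCSI_PAD_LEN is 4):

static inline unsigned int example_iscsi_padding(unsigned int len)
{
	len &= (ISCSI_PAD_LEN - 1);	/* bytes past the last 4-byte word */
	if (len)
		len = ISCSI_PAD_LEN - len;
	return len;			/* e.g. 49 -> 3, 52 -> 0 */
}
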
+
+/**
+ * iscsi_tcp_segment_recv - copy data to segment
+ * @tcp_conn: the iSCSI TCP connection
+ * @segment: the buffer to copy to
+ * @ptr: data pointer
+ * @len: amount of data available
+ *
+ * This function copies up to @len bytes to the
+ * given buffer, and returns the number of bytes
+ * consumed, which can actually be less than @len.
+ *
+ * If hash digest is enabled, the function will update the
+ * hash while copying.
+ * Combining these two operations doesn't buy us a lot (yet),
+ * but in the future we could implement combined copy+crc,
+ * just the way we do for network layer checksums.
+ */
+static int
+iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
+ struct iscsi_segment *segment, const void *ptr,
+ unsigned int len)
+{
+ unsigned int copy = 0, copied = 0;
+
+ while (!iscsi_tcp_segment_done(tcp_conn, segment, 1, copy)) {
+ if (copied == len) {
+ debug_tcp("iscsi_tcp_segment_recv copied %d bytes\n",
+ len);
+ break;
+ }
+
+ copy = min(len - copied, segment->size - segment->copied);
+ debug_tcp("iscsi_tcp_segment_recv copying %d\n", copy);
+ memcpy(segment->data + segment->copied, ptr + copied, copy);
+ copied += copy;
+ }
+ return copied;
+}
+
+inline void
+iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen,
+ unsigned char digest[ISCSI_DIGEST_SIZE])
+{
+ struct scatterlist sg;
+
+ sg_init_one(&sg, hdr, hdrlen);
+ crypto_hash_digest(hash, &sg, hdrlen, digest);
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_dgst_header);
+
+static inline int
+iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
+ struct iscsi_segment *segment)
+{
+ if (!segment->digest_len)
+ return 1;
+
+ if (memcmp(segment->recv_digest, segment->digest,
+ segment->digest_len)) {
+ debug_scsi("digest mismatch\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Helper function to set up segment buffer
+ */
+static inline void
+__iscsi_segment_init(struct iscsi_segment *segment, size_t size,
+ iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+{
+ memset(segment, 0, sizeof(*segment));
+ segment->total_size = size;
+ segment->done = done;
+
+ if (hash) {
+ segment->hash = hash;
+ crypto_hash_init(hash);
+ }
+}
+
+inline void
+iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
+ size_t size, iscsi_segment_done_fn_t *done,
+ struct hash_desc *hash)
+{
+ __iscsi_segment_init(segment, size, done, hash);
+ segment->data = data;
+ segment->size = size;
+}
+EXPORT_SYMBOL_GPL(iscsi_segment_init_linear);
+
+inline int
+iscsi_segment_seek_sg(struct iscsi_segment *segment,
+ struct scatterlist *sg_list, unsigned int sg_count,
+ unsigned int offset, size_t size,
+ iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+{
+ struct scatterlist *sg;
+ unsigned int i;
+
+ debug_scsi("iscsi_segment_seek_sg offset %u size %llu\n",
+ offset, size);
+ __iscsi_segment_init(segment, size, done, hash);
+ for_each_sg(sg_list, sg, sg_count, i) {
+ debug_scsi("sg %d, len %u offset %u\n", i, sg->length,
+ sg->offset);
+ if (offset < sg->length) {
+ iscsi_tcp_segment_init_sg(segment, sg, offset);
+ return 0;
+ }
+ offset -= sg->length;
+ }
+
+ return ISCSI_ERR_DATA_OFFSET;
+}
+EXPORT_SYMBOL_GPL(iscsi_segment_seek_sg);
+
+/**
+ * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception
+ * @tcp_conn: iscsi connection to prep for
+ *
+ * This function always passes NULL for the hash argument, because when this
+ * function is called we do not yet know the final size of the header and want
+ * to delay the digest processing until we know that.
+ */
+void iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn)
+{
+ debug_tcp("iscsi_tcp_hdr_recv_prep(%p%s)\n", tcp_conn,
+ tcp_conn->iscsi_conn->hdrdgst_en ? ", digest enabled" : "");
+ iscsi_segment_init_linear(&tcp_conn->in.segment,
+ tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr),
+ iscsi_tcp_hdr_recv_done, NULL);
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_hdr_recv_prep);
+
+/*
+ * Handle incoming reply to any other type of command
+ */
+static int
+iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn,
+ struct iscsi_segment *segment)
+{
+ struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+ int rc = 0;
+
+ if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
+ return ISCSI_ERR_DATA_DGST;
+
+ rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr,
+ conn->data, tcp_conn->in.datalen);
+ if (rc)
+ return rc;
+
+ iscsi_tcp_hdr_recv_prep(tcp_conn);
+ return 0;
+}
+
+static void
+iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
+{
+ struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+ struct hash_desc *rx_hash = NULL;
+
+ if (conn->datadgst_en &&
+ !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
+ rx_hash = tcp_conn->rx_hash;
+
+ iscsi_segment_init_linear(&tcp_conn->in.segment,
+ conn->data, tcp_conn->in.datalen,
+ iscsi_tcp_data_recv_done, rx_hash);
+}
+
+/**
+ * iscsi_tcp_cleanup_task - free tcp_task resources
+ * @task: iscsi task
+ *
+ * Must be called with the session lock held.
+ */
+void iscsi_tcp_cleanup_task(struct iscsi_task *task)
+{
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+ struct iscsi_r2t_info *r2t;
+
+ /* nothing to do for mgmt or pending tasks */
+ if (!task->sc || task->state == ISCSI_TASK_PENDING)
+ return;
+
+ /* flush task's r2t queues */
+ while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
+ __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+ debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n");
+ }
+
+ r2t = tcp_task->r2t;
+ if (r2t != NULL) {
+ __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+ tcp_task->r2t = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_cleanup_task);
+
+/**
+ * iscsi_tcp_data_in - SCSI Data-In Response processing
+ * @conn: iscsi connection
+ * @task: scsi command task
+ */
+static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
+{
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+ struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
+ int datasn = be32_to_cpu(rhdr->datasn);
+ unsigned total_in_length = scsi_in(task->sc)->length;
+
+ iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);
+ if (tcp_conn->in.datalen == 0)
+ return 0;
+
+ if (tcp_task->exp_datasn != datasn) {
+ debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n",
+ __func__, tcp_task->exp_datasn, datasn);
+ return ISCSI_ERR_DATASN;
+ }
+
+ tcp_task->exp_datasn++;
+
+ tcp_task->data_offset = be32_to_cpu(rhdr->offset);
+ if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
+ debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
+ __func__, tcp_task->data_offset,
+ tcp_conn->in.datalen, total_in_length);
+ return ISCSI_ERR_DATA_OFFSET;
+ }
+
+ conn->datain_pdus_cnt++;
+ return 0;
+}
+
+/**
+ * iscsi_tcp_r2t_rsp - iSCSI R2T Response processing
+ * @conn: iscsi connection
+ * @task: scsi command task
+ */
+static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+{
+ struct iscsi_session *session = conn->session;
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
+ struct iscsi_r2t_info *r2t;
+ int r2tsn = be32_to_cpu(rhdr->r2tsn);
+ int rc;
+
+ if (tcp_conn->in.datalen) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "invalid R2t with datalen %d\n",
+ tcp_conn->in.datalen);
+ return ISCSI_ERR_DATALEN;
+ }
+
+ if (tcp_task->exp_datasn != r2tsn) {
+ debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
+ __func__, tcp_task->exp_datasn, r2tsn);
+ return ISCSI_ERR_R2TSN;
+ }
+
+ /* fill-in new R2T associated with the task */
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+
+ if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
+ iscsi_conn_printk(KERN_INFO, conn,
+ "dropping R2T itt %d in recovery.\n",
+ task->itt);
+ return 0;
+ }
+
+ rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
+ if (!rc) {
+ iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. "
+ "Target has sent more R2Ts than it "
+ "negotiated for or driver has has leaked.\n");
+ return ISCSI_ERR_PROTO;
+ }
+
+ r2t->exp_statsn = rhdr->statsn;
+ r2t->data_length = be32_to_cpu(rhdr->data_length);
+ if (r2t->data_length == 0) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "invalid R2T with zero data len\n");
+ __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+ return ISCSI_ERR_DATALEN;
+ }
+
+ if (r2t->data_length > session->max_burst)
+ debug_scsi("invalid R2T with data len %u and max burst %u."
+ "Attempting to execute request.\n",
+ r2t->data_length, session->max_burst);
+
+ r2t->data_offset = be32_to_cpu(rhdr->data_offset);
+ if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "invalid R2T with data len %u at offset %u "
+ "and total length %d\n", r2t->data_length,
+ r2t->data_offset, scsi_out(task->sc)->length);
+ __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+ return ISCSI_ERR_DATALEN;
+ }
+
+ r2t->ttt = rhdr->ttt; /* no flip */
+ r2t->datasn = 0;
+ r2t->sent = 0;
+
+ tcp_task->exp_datasn = r2tsn + 1;
+ __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
+ conn->r2t_pdus_cnt++;
+
+ iscsi_requeue_task(task);
+ return 0;
+}
+
+/*
+ * Handle incoming reply to DataIn command
+ */
+static int
+iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn,
+ struct iscsi_segment *segment)
+{
+ struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+ struct iscsi_hdr *hdr = tcp_conn->in.hdr;
+ int rc;
+
+ if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
+ return ISCSI_ERR_DATA_DGST;
+
+ /* check for non-exceptional status */
+ if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+ rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
+ if (rc)
+ return rc;
+ }
+
+ iscsi_tcp_hdr_recv_prep(tcp_conn);
+ return 0;
+}
+
+/**
+ * iscsi_tcp_hdr_dissect - process PDU header
+ * @conn: iSCSI connection
+ * @hdr: PDU header
+ *
+ * This function analyzes the header of the PDU received,
+ * and performs several sanity checks. If the PDU is accompanied
+ * by data, the receive buffer is set up to copy the incoming data
+ * to the correct location.
+ */
+static int
+iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+{
+ int rc = 0, opcode, ahslen;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_task *task;
+
+ /* verify PDU length */
+ tcp_conn->in.datalen = ntoh24(hdr->dlength);
+ if (tcp_conn->in.datalen > conn->max_recv_dlength) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "iscsi_tcp: datalen %d > %d\n",
+ tcp_conn->in.datalen, conn->max_recv_dlength);
+ return ISCSI_ERR_DATALEN;
+ }
+
+ /* Additional header segments. So far, we don't
+ * process additional headers.
+ */
+ ahslen = hdr->hlength << 2;
+
+ opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+ /* verify itt (itt encoding: age+cid+itt) */
+ rc = iscsi_verify_itt(conn, hdr->itt);
+ if (rc)
+ return rc;
+
+ debug_tcp("opcode 0x%x ahslen %d datalen %d\n",
+ opcode, ahslen, tcp_conn->in.datalen);
+
+ switch(opcode) {
+ case ISCSI_OP_SCSI_DATA_IN:
+ spin_lock(&conn->session->lock);
+ task = iscsi_itt_to_ctask(conn, hdr->itt);
+ if (!task)
+ rc = ISCSI_ERR_BAD_ITT;
+ else
+ rc = iscsi_tcp_data_in(conn, task);
+ if (rc) {
+ spin_unlock(&conn->session->lock);
+ break;
+ }
+
+ if (tcp_conn->in.datalen) {
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+ struct hash_desc *rx_hash = NULL;
+ struct scsi_data_buffer *sdb = scsi_in(task->sc);
+
+ /*
+ * Set up the copy of Data-In into the scsi_cmnd's
+ * scatterlist: we point the iscsi_segment at the next
+ * scatterlist entry to copy to. As we go along,
+ * we move on to the next scatterlist entry and
+ * update the digest per-entry.
+ */
+ if (conn->datadgst_en &&
+ !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
+ rx_hash = tcp_conn->rx_hash;
+
+ debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
+ "datalen=%d)\n", tcp_conn,
+ tcp_task->data_offset,
+ tcp_conn->in.datalen);
+ rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
+ sdb->table.sgl,
+ sdb->table.nents,
+ tcp_task->data_offset,
+ tcp_conn->in.datalen,
+ iscsi_tcp_process_data_in,
+ rx_hash);
+ spin_unlock(&conn->session->lock);
+ return rc;
+ }
+ rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
+ spin_unlock(&conn->session->lock);
+ break;
+ case ISCSI_OP_SCSI_CMD_RSP:
+ if (tcp_conn->in.datalen) {
+ iscsi_tcp_data_recv_prep(tcp_conn);
+ return 0;
+ }
+ rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
+ break;
+ case ISCSI_OP_R2T:
+ spin_lock(&conn->session->lock);
+ task = iscsi_itt_to_ctask(conn, hdr->itt);
+ if (!task)
+ rc = ISCSI_ERR_BAD_ITT;
+ else if (ahslen)
+ rc = ISCSI_ERR_AHSLEN;
+ else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
+ rc = iscsi_tcp_r2t_rsp(conn, task);
+ else
+ rc = ISCSI_ERR_PROTO;
+ spin_unlock(&conn->session->lock);
+ break;
+ case ISCSI_OP_LOGIN_RSP:
+ case ISCSI_OP_TEXT_RSP:
+ case ISCSI_OP_REJECT:
+ case ISCSI_OP_ASYNC_EVENT:
+ /*
+ * It is possible that we could get a PDU with a buffer larger
+ * than 8K, but there are no targets that currently do this.
+ * For now we fail until we find a vendor that needs it.
+ */
+ if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "iscsi_tcp: received buffer of "
+ "len %u but conn buffer is only %u "
+ "(opcode %0x)\n",
+ tcp_conn->in.datalen,
+ ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
+ rc = ISCSI_ERR_PROTO;
+ break;
+ }
+
+ /* If there's data coming in with the response,
+ * receive it to the connection's buffer.
+ */
+ if (tcp_conn->in.datalen) {
+ iscsi_tcp_data_recv_prep(tcp_conn);
+ return 0;
+ }
+ /* fall through */
+ case ISCSI_OP_LOGOUT_RSP:
+ case ISCSI_OP_NOOP_IN:
+ case ISCSI_OP_SCSI_TMFUNC_RSP:
+ rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
+ break;
+ default:
+ rc = ISCSI_ERR_BAD_OPCODE;
+ break;
+ }
+
+ if (rc == 0) {
+ /* Anything that comes with data should have
+ * been handled above. */
+ if (tcp_conn->in.datalen)
+ return ISCSI_ERR_PROTO;
+ iscsi_tcp_hdr_recv_prep(tcp_conn);
+ }
+
+ return rc;
+}
+
+/**
+ * iscsi_tcp_hdr_recv_done - process PDU header
+ * @tcp_conn: iscsi tcp connection
+ * @segment: the header segment that was just received
+ *
+ * This is the callback invoked when the PDU header has
+ * been received. If the header is followed by additional
+ * header segments, we go back for more data.
+ */
+static int
+iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
+ struct iscsi_segment *segment)
+{
+ struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+ struct iscsi_hdr *hdr;
+
+ /* Check if there are additional header segments
+ * *prior* to computing the digest, because we
+ * may need to go back to the caller for more.
+ */
+ hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf;
+ if (segment->copied == sizeof(struct iscsi_hdr) && hdr->hlength) {
+ /* Bump the header length - the caller will
+ * just loop around and get the AHS for us, and
+ * call again. */
+ unsigned int ahslen = hdr->hlength << 2;
+
+ /* Make sure we don't overflow */
+ if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf))
+ return ISCSI_ERR_AHSLEN;
+
+ segment->total_size += ahslen;
+ segment->size += ahslen;
+ return 0;
+ }
+
+ /* We're done processing the header. See if we're doing
+ * header digests; if so, set up the recv_digest buffer
+ * and go back for more. */
+ if (conn->hdrdgst_en &&
+ !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD)) {
+ if (segment->digest_len == 0) {
+ /*
+ * Even if we offload the digest processing we
+ * splice it in so we can increment the skb/segment
+ * counters in preparation for the data segment.
+ */
+ iscsi_tcp_segment_splice_digest(segment,
+ segment->recv_digest);
+ return 0;
+ }
+
+ iscsi_tcp_dgst_header(tcp_conn->rx_hash, hdr,
+ segment->total_copied - ISCSI_DIGEST_SIZE,
+ segment->digest);
+
+ if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
+ return ISCSI_ERR_HDR_DGST;
+ }
+
+ tcp_conn->in.hdr = hdr;
+ return iscsi_tcp_hdr_dissect(conn, hdr);
+}
+
+/**
+ * iscsi_tcp_recv_segment_is_hdr - tests if we are reading in a header
+ * @tcp_conn: iscsi tcp conn
+ *
+ * Returns non-zero if we are currently processing or set up to process
+ * a header.
+ */
+inline int iscsi_tcp_recv_segment_is_hdr(struct iscsi_tcp_conn *tcp_conn)
+{
+ return tcp_conn->in.segment.done == iscsi_tcp_hdr_recv_done;
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_recv_segment_is_hdr);
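
A partial-offload transport can use this predicate to route header bytes through the software dissection path while letting hardware-placed data skip the copy. A hedged sketch; the dispatch policy is illustrative and assumes the hardware really did place the data segment:

static int example_rx_dispatch(struct iscsi_conn *conn, struct sk_buff *skb,
			       unsigned int offset)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	bool offloaded = !iscsi_tcp_recv_segment_is_hdr(tcp_conn);
	int status;

	/* headers always go through software; data may already have been
	 * placed by the hardware, in which case we only advance state */
	return iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
}
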
+
+/**
+ * iscsi_tcp_recv_skb - Process skb
+ * @conn: iscsi connection
+ * @skb: network buffer with header and/or data segment
+ * @offset: offset in skb
+ * @offloaded: bool indicating if transfer was offloaded
+ * @status: set to the status of the transfer
+ *
+ * Returns the number of bytes consumed. The status of the
+ * transfer is returned in @status.
+ */
+int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
+ unsigned int offset, bool offloaded, int *status)
+{
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_segment *segment = &tcp_conn->in.segment;
+ struct skb_seq_state seq;
+ unsigned int consumed = 0;
+ int rc = 0;
+
+ debug_tcp("in %d bytes\n", skb->len - offset);
+
+ if (unlikely(conn->suspend_rx)) {
+ debug_tcp("conn %d Rx suspended!\n", conn->id);
+ *status = ISCSI_TCP_SUSPENDED;
+ return 0;
+ }
+
+ if (offloaded) {
+ segment->total_copied = segment->total_size;
+ goto segment_done;
+ }
+
+ skb_prepare_seq_read(skb, offset, skb->len, &seq);
+ while (1) {
+ unsigned int avail;
+ const u8 *ptr;
+
+ avail = skb_seq_read(consumed, &ptr, &seq);
+ if (avail == 0) {
+ debug_tcp("no more data avail. Consumed %d\n",
+ consumed);
+ *status = ISCSI_TCP_SKB_DONE;
+ skb_abort_seq_read(&seq);
+ goto skb_done;
+ }
+ BUG_ON(segment->copied >= segment->size);
+
+ debug_tcp("skb %p ptr=%p avail=%u\n", skb, ptr, avail);
+ rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail);
+ BUG_ON(rc == 0);
+ consumed += rc;
+
+ if (segment->total_copied >= segment->total_size) {
+ skb_abort_seq_read(&seq);
+ goto segment_done;
+ }
+ }
+
+segment_done:
+ *status = ISCSI_TCP_SEGMENT_DONE;
+ debug_tcp("segment done\n");
+ rc = segment->done(tcp_conn, segment);
+ if (rc != 0) {
+ *status = ISCSI_TCP_CONN_ERR;
+ debug_tcp("Error receiving PDU, errno=%d\n", rc);
+ iscsi_conn_failure(conn, rc);
+ return 0;
+ }
+ /* The done() functions sets up the next segment. */
+
+skb_done:
+ conn->rxdata_octets += consumed;
+ return consumed;
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_recv_skb);
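
A software-TCP user of this library would typically drive it from the socket's data_ready callback through tcp_read_sock(); a hedged sketch of the read actor (the conn lookup via rd_desc is the conventional pattern, the function name is illustrative):

static int example_recv_actor(read_descriptor_t *rd_desc, struct sk_buff *skb,
			      unsigned int offset, size_t len)
{
	struct iscsi_conn *conn = rd_desc->arg.data;
	unsigned int consumed;
	int status;

	consumed = iscsi_tcp_recv_skb(conn, skb, offset, false, &status);
	if (status == ISCSI_TCP_CONN_ERR)
		return 0;	/* connection failed; stop the read loop */
	return consumed;	/* tcp_read_sock() advances by this much */
}
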
+
+/**
+ * iscsi_tcp_task_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ * @task: iscsi task (task->sc is NULL for mgmt tasks)
+ */
+int iscsi_tcp_task_init(struct iscsi_task *task)
+{
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+ struct iscsi_conn *conn = task->conn;
+ struct scsi_cmnd *sc = task->sc;
+ int err;
+
+ if (!sc) {
+ /*
+ * mgmt tasks do not have a scatterlist since they come
+ * in from the iscsi interface.
+ */
+ debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id,
+ task->itt);
+
+ return conn->session->tt->init_pdu(task, 0, task->data_count);
+ }
+
+ BUG_ON(__kfifo_len(tcp_task->r2tqueue));
+ tcp_task->exp_datasn = 0;
+
+ /* Prepare PDU, optionally w/ immediate data */
+ debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n",
+ conn->id, task->itt, task->imm_count,
+ task->unsol_r2t.data_length);
+
+ err = conn->session->tt->init_pdu(task, 0, task->imm_count);
+ if (err)
+ return err;
+ task->imm_count = 0;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_task_init);
+
+static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
+{
+ struct iscsi_session *session = task->conn->session;
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+ struct iscsi_r2t_info *r2t = NULL;
+
+ if (iscsi_task_has_unsol_data(task))
+ r2t = &task->unsol_r2t;
+ else {
+ spin_lock_bh(&session->lock);
+ if (tcp_task->r2t) {
+ r2t = tcp_task->r2t;
+ /* Continue with this R2T? */
+ if (r2t->data_length <= r2t->sent) {
+ debug_scsi(" done with r2t %p\n", r2t);
+ __kfifo_put(tcp_task->r2tpool.queue,
+ (void *)&tcp_task->r2t,
+ sizeof(void *));
+ tcp_task->r2t = r2t = NULL;
+ }
+ }
+
+ if (r2t == NULL) {
+ __kfifo_get(tcp_task->r2tqueue,
+ (void *)&tcp_task->r2t, sizeof(void *));
+ r2t = tcp_task->r2t;
+ }
+ spin_unlock_bh(&session->lock);
+ }
+
+ return r2t;
+}
+
+/**
+ * iscsi_tcp_task_xmit - xmit normal PDU task
+ * @task: iscsi command task
+ *
+ * We're expected to return 0 when everything was transmitted successfully,
+ * -EAGAIN if there's still data in the queue, or != 0 for any other kind
+ * of error.
+ */
+int iscsi_tcp_task_xmit(struct iscsi_task *task)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_r2t_info *r2t;
+ int rc = 0;
+
+flush:
+ /* Flush any pending data first. */
+ rc = session->tt->xmit_pdu(task);
+ if (rc < 0)
+ return rc;
+
+ /* mgmt command */
+ if (!task->sc) {
+ if (task->hdr->itt == RESERVED_ITT)
+ iscsi_put_task(task);
+ return 0;
+ }
+
+ /* Are we done already? */
+ if (task->sc->sc_data_direction != DMA_TO_DEVICE)
+ return 0;
+
+ r2t = iscsi_tcp_get_curr_r2t(task);
+ if (r2t == NULL) {
+ /* Waiting for more R2Ts to arrive. */
+ debug_tcp("no R2Ts yet\n");
+ return 0;
+ }
+
+ rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_DATA_OUT);
+ if (rc)
+ return rc;
+ iscsi_prep_data_out_pdu(task, r2t, (struct iscsi_data *) task->hdr);
+
+ debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
+ r2t, r2t->datasn - 1, task->hdr->itt,
+ r2t->data_offset + r2t->sent, r2t->data_count);
+
+ rc = conn->session->tt->init_pdu(task, r2t->data_offset + r2t->sent,
+ r2t->data_count);
+ if (rc)
+ return rc;
+ r2t->sent += r2t->data_count;
+ goto flush;
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_task_xmit);
+
+struct iscsi_cls_conn *
+iscsi_tcp_conn_setup(struct iscsi_cls_session *cls_session, int dd_data_size,
+ uint32_t conn_idx)
+{
+ struct iscsi_conn *conn;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_tcp_conn *tcp_conn;
+
+ cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+ /*
+ * due to strange issues with iser these are not set
+ * in iscsi_conn_setup
+ */
+ conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
+
+ tcp_conn = conn->dd_data;
+ tcp_conn->iscsi_conn = conn;
+
+ tcp_conn->dd_data = kzalloc(dd_data_size, GFP_KERNEL);
+ if (!tcp_conn->dd_data) {
+ iscsi_conn_teardown(cls_conn);
+ return NULL;
+ }
+ return cls_conn;
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_conn_setup);
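
Transports stack their own per-connection state on top of this by passing its size as dd_data_size; a hedged sketch (struct example_tcp_conn is illustrative):

struct example_tcp_conn {		/* hypothetical LLD private state */
	struct socket *sock;
};

static struct iscsi_cls_conn *
example_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
{
	return iscsi_tcp_conn_setup(cls_session,
				    sizeof(struct example_tcp_conn), cid);
}
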
+
+void iscsi_tcp_conn_teardown(struct iscsi_cls_conn *cls_conn)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+ kfree(tcp_conn->dd_data);
+ iscsi_conn_teardown(cls_conn);
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_conn_teardown);
+
+int iscsi_tcp_r2tpool_alloc(struct iscsi_session *session)
+{
+ int i;
+ int cmd_i;
+
+ /*
+ * initialize per-task: R2T pool and xmit queue
+ */
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+ struct iscsi_task *task = session->cmds[cmd_i];
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+
+ /*
+ * Pre-allocate twice as many R2Ts as negotiated, to handle
+ * the race where the target acks DataOut faster than
+ * data_xmit() can replenish the r2tqueue (e.g. with
+ * max_r2t = 4 the pool holds 8 descriptors).
+ */
+
+ /* R2T pool */
+ if (iscsi_pool_init(&tcp_task->r2tpool,
+ session->max_r2t * 2, NULL,
+ sizeof(struct iscsi_r2t_info))) {
+ goto r2t_alloc_fail;
+ }
+
+ /* R2T xmit queue */
+ tcp_task->r2tqueue = kfifo_alloc(
+ session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
+ if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
+ iscsi_pool_free(&tcp_task->r2tpool);
+ goto r2t_alloc_fail;
+ }
+ }
+
+ return 0;
+
+r2t_alloc_fail:
+ for (i = 0; i < cmd_i; i++) {
+ struct iscsi_task *task = session->cmds[i];
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+
+ kfifo_free(tcp_task->r2tqueue);
+ iscsi_pool_free(&tcp_task->r2tpool);
+ }
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_r2tpool_alloc);
+
+void iscsi_tcp_r2tpool_free(struct iscsi_session *session)
+{
+ int i;
+
+ for (i = 0; i < session->cmds_max; i++) {
+ struct iscsi_task *task = session->cmds[i];
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+
+ kfifo_free(tcp_task->r2tqueue);
+ iscsi_pool_free(&tcp_task->r2tpool);
+ }
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_r2tpool_free);
+
+void iscsi_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+ struct iscsi_stats *stats)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+
+ stats->txdata_octets = conn->txdata_octets;
+ stats->rxdata_octets = conn->rxdata_octets;
+ stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
+ stats->dataout_pdus = conn->dataout_pdus_cnt;
+ stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
+ stats->datain_pdus = conn->datain_pdus_cnt;
+ stats->r2t_pdus = conn->r2t_pdus_cnt;
+ stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
+ stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
+}
+EXPORT_SYMBOL_GPL(iscsi_tcp_conn_get_stats);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index e0e018d12653..dcba267db711 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -29,12 +29,21 @@ struct lpfc_sli2_slim;
#define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact
the NameServer before giving up. */
#define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */
-#define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */
-#define LPFC_MAX_SG_SEG_CNT 256 /* sg element count per scsi cmnd */
+#define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */
+#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
+#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
+#define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
#define LPFC_VNAME_LEN 100 /* vport symbolic name length */
-
+#define LPFC_TGTQ_INTERVAL 40000 /* Min amount of time between tgt
+ queue depth change in millisecs */
+#define LPFC_TGTQ_RAMPUP_PCENT 5 /* Target queue rampup in percentage */
+#define LPFC_MIN_TGT_QDEPTH 100
+#define LPFC_MAX_TGT_QDEPTH 0xFFFF
+
+#define LPFC_MAX_BUCKET_COUNT 20 /* Maximum no. of buckets for stat data
+ collection. */
/*
* Following time intervals are used for adjusting SCSI device
* queue depths when there are driver resource errors or Firmware
@@ -49,6 +58,9 @@ struct lpfc_sli2_slim;
#define LPFC_HB_MBOX_INTERVAL 5 /* Heart beat interval in seconds. */
#define LPFC_HB_MBOX_TIMEOUT 30 /* Heart beat timeout in seconds. */
+/* Error Attention event polling interval */
+#define LPFC_ERATT_POLL_INTERVAL 5 /* EATT poll interval in seconds */
+
/* Define macros for 64 bit support */
#define putPaddrLow(addr) ((uint32_t) (0xffffffff & (u64)(addr)))
#define putPaddrHigh(addr) ((uint32_t) (0xffffffff & (((u64)(addr))>>32)))
@@ -60,6 +72,9 @@ struct lpfc_sli2_slim;
#define MAX_HBAEVT 32
+/* Number of MSI-X vectors the driver uses */
+#define LPFC_MSIX_VECTORS 2
+
/* lpfc wait event data ready flag */
#define LPFC_DATA_READY (1<<0)
@@ -341,8 +356,6 @@ struct lpfc_vport {
uint8_t load_flag;
#define FC_LOADING 0x1 /* HBA in process of loading drvr */
#define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */
- char *vname; /* Application assigned name */
-
/* Vport Config Parameters */
uint32_t cfg_scan_down;
uint32_t cfg_lun_queue_depth;
@@ -357,18 +370,21 @@ struct lpfc_vport {
uint32_t cfg_log_verbose;
uint32_t cfg_max_luns;
uint32_t cfg_enable_da_id;
+ uint32_t cfg_max_scsicmpl_time;
uint32_t dev_loss_tmo_changed;
struct fc_vport *fc_vport;
-#ifdef CONFIG_LPFC_DEBUG_FS
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct dentry *debug_disc_trc;
struct dentry *debug_nodelist;
struct dentry *vport_debugfs_root;
struct lpfc_debugfs_trc *disc_trc;
atomic_t disc_trc_cnt;
#endif
+ uint8_t stat_data_enabled;
+ uint8_t stat_data_blocked;
};
struct hbq_s {
@@ -407,10 +423,12 @@ struct lpfc_hba {
struct lpfc_sli sli;
uint32_t sli_rev; /* SLI2 or SLI3 */
uint32_t sli3_options; /* Mask of enabled SLI3 options */
-#define LPFC_SLI3_ENABLED 0x01
-#define LPFC_SLI3_HBQ_ENABLED 0x02
-#define LPFC_SLI3_NPIV_ENABLED 0x04
-#define LPFC_SLI3_VPORT_TEARDOWN 0x08
+#define LPFC_SLI3_HBQ_ENABLED 0x01
+#define LPFC_SLI3_NPIV_ENABLED 0x02
+#define LPFC_SLI3_VPORT_TEARDOWN 0x04
+#define LPFC_SLI3_CRP_ENABLED 0x08
+#define LPFC_SLI3_INB_ENABLED 0x10
+#define LPFC_SLI3_BG_ENABLED 0x20
uint32_t iocb_cmd_size;
uint32_t iocb_rsp_size;
@@ -422,10 +440,20 @@ struct lpfc_hba {
#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
#define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */
- struct lpfc_sli2_slim *slim2p;
- struct lpfc_dmabuf hbqslimp;
+ uint32_t hba_flag; /* hba generic flags */
+#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
- dma_addr_t slim2p_mapping;
+ struct lpfc_dmabuf slim2p;
+
+ MAILBOX_t *mbox;
+ uint32_t *inb_ha_copy;
+ uint32_t *inb_counter;
+ uint32_t inb_last_counter;
+ uint32_t ha_copy;
+ struct _PCB *pcb;
+ struct _IOCB *IOCBs;
+
+ struct lpfc_dmabuf hbqslimp;
uint16_t pci_cfg_value;
@@ -474,12 +502,14 @@ struct lpfc_hba {
uint32_t cfg_poll_tmo;
uint32_t cfg_use_msi;
uint32_t cfg_sg_seg_cnt;
+ uint32_t cfg_prot_sg_seg_cnt;
uint32_t cfg_sg_dma_buf_size;
uint64_t cfg_soft_wwnn;
uint64_t cfg_soft_wwpn;
uint32_t cfg_hba_queue_depth;
uint32_t cfg_enable_hba_reset;
uint32_t cfg_enable_hba_heartbeat;
+ uint32_t cfg_enable_bg;
lpfc_vpd_t vpd; /* vital product data */
@@ -492,7 +522,7 @@ struct lpfc_hba {
wait_queue_head_t work_waitq;
struct task_struct *worker_thread;
- long data_flags;
+ unsigned long data_flags;
uint32_t hbq_in_use; /* HBQs in use flag */
struct list_head hbqbuf_in_list; /* in-flight hbq buffer list */
@@ -514,6 +544,7 @@ struct lpfc_hba {
void __iomem *HCregaddr; /* virtual address for host ctl reg */
struct lpfc_hgp __iomem *host_gp; /* Host side get/put pointers */
+ struct lpfc_pgp *port_gp;
uint32_t __iomem *hbq_put; /* Address in SLIM to HBQ put ptrs */
uint32_t *hbq_get; /* Host mem address of HBQ get ptrs */
@@ -536,6 +567,7 @@ struct lpfc_hba {
uint8_t soft_wwn_enable;
struct timer_list fcp_poll_timer;
+ struct timer_list eratt_poll;
/*
* stat counters
@@ -543,6 +575,9 @@ struct lpfc_hba {
uint64_t fc4InputRequests;
uint64_t fc4OutputRequests;
uint64_t fc4ControlRequests;
+ uint64_t bg_guard_err_cnt;
+ uint64_t bg_apptag_err_cnt;
+ uint64_t bg_reftag_err_cnt;
struct lpfc_sysfs_mbox sysfs_mbox;
@@ -565,7 +600,9 @@ struct lpfc_hba {
struct fc_host_statistics link_stats;
enum intr_type_t intr_type;
- struct msix_entry msix_entries[1];
+ uint32_t intr_mode;
+#define LPFC_INTR_ERROR 0xFFFFFFFF
+ struct msix_entry msix_entries[LPFC_MSIX_VECTORS];
struct list_head port_list;
struct lpfc_vport *pport; /* physical lpfc_vport pointer */
@@ -584,12 +621,14 @@ struct lpfc_hba {
unsigned long last_rsrc_error_time;
unsigned long last_ramp_down_time;
unsigned long last_ramp_up_time;
-#ifdef CONFIG_LPFC_DEBUG_FS
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct dentry *hba_debugfs_root;
atomic_t debugfs_vport_count;
struct dentry *debug_hbqinfo;
struct dentry *debug_dumpHostSlim;
struct dentry *debug_dumpHBASlim;
+ struct dentry *debug_dumpData; /* BlockGuard BPL*/
+ struct dentry *debug_dumpDif; /* BlockGuard BPL*/
struct dentry *debug_slow_ring_trc;
struct lpfc_debugfs_trc *slow_ring_trc;
atomic_t slow_ring_trc_cnt;
@@ -605,6 +644,7 @@ struct lpfc_hba {
unsigned long last_completion_time;
struct timer_list hb_tmofunc;
uint8_t hb_outstanding;
+ enum hba_temp_state over_temp_state;
/* ndlp reference management */
spinlock_t ndlp_lock;
/*
@@ -613,7 +653,19 @@ struct lpfc_hba {
*/
#define QUE_BUFTAG_BIT (1<<31)
uint32_t buffer_tag_count;
- enum hba_temp_state over_temp_state;
+ int wait_4_mlo_maint_flg;
+ wait_queue_head_t wait_4_mlo_m_q;
+ /* data structure used for latency data collection */
+#define LPFC_NO_BUCKET 0
+#define LPFC_LINEAR_BUCKET 1
+#define LPFC_POWER2_BUCKET 2
+ uint8_t bucket_type;
+ uint32_t bucket_base;
+ uint32_t bucket_step;
+
+/* Maximum number of events that can be outstanding at any time */
+#define LPFC_MAX_EVT_COUNT 512
+ atomic_t fast_event_count;
};
static inline struct Scsi_Host *
@@ -650,15 +702,25 @@ lpfc_worker_wake_up(struct lpfc_hba *phba)
return;
}
-#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */
-#define FC_REG_TEMPERATURE_EVENT 0x20 /* Register for temperature
- event */
+static inline void
+lpfc_sli_read_hs(struct lpfc_hba *phba)
+{
+ /*
+ * There was a link/board error. Read the status register to retrieve
+ * the error event and process it.
+ */
+ phba->sli.slistat.err_attn_event++;
+
+ /* Save status info */
+ phba->work_hs = readl(phba->HSregaddr);
+ phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
+ phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
+
+ /* Clear chip Host Attention error bit */
+ writel(HA_ERATT, phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+ phba->pport->stopped = 1;
+
+ return;
+}
-struct temp_event {
- uint32_t event_type;
- uint32_t event_code;
- uint32_t data;
-};
-#define LPFC_CRIT_TEMP 0x1
-#define LPFC_THRESHOLD_TEMP 0x2
-#define LPFC_NORMAL_TEMP 0x3
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 37bfa0bd1dae..40cf0f4f327f 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -32,6 +32,7 @@
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@@ -49,6 +50,21 @@
#define LPFC_LINK_SPEED_BITMAP 0x00000117
#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8"
+/**
+ * lpfc_jedec_to_ascii: Hex to ascii converter according to JEDEC rules.
+ * @incr: integer to convert.
+ * @hdw: ascii string holding converted integer plus a string terminator.
+ *
+ * Description:
+ * JEDEC Joint Electron Device Engineering Council.
+ * Convert a 32 bit integer composed of 8 nibbles into an 8 byte ascii
+ * character string. The string is then terminated with a NULL in byte 9.
+ * Hex 0-9 becomes ascii '0' to '9'.
+ * Hex a-f becomes ascii 'a' to 'f' lowercase.
+ *
+ * Notes:
+ * Coded for 32 bit integers only.
+ **/
static void
lpfc_jedec_to_ascii(int incr, char hdw[])
{
@@ -65,6 +81,14 @@ lpfc_jedec_to_ascii(int incr, char hdw[])
return;
}
+/**
+ * lpfc_drvr_version_show: Return the Emulex driver string with version number.
+ * @dev: class unused variable.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the module description text.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -73,6 +97,69 @@ lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
}
static ssize_t
+lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ if (phba->cfg_enable_bg)
+ if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
+ return snprintf(buf, PAGE_SIZE, "BlockGuard Enabled\n");
+ else
+ return snprintf(buf, PAGE_SIZE,
+ "BlockGuard Not Supported\n");
+ else
+ return snprintf(buf, PAGE_SIZE,
+ "BlockGuard Disabled\n");
+}
+
+static ssize_t
+lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ (unsigned long long)phba->bg_guard_err_cnt);
+}
+
+static ssize_t
+lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ (unsigned long long)phba->bg_apptag_err_cnt);
+}
+
+static ssize_t
+lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ (unsigned long long)phba->bg_reftag_err_cnt);
+}
+
+/**
+ * lpfc_info_show: Return some pci info about the host in ascii.
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted text from lpfc_info().
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
lpfc_info_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -81,6 +168,14 @@ lpfc_info_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host));
}
+/**
+ * lpfc_serialnum_show: Return the hba serial number in ascii.
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted text serial number.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_serialnum_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -92,6 +187,18 @@ lpfc_serialnum_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber);
}
+/**
+ * lpfc_temp_sensor_show: Return the temperature sensor level.
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted support level.
+ *
+ * Description:
+ * Returns a number indicating the temperature sensor level currently
+ * supported, zero or one in ascii.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -102,6 +209,14 @@ lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%d\n",phba->temp_sensor_support);
}
+/**
+ * lpfc_modeldesc_show: Return the model description of the hba.
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the scsi vpd model description.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -113,6 +228,14 @@ lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc);
}
+/**
+ * lpfc_modelname_show: Return the model name of the hba.
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the scsi vpd model name.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_modelname_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -124,6 +247,14 @@ lpfc_modelname_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName);
}
+/**
+ * lpfc_programtype_show: Return the program type of the hba.
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the scsi vpd program type.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -135,6 +266,33 @@ lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType);
}
+/**
+ * lpfc_mlomgmt_show: Return the Menlo Maintenance sli flag.
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the Menlo Maintenance sli flag.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ (phba->sli.sli_flag & LPFC_MENLO_MAINT));
+}
+
+/**
+ * lpfc_vportnum_show: Return the port number in ascii of the hba.
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the hba port number in ascii.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_vportnum_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -146,6 +304,14 @@ lpfc_vportnum_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port);
}
+/**
+ * lpfc_fwrev_show: Return the firmware rev running in the hba.
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the firmware revision and sli revision.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -159,6 +325,14 @@ lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%s, sli-%d\n", fwrev, phba->sli_rev);
}
+/**
+ * lpfc_hdw_show: Return the jedec information about the hba.
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the jedec hardware information in ascii.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -171,6 +345,15 @@ lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
return snprintf(buf, PAGE_SIZE, "%s\n", hdw);
}
+
+/**
+ * lpfc_option_rom_version_show: Return the adapter ROM FCode version.
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the ROM and FCode ascii strings.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -181,6 +364,18 @@ lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
}
+
+/**
+ * lpfc_state_show: Return the link state of the port.
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains text describing the state of the link.
+ *
+ * Notes:
+ * The switch statement has no default so zero will be returned.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -232,8 +427,10 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
"Unknown\n");
break;
}
-
- if (phba->fc_topology == TOPOLOGY_LOOP) {
+ if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ " Menlo Maint Mode\n");
+ else if (phba->fc_topology == TOPOLOGY_LOOP) {
if (vport->fc_flag & FC_PUBLIC_LOOP)
len += snprintf(buf + len, PAGE_SIZE-len,
" Public Loop\n");
@@ -253,6 +450,18 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
return len;
}
+/**
+ * lpfc_num_discovered_ports_show: Return sum of mapped and unmapped vports.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the sum of fc mapped and unmapped.
+ *
+ * Description:
+ * Returns the ascii text number of the sum of the fc mapped and unmapped
+ * vport counts.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_num_discovered_ports_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -264,7 +473,20 @@ lpfc_num_discovered_ports_show(struct device *dev,
vport->fc_map_cnt + vport->fc_unmap_cnt);
}
-
+/**
+ * lpfc_issue_lip: Misnomer, name carried over from long ago.
+ * @shost: Scsi_Host pointer.
+ *
+ * Description:
+ * Bring the link down gracefully then re-init the link. The firmware will
+ * re-init the fiber channel interface as required. Does not issue a LIP.
+ *
+ * Returns:
+ * -EPERM port offline or management commands are being blocked
+ * -ENOMEM cannot allocate memory for the mailbox command
+ * -EIO error sending the mailbox command
+ * zero for success
+ **/
static int
lpfc_issue_lip(struct Scsi_Host *shost)
{
@@ -306,6 +528,21 @@ lpfc_issue_lip(struct Scsi_Host *shost)
return 0;
}
+/**
+ * lpfc_do_offline: Issues a mailbox command to bring the link down.
+ * @phba: lpfc_hba pointer.
+ * @type: LPFC_EVT_OFFLINE, LPFC_EVT_WARM_START, LPFC_EVT_KILL.
+ *
+ * Notes:
+ * Assumes any error from lpfc_do_offline() will be negative.
+ * Can wait up to 5 seconds for the port ring buffer count
+ * to reach zero; prints a warning if it is not zero and continues.
+ * lpfc_workq_post_event() returns a non-zero return code if the call fails.
+ *
+ * Returns:
+ * -EIO error posting the event
+ * zero for success
+ **/
static int
lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
{
@@ -353,6 +590,22 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
return 0;
}
+/**
+ * lpfc_selective_reset: Offline then onlines the port.
+ * @phba: lpfc_hba pointer.
+ *
+ * Description:
+ * If the port is configured to allow a reset then the hba is brought
+ * offline then online.
+ *
+ * Notes:
+ * Assumes any error from lpfc_do_offline() will be negative.
+ *
+ * Returns:
+ * lpfc_do_offline() return code if not zero
+ * -EIO reset not configured or error posting the event
+ * zero for success
+ **/
static int
lpfc_selective_reset(struct lpfc_hba *phba)
{
@@ -378,6 +631,27 @@ lpfc_selective_reset(struct lpfc_hba *phba)
return 0;
}
+/**
+ * lpfc_issue_reset: Selectively resets an adapter.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing the string "selective".
+ * @count: unused variable.
+ *
+ * Description:
+ * If the buf contains the string "selective" then lpfc_selective_reset()
+ * is called to perform the reset.
+ *
+ * Notes:
+ * Assumes any error from lpfc_selective_reset() will be negative.
+ * If lpfc_selective_reset() returns zero then the length of the buffer
+ * is returned, which indicates success.
+ *
+ * Returns:
+ * -EINVAL if the buffer does not contain the string "selective"
+ * length of buf if the call to lpfc_selective_reset() succeeds
+ * return value of lpfc_selective_reset() if the call fails
+**/
static ssize_t
lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -397,6 +671,14 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
return status;
}
+/**
+ * lpfc_nport_evt_cnt_show: Return the number of nport events.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the ascii number of nport events.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -408,6 +690,14 @@ lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
}
+/**
+ * lpfc_board_mode_show: Return the state of the board.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the state of the adapter.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_board_mode_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -429,6 +719,19 @@ lpfc_board_mode_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%s\n", state);
}
+/**
+ * lpfc_board_mode_store: Puts the hba in online, offline, warm or error state.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing one of the strings "online", "offline", "warm" or "error".
+ * @count: unused variable.
+ *
+ * Returns:
+ * -EACCES if enable hba reset not enabled
+ * -EINVAL if the buffer does not contain a valid string (see above)
+ * -EIO if lpfc_workq_post_event() or lpfc_do_offline() fails
+ * buf length greater than zero indicates success
+ **/
static ssize_t
lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -462,6 +765,24 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
return -EIO;
}
+/**
+ * lpfc_get_hba_info: Return various bits of information about the adapter.
+ * @phba: pointer to the adapter structure.
+ * @mxri: max xri count.
+ * @axri: available xri count.
+ * @mrpi: max rpi count.
+ * @arpi: available rpi count.
+ * @mvpi: max vpi count.
+ * @avpi: available vpi count.
+ *
+ * Description:
+ * If an integer pointer for a count is not NULL then the value for that
+ * count is returned.
+ *
+ * Returns:
+ * zero on error
+ * one for success
+ **/
static int
lpfc_get_hba_info(struct lpfc_hba *phba,
uint32_t *mxri, uint32_t *axri,
@@ -524,6 +845,20 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
return 1;
}
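A hedged illustration of the call pattern this contract implies, assuming the usual show-function context (phba, buf) and mirroring how the rpi/xri/vpi show routines below use it: pass NULL for every count the caller does not need.

	/* Illustrative only: fetch just the rpi counts, as lpfc_used_rpi_show()
	 * below effectively does. */
	uint32_t mrpi, arpi;
	int len;

	if (lpfc_get_hba_info(phba, NULL, NULL, &mrpi, &arpi, NULL, NULL))
		len = snprintf(buf, PAGE_SIZE, "%d\n", mrpi - arpi);
	else
		len = snprintf(buf, PAGE_SIZE, "Unknown\n");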
+/**
+ * lpfc_max_rpi_show: Return maximum rpi.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the maximum rpi count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mrpi count.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -538,6 +873,20 @@ lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "Unknown\n");
}
+/**
+ * lpfc_used_rpi_show: Return maximum rpi minus available rpi.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing the used rpi count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mrpi and arpi counts.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -552,6 +901,20 @@ lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "Unknown\n");
}
+/**
+ * lpfc_max_xri_show: Return maximum xri.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the maximum xri count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mxri count.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_max_xri_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -566,6 +929,20 @@ lpfc_max_xri_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "Unknown\n");
}
+/**
+ * lpfc_used_xri_show: Return maximum xri minus the available xri.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the used xri count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mxri and axri counts.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_used_xri_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -580,6 +957,20 @@ lpfc_used_xri_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "Unknown\n");
}
+/**
+ * lpfc_max_vpi_show: Return maximum vpi.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the maximum vpi count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mvpi count.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -594,6 +985,20 @@ lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "Unknown\n");
}
+/**
+ * lpfc_used_vpi_show: Return maximum vpi minus the available vpi.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the used vpi count in decimal or "Unknown".
+ *
+ * Description:
+ * Calls lpfc_get_hba_info() asking for just the mvpi and avpi counts.
+ * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
+ * to "Unknown" and the buffer length is returned, therefore the caller
+ * must check for "Unknown" in the buffer to detect a failure.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -608,6 +1013,19 @@ lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "Unknown\n");
}
+/**
+ * lpfc_npiv_info_show: Return text about NPIV support for the adapter.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: text that must be interpreted to determine if npiv is supported.
+ *
+ * Description:
+ * Buffer will contain text indicating npiv is not supported on the port,
+ * the port is an NPIV physical port, or it is an NPIV virtual port with
+ * the id of the vport.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -623,6 +1041,17 @@ lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
}
+/**
+ * lpfc_poll_show: Return text about poll support for the adapter.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the cfg_poll in hex.
+ *
+ * Notes:
+ * cfg_poll should be a lpfc_polling_flags type.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_poll_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -634,6 +1063,20 @@ lpfc_poll_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
}
+/**
+ * lpfc_poll_store: Set the value of cfg_poll for the adapter.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: one or more lpfc_polling_flags values.
+ * @count: not used.
+ *
+ * Notes:
+ * buf contents converted to integer and checked for a valid value.
+ *
+ * Returns:
+ * -EINVAL if the buffer cannot be converted or is out of range
+ * length of the buf on success
+ **/
static ssize_t
lpfc_poll_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -692,6 +1135,20 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
return strlen(buf);
}
+/**
+ * lpfc_param_show: Return a cfg attribute value in decimal.
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_show.
+ *
+ * lpfc_##attr##_show: Return the decimal value of an adapter's cfg_xxx field.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the attribute value in decimal.
+ *
+ * Returns: size of formatted string.
+ **/
#define lpfc_param_show(attr) \
static ssize_t \
lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
@@ -706,6 +1163,20 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
phba->cfg_##attr);\
}
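To make the macro concrete, here is a rough expansion sketch for an attribute such as poll_tmo; this is illustrative, not literal preprocessor output:

	static ssize_t
	lpfc_poll_tmo_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
	{
		struct Scsi_Host *shost = class_to_shost(dev);
		struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
		struct lpfc_hba *phba = vport->phba;

		/* Print the adapter's cfg_poll_tmo field in decimal. */
		return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_poll_tmo);
	}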
+/**
+ * lpfc_param_hex_show: Return a cfg attribute value in hex.
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_show.
+ *
+ * lpfc_##attr##_show: Return the hex value of an adapter's cfg_xxx field.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the attribute value in hexadecimal.
+ *
+ * Returns: size of formatted string.
+ **/
#define lpfc_param_hex_show(attr) \
static ssize_t \
lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
@@ -720,6 +1191,25 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
phba->cfg_##attr);\
}
+/**
+ * lpfc_param_init: Initializes a cfg attribute.
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_init. The macro also
+ * takes a default argument, a minimum and maximum argument.
+ *
+ * lpfc_##attr##_init: Initializes an attribute.
+ * @phba: pointer to the adapter structure.
+ * @val: integer attribute value.
+ *
+ * Validates the min and max values then sets the adapter config field
+ * accordingly, or uses the default if out of range and prints an error message.
+ *
+ * Returns:
+ * zero on success
+ * -EINVAL if default used
+ **/
#define lpfc_param_init(attr, default, minval, maxval) \
static int \
lpfc_##attr##_init(struct lpfc_hba *phba, int val) \
@@ -735,6 +1225,26 @@ lpfc_##attr##_init(struct lpfc_hba *phba, int val) \
return -EINVAL;\
}
+/**
+ * lpfc_param_set: Set a cfg attribute value.
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_set
+ *
+ * lpfc_##attr##_set: Sets an attribute value.
+ * @phba: pointer to the adapter structure.
+ * @val: integer attribute value.
+ *
+ * Description:
+ * Validates the min and max values then sets the
+ * adapter config field if in the valid range. Prints an error message
+ * and does not set the parameter if invalid.
+ *
+ * Returns:
+ * zero on success
+ * -EINVAL if val is invalid
+ **/
#define lpfc_param_set(attr, default, minval, maxval) \
static int \
lpfc_##attr##_set(struct lpfc_hba *phba, int val) \
@@ -749,6 +1259,27 @@ lpfc_##attr##_set(struct lpfc_hba *phba, int val) \
return -EINVAL;\
}
+/**
+ * lpfc_param_store: Set a vport attribute value.
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_store.
+ *
+ * lpfc_##attr##_store: Set an attribute value.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: contains the attribute value in ascii.
+ * @count: not used.
+ *
+ * Description:
+ * Convert the ascii text number to an integer, then
+ * use the lpfc_##attr##_set function to set the value.
+ *
+ * Returns:
+ * -EINVAL if val is invalid or lpfc_##attr##_set() fails
+ * length of buffer upon success.
+ **/
#define lpfc_param_store(attr) \
static ssize_t \
lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
@@ -768,6 +1299,20 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
return -EINVAL;\
}
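Likewise, a rough expansion sketch of the store path, assuming the macro body abbreviated in this hunk follows the usual pattern in this file: convert the ascii input, then delegate to the generated _set function.

	static ssize_t
	lpfc_poll_tmo_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
	{
		struct Scsi_Host *shost = class_to_shost(dev);
		struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
		struct lpfc_hba *phba = vport->phba;
		int val = 0;

		if (!isdigit(buf[0]))
			return -EINVAL;
		if (sscanf(buf, "%i", &val) != 1)
			return -EINVAL;
		/* lpfc_poll_tmo_set() is generated by lpfc_param_set() above. */
		if (lpfc_poll_tmo_set(phba, val) == 0)
			return strlen(buf);
		return -EINVAL;
	}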
+/**
+ * lpfc_vport_param_show: Return decimal formatted cfg attribute value.
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_show
+ *
+ * lpfc_##attr##_show: prints the attribute value in decimal.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the attribute value in decimal.
+ *
+ * Returns: length of formatted string.
+ **/
#define lpfc_vport_param_show(attr) \
static ssize_t \
lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
@@ -780,6 +1325,21 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
}
+/**
+ * lpfc_vport_param_hex_show: Return hex formatted attribute value.
+ *
+ * Description:
+ * Macro that given an attr e.g.
+ * hba_queue_depth expands into a function with the name
+ * lpfc_hba_queue_depth_show
+ *
+ * lpfc_##attr##_show: prints the attribute value in hexadecimal.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the attribute value in hexadecimal.
+ *
+ * Returns: length of formatted string.
+ **/
#define lpfc_vport_param_hex_show(attr) \
static ssize_t \
lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
@@ -792,6 +1352,24 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
}
+/**
+ * lpfc_vport_param_init: Initialize a vport cfg attribute.
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_init. The macro also
+ * takes a default argument, a minimum and maximum argument.
+ *
+ * lpfc_##attr##_init: validates the min and max values then sets the
+ * adapter config field accordingly, or uses the default if out of range
+ * and prints an error message.
+ * @vport: pointer to the vport structure.
+ * @val: integer attribute value.
+ *
+ * Returns:
+ * zero on success
+ * -EINVAL if default used
+ **/
#define lpfc_vport_param_init(attr, default, minval, maxval) \
static int \
lpfc_##attr##_init(struct lpfc_vport *vport, int val) \
@@ -801,12 +1379,29 @@ lpfc_##attr##_init(struct lpfc_vport *vport, int val) \
return 0;\
}\
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
- "0449 lpfc_"#attr" attribute cannot be set to %d, "\
+ "0423 lpfc_"#attr" attribute cannot be set to %d, "\
"allowed range is ["#minval", "#maxval"]\n", val); \
vport->cfg_##attr = default;\
return -EINVAL;\
}
+/**
+ * lpfc_vport_param_set: Set a vport cfg attribute.
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth expands
+ * into a function with the name lpfc_hba_queue_depth_set
+ *
+ * lpfc_##attr##_set: validates the min and max values then sets the
+ * adapter config field if in the valid range. Prints an error message
+ * and does not set the parameter if invalid.
+ * @vport: pointer to the vport structure.
+ * @val: integer attribute value.
+ *
+ * Returns:
+ * zero on success
+ * -EINVAL if val is invalid
+ **/
#define lpfc_vport_param_set(attr, default, minval, maxval) \
static int \
lpfc_##attr##_set(struct lpfc_vport *vport, int val) \
@@ -816,11 +1411,28 @@ lpfc_##attr##_set(struct lpfc_vport *vport, int val) \
return 0;\
}\
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
- "0450 lpfc_"#attr" attribute cannot be set to %d, "\
+ "0424 lpfc_"#attr" attribute cannot be set to %d, "\
"allowed range is ["#minval", "#maxval"]\n", val); \
return -EINVAL;\
}
+/**
+ * lpfc_vport_param_store: Set a vport attribute.
+ *
+ * Description:
+ * Macro that given an attr e.g. hba_queue_depth
+ * expands into a function with the name lpfc_hba_queue_depth_store
+ *
+ * lpfc_##attr##_store: convert the ascii text number to an integer, then
+ * use the lpfc_##attr##_set function to set the value.
+ * @dev: class device that is converted into a Scsi_host.
+ * @buf: contains the attribute value in decimal.
+ * @count: not used.
+ *
+ * Returns:
+ * -EINVAL if val is invalid or lpfc_##attr##_set() fails
+ * length of buffer upon success.
+ **/
#define lpfc_vport_param_store(attr) \
static ssize_t \
lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
@@ -928,6 +1540,10 @@ lpfc_vport_param_store(name)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
lpfc_##name##_show, lpfc_##name##_store)
+static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL);
+static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL);
+static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL);
+static DEVICE_ATTR(bg_reftag_err, S_IRUGO, lpfc_bg_reftag_err_show, NULL);
static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL);
static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
@@ -941,6 +1557,7 @@ static DEVICE_ATTR(option_rom_version, S_IRUGO,
lpfc_option_rom_version_show, NULL);
static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
lpfc_num_discovered_ports_show, NULL);
+static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
static DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show, NULL);
static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
@@ -958,6 +1575,17 @@ static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
+/**
+ * lpfc_soft_wwn_enable_store: Allows setting of the wwn if the key is valid.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: containing the string lpfc_soft_wwn_key.
+ * @count: must be size of lpfc_soft_wwn_key.
+ *
+ * Returns:
+ * -EINVAL if the buffer does not contain lpfc_soft_wwn_key
+ * length of buf indicates success
+ **/
static ssize_t
lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -994,6 +1622,14 @@ lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL,
lpfc_soft_wwn_enable_store);
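For context, a hedged user-space sketch of the two-step sequence this attribute and the lpfc_soft_wwpn attribute below imply: write the key, then the wwpn. The sysfs path, host number and wwpn value are examples only.

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical host0 path; the key is lpfc_soft_wwn_key above. */
		FILE *f = fopen("/sys/class/scsi_host/host0/lpfc_soft_wwn_enable", "w");

		if (!f)
			return 1;
		fputs("C99G71SL8032A", f);
		fclose(f);

		f = fopen("/sys/class/scsi_host/host0/lpfc_soft_wwpn", "w");
		if (!f)
			return 1;
		fputs("10000000c9abcdef", f);	/* example 16-digit wwpn */
		fclose(f);
		return 0;
	}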
+/**
+ * lpfc_soft_wwpn_show: Return the cfg soft ww port name of the adapter.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the wwpn in hexadecimal.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1006,7 +1642,19 @@ lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
(unsigned long long)phba->cfg_soft_wwpn);
}
-
+/**
+ * lpfc_soft_wwpn_store: Set the ww port name of the adapter.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: contains the wwpn in hexadecimal.
+ * @count: number of wwpn bytes in buf.
+ *
+ * Returns:
+ * -EACCES hba reset not enabled, adapter over temp
+ * -EINVAL soft wwn not enabled, count is invalid, or a wwpn byte is invalid
+ * -EIO error taking adapter offline or online
+ * value of count on success
+ **/
static ssize_t
lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -1080,6 +1728,14 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\
lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);
+/**
+ * lpfc_soft_wwnn_show: Return the cfg soft ww node name for the adapter.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the wwnn in hexadecimal.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1090,7 +1746,16 @@ lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
(unsigned long long)phba->cfg_soft_wwnn);
}
-
+/**
+ * lpfc_soft_wwnn_store: Set the ww node name of the adapter.
+ * @dev: class device that is converted into a Scsi_host.
+ * @buf: contains the ww node name in hexadecimal.
+ * @count: number of wwnn bytes in buf.
+ *
+ * Returns:
+ * -EINVAL soft wwn not enabled, count is invalid, or a wwnn byte is invalid
+ * value of count on success
+ **/
static ssize_t
lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -1178,6 +1843,15 @@ module_param(lpfc_nodev_tmo, int, 0);
MODULE_PARM_DESC(lpfc_nodev_tmo,
"Seconds driver will hold I/O waiting "
"for a device to come back");
+
+/**
+ * lpfc_nodev_tmo_show: Return the hba dev loss timeout value.
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the dev loss timeout in decimal.
+ *
+ * Returns: size of formatted string.
+ **/
static ssize_t
lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1189,6 +1863,21 @@ lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo);
}
+/**
+ * lpfc_nodev_tmo_init: Set the hba nodev timeout value.
+ * @vport: lpfc vport structure pointer.
+ * @val: contains the nodev timeout value.
+ *
+ * Description:
+ * If the devloss tmo is already set then nodev tmo is set to devloss tmo,
+ * a kernel error message is printed, and zero is returned.
+ * Else if val is in range then nodev tmo and devloss tmo are set to val.
+ * Otherwise nodev tmo is set to the default value.
+ *
+ * Returns:
+ * zero if already set or if val is in range
+ * -EINVAL val out of range
+ **/
static int
lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
{
@@ -1196,7 +1885,7 @@ lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
vport->cfg_nodev_tmo = vport->cfg_devloss_tmo;
if (val != LPFC_DEF_DEVLOSS_TMO)
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0402 Ignoring nodev_tmo module "
+ "0407 Ignoring nodev_tmo module "
"parameter because devloss_tmo is "
"set.\n");
return 0;
@@ -1215,6 +1904,13 @@ lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
return -EINVAL;
}
+/**
+ * lpfc_update_rport_devloss_tmo: Update dev loss tmo value.
+ * @vport: lpfc vport structure pointer.
+ *
+ * Description:
+ * Update all the ndlp's dev loss tmo with the vport devloss tmo value.
+ **/
static void
lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
{
@@ -1229,6 +1925,21 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
spin_unlock_irq(shost->host_lock);
}
+/**
+ * lpfc_nodev_tmo_set: Set the vport nodev tmo and devloss tmo values.
+ * @vport: lpfc vport structure pointer.
+ * @val: contains the tmo value.
+ *
+ * Description:
+ * If the devloss tmo is already set or the vport dev loss tmo has changed
+ * then a kernel error message is printed and zero is returned.
+ * Else if val is in range then nodev tmo and devloss tmo are set to val.
+ * Otherwise nodev tmo is set to the default value.
+ *
+ * Returns:
+ * zero if already set or if val is in range
+ * -EINVAL val out of range
+ **/
static int
lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
{
@@ -1269,6 +1980,21 @@ MODULE_PARM_DESC(lpfc_devloss_tmo,
lpfc_vport_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO,
LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO)
lpfc_vport_param_show(devloss_tmo)
+
+/**
+ * lpfc_devloss_tmo_set: Sets vport nodev tmo, devloss tmo values, changed bit.
+ * @vport: lpfc vport structure pointer.
+ * @val: contains the tmo value.
+ *
+ * Description:
+ * If val is in a valid range then set the vport nodev tmo,
+ * devloss tmo, also set the vport dev loss tmo changed flag.
+ * Else a kernel error message is printed.
+ *
+ * Returns:
+ * zero if val is in range
+ * -EINVAL val out of range
+ **/
static int
lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
{
@@ -1303,6 +2029,7 @@ static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
# LOG_LINK_EVENT 0x10 Link events
# LOG_FCP 0x40 FCP traffic history
# LOG_NODE 0x80 Node table events
+# LOG_BG 0x200 BlockGuard events
# LOG_MISC 0x400 Miscellaneous events
# LOG_SLI 0x800 SLI events
# LOG_FCP_ERROR 0x1000 Only log FCP errors
@@ -1366,12 +2093,27 @@ MODULE_PARM_DESC(lpfc_restrict_login,
"Restrict virtual ports login to remote initiators.");
lpfc_vport_param_show(restrict_login);
+/**
+ * lpfc_restrict_login_init: Set the vport restrict login flag.
+ * @vport: lpfc vport structure pointer.
+ * @val: contains the restrict login value.
+ *
+ * Description:
+ * If val is not in a valid range then log a kernel error message and set
+ * the vport restrict login to one.
+ * If the port type is physical clear the restrict login flag and return.
+ * Else set the restrict login flag to val.
+ *
+ * Returns:
+ * zero if val is in range
+ * -EINVAL val out of range
+ **/
static int
lpfc_restrict_login_init(struct lpfc_vport *vport, int val)
{
if (val < 0 || val > 1) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0449 lpfc_restrict_login attribute cannot "
+ "0422 lpfc_restrict_login attribute cannot "
"be set to %d, allowed range is [0, 1]\n",
val);
vport->cfg_restrict_login = 1;
@@ -1385,12 +2127,28 @@ lpfc_restrict_login_init(struct lpfc_vport *vport, int val)
return 0;
}
+/**
+ * lpfc_restrict_login_set: Set the vport restrict login flag.
+ * @vport: lpfc vport structure pointer.
+ * @val: contains the restrict login value.
+ *
+ * Description:
+ * If val is not in a valid range then log a kernel error message and set
+ * the vport restrict login to one.
+ * If the port type is physical and the val is not zero log a kernel
+ * error message, clear the restrict login flag and return zero.
+ * Else set the restrict login flag to val.
+ *
+ * Returns:
+ * zero if val is in range
+ * -EINVAL val out of range
+ **/
static int
lpfc_restrict_login_set(struct lpfc_vport *vport, int val)
{
if (val < 0 || val > 1) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0450 lpfc_restrict_login attribute cannot "
+ "0425 lpfc_restrict_login attribute cannot "
"be set to %d, allowed range is [0, 1]\n",
val);
vport->cfg_restrict_login = 1;
@@ -1441,6 +2199,23 @@ LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1,
# Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
# Default value is 0.
*/
+
+/**
+ * lpfc_topology_set: Set the adapters topology field.
+ * @phba: lpfc_hba pointer.
+ * @val: topology value.
+ *
+ * Description:
+ * If val is in a valid range then set the adapter's topology field and
+ * issue a lip; if the lip fails reset the topology to the old value.
+ *
+ * If the value is not in range log a kernel error message and return an error.
+ *
+ * Returns:
+ * zero if val is in range and lip okay
+ * non-zero return value from lpfc_issue_lip()
+ * -EINVAL val out of range
+ **/
static int
lpfc_topology_set(struct lpfc_hba *phba, int val)
{
@@ -1469,6 +2244,335 @@ lpfc_param_store(topology)
static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
lpfc_topology_show, lpfc_topology_store);
+
+/**
+ * lpfc_stat_data_ctrl_store: Write callback for the lpfc_stat_data_ctrl
+ * sysfs file.
+ * @dev: Pointer to class device.
+ * @buf: Data buffer.
+ * @count: Size of the data buffer.
+ *
+ * This function gets called when a user writes to the lpfc_stat_data_ctrl
+ * sysfs file. It parses the command written to the sysfs file and takes
+ * the appropriate action. These commands are used for controlling
+ * driver statistical data collection.
+ * The following are the commands this function handles.
+ *
+ * setbucket <bucket_type> <base> <step>
+ * = Set the latency buckets.
+ * destroybucket = destroy all the buckets.
+ * start = start data collection
+ * stop = stop data collection
+ * reset = reset the collected data
+ **/
+static ssize_t
+lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+#define LPFC_MAX_DATA_CTRL_LEN 1024
+ static char bucket_data[LPFC_MAX_DATA_CTRL_LEN];
+ unsigned long i;
+ char *str_ptr, *token;
+ struct lpfc_vport **vports;
+ struct Scsi_Host *v_shost;
+ char *bucket_type_str, *base_str, *step_str;
+ unsigned long base, step, bucket_type;
+
+ if (!strncmp(buf, "setbucket", strlen("setbucket"))) {
+ if (strlen(buf) > LPFC_MAX_DATA_CTRL_LEN)
+ return -EINVAL;
+
+ strcpy(bucket_data, buf);
+ str_ptr = &bucket_data[0];
+ /* Ignore this token - this is command token */
+ token = strsep(&str_ptr, "\t ");
+ if (!token)
+ return -EINVAL;
+
+ bucket_type_str = strsep(&str_ptr, "\t ");
+ if (!bucket_type_str)
+ return -EINVAL;
+
+ if (!strncmp(bucket_type_str, "linear", strlen("linear")))
+ bucket_type = LPFC_LINEAR_BUCKET;
+ else if (!strncmp(bucket_type_str, "power2", strlen("power2")))
+ bucket_type = LPFC_POWER2_BUCKET;
+ else
+ return -EINVAL;
+
+ base_str = strsep(&str_ptr, "\t ");
+ if (!base_str)
+ return -EINVAL;
+ base = simple_strtoul(base_str, NULL, 0);
+
+ step_str = strsep(&str_ptr, "\t ");
+ if (!step_str)
+ return -EINVAL;
+ step = simple_strtoul(step_str, NULL, 0);
+ if (!step)
+ return -EINVAL;
+
+ /* Block the data collection for every vport */
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ v_shost = lpfc_shost_from_vport(vports[i]);
+ spin_lock_irq(v_shost->host_lock);
+ /* Block and reset data collection */
+ vports[i]->stat_data_blocked = 1;
+ if (vports[i]->stat_data_enabled)
+ lpfc_vport_reset_stat_data(vports[i]);
+ spin_unlock_irq(v_shost->host_lock);
+ }
+
+ /* Set the bucket attributes */
+ phba->bucket_type = bucket_type;
+ phba->bucket_base = base;
+ phba->bucket_step = step;
+
+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ v_shost = lpfc_shost_from_vport(vports[i]);
+
+ /* Unblock data collection */
+ spin_lock_irq(v_shost->host_lock);
+ vports[i]->stat_data_blocked = 0;
+ spin_unlock_irq(v_shost->host_lock);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ return strlen(buf);
+ }
+
+ if (!strncmp(buf, "destroybucket", strlen("destroybucket"))) {
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ v_shost = lpfc_shost_from_vport(vports[i]);
+ spin_lock_irq(shost->host_lock);
+ vports[i]->stat_data_blocked = 1;
+ lpfc_free_bucket(vport);
+ vport->stat_data_enabled = 0;
+ vports[i]->stat_data_blocked = 0;
+ spin_unlock_irq(shost->host_lock);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ phba->bucket_type = LPFC_NO_BUCKET;
+ phba->bucket_base = 0;
+ phba->bucket_step = 0;
+ return strlen(buf);
+ }
+
+ if (!strncmp(buf, "start", strlen("start"))) {
+ /* If no buckets configured return error */
+ if (phba->bucket_type == LPFC_NO_BUCKET)
+ return -EINVAL;
+ spin_lock_irq(shost->host_lock);
+ if (vport->stat_data_enabled) {
+ spin_unlock_irq(shost->host_lock);
+ return strlen(buf);
+ }
+ lpfc_alloc_bucket(vport);
+ vport->stat_data_enabled = 1;
+ spin_unlock_irq(shost->host_lock);
+ return strlen(buf);
+ }
+
+ if (!strncmp(buf, "stop", strlen("stop"))) {
+ spin_lock_irq(shost->host_lock);
+ if (vport->stat_data_enabled == 0) {
+ spin_unlock_irq(shost->host_lock);
+ return strlen(buf);
+ }
+ lpfc_free_bucket(vport);
+ vport->stat_data_enabled = 0;
+ spin_unlock_irq(shost->host_lock);
+ return strlen(buf);
+ }
+
+ if (!strncmp(buf, "reset", strlen("reset"))) {
+ if ((phba->bucket_type == LPFC_NO_BUCKET)
+ || !vport->stat_data_enabled)
+ return strlen(buf);
+ spin_lock_irq(shost->host_lock);
+ vport->stat_data_blocked = 1;
+ lpfc_vport_reset_stat_data(vport);
+ vport->stat_data_blocked = 0;
+ spin_unlock_irq(shost->host_lock);
+ return strlen(buf);
+ }
+ return -EINVAL;
+}
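A hedged user-space sketch of driving this command interface; the sysfs path, host number and bucket parameters are illustrative only.

	#include <stdio.h>

	/* Write one command string to the hypothetical host0 control file. */
	static int stat_ctrl(const char *cmd)
	{
		FILE *f = fopen("/sys/class/scsi_host/host0/lpfc_stat_data_ctrl", "w");

		if (!f)
			return -1;
		fputs(cmd, f);
		return fclose(f);
	}

	int main(void)
	{
		stat_ctrl("setbucket linear 100 50");	/* base 100, step 50 */
		stat_ctrl("start");
		/* ... run workload, then read lpfc_drvr_stat_data (defined below) ... */
		stat_ctrl("stop");
		return 0;
	}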
+
+
+/**
+ * lpfc_stat_data_ctrl_show: Read callback function for the
+ * lpfc_stat_data_ctrl sysfs file.
+ * @dev: Pointer to class device object.
+ * @buf: Data buffer.
+ *
+ * This function is the read callback function for the
+ * lpfc_stat_data_ctrl sysfs file. It reports the
+ * current statistical data collection state.
+ **/
+static ssize_t
+lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int index = 0;
+ int i;
+ char *bucket_type;
+ unsigned long bucket_value;
+
+ switch (phba->bucket_type) {
+ case LPFC_LINEAR_BUCKET:
+ bucket_type = "linear";
+ break;
+ case LPFC_POWER2_BUCKET:
+ bucket_type = "power2";
+ break;
+ default:
+ bucket_type = "No Bucket";
+ break;
+ }
+
+ sprintf(&buf[index], "Statistical Data enabled :%d, "
+ "blocked :%d, Bucket type :%s, Bucket base :%d,"
+ " Bucket step :%d\nLatency Ranges :",
+ vport->stat_data_enabled, vport->stat_data_blocked,
+ bucket_type, phba->bucket_base, phba->bucket_step);
+ index = strlen(buf);
+ if (phba->bucket_type != LPFC_NO_BUCKET) {
+ for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
+ if (phba->bucket_type == LPFC_LINEAR_BUCKET)
+ bucket_value = phba->bucket_base +
+ phba->bucket_step * i;
+ else
+ bucket_value = phba->bucket_base +
+ (1 << i) * phba->bucket_step;
+
+ if (index + 10 > PAGE_SIZE)
+ break;
+ sprintf(&buf[index], "%08ld ", bucket_value);
+ index = strlen(buf);
+ }
+ }
+ sprintf(&buf[index], "\n");
+ return strlen(buf);
+}
+
+/*
+ * Sysfs attribute to control the statistical data collection.
+ */
+static DEVICE_ATTR(lpfc_stat_data_ctrl, S_IRUGO | S_IWUSR,
+ lpfc_stat_data_ctrl_show, lpfc_stat_data_ctrl_store);
+
+/*
+ * lpfc_drvr_stat_data: sysfs attr to get driver statistical data.
+ */
+
+/*
+ * Each bucket takes 11 characters; each target adds a 17-byte WWN
+ * plus 1 newline.
+ */
+#define STAT_DATA_SIZE_PER_TARGET(NUM_BUCKETS) ((NUM_BUCKETS) * 11 + 18)
+#define MAX_STAT_DATA_SIZE_PER_TARGET \
+ STAT_DATA_SIZE_PER_TARGET(LPFC_MAX_BUCKET_COUNT)
+
+
+/**
+ * sysfs_drvr_stat_data_read: Read callback function for lpfc_drvr_stat_data
+ * sysfs attribute.
+ * @kobj: Pointer to the kernel object
+ * @bin_attr: Attribute object
+ * @buf: Buffer pointer
+ * @off: File offset
+ * @count: Buffer size
+ *
+ * This function is the read callback function for the lpfc_drvr_stat_data
+ * sysfs file. It exports the statistical data to user
+ * applications.
+ **/
+static ssize_t
+sysfs_drvr_stat_data_read(struct kobject *kobj, struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct device *dev = container_of(kobj, struct device,
+ kobj);
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int i = 0, index = 0;
+ unsigned long nport_index;
+ struct lpfc_nodelist *ndlp = NULL;
+ nport_index = (unsigned long)off /
+ MAX_STAT_DATA_SIZE_PER_TARGET;
+
+ if (!vport->stat_data_enabled || vport->stat_data_blocked
+ || (phba->bucket_type == LPFC_NO_BUCKET))
+ return 0;
+
+ spin_lock_irq(shost->host_lock);
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp) || !ndlp->lat_data)
+ continue;
+
+ if (nport_index > 0) {
+ nport_index--;
+ continue;
+ }
+
+ if ((index + MAX_STAT_DATA_SIZE_PER_TARGET)
+ > count)
+ break;
+
+ if (!ndlp->lat_data)
+ continue;
+
+ /* Print the WWN */
+ sprintf(&buf[index], "%02x%02x%02x%02x%02x%02x%02x%02x:",
+ ndlp->nlp_portname.u.wwn[0],
+ ndlp->nlp_portname.u.wwn[1],
+ ndlp->nlp_portname.u.wwn[2],
+ ndlp->nlp_portname.u.wwn[3],
+ ndlp->nlp_portname.u.wwn[4],
+ ndlp->nlp_portname.u.wwn[5],
+ ndlp->nlp_portname.u.wwn[6],
+ ndlp->nlp_portname.u.wwn[7]);
+
+ index = strlen(buf);
+
+ for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
+ sprintf(&buf[index], "%010u,",
+ ndlp->lat_data[i].cmd_count);
+ index = strlen(buf);
+ }
+ sprintf(&buf[index], "\n");
+ index = strlen(buf);
+ }
+ spin_unlock_irq(shost->host_lock);
+ return index;
+}
+
+static struct bin_attribute sysfs_drvr_stat_data_attr = {
+ .attr = {
+ .name = "lpfc_drvr_stat_data",
+ .mode = S_IRUSR,
+ .owner = THIS_MODULE,
+ },
+ .size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET,
+ .read = sysfs_drvr_stat_data_read,
+ .write = NULL,
+};
+
/*
# lpfc_link_speed: Link speed selection for initializing the Fibre Channel
# connection.
@@ -1479,6 +2583,24 @@ static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
# 8 = 8 Gigabaud
# Value range is [0,8]. Default value is 0.
*/
+
+/**
+ * lpfc_link_speed_set: Set the adapters link speed.
+ * @phba: lpfc_hba pointer.
+ * @val: link speed value.
+ *
+ * Description:
+ * If val is in a valid range then set the adapter's link speed field and
+ * issue a lip; if the lip fails reset the link speed to the old value.
+ *
+ * Notes:
+ * If the value is not in range log a kernel error message and return an error.
+ *
+ * Returns:
+ * zero if val is in range and lip okay.
+ * non-zero return value from lpfc_issue_lip()
+ * -EINVAL val out of range
+ **/
static int
lpfc_link_speed_set(struct lpfc_hba *phba, int val)
{
@@ -1513,6 +2635,23 @@ static int lpfc_link_speed = 0;
module_param(lpfc_link_speed, int, 0);
MODULE_PARM_DESC(lpfc_link_speed, "Select link speed");
lpfc_param_show(link_speed)
+
+/**
+ * lpfc_link_speed_init: Set the adapters link speed.
+ * @phba: lpfc_hba pointer.
+ * @val: link speed value.
+ *
+ * Description:
+ * If val is in a valid range then set the adapter's link speed field.
+ *
+ * Notes:
+ * If the value is not in range log a kernel error message, clear the link
+ * speed and return an error.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
static int
lpfc_link_speed_init(struct lpfc_hba *phba, int val)
{
@@ -1522,7 +2661,7 @@ lpfc_link_speed_init(struct lpfc_hba *phba, int val)
return 0;
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0454 lpfc_link_speed attribute cannot "
+ "0405 lpfc_link_speed attribute cannot "
"be set to %d, allowed values are "
"["LPFC_LINK_SPEED_STRING"]\n", val);
phba->cfg_link_speed = 0;
@@ -1548,6 +2687,48 @@ LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
"Use ADISC on rediscovery to authenticate FCP devices");
/*
+# lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
+# depth. Default value is 0. When the value of this parameter is zero the
+# SCSI command completion time is not used for controlling I/O queue depth. When
+# the parameter is set to a non-zero value, the I/O queue depth is controlled
+# to limit the I/O completion time to the parameter value.
+# The value is set in milliseconds.
+*/
+static int lpfc_max_scsicmpl_time;
+module_param(lpfc_max_scsicmpl_time, int, 0);
+MODULE_PARM_DESC(lpfc_max_scsicmpl_time,
+ "Use command completion time to control queue depth");
+lpfc_vport_param_show(max_scsicmpl_time);
+lpfc_vport_param_init(max_scsicmpl_time, 0, 0, 60000);
+static int
+lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+
+ if (val == vport->cfg_max_scsicmpl_time)
+ return 0;
+ if ((val < 0) || (val > 60000))
+ return -EINVAL;
+ vport->cfg_max_scsicmpl_time = val;
+
+ spin_lock_irq(shost->host_lock);
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ continue;
+ ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
+ }
+ spin_unlock_irq(shost->host_lock);
+ return 0;
+}
+lpfc_vport_param_store(max_scsicmpl_time);
+static DEVICE_ATTR(lpfc_max_scsicmpl_time, S_IRUGO | S_IWUSR,
+ lpfc_max_scsicmpl_time_show,
+ lpfc_max_scsicmpl_time_store);
+
+/*
# lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value
# range is [0,1]. Default value is 0.
*/
@@ -1623,12 +2804,12 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
/*
# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
# support this feature
-# 0 = MSI disabled (default)
+# 0 = MSI disabled
# 1 = MSI enabled
-# 2 = MSI-X enabled
-# Value range is [0,2]. Default value is 0.
+# 2 = MSI-X enabled (default)
+# Value range is [0,2]. Default value is 2.
*/
-LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or "
+LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
"MSI-X (2), if possible");
/*
@@ -1648,6 +2829,42 @@ LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver.");
LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
/*
+# lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
+# 0 = BlockGuard disabled (default)
+# 1 = BlockGuard enabled
+# Value range is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
+
+
+/*
+# lpfc_prot_mask:
+# - Bit mask of host protection capabilities used to register with the
+# SCSI mid-layer
+# - Only meaningful if BG is turned on (lpfc_enable_bg=1).
+# - Allows you to ultimately specify which profiles to use
+# - Default will result in registering capabilities for all profiles.
+#
+*/
+unsigned int lpfc_prot_mask = SHOST_DIX_TYPE0_PROTECTION;
+
+module_param(lpfc_prot_mask, uint, 0);
+MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
+
+/*
+# lpfc_prot_guard:
+# - Bit mask of protection guard types to register with the SCSI mid-layer
+# - Guard types are currently either 1) IP checksum 2) T10-DIF CRC
+# - Allows you to ultimately specify which profiles to use
+# - Default will result in registering capabilities for all guard types
+#
+*/
+unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP;
+module_param(lpfc_prot_guard, byte, 0);
+MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type");
+
+
+/*
* lpfc_sg_seg_cnt: Initial Maximum DMA Segment Count
* This value can be set to values between 64 and 256. The default value is
* 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
@@ -1656,7 +2873,15 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
+LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_PROT_SG_SEG_CNT,
+ LPFC_DEFAULT_PROT_SG_SEG_CNT, LPFC_MAX_PROT_SG_SEG_CNT,
+ "Max Protection Scatter Gather Segment Count");
+
struct device_attribute *lpfc_hba_attrs[] = {
+ &dev_attr_bg_info,
+ &dev_attr_bg_guard_err,
+ &dev_attr_bg_apptag_err,
+ &dev_attr_bg_reftag_err,
&dev_attr_info,
&dev_attr_serialnum,
&dev_attr_modeldesc,
@@ -1668,6 +2893,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_option_rom_version,
&dev_attr_link_state,
&dev_attr_num_discovered_ports,
+ &dev_attr_menlo_mgmt_mode,
&dev_attr_lpfc_drvr_version,
&dev_attr_lpfc_temp_sensor,
&dev_attr_lpfc_log_verbose,
@@ -1703,12 +2929,16 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_poll,
&dev_attr_lpfc_poll_tmo,
&dev_attr_lpfc_use_msi,
+ &dev_attr_lpfc_enable_bg,
&dev_attr_lpfc_soft_wwnn,
&dev_attr_lpfc_soft_wwpn,
&dev_attr_lpfc_soft_wwn_enable,
&dev_attr_lpfc_enable_hba_reset,
&dev_attr_lpfc_enable_hba_heartbeat,
&dev_attr_lpfc_sg_seg_cnt,
+ &dev_attr_lpfc_max_scsicmpl_time,
+ &dev_attr_lpfc_stat_data_ctrl,
+ &dev_attr_lpfc_prot_sg_seg_cnt,
NULL,
};
@@ -1731,9 +2961,29 @@ struct device_attribute *lpfc_vport_attrs[] = {
&dev_attr_nport_evt_cnt,
&dev_attr_npiv_info,
&dev_attr_lpfc_enable_da_id,
+ &dev_attr_lpfc_max_scsicmpl_time,
+ &dev_attr_lpfc_stat_data_ctrl,
NULL,
};
+/**
+ * sysfs_ctlreg_write: Write method for writing to ctlreg.
+ * @kobj: kernel kobject that contains the kernel class device.
+ * @bin_attr: kernel attributes passed to us.
+ * @buf: contains the data to be written to the adapter IOREG space.
+ * @off: offset into buffer to beginning of data.
+ * @count: bytes to transfer.
+ *
+ * Description:
+ * Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
+ * Uses the adapter io control registers to send buf contents to the adapter.
+ *
+ * Returns:
+ * -ERANGE off and count combination out of range
+ * -EINVAL off, count, or buf address invalid
+ * -EPERM adapter is offline
+ * value of count, buf contents written
+ **/
static ssize_t
sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
@@ -1766,6 +3016,23 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
return count;
}
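
For context, the ctlreg binary attribute is driven from user space with plain
positional reads and writes. A hypothetical example follows (the host number
and register offset are invented for illustration):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Read a 32-bit word from the adapter IOREG space at byte
	 * offset 8 through the ctlreg binary attribute. */
	int main(void)
	{
		uint32_t val;
		int fd = open("/sys/class/scsi_host/host0/ctlreg", O_RDONLY);

		if (fd < 0 || pread(fd, &val, sizeof(val), 8) != sizeof(val)) {
			perror("ctlreg");
			return 1;
		}
		printf("ioreg[8] = 0x%08x\n", val);
		close(fd);
		return 0;
	}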
+/**
+ * sysfs_ctlreg_read: Read method for reading from ctlreg.
+ * @kobj: kernel kobject that contains the kernel class device.
+ * @bin_attr: kernel attributes passed to us.
+ * @buf: if successful contains the data from the adapter IOREG space.
+ * @off: offset into buffer to beginning of data.
+ * @count: bytes to transfer.
+ *
+ * Description:
+ * Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
+ * Uses the adapter io control registers to read data into buf.
+ *
+ * Returns:
+ * -ERANGE off and count combination out of range
+ * -EINVAL off, count, or buf address invalid
+ * value of count, buf contents read
+ **/
static ssize_t
sysfs_ctlreg_read(struct kobject *kobj, struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
@@ -1810,7 +3077,10 @@ static struct bin_attribute sysfs_ctlreg_attr = {
.write = sysfs_ctlreg_write,
};
-
+/**
+ * sysfs_mbox_idle: frees the sysfs mailbox.
+ * @phba: lpfc_hba pointer
+ **/
static void
sysfs_mbox_idle(struct lpfc_hba *phba)
{
@@ -1824,6 +3094,27 @@ sysfs_mbox_idle(struct lpfc_hba *phba)
}
}
+/**
+ * sysfs_mbox_write: Write method for writing information via mbox.
+ * @kobj: kernel kobject that contains the kernel class device.
+ * @bin_attr: kernel attributes passed to us.
+ * @buf: contains the data to be written to sysfs mbox.
+ * @off: offset into buffer to beginning of data.
+ * @count: bytes to transfer.
+ *
+ * Description:
+ * Accessed via /sys/class/scsi_host/hostxxx/mbox.
+ * Uses the sysfs mbox to send buf contents to the adapter.
+ *
+ * Returns:
+ * -ERANGE off and count combination out of range
+ * -EINVAL off, count, or buf address invalid
+ * zero if count is zero
+ * -EPERM adapter is offline
+ * -ENOMEM failed to allocate memory for the mail box
+ * -EAGAIN offset, state or mbox is NULL
+ * count number of bytes transferred
+ **/
static ssize_t
sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
@@ -1878,6 +3169,29 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
return count;
}
+/**
+ * sysfs_mbox_read: Read method for reading information via mbox.
+ * @kobj: kernel kobject that contains the kernel class device.
+ * @bin_attr: kernel attributes passed to us.
+ * @buf: contains the data to be read from sysfs mbox.
+ * @off: offset into buffer to beginning of data.
+ * @count: bytes to transfer.
+ *
+ * Description:
+ * Accessed via /sys/class/scsi_host/hostxxx/mbox.
+ * Uses the sysfs mbox to receive data from the adapter.
+ *
+ * Returns:
+ * -ERANGE off greater than mailbox command size
+ * -EINVAL off, count, or buf address invalid
+ * zero if off and count are zero
+ * -EACCES adapter over temp
+ * -EPERM catch-all value used for a multitude of errors
+ * -EAGAIN management IO not permitted, state or off error
+ * -ETIME mailbox timeout
+ * -ENODEV mailbox error
+ * count number of bytes transferred
+ **/
static ssize_t
sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
@@ -1954,6 +3268,8 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
case MBX_DEL_LD_ENTRY:
case MBX_SET_VARIABLE:
case MBX_WRITE_WWN:
+ case MBX_PORT_CAPABILITIES:
+ case MBX_PORT_IOV_CONTROL:
break;
case MBX_READ_SPARM64:
case MBX_READ_LA:
@@ -1978,17 +3294,15 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
/* If HBA encountered an error attention, allow only DUMP
* or RESTART mailbox commands until the HBA is restarted.
*/
- if ((phba->pport->stopped) &&
- (phba->sysfs_mbox.mbox->mb.mbxCommand !=
- MBX_DUMP_MEMORY &&
- phba->sysfs_mbox.mbox->mb.mbxCommand !=
- MBX_RESTART &&
- phba->sysfs_mbox.mbox->mb.mbxCommand !=
- MBX_WRITE_VPARMS)) {
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return -EPERM;
- }
+ if (phba->pport->stopped &&
+ phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_DUMP_MEMORY &&
+ phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_RESTART &&
+ phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_VPARMS &&
+ phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_WWN)
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+ "1259 mbox: Issued mailbox cmd "
+ "0x%x while in stopped state.\n",
+ phba->sysfs_mbox.mbox->mb.mbxCommand);
phba->sysfs_mbox.mbox->vport = vport;
@@ -2059,6 +3373,14 @@ static struct bin_attribute sysfs_mbox_attr = {
.write = sysfs_mbox_write,
};
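
Taken together, the two methods implement a simple write-then-read protocol:
the mailbox command image is written at offset 0, and reading back from offset
0 issues the command and returns the completed mailbox. A hypothetical
user-space sketch (the host number and mailbox size are assumptions; the real
size comes from lpfc_hw.h):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	#define MBOX_WORDS 32	/* assumption, see lpfc_hw.h */

	int main(void)
	{
		unsigned int mb[MBOX_WORDS] = { 0 };
		int fd = open("/sys/class/scsi_host/host0/mbox", O_RDWR);

		if (fd < 0)
			return 1;
		/* mb[0] would carry the mbxCommand field, e.g. a READ_REV */
		if (pwrite(fd, mb, sizeof(mb), 0) != sizeof(mb))
			return 1;
		if (pread(fd, mb, sizeof(mb), 0) != sizeof(mb))
			return 1;
		printf("first mailbox word after completion: 0x%08x\n", mb[0]);
		close(fd);
		return 0;
	}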
+/**
+ * lpfc_alloc_sysfs_attr: Creates the ctlreg and mbox entries.
+ * @vport: address of lpfc vport structure.
+ *
+ * Return codes:
+ * zero on success
+ * error return code from sysfs_create_bin_file()
+ **/
int
lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
{
@@ -2066,9 +3388,16 @@ lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
int error;
error = sysfs_create_bin_file(&shost->shost_dev.kobj,
+ &sysfs_drvr_stat_data_attr);
+
+ /* Virtual ports do not need ctrl_reg and mbox */
+ if (error || vport->port_type == LPFC_NPIV_PORT)
+ goto out;
+
+ error = sysfs_create_bin_file(&shost->shost_dev.kobj,
&sysfs_ctlreg_attr);
if (error)
- goto out;
+ goto out_remove_stat_attr;
error = sysfs_create_bin_file(&shost->shost_dev.kobj,
&sysfs_mbox_attr);
@@ -2078,15 +3407,26 @@ lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
return 0;
out_remove_ctlreg_attr:
sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
+out_remove_stat_attr:
+ sysfs_remove_bin_file(&shost->shost_dev.kobj,
+ &sysfs_drvr_stat_data_attr);
out:
return error;
}
+/**
+ * lpfc_free_sysfs_attr: Removes the ctlreg and mbox entries.
+ * @vport: address of lpfc vport structure.
+ **/
void
lpfc_free_sysfs_attr(struct lpfc_vport *vport)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
+ sysfs_remove_bin_file(&shost->shost_dev.kobj,
+ &sysfs_drvr_stat_data_attr);
+ /* Virtual ports do not need ctrl_reg and mbox */
+ if (vport->port_type == LPFC_NPIV_PORT)
+ return;
sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
}
@@ -2096,6 +3436,10 @@ lpfc_free_sysfs_attr(struct lpfc_vport *vport)
* Dynamic FC Host Attributes Support
*/
+/**
+ * lpfc_get_host_port_id: Copy the vport DID into the scsi host port id.
+ * @shost: kernel scsi host pointer.
+ **/
static void
lpfc_get_host_port_id(struct Scsi_Host *shost)
{
@@ -2105,6 +3449,10 @@ lpfc_get_host_port_id(struct Scsi_Host *shost)
fc_host_port_id(shost) = vport->fc_myDID;
}
+/**
+ * lpfc_get_host_port_type: Set the value of the scsi host port type.
+ * @shost: kernel scsi host pointer.
+ **/
static void
lpfc_get_host_port_type(struct Scsi_Host *shost)
{
@@ -2133,6 +3481,10 @@ lpfc_get_host_port_type(struct Scsi_Host *shost)
spin_unlock_irq(shost->host_lock);
}
+/**
+ * lpfc_get_host_port_state: Set the value of the scsi host port state.
+ * @shost: kernel scsi host pointer.
+ **/
static void
lpfc_get_host_port_state(struct Scsi_Host *shost)
{
@@ -2167,6 +3519,10 @@ lpfc_get_host_port_state(struct Scsi_Host *shost)
spin_unlock_irq(shost->host_lock);
}
+/**
+ * lpfc_get_host_speed: Set the value of the scsi host speed.
+ * @shost: kernel scsi host pointer.
+ **/
static void
lpfc_get_host_speed(struct Scsi_Host *shost)
{
@@ -2199,6 +3555,10 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
spin_unlock_irq(shost->host_lock);
}
+/**
+ * lpfc_get_host_fabric_name: Set the value of the scsi host fabric name.
+ * @shost: kernel scsi host pointer.
+ **/
static void
lpfc_get_host_fabric_name (struct Scsi_Host *shost)
{
@@ -2221,6 +3581,18 @@ lpfc_get_host_fabric_name (struct Scsi_Host *shost)
fc_host_fabric_name(shost) = node_name;
}
+/**
+ * lpfc_get_stats: Return statistical information about the adapter.
+ * @shost: kernel scsi host pointer.
+ *
+ * Notes:
+ * NULL is returned on error: link down, no mbox pool, sli2 active,
+ * management not allowed, memory allocation error, or mbox error.
+ *
+ * Returns:
+ * NULL for error
+ * address of the adapter host statistics
+ **/
static struct fc_host_statistics *
lpfc_get_stats(struct Scsi_Host *shost)
{
@@ -2334,6 +3706,10 @@ lpfc_get_stats(struct Scsi_Host *shost)
return hs;
}
+/**
+ * lpfc_reset_stats: Copy the adapter link stats information.
+ * @shost: kernel scsi host pointer.
+ **/
static void
lpfc_reset_stats(struct Scsi_Host *shost)
{
@@ -2411,6 +3787,14 @@ lpfc_reset_stats(struct Scsi_Host *shost)
* are no sysfs handlers for link_down_tmo.
*/
+/**
+ * lpfc_get_node_by_target: Return the nodelist for a target.
+ * @starget: kernel scsi target pointer.
+ *
+ * Returns:
+ * address of the node list if found
+ * NULL if the target is not found
+ **/
static struct lpfc_nodelist *
lpfc_get_node_by_target(struct scsi_target *starget)
{
@@ -2432,6 +3816,10 @@ lpfc_get_node_by_target(struct scsi_target *starget)
return NULL;
}
+/**
+ * lpfc_get_starget_port_id: Set the target port id to the ndlp DID or -1.
+ * @starget: kernel scsi target pointer.
+ **/
static void
lpfc_get_starget_port_id(struct scsi_target *starget)
{
@@ -2440,6 +3828,12 @@ lpfc_get_starget_port_id(struct scsi_target *starget)
fc_starget_port_id(starget) = ndlp ? ndlp->nlp_DID : -1;
}
+/**
+ * lpfc_get_starget_node_name: Set the target node name.
+ * @starget: kernel scsi target pointer.
+ *
+ * Description: Set the target node name to the ndlp node name wwn or zero.
+ **/
static void
lpfc_get_starget_node_name(struct scsi_target *starget)
{
@@ -2449,6 +3843,12 @@ lpfc_get_starget_node_name(struct scsi_target *starget)
ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0;
}
+/**
+ * lpfc_get_starget_port_name: Set the target port name.
+ * @starget: kernel scsi target pointer.
+ *
+ * Description: Set the target port name to the ndlp port name wwn or zero.
+ **/
static void
lpfc_get_starget_port_name(struct scsi_target *starget)
{
@@ -2458,6 +3858,15 @@ lpfc_get_starget_port_name(struct scsi_target *starget)
ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0;
}
+/**
+ * lpfc_set_rport_loss_tmo: Set the rport dev loss tmo.
+ * @rport: fc rport address.
+ * @timeout: new value for dev loss tmo.
+ *
+ * Description:
+ * If timeout is non-zero set the dev_loss_tmo to timeout, else set
+ * dev_loss_tmo to one.
+ **/
static void
lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
{
@@ -2467,7 +3876,18 @@ lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
rport->dev_loss_tmo = 1;
}
-
+/**
+ * lpfc_rport_show_function: Return rport target information.
+ *
+ * Description:
+ * Macro that uses @field to generate a function named lpfc_show_rport_##field.
+ *
+ * lpfc_show_rport_##field: returns the bytes formatted in buf
+ * @cdev: class converted to an fc_rport.
+ * @buf: on return contains the target_field or zero.
+ *
+ * Returns: size of formatted string.
+ **/
#define lpfc_rport_show_function(field, format_string, sz, cast) \
static ssize_t \
lpfc_show_rport_##field (struct device *dev, \
@@ -2484,6 +3904,23 @@ lpfc_show_rport_##field (struct device *dev, \
lpfc_rport_show_function(field, format_string, sz, ) \
static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL)
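
To make the macro's effect concrete, here is an illustrative expansion for a
hypothetical field; the body is a sketch, since the real expansion depends on
the format_string/sz/cast arguments elided by the hunk above:

	/* Illustrative expansion only -- not the literal macro output. */
	static ssize_t
	lpfc_show_rport_example_field(struct device *dev,
				      struct device_attribute *attr, char *buf)
	{
		/* dev resolves back to the fc_rport; the macro formats the
		 * requested target field into buf with format_string/sz. */
		return snprintf(buf, PAGE_SIZE, "%d\n", 0 /* target field */);
	}
	static FC_RPORT_ATTR(example_field, S_IRUGO,
			     lpfc_show_rport_example_field, NULL);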
+/**
+ * lpfc_set_vport_symbolic_name: Set the vport's symbolic name.
+ * @fc_vport: The fc_vport whose symbolic name has been changed.
+ *
+ * Description:
+ * This function is called by the transport after the @fc_vport's symbolic name
+ * has been changed. It re-registers the symbolic name with the
+ * switch to propagate the change into the fabric if the vport is active.
+ **/
+static void
+lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
+{
+ struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
+
+ if (vport->port_state == LPFC_VPORT_READY)
+ lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
+}
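
In practice this callback fires after user space updates the vport's
symbolic_name attribute through the FC transport class; a hypothetical example
(the sysfs path is an assumption):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *name = "backup-fabric-port";
		int fd = open("/sys/class/fc_vports/vport-0:0-0/symbolic_name",
			      O_WRONLY);

		if (fd < 0)
			return 1;
		/* the transport stores the name, then calls the driver's
		 * set_vport_symbolic_name() to push it into the fabric */
		write(fd, name, strlen(name));
		close(fd);
		return 0;
	}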
struct fc_function_template lpfc_transport_functions = {
/* fixed attributes the driver supports */
@@ -2493,6 +3930,7 @@ struct fc_function_template lpfc_transport_functions = {
.show_host_supported_fc4s = 1,
.show_host_supported_speeds = 1,
.show_host_maxframe_size = 1,
+ .show_host_symbolic_name = 1,
/* dynamic attributes the driver supports */
.get_host_port_id = lpfc_get_host_port_id,
@@ -2542,6 +3980,10 @@ struct fc_function_template lpfc_transport_functions = {
.terminate_rport_io = lpfc_terminate_rport_io,
.dd_fcvport_size = sizeof(struct lpfc_vport *),
+
+ .vport_disable = lpfc_vport_disable,
+
+ .set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
};
struct fc_function_template lpfc_vport_transport_functions = {
@@ -2552,6 +3994,7 @@ struct fc_function_template lpfc_vport_transport_functions = {
.show_host_supported_fc4s = 1,
.show_host_supported_speeds = 1,
.show_host_maxframe_size = 1,
+ .show_host_symbolic_name = 1,
/* dynamic attributes the driver supports */
.get_host_port_id = lpfc_get_host_port_id,
@@ -2600,8 +4043,14 @@ struct fc_function_template lpfc_vport_transport_functions = {
.terminate_rport_io = lpfc_terminate_rport_io,
.vport_disable = lpfc_vport_disable,
+
+ .set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
};
+/**
+ * lpfc_get_cfgparam: Used during probe_one to init the adapter structure.
+ * @phba: lpfc_hba pointer.
+ **/
void
lpfc_get_cfgparam(struct lpfc_hba *phba)
{
@@ -2618,13 +4067,12 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_use_msi_init(phba, lpfc_use_msi);
lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
+ lpfc_enable_bg_init(phba, lpfc_enable_bg);
phba->cfg_poll = lpfc_poll;
phba->cfg_soft_wwnn = 0L;
phba->cfg_soft_wwpn = 0L;
lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
- /* Also reinitialize the host templates with new values. */
- lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
- lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+ lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
/*
* Since the sg_tablesize is module parameter, the sg_dma_buf_size
* used to create the sg_dma_buf_pool must be dynamically calculated.
@@ -2633,10 +4081,25 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp) +
((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
+
+ if (phba->cfg_enable_bg) {
+ phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
+ phba->cfg_sg_dma_buf_size +=
+ phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
+ }
+
+ /* Also reinitialize the host templates with new values. */
+ lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+ lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
return;
}
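
As a sanity check on the sizing rule above, a minimal sketch with the structure
sizes treated as assumptions (the real values come from lpfc_hw.h):

	/* Assumed sizes: fcp_cmnd 32, fcp_rsp 160, ulp_bde64 12 bytes. */
	enum { FCP_CMND_SZ = 32, FCP_RSP_SZ = 160, BDE64_SZ = 12 };

	static unsigned int example_sg_dma_buf_size(unsigned int sg_seg_cnt,
						    unsigned int prot_sg_seg_cnt,
						    int bg_enabled)
	{
		unsigned int size = FCP_CMND_SZ + FCP_RSP_SZ +
				    (sg_seg_cnt + 2) * BDE64_SZ;

		if (bg_enabled)		/* extra BDEs for protection data */
			size += prot_sg_seg_cnt * BDE64_SZ;
		return size;	/* e.g. 64 segs, no BG: 32 + 160 + 66*12 = 984 */
	}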
+/**
+ * lpfc_get_vport_cfgparam: Used during port create, init the vport structure.
+ * @vport: lpfc_vport pointer.
+ **/
void
lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
{
@@ -2648,6 +4111,7 @@ lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
lpfc_restrict_login_init(vport, lpfc_restrict_login);
lpfc_fcp_class_init(vport, lpfc_fcp_class);
lpfc_use_adisc_init(vport, lpfc_use_adisc);
+ lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time);
lpfc_fdmi_on_init(vport, lpfc_fdmi_on);
lpfc_discovery_threads_init(vport, lpfc_discovery_threads);
lpfc_max_luns_init(vport, lpfc_max_luns);
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 1b8245213b83..07f4976319a5 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -18,19 +18,20 @@
* included with this package. *
*******************************************************************/
-typedef int (*node_filter)(struct lpfc_nodelist *ndlp, void *param);
+typedef int (*node_filter)(struct lpfc_nodelist *, void *);
struct fc_rport;
void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
+void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *);
-int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
- struct lpfc_dmabuf *mp);
+int lpfc_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *, struct lpfc_dmabuf *);
void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
-void lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport);
+void lpfc_issue_clear_la(struct lpfc_hba *, struct lpfc_vport *);
void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_config_msi(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -43,7 +44,7 @@ void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
-void lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove);
+void lpfc_cleanup_rpis(struct lpfc_vport *, int);
int lpfc_linkdown(struct lpfc_hba *);
void lpfc_port_link_failure(struct lpfc_vport *);
void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -135,7 +136,7 @@ void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
void lpfc_fdmi_tmo(unsigned long);
-void lpfc_fdmi_timeout_handler(struct lpfc_vport *vport);
+void lpfc_fdmi_timeout_handler(struct lpfc_vport *);
int lpfc_config_port_prep(struct lpfc_hba *);
int lpfc_config_port_post(struct lpfc_hba *);
@@ -155,6 +156,8 @@ int lpfc_sli_queue_setup(struct lpfc_hba *);
void lpfc_handle_eratt(struct lpfc_hba *);
void lpfc_handle_latt(struct lpfc_hba *);
irqreturn_t lpfc_intr_handler(int, void *);
+irqreturn_t lpfc_sp_intr_handler(int, void *);
+irqreturn_t lpfc_fp_intr_handler(int, void *);
void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
@@ -175,11 +178,12 @@ void lpfc_mem_free(struct lpfc_hba *);
void lpfc_stop_vport_timers(struct lpfc_vport *);
void lpfc_poll_timeout(unsigned long ptr);
-void lpfc_poll_start_timer(struct lpfc_hba * phba);
-void lpfc_sli_poll_fcp_ring(struct lpfc_hba * hba);
+void lpfc_poll_start_timer(struct lpfc_hba *);
+void lpfc_poll_eratt(unsigned long);
+void lpfc_sli_poll_fcp_ring(struct lpfc_hba *);
struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *);
-void lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
-uint16_t lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
+void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
+uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
void lpfc_reset_barrier(struct lpfc_hba * phba);
int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
@@ -187,11 +191,13 @@ int lpfc_sli_brdkill(struct lpfc_hba *);
int lpfc_sli_brdreset(struct lpfc_hba *);
int lpfc_sli_brdrestart(struct lpfc_hba *);
int lpfc_sli_hba_setup(struct lpfc_hba *);
+int lpfc_sli_config_port(struct lpfc_hba *, int);
int lpfc_sli_host_down(struct lpfc_vport *);
int lpfc_sli_hba_down(struct lpfc_hba *);
int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
int lpfc_sli_handle_mb_event(struct lpfc_hba *);
int lpfc_sli_flush_mbox_queue(struct lpfc_hba *);
+int lpfc_sli_check_eratt(struct lpfc_hba *);
int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
struct lpfc_sli_ring *, uint32_t);
void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -199,6 +205,7 @@ int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *, uint32_t);
void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
+void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_dmabuf *);
struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
@@ -226,17 +233,13 @@ struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t);
struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
struct lpfc_name *);
-int lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
- uint32_t timeout);
+int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
-int lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
- struct lpfc_sli_ring * pring,
- struct lpfc_iocbq * piocb,
- struct lpfc_iocbq * prspiocbq,
- uint32_t timeout);
-void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba,
- struct lpfc_iocbq * cmdiocb,
- struct lpfc_iocbq * rspiocb);
+int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, struct lpfc_sli_ring *,
+ struct lpfc_iocbq *, struct lpfc_iocbq *,
+ uint32_t);
+void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *);
void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
@@ -269,7 +272,7 @@ void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);
struct lpfc_vport *lpfc_create_port(struct lpfc_hba *, int, struct device *);
int lpfc_vport_disable(struct fc_vport *fc_vport, bool disable);
-void lpfc_mbx_unreg_vpi(struct lpfc_vport *);
+int lpfc_mbx_unreg_vpi(struct lpfc_vport *);
void destroy_port(struct lpfc_vport *);
int lpfc_get_instance(void);
void lpfc_host_attrib_init(struct Scsi_Host *);
@@ -282,14 +285,33 @@ extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t,
uint32_t, uint32_t);
extern struct lpfc_hbq_init *lpfc_hbq_defs[];
+/* externs BlockGuard */
+extern char *_dump_buf_data;
+extern unsigned long _dump_buf_data_order;
+extern char *_dump_buf_dif;
+extern unsigned long _dump_buf_dif_order;
+extern spinlock_t _dump_buf_lock;
+extern int _dump_buf_done;
+extern spinlock_t pgcnt_lock;
+extern unsigned int pgcnt;
+extern unsigned int lpfc_prot_mask;
+extern unsigned char lpfc_prot_guard;
+
/* Interface exported by fabric iocb scheduler */
void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
void lpfc_fabric_abort_hba(struct lpfc_hba *);
void lpfc_fabric_block_timeout(unsigned long);
void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
-void lpfc_adjust_queue_depth(struct lpfc_hba *);
+void lpfc_rampdown_queue_depth(struct lpfc_hba *);
void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
void lpfc_ramp_up_queue_handler(struct lpfc_hba *);
+void lpfc_scsi_dev_block(struct lpfc_hba *);
+
+void
+lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *);
+struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *);
+void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
#define HBA_EVENT_RSCN 5
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 7fc74cf5823b..896c7b0351e5 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -34,6 +34,7 @@
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@@ -134,25 +135,24 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
list_del(&head);
} else {
- struct lpfc_iocbq *next;
-
- list_for_each_entry_safe(iocbq, next, &piocbq->list, list) {
+ INIT_LIST_HEAD(&head);
+ list_add_tail(&head, &piocbq->list);
+ list_for_each_entry(iocbq, &head, list) {
icmd = &iocbq->iocb;
if (icmd->ulpBdeCount == 0)
- lpfc_ct_unsol_buffer(phba, piocbq, NULL, 0);
+ lpfc_ct_unsol_buffer(phba, iocbq, NULL, 0);
for (i = 0; i < icmd->ulpBdeCount; i++) {
paddr = getPaddr(icmd->un.cont64[i].addrHigh,
icmd->un.cont64[i].addrLow);
mp = lpfc_sli_ringpostbuf_get(phba, pring,
paddr);
size = icmd->un.cont64[i].tus.f.bdeSize;
- lpfc_ct_unsol_buffer(phba, piocbq, mp, size);
+ lpfc_ct_unsol_buffer(phba, iocbq, mp, size);
lpfc_in_buf_free(phba, mp);
}
- list_del(&iocbq->list);
- lpfc_sli_release_iocbq(phba, iocbq);
lpfc_post_buffer(phba, pring, i);
}
+ list_del(&head);
}
}
@@ -212,7 +212,7 @@ lpfc_alloc_ct_rsp(struct lpfc_hba *phba, int cmdcode, struct ulp_bde64 *bpl,
else
list_add_tail(&mp->list, &mlist->list);
- bpl->tus.f.bdeFlags = BUFF_USE_RCV;
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
/* build buffer ptr list for IOCB */
bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) );
bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) );
@@ -283,7 +283,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
icmd->un.genreq64.bdl.ulpIoTag32 = 0;
icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
- icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
+ icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
icmd->un.genreq64.bdl.bdeSize = (num_entry * sizeof (struct ulp_bde64));
if (usr_flg)
@@ -560,18 +560,25 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_ns_retry);
/* Don't bother processing response if vport is being torn down. */
- if (vport->load_flag & FC_UNLOADING)
+ if (vport->load_flag & FC_UNLOADING) {
+ if (vport->fc_flag & FC_RSCN_MODE)
+ lpfc_els_flush_rscn(vport);
goto out;
+ }
if (lpfc_els_chk_latt(vport)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0216 Link event during NS query\n");
+ if (vport->fc_flag & FC_RSCN_MODE)
+ lpfc_els_flush_rscn(vport);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
goto out;
}
if (lpfc_error_lost_link(irsp)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0226 NS query failed due to link event\n");
+ if (vport->fc_flag & FC_RSCN_MODE)
+ lpfc_els_flush_rscn(vport);
goto out;
}
if (irsp->ulpStatus) {
@@ -587,6 +594,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (rc == 0)
goto out;
}
+ if (vport->fc_flag & FC_RSCN_MODE)
+ lpfc_els_flush_rscn(vport);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0257 GID_FT Query error: 0x%x 0x%x\n",
@@ -861,7 +870,7 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
retry++;
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0216 Retrying NS cmd %x\n", cmdcode);
+ "0250 Retrying NS cmd %x\n", cmdcode);
rc = lpfc_ns_cmd(vport, cmdcode, retry, 0);
if (rc == 0)
goto out;
@@ -1008,8 +1017,10 @@ lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
if (n < size)
n += snprintf(symbol + n, size - n, " VPort-%d", vport->vpi);
- if (n < size && vport->vname)
- n += snprintf(symbol + n, size - n, " VName-%s", vport->vname);
+ if (n < size &&
+ strlen(vport->fc_vport->symbolic_name))
+ n += snprintf(symbol + n, size - n, " VName-%s",
+ vport->fc_vport->symbolic_name);
return n;
}
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 094b47e94b29..b615eda361d5 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2007 Emulex. All rights reserved. *
+ * Copyright (C) 2007-2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -35,6 +35,7 @@
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@@ -45,14 +46,15 @@
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
-#ifdef CONFIG_LPFC_DEBUG_FS
-/* debugfs interface
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+/**
+ * debugfs interface
*
* To access this interface the user should:
* # mkdir /debug
* # mount -t debugfs none /debug
*
- * The lpfc debugfs directory hierachy is:
+ * The lpfc debugfs directory hierarchy is:
* lpfc/lpfcX/vportY
* where X is the lpfc hba unique_id
* where Y is the vport VPI on that hba
@@ -61,14 +63,21 @@
* discovery_trace
 * This is an ASCII readable file that contains a trace of the last
* lpfc_debugfs_max_disc_trc events that happened on a specific vport.
- * See lpfc_debugfs.h for different categories of
- * discovery events. To enable the discovery trace, the following
- * module parameters must be set:
+ * See lpfc_debugfs.h for different categories of discovery events.
+ * To enable the discovery trace, the following module parameters must be set:
* lpfc_debugfs_enable=1 Turns on lpfc debugfs filesystem support
* lpfc_debugfs_max_disc_trc=X Where X is the event trace depth for
* EACH vport. X MUST also be a power of 2.
* lpfc_debugfs_mask_disc_trc=Y Where Y is an event mask as defined in
* lpfc_debugfs.h .
+ *
+ * slow_ring_trace
+ * This is an ASCII readable file that contains a trace of the last
+ * lpfc_debugfs_max_slow_ring_trc events that happened on a specific HBA.
+ * To enable the slow ring trace, the following module parameters must be set:
+ * lpfc_debugfs_enable=1 Turns on lpfc debugfs filesystem support
+ * lpfc_debugfs_max_slow_ring_trc=X Where X is the event trace depth for
+ * the HBA. X MUST also be a power of 2.
*/
static int lpfc_debugfs_enable = 1;
module_param(lpfc_debugfs_enable, int, 0);
@@ -117,6 +126,25 @@ struct lpfc_debug {
static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
static unsigned long lpfc_debugfs_start_time = 0L;
+/**
+ * lpfc_debugfs_disc_trc_data - Dump discovery logging to a buffer.
+ * @vport: The vport to gather the log info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine gathers the lpfc discovery debugfs data from the @vport and
+ * dumps it to @buf up to @size number of bytes. It will start at the next entry
+ * in the log and process the log until the end of the buffer. Then it will
+ * gather from the beginning of the log and process until the current entry.
+ *
+ * Notes:
+ * Discovery logging will be disabled while this routine dumps the log.
+ *
+ * Return Value:
+ * This routine returns the number of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
static int
lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
{
@@ -125,7 +153,6 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
struct lpfc_debugfs_trc *dtp;
char buffer[LPFC_DEBUG_TRC_ENTRY_SIZE];
-
enable = lpfc_debugfs_enable;
lpfc_debugfs_enable = 0;
@@ -159,6 +186,25 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
return len;
}
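
The two-pass, wrap-around walk described above is the standard circular-buffer
dump; a minimal self-contained sketch (not the lpfc implementation), assuming a
monotonically increasing entry counter and a power-of-two depth:

	#include <stdio.h>

	struct trc_entry { char text[64]; };

	static int example_trc_dump(struct trc_entry *log, unsigned int trc_cnt,
				    unsigned int depth, char *buf, int size)
	{
		unsigned int i, idx = trc_cnt & (depth - 1); /* oldest slot */
		int len = 0;

		for (i = 0; i < depth && len < size; i++)
			len += snprintf(buf + len, size - len, "%s\n",
					log[(idx + i) & (depth - 1)].text);
		return len < size ? len : size;	/* never exceeds size */
	}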
+/**
+ * lpfc_debugfs_slow_ring_trc_data - Dump slow ring logging to a buffer.
+ * @phba: The HBA to gather the log info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine gathers the lpfc slow ring debugfs data from the @phba and
+ * dumps it to @buf up to @size number of bytes. It will start at the next entry
+ * in the log and process the log until the end of the buffer. Then it will
+ * gather from the beginning of the log and process until the current entry.
+ *
+ * Notes:
+ * Slow ring logging will be disabled while while this routine dumps the log.
+ *
+ * Return Value:
+ * This routine returns the amount of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
static int
lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
{
@@ -203,6 +249,25 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
static int lpfc_debugfs_last_hbq = -1;
+/**
+ * lpfc_debugfs_hbqinfo_data - Dump host buffer queue info to a buffer.
+ * @phba: The HBA to gather host buffer info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the host buffer queue info from the @phba to @buf up to
+ * @size number of bytes. A header that describes the current hbq state will be
+ * dumped to @buf first and then info on each hbq entry will be dumped to @buf
+ * until @size bytes have been dumped or all the hbq info has been dumped.
+ *
+ * Notes:
+ * This routine will rotate through each configured HBQ each time called.
+ *
+ * Return Value:
+ * This routine returns the number of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
static int
lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
{
@@ -303,6 +368,24 @@ skipit:
static int lpfc_debugfs_last_hba_slim_off;
+/**
+ * lpfc_debugfs_dumpHBASlim_data - Dump HBA SLIM info to a buffer.
+ * @phba: The HBA to gather SLIM info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the current contents of HBA SLIM for the HBA associated
+ * with @phba to @buf up to @size bytes of data. This is the raw HBA SLIM data.
+ *
+ * Notes:
+ * This routine will only dump up to 1024 bytes of data each time called and
+ * should be called multiple times to dump the entire HBA SLIM.
+ *
+ * Return Value:
+ * This routine returns the number of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
static int
lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
{
@@ -342,6 +425,21 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
return len;
}
+/**
+ * lpfc_debugfs_dumpHostSlim_data - Dump host SLIM info to a buffer.
+ * @phba: The HBA to gather Host SLIM info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the current contents of host SLIM for the host associated
+ * with @phba to @buf up to @size bytes of data. The dump will contain the
+ * Mailbox, PCB, Rings, and Registers that are located in host memory.
+ *
+ * Return Value:
+ * This routine returns the number of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
static int
lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
{
@@ -357,7 +455,7 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
spin_lock_irq(&phba->hbalock);
len += snprintf(buf+len, size-len, "SLIM Mailbox\n");
- ptr = (uint32_t *)phba->slim2p;
+ ptr = (uint32_t *)phba->slim2p.virt;
i = sizeof(MAILBOX_t);
while (i > 0) {
len += snprintf(buf+len, size-len,
@@ -370,7 +468,7 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
}
len += snprintf(buf+len, size-len, "SLIM PCB\n");
- ptr = (uint32_t *)&phba->slim2p->pcb;
+ ptr = (uint32_t *)phba->pcb;
i = sizeof(PCB_t);
while (i > 0) {
len += snprintf(buf+len, size-len,
@@ -382,44 +480,16 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
off += (8 * sizeof(uint32_t));
}
- pgpp = (struct lpfc_pgp *)&phba->slim2p->mbx.us.s3_pgp.port;
- pring = &psli->ring[0];
- len += snprintf(buf+len, size-len,
- "Ring 0: CMD GetInx:%d (Max:%d Next:%d Local:%d flg:x%x) "
- "RSP PutInx:%d Max:%d\n",
- pgpp->cmdGetInx, pring->numCiocb,
- pring->next_cmdidx, pring->local_getidx, pring->flag,
- pgpp->rspPutInx, pring->numRiocb);
- pgpp++;
-
- pring = &psli->ring[1];
- len += snprintf(buf+len, size-len,
- "Ring 1: CMD GetInx:%d (Max:%d Next:%d Local:%d flg:x%x) "
- "RSP PutInx:%d Max:%d\n",
- pgpp->cmdGetInx, pring->numCiocb,
- pring->next_cmdidx, pring->local_getidx, pring->flag,
- pgpp->rspPutInx, pring->numRiocb);
- pgpp++;
-
- pring = &psli->ring[2];
- len += snprintf(buf+len, size-len,
- "Ring 2: CMD GetInx:%d (Max:%d Next:%d Local:%d flg:x%x) "
- "RSP PutInx:%d Max:%d\n",
- pgpp->cmdGetInx, pring->numCiocb,
- pring->next_cmdidx, pring->local_getidx, pring->flag,
- pgpp->rspPutInx, pring->numRiocb);
- pgpp++;
-
- pring = &psli->ring[3];
- len += snprintf(buf+len, size-len,
- "Ring 3: CMD GetInx:%d (Max:%d Next:%d Local:%d flg:x%x) "
- "RSP PutInx:%d Max:%d\n",
- pgpp->cmdGetInx, pring->numCiocb,
- pring->next_cmdidx, pring->local_getidx, pring->flag,
- pgpp->rspPutInx, pring->numRiocb);
-
-
- ptr = (uint32_t *)&phba->slim2p->mbx.us.s3_pgp.hbq_get;
+ for (i = 0; i < 4; i++) {
+ pgpp = &phba->port_gp[i];
+ pring = &psli->ring[i];
+ len += snprintf(buf+len, size-len,
+ "Ring %d: CMD GetInx:%d (Max:%d Next:%d "
+ "Local:%d flg:x%x) RSP PutInx:%d Max:%d\n",
+ i, pgpp->cmdGetInx, pring->numCiocb,
+ pring->next_cmdidx, pring->local_getidx,
+ pring->flag, pgpp->rspPutInx, pring->numRiocb);
+ }
word0 = readl(phba->HAregaddr);
word1 = readl(phba->CAregaddr);
word2 = readl(phba->HSregaddr);
@@ -430,6 +500,21 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
return len;
}
+/**
+ * lpfc_debugfs_nodelist_data - Dump target node list to a buffer.
+ * @vport: The vport to gather target node info from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the current target node list associated with @vport to
+ * @buf up to @size bytes of data. Each node entry in the dump will contain a
+ * node state, DID, WWPN, WWNN, RPI, flags, type, and other useful fields.
+ *
+ * Return Value:
+ * This routine returns the number of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
static int
lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
{
@@ -513,12 +598,27 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
}
#endif
-
+/**
+ * lpfc_debugfs_disc_trc - Store discovery trace log.
+ * @vport: The vport to associate this trace string with for retrieval.
+ * @mask: Log entry classification.
+ * @fmt: Format string to be displayed when dumping the log.
+ * @data1: 1st data parameter to be applied to @fmt.
+ * @data2: 2nd data parameter to be applied to @fmt.
+ * @data3: 3rd data parameter to be applied to @fmt.
+ *
+ * Description:
+ * This routine is used by the driver code to add a debugfs log entry to the
+ * discovery trace buffer associated with @vport. Only entries with a @mask that
+ * match the current debugfs discovery mask will be saved. Entries that do not
+ * match are discarded. @fmt, @data1, @data2, and @data3 are used like
+ * printf when displaying the log.
+ **/
inline void
lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
uint32_t data1, uint32_t data2, uint32_t data3)
{
-#ifdef CONFIG_LPFC_DEBUG_FS
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct lpfc_debugfs_trc *dtp;
int index;
@@ -542,11 +642,24 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
return;
}
+/**
+ * lpfc_debugfs_slow_ring_trc - Store slow ring trace log.
+ * @phba: The phba to associate this trace string with for retrieval.
+ * @fmt: Format string to be displayed when dumping the log.
+ * @data1: 1st data parameter to be applied to @fmt.
+ * @data2: 2nd data parameter to be applied to @fmt.
+ * @data3: 3rd data parameter to be applied to @fmt.
+ *
+ * Description:
+ * This routine is used by the driver code to add a debugfs log entry to the
+ * slow ring trace buffer associated with @phba. @fmt, @data1, @data2, and
+ * @data3 are used like printf when displaying the log.
+ **/
inline void
lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
uint32_t data1, uint32_t data2, uint32_t data3)
{
-#ifdef CONFIG_LPFC_DEBUG_FS
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct lpfc_debugfs_trc *dtp;
int index;
@@ -567,7 +680,22 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
return;
}
-#ifdef CONFIG_LPFC_DEBUG_FS
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+/**
+ * lpfc_debugfs_disc_trc_open - Open the discovery trace log.
+ * @inode: The inode pointer that contains a vport pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the vport from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this vport, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
static int
lpfc_debugfs_disc_trc_open(struct inode *inode, struct file *file)
{
@@ -585,7 +713,7 @@ lpfc_debugfs_disc_trc_open(struct inode *inode, struct file *file)
if (!debug)
goto out;
- /* Round to page boundry */
+ /* Round to page boundary */
size = (lpfc_debugfs_max_disc_trc * LPFC_DEBUG_TRC_ENTRY_SIZE);
size = PAGE_ALIGN(size);
@@ -603,6 +731,21 @@ out:
return rc;
}
+/**
+ * lpfc_debugfs_slow_ring_trc_open - Open the Slow Ring trace log.
+ * @inode: The inode pointer that contains an hba pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the hba from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this hba, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
static int
lpfc_debugfs_slow_ring_trc_open(struct inode *inode, struct file *file)
{
@@ -620,7 +763,7 @@ lpfc_debugfs_slow_ring_trc_open(struct inode *inode, struct file *file)
if (!debug)
goto out;
- /* Round to page boundry */
+ /* Round to page boundary */
size = (lpfc_debugfs_max_slow_ring_trc * LPFC_DEBUG_TRC_ENTRY_SIZE);
size = PAGE_ALIGN(size);
@@ -638,6 +781,21 @@ out:
return rc;
}
+/**
+ * lpfc_debugfs_hbqinfo_open - Open the hbqinfo debugfs buffer.
+ * @inode: The inode pointer that contains an hba pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the hba from the i_private field in @inode, allocates the necessary buffer
+ * for the output, fills the buffer with the hbq info for this hba, and then
+ * returns a pointer to that buffer in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
static int
lpfc_debugfs_hbqinfo_open(struct inode *inode, struct file *file)
{
@@ -649,7 +807,7 @@ lpfc_debugfs_hbqinfo_open(struct inode *inode, struct file *file)
if (!debug)
goto out;
- /* Round to page boundry */
+ /* Round to page boundary */
debug->buffer = kmalloc(LPFC_HBQINFO_SIZE, GFP_KERNEL);
if (!debug->buffer) {
kfree(debug);
@@ -665,6 +823,21 @@ out:
return rc;
}
+/**
+ * lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer.
+ * @inode: The inode pointer that contains an hba pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the hba from the i_private field in @inode, allocates the necessary buffer
+ * for the output, fills the buffer with the HBA SLIM contents, and then
+ * returns a pointer to that buffer in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
static int
lpfc_debugfs_dumpHBASlim_open(struct inode *inode, struct file *file)
{
@@ -676,7 +849,7 @@ lpfc_debugfs_dumpHBASlim_open(struct inode *inode, struct file *file)
if (!debug)
goto out;
- /* Round to page boundry */
+ /* Round to page boundary */
debug->buffer = kmalloc(LPFC_DUMPHBASLIM_SIZE, GFP_KERNEL);
if (!debug->buffer) {
kfree(debug);
@@ -692,6 +865,21 @@ out:
return rc;
}
+/**
+ * lpfc_debugfs_dumpHostSlim_open - Open the Dump Host SLIM debugfs buffer.
+ * @inode: The inode pointer that contains an hba pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the hba from the i_private field in @inode, allocates the necessary buffer
+ * for the output, fills the buffer with the host SLIM contents, and then
+ * returns a pointer to that buffer in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
static int
lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file)
{
@@ -703,7 +891,7 @@ lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file)
if (!debug)
goto out;
- /* Round to page boundry */
+ /* Round to page boundary */
debug->buffer = kmalloc(LPFC_DUMPHOSTSLIM_SIZE, GFP_KERNEL);
if (!debug->buffer) {
kfree(debug);
@@ -720,6 +908,106 @@ out:
}
static int
+lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug;
+ int rc = -ENOMEM;
+
+ if (!_dump_buf_data)
+ return -EBUSY;
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+ /* Round to page boundary */
+ printk(KERN_ERR "BLKGRD %s: _dump_buf_data=0x%p\n",
+ __func__, _dump_buf_data);
+ debug->buffer = _dump_buf_data;
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+ }
+
+ debug->len = (1 << _dump_buf_data_order) << PAGE_SHIFT;
+ file->private_data = debug;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static int
+lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug;
+ int rc = -ENOMEM;
+
+ if (!_dump_buf_dif)
+ return -EBUSY;
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+ /* Round to page boundary */
+ printk(KERN_ERR "BLKGRD %s: _dump_buf_dif=0x%p file=%s\n", __func__,
+ _dump_buf_dif, file->f_dentry->d_name.name);
+ debug->buffer = _dump_buf_dif;
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+ }
+
+ debug->len = (1 << _dump_buf_dif_order) << PAGE_SHIFT;
+ file->private_data = debug;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static ssize_t
+lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ /*
+ * The Data/DIF buffers only save one failing IO.
+ * The write op is used as a reset mechanism after an IO has
+ * already been saved, so the next one can be saved.
+ */
+ spin_lock(&_dump_buf_lock);
+
+ memset((void *)_dump_buf_data, 0,
+ ((1 << PAGE_SHIFT) << _dump_buf_data_order));
+ memset((void *)_dump_buf_dif, 0,
+ ((1 << PAGE_SHIFT) << _dump_buf_dif_order));
+
+ _dump_buf_done = 0;
+
+ spin_unlock(&_dump_buf_lock);
+
+ return nbytes;
+}
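
A hypothetical user-space sequence against these files (paths per the mount
example earlier in this file): read out the one saved failing IO, then write
anything back to re-arm capture for the next one:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		int fd = open("/debug/lpfc/lpfc0/dumpData", O_RDWR);

		if (fd < 0)
			return 1;
		while (read(fd, buf, sizeof(buf)) > 0)
			;			/* consume the saved data */
		write(fd, "1", 1);		/* any write resets capture */
		close(fd);
		return 0;
	}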
+
+
+
+/**
+ * lpfc_debugfs_nodelist_open - Open the nodelist debugfs file.
+ * @inode: The inode pointer that contains a vport pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the vport from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this vport, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
+static int
lpfc_debugfs_nodelist_open(struct inode *inode, struct file *file)
{
struct lpfc_vport *vport = inode->i_private;
@@ -730,7 +1018,7 @@ lpfc_debugfs_nodelist_open(struct inode *inode, struct file *file)
if (!debug)
goto out;
- /* Round to page boundry */
+ /* Round to page boundary */
debug->buffer = kmalloc(LPFC_NODELIST_SIZE, GFP_KERNEL);
if (!debug->buffer) {
kfree(debug);
@@ -746,6 +1034,23 @@ out:
return rc;
}
+/**
+ * lpfc_debugfs_lseek - Seek through a debugfs file.
+ * @file: The file pointer to seek through.
+ * @off: The offset to seek to or the amount to seek by.
+ * @whence: Indicates how to seek.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs lseek file operation. The
+ * @whence parameter indicates whether @off is the offset to directly seek to,
+ * or if it is a value to seek forward or reverse by. This function figures out
+ * what the new offset of the debugfs file will be and assigns that value to the
+ * f_pos field of @file.
+ *
+ * Returns:
+ * This function returns the new offset if successful and returns a negative
+ * error if unable to process the seek.
+ **/
static loff_t
lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
{
@@ -767,6 +1072,22 @@ lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos);
}
+/**
+ * lpfc_debugfs_read - Read a debugfs file.
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the buffer indicated in the private_data
+ * field of @file. It will start reading at @ppos and copy up to @nbytes of
+ * data to @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
static ssize_t
lpfc_debugfs_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
@@ -776,6 +1097,18 @@ lpfc_debugfs_read(struct file *file, char __user *buf,
debug->len);
}
+/**
+ * lpfc_debugfs_release - Release the buffer used to store debugfs file data.
+ * @inode: The inode pointer that contains a vport pointer. (unused)
+ * @file: The file pointer that contains the buffer to release.
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs file was
+ * opened.
+ *
+ * Returns:
+ * This function returns zero.
+ **/
static int
lpfc_debugfs_release(struct inode *inode, struct file *file)
{
@@ -787,6 +1120,17 @@ lpfc_debugfs_release(struct inode *inode, struct file *file)
return 0;
}
+static int
+lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug = file->private_data;
+
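+ /* buffer aliases the global dump area, which is freed elsewhere */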
+ debug->buffer = NULL;
+ kfree(debug);
+
+ return 0;
+}
+
#undef lpfc_debugfs_op_disc_trc
static struct file_operations lpfc_debugfs_op_disc_trc = {
.owner = THIS_MODULE,
@@ -832,6 +1176,26 @@ static struct file_operations lpfc_debugfs_op_dumpHostSlim = {
.release = lpfc_debugfs_release,
};
+#undef lpfc_debugfs_op_dumpData
+static struct file_operations lpfc_debugfs_op_dumpData = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_dumpData_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_read,
+ .write = lpfc_debugfs_dumpDataDif_write,
+ .release = lpfc_debugfs_dumpDataDif_release,
+};
+
+#undef lpfc_debugfs_op_dumpDif
+static struct file_operations lpfc_debugfs_op_dumpDif = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_dumpDif_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_read,
+ .write = lpfc_debugfs_dumpDataDif_write,
+ .release = lpfc_debugfs_dumpDataDif_release,
+};
+
#undef lpfc_debugfs_op_slow_ring_trc
static struct file_operations lpfc_debugfs_op_slow_ring_trc = {
.owner = THIS_MODULE,
@@ -845,10 +1209,20 @@ static struct dentry *lpfc_debugfs_root = NULL;
static atomic_t lpfc_debugfs_hba_count;
#endif
+/**
+ * lpfc_debugfs_initialize - Initialize debugfs for a vport.
+ * @vport: The vport pointer to initialize.
+ *
+ * Description:
+ * When Debugfs is configured this routine sets up the lpfc debugfs file system.
+ * If not already created, this routine will create the lpfc directory, and
+ * lpfcX directory (for this HBA), and vportX directory for this vport. It will
+ * also create each file used to access lpfc specific debugfs information.
+ **/
inline void
lpfc_debugfs_initialize(struct lpfc_vport *vport)
{
-#ifdef CONFIG_LPFC_DEBUG_FS
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct lpfc_hba *phba = vport->phba;
char name[64];
uint32_t num, i;
@@ -862,7 +1236,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
atomic_set(&lpfc_debugfs_hba_count, 0);
if (!lpfc_debugfs_root) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0409 Cannot create debugfs root\n");
+ "0408 Cannot create debugfs root\n");
goto debug_failed;
}
}
@@ -876,7 +1250,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
debugfs_create_dir(name, lpfc_debugfs_root);
if (!phba->hba_debugfs_root) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0409 Cannot create debugfs hba\n");
+ "0412 Cannot create debugfs hba\n");
goto debug_failed;
}
atomic_inc(&lpfc_debugfs_hba_count);
@@ -890,7 +1264,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
phba, &lpfc_debugfs_op_hbqinfo);
if (!phba->debug_hbqinfo) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0409 Cannot create debugfs hbqinfo\n");
+ "0411 Cannot create debugfs hbqinfo\n");
goto debug_failed;
}
@@ -902,7 +1276,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
phba, &lpfc_debugfs_op_dumpHBASlim);
if (!phba->debug_dumpHBASlim) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0409 Cannot create debugfs dumpHBASlim\n");
+ "0413 Cannot create debugfs dumpHBASlim\n");
goto debug_failed;
}
@@ -914,10 +1288,36 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
phba, &lpfc_debugfs_op_dumpHostSlim);
if (!phba->debug_dumpHostSlim) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0409 Cannot create debugfs dumpHostSlim\n");
+ "0414 Cannot create debugfs dumpHostSlim\n");
+ goto debug_failed;
+ }
+
+ /* Setup dumpData */
+ snprintf(name, sizeof(name), "dumpData");
+ phba->debug_dumpData =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dumpData);
+ if (!phba->debug_dumpData) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0800 Cannot create debugfs dumpData\n");
+ goto debug_failed;
+ }
+
+ /* Setup dumpDif */
+ snprintf(name, sizeof(name), "dumpDif");
+ phba->debug_dumpDif =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dumpDif);
+ if (!phba->debug_dumpDif) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0801 Cannot create debugfs dumpDif\n");
goto debug_failed;
}
+
+
/* Setup slow ring trace */
if (lpfc_debugfs_max_slow_ring_trc) {
num = lpfc_debugfs_max_slow_ring_trc - 1;
@@ -944,7 +1344,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
phba, &lpfc_debugfs_op_slow_ring_trc);
if (!phba->debug_slow_ring_trc) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0409 Cannot create debugfs "
+ "0415 Cannot create debugfs "
"slow_ring_trace\n");
goto debug_failed;
}
@@ -955,7 +1355,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
GFP_KERNEL);
if (!phba->slow_ring_trc) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0409 Cannot create debugfs "
+ "0416 Cannot create debugfs "
"slow_ring buffer\n");
goto debug_failed;
}
@@ -972,7 +1372,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
debugfs_create_dir(name, phba->hba_debugfs_root);
if (!vport->vport_debugfs_root) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0409 Cant create debugfs");
+ "0417 Cant create debugfs");
goto debug_failed;
}
atomic_inc(&phba->debugfs_vport_count);
@@ -1001,7 +1401,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
if (!vport->disc_trc) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0409 Cannot create debugfs disc trace "
+ "0418 Cannot create debugfs disc trace "
"buffer\n");
goto debug_failed;
}
@@ -1014,7 +1414,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
vport, &lpfc_debugfs_op_disc_trc);
if (!vport->debug_disc_trc) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0409 Cannot create debugfs "
+ "0419 Cannot create debugfs "
"discovery_trace\n");
goto debug_failed;
}
@@ -1033,11 +1433,21 @@ debug_failed:
#endif
}
-
+/**
+ * lpfc_debugfs_terminate - Tear down debugfs infrastructure for this vport.
+ * @vport: The vport pointer to remove from debugfs.
+ *
+ * Description:
+ * When Debugfs is configured this routine removes debugfs file system elements
+ * that are specific to this vport. It also checks to see if there are any
+ * users left for the debugfs directories associated with the HBA and driver. If
+ * this is the last user of the HBA directory or driver directory then it will
+ * remove those from the debugfs infrastructure as well.
+ **/
inline void
lpfc_debugfs_terminate(struct lpfc_vport *vport)
{
-#ifdef CONFIG_LPFC_DEBUG_FS
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct lpfc_hba *phba = vport->phba;
if (vport->disc_trc) {
@@ -1072,6 +1482,16 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */
phba->debug_dumpHostSlim = NULL;
}
+ if (phba->debug_dumpData) {
+ debugfs_remove(phba->debug_dumpData); /* dumpData */
+ phba->debug_dumpData = NULL;
+ }
+
+ if (phba->debug_dumpDif) {
+ debugfs_remove(phba->debug_dumpDif); /* dumpDif */
+ phba->debug_dumpDif = NULL;
+ }
+
if (phba->slow_ring_trc) {
kfree(phba->slow_ring_trc);
phba->slow_ring_trc = NULL;
@@ -1096,5 +1516,3 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
#endif
return;
}
-
-
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 31e86a55391d..03c7313a1012 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -21,7 +21,7 @@
#ifndef _H_LPFC_DEBUG_FS
#define _H_LPFC_DEBUG_FS
-#ifdef CONFIG_LPFC_DEBUG_FS
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct lpfc_debugfs_trc {
char *fmt;
uint32_t data1;
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 2db0b74b6fad..f29e548a90d1 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -37,6 +37,7 @@ enum lpfc_work_type {
LPFC_EVT_KILL,
LPFC_EVT_ELS_RETRY,
LPFC_EVT_DEV_LOSS,
+ LPFC_EVT_FASTPATH_MGMT_EVT,
};
/* structure used to queue event to the discovery tasklet */
@@ -47,6 +48,24 @@ struct lpfc_work_evt {
enum lpfc_work_type evt;
};
+struct lpfc_scsi_check_condition_event;
+struct lpfc_scsi_varqueuedepth_event;
+struct lpfc_scsi_event_header;
+struct lpfc_fabric_event_header;
+struct lpfc_fcprdchkerr_event;
+
+/* structure used for sending events from fast path */
+struct lpfc_fast_path_event {
+ struct lpfc_work_evt work_evt;
+ struct lpfc_vport *vport;
+ union {
+ struct lpfc_scsi_check_condition_event check_cond_evt;
+ struct lpfc_scsi_varqueuedepth_event queue_depth_evt;
+ struct lpfc_scsi_event_header scsi_evt;
+ struct lpfc_fabric_event_header fabric_evt;
+ struct lpfc_fcprdchkerr_event read_check_error;
+ } un;
+};
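
A hedged sketch of how an event of this shape could be allocated and tagged before being queued to the worker thread; the helper name and the GFP choice are assumptions for illustration, not lpfc code:

	#include <linux/slab.h>

	/* Illustrative only: allocate and tag a fast-path event. */
	static struct lpfc_fast_path_event *
	alloc_fast_path_evt(struct lpfc_vport *vport)
	{
		struct lpfc_fast_path_event *evt;

		/* the fast path may run in interrupt context: no sleeping */
		evt = kzalloc(sizeof(*evt), GFP_ATOMIC);
		if (!evt)
			return NULL;
		evt->vport = vport;
		evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
		/* caller fills evt->un.* and queues evt->work_evt to the worker */
		return evt;
	}
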
struct lpfc_nodelist {
struct list_head nlp_listp;
@@ -88,6 +107,10 @@ struct lpfc_nodelist {
unsigned long last_ramp_up_time; /* jiffy of last ramp up */
unsigned long last_q_full_time; /* jiffy of last queue full */
struct kref kref;
+ atomic_t cmd_pending;
+ uint32_t cmd_qdepth;
+ unsigned long last_change_time;
+ struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
};
/* Defines for nlp_flag (uint32) */
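
The new cmd_pending and cmd_qdepth fields suggest per-node throttling of outstanding commands. A sketch of the usual counter discipline, with hypothetical helper names:

	/* Hypothetical helpers, not lpfc code. */
	static int ndlp_can_queue(struct lpfc_nodelist *ndlp)
	{
		/*
		 * Soft limit: the read/inc pair is not atomic as a whole,
		 * which is acceptable for a heuristic queue-depth throttle.
		 */
		if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
			return 0;			/* node at depth: defer */
		atomic_inc(&ndlp->cmd_pending);
		return 1;
	}

	static void ndlp_cmd_done(struct lpfc_nodelist *ndlp)
	{
		atomic_dec(&ndlp->cmd_pending);		/* completion path */
	}
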
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index f54e0f7eaee3..a8f30bdaff69 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -30,6 +30,7 @@
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@@ -53,6 +54,28 @@ static void lpfc_register_new_vport(struct lpfc_hba *phba,
static int lpfc_max_els_tries = 3;
+/**
+ * lpfc_els_chk_latt: Check host link attention event for a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine checks whether there is an outstanding host link
+ * attention event during the discovery process with the @vport. It is done
+ * by reading the HBA's Host Attention (HA) register. If there is any host
+ * link attention events during this @vport's discovery process, the @vport
+ * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
+ * be issued if the link state is not already in host link cleared state,
+ * and a return code shall indicate whether the host link attention event
+ * had happened.
+ *
+ * Note that, if either the host link is in state LPFC_LINK_DOWN or the @vport
+ * is in state LPFC_VPORT_READY, the request to check for a host link attention
+ * event will be ignored and a return code shall indicate no host link
+ * attention event had happened.
+ *
+ * Return codes
+ * 0 - no host link attention event happened
+ * 1 - host link attention event happened
+ **/
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
@@ -92,6 +115,34 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
return 1;
}
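
The contract documented above reduces to a small skeleton. The sketch below is illustrative only; the field names (port_state, link_state) are assumptions about the lpfc structures:

	/* Skeleton of the documented behavior, not the actual routine. */
	static int chk_latt_skeleton(struct lpfc_vport *vport)
	{
		/* ignored cases: vport already ready, or host link already down */
		if (vport->port_state == LPFC_VPORT_READY ||
		    vport->phba->link_state == LPFC_LINK_DOWN)
			return 0;
		/*
		 * Otherwise read the Host Attention (HA) register; on a link
		 * attention event, mark the vport FC_ABORT_DISCOVERY, clear
		 * the attention if not already cleared, and return 1.
		 */
		return 0;
	}
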
+/**
+ * lpfc_prep_els_iocb: Allocate and prepare a lpfc iocb data structure.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @expectRsp: flag indicating whether response is expected.
+ * @cmdSize: size of the ELS command.
+ * @retry: number of retries to the command IOCB when it fails.
+ * @ndlp: pointer to a node-list data structure.
+ * @did: destination identifier.
+ * @elscmd: the ELS command code.
+ *
+ * This routine allocates an lpfc IOCB data structure from the driver
+ * lpfc-IOCB free-list and prepares the IOCB with the parameters passed in,
+ * so that the discovery state machine can issue Extended Link Service
+ * (ELS) commands. It is a generic lpfc-IOCB allocation and preparation
+ * routine used by all the discovery state machine routines; the ELS
+ * command-specific fields are set up later by the individual discovery
+ * routines after this routine returns. It fills in the
+ * Buffer Descriptor Entries (BDEs), allocates buffers for both command
+ * payload and response payload (if expected). The reference count on the
+ * ndlp is incremented by 1 and the reference to the ndlp is put into
+ * context1 of the IOCB data structure for this IOCB to hold the ndlp
+ * reference for the command's callback function to access later.
+ *
+ * Return code
+ * Pointer to the newly allocated/prepared els iocb data structure
+ * NULL - when els iocb data structure allocation/preparation failed
+ **/
static struct lpfc_iocbq *
lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
uint16_t cmdSize, uint8_t retry,
@@ -150,7 +201,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
- icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
+ icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
icmd->un.elsreq64.remoteID = did; /* DID */
if (expectRsp) {
icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
@@ -170,7 +221,11 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
/* For ELS_REQUEST64_CR, use the VPI by default */
icmd->ulpContext = vport->vpi;
icmd->ulpCt_h = 0;
- icmd->ulpCt_l = 1;
+ /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
+ if (elscmd == ELS_CMD_ECHO)
+ icmd->ulpCt_l = 0; /* context = invalid RPI */
+ else
+ icmd->ulpCt_l = 1; /* context = VPI */
}
bpl = (struct ulp_bde64 *) pbuflist->virt;
@@ -185,7 +240,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
bpl->tus.f.bdeSize = FCELSSIZE;
- bpl->tus.f.bdeFlags = BUFF_USE_RCV;
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bpl->tus.w = le32_to_cpu(bpl->tus.w);
}
@@ -220,7 +275,8 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
return elsiocb;
els_iocb_free_pbuf_exit:
- lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
+ if (expectRsp)
+ lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
kfree(pbuflist);
els_iocb_free_prsp_exit:
@@ -233,6 +289,22 @@ els_iocb_free_pcmb_exit:
return NULL;
}
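
The expectRsp guard added to the error path above matters because the response buffer is only set up when a response is expected; freeing it unconditionally touches a pointer that was never initialized on the no-response path. A generic sketch of that unwind idiom (hypothetical names and sizes):

	#include <linux/slab.h>

	/* Unwind only what this path actually allocated (illustrative). */
	static void *prep_buffers(int expect_rsp)
	{
		void *cmd, *rsp = NULL, *bpl;

		cmd = kzalloc(64, GFP_KERNEL);
		if (!cmd)
			return NULL;
		if (expect_rsp) {
			rsp = kzalloc(64, GFP_KERNEL);
			if (!rsp)
				goto free_cmd;
		}
		bpl = kzalloc(64, GFP_KERNEL);
		if (!bpl)
			goto free_rsp;
		/* real code would link rsp and bpl to cmd before returning */
		return cmd;

	free_rsp:
		if (expect_rsp)		/* rsp was only allocated on this path */
			kfree(rsp);
	free_cmd:
		kfree(cmd);
		return NULL;
	}
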
+/**
+ * lpfc_issue_fabric_reglogin: Issue fabric registration login for a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues a fabric registration login for a @vport. An
+ * active ndlp node with Fabric_DID must already exist for this @vport.
+ * The routine invokes two mailbox commands to carry out fabric registration
+ * login through the HBA firmware: the first mailbox command requests the
+ * HBA to perform link configuration for the @vport; and the second mailbox
+ * command requests the HBA to perform the actual fabric registration login
+ * with the @vport.
+ *
+ * Return code
+ * 0 - successfully issued fabric registration login for @vport
+ * -ENXIO -- failed to issue fabric registration login for @vport
+ **/
static int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
@@ -313,6 +385,26 @@ fail:
return -ENXIO;
}
+/**
+ * lpfc_cmpl_els_flogi_fabric: Completion function for flogi to a fabric port.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @sp: pointer to service parameter data structure.
+ * @irsp: pointer to the IOCB within the lpfc response IOCB.
+ *
+ * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
+ * function to handle the completion of a Fabric Login (FLOGI) into a fabric
+ * port in a fabric topology. It properly sets up the parameters to the @ndlp
+ * from the IOCB response. It also checks the newly assigned N_Port ID to the
+ * @vport against the previously assigned N_Port ID. If it is different from
+ * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
+ * is invoked on all the remaining nodes with the @vport to unregister the
+ * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
+ * is invoked to register login to the fabric.
+ *
+ * Return code
+ * 0 - Success (currently, always return 0)
+ **/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct serv_parm *sp, IOCB_t *irsp)
@@ -387,7 +479,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
list_for_each_entry_safe(np, next_np,
&vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
+ if (!NLP_CHK_NODE_ACT(np))
continue;
if ((np->nlp_state != NLP_STE_NPR_NODE) ||
!(np->nlp_flag & NLP_NPR_ADISC))
@@ -416,9 +508,26 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 0;
}
-/*
- * We FLOGIed into an NPort, initiate pt2pt protocol
- */
+/**
+ * lpfc_cmpl_els_flogi_nport: Completion function for flogi to an N_Port.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @sp: pointer to service parameter data structure.
+ *
+ * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
+ * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
+ * in a point-to-point topology. First, the @vport's N_Port Name is compared
+ * with the received N_Port Name: if the @vport's N_Port Name is greater than
+ * the received N_Port Name lexicographically, this node shall assign local
+ * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
+ * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
+ * this node shall just wait for the remote node to issue PLOGI and assign
+ * N_Port IDs.
+ *
+ * Return code
+ * 0 - Success
+ * -ENXIO - Fail
+ **/
static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct serv_parm *sp)
@@ -516,6 +625,29 @@ fail:
return -ENXIO;
}
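
The point-to-point comparison described in the comment above is a byte-wise comparison of the two 8-byte N_Port Names. A sketch with a hypothetical helper; the PT2PT ID values are taken from the comment:

	#include <linux/string.h>
	#include <linux/types.h>

	#define PT2PT_LocalID	1	/* values from the comment above */
	#define PT2PT_RemoteID	2

	/* Illustrative pt2pt master election, not lpfc code. */
	static int pt2pt_we_assign_ids(const u8 our_wwpn[8], const u8 their_wwpn[8])
	{
		/* the higher (big-endian) N_Port Name assigns both IDs */
		return memcmp(our_wwpn, their_wwpn, 8) > 0;
	}
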
+/**
+ * lpfc_cmpl_els_flogi: Completion callback function for flogi.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the top-level completion callback function for issuing
+ * a Fabric Login (FLOGI) command. If the response IOCB reported error,
+ * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
+ * retry has been made (either immediately or delayed with lpfc_els_retry()
+ * returning 1), the command IOCB will be released and function returned.
+ * If the retry attempt has been given up (possibly reach the maximum
+ * number of retries), one additional decrement of ndlp reference shall be
+ * invoked before going out after releasing the command IOCB. This will
+ * actually release the remote node (Note, lpfc_els_free_iocb() will also
+ * invoke one decrement of ndlp reference count). If no error reported in
+ * the IOCB status, the command Port ID field is used to determine whether
+ * this is a point-to-point topology or a fabric topology: if the Port ID
+ * field is assigned, it is a fabric topology; otherwise, it is a
+ * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
+ * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
+ * specific topology completion conditions.
+ **/
static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -618,6 +750,28 @@ out:
lpfc_els_free_iocb(phba, cmdiocb);
}
+/**
+ * lpfc_issue_els_flogi: Issue a flogi iocb command for a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues a Fabric Login (FLOGI) Request ELS command
+ * for a @vport. The initiator service parameters are put into the payload
+ * of the FLOGI Request IOCB and the top-level callback function pointer
+ * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
+ * function field. The lpfc_issue_fabric_iocb routine is invoked to send
+ * out FLOGI ELS command with one outstanding fabric IOCB at a time.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the FLOGI ELS command.
+ *
+ * Return code
+ * 0 - successfully issued flogi iocb for @vport
+ * 1 - failed to issue flogi iocb for @vport
+ **/
static int
lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
@@ -694,6 +848,20 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 0;
}
+/**
+ * lpfc_els_abort_flogi: Abort all outstanding flogi iocbs.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
+ * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
+ * list and issues an abort IOCB command on each outstanding IOCB that
+ * contains an active Fabric_DID ndlp. Note that this function is to issue
+ * the abort IOCB command on all the outstanding IOCBs, thus when this
+ * function returns, it does not guarantee all the IOCBs are actually aborted.
+ *
+ * Return code
+ * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
+ **/
int
lpfc_els_abort_flogi(struct lpfc_hba *phba)
{
@@ -729,6 +897,22 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
return 0;
}
+/**
+ * lpfc_initial_flogi: Issue an initial fabric login for a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues an initial Fabric Login (FLOGI) for the @vport
+ * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
+ * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp and
+ * put it into the @vport's ndlp list. If an inactive ndlp is found on the list,
+ * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
+ * is then invoked with the @vport and the ndlp to perform the FLOGI for the
+ * @vport.
+ *
+ * Return code
+ * 0 - failed to issue initial flogi for @vport
+ * 1 - successfully issued initial flogi for @vport
+ **/
int
lpfc_initial_flogi(struct lpfc_vport *vport)
{
@@ -764,6 +948,22 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
return 1;
}
+/**
+ * lpfc_initial_fdisc: Issue an initial fabric discovery for a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues an initial Fabric Discover (FDISC) for the @vport
+ * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
+ * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
+ * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
+ * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
+ * is then invoked with the @vport and the ndlp to perform the FDISC for the
+ * @vport.
+ *
+ * Return code
+ * 0 - failed to issue initial fdisc for @vport
+ * 1 - successfully issued initial fdisc for @vport
+ **/
int
lpfc_initial_fdisc(struct lpfc_vport *vport)
{
@@ -797,6 +997,17 @@ lpfc_initial_fdisc(struct lpfc_vport *vport)
return 1;
}
+/**
+ * lpfc_more_plogi: Check and issue remaining plogis for a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine checks whether there are more remaining Port Logins
+ * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
+ * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
+ * to issue ELS PLOGIs up to the configured discover threads with the
+ * @vport (@vport->cfg_discovery_threads). The function also decrements
+ * the @vport's num_disc_nodes by 1 if it is not already 0.
+ **/
void
lpfc_more_plogi(struct lpfc_vport *vport)
{
@@ -819,6 +1030,37 @@ lpfc_more_plogi(struct lpfc_vport *vport)
return;
}
+/**
+ * lpfc_plogi_confirm_nport: Confirm plogi wwpn matches stored ndlp.
+ * @phba: pointer to lpfc hba data structure.
+ * @prsp: pointer to response IOCB payload.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine checks and indicates whether the WWPN of an N_Port, retrieved
+ * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
+ * The following cases are considered N_Port confirmed:
+ * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
+ * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
+ * it does not have WWPN assigned either. If the WWPN is confirmed, the
+ * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
+ * 1) if there is a node on vport list other than the @ndlp with the same
+ * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
+ * on that node to release the RPI associated with the node; 2) if there is
+ * no node found on vport list with the same WWPN of the N_Port PLOGI logged
+ * into, a new node shall be allocated (or activated). In either case, the
+ * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
+ * be released and the new_ndlp shall be put on to the vport node list and
+ * its pointer returned as the confirmed node.
+ *
+ * Note that before the @ndlp is "released", the keepDID from the
+ * not-matching or inactive "new_ndlp" on the vport node list is assigned
+ * to the nlp_DID of the @ndlp. This is because the release of @ndlp
+ * actually puts it into an inactive state on the vport node list, and the
+ * vport node list management algorithm does not allow two nodes with the
+ * same DID.
+ *
+ * Return code
+ * pointer to the PLOGI N_Port @ndlp
+ **/
static struct lpfc_nodelist *
lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
struct lpfc_nodelist *ndlp)
@@ -922,6 +1164,17 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
return new_ndlp;
}
+/**
+ * lpfc_end_rscn: Check and handle more rscn for a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine checks whether more Registration State Change
+ * Notifications (RSCNs) came in while the discovery state machine was in
+ * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
+ * invoked to handle the additional RSCNs for the @vport. Otherwise, the
+ * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of
+ * handling the RSCNs.
+ **/
void
lpfc_end_rscn(struct lpfc_vport *vport)
{
@@ -943,6 +1196,26 @@ lpfc_end_rscn(struct lpfc_vport *vport)
}
}
+/**
+ * lpfc_cmpl_els_plogi: Completion callback function for plogi.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function for issuing the Port
+ * Login (PLOGI) command. For PLOGI completion, there must be an active
+ * ndlp on the vport node list that matches the remote node ID from the
+ * PLOGI response IOCB. If such an ndlp does not exist, the PLOGI is simply
+ * ignored and the command IOCB released. The PLOGI response IOCB status is
+ * checked for error conditions. If there is error status reported, PLOGI
+ * retry shall be attempted by invoking the lpfc_els_retry() routine.
+ * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
+ * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine
+ * (DSM) is set for this PLOGI completion. Finally, it checks whether
+ * there are additional N_Port nodes with the vport that need to perform
+ * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional
+ * PLOGIs.
+ **/
static void
lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -1048,6 +1321,27 @@ out:
return;
}
+/**
+ * lpfc_issue_els_plogi: Issue a plogi iocb command for a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @did: destination port identifier.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues a Port Login (PLOGI) command to a remote N_Port
+ * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
+ * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
+ * This routine constructs the proper fields of the PLOGI IOCB and invokes
+ * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the PLOGI ELS command.
+ *
+ * Return code
+ * 0 - Successfully issued a plogi for @vport
+ * 1 - failed to issue a plogi for @vport
+ **/
int
lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
{
@@ -1106,6 +1400,19 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
return 0;
}
+/**
+ * lpfc_cmpl_els_prli: Completion callback function for prli.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function for a Process Login
+ * (PRLI) ELS command. The PRLI response IOCB status is checked for error
+ * status. If there is error status reported, PRLI retry shall be attempted
+ * by invoking the lpfc_els_retry() routine. Otherwise, the state
+ * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this
+ * ndlp to mark the PRLI completion.
+ **/
static void
lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -1164,6 +1471,27 @@ out:
return;
}
+/**
+ * lpfc_issue_els_prli: Issue a prli iocb command for a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues a Process Login (PRLI) ELS command for the
+ * @vport. The PRLI service parameters are set up in the payload of the
+ * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine
+ * is put to the IOCB completion callback func field before invoking the
+ * routine lpfc_sli_issue_iocb() to send out PRLI command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the PRLI ELS command.
+ *
+ * Return code
+ * 0 - successfully issued prli iocb command for @vport
+ * 1 - failed to issue prli iocb command for @vport
+ **/
int
lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
@@ -1233,6 +1561,92 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 0;
}
+/**
+ * lpfc_rscn_disc: Perform rscn discovery for a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine performs Registration State Change Notification (RSCN)
+ * discovery for a @vport. If the @vport's node port recovery count is not
+ * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all
+ * the nodes that need recovery. If none of the PLOGI were needed through
+ * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
+ * invoked to check and handle possible more RSCN came in during the period
+ * of processing the current ones.
+ **/
+static void
+lpfc_rscn_disc(struct lpfc_vport *vport)
+{
+ lpfc_can_disctmo(vport);
+
+ /* RSCN discovery */
+ /* go thru NPR nodes and issue ELS PLOGIs */
+ if (vport->fc_npr_cnt)
+ if (lpfc_els_disc_plogi(vport))
+ return;
+
+ lpfc_end_rscn(vport);
+}
+
+/**
+ * lpfc_adisc_done: Complete the adisc phase of discovery.
+ * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
+ *
+ * This function is called when the final ADISC is completed during discovery.
+ * This function handles clearing link attention or issuing reg_vpi depending
+ * on whether npiv is enabled. This function also kicks off the PLOGI phase of
+ * discovery.
+ * This function is called with no locks held.
+ **/
+static void
+lpfc_adisc_done(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
+
+ /*
+ * For NPIV, cmpl_reg_vpi will set port_state to READY,
+ * and continue discovery.
+ */
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ !(vport->fc_flag & FC_RSCN_MODE)) {
+ lpfc_issue_reg_vpi(phba, vport);
+ return;
+ }
+ /*
+ * For SLI2, we need to set port_state to READY
+ * and continue discovery.
+ */
+ if (vport->port_state < LPFC_VPORT_READY) {
+ /* If we get here, there is nothing to ADISC */
+ if (vport->port_type == LPFC_PHYSICAL_PORT)
+ lpfc_issue_clear_la(phba, vport);
+ if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
+ vport->num_disc_nodes = 0;
+ /* go thru NPR list, issue ELS PLOGIs */
+ if (vport->fc_npr_cnt)
+ lpfc_els_disc_plogi(vport);
+ if (!vport->num_disc_nodes) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
+ lpfc_end_rscn(vport);
+ }
+ }
+ vport->port_state = LPFC_VPORT_READY;
+ } else
+ lpfc_rscn_disc(vport);
+}
+
+/**
+ * lpfc_more_adisc: Issue more adisc as needed.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine determines whether there are more ndlps on a @vport's
+ * node list that need to have Address Discover (ADISC) issued. If so, it will
+ * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's
+ * remaining nodes which need to have ADISC sent.
+ **/
void
lpfc_more_adisc(struct lpfc_vport *vport)
{
@@ -1252,23 +1666,27 @@ lpfc_more_adisc(struct lpfc_vport *vport)
/* go thru NPR nodes and issue any remaining ELS ADISCs */
sentadisc = lpfc_els_disc_adisc(vport);
}
+ if (!vport->num_disc_nodes)
+ lpfc_adisc_done(vport);
return;
}
-static void
-lpfc_rscn_disc(struct lpfc_vport *vport)
-{
- lpfc_can_disctmo(vport);
-
- /* RSCN discovery */
- /* go thru NPR nodes and issue ELS PLOGIs */
- if (vport->fc_npr_cnt)
- if (lpfc_els_disc_plogi(vport))
- return;
-
- lpfc_end_rscn(vport);
-}
-
+/**
+ * lpfc_cmpl_els_adisc: Completion callback function for adisc.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion function for issuing the Address Discover
+ * (ADISC) command. It first checks to see whether the link went down during
+ * the discovery process. If so, the node will be marked as node port
+ * recovery for issuing a discover IOCB by the link attention handler, and
+ * the routine exits. Otherwise, the response status is checked: if an error
+ * was reported, the ADISC command shall be retried by invoking the
+ * lpfc_els_retry() routine; if no error was reported in
+ * the response status, the state machine is invoked to set transition
+ * with respect to NLP_EVT_CMPL_ADISC event.
+ **/
static void
lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -1333,57 +1751,34 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_ADISC);
- if (disc && vport->num_disc_nodes) {
- /* Check to see if there are more ADISCs to be sent */
+ /* Check to see if there are more ADISCs to be sent */
+ if (disc && vport->num_disc_nodes)
lpfc_more_adisc(vport);
-
- /* Check to see if we are done with ADISC authentication */
- if (vport->num_disc_nodes == 0) {
- /* If we get here, there is nothing left to ADISC */
- /*
- * For NPIV, cmpl_reg_vpi will set port_state to READY,
- * and continue discovery.
- */
- if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
- !(vport->fc_flag & FC_RSCN_MODE)) {
- lpfc_issue_reg_vpi(phba, vport);
- goto out;
- }
- /*
- * For SLI2, we need to set port_state to READY
- * and continue discovery.
- */
- if (vport->port_state < LPFC_VPORT_READY) {
- /* If we get here, there is nothing to ADISC */
- if (vport->port_type == LPFC_PHYSICAL_PORT)
- lpfc_issue_clear_la(phba, vport);
-
- if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
- vport->num_disc_nodes = 0;
- /* go thru NPR list, issue ELS PLOGIs */
- if (vport->fc_npr_cnt)
- lpfc_els_disc_plogi(vport);
-
- if (!vport->num_disc_nodes) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &=
- ~FC_NDISC_ACTIVE;
- spin_unlock_irq(
- shost->host_lock);
- lpfc_can_disctmo(vport);
- }
- }
- vport->port_state = LPFC_VPORT_READY;
- } else {
- lpfc_rscn_disc(vport);
- }
- }
- }
out:
lpfc_els_free_iocb(phba, cmdiocb);
return;
}
+/**
+ * lpfc_issue_els_adisc: Issue an address discover iocb to a node on a vport.
+ * @vport: pointer to a virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues an Address Discover (ADISC) for an @ndlp on a
+ * @vport. It prepares the payload of the ADISC ELS command, updates the
+ * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
+ * to issue the ADISC ELS command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the ADISC ELS command.
+ *
+ * Return code
+ * 0 - successfully issued adisc
+ * 1 - failed to issue adisc
+ **/
int
lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
@@ -1437,6 +1832,18 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 0;
}
+/**
+ * lpfc_cmpl_els_logo: Completion callback function for logo.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion function for issuing the ELS Logout (LOGO)
+ * command. If no error status was reported from the LOGO response, the
+ * state machine of the associated ndlp shall be invoked for transition with
+ * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported,
+ * the lpfc_els_retry() routine will be invoked to retry the LOGO command.
+ **/
static void
lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -1502,6 +1909,26 @@ out:
return;
}
+/**
+ * lpfc_issue_els_logo: Issue a logo to a node on a vport.
+ * @vport: pointer to a virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine constructs and issues an ELS Logout (LOGO) iocb command
+ * to a remote node, referred by an @ndlp on a @vport. It constructs the
+ * payload of the IOCB, properly sets up the @ndlp state, and invokes the
+ * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the LOGO ELS command.
+ *
+ * Return code
+ * 0 - successfully issued logo
+ * 1 - failed to issue logo
+ **/
int
lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
@@ -1563,6 +1990,22 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 0;
}
+/**
+ * lpfc_cmpl_els_cmd: Completion callback function for generic els command.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is a generic completion callback function for ELS commands.
+ * Specifically, it is the callback function which does not need to perform
+ * any command specific operations. It is currently used by the ELS command
+ * issuing routines for the ELS State Change Request (SCR),
+ * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution
+ * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than
+ * certain debug loggings, this callback function simply invokes the
+ * lpfc_els_chk_latt() routine to check whether link went down during the
+ * discovery process.
+ **/
static void
lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -1587,6 +2030,28 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return;
}
+/**
+ * lpfc_issue_els_scr: Issue a scr to a node on a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @nportid: N_Port identifier to the remote node.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues a State Change Request (SCR) to a fabric node
+ * on a @vport. The remote node @nportid is passed into the function. It
+ * first searches the @vport node list to find the matching ndlp. If no such
+ * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
+ * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
+ * routine is invoked to send the SCR IOCB.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the SCR ELS command.
+ *
+ * Return code
+ * 0 - Successfully issued scr command
+ * 1 - Failed to issue scr command
+ **/
int
lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
{
@@ -1659,6 +2124,28 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
return 0;
}
+/**
+ * lpfc_issue_els_farpr: Issue a farpr to a node on a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @nportid: N_Port identifier to the remote node.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine issues a Fibre Channel Address Resolution Response
+ * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
+ * is passed into the function. It first searches the @vport node list to find
+ * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
+ * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
+ * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the FARPR ELS command.
+ *
+ * Return code
+ * 0 - Successfully issued farpr command
+ * 1 - Failed to issue farpr command
+ **/
static int
lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
{
@@ -1748,6 +2235,18 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
return 0;
}
+/**
+ * lpfc_cancel_retry_delay_tmo: Cancel the timer with delayed iocb-cmd retry.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @nlp: pointer to a node-list data structure.
+ *
+ * This routine cancels the timer with a delayed IOCB-command retry for
+ * a @vport's @nlp. It stops the timer for the delayed function retry and
+ * removes the ELS retry event if one is present. In addition, if the
+ * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
+ * commands are sent for the @vport's nodes that require issuing discovery
+ * ADISC.
+ **/
void
lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
{
@@ -1775,25 +2274,36 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
if (vport->port_state < LPFC_VPORT_READY) {
/* Check if there are more ADISCs to be sent */
lpfc_more_adisc(vport);
- if ((vport->num_disc_nodes == 0) &&
- (vport->fc_npr_cnt))
- lpfc_els_disc_plogi(vport);
} else {
/* Check if there are more PLOGIs to be sent */
lpfc_more_plogi(vport);
- }
- if (vport->num_disc_nodes == 0) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_NDISC_ACTIVE;
- spin_unlock_irq(shost->host_lock);
- lpfc_can_disctmo(vport);
- lpfc_end_rscn(vport);
+ if (vport->num_disc_nodes == 0) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
+ lpfc_end_rscn(vport);
+ }
}
}
}
return;
}
+/**
+ * lpfc_els_retry_delay: Timer function with a ndlp delayed function timer.
+ * @ptr: holder for the pointer to the timer function associated data (ndlp).
+ *
+ * This routine is invoked by the ndlp delayed-function timer to check
+ * whether there is any pending ELS retry event(s) with the node. If not, it
+ * simply returns. Otherwise, if there is at least one ELS delayed event, it
+ * adds the delayed events to the HBA work list and invokes the
+ * lpfc_worker_wake_up() routine to wake up worker thread to process the
+ * event. Note that lpfc_nlp_get() is called before posting the event to
+ * the work list to hold reference count of ndlp so that it guarantees the
+ * reference to ndlp will still be available when the worker thread gets
+ * to the event associated with the ndlp.
+ **/
void
lpfc_els_retry_delay(unsigned long ptr)
{
@@ -1822,6 +2332,15 @@ lpfc_els_retry_delay(unsigned long ptr)
return;
}
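
The delayed-function timer flow documented above follows the timer API of this kernel generation (setup_timer/mod_timer with an unsigned long cookie, matching the lpfc_els_retry_delay() signature). A hedged sketch with hypothetical names:

	#include <linux/timer.h>
	#include <linux/jiffies.h>
	#include <linux/kernel.h>

	struct retry_ctx {
		struct timer_list tmr;
		unsigned int els_cmd;		/* ELS command to reissue later */
	};

	static void retry_fire(unsigned long ptr)
	{
		struct retry_ctx *ctx = (struct retry_ctx *)ptr;

		/* take an ndlp reference, then post ctx->els_cmd to the worker */
		pr_debug("retrying ELS cmd x%x\n", ctx->els_cmd);
	}

	static void arm_retry(struct retry_ctx *ctx, unsigned long delay_ms)
	{
		setup_timer(&ctx->tmr, retry_fire, (unsigned long)ctx);
		mod_timer(&ctx->tmr, jiffies + msecs_to_jiffies(delay_ms));
	}
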
+/**
+ * lpfc_els_retry_delay_handler: Work thread handler for ndlp delayed function.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine is the worker-thread handler for processing the @ndlp delayed
+ * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves
+ * the last ELS command from the associated ndlp and invokes the proper ELS
+ * function according to the delayed ELS command to retry the command.
+ **/
void
lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
{
@@ -1884,6 +2403,27 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
return;
}
+/**
+ * lpfc_els_retry: Make retry decision on an els command iocb.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine makes a retry decision on an ELS command IOCB, which has
+ * failed. The following ELS IOCBs use this function for retrying the command
+ * when a previously issued command responded with error status: FLOGI, PLOGI,
+ * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the
+ * returned error status, it makes the decision whether a retry shall be
+ * issued for the command, and whether a retry shall be made immediately or
+ * delayed. In the former case, the corresponding ELS command issuing-function
+ * is called to retry the command. In the latter case, the ELS command shall
+ * be posted to the ndlp delayed event and delayed function timer set to the
+ * ndlp for the delayed command issuing.
+ *
+ * Return code
+ * 0 - No retry of els command is made
+ * 1 - Immediate or delayed retry of els command is made
+ **/
static int
lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -1933,6 +2473,15 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
case IOSTAT_LOCAL_REJECT:
switch ((irsp->un.ulpWord[4] & 0xff)) {
case IOERR_LOOP_OPEN_FAILURE:
+ if (cmd == ELS_CMD_FLOGI) {
+ if (PCI_DEVICE_ID_HORNET ==
+ phba->pcidev->device) {
+ phba->fc_topology = TOPOLOGY_LOOP;
+ phba->pport->fc_myDID = 0;
+ phba->alpa_map[0] = 0;
+ phba->alpa_map[1] = 0;
+ }
+ }
if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
delay = 1000;
retry = 1;
@@ -2051,7 +2600,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
(stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "0123 FDISC Failed (x%x). "
+ "0122 FDISC Failed (x%x). "
"Fabric Detected Bad WWN\n",
stat.un.lsRjtError);
lpfc_vport_set_state(vport,
@@ -2182,12 +2731,26 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return 0;
}
+/**
+ * lpfc_els_free_data: Free lpfc dma buffer and data structure with an iocb.
+ * @phba: pointer to lpfc hba data structure.
+ * @buf_ptr1: pointer to the lpfc DMA buffer data structure.
+ *
+ * This routine releases the lpfc DMA (Direct Memory Access) buffer(s)
+ * associated with a command IOCB back to the lpfc DMA buffer pool. It first
+ * checks to see whether there is a lpfc DMA buffer associated with the
+ * response of the command IOCB. If so, it will be released before releasing
+ * the lpfc DMA buffer associated with the IOCB itself.
+ *
+ * Return code
+ * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
+ **/
static int
lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
{
struct lpfc_dmabuf *buf_ptr;
- /* Free the response before processing the command. */
+ /* Free the response before processing the command. */
if (!list_empty(&buf_ptr1->list)) {
list_remove_head(&buf_ptr1->list, buf_ptr,
struct lpfc_dmabuf,
@@ -2200,6 +2763,18 @@ lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
return 0;
}
+/**
+ * lpfc_els_free_bpl: Free lpfc dma buffer and data structure with bpl.
+ * @phba: pointer to lpfc hba data structure.
+ * @buf_ptr: pointer to the lpfc dma buffer data structure.
+ *
+ * This routine releases the lpfc Direct Memory Access (DMA) buffer
+ * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer
+ * pool.
+ *
+ * Return code
+ * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
+ **/
static int
lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
{
@@ -2208,6 +2783,33 @@ lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
return 0;
}
+/**
+ * lpfc_els_free_iocb: Free a command iocb and its associated resources.
+ * @phba: pointer to lpfc hba data structure.
+ * @elsiocb: pointer to lpfc els command iocb data structure.
+ *
+ * This routine frees a command IOCB and its associated resources. The
+ * command IOCB data structure contains the reference to various associated
+ * resources; these fields must be set to NULL if the associated reference
+ * is not present:
+ * context1 - reference to ndlp
+ * context2 - reference to cmd
+ * context2->next - reference to rsp
+ * context3 - reference to bpl
+ *
+ * It first properly decrements the reference count held on ndlp for the
+ * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
+ * set, it invokes the lpfc_els_free_data() routine to release the Direct
+ * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
+ * adds the DMA buffers to the @phba data structure for delayed release.
+ * If reference to the Buffer Pointer List (BPL) is present, the
+ * lpfc_els_free_bpl() routine is invoked to release the DMA memory
+ * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
+ * invoked to release the IOCB data structure back to @phba IOCBQ list.
+ *
+ * Return code
+ * 0 - Success (currently, always return 0)
+ **/
int
lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
{
@@ -2274,6 +2876,23 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
return 0;
}
+/**
+ * lpfc_cmpl_els_logo_acc: Completion callback function to logo acc response.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function to the Logout (LOGO)
+ * Accept (ACC) Response ELS command. This routine is invoked to indicate
+ * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
+ * release the ndlp if it has the last reference remaining (reference count
+ * is 1). If this succeeds (meaning the ndlp is released), it sets the IOCB
+ * context1 field to NULL to inform the following lpfc_els_free_iocb() routine
+ * that no ndlp reference count needs to be decremented. Otherwise, the ndlp
+ * reference use-count shall be decremented by the lpfc_els_free_iocb()
+ * routine. Finally, the lpfc_els_free_iocb() is invoked to release the
+ * IOCB data structure.
+ **/
static void
lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -2311,6 +2930,19 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return;
}
+/**
+ * lpfc_mbx_cmpl_dflt_rpi: Completion callbk func for unreg dflt rpi mbox cmd.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine is the completion callback function for unregister default
+ * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
+ * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
+ * decrements the ndlp reference count held for this completion callback
+ * function. After that, it invokes the lpfc_nlp_not_used() to check
+ * whether there is only one reference left on the ndlp. If so, it will
+ * perform one more decrement and trigger the release of the ndlp.
+ **/
void
lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
@@ -2332,6 +2964,22 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
}
+/**
+ * lpfc_cmpl_els_rsp: Completion callback function for els response iocb cmd.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function for ELS Response IOCB
+ * command. In the normal case, this callback function just properly sets
+ * the nlp_flag bitmap in the ndlp data structure. If the mbox command
+ * reference field in the command IOCB is not NULL, the referred mailbox
+ * command will be sent out, and then the lpfc_els_free_iocb() routine is
+ * invoked to release the IOCB. Under error conditions, such as when an
+ * LS_RJT is returned or a link down event occurred during the discovery,
+ * the lpfc_nlp_not_used() routine shall be invoked to try to release the
+ * ndlp if no other threads are currently referring to it.
+ **/
static void
lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -2487,6 +3135,31 @@ out:
return;
}
+/**
+ * lpfc_els_rsp_acc: Prepare and issue an acc response iocb command.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @flag: the els command code to be accepted.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @mbox: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine prepares and issues an Accept (ACC) response IOCB
+ * command. It uses the @flag to properly set up the IOCB field for the
+ * specific ACC response command to be issued and invokes the
+ * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a
+ * @mbox pointer is passed in, it will be put into the context_un.mbox
+ * field of the IOCB for the completion callback function to issue the
+ * mailbox command to the HBA later when callback is invoked.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the corresponding response ELS IOCB command.
+ *
+ * Return code
+ * 0 - Successfully issued acc response
+ * 1 - Failed to issue acc response
+ **/
int
lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
@@ -2601,6 +3274,28 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
return 0;
}
+/**
+ * lpfc_els_rsp_reject: Prepare and issue a rjt response iocb command.
+ * @vport: pointer to a virtual N_Port data structure.
+ * @rejectError: reject error code to be carried in the LS_RJT payload.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @mbox: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine prepares and issues a Reject (RJT) response IOCB
+ * command. If a @mbox pointer is passed in, it will be put into the
+ * context_un.mbox field of the IOCB for the completion callback function
+ * to issue to the HBA later.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the reject response ELS IOCB command.
+ *
+ * Return code
+ * 0 - Successfully issued reject response
+ * 1 - Failed to issue reject response
+ **/
int
lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
@@ -2660,6 +3355,25 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
return 0;
}
+/**
+ * lpfc_els_rsp_adisc_acc: Prepare and issue acc response to adisc iocb cmd.
+ * @vport: pointer to a virtual N_Port data structure.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine prepares and issues an Accept (ACC) response to Address
+ * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
+ * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the ADISC Accept response ELS IOCB command.
+ *
+ * Return code
+ * 0 - Successfully issued acc adisc response
+ * 1 - Failed to issue adisc acc response
+ **/
int
lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
struct lpfc_nodelist *ndlp)
@@ -2716,6 +3430,25 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
return 0;
}
+/**
+ * lpfc_els_rsp_prli_acc: Prepare and issue acc response to prli iocb cmd.
+ * @vport: pointer to a virtual N_Port data structure.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine prepares and issues an Accept (ACC) response to Process
+ * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
+ * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the PRLI Accept response ELS IOCB command.
+ *
+ * Return code
+ * 0 - Successfully issued acc prli response
+ * 1 - Failed to issue acc prli response
+ **/
int
lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
struct lpfc_nodelist *ndlp)
@@ -2795,6 +3528,32 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
return 0;
}
+/**
+ * lpfc_els_rsp_rnid_acc: Issue rnid acc response iocb command.
+ * @vport: pointer to a virtual N_Port data structure.
+ * @format: rnid command format.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine issues a Request Node Identification Data (RNID) Accept
+ * (ACC) response. It constructs the RNID ACC response command according to
+ * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
+ * issue the response. Note that this command does not need to hold the ndlp
+ * reference count for the callback. So, the ndlp reference count taken by
+ * the lpfc_prep_els_iocb() routine is put back and the context1 field of
+ * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that
+ * there is no ndlp reference available.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function. However, for the RNID Accept Response ELS command,
+ * this is undone later by this routine after the IOCB is allocated.
+ *
+ * Return code
+ * 0 - Successfully issued acc rnid response
+ * 1 - Failed to issue acc rnid response
+ **/
static int
lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
@@ -2875,6 +3634,25 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
return 0;
}
+/**
+ * lpfc_els_disc_adisc: Issue remaining adisc iocbs to npr nodes of a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues Address Discover (ADISC) ELS commands to those
+ * N_Ports which are in node port recovery state and ADISC has not been issued
+ * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the
+ * lpfc_issue_els_adisc() routine, the per-@vport discover count
+ * (num_disc_nodes) shall be incremented. If num_disc_nodes reaches a
+ * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will
+ * be marked with the FC_NLP_MORE bit and the process of issuing remaining
+ * ADISC IOCBs quits, to be picked up later. On the other hand, if after
+ * walking through all the ndlps with the @vport no ADISC IOCB was issued,
+ * the FC_NLP_MORE bit shall be cleared in the @vport fc_flag, indicating
+ * there are no more ADISCs to be sent.
+ *
+ * Return code
+ * The number of N_Ports with adisc issued.
+ **/
int
lpfc_els_disc_adisc(struct lpfc_vport *vport)
{
@@ -2914,6 +3692,25 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport)
return sentadisc;
}
+/**
+ * lpfc_els_disc_plogi: Issue plogi for all npr nodes of a vport before adisc.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
+ * which are in node port recovery state, with a @vport. Each time an ELS
+ * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
+ * the per-@vport discover count (num_disc_nodes) shall be incremented.
+ * If num_disc_nodes reaches a pre-configured threshold
+ * (cfg_discovery_threads), the @vport fc_flag will be marked with the
+ * FC_NLP_MORE bit and the process of issuing remaining PLOGI IOCBs quits,
+ * to be picked up later. On the other hand, if after walking through all
+ * the ndlps with the @vport no PLOGI IOCB was issued, the FC_NLP_MORE bit
+ * shall be cleared in the @vport fc_flag, indicating there are no more
+ * PLOGIs to be sent.
+ *
+ * Return code
+ * The number of N_Ports with plogi issued.
+ **/
int
lpfc_els_disc_plogi(struct lpfc_vport *vport)
{
@@ -2954,6 +3751,15 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
return sentplogi;
}
+/**
+ * lpfc_els_flush_rscn: Clean up any rscn activities with a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine cleans up any Registration State Change Notification
+ * (RSCN) activity on a @vport. Note that the fc_rscn_flush flag of the
+ * @vport, together with the host_lock, is used to prevent multiple threads
+ * from accessing the RSCN array of the same @vport at the same time.
+ **/
void
lpfc_els_flush_rscn(struct lpfc_vport *vport)
{
@@ -2984,6 +3790,18 @@ lpfc_els_flush_rscn(struct lpfc_vport *vport)
vport->fc_rscn_flush = 0;
}
+/**
+ * lpfc_rscn_payload_check: Check whether there is a pending rscn to a did.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @did: remote destination port identifier.
+ *
+ * This routine checks whether there is any pending Registration State
+ * Change Notification (RSCN) to a @did on @vport.
+ *
+ * Return code
+ * Non-zero - The @did matched a pending rscn
+ * 0 - Unable to match the @did with a pending rscn
+ **/
int
lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
{
@@ -3019,27 +3837,21 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
while (payload_len) {
rscn_did.un.word = be32_to_cpu(*lp++);
payload_len -= sizeof(uint32_t);
- switch (rscn_did.un.b.resv) {
- case 0: /* Single N_Port ID effected */
+ switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
+ case RSCN_ADDRESS_FORMAT_PORT:
if (ns_did.un.word == rscn_did.un.word)
goto return_did_out;
break;
- case 1: /* Whole N_Port Area effected */
+ case RSCN_ADDRESS_FORMAT_AREA:
if ((ns_did.un.b.domain == rscn_did.un.b.domain)
&& (ns_did.un.b.area == rscn_did.un.b.area))
goto return_did_out;
break;
- case 2: /* Whole N_Port Domain effected */
+ case RSCN_ADDRESS_FORMAT_DOMAIN:
if (ns_did.un.b.domain == rscn_did.un.b.domain)
goto return_did_out;
break;
- default:
- /* Unknown Identifier in RSCN node */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
- "0217 Unknown Identifier in "
- "RSCN payload Data: x%x\n",
- rscn_did.un.word);
- case 3: /* Whole Fabric effected */
+ case RSCN_ADDRESS_FORMAT_FABRIC:
goto return_did_out;
}
}
@@ -3053,6 +3865,17 @@ return_did_out:
return did;
}
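The RSCN_ADDRESS_FORMAT_* names introduced by the hunk above replace the literal case values 0-3 of the removed switch arms. Plausible definitions, inferred from those removed arms (the authoritative values live in the driver's headers, not in this hunk):

/* Sketch, inferred from the removed "case 0..3" arms above */
#define RSCN_ADDRESS_FORMAT_MASK	0x3
#define RSCN_ADDRESS_FORMAT_PORT	0x0	/* single N_Port ID affected */
#define RSCN_ADDRESS_FORMAT_AREA	0x1	/* whole N_Port area affected */
#define RSCN_ADDRESS_FORMAT_DOMAIN	0x2	/* whole N_Port domain affected */
#define RSCN_ADDRESS_FORMAT_FABRIC	0x3	/* whole fabric affected */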
+/**
+ * lpfc_rscn_recovery_check: Send recovery event to vport nodes matching rscn
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine sends a recovery (NLP_EVT_DEVICE_RECOVERY) event to the
+ * state machine for each of a @vport's nodes that has a pending RSCN
+ * (Registration State Change Notification).
+ *
+ * Return code
+ * 0 - Successful (currently always returns 0)
+ **/
static int
lpfc_rscn_recovery_check(struct lpfc_vport *vport)
{
@@ -3071,6 +3894,71 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
return 0;
}
+/**
+ * lpfc_send_rscn_event: Send an RSCN event to management application.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ *
+ * lpfc_send_rscn_event sends an RSCN netlink event to management
+ * applications.
+ */
+static void
+lpfc_send_rscn_event(struct lpfc_vport *vport,
+ struct lpfc_iocbq *cmdiocb)
+{
+ struct lpfc_dmabuf *pcmd;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ uint32_t *payload_ptr;
+ uint32_t payload_len;
+ struct lpfc_rscn_event_header *rscn_event_data;
+
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ payload_ptr = (uint32_t *) pcmd->virt;
+ payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
+
+ rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
+ payload_len, GFP_KERNEL);
+ if (!rscn_event_data) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0147 Failed to allocate memory for RSCN event\n");
+ return;
+ }
+ rscn_event_data->event_type = FC_REG_RSCN_EVENT;
+ rscn_event_data->payload_length = payload_len;
+ memcpy(rscn_event_data->rscn_payload, payload_ptr,
+ payload_len);
+
+ fc_host_post_vendor_event(shost,
+ fc_get_event_number(),
+		sizeof(struct lpfc_rscn_event_header) + payload_len,
+ (char *)rscn_event_data,
+ LPFC_NL_VENDOR_ID);
+
+ kfree(rscn_event_data);
+}
+
+/**
+ * lpfc_els_rcv_rscn: Process an unsolicited rscn iocb.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes an unsolicited RSCN (Registration State Change
+ * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
+ * and posted to the FC transport layer via the fc_host_post_event()
+ * routine. If the discovery state machine is about to begin discovery, the
+ * RSCN is simply accepted and the discovery process will satisfy it. If
+ * this RSCN only contains N_Port IDs for other vports on this HBA, it is
+ * accepted and otherwise ignored. If the state machine is in the recovery
+ * state, the fc_rscn_id_list of this @vport is walked and the
+ * lpfc_rscn_recovery_check() routine is invoked to send a recovery event
+ * to all nodes that match the RSCN payload. Otherwise, the
+ * lpfc_els_handle_rscn() routine is invoked to handle the RSCN event.
+ *
+ * Return code
+ * 0 - Just sent the acc response
+ * 1 - Sent the acc response and waited for name server completion
+ **/
static int
lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
@@ -3096,6 +3984,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
"0214 RSCN received Data: x%x x%x x%x x%x\n",
vport->fc_flag, payload_len, *lp,
vport->fc_rscn_id_cnt);
+
+ /* Send an RSCN event to the management application */
+ lpfc_send_rscn_event(vport, cmdiocb);
+
for (i = 0; i < payload_len/sizeof(uint32_t); i++)
fc_host_post_event(shost, fc_get_event_number(),
FCH_EVT_RSCN, lp[i]);
@@ -3130,7 +4022,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if (rscn_id == hba_id) {
/* ALL NPortIDs in RSCN are on HBA */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0214 Ignore RSCN "
+ "0219 Ignore RSCN "
"Data: x%x x%x x%x x%x\n",
vport->fc_flag, payload_len,
*lp, vport->fc_rscn_id_cnt);
@@ -3241,6 +4133,22 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return lpfc_els_handle_rscn(vport);
}
+/**
+ * lpfc_els_handle_rscn: Handle rscn for a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine handles the Registration State Change Notification
+ * (RSCN) for a @vport. If no login to the NameServer exists, a new ndlp
+ * is created and a Port Login (PLOGI) to the NameServer is issued.
+ * Otherwise, if an ndlp for the NameServer exists, a Common Transport (CT)
+ * command is issued to the NameServer. If the CT command fails to be
+ * issued, the lpfc_els_flush_rscn() routine is invoked to clean up any
+ * RSCN activities on the @vport.
+ *
+ * Return code
+ * 0 - Cleaned up rscn on the @vport
+ * 1 - Wait for plogi to name server before proceeding
+ **/
int
lpfc_els_handle_rscn(struct lpfc_vport *vport)
{
@@ -3313,6 +4221,31 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
return 0;
}
+/**
+ * lpfc_els_rcv_flogi: Process an unsolicited flogi iocb.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Fabric Login (FLOGI) IOCB received as an ELS
+ * unsolicited event. An unsolicited FLOGI can be received in a point-to-
+ * point topology. Since an unsolicited FLOGI should not be received in loop
+ * mode, any unsolicited FLOGI received in loop mode is ignored. The
+ * lpfc_check_sparm() routine is invoked to check the parameters in the
+ * unsolicited FLOGI. If parameter validation fails, the
+ * lpfc_els_rsp_reject() routine is called with the reject reason code set
+ * to LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in
+ * the FLOGI is compared with the Port WWN of the @vport to determine which
+ * port will initiate the PLOGI. The party with the lexicographically higher
+ * value has higher priority (the winning port) and will initiate PLOGI and
+ * communicate Port_IDs (Addresses) for both nodes in the PLOGI. The result
+ * is marked in the @vport fc_flag field with FC_PT2PT_PLOGI and the
+ * lpfc_els_rsp_acc() routine is then invoked to accept the FLOGI.
+ *
+ * Return code
+ * 0 - Successfully processed the unsolicited flogi
+ * 1 - Failed to process the unsolicited flogi
+ **/
static int
lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
@@ -3402,6 +4335,22 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 0;
}
+/**
+ * lpfc_els_rcv_rnid: Process an unsolicited rnid iocb.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Request Node Identification Data (RNID) IOCB
+ * received as an ELS unsolicited event. Only when the RNID specifies format
+ * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) does
+ * this routine invoke the lpfc_els_rsp_rnid_acc() routine to Accept (ACC)
+ * the RNID ELS command. All other RNID formats are rejected by invoking
+ * the lpfc_els_rsp_reject() routine.
+ *
+ * Return code
+ * 0 - Successfully processed rnid iocb (currently always returns 0)
+ **/
static int
lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
@@ -3441,6 +4390,19 @@ lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 0;
}
+/**
+ * lpfc_els_rcv_lirr: Process an unsolicited lirr iocb.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Link Incident Report Registration (LIRR) IOCB
+ * received as an ELS unsolicited event. Currently, this function just invokes
+ * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally.
+ *
+ * Return code
+ * 0 - Successfully processed lirr iocb (currently always returns 0)
+ **/
static int
lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
@@ -3456,6 +4418,25 @@ lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 0;
}
+/**
+ * lpfc_els_rsp_rps_acc: Completion callback for MBX_READ_LNK_STAT mbox cmd.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine is the completion callback function for the MBX_READ_LNK_STAT
+ * mailbox command. Its job is to actually send the Accept (ACC) response to
+ * a Read Port Status (RPS) unsolicited IOCB event: it collects the link
+ * statistics from the completed MBX_READ_LNK_STAT mailbox command,
+ * constructs the RPS response with the collected link statistics, and then
+ * invokes the lpfc_sli_issue_iocb() routine to send the ACC response to
+ * the RPS.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the RPS Accept Response ELS IOCB command.
+ *
+ **/
static void
lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
@@ -3531,6 +4512,24 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
}
+/**
+ * lpfc_els_rcv_rps: Process an unsolicited rps iocb.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Read Port Status (RPS) IOCB received as an
+ * ELS unsolicited event. It first checks the remote port state: if the
+ * remote port is in neither NLP_STE_UNMAPPED_NODE nor NLP_STE_MAPPED_NODE
+ * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
+ * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command to
+ * read the HBA link statistics; the callback function set on that mailbox
+ * command, lpfc_els_rsp_rps_acc(), actually sends out the RPS Accept (ACC)
+ * response.
+ *
+ * Return codes
+ * 0 - Successfully processed rps iocb (currently always returns 0)
+ **/
static int
lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
@@ -3544,14 +4543,9 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct ls_rjt stat;
if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
- (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
- stat.un.b.lsRjtRsvd0 = 0;
- stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
- stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
- stat.un.b.vendorUnique = 0;
- lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
- NULL);
- }
+ (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
+ /* reject the unsolicited RPS request and done with it */
+ goto reject_out;
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
@@ -3584,6 +4578,9 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
mempool_free(mbox, phba->mbox_mem_pool);
}
}
+
+reject_out:
+ /* issue rejection response */
stat.un.b.lsRjtRsvd0 = 0;
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
@@ -3592,6 +4589,25 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 0;
}
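A sketch of the deferred-ACC pattern the kernel-doc above describes: queue the READ_LNK_STAT mailbox with lpfc_els_rsp_rps_acc() installed as its completion, so the ACC is only built once the statistics arrive. The hunk elides this part of the body, so the mailbox-builder helper and context-field usage here are assumptions:

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_read_lnk_stat(phba, mbox);	/* assumed mailbox builder */
		/* stash what the completion needs to build the ACC */
		mbox->context1 = (void *)(unsigned long)cmdiocb->iocb.ulpContext;
		mbox->context2 = lpfc_nlp_get(ndlp);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;	/* ACC is sent there */
		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
		    == MBX_NOT_FINISHED)
			mempool_free(mbox, phba->mbox_mem_pool);
	}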
+/**
+ * lpfc_els_rsp_rpl_acc: Issue an accept rpl els command.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdsize: size of the ELS command.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine issues an Accept (ACC) Read Port List (RPL) ELS command.
+ * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the RPL Accept Response ELS command.
+ *
+ * Return code
+ * 0 - Successfully issued ACC RPL ELS command
+ * 1 - Failed to issue ACC RPL ELS command
+ **/
static int
lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
@@ -3645,6 +4661,22 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
return 0;
}
+/**
+ * lpfc_els_rcv_rpl: Process an unsolicited rpl iocb.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Read Port List (RPL) IOCB received as an ELS
+ * unsolicited event. It first checks the remote port state: if the remote
+ * port is in neither NLP_STE_UNMAPPED_NODE nor NLP_STE_MAPPED_NODE state,
+ * it invokes the lpfc_els_rsp_reject() routine to send a reject response.
+ * Otherwise, it invokes the lpfc_els_rsp_rpl_acc() routine to accept the
+ * RPL.
+ *
+ * Return code
+ * 0 - Successfully processed rpl iocb (currently always returns 0)
+ **/
static int
lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
@@ -3658,12 +4690,15 @@ lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
+ /* issue rejection response */
stat.un.b.lsRjtRsvd0 = 0;
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
stat.un.b.vendorUnique = 0;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
NULL);
+ /* rejected the unsolicited RPL request and done with it */
+ return 0;
}
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -3685,6 +4720,30 @@ lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 0;
}
+/**
+ * lpfc_els_rcv_farp: Process an unsolicited farp request els command.
+ * @vport: pointer to a virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Fibre Channel Address Resolution Protocol
+ * (FARP) Request IOCB received as an ELS unsolicited event. Currently,
+ * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such,
+ * the FARP_MATCH_PORT and FARP_MATCH_NODE flags are checked against the
+ * Match Flag in the FARP request IOCB: if the FARP_MATCH_PORT flag is set,
+ * the remote PortName is compared against the FC PortName stored in the
+ * @vport data structure; if the FARP_MATCH_NODE flag is set, the remote
+ * NodeName is compared against the FC NodeName stored in the @vport data
+ * structure. If either matches and the FARP_REQUEST_FARPR flag is set in
+ * the FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine
+ * is invoked to send a FARP Response to the remote node. Before sending
+ * the FARP Response, however, the FARP_REQUEST_PLOGI flag is checked in
+ * the FARP request IOCB Response Flag and, if it is set, the
+ * lpfc_issue_els_plogi() routine is invoked to log into the remote port
+ * first.
+ *
+ * Return code
+ * 0 - Either the FARP Match Mode is not supported or the command was
+ *     successfully processed
+ **/
static int
lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
@@ -3744,6 +4803,20 @@ lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 0;
}
+/**
+ * lpfc_els_rcv_farpr: Process an unsolicited farp response iocb.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Fibre Channel Address Resolution Protocol
+ * Response (FARPR) IOCB received as an ELS unsolicited event. It simply
+ * invokes the lpfc_els_rsp_acc() routine to accept the FARP response
+ * request from the remote node.
+ *
+ * Return code
+ * 0 - Successfully processed FARPR IOCB (currently always returns 0)
+ **/
static int
lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
@@ -3768,6 +4841,25 @@ lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 0;
}
+/**
+ * lpfc_els_rcv_fan: Process an unsolicited fan iocb command.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @fan_ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Fabric Address Notification (FAN) IOCB
+ * command received as an ELS unsolicited event. The FAN ELS command is
+ * only processed on a physical port (i.e., the @vport represents the
+ * physical port). The fabric NodeName and PortName from the FAN IOCB are
+ * compared against those in the phba data structure. If either of them is
+ * different, the lpfc_initial_flogi() routine is invoked to initiate a
+ * Fabric Login (FLOGI) to the fabric and start the discovery over.
+ * Otherwise, if both are identical, the lpfc_issue_fabric_reglogin()
+ * routine is invoked to register login to the fabric.
+ *
+ * Return code
+ * 0 - Successfully processed fan iocb (currently always returns 0).
+ **/
static int
lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *fan_ndlp)
@@ -3797,6 +4889,16 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 0;
}
+/**
+ * lpfc_els_timeout: Handler function for the els timer.
+ * @ptr: holder for the timer function associated data.
+ *
+ * This routine is invoked when the ELS timer expires. It posts the ELS
+ * timer timeout event by setting the WORKER_ELS_TMO bit in the work port
+ * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
+ * up the worker thread. The worker thread then invokes the
+ * lpfc_els_timeout_handler() routine to work on the posted WORKER_ELS_TMO
+ * event.
+ **/
void
lpfc_els_timeout(unsigned long ptr)
{
@@ -3816,6 +4918,15 @@ lpfc_els_timeout(unsigned long ptr)
return;
}
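The timer body follows the driver's standard post-and-wake deferral; a sketch consistent with the kernel-doc above (the hunk elides the code, and the work_port_lock name is an assumption):

	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
	struct lpfc_hba *phba = vport->phba;
	unsigned long iflag;

	spin_lock_irqsave(&vport->work_port_lock, iflag);
	if (!(vport->work_port_events & WORKER_ELS_TMO)) {
		/* post the timeout event for the worker thread */
		vport->work_port_events |= WORKER_ELS_TMO;
		spin_unlock_irqrestore(&vport->work_port_lock, iflag);
		lpfc_worker_wake_up(phba);
	} else
		spin_unlock_irqrestore(&vport->work_port_lock, iflag);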
+/**
+ * lpfc_els_timeout_handler: Process an els timeout event.
+ * @vport: pointer to a virtual N_Port data structure.
+ *
+ * This routine is the actual handler function that processes an ELS timeout
+ * event. It walks the ELS ring and aborts all the IOCBs associated with the
+ * @vport (except ABORT/CLOSE/FARP/FARPR/FDISC IOCBs) by invoking the
+ * lpfc_sli_issue_abort_iotag() routine.
+ **/
void
lpfc_els_timeout_handler(struct lpfc_vport *vport)
{
@@ -3828,10 +4939,6 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
uint32_t timeout;
uint32_t remote_ID = 0xffffffff;
- /* If the timer is already canceled do nothing */
- if ((vport->work_port_events & WORKER_ELS_TMO) == 0) {
- return;
- }
spin_lock_irq(&phba->hbalock);
timeout = (uint32_t)(phba->fc_ratov << 1);
@@ -3886,6 +4993,26 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
}
+/**
+ * lpfc_els_flush_cmd: Clean up the outstanding els commands to a vport.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine is used to clean up all the outstanding ELS commands on a
+ * @vport. It first aborts the @vport by invoking the
+ * lpfc_fabric_abort_vport() routine. After that, it walks the ELS transmit
+ * queue to remove all the IOCBs of the @vport other than the QUE_RING and
+ * ABORT/CLOSE IOCBs. For IOCBs with a non-NULL completion callback
+ * function, the callback function is invoked with the status set to
+ * IOSTAT_LOCAL_REJECT and un.ulpWord[4] set to IOERR_SLI_ABORTED. For
+ * IOCBs with a NULL completion callback function, the IOCB is simply
+ * released. Finally, it walks the ELS transmit completion queue and, by
+ * invoking the lpfc_sli_issue_abort_iotag() routine, issues an abort IOCB
+ * to the HBA for every transmit completion queue IOCB that is associated
+ * with the @vport and is not an IOCB from libdfc (i.e., the management
+ * plane IOCBs that are not part of the discovery state machine). Note that
+ * although this function issues an abort IOCB for each queued transmit
+ * completion IOCB, it does not guarantee that the IOCBs have been aborted
+ * by the time it returns.
+ **/
void
lpfc_els_flush_cmd(struct lpfc_vport *vport)
{
@@ -3948,6 +5075,23 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
return;
}
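A sketch of how the collected IOCBs are failed back, mirroring the IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED contract the kernel-doc above states (the hunk elides the loop; "completions" is assumed to be the local list the removed IOCBs were parked on):

	struct lpfc_iocbq *piocb;

	while (!list_empty(&completions)) {
		piocb = list_first_entry(&completions, struct lpfc_iocbq, list);
		list_del_init(&piocb->list);

		if (!piocb->iocb_cmpl)
			/* no callback: just return the iocb object */
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
			piocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}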
+/**
+ * lpfc_els_flush_all_cmd: Clean up all the outstanding els commands to a HBA.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is used to clean up all the outstanding ELS commands on a
+ * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba()
+ * routine. After that, it walks the ELS transmit queue to remove all the
+ * IOCBs of the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For
+ * IOCBs with a completion callback function associated, the callback
+ * function is invoked with the status set to IOSTAT_LOCAL_REJECT and
+ * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without a completion
+ * callback function associated, the IOCB is simply released. Finally, it
+ * walks the ELS transmit completion queue and issues an abort IOCB to the
+ * HBA for every transmit completion queue IOCB that is not an IOCB from
+ * libdfc (i.e., the management plane IOCBs that are not part of the
+ * discovery state machine), by invoking the lpfc_sli_issue_abort_iotag()
+ * routine.
+ **/
void
lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
{
@@ -3992,6 +5136,166 @@ lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
return;
}
+/**
+ * lpfc_send_els_failure_event: Posts an ELS command failure event.
+ * @phba: Pointer to hba context object.
+ * @cmdiocbp: Pointer to command iocb which reported error.
+ * @rspiocbp: Pointer to response iocb which reported error.
+ *
+ * This function sends an event when there is an ELS command
+ * failure.
+ **/
+void
+lpfc_send_els_failure_event(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocbp,
+ struct lpfc_iocbq *rspiocbp)
+{
+ struct lpfc_vport *vport = cmdiocbp->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_lsrjt_event lsrjt_event;
+ struct lpfc_fabric_event_header fabric_event;
+ struct ls_rjt stat;
+ struct lpfc_nodelist *ndlp;
+ uint32_t *pcmd;
+
+ ndlp = cmdiocbp->context1;
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ return;
+
+ if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) {
+ lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
+ lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
+ memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
+ sizeof(struct lpfc_name));
+ memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
+ sizeof(struct lpfc_name));
+ pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
+ cmdiocbp->context2)->virt);
+ lsrjt_event.command = *pcmd;
+ stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
+ lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
+ lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
+ fc_host_post_vendor_event(shost,
+ fc_get_event_number(),
+ sizeof(lsrjt_event),
+ (char *)&lsrjt_event,
+ LPFC_NL_VENDOR_ID);
+ return;
+ }
+ if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
+ (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) {
+ fabric_event.event_type = FC_REG_FABRIC_EVENT;
+ if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY)
+ fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
+ else
+ fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
+ memcpy(fabric_event.wwpn, &ndlp->nlp_portname,
+ sizeof(struct lpfc_name));
+ memcpy(fabric_event.wwnn, &ndlp->nlp_nodename,
+ sizeof(struct lpfc_name));
+ fc_host_post_vendor_event(shost,
+ fc_get_event_number(),
+ sizeof(fabric_event),
+ (char *)&fabric_event,
+ LPFC_NL_VENDOR_ID);
+ return;
+ }
+
+}
+
+/**
+ * lpfc_send_els_event: Posts unsolicited els event.
+ * @vport: Pointer to vport object.
+ * @ndlp: Pointer FC node object.
+ * @payload: pointer to the ELS command payload.
+ *
+ * This function posts an event when there is an incoming
+ * unsolicited ELS command.
+ **/
+static void
+lpfc_send_els_event(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ uint32_t *payload)
+{
+ struct lpfc_els_event_header *els_data = NULL;
+ struct lpfc_logo_event *logo_data = NULL;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (*payload == ELS_CMD_LOGO) {
+ logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
+ if (!logo_data) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0148 Failed to allocate memory "
+ "for LOGO event\n");
+ return;
+ }
+ els_data = &logo_data->header;
+ } else {
+ els_data = kmalloc(sizeof(struct lpfc_els_event_header),
+ GFP_KERNEL);
+ if (!els_data) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0149 Failed to allocate memory "
+ "for ELS event\n");
+ return;
+ }
+ }
+ els_data->event_type = FC_REG_ELS_EVENT;
+ switch (*payload) {
+ case ELS_CMD_PLOGI:
+ els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
+ break;
+ case ELS_CMD_PRLO:
+ els_data->subcategory = LPFC_EVENT_PRLO_RCV;
+ break;
+ case ELS_CMD_ADISC:
+ els_data->subcategory = LPFC_EVENT_ADISC_RCV;
+ break;
+ case ELS_CMD_LOGO:
+ els_data->subcategory = LPFC_EVENT_LOGO_RCV;
+ /* Copy the WWPN in the LOGO payload */
+ memcpy(logo_data->logo_wwpn, &payload[2],
+ sizeof(struct lpfc_name));
+ break;
+ default:
+ return;
+ }
+ memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
+ memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
+ if (*payload == ELS_CMD_LOGO) {
+ fc_host_post_vendor_event(shost,
+ fc_get_event_number(),
+ sizeof(struct lpfc_logo_event),
+ (char *)logo_data,
+ LPFC_NL_VENDOR_ID);
+ kfree(logo_data);
+ } else {
+ fc_host_post_vendor_event(shost,
+ fc_get_event_number(),
+ sizeof(struct lpfc_els_event_header),
+ (char *)els_data,
+ LPFC_NL_VENDOR_ID);
+ kfree(els_data);
+ }
+
+ return;
+}
+
+
+/**
+ * lpfc_els_unsol_buffer: Process an unsolicited event data buffer.
+ * @phba: pointer to lpfc hba data structure.
+ * @pring: pointer to a SLI ring.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @elsiocb: pointer to lpfc els command iocb data structure.
+ *
+ * This routine is used to process the IOCB associated with an unsolicited
+ * event. It first determines whether there is an existing ndlp that matches
+ * the DID from the unsolicited IOCB. If not, it creates a new one with
+ * the DID from the unsolicited IOCB. The ELS command from the unsolicited
+ * IOCB is then used to invoke the proper routine and to set up the proper
+ * state of the discovery state machine.
+ **/
static void
lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
@@ -4059,8 +5363,6 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
phba->fc_stat.elsRcvFrame++;
- if (elsiocb->context1)
- lpfc_nlp_put(elsiocb->context1);
elsiocb->context1 = lpfc_nlp_get(ndlp);
elsiocb->vport = vport;
@@ -4081,6 +5383,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvPLOGI++;
ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
+ lpfc_send_els_event(vport, ndlp, payload);
if (vport->port_state < LPFC_DISC_AUTH) {
if (!(phba->pport->fc_flag & FC_PT2PT) ||
(phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -4118,6 +5421,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
did, vport->port_state, ndlp->nlp_flag);
phba->fc_stat.elsRcvLOGO++;
+ lpfc_send_els_event(vport, ndlp, payload);
if (vport->port_state < LPFC_DISC_AUTH) {
rjt_err = LSRJT_UNABLE_TPC;
break;
@@ -4130,6 +5434,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
did, vport->port_state, ndlp->nlp_flag);
phba->fc_stat.elsRcvPRLO++;
+ lpfc_send_els_event(vport, ndlp, payload);
if (vport->port_state < LPFC_DISC_AUTH) {
rjt_err = LSRJT_UNABLE_TPC;
break;
@@ -4147,6 +5452,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
"RCV ADISC: did:x%x/ste:x%x flg:x%x",
did, vport->port_state, ndlp->nlp_flag);
+ lpfc_send_els_event(vport, ndlp, payload);
phba->fc_stat.elsRcvADISC++;
if (vport->port_state < LPFC_DISC_AUTH) {
rjt_err = LSRJT_UNABLE_TPC;
@@ -4270,6 +5576,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
NULL);
}
+ lpfc_nlp_put(elsiocb->context1);
+ elsiocb->context1 = NULL;
return;
dropit:
@@ -4282,6 +5590,19 @@ dropit:
phba->fc_stat.elsRcvDrop++;
}
+/**
+ * lpfc_find_vport_by_vpid: Find a vport on a HBA through vport identifier.
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: host virtual N_Port identifier.
+ *
+ * This routine finds a vport on a HBA (referred to by @phba) through a
+ * @vpi. The function walks the HBA's vport list and returns the address
+ * of the vport with the matching @vpi.
+ *
+ * Return code
+ * NULL - No vport with the matching @vpi found
+ * Otherwise - Address of the vport with the matching @vpi.
+ **/
static struct lpfc_vport *
lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
{
@@ -4299,6 +5620,18 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
return NULL;
}
+/**
+ * lpfc_els_unsol_event: Process an unsolicited event from an els sli ring.
+ * @phba: pointer to lpfc hba data structure.
+ * @pring: pointer to a SLI ring.
+ * @elsiocb: pointer to lpfc els iocb data structure.
+ *
+ * This routine is used to process an unsolicited event received from a SLI
+ * (Service Level Interface) ring. The actual processing of the data buffer
+ * associated with the unsolicited event is done by invoking the
+ * lpfc_els_unsol_buffer() routine after properly setting up the iocb buffer
+ * from the SLI ring on which the unsolicited event was received.
+ **/
void
lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *elsiocb)
@@ -4309,6 +5642,7 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
+ elsiocb->context1 = NULL;
elsiocb->context2 = NULL;
elsiocb->context3 = NULL;
@@ -4356,8 +5690,6 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* The different unsolicited event handlers would tell us
* if they are done with "mp" by setting context2 to NULL.
*/
- lpfc_nlp_put(elsiocb->context1);
- elsiocb->context1 = NULL;
if (elsiocb->context2) {
lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
elsiocb->context2 = NULL;
@@ -4376,6 +5708,19 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
}
+/**
+ * lpfc_do_scr_ns_plogi: Issue a plogi to the name server for scr.
+ * @phba: pointer to lpfc hba data structure.
+ * @vport: pointer to a virtual N_Port data structure.
+ *
+ * This routine issues a Port Login (PLOGI) to the Name Server with
+ * State Change Request (SCR) for a @vport. This routine creates an
+ * ndlp for the Name Server associated with the @vport if such a node does
+ * not already exist. The PLOGI to the Name Server is issued by invoking the
+ * lpfc_issue_els_plogi() routine. If the Fabric-Device Management Interface
+ * (FDMI) is configured for the @vport, an FDMI node is created and the
+ * PLOGI to FDMI is issued by invoking the lpfc_issue_els_plogi() routine.
+ **/
void
lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
@@ -4434,6 +5779,18 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
return;
}
+/**
+ * lpfc_cmpl_reg_new_vport: Completion callback function to register new vport.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine is the completion callback function for the register new
+ * vport mailbox command. If the new vport mailbox command completes
+ * successfully, the fabric registration login is performed on the physical
+ * port (the new vport created is actually a physical port, with VPI 0) or
+ * the port login to the Name Server for State Change Request (SCR) is
+ * performed on the virtual port (a real virtual port, with VPI greater
+ * than 0).
+ **/
static void
lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
@@ -4491,6 +5848,15 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
}
+/**
+ * lpfc_register_new_vport: Register a new vport with a HBA.
+ * @phba: pointer to lpfc hba data structure.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine registers the @vport as a new virtual port with a HBA.
+ * It is done through a registering vpi mailbox command.
+ **/
static void
lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp)
@@ -4531,6 +5897,26 @@ mbox_err_exit:
return;
}
+/**
+ * lpfc_cmpl_els_fdisc: Completion function for fdisc iocb command.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function for a Fabric Discover
+ * (FDISC) ELS command. Since all the FDISC ELS commands are issued
+ * single threaded, each FDISC completion callback function resets
+ * the discovery timer for all vports so that the timers do not time out
+ * unnecessarily. The function checks the FDISC IOCB status: if an error is
+ * detected, the vport is set to FC_VPORT_FAILED state; otherwise, the
+ * vport is set to FC_VPORT_ACTIVE state. It then checks whether the DID
+ * assigned to the vport has changed with the completion of the FDISC
+ * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
+ * are unregistered from the HBA, and then the lpfc_register_new_vport()
+ * routine is invoked to register the new vport with the HBA. Otherwise, the
+ * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
+ * Server for State Change Request (SCR).
+ **/
static void
lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -4565,58 +5951,80 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
/* FDISC failed */
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "0124 FDISC failed. (%d/%d)\n",
+ "0126 FDISC failed. (%d/%d)\n",
irsp->ulpStatus, irsp->un.ulpWord[4]);
+ goto fdisc_failed;
+ }
if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING)
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_nlp_put(ndlp);
/* giving up on FDISC. Cancel discovery timer */
lpfc_can_disctmo(vport);
- } else {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_FABRIC;
- if (vport->phba->fc_topology == TOPOLOGY_LOOP)
- vport->fc_flag |= FC_PUBLIC_LOOP;
- spin_unlock_irq(shost->host_lock);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_FABRIC;
+ if (vport->phba->fc_topology == TOPOLOGY_LOOP)
+ vport->fc_flag |= FC_PUBLIC_LOOP;
+ spin_unlock_irq(shost->host_lock);
- vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
- lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
- if ((vport->fc_prevDID != vport->fc_myDID) &&
- !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
- /* If our NportID changed, we need to ensure all
- * remaining NPORTs get unreg_login'ed so we can
- * issue unreg_vpi.
- */
- list_for_each_entry_safe(np, next_np,
- &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp) ||
- (np->nlp_state != NLP_STE_NPR_NODE) ||
- !(np->nlp_flag & NLP_NPR_ADISC))
- continue;
- spin_lock_irq(shost->host_lock);
- np->nlp_flag &= ~NLP_NPR_ADISC;
- spin_unlock_irq(shost->host_lock);
- lpfc_unreg_rpi(vport, np);
- }
- lpfc_mbx_unreg_vpi(vport);
+ vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
+ lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
+ if ((vport->fc_prevDID != vport->fc_myDID) &&
+ !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
+ /* If our NportID changed, we need to ensure all
+ * remaining NPORTs get unreg_login'ed so we can
+ * issue unreg_vpi.
+ */
+ list_for_each_entry_safe(np, next_np,
+ &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp) ||
+ (np->nlp_state != NLP_STE_NPR_NODE) ||
+ !(np->nlp_flag & NLP_NPR_ADISC))
+ continue;
spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ np->nlp_flag &= ~NLP_NPR_ADISC;
spin_unlock_irq(shost->host_lock);
+ lpfc_unreg_rpi(vport, np);
}
-
- if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
- lpfc_register_new_vport(phba, vport, ndlp);
- else
- lpfc_do_scr_ns_plogi(phba, vport);
-
- /* Unconditionaly kick off releasing fabric node for vports */
- lpfc_nlp_put(ndlp);
+ lpfc_mbx_unreg_vpi(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(shost->host_lock);
}
+ if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
+ lpfc_register_new_vport(phba, vport, ndlp);
+ else
+ lpfc_do_scr_ns_plogi(phba, vport);
+ goto out;
+fdisc_failed:
+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ /* Cancel discovery timer */
+ lpfc_can_disctmo(vport);
+ lpfc_nlp_put(ndlp);
out:
lpfc_els_free_iocb(phba, cmdiocb);
}
+/**
+ * lpfc_issue_els_fdisc: Issue a fdisc iocb command.
+ * @vport: pointer to a virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @retry: number of retries to the command IOCB.
+ *
+ * This routine prepares and issues a Fabric Discover (FDISC) IOCB to
+ * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb()
+ * routine to issue the IOCB, which ensures that only one fabric IOCB is
+ * outstanding on the HBA at any given time.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the FDISC ELS command.
+ *
+ * Return code
+ * 0 - Successfully issued fdisc iocb command
+ * 1 - Failed to issue fdisc iocb command
+ **/
static int
lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint8_t retry)
@@ -4691,6 +6099,20 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 0;
}
+/**
+ * lpfc_cmpl_els_npiv_logo: Completion function with vport logo.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the completion callback function for the issuing of a
+ * LOGO ELS command off a vport. It frees the command IOCB and then
+ * decrements the reference count held on the ndlp for this completion
+ * function, indicating that the reference to the ndlp is no longer needed.
+ * Note that the lpfc_els_free_iocb() routine decrements the ndlp reference
+ * held for this callback function, and the additional explicit ndlp
+ * reference decrement will trigger the actual release of the ndlp.
+ **/
static void
lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -4712,6 +6134,22 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_nlp_put(ndlp);
}
+/**
+ * lpfc_issue_els_npiv_logo: Issue a logo off a vport.
+ * @vport: pointer to a virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine issues a LOGO ELS command to an @ndlp off a @vport.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the LOGO ELS command.
+ *
+ * Return codes
+ * 0 - Successfully issued logo off the @vport
+ * 1 - Failed to issue logo off the @vport
+ **/
int
lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
@@ -4757,6 +6195,17 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
return 0;
}
+/**
+ * lpfc_fabric_block_timeout: Handler function for the fabric block timer.
+ * @ptr: holder for the timer function associated data.
+ *
+ * This routine is invoked when the fabric iocb block timer expires. It
+ * posts the fabric iocb block timeout event by setting the
+ * WORKER_FABRIC_BLOCK_TMO bit in the work port event bitmap and then
+ * invokes the lpfc_worker_wake_up() routine to wake up the worker thread.
+ * The worker thread then invokes the lpfc_unblock_fabric_iocbs() routine
+ * to work on the posted WORKER_FABRIC_BLOCK_TMO event.
+ **/
void
lpfc_fabric_block_timeout(unsigned long ptr)
{
@@ -4775,6 +6224,16 @@ lpfc_fabric_block_timeout(unsigned long ptr)
return;
}
+/**
+ * lpfc_resume_fabric_iocbs: Issue a fabric iocb from driver internal list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine issues one fabric iocb from the driver internal list to
+ * the HBA. It first checks whether it is ready to issue a fabric iocb to
+ * the HBA (i.e., there is no outstanding fabric iocb). If so, it removes
+ * one pending fabric iocb from the driver internal list and invokes the
+ * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA.
+ **/
static void
lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
{
@@ -4824,6 +6283,15 @@ repeat:
return;
}
+/**
+ * lpfc_unblock_fabric_iocbs: Unblock issuing fabric iocb command.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine unblocks the issuing of fabric iocb commands. It
+ * clears the fabric iocb block bit and then invokes the
+ * lpfc_resume_fabric_iocbs() routine to issue one of the pending fabric
+ * iocbs from the driver internal fabric iocb list.
+ **/
void
lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
{
@@ -4833,6 +6301,15 @@ lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
return;
}
+/**
+ * lpfc_block_fabric_iocbs: Block issuing fabric iocb command.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine blocks the issuing of fabric iocbs for a specified amount of
+ * time (currently 100 ms). This is done by setting the fabric iocb block
+ * bit and setting up a timeout timer for 100 ms. While the block bit is
+ * set, no fabric iocbs will be issued to the HBA.
+ **/
static void
lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
{
@@ -4846,6 +6323,19 @@ lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
return;
}
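A sketch of the block-with-timer pattern the kernel-doc above describes (the hunk elides the body; the flag and timer field names are assumptions):

	int blocked;

	/* Arm the 100 ms unblock timer only on the 0 -> 1 transition */
	blocked = test_and_set_bit(FABRIC_COMMANDS_BLOCKED, &phba->bit_flags);
	if (!blocked)
		mod_timer(&phba->fabric_block_timer, jiffies + HZ/10);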
+/**
+ * lpfc_cmpl_fabric_iocb: Completion callback function for fabric iocb.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine is the callback function installed as the fabric iocb's
+ * completion function pointer (iocb->iocb_cmpl); the original iocb's
+ * callback function pointer has been stored in iocb->fabric_iocb_cmpl.
+ * This callback function first restores and invokes the original iocb's
+ * callback function and then invokes the lpfc_resume_fabric_iocbs() routine
+ * to issue the next fabric-bound iocb from the driver internal fabric iocb
+ * list onto the wire.
+ **/
static void
lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -4892,6 +6382,30 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
}
+/**
+ * lpfc_issue_fabric_iocb: Issue a fabric iocb command.
+ * @phba: pointer to lpfc hba data structure.
+ * @iocb: pointer to lpfc command iocb data structure.
+ *
+ * This routine is used as the top-level API for issuing a fabric iocb
+ * command such as FLOGI and FDISC. To accommodate certain switch fabrics,
+ * this driver function makes sure that only one fabric-bound iocb is
+ * outstanding at any given time. As such, this function first checks
+ * whether there is already an outstanding fabric iocb on the wire. If so,
+ * it puts the newly issued iocb onto the driver internal fabric iocb list,
+ * to be issued later. Otherwise, it issues the iocb on the wire and updates
+ * the fabric iocb count to indicate that there is one fabric iocb on the
+ * wire.
+ *
+ * Note that this implementation can potentially send fabric IOCBs out of
+ * order: the "ready" boolean does not include the condition that the
+ * internal fabric IOCB list is empty. As such, a fabric IOCB issued by this
+ * routine might jump ahead of the fabric IOCBs already on the internal
+ * list (see the sketch after this hunk).
+ *
+ * Return code
+ * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
+ * IOCB_ERROR - failed to issue fabric iocb
+ **/
static int
lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
{
@@ -4937,7 +6451,17 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
return ret;
}
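A sketch of the "ready" test criticized in the note above, next to the stricter form that would close the ordering hole (field names are assumptions):

	/* As described: ready when no fabric IOCB is outstanding ... */
	ready = atomic_read(&phba->fabric_iocb_count) == 0;

	/* ... but strict ordering also needs an empty backlog */
	ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
		list_empty(&phba->fabric_iocb_list);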
-
+/**
+ * lpfc_fabric_abort_vport: Abort a vport's iocbs from driver fabric iocb list.
+ * @vport: pointer to a virtual N_Port data structure.
+ *
+ * This routine aborts all the IOCBs associated with a @vport from the
+ * driver internal fabric IOCB list. The list contains fabric IOCBs to be
+ * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
+ * list, removes each IOCB associated with the @vport from the list, sets
+ * the status field to IOSTAT_LOCAL_REJECT, and invokes the callback
+ * function associated with the IOCB.
+ **/
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
{
LIST_HEAD(completions);
@@ -4967,6 +6491,17 @@ static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
}
}
+/**
+ * lpfc_fabric_abort_nport: Abort a ndlp's iocbs from driver fabric iocb list.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine aborts all the IOCBs associated with an @ndlp from the
+ * driver internal fabric IOCB list. The list contains fabric IOCBs to be
+ * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
+ * list, removes each IOCB associated with the @ndlp from the list, sets
+ * the status field to IOSTAT_LOCAL_REJECT, and invokes the callback
+ * function associated with the IOCB.
+ **/
void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
{
LIST_HEAD(completions);
@@ -4996,6 +6531,17 @@ void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
}
}
+/**
+ * lpfc_fabric_abort_hba: Abort all iocbs on driver fabric iocb list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine aborts all the IOCBs currently on the driver internal
+ * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
+ * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
+ * list, removes each IOCB from the list, sets the status field to
+ * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
+ * the IOCB.
+ **/
void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
{
LIST_HEAD(completions);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index a98d11bf3576..8c64494444bf 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -30,6 +30,7 @@
#include <scsi/scsi_transport_fc.h>
#include "lpfc_hw.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_scsi.h"
@@ -88,14 +89,6 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
&phba->sli.ring[phba->sli.fcp_ring],
ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
-
- /*
- * A device is normally blocked for rediscovery and unblocked when
- * devloss timeout happens. In case a vport is removed or driver
- * unloaded before devloss timeout happens, we need to unblock here.
- */
- scsi_target_unblock(&rport->dev);
- return;
}
/*
@@ -215,8 +208,16 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
return;
}
- if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
+ if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0284 Devloss timeout Ignored on "
+ "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
+ "NPort x%x\n",
+ *name, *(name+1), *(name+2), *(name+3),
+ *(name+4), *(name+5), *(name+6), *(name+7),
+ ndlp->nlp_DID);
return;
+ }
if (ndlp->nlp_type & NLP_FABRIC) {
/* We will clean up these Nodes in linkup */
@@ -237,8 +238,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
- if (vport->load_flag & FC_UNLOADING)
- warn_on = 0;
if (warn_on) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
@@ -276,6 +275,124 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
}
+/**
+ * lpfc_alloc_fast_evt: Allocates data structure for posting event.
+ * @phba: Pointer to hba context object.
+ *
+ * This function is called from functions which need to post events
+ * from interrupt context. It allocates the data structure required
+ * for posting an event and keeps track of the number of pending
+ * events to prevent an event storm when there are too many events.
+ **/
+struct lpfc_fast_path_event *
+lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
+ struct lpfc_fast_path_event *ret;
+
+	/* If there are already a lot of fast events, do not exhaust memory */
+ if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
+ return NULL;
+
+ ret = kzalloc(sizeof(struct lpfc_fast_path_event),
+ GFP_ATOMIC);
+	if (!ret)
+		return NULL;
+	atomic_inc(&phba->fast_event_count);
+	INIT_LIST_HEAD(&ret->work_evt.evt_listp);
+ ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
+ return ret;
+}
+
+/**
+ * lpfc_free_fast_evt: Frees event data structure.
+ * @phba: Pointer to hba context object.
+ * @evt: Event object which needs to be freed.
+ *
+ * This function frees the data structure required for posting
+ * events.
+ **/
+void
+lpfc_free_fast_evt(struct lpfc_hba *phba,
+ struct lpfc_fast_path_event *evt) {
+
+ atomic_dec(&phba->fast_event_count);
+ kfree(evt);
+}
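A sketch of the intended producer side: allocate in interrupt context, fill in the union, park the event on the driver's work list, and wake the worker, whose lpfc_send_fastpath_evt() below posts and frees it (the work_list field name is an assumption; the union fields appear in the code below):

	struct lpfc_fast_path_event *fast_path_evt;
	unsigned long flags;

	fast_path_evt = lpfc_alloc_fast_evt(phba);
	if (!fast_path_evt)
		return;	/* over LPFC_MAX_EVT_COUNT: drop rather than storm */

	fast_path_evt->un.fabric_evt.event_type = FC_REG_FABRIC_EVENT;
	fast_path_evt->un.fabric_evt.subcategory = LPFC_EVENT_PORT_BUSY;
	fast_path_evt->vport = vport;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);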
+
+/**
+ * lpfc_send_fastpath_evt: Posts events generated from fast path.
+ * @phba: Pointer to hba context object.
+ * @evtp: Event data structure.
+ *
+ * This function is called from the worker thread when the interrupt
+ * context needs to post an event. It posts the event to the FC
+ * transport netlink interface.
+ **/
+static void
+lpfc_send_fastpath_evt(struct lpfc_hba *phba,
+ struct lpfc_work_evt *evtp)
+{
+ unsigned long evt_category, evt_sub_category;
+ struct lpfc_fast_path_event *fast_evt_data;
+ char *evt_data;
+ uint32_t evt_data_size;
+ struct Scsi_Host *shost;
+
+ fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
+ work_evt);
+
+ evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
+ evt_sub_category = (unsigned long) fast_evt_data->un.
+ fabric_evt.subcategory;
+ shost = lpfc_shost_from_vport(fast_evt_data->vport);
+ if (evt_category == FC_REG_FABRIC_EVENT) {
+ if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
+ evt_data = (char *) &fast_evt_data->un.read_check_error;
+ evt_data_size = sizeof(fast_evt_data->un.
+ read_check_error);
+ } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
+ (evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
+ evt_data = (char *) &fast_evt_data->un.fabric_evt;
+ evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
+ } else {
+ lpfc_free_fast_evt(phba, fast_evt_data);
+ return;
+ }
+ } else if (evt_category == FC_REG_SCSI_EVENT) {
+ switch (evt_sub_category) {
+ case LPFC_EVENT_QFULL:
+ case LPFC_EVENT_DEVBSY:
+ evt_data = (char *) &fast_evt_data->un.scsi_evt;
+ evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
+ break;
+ case LPFC_EVENT_CHECK_COND:
+ evt_data = (char *) &fast_evt_data->un.check_cond_evt;
+ evt_data_size = sizeof(fast_evt_data->un.
+ check_cond_evt);
+ break;
+ case LPFC_EVENT_VARQUEDEPTH:
+ evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
+ evt_data_size = sizeof(fast_evt_data->un.
+ queue_depth_evt);
+ break;
+ default:
+ lpfc_free_fast_evt(phba, fast_evt_data);
+ return;
+ }
+ } else {
+ lpfc_free_fast_evt(phba, fast_evt_data);
+ return;
+ }
+
+ fc_host_post_vendor_event(shost,
+ fc_get_event_number(),
+ evt_data_size,
+ evt_data,
+ LPFC_NL_VENDOR_ID);
+
+ lpfc_free_fast_evt(phba, fast_evt_data);
+ return;
+}
+
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
@@ -347,6 +464,10 @@ lpfc_work_list_done(struct lpfc_hba *phba)
lpfc_unblock_mgmt_io(phba);
complete((struct completion *)(evtp->evt_arg2));
break;
+ case LPFC_EVT_FASTPATH_MGMT_EVT:
+ lpfc_send_fastpath_evt(phba, evtp);
+ free_evt = 0;
+ break;
}
if (free_evt)
kfree(evtp);
@@ -371,6 +492,7 @@ lpfc_work_done(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
if (ha_copy & HA_ERATT)
+ /* Handle the error attention event */
lpfc_handle_eratt(phba);
if (ha_copy & HA_MBATT)
@@ -378,6 +500,7 @@ lpfc_work_done(struct lpfc_hba *phba)
if (ha_copy & HA_LATT)
lpfc_handle_latt(phba);
+
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
for(i = 0; i <= phba->max_vpi; i++) {
@@ -462,20 +585,25 @@ lpfc_do_work(void *p)
set_user_nice(current, -20);
phba->data_flags = 0;
- while (1) {
+ while (!kthread_should_stop()) {
/* wait and check worker queue activities */
rc = wait_event_interruptible(phba->work_waitq,
(test_and_clear_bit(LPFC_DATA_READY,
&phba->data_flags)
|| kthread_should_stop()));
- BUG_ON(rc);
-
- if (kthread_should_stop())
+ /* Signal wakeup shall terminate the worker thread */
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "0433 Wakeup on signal: rc=x%x\n", rc);
break;
+ }
/* Attend pending lpfc data processing */
lpfc_work_done(phba);
}
+ phba->worker_thread = NULL;
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "0432 Worker thread stopped.\n");
return 0;
}
@@ -1013,14 +1141,10 @@ out:
}
static void
-lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
+lpfc_enable_la(struct lpfc_hba *phba)
{
uint32_t control;
struct lpfc_sli *psli = &phba->sli;
-
- lpfc_linkdown(phba);
-
- /* turn on Link Attention interrupts - no CLEAR_LA needed */
spin_lock_irq(&phba->hbalock);
psli->sli_flag |= LPFC_PROCESS_LA;
control = readl(phba->HCregaddr);
@@ -1030,6 +1154,15 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
}
+static void
+lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
+{
+ lpfc_linkdown(phba);
+ lpfc_enable_la(phba);
+ /* turn on Link Attention interrupts - no CLEAR_LA needed */
+}
+
+
/*
* This routine handles processing a READ_LA mailbox
* command upon completion. It is setup in the LPFC_MBOXQ
@@ -1077,8 +1210,12 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
phba->fc_eventTag = la->eventTag;
+ if (la->mm)
+ phba->sli.sli_flag |= LPFC_MENLO_MAINT;
+ else
+ phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
- if (la->attType == AT_LINK_UP) {
+ if (la->attType == AT_LINK_UP && (!la->mm)) {
phba->fc_stat.LinkUp++;
if (phba->link_flag & LS_LOOPBACK_MODE) {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -1090,13 +1227,15 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1303 Link Up Event x%x received "
- "Data: x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%x x%x x%x x%x %d\n",
la->eventTag, phba->fc_eventTag,
la->granted_AL_PA, la->UlnkSpeed,
- phba->alpa_map[0]);
+ phba->alpa_map[0],
+ la->mm, la->fa,
+ phba->wait_4_mlo_maint_flg);
}
lpfc_mbx_process_link_up(phba, la);
- } else {
+ } else if (la->attType == AT_LINK_DOWN) {
phba->fc_stat.LinkDown++;
if (phba->link_flag & LS_LOOPBACK_MODE) {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -1109,11 +1248,46 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
else {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1305 Link Down Event x%x received "
+ "Data: x%x x%x x%x x%x x%x\n",
+ la->eventTag, phba->fc_eventTag,
+ phba->pport->port_state, vport->fc_flag,
+ la->mm, la->fa);
+ }
+ lpfc_mbx_issue_link_down(phba);
+ }
+ if (la->mm && la->attType == AT_LINK_UP) {
+ if (phba->link_state != LPFC_LINK_DOWN) {
+ phba->fc_stat.LinkDown++;
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+ "1312 Link Down Event x%x received "
"Data: x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag);
+ lpfc_mbx_issue_link_down(phba);
+ } else
+ lpfc_enable_la(phba);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+ "1310 Menlo Maint Mode Link up Event x%x rcvd "
+ "Data: x%x x%x x%x\n",
+ la->eventTag, phba->fc_eventTag,
+ phba->pport->port_state, vport->fc_flag);
+ /*
+ * The MENLO_SET_MODE or MENLO_RESET command that triggered
+ * this event will be waiting for this signal; wake it up.
+ */
+ if (phba->wait_4_mlo_maint_flg) {
+ phba->wait_4_mlo_maint_flg = 0;
+ wake_up_interruptible(&phba->wait_4_mlo_m_q);
}
- lpfc_mbx_issue_link_down(phba);
+ }
+
+ if (la->fa) {
+ if (la->mm)
+ lpfc_issue_clear_la(phba, vport);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
+ "1311 fa %d\n", la->fa);
}
lpfc_mbx_cmpl_read_la_free_mbuf:
@@ -1177,7 +1351,7 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
scsi_host_put(shost);
}
-void
+int
lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
@@ -1186,7 +1360,7 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
- return;
+ return 1;
lpfc_unreg_vpi(phba, vport->vpi, mbox);
mbox->vport = vport;
@@ -1197,7 +1371,9 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
"1800 Could not issue unreg_vpi\n");
mempool_free(mbox, phba->mbox_mem_pool);
vport->unreg_vpi_cmpl = VPORT_ERROR;
+ return rc;
}
+ return 0;
}
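With the return type changed from void to int, callers can now tell whether the UNREG_VPI mailbox was actually issued. A hypothetical call site (not taken from this patch):

	/* Nonzero means the mailbox could not be allocated or issued;
	 * do not wait for an UNREG_VPI completion in that case.
	 */
	if (lpfc_mbx_unreg_vpi(vport))
		handle_unreg_vpi_failure(vport);	/* hypothetical helper */
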
static void
@@ -1553,6 +1729,22 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
lpfc_register_remote_port(vport, ndlp);
}
+ if ((new_state == NLP_STE_MAPPED_NODE) &&
+ (vport->stat_data_enabled)) {
+ /*
+ * A new target is discovered, if there is no buffer for
+ * statistical data collection allocate buffer.
+ */
+ ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
+ sizeof(struct lpfc_scsicmd_bkt),
+ GFP_KERNEL);
+
+ if (!ndlp->lat_data)
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ "0286 lpfc_nlp_state_cleanup failed to "
+ "allocate statistical data buffer DID "
+ "0x%x\n", ndlp->nlp_DID);
+ }
/*
* if we added to Mapped list, but the remote port
* registration failed or assigned a target id outside
@@ -1665,6 +1857,32 @@ lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
NLP_STE_UNUSED_NODE);
}
+/**
+ * lpfc_initialize_node: Initialize all fields of node object.
+ * @vport: Pointer to Virtual Port object.
+ * @ndlp: Pointer to FC node object.
+ * @did: FC_ID of the node.
+ * This function is always called when a node object needs to
+ * be initialized. It initializes all the fields of the node
+ * object.
+ **/
+static inline void
+lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ uint32_t did)
+{
+ INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
+ INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
+ init_timer(&ndlp->nlp_delayfunc);
+ ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
+ ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
+ ndlp->nlp_DID = did;
+ ndlp->vport = vport;
+ ndlp->nlp_sid = NLP_NO_SID;
+ kref_init(&ndlp->kref);
+ NLP_INT_NODE_ACT(ndlp);
+ atomic_set(&ndlp->cmd_pending, 0);
+ ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
+}
struct lpfc_nodelist *
lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
@@ -1705,17 +1923,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* re-initialize ndlp except of ndlp linked list pointer */
memset((((char *)ndlp) + sizeof (struct list_head)), 0,
sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
- INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
- INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
- init_timer(&ndlp->nlp_delayfunc);
- ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
- ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
- ndlp->nlp_DID = did;
- ndlp->vport = vport;
- ndlp->nlp_sid = NLP_NO_SID;
- /* ndlp management re-initialize */
- kref_init(&ndlp->kref);
- NLP_INT_NODE_ACT(ndlp);
+ lpfc_initialize_node(vport, ndlp, did);
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
@@ -2786,7 +2994,7 @@ restart_disc:
default:
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
- "0229 Unexpected discovery timeout, "
+ "0273 Unexpected discovery timeout, "
"vport State x%x\n", vport->port_state);
break;
}
@@ -2929,17 +3137,9 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint32_t did)
{
memset(ndlp, 0, sizeof (struct lpfc_nodelist));
- INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
- INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
- init_timer(&ndlp->nlp_delayfunc);
- ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
- ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
- ndlp->nlp_DID = did;
- ndlp->vport = vport;
- ndlp->nlp_sid = NLP_NO_SID;
+
+ lpfc_initialize_node(vport, ndlp, did);
INIT_LIST_HEAD(&ndlp->nlp_listp);
- kref_init(&ndlp->kref);
- NLP_INT_NODE_ACT(ndlp);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
"node init: did:x%x",
@@ -2979,8 +3179,10 @@ lpfc_nlp_release(struct kref *kref)
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
/* free ndlp memory for final ndlp release */
- if (NLP_CHK_FREE_REQ(ndlp))
+ if (NLP_CHK_FREE_REQ(ndlp)) {
+ kfree(ndlp->lat_data);
mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
+ }
}
/* This routine bumps the reference count for a ndlp structure to ensure
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 7773b949aa7c..4168c7b498b8 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -65,6 +65,9 @@
#define SLI3_IOCB_RSP_SIZE 64
+/* vendor ID used in SCSI netlink calls */
+#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)
+
/* Common Transport structures and definitions */
union CtRevisionId {
@@ -866,6 +869,12 @@ typedef struct _D_ID { /* Structure is in Big Endian format */
} un;
} D_ID;
+#define RSCN_ADDRESS_FORMAT_PORT 0x0
+#define RSCN_ADDRESS_FORMAT_AREA 0x1
+#define RSCN_ADDRESS_FORMAT_DOMAIN 0x2
+#define RSCN_ADDRESS_FORMAT_FABRIC 0x3
+#define RSCN_ADDRESS_FORMAT_MASK 0x3
+
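These two low-order bits of an RSCN page qualifier indicate how much of the affected address is significant. A hedged sketch of how one page entry might be classified (assuming the D_ID layout from this header, with the format qualifier carried in un.b.resv):

	D_ID rscn_did;		/* one 4-byte RSCN page from the payload */

	switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
	case RSCN_ADDRESS_FORMAT_PORT:	 /* full 24-bit N_Port ID affected */
	case RSCN_ADDRESS_FORMAT_AREA:	 /* domain and area affected */
	case RSCN_ADDRESS_FORMAT_DOMAIN: /* whole domain affected */
	case RSCN_ADDRESS_FORMAT_FABRIC: /* whole fabric affected */
		break;
	}
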
/*
* Structure to define all ELS Payload types
*/
@@ -1107,6 +1116,8 @@ typedef struct {
/* Start FireFly Register definitions */
#define PCI_VENDOR_ID_EMULEX 0x10df
#define PCI_DEVICE_ID_FIREFLY 0x1ae5
+#define PCI_DEVICE_ID_PROTEUS_VF 0xe100
+#define PCI_DEVICE_ID_PROTEUS_PF 0xe180
#define PCI_DEVICE_ID_SAT_SMB 0xf011
#define PCI_DEVICE_ID_SAT_MID 0xf015
#define PCI_DEVICE_ID_RFLY 0xf095
@@ -1133,10 +1144,12 @@ typedef struct {
#define PCI_DEVICE_ID_LP11000S 0xfc10
#define PCI_DEVICE_ID_LPE11000S 0xfc20
#define PCI_DEVICE_ID_SAT_S 0xfc40
+#define PCI_DEVICE_ID_PROTEUS_S 0xfc50
#define PCI_DEVICE_ID_HELIOS 0xfd00
#define PCI_DEVICE_ID_HELIOS_SCSP 0xfd11
#define PCI_DEVICE_ID_HELIOS_DCSP 0xfd12
#define PCI_DEVICE_ID_ZEPHYR 0xfe00
+#define PCI_DEVICE_ID_HORNET 0xfe05
#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11
#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
@@ -1154,6 +1167,7 @@ typedef struct {
#define ZEPHYR_JEDEC_ID 0x0577
#define VIPER_JEDEC_ID 0x4838
#define SATURN_JEDEC_ID 0x1004
+#define HORNET_JDEC_ID 0x2057706D
#define JEDEC_ID_MASK 0x0FFFF000
#define JEDEC_ID_SHIFT 12
@@ -1198,6 +1212,18 @@ typedef struct { /* FireFly BIU registers */
#define HA_RXATT 0x00000008 /* Bit 3 */
#define HA_RXMASK 0x0000000f
+#define HA_R0_CLR_MSK (HA_R0RE_REQ | HA_R0CE_RSP | HA_R0ATT)
+#define HA_R1_CLR_MSK (HA_R1RE_REQ | HA_R1CE_RSP | HA_R1ATT)
+#define HA_R2_CLR_MSK (HA_R2RE_REQ | HA_R2CE_RSP | HA_R2ATT)
+#define HA_R3_CLR_MSK (HA_R3RE_REQ | HA_R3CE_RSP | HA_R3ATT)
+
+#define HA_R0_POS 3
+#define HA_R1_POS 7
+#define HA_R2_POS 11
+#define HA_R3_POS 15
+#define HA_LE_POS 29
+#define HA_MB_POS 30
+#define HA_ER_POS 31
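The *_CLR_MSK values group each ring's request, response, and attention bits, and the *_POS values give the bit positions of the individual attention conditions. A hedged illustration of both (the write-to-clear behavior of the Host Attention register is assumed from existing driver usage, not defined here):

	/* Acknowledge all ring-0 attention bits in one write. */
	writel(HA_R0_CLR_MSK, phba->HAregaddr);
	readl(phba->HAregaddr);		/* flush the posted PCI write */

	/* Test the error-attention condition by bit position. */
	if (readl(phba->HAregaddr) & (1U << HA_ER_POS))
		lpfc_handle_eratt(phba);
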
/* Chip Attention Register */
#define CA_REG_OFFSET 4 /* Byte offset from register base address */
@@ -1235,7 +1261,7 @@ typedef struct { /* FireFly BIU registers */
/* Host Control Register */
-#define HC_REG_OFFSET 12 /* Word offset from register base address */
+#define HC_REG_OFFSET 12 /* Byte offset from register base address */
#define HC_MBINT_ENA 0x00000001 /* Bit 0 */
#define HC_R0INT_ENA 0x00000002 /* Bit 1 */
@@ -1248,6 +1274,19 @@ typedef struct { /* FireFly BIU registers */
#define HC_LAINT_ENA 0x20000000 /* Bit 29 */
#define HC_ERINT_ENA 0x80000000 /* Bit 31 */
+/* Message Signaled Interrupt eXtension (MSI-X) message identifiers */
+#define MSIX_DFLT_ID 0
+#define MSIX_RNG0_ID 0
+#define MSIX_RNG1_ID 1
+#define MSIX_RNG2_ID 2
+#define MSIX_RNG3_ID 3
+
+#define MSIX_LINK_ID 4
+#define MSIX_MBOX_ID 5
+
+#define MSIX_SPARE0_ID 6
+#define MSIX_SPARE1_ID 7
+
/* Mailbox Commands */
#define MBX_SHUTDOWN 0x00 /* terminate testing */
#define MBX_LOAD_SM 0x01
@@ -1285,10 +1324,14 @@ typedef struct { /* FireFly BIU registers */
#define MBX_KILL_BOARD 0x24
#define MBX_CONFIG_FARP 0x25
#define MBX_BEACON 0x2A
+#define MBX_CONFIG_MSI 0x30
#define MBX_HEARTBEAT 0x31
#define MBX_WRITE_VPARMS 0x32
#define MBX_ASYNCEVT_ENABLE 0x33
+#define MBX_PORT_CAPABILITIES 0x3B
+#define MBX_PORT_IOV_CONTROL 0x3C
+
#define MBX_CONFIG_HBQ 0x7C
#define MBX_LOAD_AREA 0x81
#define MBX_RUN_BIU_DIAG64 0x84
@@ -1474,24 +1517,18 @@ struct ulp_bde64 { /* SLI-2 */
uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
VALUE !! */
#endif
-
-#define BUFF_USE_RSVD 0x01 /* bdeFlags */
-#define BUFF_USE_INTRPT 0x02 /* Not Implemented with LP6000 */
-#define BUFF_USE_CMND 0x04 /* Optional, 1=cmd/rsp 0=data buffer */
-#define BUFF_USE_RCV 0x08 /* "" "", 1=rcv buffer, 0=xmit
- buffer */
-#define BUFF_TYPE_32BIT 0x10 /* "" "", 1=32 bit addr 0=64 bit
- addr */
-#define BUFF_TYPE_SPECIAL 0x20 /* Not Implemented with LP6000 */
-#define BUFF_TYPE_BDL 0x40 /* Optional, may be set in BDL */
-#define BUFF_TYPE_INVALID 0x80 /* "" "" */
+#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */
+#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */
+#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */
+#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */
+#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */
+#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */
+#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */
} f;
} tus;
uint32_t addrLow;
uint32_t addrHigh;
};
-#define BDE64_SIZE_WORD 0
-#define BPL64_SIZE_WORD 0x40
typedef struct ULP_BDL { /* SLI-2 */
#ifdef __BIG_ENDIAN_BITFIELD
@@ -1507,6 +1544,108 @@ typedef struct ULP_BDL { /* SLI-2 */
uint32_t ulpIoTag32; /* Can be used for 32 bit I/O Tag */
} ULP_BDL;
+/*
+ * BlockGuard Definitions
+ */
+
+enum lpfc_protgrp_type {
+ LPFC_PG_TYPE_INVALID = 0, /* used to indicate errors */
+ LPFC_PG_TYPE_NO_DIF, /* no DIF data pointed to by prot grp */
+ LPFC_PG_TYPE_EMBD_DIF, /* DIF is embedded (inline) with data */
+ LPFC_PG_TYPE_DIF_BUF /* DIF has its own scatter/gather list */
+};
+
+/* PDE Descriptors */
+#define LPFC_PDE1_DESCRIPTOR 0x81
+#define LPFC_PDE2_DESCRIPTOR 0x82
+#define LPFC_PDE3_DESCRIPTOR 0x83
+
+/* BlockGuard Profiles */
+enum lpfc_bg_prof_codes {
+ LPFC_PROF_INVALID,
+ LPFC_PROF_A1 = 128, /* Full Protection */
+ LPFC_PROF_A2, /* Disabled Protection Checks:A2~A4 */
+ LPFC_PROF_A3,
+ LPFC_PROF_A4,
+ LPFC_PROF_B1, /* Embedded DIFs: B1~B3 */
+ LPFC_PROF_B2,
+ LPFC_PROF_B3,
+ LPFC_PROF_C1, /* Separate DIFs: C1~C3 */
+ LPFC_PROF_C2,
+ LPFC_PROF_C3,
+ LPFC_PROF_D1, /* Full Protection */
+ LPFC_PROF_D2, /* Partial Protection & Check Disabling */
+ LPFC_PROF_D3,
+ LPFC_PROF_E1, /* E1~E4:out - check-only, in - update apptag */
+ LPFC_PROF_E2,
+ LPFC_PROF_E3,
+ LPFC_PROF_E4,
+ LPFC_PROF_F1, /* Full Translation - F1 Prot Descriptor */
+ /* F1 Translation BDE */
+ LPFC_PROF_ANT1, /* TCP checksum, DIF inline with data buffers */
+ LPFC_PROF_AST1, /* TCP checksum, DIF split from data buffer */
+ LPFC_PROF_ANT2,
+ LPFC_PROF_AST2
+};
+
+/* BlockGuard error-control defines */
+#define BG_EC_STOP_ERR 0x00
+#define BG_EC_CONT_ERR 0x01
+#define BG_EC_IGN_UNINIT_STOP_ERR 0x10
+#define BG_EC_IGN_UNINIT_CONT_ERR 0x11
+
+/* PDE (Protection Descriptor Entry) word 0 bit masks and shifts */
+#define PDE_DESC_TYPE_MASK 0xff000000
+#define PDE_DESC_TYPE_SHIFT 24
+#define PDE_BG_PROFILE_MASK 0x00ff0000
+#define PDE_BG_PROFILE_SHIFT 16
+#define PDE_BLOCK_LEN_MASK 0x0000fffc
+#define PDE_BLOCK_LEN_SHIFT 2
+#define PDE_ERR_CTRL_MASK 0x00000003
+#define PDE_ERR_CTRL_SHIFT 0
+/* PDE word 1 bit masks and shifts */
+#define PDE_APPTAG_MASK_MASK 0xffff0000
+#define PDE_APPTAG_MASK_SHIFT 16
+#define PDE_APPTAG_VAL_MASK 0x0000ffff
+#define PDE_APPTAG_VAL_SHIFT 0
+struct lpfc_pde {
+ uint32_t parms; /* bitfields of descriptor, prof, len, and ec */
+ uint32_t apptag; /* bitfields of app tag mask and app tag value */
+ uint32_t reftag; /* reference tag occupying all 32 bits */
+};
+
+/* inline function to set fields in parms of PDE */
+static inline void
+lpfc_pde_set_bg_parms(struct lpfc_pde *p, u8 desc, u8 prof, u16 len, u8 ec)
+{
+ uint32_t *wp = &p->parms;
+
+ /* spec indicates that adapter appends two 0's to length field */
+ len = len >> 2;
+
+ *wp &= 0;
+ *wp |= ((desc << PDE_DESC_TYPE_SHIFT) & PDE_DESC_TYPE_MASK);
+ *wp |= ((prof << PDE_BG_PROFILE_SHIFT) & PDE_BG_PROFILE_MASK);
+ *wp |= ((len << PDE_BLOCK_LEN_SHIFT) & PDE_BLOCK_LEN_MASK);
+ *wp |= ((ec << PDE_ERR_CTRL_SHIFT) & PDE_ERR_CTRL_MASK);
+ *wp = le32_to_cpu(*wp);
+}
+
+/* inline function to set apptag and reftag fields of PDE */
+static inline void
+lpfc_pde_set_dif_parms(struct lpfc_pde *p, u16 apptagmask, u16 apptagval,
+ u32 reftag)
+{
+ uint32_t *wp = &p->apptag;
+ *wp &= 0;
+ *wp |= ((apptagmask << PDE_APPTAG_MASK_SHIFT) & PDE_APPTAG_MASK_MASK);
+ *wp |= ((apptagval << PDE_APPTAG_VAL_SHIFT) & PDE_APPTAG_VAL_MASK);
+ *wp = le32_to_cpu(*wp);
+ wp = &p->reftag;
+ *wp = le32_to_cpu(reftag);
+}
+
+
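Taken together, the two helpers above fill a complete protection descriptor. A minimal usage sketch with hypothetical values (512-byte blocks, full protection, stop on first error; start_lba is an assumed variable seeding the 32-bit reference tag):

	struct lpfc_pde pde;
	uint32_t start_lba = 0;	/* hypothetical: reftag taken from the LBA */

	lpfc_pde_set_bg_parms(&pde, LPFC_PDE1_DESCRIPTOR, LPFC_PROF_A1,
			      512, BG_EC_STOP_ERR);
	lpfc_pde_set_dif_parms(&pde, 0xffff /* check every apptag bit */,
			       0 /* expected apptag */, start_lba);
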
/* Structure for MB Command LOAD_SM and DOWN_LOAD */
typedef struct {
@@ -2201,7 +2340,10 @@ typedef struct {
typedef struct {
uint32_t eventTag; /* Event tag */
#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd1:22;
+ uint32_t rsvd1:19;
+ uint32_t fa:1;
+ uint32_t mm:1; /* Menlo Maintenance mode enabled */
+ uint32_t rx:1;
uint32_t pb:1;
uint32_t il:1;
uint32_t attType:8;
@@ -2209,7 +2351,10 @@ typedef struct {
uint32_t attType:8;
uint32_t il:1;
uint32_t pb:1;
- uint32_t rsvd1:22;
+ uint32_t rx:1;
+ uint32_t mm:1;
+ uint32_t fa:1;
+ uint32_t rsvd1:19;
#endif
#define AT_RESERVED 0x00 /* Reserved - attType */
@@ -2230,6 +2375,7 @@ typedef struct {
#define TOPOLOGY_PT_PT 0x01 /* Topology is pt-pt / pt-fabric */
#define TOPOLOGY_LOOP 0x02 /* Topology is FC-AL */
+#define TOPOLOGY_LNK_MENLO_MAINTENANCE 0x05 /* maint mode zephyr to menlo */
union {
struct ulp_bde lilpBde; /* This BDE points to a 128 byte buffer
@@ -2324,6 +2470,60 @@ typedef struct {
#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */
#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */
+#define WAKE_UP_PARMS_REGION_ID 4
+#define WAKE_UP_PARMS_WORD_SIZE 15
+
+/* Option rom version structure */
+struct prog_id {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t type;
+ uint8_t id;
+ uint32_t ver:4; /* Major Version */
+ uint32_t rev:4; /* Revision */
+ uint32_t lev:2; /* Level */
+ uint32_t dist:2; /* Dist Type */
+ uint32_t num:4; /* number after dist type */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t num:4; /* number after dist type */
+ uint32_t dist:2; /* Dist Type */
+ uint32_t lev:2; /* Level */
+ uint32_t rev:4; /* Revision */
+ uint32_t ver:4; /* Major Version */
+ uint8_t id;
+ uint8_t type;
+#endif
+};
+
+/* Structure for MB Command UPDATE_CFG (0x1B) */
+
+struct update_cfg_var {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd2:16;
+ uint32_t type:8;
+ uint32_t rsvd:1;
+ uint32_t ra:1;
+ uint32_t co:1;
+ uint32_t cv:1;
+ uint32_t req:4;
+ uint32_t entry_length:16;
+ uint32_t region_id:16;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t req:4;
+ uint32_t cv:1;
+ uint32_t co:1;
+ uint32_t ra:1;
+ uint32_t rsvd:1;
+ uint32_t type:8;
+ uint32_t rsvd2:16;
+ uint32_t region_id:16;
+ uint32_t entry_length:16;
+#endif
+
+ uint32_t resp_info;
+ uint32_t byte_cnt;
+ uint32_t data_offset;
+};
+
struct hbq_mask {
#ifdef __BIG_ENDIAN_BITFIELD
uint8_t tmatch;
@@ -2487,11 +2687,19 @@ typedef struct {
uint32_t pcbLow; /* bit 31:0 of memory based port config block */
uint32_t pcbHigh; /* bit 63:32 of memory based port config block */
- uint32_t hbainit[6];
+ uint32_t hbainit[5];
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t hps : 1; /* bit 31 word9 Host Pointer in slim */
+ uint32_t rsvd : 31; /* least significant 31 bits of word 9 */
+#else /* __LITTLE_ENDIAN */
+ uint32_t rsvd : 31; /* least significant 31 bits of word 9 */
+ uint32_t hps : 1; /* bit 31 word9 Host Pointer in slim */
+#endif
#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd : 24; /* Reserved */
- uint32_t cmv : 1; /* Configure Max VPIs */
+ uint32_t rsvd1 : 23; /* Reserved */
+ uint32_t cbg : 1; /* Configure BlockGuard */
+ uint32_t cmv : 1; /* Configure Max VPIs */
uint32_t ccrp : 1; /* Config Command Ring Polling */
uint32_t csah : 1; /* Configure Synchronous Abort Handling */
uint32_t chbs : 1; /* Configure Host Backing store */
@@ -2508,10 +2716,12 @@ typedef struct {
uint32_t csah : 1; /* Configure Synchronous Abort Handling */
uint32_t ccrp : 1; /* Config Command Ring Polling */
uint32_t cmv : 1; /* Configure Max VPIs */
- uint32_t rsvd : 24; /* Reserved */
+ uint32_t cbg : 1; /* Configure BlockGuard */
+ uint32_t rsvd1 : 23; /* Reserved */
#endif
#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd2 : 24; /* Reserved */
+ uint32_t rsvd2 : 23; /* Reserved */
+ uint32_t gbg : 1; /* Grant BlockGuard */
uint32_t gmv : 1; /* Grant Max VPIs */
uint32_t gcrp : 1; /* Grant Command Ring Polling */
uint32_t gsah : 1; /* Grant Synchronous Abort Handling */
@@ -2529,7 +2739,8 @@ typedef struct {
uint32_t gsah : 1; /* Grant Synchronous Abort Handling */
uint32_t gcrp : 1; /* Grant Command Ring Polling */
uint32_t gmv : 1; /* Grant Max VPIs */
- uint32_t rsvd2 : 24; /* Reserved */
+ uint32_t gbg : 1; /* Grant BlockGuard */
+ uint32_t rsvd2 : 23; /* Reserved */
#endif
#ifdef __BIG_ENDIAN_BITFIELD
@@ -2560,6 +2771,40 @@ typedef struct {
} CONFIG_PORT_VAR;
+/* Structure for MB Command CONFIG_MSI (0x30) */
+struct config_msi_var {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t dfltMsgNum:8; /* Default message number */
+ uint32_t rsvd1:11; /* Reserved */
+ uint32_t NID:5; /* Number of secondary attention IDs */
+ uint32_t rsvd2:5; /* Reserved */
+ uint32_t dfltPresent:1; /* Default message number present */
+ uint32_t addFlag:1; /* Add association flag */
+ uint32_t reportFlag:1; /* Report association flag */
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t reportFlag:1; /* Report association flag */
+ uint32_t addFlag:1; /* Add association flag */
+ uint32_t dfltPresent:1; /* Default message number present */
+ uint32_t rsvd2:5; /* Reserved */
+ uint32_t NID:5; /* Number of secondary attention IDs */
+ uint32_t rsvd1:11; /* Reserved */
+ uint32_t dfltMsgNum:8; /* Default message number */
+#endif
+ uint32_t attentionConditions[2];
+ uint8_t attentionId[16];
+ uint8_t messageNumberByHA[64];
+ uint8_t messageNumberByID[16];
+ uint32_t autoClearHA[2];
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32_t rsvd3:16;
+ uint32_t autoClearID:16;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ uint32_t autoClearID:16;
+ uint32_t rsvd3:16;
+#endif
+ uint32_t rsvd4;
+};
+
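The mailbox variant above carries the mapping from attention conditions to MSI-X message numbers. A hedged sketch of how a request might be populated using the HA_*_POS bit positions and MSIX_*_ID message IDs defined earlier (illustrative only, not the driver's actual lpfc_config_msi; pmb is assumed to be a prepared LPFC_MBOXQ_t):

	struct config_msi_var *msi = &pmb->mb.un.varCfgMSI;

	memset(msi, 0, sizeof(*msi));
	msi->dfltPresent = 1;
	msi->dfltMsgNum = MSIX_DFLT_ID;	/* unlisted attentions -> message 0 */
	msi->messageNumberByHA[HA_R0_POS] = MSIX_RNG0_ID; /* ring-0 attention */
	msi->messageNumberByHA[HA_LE_POS] = MSIX_LINK_ID; /* link attention */
	msi->messageNumberByHA[HA_MB_POS] = MSIX_MBOX_ID; /* mailbox attention */
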
/* SLI-2 Port Control Block */
/* SLIM POINTER */
@@ -2678,10 +2923,12 @@ typedef union {
* NEW_FEATURE
*/
struct config_hbq_var varCfgHbq;/* cmd = 0x7c (CONFIG_HBQ) */
+ struct update_cfg_var varUpdateCfg; /* cmd = 0x1B (UPDATE_CFG)*/
CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */
REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */
UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */
ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */
+ struct config_msi_var varCfgMSI;/* cmd = x30 (CONFIG_MSI) */
} MAILVARIANTS;
/*
@@ -2715,11 +2962,19 @@ struct sli3_pgp {
uint32_t hbq_get[16];
};
-typedef union {
- struct sli2_desc s2;
- struct sli3_desc s3;
- struct sli3_pgp s3_pgp;
-} SLI_VAR;
+struct sli3_inb_pgp {
+ uint32_t ha_copy;
+ uint32_t counter;
+ struct lpfc_pgp port[MAX_RINGS];
+ uint32_t hbq_get[16];
+};
+
+union sli_var {
+ struct sli2_desc s2;
+ struct sli3_desc s3;
+ struct sli3_pgp s3_pgp;
+ struct sli3_inb_pgp s3_inb_pgp;
+};
typedef struct {
#ifdef __BIG_ENDIAN_BITFIELD
@@ -2737,7 +2992,7 @@ typedef struct {
#endif
MAILVARIANTS un;
- SLI_VAR us;
+ union sli_var us;
} MAILBOX_t;
/*
@@ -3105,6 +3360,115 @@ struct que_xri64cx_ext_fields {
struct lpfc_hbq_entry buff[5];
};
+struct sli3_bg_fields {
+ uint32_t filler[6]; /* word 8-13 in IOCB */
+ uint32_t bghm; /* word 14 - BlockGuard High Water Mark */
+/* Bitfields for bgstat (BlockGuard Status - word 15 of IOCB) */
+#define BGS_BIDIR_BG_PROF_MASK 0xff000000
+#define BGS_BIDIR_BG_PROF_SHIFT 24
+#define BGS_BIDIR_ERR_COND_FLAGS_MASK 0x003f0000
+#define BGS_BIDIR_ERR_COND_SHIFT 16
+#define BGS_BG_PROFILE_MASK 0x0000ff00
+#define BGS_BG_PROFILE_SHIFT 8
+#define BGS_INVALID_PROF_MASK 0x00000020
+#define BGS_INVALID_PROF_SHIFT 5
+#define BGS_UNINIT_DIF_BLOCK_MASK 0x00000010
+#define BGS_UNINIT_DIF_BLOCK_SHIFT 4
+#define BGS_HI_WATER_MARK_PRESENT_MASK 0x00000008
+#define BGS_HI_WATER_MARK_PRESENT_SHIFT 3
+#define BGS_REFTAG_ERR_MASK 0x00000004
+#define BGS_REFTAG_ERR_SHIFT 2
+#define BGS_APPTAG_ERR_MASK 0x00000002
+#define BGS_APPTAG_ERR_SHIFT 1
+#define BGS_GUARD_ERR_MASK 0x00000001
+#define BGS_GUARD_ERR_SHIFT 0
+ uint32_t bgstat; /* word 15 - BlockGuard Status */
+};
+
+static inline uint32_t
+lpfc_bgs_get_bidir_bg_prof(uint32_t bgstat)
+{
+ return (le32_to_cpu(bgstat) & BGS_BIDIR_BG_PROF_MASK) >>
+ BGS_BIDIR_BG_PROF_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_bidir_err_cond(uint32_t bgstat)
+{
+ return (le32_to_cpu(bgstat) & BGS_BIDIR_ERR_COND_FLAGS_MASK) >>
+ BGS_BIDIR_ERR_COND_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_bg_prof(uint32_t bgstat)
+{
+ return (le32_to_cpu(bgstat) & BGS_BG_PROFILE_MASK) >>
+ BGS_BG_PROFILE_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_invalid_prof(uint32_t bgstat)
+{
+ return (le32_to_cpu(bgstat) & BGS_INVALID_PROF_MASK) >>
+ BGS_INVALID_PROF_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_uninit_dif_block(uint32_t bgstat)
+{
+ return (le32_to_cpu(bgstat) & BGS_UNINIT_DIF_BLOCK_MASK) >>
+ BGS_UNINIT_DIF_BLOCK_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_hi_water_mark_present(uint32_t bgstat)
+{
+ return (le32_to_cpu(bgstat) & BGS_HI_WATER_MARK_PRESENT_MASK) >>
+ BGS_HI_WATER_MARK_PRESENT_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_reftag_err(uint32_t bgstat)
+{
+ return (le32_to_cpu(bgstat) & BGS_REFTAG_ERR_MASK) >>
+ BGS_REFTAG_ERR_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_apptag_err(uint32_t bgstat)
+{
+ return (le32_to_cpu(bgstat) & BGS_APPTAG_ERR_MASK) >>
+ BGS_APPTAG_ERR_SHIFT;
+}
+
+static inline uint32_t
+lpfc_bgs_get_guard_err(uint32_t bgstat)
+{
+ return (le32_to_cpu(bgstat) & BGS_GUARD_ERR_MASK) >>
+ BGS_GUARD_ERR_SHIFT;
+}
+
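The accessors above decode individual status facts out of bgstat. A small hedged helper (hypothetical, not part of this patch) shows the aggregation a completion path would typically do:

	/* Return nonzero if the completed BlockGuard I/O reported any
	 * protection error; bgstat is word 15 of the response IOCB.
	 */
	static int lpfc_bg_io_failed(uint32_t bgstat)
	{
		return lpfc_bgs_get_guard_err(bgstat) ||
		       lpfc_bgs_get_apptag_err(bgstat) ||
		       lpfc_bgs_get_reftag_err(bgstat) ||
		       lpfc_bgs_get_invalid_prof(bgstat) ||
		       lpfc_bgs_get_uninit_dif_block(bgstat);
	}
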
+#define LPFC_EXT_DATA_BDE_COUNT 3
+struct fcp_irw_ext {
+ uint32_t io_tag64_low;
+ uint32_t io_tag64_high;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint8_t reserved1;
+ uint8_t reserved2;
+ uint8_t reserved3;
+ uint8_t ebde_count;
+#else /* __LITTLE_ENDIAN */
+ uint8_t ebde_count;
+ uint8_t reserved3;
+ uint8_t reserved2;
+ uint8_t reserved1;
+#endif
+ uint32_t reserved4;
+ struct ulp_bde64 rbde; /* response bde */
+ struct ulp_bde64 dbde[LPFC_EXT_DATA_BDE_COUNT]; /* data BDE or BPL */
+ uint8_t icd[32]; /* immediate command data (32 bytes) */
+};
+
typedef struct _IOCB { /* IOCB structure */
union {
GENERIC_RSP grsp; /* Generic response */
@@ -3190,8 +3554,11 @@ typedef struct _IOCB { /* IOCB structure */
/* words 8-31 used for que_xri_cx iocb */
struct que_xri64cx_ext_fields que_xri64cx_ext_words;
-
+ struct fcp_irw_ext fcp_ext;
uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */
+
+ /* words 8-15 for BlockGuard */
+ struct sli3_bg_fields sli3_bg;
} unsli3;
#define ulpCt_h ulpXS
@@ -3292,3 +3659,10 @@ lpfc_error_lost_link(IOCB_t *iocbp)
iocbp->un.ulpWord[4] == IOERR_LINK_DOWN ||
iocbp->un.ulpWord[4] == IOERR_SLI_DOWN));
}
+
+#define MENLO_TRANSPORT_TYPE 0xfe
+#define MENLO_CONTEXT 0
+#define MENLO_PU 3
+#define MENLO_TIMEOUT 30
+#define SETVAR_MLOMNT 0x103107
+#define SETVAR_MLORST 0x103007
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index d51a2a4b43eb..4c77038c8f1c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -36,6 +36,7 @@
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@@ -44,6 +45,12 @@
#include "lpfc_vport.h"
#include "lpfc_version.h"
+char *_dump_buf_data;
+unsigned long _dump_buf_data_order;
+char *_dump_buf_dif;
+unsigned long _dump_buf_dif_order;
+spinlock_t _dump_buf_lock;
+
static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
@@ -52,17 +59,20 @@ static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
-/************************************************************************/
-/* */
-/* lpfc_config_port_prep */
-/* This routine will do LPFC initialization prior to the */
-/* CONFIG_PORT mailbox command. This will be initialized */
-/* as a SLI layer callback routine. */
-/* This routine returns 0 on success or -ERESTART if it wants */
-/* the SLI layer to reset the HBA and try again. Any */
-/* other return value indicates an error. */
-/* */
-/************************************************************************/
+/**
+ * lpfc_config_port_prep: Perform lpfc initialization prior to config port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
+ * mailbox command. It retrieves the revision information from the HBA and
+ * collects the Vital Product Data (VPD) about the HBA for preparing the
+ * configuration of the HBA.
+ *
+ * Return codes:
+ * 0 - success.
+ * -ERESTART - requests the SLI layer to reset the HBA and try again.
+ * Any other value - indicates an error.
+ **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
@@ -180,12 +190,9 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
sizeof (phba->RandomData));
/* Get adapter VPD information */
- pmb->context2 = kmalloc(DMP_RSP_SIZE, GFP_KERNEL);
- if (!pmb->context2)
- goto out_free_mbox;
lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
if (!lpfc_vpd_data)
- goto out_free_context2;
+ goto out_free_mbox;
do {
lpfc_dump_mem(phba, pmb, offset);
@@ -200,21 +207,29 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
}
if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
- lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset,
+ lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
+ lpfc_vpd_data + offset,
mb->un.varDmp.word_cnt);
offset += mb->un.varDmp.word_cnt;
} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
kfree(lpfc_vpd_data);
-out_free_context2:
- kfree(pmb->context2);
out_free_mbox:
mempool_free(pmb, phba->mbox_mem_pool);
return 0;
}
-/* Completion handler for config async event mailbox command. */
+/**
+ * lpfc_config_async_cmpl: Completion handler for config async event mbox cmd.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmboxq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This is the completion handler for the driver's asynchronous event
+ * configuration mailbox command to the device. If the mailbox command
+ * returns successfully, it will set the internal async event support
+ * flag to 1; otherwise, it will set the flag to 0.
+ **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
@@ -226,16 +241,64 @@ lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
return;
}
-/************************************************************************/
-/* */
-/* lpfc_config_port_post */
-/* This routine will do LPFC initialization after the */
-/* CONFIG_PORT mailbox command. This will be initialized */
-/* as a SLI layer callback routine. */
-/* This routine returns 0 on success. Any other return value */
-/* indicates an error. */
-/* */
-/************************************************************************/
+/**
+ * lpfc_dump_wakeup_param_cmpl: Completion handler for dump memory mailbox
+ * command used for getting wake up parameters.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmboxq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This is the completion handler for the dump mailbox command used to get
+ * wake up parameters. When this command completes, the response contains
+ * the Option ROM version of the HBA. This function translates the version
+ * number into a human readable string and stores it in OptionROMVersion.
+ **/
+static void
+lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+ struct prog_id *prg;
+ uint32_t prog_id_word;
+ char dist = ' ';
+ /* character array used for decoding dist type. */
+ char dist_char[] = "nabx";
+
+ if (pmboxq->mb.mbxStatus != MBX_SUCCESS) {
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return;
+ }
+
+ prg = (struct prog_id *) &prog_id_word;
+
+ /* word 7 contains the option rom version */
+ prog_id_word = pmboxq->mb.un.varWords[7];
+
+ /* Decode the Option rom version word to a readable string */
+ if (prg->dist < 4)
+ dist = dist_char[prg->dist];
+
+ if ((prg->dist == 3) && (prg->num == 0))
+ sprintf(phba->OptionROMVersion, "%d.%d%d",
+ prg->ver, prg->rev, prg->lev);
+ else
+ sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
+ prg->ver, prg->rev, prg->lev,
+ dist, prg->num);
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return;
+}
+
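To make the decoding concrete: a hypothetical option ROM word with ver=2, rev=8, lev=2, dist=2, and num=3 picks 'b' from dist_char and formats as "2.82b3", while the special case dist=3, num=0 takes the shorter branch and yields just "2.82".
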
+/**
+ * lpfc_config_port_post: Perform lpfc initialization after config port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine will do LPFC initialization after the CONFIG_PORT mailbox
+ * command call. It performs all internal resource and state setups on the
+ * port: post IOCB buffers, enable appropriate host interrupt attentions,
+ * ELS ring timers, etc.
+ *
+ * Return codes
+ * 0 - success.
+ * Any other value - error.
+ **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
@@ -378,6 +441,29 @@ lpfc_config_port_post(struct lpfc_hba *phba)
if (phba->sli_rev != 3)
lpfc_post_rcv_buf(phba);
+ /*
+ * Configure HBA MSI-X attention conditions to messages when in MSI-X mode
+ */
+ if (phba->intr_type == MSIX) {
+ rc = lpfc_config_msi(phba, pmb);
+ if (rc) {
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -EIO;
+ }
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "0352 Config MSI mailbox command "
+ "failed, mbxCmd x%x, mbxStatus x%x\n",
+ pmb->mb.mbxCommand, pmb->mb.mbxStatus);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -EIO;
+ }
+ }
+
+ /* Initialize ERATT handling flag */
+ phba->hba_flag &= ~HBA_ERATT_HANDLED;
+
/* Enable appropriate host interrupts */
spin_lock_irq(&phba->hbalock);
status = readl(phba->HCregaddr);
@@ -393,26 +479,26 @@ lpfc_config_port_post(struct lpfc_hba *phba)
if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
(phba->cfg_poll & DISABLE_FCP_RING_INT))
- status &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
+ status &= ~(HC_R0INT_ENA);
writel(status, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
spin_unlock_irq(&phba->hbalock);
- /*
- * Setup the ring 0 (els) timeout handler
- */
- timeout = phba->fc_ratov << 1;
+ /* Set up ring-0 (ELS) timer */
+ timeout = phba->fc_ratov * 2;
mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
+ /* Set up heart beat (HB) timer */
mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
phba->hb_outstanding = 0;
phba->last_completion_time = jiffies;
+ /* Set up error attention (ERATT) polling timer */
+ mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- pmb->vport = vport;
- rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
lpfc_set_loopback_flag(phba);
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0454 Adapter failed to init, mbxCmd x%x "
@@ -447,19 +533,34 @@ lpfc_config_port_post(struct lpfc_hba *phba)
rc);
mempool_free(pmb, phba->mbox_mem_pool);
}
- return (0);
+
+ /* Get Option rom version */
+ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ lpfc_dump_wakeup_param(phba, pmb);
+ pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
+ pmb->vport = phba->pport;
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+
+ if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
+ "to get Option ROM version status x%x\n.", rc);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ }
+
+ return 0;
}
-/************************************************************************/
-/* */
-/* lpfc_hba_down_prep */
-/* This routine will do LPFC uninitialization before the */
-/* HBA is reset when bringing down the SLI Layer. This will be */
-/* initialized as a SLI layer callback routine. */
-/* This routine returns 0 on success. Any other return value */
-/* indicates an error. */
-/* */
-/************************************************************************/
+/**
+ * lpfc_hba_down_prep: Perform lpfc uninitialization prior to HBA reset.
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will do LPFC uninitialization before the HBA is reset when
+ * bringing down the SLI Layer.
+ *
+ * Return codes
+ * 0 - success.
+ * Any other value - error.
+ **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
@@ -481,15 +582,17 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
return 0;
}
-/************************************************************************/
-/* */
-/* lpfc_hba_down_post */
-/* This routine will do uninitialization after the HBA is reset */
-/* when bringing down the SLI Layer. */
-/* This routine returns 0 on success. Any other return value */
-/* indicates an error. */
-/* */
-/************************************************************************/
+/**
+ * lpfc_hba_down_post: Perform lpfc uninitialization after HBA reset.
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will do uninitialization after the HBA is reset when bring
+ * down the SLI Layer.
+ *
+ * Return codes
+ * 0 - success.
+ * Any other value - error.
+ **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
@@ -548,7 +651,18 @@ lpfc_hba_down_post(struct lpfc_hba *phba)
return 0;
}
-/* HBA heart beat timeout handler */
+/**
+ * lpfc_hb_timeout: The HBA-timer timeout handler.
+ * @ptr: unsigned long holds the pointer to lpfc hba data structure.
+ *
+ * This is the HBA-timer timeout handler registered to the lpfc driver. When
+ * this timer fires, a HBA timeout event shall be posted to the lpfc driver
+ * work-port-events bitmap and the worker thread is notified. This timeout
+ * event will be used by the worker thread to invoke the actual timeout
+ * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
+ * be performed in the timeout handler and the HBA timeout event bit shall
+ * be cleared by the worker thread after it has taken the event bitmap out.
+ **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
@@ -557,17 +671,36 @@ lpfc_hb_timeout(unsigned long ptr)
unsigned long iflag;
phba = (struct lpfc_hba *)ptr;
+
+ /* Check for heart beat timeout conditions */
spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
if (!tmo_posted)
phba->pport->work_port_events |= WORKER_HB_TMO;
spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
+ /* Tell the worker thread there is work to do */
if (!tmo_posted)
lpfc_worker_wake_up(phba);
return;
}
+/**
+ * lpfc_hb_mbox_cmpl: The lpfc heart-beat mailbox command callback function.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmboxq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This is the callback function to the lpfc heart-beat mailbox command.
+ * If configured, the lpfc driver issues the heart-beat mailbox command to
+ * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
+ * heart-beat mailbox command is issued, the driver shall set up the heart-beat
+ * timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds and mark the
+ * heart-beat outstanding state. Once the mailbox command comes back and
+ * no error conditions are detected, the heart-beat mailbox command timer is
+ * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
+ * state is cleared for the next heart-beat. If the timer expires with the
+ * heart-beat outstanding state set, the driver will put the HBA offline.
+ **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
@@ -577,6 +710,7 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
phba->hb_outstanding = 0;
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+ /* Check and reset heart-beat timer if necessary */
mempool_free(pmboxq, phba->mbox_mem_pool);
if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
!(phba->link_state == LPFC_HBA_ERROR) &&
@@ -586,6 +720,22 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
return;
}
+/**
+ * lpfc_hb_timeout_handler: The HBA-timer timeout handler.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This is the actual HBA-timer timeout handler to be invoked by the worker
+ * thread whenever the HBA timer fired and HBA-timeout event posted. This
+ * handler performs any periodic operations needed for the device. If such
+ * periodic event has already been attended to either in the interrupt handler
+ * or by processing slow-ring or fast-ring events within the HBA-timer
+ * timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply resets
+ * the timer for the next timeout period. If the lpfc heart-beat mailbox
+ * command is configured and there is no heart-beat mailbox command
+ * outstanding, a heart-beat mailbox is issued and the timer is set properly.
+ * Otherwise, if there has been a heart-beat mailbox command outstanding,
+ * the HBA shall be put offline.
+ **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
@@ -601,11 +751,6 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
return;
spin_lock_irq(&phba->pport->work_port_lock);
- /* If the timer is already canceled do nothing */
- if (!(phba->pport->work_port_events & WORKER_HB_TMO)) {
- spin_unlock_irq(&phba->pport->work_port_lock);
- return;
- }
if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
jiffies)) {
@@ -684,6 +829,13 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
}
}
+/**
+ * lpfc_offline_eratt: Bring lpfc offline on hardware error attention.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to bring the HBA offline when HBA hardware error
+ * other than Port Error 6 has been detected.
+ **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
@@ -704,14 +856,16 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
return;
}
-/************************************************************************/
-/* */
-/* lpfc_handle_eratt */
-/* This routine will handle processing a Host Attention */
-/* Error Status event. This will be initialized */
-/* as a SLI layer callback routine. */
-/* */
-/************************************************************************/
+/**
+ * lpfc_handle_eratt: The HBA hardware error handler.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to handle the following HBA hardware error
+ * conditions:
+ * 1 - HBA error attention interrupt
+ * 2 - DMA ring index out of range
+ * 3 - Mailbox command came back as unknown
+ **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
@@ -722,6 +876,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
unsigned long temperature;
struct temp_event temp_event_data;
struct Scsi_Host *shost;
+ struct lpfc_board_event_header board_event;
/* If the pci channel is offline, ignore possible errors,
* since we cannot communicate with the pci card anyway. */
@@ -731,6 +886,15 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
if (!phba->cfg_enable_hba_reset)
return;
+ /* Send an internal error event to mgmt application */
+ board_event.event_type = FC_REG_BOARD_EVENT;
+ board_event.subcategory = LPFC_EVENT_PORTINTERR;
+ shost = lpfc_shost_from_vport(phba->pport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(board_event),
+ (char *) &board_event,
+ LPFC_NL_VENDOR_ID);
+
if (phba->work_hs & HS_FFER6) {
/* Re-establishing Link */
lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
@@ -771,7 +935,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
temp_event_data.data = (uint32_t)temperature;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0459 Adapter maximum temperature exceeded "
+ "0406 Adapter maximum temperature exceeded "
"(%ld), taking this port offline "
"Data: x%x x%x x%x\n",
temperature, phba->work_hs,
@@ -791,8 +955,8 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
} else {
/* The if clause above forces this code path when the status
- * failure is a value other than FFER6. Do not call the offline
- * twice. This is the adapter hardware error path.
+ * failure is a value other than FFER6. Do not call the offline
+ * twice. This is the adapter hardware error path.
*/
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0457 Adapter Hardware Error "
@@ -808,16 +972,16 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
lpfc_offline_eratt(phba);
}
+ return;
}
-/************************************************************************/
-/* */
-/* lpfc_handle_latt */
-/* This routine will handle processing a Host Attention */
-/* Link Status event. This will be initialized */
-/* as a SLI layer callback routine. */
-/* */
-/************************************************************************/
+/**
+ * lpfc_handle_latt: The HBA link event handler.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked from the worker thread to handle a HBA host
+ * attention link event.
+ **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
@@ -898,12 +1062,20 @@ lpfc_handle_latt_err_exit:
return;
}
-/************************************************************************/
-/* */
-/* lpfc_parse_vpd */
-/* This routine will parse the VPD data */
-/* */
-/************************************************************************/
+/**
+ * lpfc_parse_vpd: Parse VPD (Vital Product Data).
+ * @phba: pointer to lpfc hba data structure.
+ * @vpd: pointer to the vital product data.
+ * @len: length of the vital product data in bytes.
+ *
+ * This routine parses the Vital Product Data (VPD). The VPD is treated as
+ * an array of characters. In this routine, the ModelName, ProgramType,
+ * ModelDesc, and other fields of the phba data structure will be populated.
+ *
+ * Return codes
+ * 0 - pointer to the VPD passed in is NULL
+ * 1 - success
+ **/
static int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
@@ -1040,12 +1212,25 @@ lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
return(1);
}
+/**
+ * lpfc_get_hba_model_desc: Retrieve HBA device model name and description.
+ * @phba: pointer to lpfc hba data structure.
+ * @mdp: pointer to the data structure to hold the derived model name.
+ * @descp: pointer to the data structure to hold the derived description.
+ *
+ * This routine retrieves the HBA's description based on its registered PCI
+ * device ID. The @descp passed into this function points to an array of 256
+ * chars. It shall be returned with the model name, maximum speed, and the
+ * host bus type.
+ * The @mdp passed into this function points to an array of 80 chars. When the
+ * function returns, the @mdp will be filled with the model name.
+ **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
lpfc_vpd_t *vp;
uint16_t dev_id = phba->pcidev->device;
int max_speed;
+ int GE = 0;
struct {
char * name;
int max_speed;
@@ -1177,6 +1362,19 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
case PCI_DEVICE_ID_SAT_S:
m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
break;
+ case PCI_DEVICE_ID_HORNET:
+ m = (typeof(m)){"LP21000", max_speed, "PCIe"};
+ GE = 1;
+ break;
+ case PCI_DEVICE_ID_PROTEUS_VF:
+ m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
+ break;
+ case PCI_DEVICE_ID_PROTEUS_PF:
+ m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
+ break;
+ case PCI_DEVICE_ID_PROTEUS_S:
+ m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
+ break;
default:
m = (typeof(m)){ NULL };
break;
@@ -1186,18 +1384,25 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
snprintf(mdp, 79,"%s", m.name);
if (descp && descp[0] == '\0')
snprintf(descp, 255,
- "Emulex %s %dGb %s Fibre Channel Adapter",
- m.name, m.max_speed, m.bus);
+ "Emulex %s %d%s %s %s",
+ m.name, m.max_speed,
+ (GE) ? "GE" : "Gb",
+ m.bus,
+ (GE) ? "FCoE Adapter" : "Fibre Channel Adapter");
}
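For concreteness: with the GE flag set (the HORNET entry, assuming a max_speed of 10), the description formats as "Emulex LP21000 10GE PCIe FCoE Adapter", while a Fibre Channel part such as PROTEUS_PF would format as "Emulex LPev12000 8Gb PCIe IOV Fibre Channel Adapter" (speeds shown are assumed, not taken from this patch).
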
-/**************************************************/
-/* lpfc_post_buffer */
-/* */
-/* This routine will post count buffers to the */
-/* ring with the QUE_RING_BUF_CN command. This */
-/* allows 3 buffers / command to be posted. */
-/* Returns the number of buffers NOT posted. */
-/**************************************************/
+/**
+ * lpfc_post_buffer: Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring.
+ * @phba: pointer to lpfc hba data structure.
+ * @pring: pointer to an IOCB ring.
+ * @cnt: the number of IOCBs to be posted to the IOCB ring.
+ *
+ * This routine posts a given number of IOCBs with the associated DMA buffer
+ * descriptors specified by the cnt argument to the given IOCB ring.
+ *
+ * Return codes
+ * The number of IOCBs NOT able to be posted to the IOCB ring.
+ **/
int
lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
@@ -1287,12 +1492,17 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
return 0;
}
-/************************************************************************/
-/* */
-/* lpfc_post_rcv_buf */
-/* This routine post initial rcv buffers to the configured rings */
-/* */
-/************************************************************************/
+/**
+ * lpfc_post_rcv_buf: Post the initial receive IOCB buffers to ELS ring.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine posts initial receive IOCB buffers to the ELS ring. The
+ * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
+ * set to 64 IOCBs.
+ *
+ * Return codes
+ * 0 - success (currently always success)
+ **/
static int
lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
@@ -1307,11 +1517,13 @@ lpfc_post_rcv_buf(struct lpfc_hba *phba)
#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
-/************************************************************************/
-/* */
-/* lpfc_sha_init */
-/* */
-/************************************************************************/
+/**
+ * lpfc_sha_init: Set up initial array of hash table entries.
+ * @HashResultPointer: pointer to an array as hash table.
+ *
+ * This routine sets up the initial values to the array of hash table entries
+ * for the LC HBAs.
+ **/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
@@ -1322,11 +1534,16 @@ lpfc_sha_init(uint32_t * HashResultPointer)
HashResultPointer[4] = 0xC3D2E1F0;
}
-/************************************************************************/
-/* */
-/* lpfc_sha_iterate */
-/* */
-/************************************************************************/
+/**
+ * lpfc_sha_iterate: Iterate initial hash table with the working hash table.
+ * @HashResultPointer: pointer to an initial/result hash table.
+ * @HashWorkingPointer: pointer to a working hash table.
+ *
+ * This routine iterates an initial hash table pointed to by @HashResultPointer
+ * with the values from the working hash table pointed to by @HashWorkingPointer.
+ * The results are put back into the initial hash table, returned through
+ * @HashResultPointer as the result hash table.
+ **/
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
@@ -1374,22 +1591,29 @@ lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
}
-/************************************************************************/
-/* */
-/* lpfc_challenge_key */
-/* */
-/************************************************************************/
+/**
+ * lpfc_challenge_key: Create challenge key based on WWPN of the HBA.
+ * @RandomChallenge: pointer to the entry of host challenge random number array.
+ * @HashWorking: pointer to the entry of the working hash array.
+ *
+ * This routine calculates the working hash array referred to by @HashWorking
+ * from the challenge random numbers associated with the host, referred to by
+ * @RandomChallenge. The result is put into the entry of the working hash
+ * array and returned by reference through @HashWorking.
+ **/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
*HashWorking = (*RandomChallenge ^ *HashWorking);
}
-/************************************************************************/
-/* */
-/* lpfc_hba_init */
-/* */
-/************************************************************************/
+/**
+ * lpfc_hba_init: Perform special handling for LC HBA initialization.
+ * @phba: pointer to lpfc hba data structure.
+ * @hbainit: pointer to an array of unsigned 32-bit integers.
+ *
+ * This routine performs the special handling for LC HBA initialization.
+ **/
void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
@@ -1412,6 +1636,15 @@ lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
kfree(HashWorking);
}
+/**
+ * lpfc_cleanup: Performs vport cleanups before deleting a vport.
+ * @vport: pointer to a virtual N_Port data structure.
+ *
+ * This routine performs the necessary cleanups before deleting the @vport.
+ * It invokes the discovery state machine to perform necessary state
+ * transitions and to release the ndlps associated with the @vport. Note,
+ * the physical port is treated as @vport 0.
+ **/
void
lpfc_cleanup(struct lpfc_vport *vport)
{
@@ -1459,14 +1692,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RM);
- /* nlp_type zero is not defined, nlp_flag zero also not defined,
- * nlp_state is unused, this happens when
- * an initiator has logged
- * into us so cleanup this ndlp.
- */
- if ((ndlp->nlp_type == 0) && (ndlp->nlp_flag == 0) &&
- (ndlp->nlp_state == 0))
- lpfc_nlp_put(ndlp);
}
/* At this point, ALL ndlp's should be gone
@@ -1482,7 +1707,7 @@ lpfc_cleanup(struct lpfc_vport *vport)
&vport->fc_nodes, nlp_listp) {
lpfc_printf_vlog(ndlp->vport, KERN_ERR,
LOG_NODE,
- "0282: did:x%x ndlp:x%p "
+ "0282 did:x%x ndlp:x%p "
"usgmap:x%x refcnt:%d\n",
ndlp->nlp_DID, (void *)ndlp,
ndlp->nlp_usg_map,
@@ -1498,6 +1723,14 @@ lpfc_cleanup(struct lpfc_vport *vport)
return;
}
+/**
+ * lpfc_stop_vport_timers: Stop all the timers associated with a vport.
+ * @vport: pointer to a virtual N_Port data structure.
+ *
+ * This routine stops all the timers associated with a @vport. This function
+ * is invoked before disabling or deleting a @vport. Note that the physical
+ * port is treated as @vport 0.
+ **/
void
lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
@@ -1507,6 +1740,13 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
return;
}
+/**
+ * lpfc_stop_phba_timers: Stop all the timers associated with an HBA.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine stops all the timers associated with a HBA. This function is
+ * invoked before either putting a HBA offline or unloading the driver.
+ **/
static void
lpfc_stop_phba_timers(struct lpfc_hba *phba)
{
@@ -1516,9 +1756,20 @@ lpfc_stop_phba_timers(struct lpfc_hba *phba)
del_timer_sync(&phba->fabric_block_timer);
phba->hb_outstanding = 0;
del_timer_sync(&phba->hb_tmofunc);
+ del_timer_sync(&phba->eratt_poll);
return;
}
+/**
+ * lpfc_block_mgmt_io: Mark a HBA's management interface as blocked.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine marks a HBA's management interface as blocked. Once the HBA's
+ * management interface is marked as blocked, all user space access to
+ * the HBA, whether from the sysfs interface or the libdfc interface, will
+ * be blocked. The HBA is set to block the management interface when the
+ * driver prepares the HBA interface for online or offline.
+ **/
static void
lpfc_block_mgmt_io(struct lpfc_hba * phba)
{
@@ -1529,6 +1780,18 @@ lpfc_block_mgmt_io(struct lpfc_hba * phba)
spin_unlock_irqrestore(&phba->hbalock, iflag);
}
+/**
+ * lpfc_online: Initialize and bring a HBA online.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine initializes the HBA and brings it online. During this
+ * process, the management interface is blocked to prevent user space access
+ * to the HBA from interfering with the driver initialization.
+ *
+ * Return codes
+ * 0 - successful
+ * 1 - failed
+ **/
int
lpfc_online(struct lpfc_hba *phba)
{
@@ -1574,6 +1837,17 @@ lpfc_online(struct lpfc_hba *phba)
return 0;
}
+/**
+ * lpfc_unblock_mgmt_io: Mark a HBA's management interface to be not blocked.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine marks a HBA's management interface as not blocked. Once the
+ * HBA's management interface is marked as not blocked, all user space
+ * access to the HBA, whether from the sysfs interface or the libdfc
+ * interface, will be allowed. The HBA is set to block the management
+ * interface when the driver prepares the HBA interface for online or
+ * offline and then set to unblock the management interface afterwards.
+ **/
void
lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
{
@@ -1584,6 +1858,14 @@ lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
spin_unlock_irqrestore(&phba->hbalock, iflag);
}
+/**
+ * lpfc_offline_prep: Prepare a HBA to be brought offline.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to prepare a HBA to be brought offline. It
+ * unregisters the login to all the nodes on all vports and flushes the
+ * mailbox queue so the HBA is ready to be brought offline.
+ **/
void
lpfc_offline_prep(struct lpfc_hba * phba)
{
@@ -1633,6 +1915,14 @@ lpfc_offline_prep(struct lpfc_hba * phba)
lpfc_sli_flush_mbox_queue(phba);
}
+/**
+ * lpfc_offline: Bring a HBA offline.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine actually brings a HBA offline. It stops all the timers
+ * associated with the HBA, brings down the SLI layer, and eventually
+ * marks the HBA as in offline state for the upper layer protocol.
+ **/
void
lpfc_offline(struct lpfc_hba *phba)
{
@@ -1670,12 +1960,17 @@ lpfc_offline(struct lpfc_hba *phba)
lpfc_destroy_vport_work_array(phba, vports);
}
-/******************************************************************************
-* Function name: lpfc_scsi_free
-*
-* Description: Called from lpfc_pci_remove_one free internal driver resources
-*
-******************************************************************************/
+/**
+ * lpfc_scsi_free: Free all the SCSI buffers and IOCBs from driver lists.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to free all the SCSI buffers and IOCBs from the driver
+ * lists back to the kernel. It is called from lpfc_pci_remove_one to free
+ * the internal resources before the device is removed from the system.
+ *
+ * Return codes
+ * 0 - successful (for now, it always returns 0)
+ **/
static int
lpfc_scsi_free(struct lpfc_hba *phba)
{
@@ -1704,6 +1999,22 @@ lpfc_scsi_free(struct lpfc_hba *phba)
return 0;
}
+/**
+ * lpfc_create_port: Create an FC port.
+ * @phba: pointer to lpfc hba data structure.
+ * @instance: a unique integer ID to this FC port.
+ * @dev: pointer to the device data structure.
+ *
+ * This routine creates a FC port for the upper layer protocol. The FC port
+ * can be created on top of either a physical port or a virtual port provided
+ * by the HBA. This routine also allocates a SCSI host data structure (shost)
+ * and associates it with the newly created FC port before adding the shost
+ * to the SCSI layer.
+ *
+ * Return codes
+ * @vport - pointer to the virtual N_Port data structure.
+ * NULL - port create failed.
+ **/
struct lpfc_vport *
lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
{
@@ -1732,6 +2043,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
shost->max_lun = vport->cfg_max_luns;
shost->this_id = -1;
shost->max_cmd_len = 16;
+
/*
* Set initial can_queue value since 0 is no longer supported and
* scsi_add_host will fail. This will be adjusted later based on the
@@ -1777,14 +2089,19 @@ out:
return NULL;
}
+/**
+ * destroy_port: Destroy an FC port.
+ * @vport: pointer to an lpfc virtual N_Port data structure.
+ *
+ * This routine destroys a FC port from the upper layer protocol. All the
+ * resources associated with the port are released.
+ **/
void
destroy_port(struct lpfc_vport *vport)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
- kfree(vport->vname);
-
lpfc_debugfs_terminate(vport);
fc_remove_host(shost);
scsi_remove_host(shost);
@@ -1797,6 +2114,16 @@ destroy_port(struct lpfc_vport *vport)
return;
}
+/**
+ * lpfc_get_instance: Get a unique integer ID.
+ *
+ * This routine allocates a unique integer ID from lpfc_hba_index pool. It
+ * uses the kernel idr facility to perform the task.
+ *
+ * Return codes:
+ * instance - a unique integer ID allocated as the new instance.
+ * -1 - lpfc get instance failed.
+ **/
int
lpfc_get_instance(void)
{
@@ -1810,11 +2137,21 @@ lpfc_get_instance(void)
return instance;
}
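As the comment above notes, the instance ID comes from an idr pool (lpfc_hba_index). Purely as a hedged editorial sketch of the classic two-step idr API this implies — my_index and my_get_instance are hypothetical stand-ins for lpfc_hba_index and the routine above:

	#include <linux/idr.h>
	#include <linux/gfp.h>

	static DEFINE_IDR(my_index);	/* hypothetical, plays the role of lpfc_hba_index */

	static int my_get_instance(void)
	{
		int instance;

		/* step 1: preload the idr; returns 0 if memory could not be reserved */
		if (!idr_pre_get(&my_index, GFP_KERNEL))
			return -1;
		/* step 2: allocate the lowest free ID into instance */
		if (idr_get_new(&my_index, NULL, &instance))
			return -1;
		return instance;
	}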
-/*
- * Note: there is no scan_start function as adapter initialization
- * will have asynchronously kicked off the link initialization.
- */
-
+/**
+ * lpfc_scan_finished: method for SCSI layer to detect whether scan is done.
+ * @shost: pointer to SCSI host data structure.
+ * @time: elapsed time of the scan in jiffies.
+ *
+ * This routine is called by the SCSI layer with a SCSI host to determine
+ * whether the host scan has finished.
+ *
+ * Note: there is no scan_start function as adapter initialization will have
+ * asynchronously kicked off the link initialization.
+ *
+ * Return codes
+ * 0 - SCSI host scan is not over yet.
+ * 1 - SCSI host scan is over.
+ **/
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
@@ -1858,6 +2195,13 @@ finished:
return stat;
}
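For context, the SCSI midlayer calls scan_start (if present) when scsi_scan_host() runs and then polls scan_finished with the elapsed jiffies until it returns nonzero. A minimal editorial sketch of how such a hook plugs into a host template — my_template, my_scan_finished, my_link_ready, and the 30-second cut-off are all hypothetical:

	#include <linux/jiffies.h>
	#include <scsi/scsi_host.h>

	static int my_link_ready;	/* hypothetical: set once link-up completes */

	/* return nonzero when the midlayer may start scanning for devices */
	static int my_scan_finished(struct Scsi_Host *shost, unsigned long time)
	{
		if (time >= 30 * HZ)	/* hypothetical: stop waiting after 30s */
			return 1;
		return my_link_ready;
	}

	static struct scsi_host_template my_template = {
		.name		= "my-hba",	/* hypothetical */
		.scan_finished	= my_scan_finished,
		/* no .scan_start: link bring-up was already kicked off elsewhere */
	};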
+/**
+ * lpfc_host_attrib_init: Initialize SCSI host attributes on a FC port.
+ * @shost: pointer to SCSI host data structure.
+ *
+ * This routine initializes the attributes of a given SCSI host on an FC
+ * port. The SCSI host can be on top of either a physical or a virtual port.
+ **/
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
@@ -1906,42 +2250,345 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
spin_unlock_irq(shost->host_lock);
}
+/**
+ * lpfc_enable_msix: Enable MSI-X interrupt mode.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI-X interrupt vectors. The kernel
+ * function pci_enable_msix() is called to enable the MSI-X vectors. Note that
+ * pci_enable_msix(), once invoked, enables either all or nothing, depending
+ * on the current availability of PCI vector resources. The device driver is
+ * responsible for calling request_irq() individually to register each MSI-X
+ * vector with an interrupt handler, which is done in this function. Note
+ * that when the device is later unloaded, the driver must call free_irq()
+ * on every MSI-X vector it has requested with request_irq() before calling
+ * pci_disable_msix(); failure to do so triggers a BUG_ON() and leaves the
+ * device with MSI-X enabled, leaking its vectors.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
static int
lpfc_enable_msix(struct lpfc_hba *phba)
{
- int error;
+ int rc, i;
+ LPFC_MBOXQ_t *pmb;
- phba->msix_entries[0].entry = 0;
- phba->msix_entries[0].vector = 0;
+ /* Set up MSI-X multi-message vectors */
+ for (i = 0; i < LPFC_MSIX_VECTORS; i++)
+ phba->msix_entries[i].entry = i;
- error = pci_enable_msix(phba->pcidev, phba->msix_entries,
+ /* Configure MSI-X capability structure */
+ rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
ARRAY_SIZE(phba->msix_entries));
- if (error) {
+ if (rc) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0420 Enable MSI-X failed (%d), continuing "
- "with MSI\n", error);
- pci_disable_msix(phba->pcidev);
- return error;
+ "0420 PCI enable MSI-X failed (%d)\n", rc);
+ goto msi_fail_out;
+ } else
+ for (i = 0; i < LPFC_MSIX_VECTORS; i++)
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0477 MSI-X entry[%d]: vector=x%x "
+ "message=%d\n", i,
+ phba->msix_entries[i].vector,
+ phba->msix_entries[i].entry);
+ /*
+ * Assign MSI-X vectors to interrupt handlers
+ */
+
+ /* vector-0 is associated to slow-path handler */
+ rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler,
+ IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0421 MSI-X slow-path request_irq failed "
+ "(%d)\n", rc);
+ goto msi_fail_out;
}
- error = request_irq(phba->msix_entries[0].vector, lpfc_intr_handler, 0,
- LPFC_DRIVER_NAME, phba);
- if (error) {
+ /* vector-1 is associated to fast-path handler */
+ rc = request_irq(phba->msix_entries[1].vector, &lpfc_fp_intr_handler,
+ IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba);
+
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0429 MSI-X fast-path request_irq failed "
+ "(%d)\n", rc);
+ goto irq_fail_out;
+ }
+
+ /*
+ * Configure HBA MSI-X attention conditions to messages
+ */
+ pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+
+ if (!pmb) {
+ rc = -ENOMEM;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0421 MSI-X request_irq failed (%d), "
- "continuing with MSI\n", error);
- pci_disable_msix(phba->pcidev);
+ "0474 Unable to allocate memory for issuing "
+ "MBOX_CONFIG_MSI command\n");
+ goto mem_fail_out;
}
- return error;
+ rc = lpfc_config_msi(phba, pmb);
+ if (rc)
+ goto mbx_fail_out;
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+ "0351 Config MSI mailbox command failed, "
+ "mbxCmd x%x, mbxStatus x%x\n",
+ pmb->mb.mbxCommand, pmb->mb.mbxStatus);
+ goto mbx_fail_out;
+ }
+
+ /* Free memory allocated for mailbox command */
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return rc;
+
+mbx_fail_out:
+ /* Free memory allocated for mailbox command */
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+mem_fail_out:
+ /* free the irq already requested */
+ free_irq(phba->msix_entries[1].vector, phba);
+
+irq_fail_out:
+ /* free the irq already requested */
+ free_irq(phba->msix_entries[0].vector, phba);
+
+msi_fail_out:
+ /* Unconfigure MSI-X capability structure */
+ pci_disable_msix(phba->pcidev);
+ return rc;
}
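The ordering rules spelled out above generalize beyond lpfc. What follows is a minimal editorial sketch of the same pattern, not part of this patch — my_setup_msix, my_handler, NVEC, and the "my-msix" label are hypothetical: enable all vectors at once, request_irq() each one, and unwind in reverse, always calling free_irq() before pci_disable_msix().

	#include <linux/pci.h>
	#include <linux/interrupt.h>

	#define NVEC 2				/* hypothetical vector count */

	static struct msix_entry my_entries[NVEC];

	static irqreturn_t my_handler(int irq, void *dev_id)	/* hypothetical */
	{
		return IRQ_HANDLED;
	}

	static int my_setup_msix(struct pci_dev *pdev, void *drvdata)
	{
		int rc, i;

		for (i = 0; i < NVEC; i++)
			my_entries[i].entry = i;	/* message number per vector */

		/* all-or-nothing: fails unless every requested vector is granted */
		rc = pci_enable_msix(pdev, my_entries, NVEC);
		if (rc)
			return rc;

		for (i = 0; i < NVEC; i++) {
			rc = request_irq(my_entries[i].vector, my_handler,
					 IRQF_SHARED, "my-msix", drvdata);
			if (rc)
				goto undo;
		}
		return 0;

	undo:
		/* free every vector already requested before disabling MSI-X */
		while (--i >= 0)
			free_irq(my_entries[i].vector, drvdata);
		pci_disable_msix(pdev);
		return rc;
	}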
+/**
+ * lpfc_disable_msix: Disable MSI-X interrupt mode.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release the MSI-X vectors and then disable the
+ * MSI-X interrupt mode.
+ **/
static void
lpfc_disable_msix(struct lpfc_hba *phba)
{
- free_irq(phba->msix_entries[0].vector, phba);
+ int i;
+
+ /* Free up MSI-X multi-message vectors */
+ for (i = 0; i < LPFC_MSIX_VECTORS; i++)
+ free_irq(phba->msix_entries[i].vector, phba);
+ /* Disable MSI-X */
pci_disable_msix(phba->pcidev);
}
+/**
+ * lpfc_enable_msi: Enable MSI interrupt mode.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI interrupt mode. The kernel
+ * function pci_enable_msi() is called to enable the MSI vector. The
+ * device driver is responsible for calling request_irq() to register the
+ * MSI vector with an interrupt handler, which is done in this function.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_enable_msi(struct lpfc_hba *phba)
+{
+ int rc;
+
+ rc = pci_enable_msi(phba->pcidev);
+ if (!rc)
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0462 PCI enable MSI mode success.\n");
+ else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0471 PCI enable MSI mode failed (%d)\n", rc);
+ return rc;
+ }
+
+ rc = request_irq(phba->pcidev->irq, lpfc_intr_handler,
+ IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+ if (rc) {
+ pci_disable_msi(phba->pcidev);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0478 MSI request_irq failed (%d)\n", rc);
+ }
+ return rc;
+}
+
+/**
+ * lpfc_disable_msi: Disable MSI interrupt mode.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable the MSI interrupt mode. The driver
+ * calls free_irq() on the MSI vector it has requested with request_irq()
+ * before calling pci_disable_msi(); failure to do so triggers a BUG_ON()
+ * and leaves the device with MSI enabled, leaking its vector.
+ **/
+
+static void
+lpfc_disable_msi(struct lpfc_hba *phba)
+{
+ free_irq(phba->pcidev->irq, phba);
+ pci_disable_msi(phba->pcidev);
+ return;
+}
+
+/**
+ * lpfc_log_intr_mode: Log the active interrupt mode
+ * @phba: pointer to lpfc hba data structure.
+ * @intr_mode: active interrupt mode adopted.
+ *
+ * This routine is invoked to log the currently active interrupt mode of
+ * the device.
+ **/
+static void
+lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
+{
+ switch (intr_mode) {
+ case 0:
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0470 Enable INTx interrupt mode.\n");
+ break;
+ case 1:
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0481 Enabled MSI interrupt mode.\n");
+ break;
+ case 2:
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0480 Enabled MSI-X interrupt mode.\n");
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0482 Illegal interrupt mode.\n");
+ break;
+ }
+ return;
+}
+
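+/**
+ * lpfc_stop_port: Stop the HBA port hardware activity.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine clears all interrupt enable conditions on the HBA, clears
+ * all pending interrupt attentions, stops all the HBA timers, and clears
+ * the outstanding port work events.
+ **/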
+static void
+lpfc_stop_port(struct lpfc_hba *phba)
+{
+ /* Clear all interrupt enable conditions */
+ writel(0, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ /* Clear all pending interrupts */
+ writel(0xffffffff, phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+
+ /* Reset some HBA SLI setup states */
+ lpfc_stop_phba_timers(phba);
+ phba->pport->work_port_events = 0;
+
+ return;
+}
+
+/**
+ * lpfc_enable_intr: Enable device interrupt.
+ * @phba: pointer to lpfc hba data structure.
+ * @cfg_mode: the configured interrupt mode (2: MSI-X, 1: MSI, 0: INTx).
+ *
+ * This routine is invoked to enable the device interrupt and associate the
+ * driver's interrupt handler(s) with interrupt vector(s). Depending on the
+ * interrupt mode configured for the driver, it will fall back from the
+ * configured interrupt mode to an interrupt mode supported by the platform,
+ * kernel, and device, in the order MSI-X -> MSI -> INTx.
+ *
+ * Return codes
+ * 0, 1, 2 - the interrupt mode actually enabled (INTx, MSI, MSI-X)
+ * LPFC_INTR_ERROR - no interrupt mode could be enabled
+ **/
+static uint32_t
+lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
+{
+ uint32_t intr_mode = LPFC_INTR_ERROR;
+ int retval;
+
+ if (cfg_mode == 2) {
+ /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
+ retval = lpfc_sli_config_port(phba, 3);
+ if (!retval) {
+ /* Now, try to enable MSI-X interrupt mode */
+ retval = lpfc_enable_msix(phba);
+ if (!retval) {
+ /* Indicate initialization to MSI-X mode */
+ phba->intr_type = MSIX;
+ intr_mode = 2;
+ }
+ }
+ }
+
+ /* Fallback to MSI if MSI-X initialization failed */
+ if (cfg_mode >= 1 && phba->intr_type == NONE) {
+ retval = lpfc_enable_msi(phba);
+ if (!retval) {
+ /* Indicate initialization to MSI mode */
+ phba->intr_type = MSI;
+ intr_mode = 1;
+ }
+ }
+
+ /* Fallback to INTx if both MSI-X/MSI initialization failed */
+ if (phba->intr_type == NONE) {
+ retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
+ IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+ if (!retval) {
+ /* Indicate initialization to INTx mode */
+ phba->intr_type = INTx;
+ intr_mode = 0;
+ }
+ }
+ return intr_mode;
+}
+
+/**
+ * lpfc_disable_intr: Disable device interrupt.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable device interrupt and disassociate the
+ * driver's interrupt handler(s) from interrupt vector(s). Depending on the
+ * interrupt mode, the driver will release the interrupt vector(s) for the
+ * message signaled interrupt.
+ **/
+static void
+lpfc_disable_intr(struct lpfc_hba *phba)
+{
+ /* Disable the currently initialized interrupt mode */
+ if (phba->intr_type == MSIX)
+ lpfc_disable_msix(phba);
+ else if (phba->intr_type == MSI)
+ lpfc_disable_msi(phba);
+ else if (phba->intr_type == INTx)
+ free_irq(phba->pcidev->irq, phba);
+
+ /* Reset interrupt management states */
+ phba->intr_type = NONE;
+ phba->sli.slistat.sli_intr = 0;
+
+ return;
+}
+
+/**
+ * lpfc_pci_probe_one: lpfc PCI probe func to register device to PCI subsystem.
+ * @pdev: pointer to PCI device
+ * @pid: pointer to PCI device identifier
+ *
+ * This routine is to be registered to the kernel's PCI subsystem. When an
+ * Emulex HBA is presented on the PCI bus, the kernel PCI subsystem looks at
+ * the PCI device-specific information and checks whether the driver states
+ * that it can support this kind of device. If the match is
+ * successful, the driver core invokes this routine. If this routine
+ * determines it can claim the HBA, it does all the initialization that it
+ * needs to do to handle the HBA properly.
+ *
+ * Return code
+ * 0 - driver can claim the device
+ * negative value - driver can not claim the device
+ **/
static int __devinit
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
@@ -1955,7 +2602,9 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
int error = -ENODEV, retval;
int i, hbq_count;
uint16_t iotag;
+ uint32_t cfg_mode, intr_mode;
int bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ struct lpfc_adapter_event_header adapter_event;
if (pci_enable_device_mem(pdev))
goto out;
@@ -1966,6 +2615,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
if (!phba)
goto out_release_regions;
+ atomic_set(&phba->fast_event_count, 0);
spin_lock_init(&phba->hbalock);
/* Initialize ndlp management spinlock */
@@ -1978,6 +2628,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
goto out_free_phba;
INIT_LIST_HEAD(&phba->port_list);
+ init_waitqueue_head(&phba->wait_4_mlo_m_q);
/*
* Get all the module params for configuring this host and then
* establish the host.
@@ -2000,8 +2651,12 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
init_timer(&phba->fabric_block_timer);
phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
phba->fabric_block_timer.data = (unsigned long) phba;
+ init_timer(&phba->eratt_poll);
+ phba->eratt_poll.function = lpfc_poll_eratt;
+ phba->eratt_poll.data = (unsigned long) phba;
pci_set_master(pdev);
+ pci_save_state(pdev);
pci_try_set_mwi(pdev);
if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0)
@@ -2019,7 +2674,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
bar2map_len = pci_resource_len(phba->pcidev, 2);
/* Map HBA SLIM to a kernel virtual address. */
- phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
+ phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
if (!phba->slim_memmap_p) {
error = -ENODEV;
dev_printk(KERN_ERR, &pdev->dev,
@@ -2037,12 +2692,18 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
}
/* Allocate memory for SLI-2 structures */
- phba->slim2p = dma_alloc_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE,
- &phba->slim2p_mapping, GFP_KERNEL);
- if (!phba->slim2p)
+ phba->slim2p.virt = dma_alloc_coherent(&phba->pcidev->dev,
+ SLI2_SLIM_SIZE,
+ &phba->slim2p.phys,
+ GFP_KERNEL);
+ if (!phba->slim2p.virt)
goto out_iounmap;
- memset(phba->slim2p, 0, SLI2_SLIM_SIZE);
+ memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
+ phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
+ phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
+ phba->IOCBs = (phba->slim2p.virt +
+ offsetof(struct lpfc_sli2_slim, IOCBs));
phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev,
lpfc_sli_hbq_size(),
@@ -2111,7 +2772,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
phba->fc_arbtov = FF_DEF_ARBTOV;
INIT_LIST_HEAD(&phba->work_list);
- phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT);
+ phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
/* Initialize the wait queue head for the kernel thread */
@@ -2144,37 +2805,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
lpfc_debugfs_initialize(vport);
pci_set_drvdata(pdev, shost);
- phba->intr_type = NONE;
-
- if (phba->cfg_use_msi == 2) {
- error = lpfc_enable_msix(phba);
- if (!error)
- phba->intr_type = MSIX;
- }
-
- /* Fallback to MSI if MSI-X initialization failed */
- if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) {
- retval = pci_enable_msi(phba->pcidev);
- if (!retval)
- phba->intr_type = MSI;
- else
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0452 Enable MSI failed, continuing "
- "with IRQ\n");
- }
-
- /* MSI-X is the only case the doesn't need to call request_irq */
- if (phba->intr_type != MSIX) {
- retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
- IRQF_SHARED, LPFC_DRIVER_NAME, phba);
- if (retval) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0451 Enable "
- "interrupt handler failed\n");
- error = retval;
- goto out_disable_msi;
- } else if (phba->intr_type != MSI)
- phba->intr_type = INTx;
- }
phba->MBslimaddr = phba->slim_memmap_p;
phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
@@ -2182,14 +2812,58 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
+ /* Configure sysfs attributes */
if (lpfc_alloc_sysfs_attr(vport)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1476 Failed to allocate sysfs attr\n");
error = -ENOMEM;
- goto out_free_irq;
+ goto out_destroy_port;
}
- if (lpfc_sli_hba_setup(phba)) {
- error = -ENODEV;
- goto out_remove_device;
+ cfg_mode = phba->cfg_use_msi;
+ while (true) {
+ /* Configure and enable interrupt */
+ intr_mode = lpfc_enable_intr(phba, cfg_mode);
+ if (intr_mode == LPFC_INTR_ERROR) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0426 Failed to enable interrupt.\n");
+ goto out_free_sysfs_attr;
+ }
+ /* HBA SLI setup */
+ if (lpfc_sli_hba_setup(phba)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1477 Failed to set up hba\n");
+ error = -ENODEV;
+ goto out_remove_device;
+ }
+
+ /* Wait 50ms for the interrupts of previous mailbox commands */
+ msleep(50);
+ /* Check active interrupts received */
+ if (phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
+ /* Log the current active interrupt mode */
+ phba->intr_mode = intr_mode;
+ lpfc_log_intr_mode(phba, intr_mode);
+ break;
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0451 Configure interrupt mode (%d) "
+ "failed active interrupt test.\n",
+ intr_mode);
+ if (intr_mode == 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0479 Failed to enable "
+ "interrupt.\n");
+ error = -ENODEV;
+ goto out_remove_device;
+ }
+ /* Stop HBA SLI setups */
+ lpfc_stop_port(phba);
+ /* Disable the current interrupt mode */
+ lpfc_disable_intr(phba);
+ /* Try next level of interrupt mode */
+ cfg_mode = --intr_mode;
+ }
}
/*
@@ -2197,6 +2871,75 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
* the value of can_queue.
*/
shost->can_queue = phba->cfg_hba_queue_depth - 10;
+ if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
+
+ if (lpfc_prot_mask && lpfc_prot_guard) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "1478 Registering BlockGuard with the "
+ "SCSI layer\n");
+
+ scsi_host_set_prot(shost, lpfc_prot_mask);
+ scsi_host_set_guard(shost, lpfc_prot_guard);
+ }
+ }
+
+ if (!_dump_buf_data) {
+ int pagecnt = 10;
+ while (pagecnt) {
+ spin_lock_init(&_dump_buf_lock);
+ _dump_buf_data =
+ (char *) __get_free_pages(GFP_KERNEL, pagecnt);
+ if (_dump_buf_data) {
+ printk(KERN_ERR "BLKGRD allocated %d pages for "
+ "_dump_buf_data at 0x%p\n",
+ (1 << pagecnt), _dump_buf_data);
+ _dump_buf_data_order = pagecnt;
+ memset(_dump_buf_data, 0, ((1 << PAGE_SHIFT)
+ << pagecnt));
+ break;
+ } else {
+ --pagecnt;
+ }
+
+ }
+
+ if (!_dump_buf_data_order)
+ printk(KERN_ERR "BLKGRD ERROR unable to allocate "
+ "memory for hexdump\n");
+
+ } else {
+ printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
+ "\n", _dump_buf_data);
+ }
+
+
+ if (!_dump_buf_dif) {
+ int pagecnt = 10;
+ while (pagecnt) {
+ _dump_buf_dif =
+ (char *) __get_free_pages(GFP_KERNEL, pagecnt);
+ if (_dump_buf_dif) {
+ printk(KERN_ERR "BLKGRD allocated %d pages for "
+ "_dump_buf_dif at 0x%p\n",
+ (1 << pagecnt), _dump_buf_dif);
+ _dump_buf_dif_order = pagecnt;
+ memset(_dump_buf_dif, 0, ((1 << PAGE_SHIFT)
+ << pagecnt));
+ break;
+ } else {
+ --pagecnt;
+ }
+
+ }
+
+ if (!_dump_buf_dif_order)
+ printk(KERN_ERR "BLKGRD ERROR unable to allocate "
+ "memory for hexdump\n");
+
+ } else {
+ printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
+ _dump_buf_dif);
+ }
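The two allocation loops above share one pattern: ask __get_free_pages() for a large order-N block and step the order down until something fits, remembering the order for the matching free_pages() at module exit. A minimal editorial sketch of that pattern — alloc_dump_buf and max_order are hypothetical:

	#include <linux/gfp.h>
	#include <linux/mm.h>
	#include <linux/string.h>

	static char *alloc_dump_buf(int max_order, int *order_out)
	{
		int order;
		char *buf;

		for (order = max_order; order > 0; order--) {
			/* __get_free_pages() hands back 2^order contiguous pages */
			buf = (char *)__get_free_pages(GFP_KERNEL, order);
			if (buf) {
				memset(buf, 0, PAGE_SIZE << order);
				*order_out = order;	/* needed later for free_pages() */
				return buf;
			}
		}
		return NULL;
	}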
lpfc_host_attrib_init(shost);
@@ -2206,27 +2949,30 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
spin_unlock_irq(shost->host_lock);
}
- scsi_scan_host(shost);
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0428 Perform SCSI scan\n");
+ /* Send board arrival event to upper layer */
+ adapter_event.event_type = FC_REG_ADAPTER_EVENT;
+ adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(adapter_event),
+ (char *) &adapter_event,
+ LPFC_NL_VENDOR_ID);
return 0;
out_remove_device:
- lpfc_free_sysfs_attr(vport);
spin_lock_irq(shost->host_lock);
vport->load_flag |= FC_UNLOADING;
spin_unlock_irq(shost->host_lock);
-out_free_irq:
lpfc_stop_phba_timers(phba);
phba->pport->work_port_events = 0;
-
- if (phba->intr_type == MSIX)
- lpfc_disable_msix(phba);
- else
- free_irq(phba->pcidev->irq, phba);
-
-out_disable_msi:
- if (phba->intr_type == MSI)
- pci_disable_msi(phba->pcidev);
+ lpfc_disable_intr(phba);
+ lpfc_sli_hba_down(phba);
+ lpfc_sli_brdrestart(phba);
+out_free_sysfs_attr:
+ lpfc_free_sysfs_attr(vport);
+out_destroy_port:
destroy_port(vport);
out_kthread_stop:
kthread_stop(phba->worker_thread);
@@ -2238,11 +2984,11 @@ out_free_iocbq:
}
lpfc_mem_free(phba);
out_free_hbqslimp:
- dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt,
- phba->hbqslimp.phys);
+ dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
+ phba->hbqslimp.virt, phba->hbqslimp.phys);
out_free_slim:
- dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p,
- phba->slim2p_mapping);
+ dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+ phba->slim2p.virt, phba->slim2p.phys);
out_iounmap:
iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
@@ -2262,23 +3008,40 @@ out:
return error;
}
+/**
+ * lpfc_pci_remove_one: lpfc PCI func to unregister device from PCI subsystem.
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be registered to the kernel's PCI subsystem. When an
+ * Emulex HBA is removed from PCI bus, it performs all the necessary cleanup
+ * for the HBA device to be removed from the PCI subsystem properly.
+ **/
static void __devexit
lpfc_pci_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_vport **vports;
struct lpfc_hba *phba = vport->phba;
+ int i;
int bars = pci_select_bars(pdev, IORESOURCE_MEM);
spin_lock_irq(&phba->hbalock);
vport->load_flag |= FC_UNLOADING;
spin_unlock_irq(&phba->hbalock);
- kfree(vport->vname);
lpfc_free_sysfs_attr(vport);
kthread_stop(phba->worker_thread);
+ /* Release all the vports against this physical port */
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL)
+ for (i = 1; i <= phba->max_vpi && vports[i] != NULL; i++)
+ fc_vport_terminate(vports[i]->fc_vport);
+ lpfc_destroy_vport_work_array(phba, vports);
+
+ /* Remove FC host and then SCSI host with the physical port */
fc_remove_host(shost);
scsi_remove_host(shost);
lpfc_cleanup(vport);
@@ -2298,13 +3061,8 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
lpfc_debugfs_terminate(vport);
- if (phba->intr_type == MSIX)
- lpfc_disable_msix(phba);
- else {
- free_irq(phba->pcidev->irq, phba);
- if (phba->intr_type == MSI)
- pci_disable_msi(phba->pcidev);
- }
+ /* Disable interrupt */
+ lpfc_disable_intr(phba);
pci_set_drvdata(pdev, NULL);
scsi_host_put(shost);
@@ -2316,12 +3074,12 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
lpfc_scsi_free(phba);
lpfc_mem_free(phba);
- dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt,
- phba->hbqslimp.phys);
+ dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
+ phba->hbqslimp.virt, phba->hbqslimp.phys);
/* Free resources associated with SLI2 interface */
dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
- phba->slim2p, phba->slim2p_mapping);
+ phba->slim2p.virt, phba->slim2p.phys);
/* unmap adapter SLIM and Control Registers */
iounmap(phba->ctrl_regs_memmap_p);
@@ -2336,13 +3094,130 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
}
/**
- * lpfc_io_error_detected - called when PCI error is detected
- * @pdev: Pointer to PCI device
- * @state: The current pci conneection state
+ * lpfc_pci_suspend_one: lpfc PCI func to suspend device for power management.
+ * @pdev: pointer to PCI device
+ * @msg: power management message
*
- * This function is called after a PCI bus error affecting
- * this device has been detected.
- */
+ * This routine is to be registered to the kernel's PCI subsystem to support
+ * system Power Management (PM). When PM invokes this method, it quiesces the
+ * device by stopping the driver's worker thread for the device, turning off
+ * the device's interrupt and DMA, and bringing the device offline. Note that
+ * the driver implements only the minimum PM requirements of a power-aware
+ * driver: all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to
+ * the suspend() method are treated as SUSPEND, and the driver fully
+ * reinitializes its device during the resume() method call. The driver
+ * therefore sets the device to the PCI_D3hot state in PCI config space
+ * instead of setting it according to the @msg provided by the PM.
+ *
+ * Return code
+ * 0 - driver suspended the device
+ * Error otherwise
+ **/
+static int
+lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0473 PCI device Power Management suspend.\n");
+
+ /* Bring down the device */
+ lpfc_offline_prep(phba);
+ lpfc_offline(phba);
+ kthread_stop(phba->worker_thread);
+
+ /* Disable interrupt from device */
+ lpfc_disable_intr(phba);
+
+ /* Save device state to PCI config space */
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+/**
+ * lpfc_pci_resume_one: lpfc PCI func to resume device for power management.
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be registered to the kernel's PCI subsystem to support
+ * system Power Management (PM). When PM invokes this method, it restores
+ * the device's PCI config space state and fully reinitializes the device
+ * and brings it online. Note that the driver implements only the minimum PM
+ * requirements of a power-aware driver: all possible PM messages (SUSPEND,
+ * HIBERNATE, FREEZE) passed to the suspend() method are treated as SUSPEND,
+ * and the driver fully reinitializes its device during the resume() method
+ * call. The device is therefore set to PCI_D0 directly in PCI config space
+ * before its state is restored.
+ *
+ * Return code
+ * 0 - driver resumed the device
+ * Error otherwise
+ **/
+static int
+lpfc_pci_resume_one(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ uint32_t intr_mode;
+ int error;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0452 PCI device Power Management resume.\n");
+
+ /* Restore device state from PCI config space */
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ if (pdev->is_busmaster)
+ pci_set_master(pdev);
+
+ /* Startup the kernel thread for this host adapter. */
+ phba->worker_thread = kthread_run(lpfc_do_work, phba,
+ "lpfc_worker_%d", phba->brd_no);
+ if (IS_ERR(phba->worker_thread)) {
+ error = PTR_ERR(phba->worker_thread);
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0434 PM resume failed to start worker "
+ "thread: error=x%x.\n", error);
+ return error;
+ }
+
+ /* Configure and enable interrupt */
+ intr_mode = lpfc_enable_intr(phba, phba->intr_mode);
+ if (intr_mode == LPFC_INTR_ERROR) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0430 PM resume Failed to enable interrupt\n");
+ return -EIO;
+ } else
+ phba->intr_mode = intr_mode;
+
+ /* Restart HBA and bring it online */
+ lpfc_sli_brdrestart(phba);
+ lpfc_online(phba);
+
+ /* Log the current active interrupt mode */
+ lpfc_log_intr_mode(phba, phba->intr_mode);
+
+ return 0;
+}
+
+/**
+ * lpfc_io_error_detected: Driver method for handling PCI I/O error detected.
+ * @pdev: pointer to PCI device.
+ * @state: the current PCI connection state.
+ *
+ * This routine is registered to the PCI subsystem for error handling. This
+ * function is called by the PCI subsystem after a PCI bus error affecting
+ * this device has been detected. When this function is invoked, it will
+ * need to stop all the I/Os and interrupt(s) to the device. Once that is
+ * done, it will return PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to
+ * perform proper recovery as desired.
+ *
+ * Return codes
+ * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
+ * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ **/
static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
@@ -2351,8 +3226,15 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
- if (state == pci_channel_io_perm_failure)
+ if (state == pci_channel_io_perm_failure) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0472 PCI channel I/O permanent failure\n");
+ /* Block all SCSI devices' I/Os on the host */
+ lpfc_scsi_dev_block(phba);
+ /* Clean up all driver's outstanding SCSI I/Os */
+ lpfc_sli_flush_fcp_rings(phba);
return PCI_ERS_RESULT_DISCONNECT;
+ }
pci_disable_device(pdev);
/*
@@ -2363,30 +3245,36 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
pring = &psli->ring[psli->fcp_ring];
lpfc_sli_abort_iocb_ring(phba, pring);
- if (phba->intr_type == MSIX)
- lpfc_disable_msix(phba);
- else {
- free_irq(phba->pcidev->irq, phba);
- if (phba->intr_type == MSI)
- pci_disable_msi(phba->pcidev);
- }
+ /* Disable interrupt */
+ lpfc_disable_intr(phba);
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
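The three error-handling entry points touched by this patch follow the standard PCI error-recovery contract: .error_detected answers with NEED_RESET (or DISCONNECT on permanent failure), the core resets the slot and calls .slot_reset, and .resume finally restarts traffic. A stripped-down editorial sketch of that contract, not the lpfc implementation — all my_* names are hypothetical:

	#include <linux/pci.h>

	static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
	{
		if (state == pci_channel_io_perm_failure)
			return PCI_ERS_RESULT_DISCONNECT;
		/* quiesce I/O and disable interrupts here, then ask for a reset */
		return PCI_ERS_RESULT_NEED_RESET;
	}

	static pci_ers_result_t my_slot_reset(struct pci_dev *pdev)
	{
		if (pci_enable_device_mem(pdev))
			return PCI_ERS_RESULT_DISCONNECT;
		pci_restore_state(pdev);
		/* reinitialize the device but keep it offline for now */
		return PCI_ERS_RESULT_RECOVERED;
	}

	static void my_resume(struct pci_dev *pdev)
	{
		/* recovery done: bring the device online, traffic may flow again */
	}

	static struct pci_error_handlers my_err_handler = {
		.error_detected	= my_error_detected,
		.slot_reset	= my_slot_reset,
		.resume		= my_resume,
	};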
/**
- * lpfc_io_slot_reset - called after the pci bus has been reset.
- * @pdev: Pointer to PCI device
+ * lpfc_io_slot_reset: Restart a PCI device from scratch.
+ * @pdev: pointer to PCI device.
+ *
+ * This routine is registered to the PCI subsystem for error handling. This is
+ * called after the PCI bus has been reset to restart the PCI card from
+ * scratch, as if from a cold-boot. During the PCI subsystem error recovery,
+ * after the driver returns PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will
+ * perform proper error recovery and then call this routine before calling
+ * the .resume method to recover the device. This function initializes the
+ * HBA device and enables the interrupt, but it leaves the HBA in an offline
+ * state without passing any I/O traffic.
*
- * Restart the card from scratch, as if from a cold-boot.
+ * Return codes
+ * PCI_ERS_RESULT_RECOVERED - the device has been recovered
+ * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
*/
static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
struct lpfc_sli *psli = &phba->sli;
- int error, retval;
+ uint32_t intr_mode;
dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
if (pci_enable_device_mem(pdev)) {
@@ -2395,56 +3283,42 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
}
- pci_set_master(pdev);
+ pci_restore_state(pdev);
+ if (pdev->is_busmaster)
+ pci_set_master(pdev);
spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
spin_unlock_irq(&phba->hbalock);
- /* Enable configured interrupt method */
- phba->intr_type = NONE;
- if (phba->cfg_use_msi == 2) {
- error = lpfc_enable_msix(phba);
- if (!error)
- phba->intr_type = MSIX;
- }
-
- /* Fallback to MSI if MSI-X initialization failed */
- if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) {
- retval = pci_enable_msi(phba->pcidev);
- if (!retval)
- phba->intr_type = MSI;
- else
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0470 Enable MSI failed, continuing "
- "with IRQ\n");
- }
-
- /* MSI-X is the only case the doesn't need to call request_irq */
- if (phba->intr_type != MSIX) {
- retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
- IRQF_SHARED, LPFC_DRIVER_NAME, phba);
- if (retval) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0471 Enable interrupt handler "
- "failed\n");
- } else if (phba->intr_type != MSI)
- phba->intr_type = INTx;
- }
+ /* Configure and enable interrupt */
+ intr_mode = lpfc_enable_intr(phba, phba->intr_mode);
+ if (intr_mode == LPFC_INTR_ERROR) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0427 Cannot re-enable interrupt after "
+ "slot reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ } else
+ phba->intr_mode = intr_mode;
/* Take device offline; this will perform cleanup */
lpfc_offline(phba);
lpfc_sli_brdrestart(phba);
+ /* Log the current active interrupt mode */
+ lpfc_log_intr_mode(phba, phba->intr_mode);
+
return PCI_ERS_RESULT_RECOVERED;
}
/**
- * lpfc_io_resume - called when traffic can start flowing again.
- * @pdev: Pointer to PCI device
+ * lpfc_io_resume: Resume PCI I/O operation.
+ * @pdev: pointer to PCI device
*
- * This callback is called when the error recovery driver tells us that
- * its OK to resume normal operation.
+ * This routine is registered to the PCI subsystem for error handling. It is
+ * called when kernel error recovery tells the lpfc driver that it is ok to
+ * resume normal PCI operation after PCI bus error recovery. After this call,
+ * traffic can start to flow from this device again.
*/
static void lpfc_io_resume(struct pci_dev *pdev)
{
@@ -2491,6 +3365,8 @@ static struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
+ PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
@@ -2521,6 +3397,12 @@ static struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
+ PCI_ANY_ID, PCI_ANY_ID, },
{ 0 }
};
@@ -2537,9 +3419,23 @@ static struct pci_driver lpfc_driver = {
.id_table = lpfc_id_table,
.probe = lpfc_pci_probe_one,
.remove = __devexit_p(lpfc_pci_remove_one),
+ .suspend = lpfc_pci_suspend_one,
+ .resume = lpfc_pci_resume_one,
.err_handler = &lpfc_err_handler,
};
+/**
+ * lpfc_init: lpfc module initialization routine.
+ *
+ * This routine is to be invoked when the lpfc module is loaded into the
+ * kernel. The special kernel macro module_init() is used to indicate the
+ * role of this routine to the kernel as lpfc module entry point.
+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - FC attach transport failed
+ * all others - failed
+ **/
static int __init
lpfc_init(void)
{
@@ -2567,12 +3463,20 @@ lpfc_init(void)
error = pci_register_driver(&lpfc_driver);
if (error) {
fc_release_transport(lpfc_transport_template);
- fc_release_transport(lpfc_vport_transport_template);
+ if (lpfc_enable_npiv)
+ fc_release_transport(lpfc_vport_transport_template);
}
return error;
}
+/**
+ * lpfc_exit: lpfc module removal routine.
+ *
+ * This routine is invoked when the lpfc module is removed from the kernel.
+ * The special kernel macro module_exit() is used to indicate the role of
+ * this routine to the kernel as lpfc module exit point.
+ **/
static void __exit
lpfc_exit(void)
{
@@ -2580,6 +3484,19 @@ lpfc_exit(void)
fc_release_transport(lpfc_transport_template);
if (lpfc_enable_npiv)
fc_release_transport(lpfc_vport_transport_template);
+ if (_dump_buf_data) {
+ printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data "
+ "at 0x%p\n",
+ (1L << _dump_buf_data_order), _dump_buf_data);
+ free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
+ }
+
+ if (_dump_buf_dif) {
+ printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif "
+ "at 0x%p\n",
+ (1L << _dump_buf_dif_order), _dump_buf_dif);
+ free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
+ }
}
module_init(lpfc_init);
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 39fd2b843bec..a85b7c196bbc 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -27,6 +27,7 @@
#define LOG_FCP 0x40 /* FCP traffic history */
#define LOG_NODE 0x80 /* Node table events */
#define LOG_TEMP 0x100 /* Temperature sensor events */
+#define LOG_BG 0x200 /* BlockGuard events */
#define LOG_MISC 0x400 /* Miscellaneous events */
#define LOG_SLI 0x800 /* SLI events */
#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 7a9be4c5b7cb..34eeb086a667 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2007 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -30,6 +30,7 @@
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@@ -37,10 +38,20 @@
#include "lpfc_crtn.h"
#include "lpfc_compat.h"
-/**********************************************/
-
-/* mailbox command */
-/**********************************************/
+/**
+ * lpfc_dump_mem: Prepare a mailbox command for retrieving HBA's VPD memory.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @offset: offset for dumping VPD memory mailbox command.
+ *
+ * The dump mailbox command provides a method for the device driver to obtain
+ * various types of information from the HBA device.
+ *
+ * This routine prepares the mailbox command for dumping HBA Vital Product
+ * Data (VPD) memory. This mailbox command is to be used for retrieving a
+ * portion (DMP_RSP_SIZE bytes) of a HBA's VPD from the HBA at an address
+ * offset specified by the offset parameter.
+ **/
void
lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
{
@@ -65,10 +76,49 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
return;
}
-/**********************************************/
-/* lpfc_read_nv Issue a READ NVPARAM */
-/* mailbox command */
-/**********************************************/
+/**
+ * lpfc_dump_wakeup_param: Prepare a mailbox command for retrieving wakeup params.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This function creates a dump memory mailbox command to dump wake up
+ * parameters.
+ **/
+void
+lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb;
+ void *ctx;
+
+ mb = &pmb->mb;
+ /* Save context so that we can restore after memset */
+ ctx = pmb->context2;
+
+ /* Set up to dump the wakeup parameters region */
+ memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+ mb->mbxCommand = MBX_DUMP_MEMORY;
+ mb->mbxOwner = OWN_HOST;
+ mb->un.varDmp.cv = 1;
+ mb->un.varDmp.type = DMP_NV_PARAMS;
+ mb->un.varDmp.entry_index = 0;
+ mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID;
+ mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE;
+ mb->un.varDmp.co = 0;
+ mb->un.varDmp.resp_offset = 0;
+ pmb->context2 = ctx;
+ return;
+}
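A mailbox "prepare" routine like the one above only fills in the LPFC_MBOXQ_t; a caller still has to allocate, issue, and free it. As a hedged sketch, here is a hypothetical caller modeled directly on the allocate/issue/free sequence visible in lpfc_enable_msix earlier in this patch (my_issue_dump_wakeup_param is invented; the lpfc driver headers are assumed to be included):

	static int my_issue_dump_wakeup_param(struct lpfc_hba *phba)
	{
		LPFC_MBOXQ_t *pmb;
		int rc = 0;

		pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!pmb)
			return -ENOMEM;

		lpfc_dump_wakeup_param(phba, pmb);	/* prepare the command */
		if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS)
			rc = -EIO;

		mempool_free(pmb, phba->mbox_mem_pool);
		return rc;
	}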
+
+/**
+ * lpfc_read_nv: Prepare a mailbox command for reading HBA's NVRAM param.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The read NVRAM mailbox command returns the HBA's non-volatile parameters
+ * that are used as defaults when the Fibre Channel link is brought on-line.
+ *
+ * This routine prepares the mailbox command for reading information stored
+ * in the HBA's NVRAM. Specifically, the HBA's WWNN and WWPN.
+ **/
void
lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
@@ -81,10 +131,19 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
return;
}
-/**********************************************/
-/* lpfc_config_async Issue a */
-/* MBX_ASYNC_EVT_ENABLE mailbox command */
-/**********************************************/
+/**
+ * lpfc_config_async: Prepare a mailbox command for enabling HBA async event.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @ring: ring number for the asynchronous event to be configured.
+ *
+ * The asynchronous event enable mailbox command is used to enable the
+ * asynchronous event posting via the ASYNC_STATUS_CN IOCB response and
+ * specifies the default ring to which events are posted.
+ *
+ * This routine prepares the mailbox command for enabling HBA asynchronous
+ * event support on an IOCB ring.
+ **/
void
lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
uint32_t ring)
@@ -99,10 +158,19 @@ lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
return;
}
-/**********************************************/
-/* lpfc_heart_beat Issue a HEART_BEAT */
-/* mailbox command */
-/**********************************************/
+/**
+ * lpfc_heart_beat: Prepare a mailbox command for heart beat.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The heart beat mailbox command is used to detect an unresponsive HBA, which
+ * is defined as any device where no error attention is sent and both mailbox
+ * and rings are not processed.
+ *
+ * This routine prepares the mailbox command for issuing a heart beat in the
+ * form of mailbox command to the HBA. The timely completion of the heart
+ * beat mailbox command indicates the health of the HBA.
+ **/
void
lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
@@ -115,10 +183,26 @@ lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
return;
}
-/**********************************************/
-/* lpfc_read_la Issue a READ LA */
-/* mailbox command */
-/**********************************************/
+/**
+ * lpfc_read_la: Prepare a mailbox command for reading HBA link attention.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @mp: DMA buffer memory for reading the link attention information into.
+ *
+ * The read link attention mailbox command is issued to read the Link Event
+ * Attention information indicated by the HBA port when the Link Event bit
+ * of the Host Attention (HSTATT) register is set to 1. A Link Event
+ * Attention occurs based on an exception detected at the Fibre Channel link
+ * interface.
+ *
+ * This routine prepares the mailbox command for reading HBA link attention
+ * information. DMA memory has been set aside and its address passed to the
+ * HBA through @mp for the HBA to DMA link attention information into the
+ * memory as part of the execution of the mailbox command.
+ *
+ * Return codes
+ * 0 - Success (currently always returns 0)
+ **/
int
lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
{
@@ -143,10 +227,21 @@ lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
return (0);
}
-/**********************************************/
-/* lpfc_clear_la Issue a CLEAR LA */
-/* mailbox command */
-/**********************************************/
+/**
+ * lpfc_clear_la: Prepare a mailbox command for clearing HBA link attention.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The clear link attention mailbox command is issued to clear the link event
+ * attention condition indicated by the Link Event bit of the Host Attention
+ * (HSTATT) register. The link event attention condition is cleared only if
+ * the event tag specified matches that of the current link event counter.
+ * The current event tag is read using the read link attention event mailbox
+ * command.
+ *
+ * This routine prepares the mailbox command for clearing HBA link attention
+ * information.
+ **/
void
lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
@@ -161,10 +256,20 @@ lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
return;
}
-/**************************************************/
-/* lpfc_config_link Issue a CONFIG LINK */
-/* mailbox command */
-/**************************************************/
+/**
+ * lpfc_config_link: Prepare a mailbox command for configuring link on a HBA.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The configure link mailbox command is used before the initialize link
+ * mailbox command to override default value and to configure link-oriented
+ * parameters such as DID address and various timers. Typically, this
+ * command would be used after an F_Port login to set the returned DID address
+ * and the fabric timeout values. This command is not valid before a configure
+ * port command has configured the HBA port.
+ *
+ * This routine prepares the mailbox command for configuring link on a HBA.
+ **/
void
lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
@@ -199,10 +304,98 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
return;
}
-/**********************************************/
-/* lpfc_init_link Issue an INIT LINK */
-/* mailbox command */
-/**********************************************/
+/**
+ * lpfc_config_msi: Prepare a mailbox command for configuring msi-x.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The configure MSI-X mailbox command is used to configure the HBA's SLI-3
+ * MSI-X multi-message interrupt vector association to interrupt attention
+ * conditions.
+ *
+ * Return codes
+ * 0 - Success
+ * -EINVAL - Failure
+ **/
+int
+lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb = &pmb->mb;
+ uint32_t attentionConditions[2];
+
+ /* Sanity check */
+ if (phba->cfg_use_msi != 2) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0475 Not configured for supporting MSI-X "
+ "cfg_use_msi: 0x%x\n", phba->cfg_use_msi);
+ return -EINVAL;
+ }
+
+ if (phba->sli_rev < 3) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0476 HBA not supporting SLI-3 or later "
+ "SLI Revision: 0x%x\n", phba->sli_rev);
+ return -EINVAL;
+ }
+
+ /* Clear mailbox command fields */
+ memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+
+ /*
+ * SLI-3, Message Signaled Interrupt Feature.
+ */
+
+ /* Multi-message attention configuration */
+ attentionConditions[0] = (HA_R0ATT | HA_R1ATT | HA_R2ATT | HA_ERATT |
+ HA_LATT | HA_MBATT);
+ attentionConditions[1] = 0;
+
+ mb->un.varCfgMSI.attentionConditions[0] = attentionConditions[0];
+ mb->un.varCfgMSI.attentionConditions[1] = attentionConditions[1];
+
+ /*
+ * Set up message number to HA bit association
+ */
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* RA0 (FCP Ring) */
+ mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS] = 1;
+ /* RA1 (Other Protocol Extra Ring) */
+ mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS] = 1;
+#else /* __LITTLE_ENDIAN_BITFIELD */
+ /* RA0 (FCP Ring) */
+ mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS^3] = 1;
+ /* RA1 (Other Protocol Extra Ring) */
+ mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS^3] = 1;
+#endif
+ /* Multi-message interrupt autoclear configuration */
+ mb->un.varCfgMSI.autoClearHA[0] = attentionConditions[0];
+ mb->un.varCfgMSI.autoClearHA[1] = attentionConditions[1];
+
+ /* For now, HBA autoclear does not work reliably, disable it */
+ mb->un.varCfgMSI.autoClearHA[0] = 0;
+ mb->un.varCfgMSI.autoClearHA[1] = 0;
+
+ /* Set command and owner bit */
+ mb->mbxCommand = MBX_CONFIG_MSI;
+ mb->mbxOwner = OWN_HOST;
+
+ return 0;
+}
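The `^ 3` in the little-endian branch above is the usual byte swizzle for tables that hardware defines in terms of 32-bit big-endian words: byte p of a word sits at byte offset p ^ 3 on a little-endian host (0<->3, 1<->2). A small editorial helper restating the idea — ha_msg_byte_index is hypothetical, not part of the patch:

	#include <asm/byteorder.h>

	/* map a big-endian byte position within a 32-bit word to the host layout */
	static inline int ha_msg_byte_index(int pos)
	{
	#ifdef __BIG_ENDIAN_BITFIELD
		return pos;		/* layout already matches the hardware */
	#else	/* __LITTLE_ENDIAN_BITFIELD */
		return pos ^ 3;		/* swap byte offset within the word */
	#endif
	}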
+
+/**
+ * lpfc_init_link: Prepare a mailbox command for initialize link on a HBA.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @topology: the link topology for the link to be initialized to.
+ * @linkspeed: the link speed for the link to be initialized to.
+ *
+ * The initialize link mailbox command is used to initialize the Fibre
+ * Channel link. This command must follow a configure port command that
+ * establishes the mode of operation.
+ *
+ * This routine prepares the mailbox command for initializing link on a HBA
+ * with the specified link topology and speed.
+ **/
void
lpfc_init_link(struct lpfc_hba * phba,
LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed)
@@ -269,10 +462,27 @@ lpfc_init_link(struct lpfc_hba * phba,
return;
}
-/**********************************************/
-/* lpfc_read_sparam Issue a READ SPARAM */
-/* mailbox command */
-/**********************************************/
+/**
+ * lpfc_read_sparam: Prepare a mailbox command for reading HBA parameters.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @vpi: virtual N_Port identifier.
+ *
+ * The read service parameter mailbox command is used to read the HBA port
+ * service parameters. The service parameters are read into the buffer
+ * specified directly by a BDE in the mailbox command. These service
+ * parameters may then be used to build the payload of an N_Port/F_Port
+ * login request and reply (LOGI/ACC).
+ *
+ * This routine prepares the mailbox command for reading HBA port service
+ * parameters. The DMA memory is allocated in this function and the addresses
+ * are populated into the mailbox command for the HBA to DMA the service
+ * parameters into.
+ *
+ * Return codes
+ * 0 - Success
+ * 1 - DMA memory allocation failed
+ **/
int
lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
{
@@ -312,10 +522,21 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
return (0);
}
-/********************************************/
-/* lpfc_unreg_did Issue a UNREG_DID */
-/* mailbox command */
-/********************************************/
+/**
+ * lpfc_unreg_did: Prepare a mailbox command for unregistering DID.
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: virtual N_Port identifier.
+ * @did: remote port identifier.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The unregister DID mailbox command is used to unregister an N_Port/F_Port
+ * login for an unknown RPI by specifying the DID of a remote port. This
+ * command frees an RPI context in the HBA port. This has the effect of
+ * performing an implicit N_Port/F_Port logout.
+ *
+ * This routine prepares the mailbox command for unregistering a remote
+ * N_Port/F_Port (DID) login.
+ **/
void
lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
LPFC_MBOXQ_t * pmb)
@@ -333,10 +554,19 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
return;
}
-/**********************************************/
-/* lpfc_read_nv Issue a READ CONFIG */
-/* mailbox command */
-/**********************************************/
+/**
+ * lpfc_read_config: Prepare a mailbox command for reading HBA configuration.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The read configuration mailbox command is used to read the HBA port
+ * configuration parameters. This mailbox command provides a method for
+ * seeing any parameters that may have changed via various configuration
+ * mailbox commands.
+ *
+ * This routine prepares the mailbox command for reading out HBA configuration
+ * parameters.
+ **/
void
lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
@@ -350,10 +580,18 @@ lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
return;
}
-/*************************************************/
-/* lpfc_read_lnk_stat Issue a READ LINK STATUS */
-/* mailbox command */
-/*************************************************/
+/**
+ * lpfc_read_lnk_stat: Prepare a mailbox command for reading HBA link stats.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The read link status mailbox command is used to read the link status from
+ * the HBA. Link status includes all link-related error counters. These
+ * counters are maintained by the HBA and originated in the link hardware
+ * unit. Note that all of these counters wrap.
+ *
+ * This routine prepares the mailbox command for reading out HBA link status.
+ **/
void
lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
@@ -367,10 +605,30 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
return;
}
-/********************************************/
-/* lpfc_reg_login Issue a REG_LOGIN */
-/* mailbox command */
-/********************************************/
+/**
+ * lpfc_reg_login: Prepare a mailbox command for registering remote login.
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: virtual N_Port identifier.
+ * @did: remote port identifier.
+ * @param: pointer to memory holding the service parameters.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @flag: action flag to be passed back for the complete function.
+ *
+ * The registration login mailbox command is used to register an N_Port or
+ * F_Port login. This registration allows the HBA to cache the remote N_Port
+ * service parameters internally and thereby make the appropriate FC-2
+ * decisions. The remote port service parameters are handed off by the driver
+ * to the HBA using a descriptor entry that directly identifies a buffer in
+ * host memory. In exchange, the HBA returns an RPI identifier.
+ *
+ * This routine prepares the mailbox command for registering remote port login.
+ * The function allocates DMA buffer for passing the service parameters to the
+ * HBA with the mailbox command.
+ *
+ * Return codes
+ * 0 - Success
+ * 1 - DMA memory allocation failed
+ **/
int
lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag)
@@ -418,10 +676,20 @@ lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
return (0);
}
-/**********************************************/
-/* lpfc_unreg_login Issue a UNREG_LOGIN */
-/* mailbox command */
-/**********************************************/
+/**
+ * lpfc_unreg_login: Prepare a mailbox command for unregistering remote login.
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: virtual N_Port identifier.
+ * @rpi: remote port identifier.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The unregistration login mailbox command is used to unregister an N_Port
+ * or F_Port login. This command frees an RPI context in the HBA. It has the
+ * effect of performing an implicit N_Port/F_Port logout.
+ *
+ * This routine prepares the mailbox command for unregistering remote port
+ * login.
+ **/
void
lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
LPFC_MBOXQ_t * pmb)
@@ -440,10 +708,21 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
return;
}
-/**************************************************/
-/* lpfc_reg_vpi Issue a REG_VPI */
-/* mailbox command */
-/**************************************************/
+/**
+ * lpfc_reg_vpi: Prepare a mailbox command for registering vport identifier.
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: virtual N_Port identifier.
+ * @sid: Fibre Channel S_ID (N_Port_ID assigned to a virtual N_Port).
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The registration vport identifier mailbox command is used to activate a
+ * virtual N_Port after it has acquired an N_Port_ID. The HBA validates the
+ * N_Port_ID against the information in the selected virtual N_Port context
+ * block and marks it active to allow normal processing of IOCB commands and
+ * received unsolicited exchanges.
+ *
+ * This routine prepares the mailbox command for registering a virtual N_Port.
+ **/
void
lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
LPFC_MBOXQ_t *pmb)
@@ -461,10 +740,22 @@ lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
}
-/**************************************************/
-/* lpfc_unreg_vpi Issue a UNREG_VNPI */
-/* mailbox command */
-/**************************************************/
+/**
+ * lpfc_unreg_vpi: Prepare a mailbox command for unregistering vport id.
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: virtual N_Port identifier.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The unregistration vport identifier mailbox command is used to inactivate
+ * a virtual N_Port. The driver must have logged out and unregistered all
+ * remote N_Ports to abort any activity on the virtual N_Port. The HBA will
+ * unregister any default RPIs associated with the specified vpi, aborting
+ * any active exchanges. The HBA will post the mailbox response after making
+ * the virtual N_Port inactive.
+ *
+ * This routine prepares the mailbox command for unregistering a virtual
+ * N_Port.
+ **/
void
lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
{
@@ -479,12 +770,19 @@ lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
}
+/**
+ * lpfc_config_pcb_setup: Set up IOCB rings in the Port Control Block (PCB)
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine sets up and initializes the IOCB rings in the Port Control
+ * Block (PCB).
+ **/
static void
lpfc_config_pcb_setup(struct lpfc_hba * phba)
{
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
- PCB_t *pcbp = &phba->slim2p->pcb;
+ PCB_t *pcbp = phba->pcb;
dma_addr_t pdma_addr;
uint32_t offset;
uint32_t iocbCnt = 0;
@@ -513,29 +811,43 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
continue;
}
/* Command ring setup for ring */
- pring->cmdringaddr = (void *) &phba->slim2p->IOCBs[iocbCnt];
+ pring->cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
pcbp->rdsc[i].cmdEntries = pring->numCiocb;
- offset = (uint8_t *) &phba->slim2p->IOCBs[iocbCnt] -
- (uint8_t *) phba->slim2p;
- pdma_addr = phba->slim2p_mapping + offset;
+ offset = (uint8_t *) &phba->IOCBs[iocbCnt] -
+ (uint8_t *) phba->slim2p.virt;
+ pdma_addr = phba->slim2p.phys + offset;
pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
iocbCnt += pring->numCiocb;
/* Response ring setup for ring */
- pring->rspringaddr = (void *) &phba->slim2p->IOCBs[iocbCnt];
+ pring->rspringaddr = (void *) &phba->IOCBs[iocbCnt];
pcbp->rdsc[i].rspEntries = pring->numRiocb;
- offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] -
- (uint8_t *)phba->slim2p;
- pdma_addr = phba->slim2p_mapping + offset;
+ offset = (uint8_t *)&phba->IOCBs[iocbCnt] -
+ (uint8_t *)phba->slim2p.virt;
+ pdma_addr = phba->slim2p.phys + offset;
pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
iocbCnt += pring->numRiocb;
}
}
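
The ring setup above relies on one property of a coherent DMA region: a member's
offset from the region's kernel-virtual base equals its offset from the region's
bus-address base. A minimal standalone sketch of that arithmetic and the
high/low word split (helper names are illustrative stand-ins for the driver's
putPaddrHigh/putPaddrLow, not lpfc symbols):

	#include <stdint.h>

	static inline uint32_t paddr_high(uint64_t a) { return (uint32_t)(a >> 32); }
	static inline uint32_t paddr_low(uint64_t a)  { return (uint32_t)a; }

	/*
	 * Bus address of 'member' inside a coherent region mapped at
	 * (region_virt, region_bus): same byte offset in both address spaces.
	 */
	static uint64_t member_bus_addr(const void *region_virt, uint64_t region_bus,
					const void *member)
	{
		uint64_t offset = (uint64_t)((const uint8_t *)member -
					     (const uint8_t *)region_virt);
		return region_bus + offset;
	}

The 32-bit halves produced by paddr_high()/paddr_low() are what get written into
the cmdAddrHigh/cmdAddrLow and rspAddrHigh/rspAddrLow descriptor fields above.
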
+/**
+ * lpfc_read_rev: Prepare a mailbox command for reading HBA revision.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The read revision mailbox command is used to read the revision levels of
+ * the HBA components. These components include hardware units, resident
+ * firmware, and available firmware. HBAs that support SLI-3 mode of
+ * operation provide different response information depending on the version
+ * requested by the driver.
+ *
+ * This routine prepares the mailbox command for reading HBA revision
+ * information.
+ **/
void
lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
@@ -548,6 +860,16 @@ lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
return;
}
+/**
+ * lpfc_build_hbq_profile2: Set up the HBQ Selection Profile 2.
+ * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
+ * @hbq_desc: pointer to the HBQ selection profile descriptor.
+ *
+ * The Host Buffer Queue (HBQ) Selection Profile 2 specifies that the HBA
+ * tests the incoming frames' R_CTL/TYPE fields with words 10:15 and performs
+ * the Sequence Length Test using the fields in the Selection Profile 2
+ * extension in words 20:31.
+ **/
static void
lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb,
struct lpfc_hbq_init *hbq_desc)
@@ -557,6 +879,16 @@ lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb,
hbqmb->profiles.profile2.seqlenoff = hbq_desc->seqlenoff;
}
+/**
+ * lpfc_build_hbq_profile3: Set up the HBQ Selection Profile 3.
+ * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
+ * @hbq_desc: pointer to the HBQ selection profile descriptor.
+ *
+ * The Host Buffer Queue (HBQ) Selection Profile 3 specifies that the HBA
+ * tests the incoming frame's R_CTL/TYPE fields with words 10:15 and performs
+ * the Sequence Length Test and Byte Field Test using the fields in the
+ * Selection Profile 3 extension in words 20:31.
+ **/
static void
lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb,
struct lpfc_hbq_init *hbq_desc)
@@ -569,6 +901,17 @@ lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb,
sizeof(hbqmb->profiles.profile3.cmdmatch));
}
+/**
+ * lpfc_build_hbq_profile5: Set up the HBQ Selection Profile 5.
+ * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
+ * @hbq_desc: pointer to the HBQ selection profile descriptor.
+ *
+ * The Host Buffer Queue (HBQ) Selection Profile 5 specifies a header HBQ. The
+ * HBA tests the initial frame of an incoming sequence using the frame's
+ * R_CTL/TYPE fields with words 10:15 and performs the Sequence Length Test
+ * and Byte Field Test using the fields in the Selection Profile 5 extension
+ * words 20:31.
+ **/
static void
lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
struct lpfc_hbq_init *hbq_desc)
@@ -581,6 +924,20 @@ lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
sizeof(hbqmb->profiles.profile5.cmdmatch));
}
+/**
+ * lpfc_config_hbq: Prepare a mailbox command for configuring an HBQ.
+ * @phba: pointer to lpfc hba data structure.
+ * @id: HBQ identifier.
+ * @hbq_desc: pointer to the HBQ descriptor data structure.
+ * @hbq_entry_index: index of the HBQ entry data structures.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The configure HBQ (Host Buffer Queue) mailbox command is used to configure
+ * an HBQ. The configuration binds events that require buffers to a particular
+ * ring and HBQ based on a selection profile.
+ *
+ * This routine prepares the mailbox command for configuring an HBQ.
+ **/
void
lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
struct lpfc_hbq_init *hbq_desc,
@@ -641,8 +998,23 @@ lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
return;
}
-
-
+/**
+ * lpfc_config_ring: Prepare a mailbox command for configuring an IOCB ring.
+ * @phba: pointer to lpfc hba data structure.
+ * @ring: ring number to configure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The configure ring mailbox command is used to configure an IOCB ring. This
+ * configuration binds one to six of the HBA's R_CTL/TYPE mask entries to the
+ * ring. This is used to map incoming sequences to a particular ring whose
+ * R_CTL/TYPE mask entry matches that of the sequence. The driver should not
+ * attempt to configure a ring whose number is greater than the number
+ * specified in the Port Control Block (PCB). It is an error to issue the
+ * configure ring command more than once with the same ring number. The HBA
+ * returns an error if the driver attempts this.
+ *
+ * This routine prepares the mailbox command for configuring IOCB ring.
+ **/
void
lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
{
@@ -684,6 +1056,20 @@ lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
return;
}
+/**
+ * lpfc_config_port: Prepare a mailbox command for configuring port.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The configure port mailbox command is used to identify the Port Control
+ * Block (PCB) in the driver memory. After this command is issued, the
+ * driver must not access the mailbox in the HBA without first resetting
+ * the HBA. The HBA may copy the PCB information to internal storage for
+ * subsequent use; the driver cannot change the PCB information unless it
+ * resets the HBA.
+ *
+ * This routine prepares the mailbox command for configuring port.
+ **/
void
lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
@@ -702,21 +1088,27 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mb->un.varCfgPort.pcbLen = sizeof(PCB_t);
- offset = (uint8_t *)&phba->slim2p->pcb - (uint8_t *)phba->slim2p;
- pdma_addr = phba->slim2p_mapping + offset;
+ offset = (uint8_t *)phba->pcb - (uint8_t *)phba->slim2p.virt;
+ pdma_addr = phba->slim2p.phys + offset;
mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);
+	/* The Host Group Pointer is always in SLIM */
+ mb->un.varCfgPort.hps = 1;
+
	/* If HBA supports SLI-3 ask for it */
if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) {
+ if (phba->cfg_enable_bg)
+ mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
+ mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
+ mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */
mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
if (phba->max_vpi && phba->cfg_enable_npiv &&
phba->vpd.sli3Feat.cmv) {
mb->un.varCfgPort.max_vpi = phba->max_vpi;
mb->un.varCfgPort.cmv = 1;
- phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
} else
mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
} else
@@ -724,16 +1116,15 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mb->un.varCfgPort.sli_mode = phba->sli_rev;
/* Now setup pcb */
- phba->slim2p->pcb.type = TYPE_NATIVE_SLI2;
- phba->slim2p->pcb.feature = FEATURE_INITIAL_SLI2;
+ phba->pcb->type = TYPE_NATIVE_SLI2;
+ phba->pcb->feature = FEATURE_INITIAL_SLI2;
/* Setup Mailbox pointers */
- phba->slim2p->pcb.mailBoxSize = offsetof(MAILBOX_t, us) +
- sizeof(struct sli2_desc);
- offset = (uint8_t *)&phba->slim2p->mbx - (uint8_t *)phba->slim2p;
- pdma_addr = phba->slim2p_mapping + offset;
- phba->slim2p->pcb.mbAddrHigh = putPaddrHigh(pdma_addr);
- phba->slim2p->pcb.mbAddrLow = putPaddrLow(pdma_addr);
+ phba->pcb->mailBoxSize = sizeof(MAILBOX_t);
+ offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt;
+ pdma_addr = phba->slim2p.phys + offset;
+ phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr);
+ phba->pcb->mbAddrLow = putPaddrLow(pdma_addr);
/*
* Setup Host Group ring pointer.
@@ -794,13 +1185,13 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
/* mask off BAR0's flag bits 0 - 3 */
- phba->slim2p->pcb.hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
- (void __iomem *) phba->host_gp -
+ phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
+ (void __iomem *)phba->host_gp -
(void __iomem *)phba->MBslimaddr;
if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
- phba->slim2p->pcb.hgpAddrHigh = bar_high;
+ phba->pcb->hgpAddrHigh = bar_high;
else
- phba->slim2p->pcb.hgpAddrHigh = 0;
+ phba->pcb->hgpAddrHigh = 0;
/* write HGP data to SLIM at the required longword offset */
memset(&hgp, 0, sizeof(struct lpfc_hgp));
@@ -809,18 +1200,15 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
sizeof(*phba->host_gp));
}
- /* Setup Port Group ring pointer */
+ /* Setup Port Group offset */
if (phba->sli_rev == 3)
- pgp_offset = (uint8_t *)&phba->slim2p->mbx.us.s3_pgp.port -
- (uint8_t *)phba->slim2p;
+ pgp_offset = offsetof(struct lpfc_sli2_slim,
+ mbx.us.s3_pgp.port);
else
- pgp_offset = (uint8_t *)&phba->slim2p->mbx.us.s2.port -
- (uint8_t *)phba->slim2p;
-
- pdma_addr = phba->slim2p_mapping + pgp_offset;
- phba->slim2p->pcb.pgpAddrHigh = putPaddrHigh(pdma_addr);
- phba->slim2p->pcb.pgpAddrLow = putPaddrLow(pdma_addr);
- phba->hbq_get = &phba->slim2p->mbx.us.s3_pgp.hbq_get[0];
+ pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port);
+ pdma_addr = phba->slim2p.phys + pgp_offset;
+ phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr);
+ phba->pcb->pgpAddrLow = putPaddrLow(pdma_addr);
	/* Use callback routine to set up rings in the pcb */
lpfc_config_pcb_setup(phba);
@@ -835,10 +1223,24 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
/* Swap PCB if needed */
- lpfc_sli_pcimem_bcopy(&phba->slim2p->pcb, &phba->slim2p->pcb,
- sizeof(PCB_t));
+ lpfc_sli_pcimem_bcopy(phba->pcb, phba->pcb, sizeof(PCB_t));
}
+/**
+ * lpfc_kill_board: Prepare a mailbox command for killing board.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * The kill board mailbox command is used to tell firmware to perform a
+ * graceful shutdown of a channel on a specified board to prepare for reset.
+ * When the kill board mailbox command is received, the ER3 bit is set to 1
+ * in the Host Status register and the ER Attention bit is set to 1 in the
+ * Host Attention register of the HBA function that received the kill board
+ * command.
+ *
+ * This routine prepares the mailbox command for killing the board in
+ * preparation for a graceful shutdown.
+ **/
void
lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
@@ -850,6 +1252,16 @@ lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
return;
}
+/**
+ * lpfc_mbox_put: Put a mailbox cmd into the tail of driver's mailbox queue.
+ * @phba: pointer to lpfc hba data structure.
+ * @mbq: pointer to the driver internal queue element for mailbox command.
+ *
+ * The driver maintains an internal mailbox command queue implemented as a
+ * linked list. When a mailbox command is issued, it is put at the tail of
+ * the queue so that commands are processed in order, since the HBA can
+ * process only one mailbox command at a time.
+ **/
void
lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
{
@@ -864,6 +1276,20 @@ lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
return;
}
+/**
+ * lpfc_mbox_get: Remove a mailbox cmd from the head of driver's mailbox queue.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * The driver maintains an internal mailbox command queue implemented as a
+ * linked list. When a mailbox command is issued, it is put at the tail of
+ * the queue so that commands are processed in order, since the HBA can
+ * process only one mailbox command at a time. After the HBA finishes
+ * processing a mailbox command, the driver removes the next pending command
+ * from the head of the queue and sends it to the HBA for processing.
+ *
+ * Return codes
+ * pointer to the driver internal queue element for mailbox command.
+ **/
LPFC_MBOXQ_t *
lpfc_mbox_get(struct lpfc_hba * phba)
{
@@ -877,16 +1303,40 @@ lpfc_mbox_get(struct lpfc_hba * phba)
return mbq;
}
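
The put/get pair above is a plain FIFO: commands are enqueued at the tail,
dequeued from the head, and the HBA only ever sees one command at a time. A
hedged sketch of the same discipline on a generic list_head (type and function
names here are hypothetical, not the driver's):

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct mbox_fifo {
		spinlock_t lock;		/* spin_lock_init() at setup */
		struct list_head pending;	/* INIT_LIST_HEAD() at setup */
	};

	static void mbox_fifo_put(struct mbox_fifo *q, struct list_head *cmd)
	{
		unsigned long flags;

		spin_lock_irqsave(&q->lock, flags);
		list_add_tail(cmd, &q->pending);	/* enqueue at the tail */
		spin_unlock_irqrestore(&q->lock, flags);
	}

	static struct list_head *mbox_fifo_get(struct mbox_fifo *q)
	{
		struct list_head *cmd = NULL;
		unsigned long flags;

		spin_lock_irqsave(&q->lock, flags);
		if (!list_empty(&q->pending)) {
			cmd = q->pending.next;		/* oldest entry */
			list_del_init(cmd);
		}
		spin_unlock_irqrestore(&q->lock, flags);
		return cmd;
	}
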
+/**
+ * lpfc_mbox_cmpl_put: Put mailbox command into mailbox command complete list.
+ * @phba: pointer to lpfc hba data structure.
+ * @mbq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine puts the completed mailbox command into the mailbox command
+ * complete list. This routine is called from driver interrupt handler
+ * context. The mailbox complete list is used by the driver worker thread
+ * to process mailbox complete callback functions outside the driver interrupt
+ * handler.
+ **/
void
lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
{
+ unsigned long iflag;
+
/* This function expects to be called from interrupt context */
- spin_lock(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflag);
list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
- spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
return;
}
+/**
+ * lpfc_mbox_tmo_val: Retrieve mailbox command timeout value.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmd: mailbox command code.
+ *
+ * This routine retrieves the proper timeout value according to the mailbox
+ * command code.
+ *
+ * Return codes
+ * Timeout value to be used for the given mailbox command
+ **/
int
lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
{
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 3c0cebc71800..a4bba2069248 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2006 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2008 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -30,6 +30,7 @@
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@@ -39,7 +40,21 @@
#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
-
+/**
+ * lpfc_mem_alloc: create and allocate all PCI and memory pools
+ * @phba: HBA to allocate pools for
+ *
+ * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool,
+ * lpfc_mbuf_pool, lpfc_hbq_pool. Creates and allocates kmalloc-backed mempools
+ * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask.
+ *
+ * Notes: Not interrupt-safe. Must be called with no locks held. If any
+ * allocation fails, frees all successfully allocated memory before returning.
+ *
+ * Returns:
+ * 0 on success
+ * -ENOMEM on failure (if any memory allocations fail)
+ **/
int
lpfc_mem_alloc(struct lpfc_hba * phba)
{
@@ -120,6 +135,16 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
return -ENOMEM;
}
+/**
+ * lpfc_mem_free: Frees all PCI and memory allocated by lpfc_mem_alloc
+ * @phba: HBA to free memory for
+ *
+ * Description: Frees PCI pools lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool,
+ * lpfc_hbq_pool. Frees kmalloc-backed mempools for LPFC_MBOXQ_t and
+ * lpfc_nodelist. Also frees the VPI bitmask.
+ *
+ * Returns: None
+ **/
void
lpfc_mem_free(struct lpfc_hba * phba)
{
@@ -181,12 +206,29 @@ lpfc_mem_free(struct lpfc_hba * phba)
phba->lpfc_scsi_dma_buf_pool = NULL;
phba->lpfc_mbuf_pool = NULL;
- /* Free the iocb lookup array */
+ /* Free the iocb lookup array */
kfree(psli->iocbq_lookup);
psli->iocbq_lookup = NULL;
-
}
+/**
+ * lpfc_mbuf_alloc: Allocate an mbuf from the lpfc_mbuf_pool PCI pool
+ * @phba: HBA which owns the pool to allocate from
+ * @mem_flags: indicates if this is a priority (MEM_PRI) allocation
+ * @handle: used to return the DMA-mapped address of the mbuf
+ *
+ * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool.
+ * Allocates from the generic pci_pool_alloc function first; if that fails and
+ * mem_flags has MEM_PRI set (the only defined flag), returns an mbuf from the
+ * HBA's safety pool.
+ *
+ * Notes: Not interrupt-safe. Must be called with no locks held. Takes
+ * phba->hbalock.
+ *
+ * Returns:
+ * pointer to the allocated mbuf on success
+ * NULL on failure
+ **/
void *
lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
{
@@ -206,6 +248,20 @@ lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
return ret;
}
+/**
+ * __lpfc_mbuf_free: Free an mbuf from the lpfc_mbuf_pool PCI pool (locked)
+ * @phba: HBA which owns the pool to return to
+ * @virt: mbuf to free
+ * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
+ *
+ * Description: Returns the mbuf to the lpfc_mbuf_safety_pool if the safety
+ * pool is below its max_count; otherwise frees the mbuf back to the PCI pool.
+ *
+ * Notes: Must be called with phba->hbalock held to synchronize access to
+ * lpfc_mbuf_safety_pool.
+ *
+ * Returns: None
+ **/
void
__lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
{
@@ -221,7 +277,21 @@ __lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
return;
}
+/**
+ * lpfc_mbuf_free: Free an mbuf from the lpfc_mbuf_pool PCI pool (unlocked)
+ * @phba: HBA which owns the pool to return to
+ * @virt: mbuf to free
+ * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
+ *
+ * Description: Returns the mbuf to the lpfc_mbuf_safety_pool if the safety
+ * pool is below its max_count; otherwise frees the mbuf back to the PCI pool.
+ *
+ * Notes: Takes phba->hbalock. Can be called with or without other locks held.
+ *
+ * Returns: None
+ **/
void
lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
{
unsigned long iflags;
@@ -232,6 +302,19 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
return;
}
+/**
+ * lpfc_els_hbq_alloc: Allocate an HBQ buffer
+ * @phba: HBA to allocate HBQ buffer for
+ *
+ * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI
+ * pool along with a non-DMA-mapped container for it.
+ *
+ * Notes: Not interrupt-safe. Must be called with no locks held.
+ *
+ * Returns:
+ * pointer to HBQ on success
+ * NULL on failure
+ **/
struct hbq_dmabuf *
lpfc_els_hbq_alloc(struct lpfc_hba *phba)
{
@@ -251,6 +334,18 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
return hbqbp;
}
+/**
+ * lpfc_mem_hbq_free: Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
+ * @phba: HBA buffer was allocated for
+ * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
+ *
+ * Description: Frees both the container and the DMA-mapped buffer returned by
+ * lpfc_els_hbq_alloc.
+ *
+ * Notes: Can be called with or without locks held.
+ *
+ * Returns: None
+ **/
void
lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
{
@@ -259,7 +354,18 @@ lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
return;
}
-/* This is ONLY called for the LPFC_ELS_HBQ */
+/**
+ * lpfc_in_buf_free: Free a DMA buffer
+ * @phba: HBA buffer is associated with
+ * @mp: Buffer to free
+ *
+ * Description: Frees the given DMA buffer in the appropriate way depending on
+ * whether the HBA is running in SLI3 mode with HBQs enabled.
+ *
+ * Notes: Takes phba->hbalock. Can be called with or without other locks held.
+ *
+ * Returns: None
+ **/
void
lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h
new file mode 100644
index 000000000000..27d1a88a98fe
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_nl.h
@@ -0,0 +1,179 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2008 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+/* Event definitions for RegisterForEvent */
+#define FC_REG_LINK_EVENT 0x0001 /* link up / down events */
+#define FC_REG_RSCN_EVENT 0x0002 /* RSCN events */
+#define FC_REG_CT_EVENT 0x0004 /* CT request events */
+#define FC_REG_DUMP_EVENT 0x0010 /* Dump events */
+#define FC_REG_TEMPERATURE_EVENT 0x0020 /* temperature events */
+#define FC_REG_VPORTRSCN_EVENT 0x0040 /* Vport RSCN events */
+#define FC_REG_ELS_EVENT 0x0080 /* lpfc els events */
+#define FC_REG_FABRIC_EVENT 0x0100 /* lpfc fabric events */
+#define FC_REG_SCSI_EVENT 0x0200 /* lpfc scsi events */
+#define FC_REG_BOARD_EVENT 0x0400 /* lpfc board events */
+#define FC_REG_ADAPTER_EVENT 0x0800 /* lpfc adapter events */
+#define FC_REG_EVENT_MASK (FC_REG_LINK_EVENT | \
+ FC_REG_RSCN_EVENT | \
+ FC_REG_CT_EVENT | \
+ FC_REG_DUMP_EVENT | \
+ FC_REG_TEMPERATURE_EVENT | \
+ FC_REG_VPORTRSCN_EVENT | \
+ FC_REG_ELS_EVENT | \
+ FC_REG_FABRIC_EVENT | \
+ FC_REG_SCSI_EVENT | \
+ FC_REG_BOARD_EVENT | \
+ FC_REG_ADAPTER_EVENT)
+/* Temperature events */
+#define LPFC_CRIT_TEMP 0x1
+#define LPFC_THRESHOLD_TEMP 0x2
+#define LPFC_NORMAL_TEMP 0x3
+/*
+ * All net link event payloads will begin with an event type
+ * and subcategory. The event type must come first.
+ * The subcategory further defines the data that follows in the rest
+ * of the payload. Each category will have its own unique header plus
+ * any additional data unique to the subcategory.
+ * The payload sent via the fc transport is one-way driver->application.
+ */
+
+/* RSCN event header */
+struct lpfc_rscn_event_header {
+ uint32_t event_type;
+ uint32_t payload_length; /* RSCN data length in bytes */
+ uint32_t rscn_payload[];
+};
+
+/* els event header */
+struct lpfc_els_event_header {
+ uint32_t event_type;
+ uint32_t subcategory;
+ uint8_t wwpn[8];
+ uint8_t wwnn[8];
+};
+
+/* subcategory codes for FC_REG_ELS_EVENT */
+#define LPFC_EVENT_PLOGI_RCV 0x01
+#define LPFC_EVENT_PRLO_RCV 0x02
+#define LPFC_EVENT_ADISC_RCV 0x04
+#define LPFC_EVENT_LSRJT_RCV 0x08
+#define LPFC_EVENT_LOGO_RCV 0x10
+
+/* special els lsrjt event */
+struct lpfc_lsrjt_event {
+ struct lpfc_els_event_header header;
+ uint32_t command;
+ uint32_t reason_code;
+ uint32_t explanation;
+};
+
+/* special els logo event */
+struct lpfc_logo_event {
+ struct lpfc_els_event_header header;
+ uint8_t logo_wwpn[8];
+};
+
+/* fabric event header */
+struct lpfc_fabric_event_header {
+ uint32_t event_type;
+ uint32_t subcategory;
+ uint8_t wwpn[8];
+ uint8_t wwnn[8];
+};
+
+/* subcategory codes for FC_REG_FABRIC_EVENT */
+#define LPFC_EVENT_FABRIC_BUSY 0x01
+#define LPFC_EVENT_PORT_BUSY 0x02
+#define LPFC_EVENT_FCPRDCHKERR 0x04
+
+/* special case fabric fcprdchkerr event */
+struct lpfc_fcprdchkerr_event {
+ struct lpfc_fabric_event_header header;
+ uint32_t lun;
+ uint32_t opcode;
+ uint32_t fcpiparam;
+};
+
+
+/* scsi event header */
+struct lpfc_scsi_event_header {
+ uint32_t event_type;
+ uint32_t subcategory;
+ uint32_t lun;
+ uint8_t wwpn[8];
+ uint8_t wwnn[8];
+};
+
+/* subcategory codes for FC_REG_SCSI_EVENT */
+#define LPFC_EVENT_QFULL 0x0001
+#define LPFC_EVENT_DEVBSY 0x0002
+#define LPFC_EVENT_CHECK_COND 0x0004
+#define LPFC_EVENT_LUNRESET 0x0008
+#define LPFC_EVENT_TGTRESET 0x0010
+#define LPFC_EVENT_BUSRESET 0x0020
+#define LPFC_EVENT_VARQUEDEPTH 0x0040
+
+/* special case scsi varqueuedepth event */
+struct lpfc_scsi_varqueuedepth_event {
+ struct lpfc_scsi_event_header scsi_event;
+ uint32_t oldval;
+ uint32_t newval;
+};
+
+/* special case scsi check condition event */
+struct lpfc_scsi_check_condition_event {
+ struct lpfc_scsi_event_header scsi_event;
+ uint8_t opcode;
+ uint8_t sense_key;
+ uint8_t asc;
+ uint8_t ascq;
+};
+
+/* event codes for FC_REG_BOARD_EVENT */
+#define LPFC_EVENT_PORTINTERR 0x01
+
+/* board event header */
+struct lpfc_board_event_header {
+ uint32_t event_type;
+ uint32_t subcategory;
+};
+
+
+/* event codes for FC_REG_ADAPTER_EVENT */
+#define LPFC_EVENT_ARRIVAL 0x01
+
+/* adapter event header */
+struct lpfc_adapter_event_header {
+ uint32_t event_type;
+ uint32_t subcategory;
+};
+
+
+/* event codes for temp_event */
+#define LPFC_CRIT_TEMP 0x1
+#define LPFC_THRESHOLD_TEMP 0x2
+#define LPFC_NORMAL_TEMP 0x3
+
+struct temp_event {
+ uint32_t event_type;
+ uint32_t event_code;
+ uint32_t data;
+};
+
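
Except for the RSCN header, every event structure above leads with the same two
32-bit words, so a consumer can dispatch on event_type/subcategory before
touching the category-specific fields. A hypothetical consumer-side sketch,
with the constants copied from the definitions above:

	#include <stddef.h>
	#include <stdint.h>

	struct lpfc_evt_hdr {
		uint32_t event_type;	/* FC_REG_* category bit */
		uint32_t subcategory;	/* LPFC_EVENT_* code within the category */
	};

	/* Returns nonzero if the payload is a SCSI queue-depth-change event. */
	static int is_queue_depth_event(const void *payload, size_t len)
	{
		const struct lpfc_evt_hdr *hdr = payload;

		if (len < sizeof(*hdr))
			return 0;
		return hdr->event_type == 0x0200 &&	/* FC_REG_SCSI_EVENT */
		       hdr->subcategory == 0x0040;	/* LPFC_EVENT_VARQUEDEPTH */
	}
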
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 6688a8689b56..8f548adae9cc 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -30,6 +30,7 @@
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@@ -1003,20 +1004,8 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
-
- if (vport->num_disc_nodes) {
+ if (vport->num_disc_nodes)
lpfc_more_adisc(vport);
- if ((vport->num_disc_nodes == 0) &&
- (vport->fc_npr_cnt))
- lpfc_els_disc_plogi(vport);
- if (vport->num_disc_nodes == 0) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_NDISC_ACTIVE;
- spin_unlock_irq(shost->host_lock);
- lpfc_can_disctmo(vport);
- lpfc_end_rscn(vport);
- }
- }
}
return ndlp->nlp_state;
}
@@ -1865,8 +1854,13 @@ static uint32_t
lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ if (ndlp->nlp_DID == Fabric_DID) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ spin_unlock_irq(shost->host_lock);
+ }
lpfc_unreg_rpi(vport, ndlp);
- /* This routine does nothing, just return the current state */
return ndlp->nlp_state;
}
@@ -1935,10 +1929,10 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (vport->fc_flag & FC_RSCN_DEFERRED)
return ndlp->nlp_state;
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
spin_unlock_irq(shost->host_lock);
- lpfc_cancel_retry_delay_tmo(vport, ndlp);
return ndlp->nlp_state;
}
@@ -2155,7 +2149,7 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_nlp_put(ndlp);
} else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0212 DSM out state %d on NPort free\n", rc);
+ "0213 DSM out state %d on NPort free\n", rc);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
"DSM out: ste:%d did:x%x flg:x%x",
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 1bcebbd3dfac..b103b6ed4970 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -18,13 +18,14 @@
* more details, a copy of which can be found in the file COPYING *
* included with this package. *
*******************************************************************/
-
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
@@ -32,6 +33,7 @@
#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@@ -42,12 +44,194 @@
#define LPFC_RESET_WAIT 2
#define LPFC_ABORT_WAIT 2
-/*
- * This function is called with no lock held when there is a resource
- * error in driver or in firmware.
- */
+int _dump_buf_done;
+
+static char *dif_op_str[] = {
+ "SCSI_PROT_NORMAL",
+ "SCSI_PROT_READ_INSERT",
+ "SCSI_PROT_WRITE_STRIP",
+ "SCSI_PROT_READ_STRIP",
+ "SCSI_PROT_WRITE_INSERT",
+ "SCSI_PROT_READ_PASS",
+ "SCSI_PROT_WRITE_PASS",
+ "SCSI_PROT_READ_CONVERT",
+ "SCSI_PROT_WRITE_CONVERT"
+};
+
+static void
+lpfc_debug_save_data(struct scsi_cmnd *cmnd)
+{
+ void *src, *dst;
+ struct scatterlist *sgde = scsi_sglist(cmnd);
+
+ if (!_dump_buf_data) {
+ printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n",
+ __func__);
+ return;
+ }
+
+ if (!sgde) {
+ printk(KERN_ERR "BLKGRD ERROR: data scatterlist is null\n");
+ return;
+ }
+
+ dst = (void *) _dump_buf_data;
+ while (sgde) {
+ src = sg_virt(sgde);
+ memcpy(dst, src, sgde->length);
+ dst += sgde->length;
+ sgde = sg_next(sgde);
+ }
+}
+
+static void
+lpfc_debug_save_dif(struct scsi_cmnd *cmnd)
+{
+ void *src, *dst;
+ struct scatterlist *sgde = scsi_prot_sglist(cmnd);
+
+ if (!_dump_buf_dif) {
+		printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_dif is NULL\n",
+ __func__);
+ return;
+ }
+
+ if (!sgde) {
+ printk(KERN_ERR "BLKGRD ERROR: prot scatterlist is null\n");
+ return;
+ }
+
+ dst = _dump_buf_dif;
+ while (sgde) {
+ src = sg_virt(sgde);
+ memcpy(dst, src, sgde->length);
+ dst += sgde->length;
+ sgde = sg_next(sgde);
+ }
+}
+
+/**
+ * lpfc_update_stats: Update statistical data for the command completion.
+ * @phba: Pointer to HBA object.
+ * @lpfc_cmd: lpfc scsi command object pointer.
+ *
+ * This function is called when there is a command completion and this
+ * function updates the statistical data for the command completion.
+ **/
+static void
+lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+ struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
+ struct lpfc_nodelist *pnode = rdata->pnode;
+ struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+ unsigned long flags;
+ struct Scsi_Host *shost = cmd->device->host;
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ unsigned long latency;
+ int i;
+
+ if (cmd->result)
+ return;
+
+ latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (!vport->stat_data_enabled ||
+ vport->stat_data_blocked ||
+ !pnode->lat_data ||
+ (phba->bucket_type == LPFC_NO_BUCKET)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return;
+ }
+
+ if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
+ i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
+ phba->bucket_step;
+ /* check array subscript bounds */
+ if (i < 0)
+ i = 0;
+ else if (i >= LPFC_MAX_BUCKET_COUNT)
+ i = LPFC_MAX_BUCKET_COUNT - 1;
+ } else {
+ for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
+ if (latency <= (phba->bucket_base +
+ ((1<<i)*phba->bucket_step)))
+ break;
+ }
+
+ pnode->lat_data[i].cmd_count++;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+}
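
The bucket selection above supports two shapes: fixed-width bins
(LPFC_LINEAR_BUCKET), where the rounding form (latency + step - 1 - base) / step
puts a latency of exactly base + N*step into bin N, and exponentially growing
bins, where bin i covers latencies up to base + 2^i * step. A standalone sketch
with hypothetical base/step values (MAX_BUCKETS stands in for
LPFC_MAX_BUCKET_COUNT):

	#include <stdio.h>

	#define MAX_BUCKETS 16

	static int linear_bucket(long latency, long base, long step)
	{
		long i = (latency + step - 1 - base) / step;

		if (i < 0)			/* clamp to array bounds */
			i = 0;
		else if (i >= MAX_BUCKETS)
			i = MAX_BUCKETS - 1;
		return (int)i;
	}

	static int power2_bucket(long latency, long base, long step)
	{
		int i;

		/* bucket i holds latencies up to base + (1 << i) * step */
		for (i = 0; i < MAX_BUCKETS - 1; i++)
			if (latency <= base + ((1L << i) * step))
				break;
		return i;
	}

	int main(void)
	{
		/* 25 ms latency, base 0, step 10: linear bin 3, power-of-2 bin 2 */
		printf("%d %d\n", linear_bucket(25, 0, 10), power2_bucket(25, 0, 10));
		return 0;
	}
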
+
+/**
+ * lpfc_send_sdev_queuedepth_change_event: Posts a queue depth change event.
+ * @phba: Pointer to HBA context object.
+ * @vport: Pointer to vport object.
+ * @ndlp: Pointer to FC node associated with the target.
+ * @lun: Lun number of the scsi device.
+ * @old_val: Old value of the queue depth.
+ * @new_val: New value of the queue depth.
+ *
+ * This function sends an event to the mgmt application indicating
+ * there is a change in the scsi device queue depth.
+ **/
+static void
+lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
+ struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ uint32_t lun,
+ uint32_t old_val,
+ uint32_t new_val)
+{
+ struct lpfc_fast_path_event *fast_path_evt;
+ unsigned long flags;
+
+ fast_path_evt = lpfc_alloc_fast_evt(phba);
+ if (!fast_path_evt)
+ return;
+
+ fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
+ FC_REG_SCSI_EVENT;
+ fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
+ LPFC_EVENT_VARQUEDEPTH;
+
+ /* Report all luns with change in queue depth */
+ fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
+ &ndlp->nlp_portname, sizeof(struct lpfc_name));
+ memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
+ &ndlp->nlp_nodename, sizeof(struct lpfc_name));
+ }
+
+ fast_path_evt->un.queue_depth_evt.oldval = old_val;
+ fast_path_evt->un.queue_depth_evt.newval = new_val;
+ fast_path_evt->vport = vport;
+
+ fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
+ spin_lock_irqsave(&phba->hbalock, flags);
+ list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_worker_wake_up(phba);
+
+ return;
+}
+
+/**
+ * lpfc_rampdown_queue_depth: Post RAMP_DOWN_QUEUE event to worker thread.
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine is called when there is a resource error in the driver or
+ * firmware. This routine posts a WORKER_RAMP_DOWN_QUEUE event for @phba. It
+ * posts at most one event per second and wakes up the worker thread of @phba
+ * to process the WORKER_RAMP_DOWN_QUEUE event.
+ *
+ * This routine should be called with no lock held.
+ **/
void
-lpfc_adjust_queue_depth(struct lpfc_hba *phba)
+lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
unsigned long flags;
uint32_t evt_posted;
@@ -76,10 +260,17 @@ lpfc_adjust_queue_depth(struct lpfc_hba *phba)
return;
}
-/*
- * This function is called with no lock held when there is a successful
- * SCSI command completion.
- */
+/**
+ * lpfc_rampup_queue_depth: Post RAMP_UP_QUEUE event for worker thread.
+ * @vport: The virtual port for which this call is being executed.
+ * @sdev: The scsi device associated with the ramp-up event.
+ *
+ * This routine posts a WORKER_RAMP_UP_QUEUE event for @vport. It posts at
+ * most one event every 5 minutes after last_ramp_up_time or
+ * last_rsrc_error_time. This routine wakes up the worker thread of the HBA
+ * to process the WORKER_RAMP_UP_QUEUE event.
+ *
+ * This routine should be called with no lock held.
+ **/
static inline void
lpfc_rampup_queue_depth(struct lpfc_vport *vport,
struct scsi_device *sdev)
@@ -111,15 +302,24 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
return;
}
+/**
+ * lpfc_ramp_down_queue_handler: WORKER_RAMP_DOWN_QUEUE event handler.
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine is called to process WORKER_RAMP_DOWN_QUEUE event for worker
+ * thread. This routine reduces queue depth for all scsi devices on each vport
+ * associated with @phba.
+ **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
struct lpfc_vport **vports;
struct Scsi_Host *shost;
struct scsi_device *sdev;
- unsigned long new_queue_depth;
+ unsigned long new_queue_depth, old_queue_depth;
unsigned long num_rsrc_err, num_cmd_success;
int i;
+ struct lpfc_rport_data *rdata;
num_rsrc_err = atomic_read(&phba->num_rsrc_err);
num_cmd_success = atomic_read(&phba->num_cmd_success);
@@ -137,6 +337,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
else
new_queue_depth = sdev->queue_depth -
new_queue_depth;
+ old_queue_depth = sdev->queue_depth;
if (sdev->ordered_tags)
scsi_adjust_queue_depth(sdev,
MSG_ORDERED_TAG,
@@ -145,6 +346,13 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
scsi_adjust_queue_depth(sdev,
MSG_SIMPLE_TAG,
new_queue_depth);
+ rdata = sdev->hostdata;
+ if (rdata)
+ lpfc_send_sdev_queuedepth_change_event(
+ phba, vports[i],
+ rdata->pnode,
+ sdev->lun, old_queue_depth,
+ new_queue_depth);
}
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -152,6 +360,15 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
atomic_set(&phba->num_cmd_success, 0);
}
+/**
+ * lpfc_ramp_up_queue_handler: WORKER_RAMP_UP_QUEUE event handler.
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine is called to process WORKER_RAMP_UP_QUEUE event for worker
+ * thread.This routine increases queue depth for all scsi device on each vport
+ * associated with @phba by 1. This routine also sets @phba num_rsrc_err and
+ * num_cmd_success to zero.
+ **/
void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
@@ -159,6 +376,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
struct Scsi_Host *shost;
struct scsi_device *sdev;
int i;
+ struct lpfc_rport_data *rdata;
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
@@ -176,6 +394,14 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
scsi_adjust_queue_depth(sdev,
MSG_SIMPLE_TAG,
sdev->queue_depth+1);
+ rdata = sdev->hostdata;
+ if (rdata)
+ lpfc_send_sdev_queuedepth_change_event(
+ phba, vports[i],
+ rdata->pnode,
+ sdev->lun,
+ sdev->queue_depth - 1,
+ sdev->queue_depth);
}
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -183,14 +409,50 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
atomic_set(&phba->num_cmd_success, 0);
}
-/*
+/**
+ * lpfc_scsi_dev_block: set all scsi hosts to block state.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function walks the vport list and sets each SCSI host to block state
+ * by invoking the fc_remote_port_delete() routine. This function is invoked
+ * with EEH when the device's PCI slot has been permanently disabled.
+ **/
+void
+lpfc_scsi_dev_block(struct lpfc_hba *phba)
+{
+ struct lpfc_vport **vports;
+ struct Scsi_Host *shost;
+ struct scsi_device *sdev;
+ struct fc_rport *rport;
+ int i;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL)
+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ shost_for_each_device(sdev, shost) {
+ rport = starget_to_rport(scsi_target(sdev));
+ fc_remote_port_delete(rport);
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
+ * lpfc_new_scsi_buf: Scsi buffer allocator.
+ * @vport: The virtual port for which this call being executed.
+ *
* This routine allocates a scsi buffer, which contains all the necessary
* information needed to initiate a SCSI I/O. The non-DMAable buffer region
* contains information to build the IOCB. The DMAable region contains
- * memory for the FCP CMND, FCP RSP, and the inital BPL. In addition to
- * allocating memeory, the FCP CMND and FCP RSP BDEs are setup in the BPL
+ * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to
+ * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
* and the BPL BDE is setup in the IOCB.
- */
+ *
+ * Return codes:
+ * NULL - Error
+ * Pointer to lpfc_scsi_buf data structure - Success
+ **/
static struct lpfc_scsi_buf *
lpfc_new_scsi_buf(struct lpfc_vport *vport)
{
@@ -198,7 +460,9 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
struct lpfc_scsi_buf *psb;
struct ulp_bde64 *bpl;
IOCB_t *iocb;
- dma_addr_t pdma_phys;
+ dma_addr_t pdma_phys_fcp_cmd;
+ dma_addr_t pdma_phys_fcp_rsp;
+ dma_addr_t pdma_phys_bpl;
uint16_t iotag;
psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
@@ -238,45 +502,77 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
/* Initialize local short-hand pointers. */
bpl = psb->fcp_bpl;
- pdma_phys = psb->dma_handle;
+ pdma_phys_fcp_cmd = psb->dma_handle;
+ pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
+ pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp);
/*
* The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg
* list bdes. Initialize the first two and leave the rest for
* queuecommand.
*/
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
- bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
- bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
- bpl->tus.f.bdeFlags = BUFF_USE_CMND;
- bpl->tus.w = le32_to_cpu(bpl->tus.w);
- bpl++;
+ bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
+ bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
+ bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
+ bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
/* Setup the physical region for the FCP RSP */
- pdma_phys += sizeof (struct fcp_cmnd);
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
- bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
- bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
- bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
- bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
+ bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
+ bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
+ bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
/*
* Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
* initialize it with all known data now.
*/
- pdma_phys += (sizeof (struct fcp_rsp));
iocb = &psb->cur_iocbq.iocb;
iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
- iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
- iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
- iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
- iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
- iocb->ulpBdeCount = 1;
+ if ((phba->sli_rev == 3) &&
+ !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
+ /* fill in immediate fcp command BDE */
+ iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
+ iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
+ iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
+ unsli3.fcp_ext.icd);
+ iocb->un.fcpi64.bdl.addrHigh = 0;
+ iocb->ulpBdeCount = 0;
+ iocb->ulpLe = 0;
+		/* fill in response BDE */
+ iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
+ sizeof(struct fcp_rsp);
+ iocb->unsli3.fcp_ext.rbde.addrLow =
+ putPaddrLow(pdma_phys_fcp_rsp);
+ iocb->unsli3.fcp_ext.rbde.addrHigh =
+ putPaddrHigh(pdma_phys_fcp_rsp);
+ } else {
+ iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
+ iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
+ iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
+ iocb->ulpBdeCount = 1;
+ iocb->ulpLe = 1;
+ }
iocb->ulpClass = CLASS3;
return psb;
}
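
The buffer carved up above is one coherent DMA allocation holding the FCP
command, the FCP response, and the BPL back to back, so each piece's bus
address is just dma_handle plus a fixed offset. A small sketch of that layout
arithmetic, under the assumption of back-to-back packing as in the code above
(the size parameters stand in for sizeof(struct fcp_cmnd) and
sizeof(struct fcp_rsp)):

	#include <stddef.h>
	#include <stdint.h>

	struct fcp_buf_layout {
		uint64_t cmd;	/* FCP command: start of the allocation */
		uint64_t rsp;	/* FCP response: immediately after the command */
		uint64_t bpl;	/* buffer pointer list: after the response */
	};

	static struct fcp_buf_layout
	fcp_buf_layout(uint64_t dma_handle, size_t cmd_sz, size_t rsp_sz)
	{
		struct fcp_buf_layout l;

		l.cmd = dma_handle;
		l.rsp = dma_handle + cmd_sz;
		l.bpl = dma_handle + cmd_sz + rsp_sz;
		return l;
	}
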
+/**
+ * lpfc_get_scsi_buf: Get a scsi buffer from lpfc_scsi_buf_list list of Hba.
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine removes a scsi buffer from the head of @phba's
+ * lpfc_scsi_buf_list and returns it to the caller.
+ *
+ * Return codes:
+ * NULL - Error
+ * Pointer to lpfc_scsi_buf - Success
+ **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
@@ -289,11 +585,20 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
if (lpfc_cmd) {
lpfc_cmd->seg_cnt = 0;
lpfc_cmd->nonsg_phys = 0;
+ lpfc_cmd->prot_seg_cnt = 0;
}
spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
return lpfc_cmd;
}
+/**
+ * lpfc_release_scsi_buf: Return a scsi buffer back to hba lpfc_scsi_buf_list list.
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is being released.
+ *
+ * This routine releases @psb scsi buffer by adding it to the tail of @phba's
+ * lpfc_scsi_buf_list.
+ **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
@@ -305,6 +610,20 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}
+/**
+ * lpfc_scsi_prep_dma_buf: Routine to do DMA mapping for scsi buffer.
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
+ * field of @lpfc_cmd. This routine scans through the sg elements and formats
+ * the BDEs. This routine also initializes all IOCB fields which are dependent
+ * on the scsi command request buffer.
+ *
+ * Return codes:
+ * 1 - Error
+ * 0 - Success
+ **/
static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
@@ -313,8 +632,9 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+ struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
dma_addr_t physaddr;
- uint32_t i, num_bde = 0;
+ uint32_t num_bde = 0;
int nseg, datadir = scsi_cmnd->sc_data_direction;
/*
@@ -340,7 +660,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
lpfc_cmd->seg_cnt = nseg;
if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
printk(KERN_ERR "%s: Too many sg segments from "
- "dma_map_sg. Config %d, seg_cnt %d",
+ "dma_map_sg. Config %d, seg_cnt %d\n",
__func__, phba->cfg_sg_seg_cnt,
lpfc_cmd->seg_cnt);
scsi_dma_unmap(scsi_cmnd);
@@ -352,19 +672,581 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
* during probe that limits the number of sg elements in any
* single scsi command. Just run through the seg_cnt and format
* the bde's.
+ * When using SLI-3 the driver will try to fit all the BDEs into
+ * the IOCB. If it can't then the BDEs get added to a BPL as it
+ * does for SLI-2 mode.
*/
- scsi_for_each_sg(scsi_cmnd, sgel, nseg, i) {
+ scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
physaddr = sg_dma_address(sgel);
- bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
- bpl->tus.f.bdeSize = sg_dma_len(sgel);
+ if (phba->sli_rev == 3 &&
+ !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
+ nseg <= LPFC_EXT_DATA_BDE_COUNT) {
+ data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ data_bde->tus.f.bdeSize = sg_dma_len(sgel);
+ data_bde->addrLow = putPaddrLow(physaddr);
+ data_bde->addrHigh = putPaddrHigh(physaddr);
+ data_bde++;
+ } else {
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bpl->tus.f.bdeSize = sg_dma_len(sgel);
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ bpl->addrLow =
+ le32_to_cpu(putPaddrLow(physaddr));
+ bpl->addrHigh =
+ le32_to_cpu(putPaddrHigh(physaddr));
+ bpl++;
+ }
+ }
+ }
+
+ /*
+ * Finish initializing those IOCB fields that are dependent on the
+ * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
+ * explicitly reinitialized and for SLI-3 the extended bde count is
+ * explicitly reinitialized since all iocb memory resources are reused.
+ */
+ if (phba->sli_rev == 3 &&
+ !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
+ if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
+ /*
+ * The extended IOCB format can only fit 3 BDE or a BPL.
+ * This I/O has more than 3 BDE so the 1st data bde will
+ * be a BPL that is filled in here.
+ */
+ physaddr = lpfc_cmd->dma_handle;
+ data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
+ data_bde->tus.f.bdeSize = (num_bde *
+ sizeof(struct ulp_bde64));
+ physaddr += (sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp) +
+ (2 * sizeof(struct ulp_bde64)));
+ data_bde->addrHigh = putPaddrHigh(physaddr);
+ data_bde->addrLow = putPaddrLow(physaddr);
+			/* ebde count includes the response bde and data bpl */
+ iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
+ } else {
+			/* ebde count includes the response bde and data bdes */
+ iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
+ }
+ } else {
+ iocb_cmd->un.fcpi64.bdl.bdeSize =
+ ((num_bde + 2) * sizeof(struct ulp_bde64));
+ }
+ fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
+
+ /*
+ * Due to difference in data length between DIF/non-DIF paths,
+ * we need to set word 4 of IOCB here
+ */
+ iocb_cmd->un.fcpi.fcpi_parm = le32_to_cpu(scsi_bufflen(scsi_cmnd));
+ return 0;
+}
+
+/*
+ * Given a scsi cmnd, determine the BlockGuard profile to be used
+ * with the cmd
+ */
+static int
+lpfc_sc_to_sli_prof(struct scsi_cmnd *sc)
+{
+ uint8_t guard_type = scsi_host_get_guard(sc->device->host);
+ uint8_t ret_prof = LPFC_PROF_INVALID;
+
+ if (guard_type == SHOST_DIX_GUARD_IP) {
+ switch (scsi_get_prot_op(sc)) {
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
+ ret_prof = LPFC_PROF_AST2;
+ break;
+
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_WRITE_INSERT:
+ ret_prof = LPFC_PROF_A1;
+ break;
+
+ case SCSI_PROT_READ_CONVERT:
+ case SCSI_PROT_WRITE_CONVERT:
+ ret_prof = LPFC_PROF_AST1;
+ break;
+
+ case SCSI_PROT_READ_PASS:
+ case SCSI_PROT_WRITE_PASS:
+ case SCSI_PROT_NORMAL:
+ default:
+ printk(KERN_ERR "Bad op/guard:%d/%d combination\n",
+ scsi_get_prot_op(sc), guard_type);
+ break;
+
+ }
+ } else if (guard_type == SHOST_DIX_GUARD_CRC) {
+ switch (scsi_get_prot_op(sc)) {
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_WRITE_INSERT:
+ ret_prof = LPFC_PROF_A1;
+ break;
+
+ case SCSI_PROT_READ_PASS:
+ case SCSI_PROT_WRITE_PASS:
+ ret_prof = LPFC_PROF_C1;
+ break;
+
+ case SCSI_PROT_READ_CONVERT:
+ case SCSI_PROT_WRITE_CONVERT:
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
+ case SCSI_PROT_NORMAL:
+ default:
+ printk(KERN_ERR "Bad op/guard:%d/%d combination\n",
+ scsi_get_prot_op(sc), guard_type);
+ break;
+ }
+ } else {
+ /* unsupported format */
+ BUG();
+ }
+
+ return ret_prof;
+}
+
+struct scsi_dif_tuple {
+ __be16 guard_tag; /* Checksum */
+ __be16 app_tag; /* Opaque storage */
+ __be32 ref_tag; /* Target LBA or indirect LBA */
+};
+
+static inline unsigned
+lpfc_cmd_blksize(struct scsi_cmnd *sc)
+{
+ return sc->device->sector_size;
+}
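
Each protected data block on the wire is followed by the 8-byte tuple defined
above; per SBC, when the command carries no explicit protection data the
reference tag defaults to the low 32 bits of the starting LBA, which is exactly
what lpfc_get_cmd_dif_parms falls back to below. A sketch of the layout and the
default ref tag (note the on-wire fields are big-endian __be16/__be32; plain
integers are used here for illustration):

	#include <assert.h>
	#include <stdint.h>

	struct dif_tuple {
		uint16_t guard_tag;	/* CRC (or IP checksum) of the data block */
		uint16_t app_tag;	/* opaque application storage */
		uint32_t ref_tag;	/* typically the low 32 bits of the LBA */
	};

	static uint32_t default_ref_tag(uint64_t lba)
	{
		return (uint32_t)(lba & 0xffffffffu);	/* SBC: lower 32 bits */
	}

	int main(void)
	{
		assert(sizeof(struct dif_tuple) == 8);	/* one tuple per logical block */
		assert(default_ref_tag(0x100000001ULL) == 1u);
		return 0;
	}
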
+
+/**
+ * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command
+ * @sc: (in) SCSI command
+ * @apptagmask: (out) app tag mask
+ * @apptagval: (out) app tag value
+ * @reftag: (out) ref tag (reference tag)
+ *
+ * Description:
+ * Extract DIF parameters from the command if possible. Otherwise,
+ * use default parameters.
+ *
+ **/
+static inline void
+lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
+ uint16_t *apptagval, uint32_t *reftag)
+{
+ struct scsi_dif_tuple *spt;
+ unsigned char op = scsi_get_prot_op(sc);
+ unsigned int protcnt = scsi_prot_sg_count(sc);
+ static int cnt;
+
+ if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
+ op == SCSI_PROT_WRITE_PASS ||
+ op == SCSI_PROT_WRITE_CONVERT)) {
+
+ cnt++;
+ spt = page_address(sg_page(scsi_prot_sglist(sc))) +
+ scsi_prot_sglist(sc)[0].offset;
+ *apptagmask = 0;
+ *apptagval = 0;
+ *reftag = cpu_to_be32(spt->ref_tag);
+
+ } else {
+ /* SBC defines ref tag to be lower 32bits of LBA */
+ *reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc));
+ *apptagmask = 0;
+ *apptagval = 0;
+ }
+}
+
+/*
+ * This function sets up buffer list for protection groups of
+ * type LPFC_PG_TYPE_NO_DIF
+ *
+ * This is usually used when the HBA is instructed to generate
+ * DIFs and insert them into the data stream (or strip DIFs from the
+ * incoming data stream).
+ *
+ * The buffer list consists of just one protection group described
+ * below:
+ * +-------------------------+
+ * start of prot group --> | PDE_1 |
+ * +-------------------------+
+ * | Data BDE |
+ * +-------------------------+
+ * |more Data BDE's ... (opt)|
+ * +-------------------------+
+ *
+ * @sc: pointer to scsi command we're working on
+ * @bpl: pointer to buffer list for protection groups
+ * @datasegcnt: number of segments of data that have been dma mapped
+ *
+ * Note: Data s/g buffers have been dma mapped
+ */
+static int
+lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ struct ulp_bde64 *bpl, int datasegcnt)
+{
+ struct scatterlist *sgde = NULL; /* s/g data entry */
+ struct lpfc_pde *pde1 = NULL;
+ dma_addr_t physaddr;
+ int i = 0, num_bde = 0;
+ int datadir = sc->sc_data_direction;
+ int prof = LPFC_PROF_INVALID;
+ unsigned blksize;
+ uint32_t reftag;
+ uint16_t apptagmask, apptagval;
+
+ pde1 = (struct lpfc_pde *) bpl;
+ prof = lpfc_sc_to_sli_prof(sc);
+
+ if (prof == LPFC_PROF_INVALID)
+ goto out;
+
+	/* extract some info from the scsi command for PDE1 */
+ blksize = lpfc_cmd_blksize(sc);
+ lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
+
+ /* setup PDE1 with what we have */
+ lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
+ BG_EC_STOP_ERR);
+ lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
+
+ num_bde++;
+ bpl++;
+
+ /* assumption: caller has already run dma_map_sg on command data */
+ scsi_for_each_sg(sc, sgde, datasegcnt, i) {
+ physaddr = sg_dma_address(sgde);
+ bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
+ bpl->tus.f.bdeSize = sg_dma_len(sgde);
+ if (datadir == DMA_TO_DEVICE)
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ else
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ bpl++;
+ num_bde++;
+ }
+
+out:
+ return num_bde;
+}
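+
+/*
+ * As a sketch of the result (the segment count here is hypothetical): a
+ * WRITE_INSERT with two dma-mapped data segments leaves num_bde == 3:
+ *
+ *	bpl[0]: PDE_1 (profile, block size, app/ref tag seed)
+ *	bpl[1]: data BDE for segment 0 (BUFF_TYPE_BDE_64)
+ *	bpl[2]: data BDE for segment 1 (BUFF_TYPE_BDE_64)
+ */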
+
+/*
+ * This function sets up buffer list for protection groups of
+ * type LPFC_PG_TYPE_DIF_BUF
+ *
+ * This is usually used when DIFs are in their own buffers,
+ * separate from the data. The HBA can then be instructed
+ * to place the DIFs in the outgoing stream. For read operations,
+ * the HBA could extract the DIFs and place them in DIF buffers.
+ *
+ * The buffer list for this type consists of one or more of the
+ * protection groups described below:
+ * +-------------------------+
+ * start of first prot group --> | PDE_1 |
+ * +-------------------------+
+ * | PDE_3 (Prot BDE) |
+ * +-------------------------+
+ * | Data BDE |
+ * +-------------------------+
+ * |more Data BDE's ... (opt)|
+ * +-------------------------+
+ * start of new prot group --> | PDE_1 |
+ * +-------------------------+
+ * | ... |
+ * +-------------------------+
+ *
+ * @sc: pointer to scsi command we're working on
+ * @bpl: pointer to buffer list for protection groups
+ * @datacnt: number of segments of data that have been dma mapped
+ * @protcnt: number of segments of protection data that have been dma mapped
+ *
+ * Note: It is assumed that both data and protection s/g buffers have been
+ * mapped for DMA
+ */
+static int
+lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+ struct ulp_bde64 *bpl, int datacnt, int protcnt)
+{
+ struct scatterlist *sgde = NULL; /* s/g data entry */
+ struct scatterlist *sgpe = NULL; /* s/g prot entry */
+ struct lpfc_pde *pde1 = NULL;
+ struct ulp_bde64 *prot_bde = NULL;
+ dma_addr_t dataphysaddr, protphysaddr;
+ unsigned short curr_data = 0, curr_prot = 0;
+ unsigned int split_offset, protgroup_len;
+ unsigned int protgrp_blks, protgrp_bytes;
+ unsigned int remainder, subtotal;
+ int prof = LPFC_PROF_INVALID;
+ int datadir = sc->sc_data_direction;
+ unsigned char pgdone = 0, alldone = 0;
+ unsigned blksize;
+ uint32_t reftag;
+ uint16_t apptagmask, apptagval;
+ int num_bde = 0;
+
+ sgpe = scsi_prot_sglist(sc);
+ sgde = scsi_sglist(sc);
+
+ if (!sgpe || !sgde) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
+ sgpe, sgde);
+ return 0;
+ }
+
+ prof = lpfc_sc_to_sli_prof(sc);
+ if (prof == LPFC_PROF_INVALID)
+ goto out;
+
+ /* extract some info from the scsi command for PDE1*/
+ blksize = lpfc_cmd_blksize(sc);
+ lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
+
+ split_offset = 0;
+ do {
+ /* setup the first PDE_1 */
+ pde1 = (struct lpfc_pde *) bpl;
+
+ lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
+ BG_EC_STOP_ERR);
+ lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
+
+ num_bde++;
+ bpl++;
+
+ /* setup the first BDE that points to protection buffer */
+ prot_bde = (struct ulp_bde64 *) bpl;
+ protphysaddr = sg_dma_address(sgpe);
+ prot_bde->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
+ prot_bde->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
+ protgroup_len = sg_dma_len(sgpe);
+
+ /* must be integer multiple of the DIF block length */
+ BUG_ON(protgroup_len % 8);
+
+ protgrp_blks = protgroup_len / 8;
+ protgrp_bytes = protgrp_blks * blksize;
+
+ prot_bde->tus.f.bdeSize = protgroup_len;
+ if (datadir == DMA_TO_DEVICE)
+ prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ else
+ prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+ prot_bde->tus.w = le32_to_cpu(bpl->tus.w);
+
+ curr_prot++;
+ num_bde++;
+
+ /* setup BDE's for data blocks associated with DIF data */
+ pgdone = 0;
+ subtotal = 0; /* total bytes processed for current prot grp */
+ while (!pgdone) {
+ if (!sgde) {
+ printk(KERN_ERR "%s Invalid data segment\n",
+ __func__);
+ return 0;
+ }
+ bpl++;
+ dataphysaddr = sg_dma_address(sgde) + split_offset;
+ bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
+
+ remainder = sg_dma_len(sgde) - split_offset;
+
+ if ((subtotal + remainder) <= protgrp_bytes) {
+ /* we can use this whole buffer */
+ bpl->tus.f.bdeSize = remainder;
+ split_offset = 0;
+
+ if ((subtotal + remainder) == protgrp_bytes)
+ pgdone = 1;
+ } else {
+ /* must split this buffer with next prot grp */
+ bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
+ split_offset += bpl->tus.f.bdeSize;
+ }
+
+ subtotal += bpl->tus.f.bdeSize;
+
if (datadir == DMA_TO_DEVICE)
- bpl->tus.f.bdeFlags = 0;
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
else
- bpl->tus.f.bdeFlags = BUFF_USE_RCV;
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
bpl->tus.w = le32_to_cpu(bpl->tus.w);
- bpl++;
+
num_bde++;
+ curr_data++;
+
+ if (split_offset)
+ break;
+
+ /* Move to the next s/g segment if possible */
+ sgde = sg_next(sgde);
+ }
+
+ /* are we done ? */
+ if (curr_prot == protcnt) {
+ alldone = 1;
+ } else if (curr_prot < protcnt) {
+ /* advance to next prot buffer */
+ sgpe = sg_next(sgpe);
+ bpl++;
+
+ /* update the reference tag */
+ reftag += protgrp_blks;
+ } else {
+ /* if we're here, we have a bug */
+ printk(KERN_ERR "BLKGRD: bug in %s\n", __func__);
+ }
+
+ } while (!alldone);
+
+out:
+ return num_bde;
+}
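+
+/*
+ * Worked example with hypothetical sizes: for 512-byte blocks and one
+ * 64-byte protection segment, protgrp_blks = 64 / 8 = 8 tuples and
+ * protgrp_bytes = 8 * 512 = 4096. An 8192-byte transfer therefore spans
+ * two protection groups, and reftag is advanced by protgrp_blks (8)
+ * before the second group's PDE_1 is built.
+ */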
+/*
+ * Given a SCSI command that supports DIF, determine composition of protection
+ * groups involved in setting up buffer lists
+ *
+ * Returns the protection group type: LPFC_PG_TYPE_NO_DIF,
+ * LPFC_PG_TYPE_DIF_BUF, or LPFC_PG_TYPE_INVALID.
+ */
+static int
+lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
+{
+ int ret = LPFC_PG_TYPE_INVALID;
+ unsigned char op = scsi_get_prot_op(sc);
+
+ switch (op) {
+ case SCSI_PROT_READ_STRIP:
+ case SCSI_PROT_WRITE_INSERT:
+ ret = LPFC_PG_TYPE_NO_DIF;
+ break;
+ case SCSI_PROT_READ_INSERT:
+ case SCSI_PROT_WRITE_STRIP:
+ case SCSI_PROT_READ_PASS:
+ case SCSI_PROT_WRITE_PASS:
+ case SCSI_PROT_WRITE_CONVERT:
+ case SCSI_PROT_READ_CONVERT:
+ ret = LPFC_PG_TYPE_DIF_BUF;
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "9021 Unsupported protection op:%d\n", op);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * This is the protection/DIF aware version of
+ * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
+ * two functions eventually, but for now, it's here
+ */
+static int
+lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
+ struct lpfc_scsi_buf *lpfc_cmd)
+{
+ struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+ struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+ struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
+ IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+ uint32_t num_bde = 0;
+ int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
+ int prot_group_type = 0;
+ int diflen, fcpdl;
+ unsigned blksize;
+
+ /*
+ * Start the lpfc command prep by bumping the bpl beyond the fcp_cmnd
+ * and fcp_rsp regions to the first data bde entry
+ */
+ bpl += 2;
+ if (scsi_sg_count(scsi_cmnd)) {
+ /*
+ * The driver stores the segment count returned from pci_map_sg
+ * because this is a count of dma-mappings used to map the use_sg
+ * pages. They are not guaranteed to be the same for those
+ * architectures that implement an IOMMU.
+ */
+ datasegcnt = dma_map_sg(&phba->pcidev->dev,
+ scsi_sglist(scsi_cmnd),
+ scsi_sg_count(scsi_cmnd), datadir);
+ if (unlikely(!datasegcnt))
+ return 1;
+
+ lpfc_cmd->seg_cnt = datasegcnt;
+ if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+ printk(KERN_ERR "%s: Too many sg segments from "
+ "dma_map_sg. Config %d, seg_cnt %d\n",
+ __func__, phba->cfg_sg_seg_cnt,
+ lpfc_cmd->seg_cnt);
+ scsi_dma_unmap(scsi_cmnd);
+ return 1;
+ }
+
+ prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
+
+ switch (prot_group_type) {
+ case LPFC_PG_TYPE_NO_DIF:
+ num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
+ datasegcnt);
+ /* we should have 2 or more entries in buffer list */
+ if (num_bde < 2)
+ goto err;
+ break;
+ case LPFC_PG_TYPE_DIF_BUF:{
+ /*
+ * This type indicates that protection buffers are
+ * passed to the driver, so they need to be prepared
+ * for DMA
+ */
+ protsegcnt = dma_map_sg(&phba->pcidev->dev,
+ scsi_prot_sglist(scsi_cmnd),
+ scsi_prot_sg_count(scsi_cmnd), datadir);
+ if (unlikely(!protsegcnt)) {
+ scsi_dma_unmap(scsi_cmnd);
+ return 1;
+ }
+
+ lpfc_cmd->prot_seg_cnt = protsegcnt;
+ if (lpfc_cmd->prot_seg_cnt
+ > phba->cfg_prot_sg_seg_cnt) {
+ printk(KERN_ERR "%s: Too many prot sg segments "
+ "from dma_map_sg. Config %d,"
+ "prot_seg_cnt %d\n", __func__,
+ phba->cfg_prot_sg_seg_cnt,
+ lpfc_cmd->prot_seg_cnt);
+ dma_unmap_sg(&phba->pcidev->dev,
+ scsi_prot_sglist(scsi_cmnd),
+ scsi_prot_sg_count(scsi_cmnd),
+ datadir);
+ scsi_dma_unmap(scsi_cmnd);
+ return 1;
+ }
+
+ num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
+ datasegcnt, protsegcnt);
+ /* we should have 3 or more entries in buffer list */
+ if (num_bde < 3)
+ goto err;
+ break;
+ }
+ case LPFC_PG_TYPE_INVALID:
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "9022 Unexpected protection group %i\n",
+ prot_group_type);
+ return 1;
}
}
@@ -374,15 +1256,262 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
* reinitialized since all iocb memory resources are used many times
* for transmit, receive, and continuation bpl's.
*/
- iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
- iocb_cmd->un.fcpi64.bdl.bdeSize +=
- (num_bde * sizeof (struct ulp_bde64));
+ iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
+ iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
iocb_cmd->ulpBdeCount = 1;
iocb_cmd->ulpLe = 1;
- fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
+
+ fcpdl = scsi_bufflen(scsi_cmnd);
+
+ if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
+ /*
+ * We are in DIF Type 1 mode
+ * Every data block has an 8 byte DIF (trailer)
+ * attached to it. Must adjust the FCP data length
+ */
+ blksize = lpfc_cmd_blksize(scsi_cmnd);
+ diflen = (fcpdl / blksize) * 8;
+ fcpdl += diflen;
+ }
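+ /*
+ * Illustrative arithmetic: a 4096-byte transfer on a 512-byte-block
+ * device carries 4096 / 512 = 8 tuples, so diflen = 8 * 8 = 64 and
+ * fcpdl becomes 4096 + 64 = 4160 bytes on the wire.
+ */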
+ fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);
+
+ /*
+ * Due to difference in data length between DIF/non-DIF paths,
+ * we need to set word 4 of IOCB here
+ */
+ iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
+
return 0;
+err:
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "9023 Could not setup all needed BDE's"
+ "prot_group_type=%d, num_bde=%d\n",
+ prot_group_type, num_bde);
+ return 1;
+}
+
+/*
+ * This function checks for BlockGuard errors detected by
+ * the HBA. In case of errors, the ASC/ASCQ fields in the
+ * sense buffer will be set accordingly, paired with
+ * ILLEGAL_REQUEST to signal to the kernel that the HBA
+ * detected corruption.
+ *
+ * Returns:
+ * 0 - No error found
+ * 1 - BlockGuard error found
+ * -1 - Internal error (bad profile, ...etc)
+ */
+static int
+lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
+ struct lpfc_iocbq *pIocbOut)
+{
+ struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+ struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
+ int ret = 0;
+ uint32_t bghm = bgf->bghm;
+ uint32_t bgstat = bgf->bgstat;
+ uint64_t failing_sector = 0;
+
+ printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%lx "
+ "bgstat=0x%x bghm=0x%x\n",
+ cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
+ cmd->request->nr_sectors, bgstat, bghm);
+
+ spin_lock(&_dump_buf_lock);
+ if (!_dump_buf_done) {
+ printk(KERN_ERR "Saving Data for %u blocks to debugfs\n",
+ (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
+ lpfc_debug_save_data(cmd);
+
+ /* If we have a prot sgl, save the DIF buffer */
+ if (lpfc_prot_group_type(phba, cmd) ==
+ LPFC_PG_TYPE_DIF_BUF) {
+ printk(KERN_ERR "Saving DIF for %u blocks to debugfs\n",
+ (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
+ lpfc_debug_save_dif(cmd);
+ }
+
+ _dump_buf_done = 1;
+ }
+ spin_unlock(&_dump_buf_lock);
+
+ if (lpfc_bgs_get_invalid_prof(bgstat)) {
+ cmd->result = ScsiResult(DID_ERROR, 0);
+ printk(KERN_ERR "Invalid BlockGuard profile. bgstat:0x%x\n",
+ bgstat);
+ ret = (-1);
+ goto out;
+ }
+
+ if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
+ cmd->result = ScsiResult(DID_ERROR, 0);
+ printk(KERN_ERR "Invalid BlockGuard DIF Block. bgstat:0x%x\n",
+ bgstat);
+ ret = (-1);
+ goto out;
+ }
+
+ if (lpfc_bgs_get_guard_err(bgstat)) {
+ ret = 1;
+
+ scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, 0x1);
+ cmd->result = (DRIVER_SENSE|SUGGEST_DIE) << 24
+ | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+ phba->bg_guard_err_cnt++;
+ printk(KERN_ERR "BLKGRD: guard_tag error\n");
+ }
+
+ if (lpfc_bgs_get_reftag_err(bgstat)) {
+ ret = 1;
+
+ scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, 0x3);
+ cmd->result = (DRIVER_SENSE|SUGGEST_DIE) << 24
+ | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+ phba->bg_reftag_err_cnt++;
+ printk(KERN_ERR "BLKGRD: ref_tag error\n");
+ }
+
+ if (lpfc_bgs_get_apptag_err(bgstat)) {
+ ret = 1;
+
+ scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, 0x2);
+ cmd->result = (DRIVER_SENSE|SUGGEST_DIE) << 24
+ | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+ phba->bg_apptag_err_cnt++;
+ printk(KERN_ERR "BLKGRD: app_tag error\n");
+ }
+
+ if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
+ /*
+ * setup sense data descriptor 0 per SPC-4 as an information
+ * field, and put the failing LBA in it
+ */
+ cmd->sense_buffer[8] = 0; /* Information */
+ cmd->sense_buffer[9] = 0xa; /* Add. length */
+ do_div(bghm, cmd->device->sector_size);
+
+ failing_sector = scsi_get_lba(cmd);
+ failing_sector += bghm;
+
+ put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]);
+ }
+
+ if (!ret) {
+ /* No error was reported - problem in FW? */
+ cmd->result = ScsiResult(DID_ERROR, 0);
+ printk(KERN_ERR "BLKGRD: no errors reported!\n");
+ }
+
+out:
+ return ret;
+}
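+
+/*
+ * For reference, a guard error flagged above reaches the midlayer as
+ * descriptor-format sense data (per scsi_build_sense_buffer(1, ...)):
+ * sense key ILLEGAL_REQUEST (0x5), asc/ascq 0x10/0x01 ("logical block
+ * guard check failed"), with DRIVER_SENSE set and a DID_ABORT +
+ * SAM_STAT_CHECK_CONDITION result. The ascq values 0x2 and 0x3 map to
+ * the app tag and ref tag checks respectively.
+ */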
+
+/**
+ * lpfc_send_scsi_error_event: Posts an event when there is a SCSI error.
+ * @phba: Pointer to hba context object.
+ * @vport: Pointer to vport object.
+ * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
+ * @rsp_iocb: Pointer to response iocb object which reported error.
+ *
+ * This function posts an event when a SCSI command reports
+ * an error from the scsi device.
+ **/
+static void
+lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
+ struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
+{
+ struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
+ struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
+ uint32_t resp_info = fcprsp->rspStatus2;
+ uint32_t scsi_status = fcprsp->rspStatus3;
+ uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
+ struct lpfc_fast_path_event *fast_path_evt = NULL;
+ struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
+ unsigned long flags;
+
+ /* If there is queuefull or busy condition send a scsi event */
+ if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
+ (cmnd->result == SAM_STAT_BUSY)) {
+ fast_path_evt = lpfc_alloc_fast_evt(phba);
+ if (!fast_path_evt)
+ return;
+ fast_path_evt->un.scsi_evt.event_type =
+ FC_REG_SCSI_EVENT;
+ fast_path_evt->un.scsi_evt.subcategory =
+ (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
+ LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
+ fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
+ memcpy(&fast_path_evt->un.scsi_evt.wwpn,
+ &pnode->nlp_portname, sizeof(struct lpfc_name));
+ memcpy(&fast_path_evt->un.scsi_evt.wwnn,
+ &pnode->nlp_nodename, sizeof(struct lpfc_name));
+ } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
+ ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
+ fast_path_evt = lpfc_alloc_fast_evt(phba);
+ if (!fast_path_evt)
+ return;
+ fast_path_evt->un.check_cond_evt.scsi_event.event_type =
+ FC_REG_SCSI_EVENT;
+ fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
+ LPFC_EVENT_CHECK_COND;
+ fast_path_evt->un.check_cond_evt.scsi_event.lun =
+ cmnd->device->lun;
+ memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
+ &pnode->nlp_portname, sizeof(struct lpfc_name));
+ memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
+ &pnode->nlp_nodename, sizeof(struct lpfc_name));
+ fast_path_evt->un.check_cond_evt.sense_key =
+ cmnd->sense_buffer[2] & 0xf;
+ fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
+ fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
+ } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
+ fcpi_parm &&
+ ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
+ ((scsi_status == SAM_STAT_GOOD) &&
+ !(resp_info & (RESID_UNDER | RESID_OVER))))) {
+ /*
+ * If the status is good or the resid does not match fcpi_parm, and
+ * fcpi_parm is valid, then there is a read_check error
+ */
+ fast_path_evt = lpfc_alloc_fast_evt(phba);
+ if (!fast_path_evt)
+ return;
+ fast_path_evt->un.read_check_error.header.event_type =
+ FC_REG_FABRIC_EVENT;
+ fast_path_evt->un.read_check_error.header.subcategory =
+ LPFC_EVENT_FCPRDCHKERR;
+ memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
+ &pnode->nlp_portname, sizeof(struct lpfc_name));
+ memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
+ &pnode->nlp_nodename, sizeof(struct lpfc_name));
+ fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
+ fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
+ fast_path_evt->un.read_check_error.fcpiparam =
+ fcpi_parm;
+ } else
+ return;
+
+ fast_path_evt->vport = vport;
+ spin_lock_irqsave(&phba->hbalock, flags);
+ list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_worker_wake_up(phba);
+ return;
}
+/**
+ * lpfc_scsi_unprep_dma_buf: Routine to un-map DMA mapping of scatter gather.
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is going to be un-mapped.
+ *
+ * This routine does DMA un-mapping of the scatter gather list of the
+ * scsi command held in @psb.
+ **/
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
@@ -394,8 +1523,22 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
*/
if (psb->seg_cnt > 0)
scsi_dma_unmap(psb->pCmd);
+ if (psb->prot_seg_cnt > 0)
+ dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
+ scsi_prot_sg_count(psb->pCmd),
+ psb->pCmd->sc_data_direction);
}
+/**
+ * lpfc_handle_fcp_err: FCP response handler.
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @rsp_iocb: The response IOCB which contains FCP error.
+ *
+ * This routine is called to process a response IOCB with status field
+ * IOSTAT_FCP_RSP_ERROR. It sets the result field of the scsi command
+ * based upon the SCSI and FCP error.
+ **/
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
struct lpfc_iocbq *rsp_iocb)
@@ -411,6 +1554,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
uint32_t rsplen = 0;
uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
+
/*
* If this is a task management command, there is no
* scsi packet associated with this lpfc_cmd. The driver
@@ -436,7 +1580,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
logit = LOG_FCP;
lpfc_printf_vlog(vport, KERN_WARNING, logit,
- "0730 FCP command x%x failed: x%x SNS x%x x%x "
+ "9024 FCP command x%x failed: x%x SNS x%x x%x "
"Data: x%x x%x x%x x%x x%x\n",
cmnd->cmnd[0], scsi_status,
be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
@@ -459,7 +1603,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
- "0716 FCP Read Underrun, expected %d, "
+ "9025 FCP Read Underrun, expected %d, "
"residual %d Data: x%x x%x x%x\n",
be32_to_cpu(fcpcmd->fcpDl),
scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
@@ -475,7 +1619,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
(scsi_get_resid(cmnd) != fcpi_parm)) {
lpfc_printf_vlog(vport, KERN_WARNING,
LOG_FCP | LOG_FCP_ERROR,
- "0735 FCP Read Check Error "
+ "9026 FCP Read Check Error "
"and Underrun Data: x%x x%x x%x x%x\n",
be32_to_cpu(fcpcmd->fcpDl),
scsi_get_resid(cmnd), fcpi_parm,
@@ -494,7 +1638,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
(scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
< cmnd->underflow)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
- "0717 FCP command x%x residual "
+ "9027 FCP command x%x residual "
"underrun converted to error "
"Data: x%x x%x x%x\n",
cmnd->cmnd[0], scsi_bufflen(cmnd),
@@ -503,7 +1647,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
}
} else if (resp_info & RESID_OVER) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
- "0720 FCP command x%x residual overrun error. "
+ "9028 FCP command x%x residual overrun error. "
"Data: x%x x%x \n", cmnd->cmnd[0],
scsi_bufflen(cmnd), scsi_get_resid(cmnd));
host_status = DID_ERROR;
@@ -515,7 +1659,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
(cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
- "0734 FCP Read Check Error Data: "
+ "9029 FCP Read Check Error Data: "
"x%x x%x x%x x%x\n",
be32_to_cpu(fcpcmd->fcpDl),
be32_to_cpu(fcprsp->rspResId),
@@ -526,8 +1670,19 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
out:
cmnd->result = ScsiResult(host_status, scsi_status);
+ lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
}
+/**
+ * lpfc_scsi_cmd_iocb_cmpl: Scsi cmnd IOCB completion routine.
+ * @phba: The Hba for which this call is being executed.
+ * @pIocbIn: The command IOCBQ for the scsi cmnd.
+ * @pIocbOut: The response IOCBQ for the scsi cmnd.
+ *
+ * This routine assigns the scsi command result by examining the response
+ * IOCB status field. It also handles the QUEUE FULL condition by ramping
+ * down the device queue depth.
+ **/
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct lpfc_iocbq *pIocbOut)
@@ -542,9 +1697,12 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct scsi_device *sdev, *tmp_sdev;
int depth = 0;
unsigned long flags;
+ struct lpfc_fast_path_event *fast_path_evt;
lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
+ if (pnode && NLP_CHK_NODE_ACT(pnode))
+ atomic_dec(&pnode->cmd_pending);
if (lpfc_cmd->status) {
if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
@@ -554,7 +1712,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->status = IOSTAT_DEFAULT;
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
- "0729 FCP cmd x%x failed <%d/%d> "
+ "9030 FCP cmd x%x failed <%d/%d> "
"status: x%x result: x%x Data: x%x x%x\n",
cmd->cmnd[0],
cmd->device ? cmd->device->id : 0xffff,
@@ -570,15 +1728,60 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
break;
case IOSTAT_NPORT_BSY:
case IOSTAT_FABRIC_BSY:
- cmd->result = ScsiResult(DID_BUS_BUSY, 0);
+ cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
+ fast_path_evt = lpfc_alloc_fast_evt(phba);
+ if (!fast_path_evt)
+ break;
+ fast_path_evt->un.fabric_evt.event_type =
+ FC_REG_FABRIC_EVENT;
+ fast_path_evt->un.fabric_evt.subcategory =
+ (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
+ LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
+ if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+ memcpy(&fast_path_evt->un.fabric_evt.wwpn,
+ &pnode->nlp_portname,
+ sizeof(struct lpfc_name));
+ memcpy(&fast_path_evt->un.fabric_evt.wwnn,
+ &pnode->nlp_nodename,
+ sizeof(struct lpfc_name));
+ }
+ fast_path_evt->vport = vport;
+ fast_path_evt->work_evt.evt =
+ LPFC_EVT_FASTPATH_MGMT_EVT;
+ spin_lock_irqsave(&phba->hbalock, flags);
+ list_add_tail(&fast_path_evt->work_evt.evt_listp,
+ &phba->work_list);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_worker_wake_up(phba);
break;
case IOSTAT_LOCAL_REJECT:
- if (lpfc_cmd->result == RJT_UNAVAIL_PERM ||
+ if (lpfc_cmd->result == IOERR_INVALID_RPI ||
lpfc_cmd->result == IOERR_NO_RESOURCES ||
- lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
+ lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
cmd->result = ScsiResult(DID_REQUEUE, 0);
break;
- } /* else: fall through */
+ }
+
+ if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
+ lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
+ pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
+ if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
+ /*
+ * This is a response for a BG enabled
+ * cmd. Parse BG error
+ */
+ lpfc_parse_bg_err(phba, lpfc_cmd,
+ pIocbOut);
+ break;
+ } else {
+ lpfc_printf_vlog(vport, KERN_WARNING,
+ LOG_BG,
+ "9031 non-zero BGSTAT "
+ "on unprotected cmd");
+ }
+ }
+
+ /* else: fall through */
default:
cmd->result = ScsiResult(DID_ERROR, 0);
break;
@@ -586,7 +1789,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
if (!pnode || !NLP_CHK_NODE_ACT(pnode)
|| (pnode->nlp_state != NLP_STE_MAPPED_NODE))
- cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
+ cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
+ SAM_STAT_BUSY);
} else {
cmd->result = ScsiResult(DID_OK, 0);
}
@@ -602,8 +1806,40 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
scsi_get_resid(cmd));
}
+ lpfc_update_stats(phba, lpfc_cmd);
result = cmd->result;
sdev = cmd->device;
+ if (vport->cfg_max_scsicmpl_time &&
+ time_after(jiffies, lpfc_cmd->start_time +
+ msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
+ spin_lock_irqsave(sdev->host->host_lock, flags);
+ if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+ if (pnode->cmd_qdepth >
+ atomic_read(&pnode->cmd_pending) &&
+ (atomic_read(&pnode->cmd_pending) >
+ LPFC_MIN_TGT_QDEPTH) &&
+ ((cmd->cmnd[0] == READ_10) ||
+ (cmd->cmnd[0] == WRITE_10)))
+ pnode->cmd_qdepth =
+ atomic_read(&pnode->cmd_pending);
+
+ pnode->last_change_time = jiffies;
+ }
+ spin_unlock_irqrestore(sdev->host->host_lock, flags);
+ } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
+ if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
+ time_after(jiffies, pnode->last_change_time +
+ msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
+ spin_lock_irqsave(sdev->host->host_lock, flags);
+ pnode->cmd_qdepth += pnode->cmd_qdepth *
+ LPFC_TGTQ_RAMPUP_PCENT / 100;
+ if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
+ pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
+ pnode->last_change_time = jiffies;
+ spin_unlock_irqrestore(sdev->host->host_lock, flags);
+ }
+ }
+
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
cmd->scsi_done(cmd);
@@ -647,6 +1883,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
pnode->last_ramp_up_time = jiffies;
}
}
+ lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
+ 0xFFFFFFFF,
+ sdev->queue_depth - 1, sdev->queue_depth);
}
/*
@@ -676,6 +1915,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"0711 detected queue full - lun queue "
"depth adjusted to %d.\n", depth);
+ lpfc_send_sdev_queuedepth_change_event(phba, vport,
+ pnode, 0xFFFFFFFF,
+ depth+1, depth);
}
}
@@ -692,6 +1934,33 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_release_scsi_buf(phba, lpfc_cmd);
}
+/**
+ * lpfc_fcpcmd_to_iocb: Copy the fcp_cmnd data into the IOCB.
+ * @data: A pointer to the immediate command data portion of the IOCB.
+ * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
+ *
+ * The routine copies the entire FCP command from @fcp_cmnd to @data while
+ * byte swapping the data to big endian format for transmission on the wire.
+ **/
+static void
+lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
+{
+ int i, j;
+ for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
+ i += sizeof(uint32_t), j++) {
+ ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
+ }
+}
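+
+/*
+ * On a little-endian host each 32-bit word of the FCP command is
+ * byte-swapped by cpu_to_be32() so the bytes go out on the wire in
+ * big-endian order; on a big-endian host the copy degenerates to a
+ * straight word copy. A minimal sketch of a call site (mirroring the
+ * real one added below):
+ *
+ *	lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
+ */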
+
+/**
+ * lpfc_scsi_prep_cmnd: Routine to convert scsi cmnd to FCP information unit.
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: The scsi command which needs to be sent.
+ * @pnode: Pointer to lpfc_nodelist.
+ *
+ * This routine initializes the fcp_cmnd and iocb data structures from the
+ * scsi command to be transferred.
+ **/
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
struct lpfc_nodelist *pnode)
@@ -747,7 +2016,6 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
} else {
iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
iocb_cmd->ulpPU = PARM_READ_CHECK;
- iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
fcp_cmnd->fcpCntl3 = READ_DATA;
phba->fc4InputRequests++;
}
@@ -758,7 +2026,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
fcp_cmnd->fcpCntl3 = 0;
phba->fc4ControlRequests++;
}
-
+ if (phba->sli_rev == 3 &&
+ !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
+ lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
/*
* Finish initializing those IOCB fields that are independent
* of the scsi_cmnd request_buffer
@@ -776,6 +2046,19 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
piocbq->vport = vport;
}
+/**
+ * lpfc_scsi_prep_task_mgmt_cmd: Convert scsi TM cmnd to FCP information unit.
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @lun: Logical unit number.
+ * @task_mgmt_cmd: SCSI task management command.
+ *
+ * This routine creates FCP information unit corresponding to @task_mgmt_cmd.
+ *
+ * Return codes:
+ * 0 - Error
+ * 1 - Success
+ **/
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
struct lpfc_scsi_buf *lpfc_cmd,
@@ -798,11 +2081,14 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
piocb = &piocbq->iocb;
fcp_cmnd = lpfc_cmd->fcp_cmnd;
- int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun);
+ /* Clear out any old data in the FCP command area */
+ memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+ int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
-
+ if (vport->phba->sli_rev == 3 &&
+ !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
+ lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
piocb->ulpCommand = CMD_FCP_ICMND64_CR;
-
piocb->ulpContext = ndlp->nlp_rpi;
if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
piocb->ulpFCP2Rcvy = 1;
@@ -823,6 +2109,15 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
return 1;
}
+/**
+ * lpfc_tskmgmt_def_cmpl: IOCB completion routine for task management command.
+ * @phba: The Hba for which this call is being executed.
+ * @cmdiocbq: Pointer to lpfc_iocbq data structure.
+ * @rspiocbq: Pointer to lpfc_iocbq data structure.
+ *
+ * This routine is the IOCB completion routine for device reset and target
+ * reset. It releases the scsi buffer associated with @lpfc_cmd.
+ **/
static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocbq,
@@ -835,6 +2130,20 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
return;
}
+/**
+ * lpfc_scsi_tgt_reset: Target reset handler.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure
+ * @vport: The virtual port for which this call is being executed.
+ * @tgt_id: Target ID.
+ * @lun: Lun number.
+ * @rdata: Pointer to lpfc_rport_data.
+ *
+ * This routine issues a TARGET RESET iocb to reset a target with @tgt_id ID.
+ *
+ * Return Code:
+ * 0x2003 - Error
+ * 0x2002 - Success.
+ **/
static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
unsigned tgt_id, unsigned int lun,
@@ -888,6 +2197,15 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
return ret;
}
+/**
+ * lpfc_info: Info entry point of scsi_host_template data structure.
+ * @host: The scsi host for which this call is being executed.
+ *
+ * This routine provides module information about the hba.
+ *
+ * Return code:
+ * Pointer to char - Success.
+ **/
const char *
lpfc_info(struct Scsi_Host *host)
{
@@ -917,6 +2235,13 @@ lpfc_info(struct Scsi_Host *host)
return lpfcinfobuf;
}
+/**
+ * lpfc_poll_rearm_timer: Routine to modify the fcp_poll timer of the hba.
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine rearms the fcp_poll timer of @phba using cfg_poll_tmo.
+ * The default value of cfg_poll_tmo is 10 milliseconds.
+ **/
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
unsigned long poll_tmo_expires =
@@ -927,11 +2252,25 @@ static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
poll_tmo_expires);
}
+/**
+ * lpfc_poll_start_timer: Routine to start fcp_poll_timer of HBA.
+ * @phba: The Hba for which this call is being executed.
+ *
+ * This routine starts the fcp_poll_timer of @phba.
+ **/
void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
lpfc_poll_rearm_timer(phba);
}
+/**
+ * lpfc_poll_timeout: Restart polling timer.
+ * @ptr: Map to lpfc_hba data structure pointer.
+ *
+ * This routine restarts the fcp_poll timer when FCP ring polling is enabled
+ * and the FCP ring interrupt is disabled.
+ **/
void lpfc_poll_timeout(unsigned long ptr)
{
struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
@@ -943,6 +2282,20 @@ void lpfc_poll_timeout(unsigned long ptr)
}
}
+/**
+ * lpfc_queuecommand: Queuecommand entry point of Scsi Host Template data
+ * structure.
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ * @done: Pointer to done routine.
+ *
+ * Driver registers this routine with the scsi midlayer to submit a @cmnd to
+ * process. This routine prepares an IOCB from the scsi command and provides
+ * it to the firmware. The @done callback is invoked after the driver finishes
+ * processing the command.
+ *
+ * Return value :
+ * 0 - Success
+ * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
+ **/
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
@@ -962,17 +2315,32 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
goto out_fail_command;
}
+ if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
+ scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
+
+ printk(KERN_ERR "BLKGRD ERROR: rcvd protected cmd:%02x op:%02x "
+ "str=%s without registering for BlockGuard - "
+ "Rejecting command\n",
+ cmnd->cmnd[0], scsi_get_prot_op(cmnd),
+ dif_op_str[scsi_get_prot_op(cmnd)]);
+ goto out_fail_command;
+ }
+
/*
* Catch race where our node has transitioned, but the
* transport is still transitioning.
*/
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
+ cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
goto out_fail_command;
}
+ if (vport->cfg_max_scsicmpl_time &&
+ (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth))
+ goto out_host_busy;
+
lpfc_cmd = lpfc_get_scsi_buf(phba);
if (lpfc_cmd == NULL) {
- lpfc_adjust_queue_depth(phba);
+ lpfc_rampdown_queue_depth(phba);
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0707 driver's buffer pool is empty, "
@@ -987,20 +2355,81 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
lpfc_cmd->pCmd = cmnd;
lpfc_cmd->rdata = rdata;
lpfc_cmd->timeout = 0;
+ lpfc_cmd->start_time = jiffies;
cmnd->host_scribble = (unsigned char *)lpfc_cmd;
cmnd->scsi_done = done;
- err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
+ if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+ "9033 BLKGRD: rcvd protected cmd:%02x op:%02x "
+ "str=%s\n",
+ cmnd->cmnd[0], scsi_get_prot_op(cmnd),
+ dif_op_str[scsi_get_prot_op(cmnd)]);
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+ "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x \n",
+ cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
+ cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
+ cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
+ cmnd->cmnd[9]);
+ if (cmnd->cmnd[0] == READ_10)
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+ "9035 BLKGRD: READ @ sector %llu, "
+ "count %lu\n",
+ (unsigned long long)scsi_get_lba(cmnd),
+ cmnd->request->nr_sectors);
+ else if (cmnd->cmnd[0] == WRITE_10)
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+ "9036 BLKGRD: WRITE @ sector %llu, "
+ "count %lu cmd=%p\n",
+ (unsigned long long)scsi_get_lba(cmnd),
+ cmnd->request->nr_sectors,
+ cmnd);
+
+ err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
+ } else {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+ "9038 BLKGRD: rcvd unprotected cmd:%02x op:%02x"
+ " str=%s\n",
+ cmnd->cmnd[0], scsi_get_prot_op(cmnd),
+ dif_op_str[scsi_get_prot_op(cmnd)]);
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+ "9039 BLKGRD: CDB: %02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x \n",
+ cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
+ cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
+ cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
+ cmnd->cmnd[9]);
+ if (cmnd->cmnd[0] == READ_10)
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+ "9040 dbg: READ @ sector %llu, "
+ "count %lu\n",
+ (unsigned long long)scsi_get_lba(cmnd),
+ cmnd->request->nr_sectors);
+ else if (cmnd->cmnd[0] == WRITE_10)
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+ "9041 dbg: WRITE @ sector %llu, "
+ "count %lu cmd=%p\n",
+ (unsigned long long)scsi_get_lba(cmnd),
+ cmnd->request->nr_sectors, cmnd);
+ else
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+ "9042 dbg: parser not implemented\n");
+ err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
+ }
+
if (err)
goto out_host_busy_free_buf;
lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
+ atomic_inc(&ndlp->cmd_pending);
err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
&lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
- if (err)
+ if (err) {
+ atomic_dec(&ndlp->cmd_pending);
goto out_host_busy_free_buf;
-
+ }
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
lpfc_sli_poll_fcp_ring(phba);
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
@@ -1020,6 +2449,12 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
return 0;
}
+/**
+ * lpfc_block_error_handler: Routine to block error handler.
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine blocks execution until the fc_rport state is no longer
+ * FC_PORTSTATE_BLOCKED.
+ **/
static void
lpfc_block_error_handler(struct scsi_cmnd *cmnd)
{
@@ -1036,6 +2471,17 @@ lpfc_block_error_handler(struct scsi_cmnd *cmnd)
return;
}
+/**
+ * lpfc_abort_handler: eh_abort_handler entry point of Scsi Host Template data
+ * structure.
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine aborts @cmnd pending in base driver.
+ *
+ * Return code :
+ * 0x2003 - Error
+ * 0x2002 - Success
+ **/
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
@@ -1131,6 +2577,18 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
return ret;
}
+/**
+ * lpfc_device_reset_handler: eh_device_reset entry point of Scsi Host Template
+ * data structure.
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine does a device reset by sending a TARGET_RESET task management
+ * command.
+ *
+ * Return code :
+ * 0x2003 - Error
+ * 0x2002 - Success
+ **/
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
@@ -1145,6 +2603,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
int ret = SUCCESS;
int status;
int cnt;
+ struct lpfc_scsi_event_header scsi_event;
lpfc_block_error_handler(cmnd);
/*
@@ -1163,6 +2622,19 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
break;
pnode = rdata->pnode;
}
+
+ scsi_event.event_type = FC_REG_SCSI_EVENT;
+ scsi_event.subcategory = LPFC_EVENT_TGTRESET;
+ scsi_event.lun = 0;
+ memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
+ memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
+
+ fc_host_post_vendor_event(shost,
+ fc_get_event_number(),
+ sizeof(scsi_event),
+ (char *)&scsi_event,
+ LPFC_NL_VENDOR_ID);
+
if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0721 LUN Reset rport "
@@ -1234,6 +2706,17 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
return ret;
}
+/**
+ * lpfc_bus_reset_handler: eh_bus_reset_handler entry point of Scsi Host
+ * Template data structure.
+ * @cmnd: Pointer to scsi_cmnd data structure.
+ *
+ * This routine does a target reset to all targets on @cmnd->device->host.
+ *
+ * Return Code:
+ * 0x2003 - Error
+ * 0x2002 - Success
+ **/
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
@@ -1242,10 +2725,23 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL;
int match;
- int ret = SUCCESS, status, i;
+ int ret = SUCCESS, status = SUCCESS, i;
int cnt;
struct lpfc_scsi_buf * lpfc_cmd;
unsigned long later;
+ struct lpfc_scsi_event_header scsi_event;
+
+ scsi_event.event_type = FC_REG_SCSI_EVENT;
+ scsi_event.subcategory = LPFC_EVENT_BUSRESET;
+ scsi_event.lun = 0;
+ memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
+ memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
+
+ fc_host_post_vendor_event(shost,
+ fc_get_event_number(),
+ sizeof(scsi_event),
+ (char *)&scsi_event,
+ LPFC_NL_VENDOR_ID);
lpfc_block_error_handler(cmnd);
/*
@@ -1311,6 +2807,20 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
return ret;
}
+/**
+ * lpfc_slave_alloc: slave_alloc entry point of Scsi Host Template data
+ * structure.
+ * @sdev: Pointer to scsi_device.
+ *
+ * This routine populates cmds_per_lun count + 2 scsi_bufs into this host's
+ * globally available list of scsi buffers. This routine also makes sure that
+ * no more scsi buffers are allocated than the HBA limit conveyed to the
+ * midlayer. This list of scsi buffers exists for the lifetime of the driver.
+ *
+ * Return codes:
+ * non-0 - Error
+ * 0 - Success
+ **/
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
@@ -1372,6 +2882,19 @@ lpfc_slave_alloc(struct scsi_device *sdev)
return 0;
}
+/**
+ * lpfc_slave_configure: slave_configure entry point of Scsi Host Template data
+ * structure.
+ * @sdev: Pointer to scsi_device.
+ *
+ * This routine configures the following items:
+ * - Tag command queuing support for @sdev if supported.
+ * - Dev loss time out value of fc_rport.
+ * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
+ *
+ * Return codes:
+ * 0 - Success
+ **/
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
@@ -1401,6 +2924,12 @@ lpfc_slave_configure(struct scsi_device *sdev)
return 0;
}
+/**
+ * lpfc_slave_destroy: slave_destroy entry point of SHT data structure.
+ * @sdev: Pointer to scsi_device.
+ *
+ * This routine sets the @sdev hostdata field to null.
+ **/
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index daba92374985..c7c440d5fa29 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -107,6 +107,10 @@ struct fcp_cmnd {
};
+struct lpfc_scsicmd_bkt {
+ uint32_t cmd_count;
+};
+
struct lpfc_scsi_buf {
struct list_head list;
struct scsi_cmnd *pCmd;
@@ -120,6 +124,8 @@ struct lpfc_scsi_buf {
uint32_t seg_cnt; /* Number of scatter-gather segments returned by
* dma_map_sg. The driver needs this for calls
* to dma_unmap_sg. */
+ uint32_t prot_seg_cnt; /* seg_cnt's counterpart for protection data */
+
dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */
/*
@@ -139,6 +145,7 @@ struct lpfc_scsi_buf {
*/
struct lpfc_iocbq cur_iocbq;
wait_queue_head_t *waitq;
+ unsigned long start_time;
};
#define LPFC_SCSI_DMA_EXT_SIZE 264
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 50fe07646738..01dfdc8696f8 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -32,6 +32,7 @@
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@@ -66,10 +67,16 @@ typedef enum _lpfc_iocb_type {
LPFC_ABORT_IOCB
} lpfc_iocb_type;
- /* SLI-2/SLI-3 provide different sized iocbs. Given a pointer
- * to the start of the ring, and the slot number of the
- * desired iocb entry, calc a pointer to that entry.
- */
+/**
+ * lpfc_cmd_iocb: Get next command iocb entry in the ring.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function returns pointer to next command iocb entry
+ * in the command ring. The caller must hold hbalock to prevent
+ * other threads from consuming the next command iocb.
+ * SLI-2/SLI-3 provide different sized iocbs.
+ **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
@@ -77,6 +84,16 @@ lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
pring->cmdidx * phba->iocb_cmd_size);
}
+/**
+ * lpfc_resp_iocb: Get next response iocb entry in the ring.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function returns pointer to next response iocb entry
+ * in the response ring. The caller must hold hbalock to make sure
+ * that no other thread consumes the next response iocb.
+ * SLI-2/SLI-3 provide different sized iocbs.
+ **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
@@ -84,6 +101,15 @@ lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
pring->rspidx * phba->iocb_rsp_size);
}
+/**
+ * __lpfc_sli_get_iocbq: Allocates an iocb object from iocb pool.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with hbalock held. This function
+ * allocates a new driver iocb object from the iocb pool. If the
+ * allocation is successful, it returns pointer to the newly
+ * allocated iocb object else it returns NULL.
+ **/
static struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
@@ -94,6 +120,15 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
return iocbq;
}
+/**
+ * lpfc_sli_get_iocbq: Allocates an iocb object from iocb pool.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with no lock held. This function
+ * allocates a new driver iocb object from the iocb pool. If the
+ * allocation is successful, it returns pointer to the newly
+ * allocated iocb object else it returns NULL.
+ **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
@@ -106,6 +141,16 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
return iocbq;
}
+/**
+ * __lpfc_sli_release_iocbq: Release iocb to the iocb pool.
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function is called with hbalock held to release driver
+ * iocb object to the iocb pool. The iotag in the iocb object
+ * does not change for each use of the iocb object. This function
+ * clears all other fields of the iocb object when it is freed.
+ **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
@@ -118,6 +163,14 @@ __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
+/**
+ * lpfc_sli_release_iocbq: Release iocb to the iocb pool.
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function is called with no lock held to release the iocb to
+ * iocb pool.
+ **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
@@ -131,10 +184,21 @@ lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
spin_unlock_irqrestore(&phba->hbalock, iflags);
}
-/*
- * Translate the iocb command to an iocb command type used to decide the final
- * disposition of each completed IOCB.
- */
+/**
+ * lpfc_sli_iocb_cmd_type: Get the iocb type.
+ * @iocb_cmnd: iocb command code.
+ *
+ * This function is called by the ring event handler to get the iocb type.
+ * This function translates the iocb command to an iocb command type used to
+ * decide the final disposition of each completed IOCB.
+ * The function returns
+ * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
+ * LPFC_SOL_IOCB if it is a solicited iocb completion
+ * LPFC_ABORT_IOCB if it is an abort iocb
+ * LPFC_UNSOL_IOCB if it is an unsolicited iocb
+ *
+ * The caller is not required to hold any lock.
+ **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
@@ -230,6 +294,17 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
return type;
}
+/**
+ * lpfc_sli_ring_map: Issue config_ring mbox for all rings.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called from SLI initialization code
+ * to configure every ring of the HBA's SLI interface. The
+ * caller is not required to hold any lock. This function issues
+ * a config_ring mailbox command for each ring.
+ * This function returns zero if successful else returns a negative
+ * error code.
+ **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
@@ -262,6 +337,18 @@ lpfc_sli_ring_map(struct lpfc_hba *phba)
return ret;
}
+/**
+ * lpfc_sli_ringtxcmpl_put: Adds new iocb to the txcmplq.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @piocb: Pointer to the driver iocb object.
+ *
+ * This function is called with hbalock held. The function adds the
+ * new iocb to the txcmplq of the given ring. This function always returns
+ * 0. If it is called for the ELS ring, it checks whether there is a vport
+ * associated with the ELS command, and it also starts the els_tmofunc
+ * timer if this is an ELS command.
+ **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb)
@@ -282,6 +369,16 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return 0;
}
+/**
+ * lpfc_sli_ringtx_get: Get first element of the txq.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function is called with hbalock held to get the next
+ * iocb in the txq of the given ring. If there is any iocb in
+ * the txq, the function removes the first iocb from the list
+ * and returns it; otherwise it returns NULL.
+ **/
static struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
@@ -293,14 +390,25 @@ lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
return cmd_iocb;
}
+/**
+ * lpfc_sli_next_iocb_slot: Get next iocb slot in the ring.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function is called with hbalock held and the caller must post the
+ * iocb without releasing the lock. If the caller releases the lock,
+ * the iocb slot returned by the function is not guaranteed to be available.
+ * The function returns pointer to the next available iocb slot if there
+ * is available slot in the ring, else it returns NULL.
+ * If the get index of the ring is ahead of the put index, the function
+ * will post an error attention event to the worker thread to take the
+ * HBA to offline state.
+ **/
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
- struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
- &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
- &phba->slim2p->mbx.us.s2.port[pring->ringno];
+ struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
uint32_t max_cmd_idx = pring->numCiocb;
-
if ((pring->next_cmdidx == pring->cmdidx) &&
(++pring->next_cmdidx >= max_cmd_idx))
pring->next_cmdidx = 0;
@@ -336,6 +444,18 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
return lpfc_cmd_iocb(phba, pring);
}
+/**
+ * lpfc_sli_next_iotag: Get an iotag for the iocb.
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function gets an iotag for the iocb. If there is no unused iotag and
+ * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
+ * array and assigns a new iotag.
+ * The function returns the allocated iotag if successful, else returns zero.
+ * Zero is not a valid iotag.
+ * The caller is not required to hold any lock.
+ **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
@@ -399,6 +519,20 @@ lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
return 0;
}
+/**
+ * lpfc_sli_submit_iocb: Submit an iocb to the firmware.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @iocb: Pointer to iocb slot in the ring.
+ * @nextiocb: Pointer to driver iocb object which needs to be
+ * posted to firmware.
+ *
+ * This function is called with hbalock held to post a new iocb to
+ * the firmware. This function copies the new iocb to the ring iocb slot and
+ * updates the ring pointers. It adds the new iocb to the txcmplq if there is
+ * a completion callback for this iocb; otherwise the function frees the
+ * iocb object.
+ **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
@@ -408,6 +542,7 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
*/
nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
+
if (pring->ringno == LPFC_ELS_RING) {
lpfc_debugfs_slow_ring_trc(phba,
"IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
@@ -441,6 +576,18 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
+/**
+ * lpfc_sli_update_full_ring: Update the chip attention register.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * The caller is not required to hold any lock for calling this function.
+ * This function updates the chip attention bits for the ring to inform the
+ * firmware that there is pending work to be done for this ring and requests an
+ * interrupt when there is space available in the ring. This function is
+ * called when the driver is unable to post more iocbs to the ring due
+ * to unavailability of space in the ring.
+ **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
@@ -460,6 +607,15 @@ lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
pring->stats.iocb_cmd_full++;
}
+/**
+ * lpfc_sli_update_ring: Update chip attention register.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function updates the chip attention register bit for the
+ * given ring to inform the HBA that there is more work to be done
+ * in this ring. The caller is not required to hold any lock.
+ **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
@@ -468,11 +624,22 @@ lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
/*
* Tell the HBA that there is work to do in this ring.
*/
- wmb();
- writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
- readl(phba->CAregaddr); /* flush */
+ if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
+ wmb();
+ writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
+ readl(phba->CAregaddr); /* flush */
+ }
}
+/**
+ * lpfc_sli_resume_iocb: Process iocbs in the txq.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function is called with hbalock held to post pending iocbs
+ * in the txq to the firmware. This function is called when the driver
+ * detects space available in the ring.
+ **/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
@@ -504,6 +671,16 @@ lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
return;
}
+/**
+ * lpfc_sli_next_hbq_slot: Get next hbq entry for the HBQ.
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ *
+ * This function is called with hbalock held to get the next
+ * available slot for the given HBQ. If a free slot is
+ * available for the HBQ, it returns a pointer to the next available
+ * HBQ entry; otherwise it returns NULL.
+ **/
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
@@ -539,6 +716,15 @@ lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
hbqp->hbqPutIdx;
}
+/**
+ * lpfc_sli_hbqbuf_free_all: Free all the hbq buffers.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with no lock held to free all the
+ * hbq buffers while uninitializing the SLI interface. It also
+ * frees the HBQ buffers returned by the firmware but not yet
+ * processed by the upper layers.
+ **/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
@@ -584,6 +770,18 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
spin_unlock_irqrestore(&phba->hbalock, flags);
}
+/**
+ * lpfc_sli_hbq_to_firmware: Post the hbq buffer to firmware.
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ * @hbq_buf: Pointer to HBQ buffer.
+ *
+ * This function is called with the hbalock held to post an
+ * hbq buffer to the firmware. If the function finds an empty
+ * slot in the HBQ, it will post the buffer. The function returns a
+ * pointer to the hbq entry if it successfully posts the buffer;
+ * otherwise it returns NULL.
+ **/
static struct lpfc_hbq_entry *
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
struct hbq_dmabuf *hbq_buf)
@@ -612,6 +810,7 @@ lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
return hbqe;
}
+/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
.rn = 1,
.entry_count = 200,
@@ -623,6 +822,7 @@ static struct lpfc_hbq_init lpfc_els_hbq = {
.add_count = 5,
};
+/* HBQ for the extra ring if needed */
static struct lpfc_hbq_init lpfc_extra_hbq = {
.rn = 1,
.entry_count = 200,
@@ -634,51 +834,81 @@ static struct lpfc_hbq_init lpfc_extra_hbq = {
.add_count = 5,
};
+/* Array of HBQs */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
&lpfc_els_hbq,
&lpfc_extra_hbq,
};
+/**
+ * lpfc_sli_hbqbuf_fill_hbqs: Post more hbq buffers to HBQ.
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ * @count: Number of HBQ buffers to be posted.
+ *
+ * This function is called with no lock held to post more hbq buffers to the
+ * given HBQ. The function returns the number of HBQ buffers successfully
+ * posted.
+ **/
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
- uint32_t i, start, end;
+ uint32_t i, posted = 0;
unsigned long flags;
struct hbq_dmabuf *hbq_buffer;
-
+ LIST_HEAD(hbq_buf_list);
if (!phba->hbqs[hbqno].hbq_alloc_buffer)
return 0;
- start = phba->hbqs[hbqno].buffer_count;
- end = count + start;
- if (end > lpfc_hbq_defs[hbqno]->entry_count)
- end = lpfc_hbq_defs[hbqno]->entry_count;
-
+ if ((phba->hbqs[hbqno].buffer_count + count) >
+ lpfc_hbq_defs[hbqno]->entry_count)
+ count = lpfc_hbq_defs[hbqno]->entry_count -
+ phba->hbqs[hbqno].buffer_count;
+ if (!count)
+ return 0;
+ /* Allocate HBQ entries */
+ for (i = 0; i < count; i++) {
+ hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
+ if (!hbq_buffer)
+ break;
+ list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
+ }
/* Check whether HBQ is still in use */
spin_lock_irqsave(&phba->hbalock, flags);
if (!phba->hbq_in_use)
- goto out;
-
- /* Populate HBQ entries */
- for (i = start; i < end; i++) {
- hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
- if (!hbq_buffer)
- goto err;
- hbq_buffer->tag = (i | (hbqno << 16));
- if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
+ goto err;
+ while (!list_empty(&hbq_buf_list)) {
+ list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
+ dbuf.list);
+ hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
+ (hbqno << 16));
+ if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
phba->hbqs[hbqno].buffer_count++;
- else
+ posted++;
+ } else
(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
}
-
- out:
spin_unlock_irqrestore(&phba->hbalock, flags);
- return 0;
- err:
+ return posted;
+err:
spin_unlock_irqrestore(&phba->hbalock, flags);
- return 1;
+ while (!list_empty(&hbq_buf_list)) {
+ list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
+ dbuf.list);
+ (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
+ }
+ return 0;
}
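The rewritten fill routine above follows a common locking pattern: allocate every buffer with no lock held, then take the spinlock only to publish the staged list, rolling the allocations back if the HBQ went out of use in the meantime. A stripped-down user-space sketch of the same pattern, with a pthread mutex standing in for the hbalock and hypothetical names throughout:

#include <pthread.h>
#include <stdlib.h>

struct buf { struct buf *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct buf *posted;	/* "firmware-owned" list, protected by lock */
static int in_use = 1;		/* mirrors the phba->hbq_in_use check */

static int fill(int count)
{
	struct buf *staged = NULL, *b;
	int done = 0;

	while (count--) {	/* allocate with no lock held */
		b = malloc(sizeof(*b));
		if (!b)
			break;
		b->next = staged;
		staged = b;
	}
	pthread_mutex_lock(&lock);
	if (!in_use)
		goto err;
	while (staged) {	/* publish under the lock */
		b = staged;
		staged = b->next;
		b->next = posted;
		posted = b;
		done++;
	}
	pthread_mutex_unlock(&lock);
	return done;
err:
	pthread_mutex_unlock(&lock);
	while (staged) {	/* roll back the staged allocations */
		b = staged;
		staged = b->next;
		free(b);
	}
	return 0;
}

int main(void) { return fill(8) ? 0 : 1; }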
+/**
+ * lpfc_sli_hbqbuf_add_hbqs: Post more HBQ buffers to firmware.
+ * @phba: Pointer to HBA context object.
+ * @qno: HBQ number.
+ *
+ * This function posts more buffers to the HBQ. This function
+ * is called with no lock held. The function returns the number of HBQ entries
+ * successfully allocated.
+ **/
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
@@ -686,6 +916,15 @@ lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
lpfc_hbq_defs[qno]->add_count));
}
+/**
+ * lpfc_sli_hbqbuf_init_hbqs: Post initial buffers to the HBQ.
+ * @phba: Pointer to HBA context object.
+ * @qno: HBQ queue number.
+ *
+ * This function is called from SLI initialization code path with
+ * no lock held to post initial HBQ buffers to firmware. The
+ * function returns the number of HBQ entries successfully allocated.
+ **/
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
@@ -693,6 +932,16 @@ lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
lpfc_hbq_defs[qno]->init_count));
}
+/**
+ * lpfc_sli_hbqbuf_find: Find the hbq buffer associated with a tag.
+ * @phba: Pointer to HBA context object.
+ * @tag: Tag of the hbq buffer.
+ *
+ * This function is called with hbalock held. This function searches
+ * for the hbq buffer associated with the given tag in the hbq buffer
+ * list. If it finds the hbq buffer, it returns the hbq_buffer; otherwise
+ * it returns NULL.
+ **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
@@ -716,6 +965,15 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
return NULL;
}
+/**
+ * lpfc_sli_free_hbq: Give back the hbq buffer to firmware.
+ * @phba: Pointer to HBA context object.
+ * @hbq_buffer: Pointer to HBQ buffer.
+ *
+ * This function is called with the hbalock held. This function gives back
+ * the hbq buffer to the firmware. If the HBQ does not have space to
+ * post the buffer, it will free the buffer.
+ **/
void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
@@ -729,6 +987,15 @@ lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
}
}
+/**
+ * lpfc_sli_chk_mbx_command: Check if the mailbox is a legitimate mailbox.
+ * @mbxCommand: mailbox command code.
+ *
+ * This function is called by the mailbox event handler function to verify
+ * that the completed mailbox command is a legitimate mailbox command. If the
+ * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
+ * and the mailbox event handler will take the HBA offline.
+ **/
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
@@ -785,6 +1052,8 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
case MBX_REG_VPI:
case MBX_UNREG_VPI:
case MBX_HEARTBEAT:
+ case MBX_PORT_CAPABILITIES:
+ case MBX_PORT_IOV_CONTROL:
ret = mbxCommand;
break;
default:
@@ -793,6 +1062,19 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
}
return ret;
}
+
+/**
+ * lpfc_sli_wake_mbox_wait: Completion handler for mbox issued from
+ * lpfc_sli_issue_mbox_wait.
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is the completion handler for mailbox commands issued from the
+ * lpfc_sli_issue_mbox_wait function. This function is called by the
+ * mailbox event handler function with no lock held. This function
+ * will wake up the thread waiting on the wait queue pointed to by context1
+ * of the mailbox.
+ **/
static void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
@@ -812,6 +1094,17 @@ lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
return;
}
+
+/**
+ * lpfc_sli_def_mbox_cmpl: Default mailbox completion handler.
+ * @phba: Pointer to HBA context object.
+ * @pmb: Pointer to mailbox object.
+ *
+ * This function is the default mailbox completion handler. It
+ * frees the memory resources associated with the completed mailbox
+ * command. If the completed command is a REG_LOGIN mailbox command,
+ * this function will issue a UREG_LOGIN to reclaim the RPI.
+ **/
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
@@ -846,6 +1139,19 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
}
+/**
+ * lpfc_sli_handle_mb_event: Handle mailbox completions from firmware.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with no lock held. It processes all
+ * the completed mailbox commands and hands them to the upper layers. The
+ * interrupt service routine processes the mailbox completion interrupt, adds
+ * completed mailbox commands to the mboxq_cmpl queue, and signals the worker
+ * thread. The worker thread calls lpfc_sli_handle_mb_event, which returns the
+ * completed mailbox commands in the mboxq_cmpl queue to the upper layers by
+ * calling the completion handler function of each mailbox.
+ **/
int
lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
@@ -953,61 +1259,48 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
return 0;
}
+/**
+ * lpfc_sli_get_buff: Get the buffer associated with the buffer tag.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @tag: buffer tag.
+ *
+ * This function is called with no lock held. When the QUE_BUFTAG_BIT
+ * is set in the tag, the buffer was posted for a particular exchange and
+ * the function returns the buffer without replacing it.
+ * If the buffer is for unsolicited ELS or CT traffic, this function
+ * returns the buffer and also posts another buffer to the firmware.
+ **/
static struct lpfc_dmabuf *
-lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
+lpfc_sli_get_buff(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring,
+ uint32_t tag)
{
- struct hbq_dmabuf *hbq_entry, *new_hbq_entry;
- uint32_t hbqno;
- void *virt; /* virtual address ptr */
- dma_addr_t phys; /* mapped address */
- unsigned long flags;
-
- /* Check whether HBQ is still in use */
- spin_lock_irqsave(&phba->hbalock, flags);
- if (!phba->hbq_in_use) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
- return NULL;
- }
+ struct hbq_dmabuf *hbq_entry;
+ if (tag & QUE_BUFTAG_BIT)
+ return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
- if (hbq_entry == NULL) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ if (!hbq_entry)
return NULL;
- }
- list_del(&hbq_entry->dbuf.list);
-
- hbqno = tag >> 16;
- new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
- if (new_hbq_entry == NULL) {
- list_add_tail(&hbq_entry->dbuf.list, &phba->hbqbuf_in_list);
- spin_unlock_irqrestore(&phba->hbalock, flags);
- return &hbq_entry->dbuf;
- }
- new_hbq_entry->tag = -1;
- phys = new_hbq_entry->dbuf.phys;
- virt = new_hbq_entry->dbuf.virt;
- new_hbq_entry->dbuf.phys = hbq_entry->dbuf.phys;
- new_hbq_entry->dbuf.virt = hbq_entry->dbuf.virt;
- hbq_entry->dbuf.phys = phys;
- hbq_entry->dbuf.virt = virt;
- lpfc_sli_free_hbq(phba, hbq_entry);
- list_add_tail(&new_hbq_entry->dbuf.list, &phba->hbqbuf_in_list);
- spin_unlock_irqrestore(&phba->hbalock, flags);
-
- return &new_hbq_entry->dbuf;
+ return &hbq_entry->dbuf;
}
-static struct lpfc_dmabuf *
-lpfc_sli_get_buff(struct lpfc_hba *phba,
- struct lpfc_sli_ring *pring,
- uint32_t tag)
-{
- if (tag & QUE_BUFTAG_BIT)
- return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
- else
- return lpfc_sli_replace_hbqbuff(phba, tag);
-}
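The dispatch in lpfc_sli_get_buff relies on how tags are laid out: QUE_BUFTAG_BIT marks driver-tagged buffers, while HBQ tags pack the HBQ number into the upper 16 bits, as set in lpfc_sli_hbqbuf_fill_hbqs above. A minimal sketch of that encoding; the bit position chosen here for QUE_BUFTAG_BIT is hypothetical:

#include <stdint.h>
#include <stdio.h>

#define QUE_BUFTAG_BIT	(1U << 31)	/* illustrative position only */

static uint32_t hbq_tag(uint32_t hbqno, uint32_t index)
{
	return (hbqno << 16) | index;	/* matches hbq_buffer->tag above */
}

int main(void)
{
	uint32_t tag = hbq_tag(1, 42);

	if (tag & QUE_BUFTAG_BIT)
		printf("driver-tagged buffer\n");
	else
		printf("HBQ %u, buffer %u\n", tag >> 16, tag & 0xffff);
	return 0;
}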
+/**
+ * lpfc_sli_process_unsol_iocb: Unsolicited iocb handler.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @saveq: Pointer to the unsolicited iocb.
+ *
+ * This function is called with no lock held by the ring event handler
+ * when there is an unsolicited iocb posted to the response ring by the
+ * firmware. This function gets the buffer associated with the iocbs
+ * and calls the event handler for the ring. This function handles both
+ * qring buffers and hbq buffers.
+ * When the function returns 1, the caller can free the iocb object; otherwise
+ * the upper-layer functions will free the iocb objects.
+ **/
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *saveq)
@@ -1022,8 +1315,6 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
match = 0;
irsp = &(saveq->iocb);
- if (irsp->ulpStatus == IOSTAT_NEED_BUFFER)
- return 1;
if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
if (pring->lpfc_sli_rcv_async_status)
pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
@@ -1192,6 +1483,18 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return 1;
}
+/**
+ * lpfc_sli_iocbq_lookup: Find command iocb for the given response iocb.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @prspiocb: Pointer to response iocb object.
+ *
+ * This function looks up the iocb_lookup table to get the command iocb
+ * corresponding to the given response iocb using the iotag of the
+ * response iocb. This function is called with the hbalock held.
+ * This function returns the command iocb object if it finds the command
+ * iocb else returns NULL.
+ **/
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring,
@@ -1217,6 +1520,23 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
return NULL;
}
+/**
+ * lpfc_sli_process_sol_iocb: process solicited iocb completion.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @saveq: Pointer to the response iocb to be processed.
+ *
+ * This function is called by the ring event handler for non-fcp
+ * rings when there is a new response iocb in the response ring.
+ * The caller is not required to hold any locks. This function
+ * gets the command iocb associated with the response iocb and
+ * calls the completion handler for the command iocb. If there
+ * is no completion handler, the function will free the resources
+ * associated with the command iocb. If the response iocb is for
+ * an already aborted command iocb, the status of the completion
+ * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
+ * This function always returns 1.
+ **/
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *saveq)
@@ -1233,6 +1553,17 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (cmdiocbp) {
if (cmdiocbp->iocb_cmpl) {
/*
+ * If an ELS command failed send an event to mgmt
+ * application.
+ */
+ if (saveq->iocb.ulpStatus &&
+ (pring->ringno == LPFC_ELS_RING) &&
+ (cmdiocbp->iocb.ulpCommand ==
+ CMD_ELS_REQUEST64_CR))
+ lpfc_send_els_failure_event(phba,
+ cmdiocbp, saveq);
+
+ /*
* Post all ELS completions to the worker thread.
* All other are passed to the completion callback.
*/
@@ -1282,12 +1613,20 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return rc;
}
+/**
+ * lpfc_sli_rsp_pointers_error: Response ring pointer error handler.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function is called from the iocb ring event handlers when the
+ * put pointer is ahead of the get pointer for a ring. This function signals
+ * an error attention condition to the worker thread, and the worker
+ * thread will transition the HBA to the offline state.
+ **/
static void
lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
- struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
- &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
- &phba->slim2p->mbx.us.s2.port[pring->ringno];
+ struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
/*
* Ring <ringno> handler: portRspPut <portRspPut> is bigger then
* rsp ring <portRspMax>
@@ -1312,6 +1651,51 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
return;
}
+/**
+ * lpfc_poll_eratt: Error attention polling timer timeout handler.
+ * @ptr: Pointer to address of HBA context object.
+ *
+ * This function is invoked by the Error Attention polling timer when the
+ * timer times out. It will check the SLI Error Attention register for
+ * possible attention events. If so, it will post an Error Attention event
+ * and wake up the worker thread to process it. Otherwise, it will set up the
+ * Error Attention polling timer for the next poll.
+ **/
+void lpfc_poll_eratt(unsigned long ptr)
+{
+ struct lpfc_hba *phba;
+ uint32_t eratt = 0;
+
+ phba = (struct lpfc_hba *)ptr;
+
+ /* Check chip HA register for error event */
+ eratt = lpfc_sli_check_eratt(phba);
+
+ if (eratt)
+ /* Tell the worker thread there is work to do */
+ lpfc_worker_wake_up(phba);
+ else
+ /* Restart the timer for next eratt poll */
+ mod_timer(&phba->eratt_poll, jiffies +
+ HZ * LPFC_ERATT_POLL_INTERVAL);
+ return;
+}
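The control flow of lpfc_poll_eratt reduces to a single decision: hand work to the worker thread, or re-arm the timer for the next poll. The user-space sketch below models that shape with stub stand-ins for lpfc_sli_check_eratt, lpfc_worker_wake_up and mod_timer; the stubs and the interval are assumptions.

#include <stdbool.h>
#include <stdio.h>

static bool check_error_attention(void) { return false; }	/* stub */
static void wake_worker(void) { puts("wake worker"); }		/* stub */
static void rearm_timer(unsigned ms) { printf("rearm in %u ms\n", ms); }

static void poll_cb(void)
{
	if (check_error_attention())
		wake_worker();		/* worker handles the event */
	else
		rearm_timer(1000);	/* interval is illustrative */
}

int main(void) { poll_cb(); return 0; }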
+
+/**
+ * lpfc_sli_poll_fcp_ring: Handle FCP ring completion in polling mode.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called from lpfc_queuecommand, lpfc_poll_timeout,
+ * lpfc_abort_handler and lpfc_slave_configure when FCP_RING_POLLING
+ * is enabled.
+ *
+ * The caller does not hold any lock.
+ * The function processes each response iocb in the response ring until it
+ * finds an iocb with the LE bit set, chaining all the iocbs up to the iocb
+ * with the LE bit set. The function calls the completion handler of the
+ * command iocb if the response iocb indicates a completion for a command
+ * iocb or an abort completion.
+ **/
void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
@@ -1320,7 +1704,7 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
IOCB_t *entry = NULL;
struct lpfc_iocbq *cmdiocbq = NULL;
struct lpfc_iocbq rspiocbq;
- struct lpfc_pgp *pgp;
+ struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
uint32_t status;
uint32_t portRspPut, portRspMax;
int type;
@@ -1330,11 +1714,6 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
pring->stats.iocb_event++;
- pgp = (phba->sli_rev == 3) ?
- &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
- &phba->slim2p->mbx.us.s2.port[pring->ringno];
-
-
/*
* The next available response entry should never exceed the maximum
* entries. If it does, treat it as an adapter hardware error.
@@ -1372,8 +1751,8 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
irsp->un.ulpWord[3],
irsp->un.ulpWord[4],
irsp->un.ulpWord[5],
- *(((uint32_t *) irsp) + 6),
- *(((uint32_t *) irsp) + 7));
+ *(uint32_t *)&irsp->un1,
+ *((uint32_t *)&irsp->un1 + 1));
}
switch (type) {
@@ -1465,17 +1844,28 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
return;
}
-/*
+/**
+ * lpfc_sli_handle_fast_ring_event: Handle ring events on FCP ring.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mask: Host attention register mask for this ring.
+ *
+ * This function is called from the interrupt context when there is a ring
+ * event for the fcp ring. The caller does not hold any lock.
+ * The function processes each response iocb in the response ring until it
+ * finds an iocb with LE bit set and chains all the iocbs upto the iocb with
+ * LE bit set. The function will call the completion handler of the command iocb
+ * if the response iocb indicates a completion for a command iocb or it is
+ * an abort completion. The function will call lpfc_sli_process_unsol_iocb
+ * function if this is an unsolicited iocb.
* This routine presumes LPFC_FCP_RING handling and doesn't bother
- * to check it explicitly.
- */
+ * to check it explicitly. This function always returns 1.
+ **/
static int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring, uint32_t mask)
{
- struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
- &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
- &phba->slim2p->mbx.us.s2.port[pring->ringno];
+ struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
IOCB_t *irsp = NULL;
IOCB_t *entry = NULL;
struct lpfc_iocbq *cmdiocbq = NULL;
@@ -1533,7 +1923,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
(irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
- lpfc_adjust_queue_depth(phba);
+ lpfc_rampdown_queue_depth(phba);
spin_lock_irqsave(&phba->hbalock, iflag);
}
@@ -1548,8 +1938,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
irsp->un.ulpWord[3],
irsp->un.ulpWord[4],
irsp->un.ulpWord[5],
- *(((uint32_t *) irsp) + 6),
- *(((uint32_t *) irsp) + 7));
+ *(uint32_t *)&irsp->un1,
+ *((uint32_t *)&irsp->un1 + 1));
}
switch (type) {
@@ -1646,13 +2036,28 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
return rc;
}
+/**
+ * lpfc_sli_handle_slow_ring_event: Handle ring events for non-FCP rings.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mask: Host attention register mask for this ring.
+ *
+ * This function is called from the worker thread when there is a ring
+ * event for non-fcp rings. The caller does not hold any lock.
+ * The function processes each response iocb in the response ring until it
+ * finds an iocb with the LE bit set, chaining all the iocbs up to the iocb
+ * with the LE bit set. The function calls lpfc_sli_process_sol_iocb if the
+ * response iocb indicates a completion of a command iocb, and calls
+ * lpfc_sli_process_unsol_iocb if this is an unsolicited iocb. The function
+ * frees the resources or calls the completion handler if this iocb is an
+ * abort completion. The function returns 0 when the allocated iocbs are not
+ * freed, otherwise it returns 1.
+ **/
int
lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring, uint32_t mask)
{
- struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
- &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
- &phba->slim2p->mbx.us.s2.port[pring->ringno];
+ struct lpfc_pgp *pgp;
IOCB_t *entry;
IOCB_t *irsp = NULL;
struct lpfc_iocbq *rspiocbp = NULL;
@@ -1666,6 +2071,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
int rc = 1;
unsigned long iflag;
+ pgp = &phba->port_gp[pring->ringno];
spin_lock_irqsave(&phba->hbalock, iflag);
pring->stats.iocb_event++;
@@ -1760,7 +2166,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
(irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
- lpfc_adjust_queue_depth(phba);
+ lpfc_rampdown_queue_depth(phba);
spin_lock_irqsave(&phba->hbalock, iflag);
}
@@ -1904,6 +2310,16 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
return rc;
}
+/**
+ * lpfc_sli_abort_iocb_ring: Abort all iocbs in the ring.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function aborts all iocbs in the given ring and frees all the iocb
+ * objects in txq. This function issues an abort iocb for all the iocb commands
+ * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
+ * the return of this function. The caller is not required to hold any locks.
+ **/
void
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
@@ -1943,6 +2359,83 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
}
}
+/**
+ * lpfc_sli_flush_fcp_rings: flush all iocbs in the fcp ring.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function flushes all iocbs in the fcp ring and frees all the iocb
+ * objects in txq and txcmplq. This function will not issue abort iocbs
+ * for the iocb commands in the txcmplq; they will just be returned with
+ * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
+ * slot has been permanently disabled.
+ **/
+void
+lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
+{
+ LIST_HEAD(txq);
+ LIST_HEAD(txcmplq);
+ struct lpfc_iocbq *iocb;
+ IOCB_t *cmd = NULL;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+
+ /* Currently, only one fcp ring */
+ pring = &psli->ring[psli->fcp_ring];
+
+ spin_lock_irq(&phba->hbalock);
+ /* Retrieve everything on txq */
+ list_splice_init(&pring->txq, &txq);
+ pring->txq_cnt = 0;
+
+ /* Retrieve everything on the txcmplq */
+ list_splice_init(&pring->txcmplq, &txcmplq);
+ pring->txcmplq_cnt = 0;
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Flush the txq */
+ while (!list_empty(&txq)) {
+ iocb = list_get_first(&txq, struct lpfc_iocbq, list);
+ cmd = &iocb->iocb;
+ list_del_init(&iocb->list);
+
+ if (!iocb->iocb_cmpl)
+ lpfc_sli_release_iocbq(phba, iocb);
+ else {
+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
+ (iocb->iocb_cmpl) (phba, iocb, iocb);
+ }
+ }
+
+ /* Flush the txcmpq */
+ while (!list_empty(&txcmplq)) {
+ iocb = list_get_first(&txcmplq, struct lpfc_iocbq, list);
+ cmd = &iocb->iocb;
+ list_del_init(&iocb->list);
+
+ if (!iocb->iocb_cmpl)
+ lpfc_sli_release_iocbq(phba, iocb);
+ else {
+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
+ cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
+ (iocb->iocb_cmpl) (phba, iocb, iocb);
+ }
+ }
+}
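lpfc_sli_flush_fcp_rings is a textbook use of the splice-under-lock idiom: move both queues onto private list heads while holding the lock, then complete each entry with the lock dropped. The same idiom in miniature, with a pthread mutex in place of the hbalock and a trivial singly-linked list:

#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *txq;	/* protected by lock */

static void flush(void (*complete)(struct node *))
{
	struct node *local, *n;

	pthread_mutex_lock(&lock);
	local = txq;		/* splice the whole queue out ... */
	txq = NULL;
	pthread_mutex_unlock(&lock);

	while (local) {		/* ... then complete without the lock */
		n = local;
		local = n->next;
		complete(n);
	}
}

static void done(struct node *n) { printf("completed %p\n", (void *)n); }

int main(void)
{
	static struct node a, b;

	a.next = &b;
	txq = &a;
	flush(done);
	return 0;
}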
+
+/**
+ * lpfc_sli_brdready: Check for host status bits.
+ * @phba: Pointer to HBA context object.
+ * @mask: Bit mask to be checked.
+ *
+ * This function reads the host status register and compares
+ * with the provided bit mask to check if the HBA completed
+ * the restart. This function will wait in a loop for the
+ * HBA to complete the restart. If the HBA does not restart within
+ * 15 iterations, the function will reset the HBA again. The
+ * function returns 1 when the HBA fails to restart; otherwise it
+ * returns zero.
+ **/
int
lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
{
@@ -1990,6 +2483,13 @@ lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
#define BARRIER_TEST_PATTERN (0xdeadbeef)
+/**
+ * lpfc_reset_barrier: Make HBA ready for HBA reset.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called before resetting an HBA. This
+ * function requests the HBA to quiesce DMAs before a reset.
+ **/
void lpfc_reset_barrier(struct lpfc_hba *phba)
{
uint32_t __iomem *resp_buf;
@@ -2063,6 +2563,17 @@ restore_hc:
readl(phba->HCregaddr); /* flush */
}
+/**
+ * lpfc_sli_brdkill: Issue a kill_board mailbox command.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function issues a kill_board mailbox command and waits for
+ * the error attention interrupt. This function is called for stopping
+ * the firmware processing. The caller is not required to hold any
+ * locks. This function calls lpfc_hba_down_post function to free
+ * any pending commands after the kill. The function will return 1 when it
+ * fails to kill the board, else it will return 0.
+ **/
int
lpfc_sli_brdkill(struct lpfc_hba *phba)
{
@@ -2139,6 +2650,17 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
return ha_copy & HA_ERATT ? 0 : 1;
}
+/**
+ * lpfc_sli_brdreset: Reset the HBA.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function resets the HBA by writing HC_INITFF to the control
+ * register. After the HBA resets, this function resets all the iocb ring
+ * indices. This function disables PCI layer parity checking during
+ * the reset.
+ * This function returns 0 always.
+ * The caller is not required to hold any locks.
+ **/
int
lpfc_sli_brdreset(struct lpfc_hba *phba)
{
@@ -2191,12 +2713,24 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
return 0;
}
+/**
+ * lpfc_sli_brdrestart: Restart the HBA.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI initialization code path to
+ * restart the HBA. The caller is not required to hold any lock.
+ * This function writes MBX_RESTART mailbox command to the SLIM and
+ * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
+ * function to free any pending commands. The function enables
+ * POST only during the first initialization. The function returns zero.
+ * The function does not guarantee that the MBX_RESTART mailbox
+ * command completes before it returns.
+ **/
int
lpfc_sli_brdrestart(struct lpfc_hba *phba)
{
MAILBOX_t *mb;
struct lpfc_sli *psli;
- uint16_t skip_post;
volatile uint32_t word0;
void __iomem *to_slim;
@@ -2221,13 +2755,10 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
readl(to_slim); /* flush */
/* Only skip post after fc_ffinit is completed */
- if (phba->pport->port_state) {
- skip_post = 1;
+ if (phba->pport->port_state)
word0 = 1; /* This is really setting up word1 */
- } else {
- skip_post = 0;
+ else
word0 = 0; /* This is really setting up word1 */
- }
to_slim = phba->MBslimaddr + sizeof (uint32_t);
writel(*(uint32_t *) mb, to_slim);
readl(to_slim); /* flush */
@@ -2241,16 +2772,24 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
psli->stats_start = get_seconds();
- if (skip_post)
- mdelay(100);
- else
- mdelay(2000);
+ /* Give the INITFF and Post time to settle. */
+ mdelay(100);
lpfc_hba_down_post(phba);
return 0;
}
+/**
+ * lpfc_sli_chipset_init: Wait for the restart of the HBA after a restart.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called after an HBA restart to wait for successful
+ * restart of the HBA. Successful restart of the HBA is indicated by the
+ * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
+ * iterations, the function will restart the HBA again. The function returns
+ * zero if the HBA restarted successfully, else a negative error code.
+ **/
static int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
@@ -2336,12 +2875,25 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
return 0;
}
+/**
+ * lpfc_sli_hbq_count: Get the number of HBQs to be configured.
+ *
+ * This function calculates and returns the number of HBQs required to be
+ * configured.
+ **/
int
lpfc_sli_hbq_count(void)
{
return ARRAY_SIZE(lpfc_hbq_defs);
}
+/**
+ * lpfc_sli_hbq_entry_count: Calculate total number of hbq entries.
+ *
+ * This function adds the number of hbq entries in every HBQ to get
+ * the total number of hbq entries required for the HBA and returns
+ * the total count.
+ **/
static int
lpfc_sli_hbq_entry_count(void)
{
@@ -2354,12 +2906,27 @@ lpfc_sli_hbq_entry_count(void)
return count;
}
+/**
+ * lpfc_sli_hbq_size: Calculate memory required for all hbq entries.
+ *
+ * This function calculates amount of memory required for all hbq entries
+ * to be configured and returns the total memory required.
+ **/
int
lpfc_sli_hbq_size(void)
{
return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
}
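Given the two HBQ definitions above (200 entries each), the entry-count and sizing helpers reduce to simple arithmetic. A runnable sketch with a placeholder entry layout, since the real fields of struct lpfc_hbq_entry are not shown here:

#include <stdio.h>

struct hbq_entry { unsigned int w[4]; };	/* placeholder layout */

static const int entry_counts[] = { 200, 200 };	/* els + extra HBQs above */

int main(void)
{
	int i, total = 0;

	for (i = 0; i < 2; i++)
		total += entry_counts[i];	/* 400 entries in all */
	printf("%d entries, %zu bytes\n",
	       total, (size_t)total * sizeof(struct hbq_entry));
	return 0;
}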
+/**
+ * lpfc_sli_hbq_setup: configure and initialize HBQs.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called during the SLI initialization to configure
+ * all the HBQs and post buffers to the HBQ. The caller is not
+ * required to hold any locks. This function will return zero if successful,
+ * else it will return a negative error code.
+ **/
static int
lpfc_sli_hbq_setup(struct lpfc_hba *phba)
{
@@ -2415,15 +2982,26 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
mempool_free(pmb, phba->mbox_mem_pool);
/* Initially populate or replenish the HBQs */
- for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
- if (lpfc_sli_hbqbuf_init_hbqs(phba, hbqno))
- return -ENOMEM;
- }
+ for (hbqno = 0; hbqno < hbq_count; ++hbqno)
+ lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
return 0;
}
-static int
-lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
+/**
+ * lpfc_sli_config_port: Issue config port mailbox command.
+ * @phba: Pointer to HBA context object.
+ * @sli_mode: sli mode - 2/3
+ *
+ * This function is called by the sli initialization code path
+ * to issue the config_port mailbox command. This function restarts the
+ * HBA firmware and issues a config_port mailbox command to configure
+ * the SLI interface in the sli mode specified by the sli_mode
+ * parameter. The caller is not required to hold any locks.
+ * The function returns 0 if successful, else a negative error
+ * code.
+ **/
+int
+lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
{
LPFC_MBOXQ_t *pmb;
uint32_t resetcount = 0, rc = 0, done = 0;
@@ -2441,7 +3019,6 @@ lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
spin_unlock_irq(&phba->hbalock);
phba->pport->port_state = LPFC_VPORT_UNKNOWN;
lpfc_sli_brdrestart(phba);
- msleep(2500);
rc = lpfc_sli_chipset_init(phba);
if (rc)
break;
@@ -2460,13 +3037,16 @@ lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
if (rc == -ERESTART) {
phba->link_state = LPFC_LINK_UNKNOWN;
continue;
- } else if (rc) {
+ } else if (rc)
break;
- }
-
phba->link_state = LPFC_INIT_MBX_CMDS;
lpfc_config_port(phba, pmb);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+ phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
+ LPFC_SLI3_HBQ_ENABLED |
+ LPFC_SLI3_CRP_ENABLED |
+ LPFC_SLI3_INB_ENABLED |
+ LPFC_SLI3_BG_ENABLED);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0442 Adapter failed to init, mbxCmd x%x "
@@ -2476,30 +3056,76 @@ lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
spin_unlock_irq(&phba->hbalock);
rc = -ENXIO;
- } else {
+ } else
done = 1;
- phba->max_vpi = (phba->max_vpi &&
- pmb->mb.un.varCfgPort.gmv) != 0
- ? pmb->mb.un.varCfgPort.max_vpi
- : 0;
- }
}
-
if (!done) {
rc = -EINVAL;
goto do_prep_failed;
}
+ if (pmb->mb.un.varCfgPort.sli_mode == 3) {
+ if (!pmb->mb.un.varCfgPort.cMA) {
+ rc = -ENXIO;
+ goto do_prep_failed;
+ }
+ if (phba->max_vpi && pmb->mb.un.varCfgPort.gmv) {
+ phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
+ phba->max_vpi = pmb->mb.un.varCfgPort.max_vpi;
+ } else
+ phba->max_vpi = 0;
+ if (pmb->mb.un.varCfgPort.gerbm)
+ phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
+ if (pmb->mb.un.varCfgPort.gcrp)
+ phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
+ if (pmb->mb.un.varCfgPort.ginb) {
+ phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
+ phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
+ phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
+ phba->inb_ha_copy = &phba->mbox->us.s3_inb_pgp.ha_copy;
+ phba->inb_counter = &phba->mbox->us.s3_inb_pgp.counter;
+ phba->inb_last_counter =
+ phba->mbox->us.s3_inb_pgp.counter;
+ } else {
+ phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
+ phba->port_gp = phba->mbox->us.s3_pgp.port;
+ phba->inb_ha_copy = NULL;
+ phba->inb_counter = NULL;
+ }
- if ((pmb->mb.un.varCfgPort.sli_mode == 3) &&
- (!pmb->mb.un.varCfgPort.cMA)) {
- rc = -ENXIO;
+ if (phba->cfg_enable_bg) {
+ if (pmb->mb.un.varCfgPort.gbg)
+ phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
+ else
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0443 Adapter did not grant "
+ "BlockGuard\n");
+ }
+ } else {
+ phba->hbq_get = NULL;
+ phba->port_gp = phba->mbox->us.s2.port;
+ phba->inb_ha_copy = NULL;
+ phba->inb_counter = NULL;
+ phba->max_vpi = 0;
}
-
do_prep_failed:
mempool_free(pmb, phba->mbox_mem_pool);
return rc;
}
+
+/**
+ * lpfc_sli_hba_setup: SLI initialization function.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is the main SLI initialization function. This function
+ * is called by the HBA initialization code, the HBA reset code and the HBA
+ * error attention handler code. The caller is not required to hold any
+ * locks. This function issues the config_port mailbox command to configure
+ * the SLI and to set up the iocb rings and HBQ rings. In the end the function
+ * calls the config_port_post function to issue the init_link mailbox
+ * command and to start the discovery. The function will return zero
+ * if successful, else it will return a negative error code.
+ **/
int
lpfc_sli_hba_setup(struct lpfc_hba *phba)
{
@@ -2528,22 +3154,20 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
break;
}
- rc = lpfc_do_config_port(phba, mode);
+ rc = lpfc_sli_config_port(phba, mode);
+
if (rc && lpfc_sli_mode == 3)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
"1820 Unable to select SLI-3. "
"Not supported by adapter.\n");
if (rc && mode != 2)
- rc = lpfc_do_config_port(phba, 2);
+ rc = lpfc_sli_config_port(phba, 2);
if (rc)
goto lpfc_sli_hba_setup_error;
if (phba->sli_rev == 3) {
phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
- phba->sli3_options |= LPFC_SLI3_ENABLED;
- phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
-
} else {
phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
@@ -2558,8 +3182,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
if (rc)
goto lpfc_sli_hba_setup_error;
- /* Init HBQs */
-
+ /* Init HBQs */
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
rc = lpfc_sli_hbq_setup(phba);
if (rc)
@@ -2581,19 +3204,19 @@ lpfc_sli_hba_setup_error:
return rc;
}
-/*! lpfc_mbox_timeout
- *
- * \pre
- * \post
- * \param hba Pointer to per struct lpfc_hba structure
- * \param l1 Pointer to the driver's mailbox queue.
- * \return
- * void
- *
- * \b Description:
+
+/**
+ * lpfc_mbox_timeout: Timeout call back function for mbox timer.
+ * @ptr: context object - pointer to hba structure.
*
- * This routine handles mailbox timeout events at timer interrupt context.
- */
+ * This is the callback function for the mailbox timer. The mailbox
+ * timer is armed when a new mailbox command is issued and the timer
+ * is deleted when the mailbox completes. The function is called by
+ * the kernel timer code when a mailbox does not complete within the
+ * expected time. This function wakes up the worker thread to
+ * process the mailbox timeout and returns. All the processing is
+ * done by the worker thread function lpfc_mbox_timeout_handler.
+ **/
void
lpfc_mbox_timeout(unsigned long ptr)
{
@@ -2612,6 +3235,15 @@ lpfc_mbox_timeout(unsigned long ptr)
return;
}
+
+/**
+ * lpfc_mbox_timeout_handler: Worker thread function to handle mailbox timeout.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called from the worker thread when a mailbox command times out.
+ * The caller is not required to hold any locks. This function will reset the
+ * HBA and recover all the pending commands.
+ **/
void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
@@ -2620,10 +3252,6 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
- if (!(phba->pport->work_port_events & WORKER_MBOX_TMO)) {
- return;
- }
-
/* Mbox cmd <mbxCommand> timeout */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
@@ -2666,6 +3294,32 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
return;
}
+/**
+ * lpfc_sli_issue_mbox: Issue a mailbox command to firmware.
+ * @phba: Pointer to HBA context object.
+ * @pmbox: Pointer to mailbox object.
+ * @flag: Flag indicating how the mailbox need to be processed.
+ *
+ * This function is called by discovery code and HBA management code
+ * to submit a mailbox command to firmware. This function gets the
+ * hbalock to protect the data structures.
+ * The mailbox command can be submitted in polling mode, in which case
+ * this function will wait in a polling loop for the completion of the
+ * mailbox.
+ * If the mailbox is submitted in no_wait mode (not polling), the
+ * function will submit the command and return immediately without waiting
+ * for the mailbox completion. The no_wait mode is supported only when the
+ * HBA is in SLI2/SLI3 mode and interrupts are enabled.
+ * The SLI interface allows only one mailbox pending at a time. If the
+ * mailbox is issued in polling mode and there is already a mailbox
+ * pending, then the function will return an error. If the mailbox is issued
+ * in NO_WAIT mode and there is a mailbox pending already, the function
+ * will return MBX_BUSY after queuing the mailbox into mailbox queue.
+ * The sli layer owns the mailbox object until the completion of mailbox
+ * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
+ * return codes the caller owns the mailbox command after the return of
+ * the function.
+ **/
int
lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
{
@@ -2676,7 +3330,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
int i;
unsigned long timeout;
unsigned long drvr_flag = 0;
- volatile uint32_t word0, ldata;
+ uint32_t word0, ldata;
void __iomem *to_slim;
int processing_queue = 0;
@@ -2836,12 +3490,11 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
/* First copy command data to host SLIM area */
- lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, MAILBOX_CMD_SIZE);
+ lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
} else {
if (mb->mbxCommand == MBX_CONFIG_PORT) {
/* copy command data into host mbox for cmpl */
- lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx,
- MAILBOX_CMD_SIZE);
+ lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
}
/* First copy mbox command data to HBA SLIM, skip past first
@@ -2851,7 +3504,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
MAILBOX_CMD_SIZE - sizeof (uint32_t));
/* Next copy over first word, with mbxOwner set */
- ldata = *((volatile uint32_t *)mb);
+ ldata = *((uint32_t *)mb);
to_slim = phba->MBslimaddr;
writel(ldata, to_slim);
readl(to_slim); /* flush */
@@ -2883,7 +3536,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
/* First read mbox status word */
- word0 = *((volatile uint32_t *)&phba->slim2p->mbx);
+ word0 = *((uint32_t *)phba->mbox);
word0 = le32_to_cpu(word0);
} else {
/* First read mbox status word */
@@ -2922,12 +3575,11 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
/* First copy command data */
- word0 = *((volatile uint32_t *)
- &phba->slim2p->mbx);
+ word0 = *((uint32_t *)phba->mbox);
word0 = le32_to_cpu(word0);
if (mb->mbxCommand == MBX_CONFIG_PORT) {
MAILBOX_t *slimmb;
- volatile uint32_t slimword0;
+ uint32_t slimword0;
/* Check real SLIM for any errors */
slimword0 = readl(phba->MBslimaddr);
slimmb = (MAILBOX_t *) & slimword0;
@@ -2948,8 +3600,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
/* copy results back to user */
- lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb,
- MAILBOX_CMD_SIZE);
+ lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
} else {
/* First copy command data */
lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
@@ -2980,9 +3631,16 @@ out_not_finished:
return MBX_NOT_FINISHED;
}
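The ownership contract documented above is worth restating in code: MBX_BUSY and MBX_SUCCESS mean the SLI layer keeps the mailbox, while any other return gives it back to the caller. A toy submit function with that shape; the constants, the single-pending representation, and the error chosen for the busy-poll case are illustrative only:

#include <stdbool.h>

enum { MBX_SUCCESS, MBX_BUSY, MBX_NOT_FINISHED };
enum { MBX_POLL, MBX_NOWAIT };

static bool mbox_active;	/* one mailbox pending at a time */

static int submit(int flag)
{
	if (mbox_active) {
		if (flag == MBX_NOWAIT)
			return MBX_BUSY;	/* queued; SLI layer owns it */
		return MBX_NOT_FINISHED;	/* polling caller keeps it */
	}
	mbox_active = true;
	/* ... issue to the port; in POLL mode, spin on the status word ... */
	mbox_active = false;
	return MBX_SUCCESS;
}

int main(void) { return submit(MBX_POLL) == MBX_SUCCESS ? 0 : 1; }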
-/*
- * Caller needs to hold lock.
- */
+/**
+ * __lpfc_sli_ringtx_put: Add an iocb to the txq.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @piocb: Pointer to address of newly added command iocb.
+ *
+ * This function is called with hbalock held to add a command
+ * iocb to the txq when SLI layer cannot submit the command iocb
+ * to the ring.
+ **/
static void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb)
@@ -2992,6 +3650,23 @@ __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
pring->txq_cnt++;
}
+/**
+ * lpfc_sli_next_iocb: Get the next iocb in the txq.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @piocb: Pointer to address of newly added command iocb.
+ *
+ * This function is called with hbalock held before a new
+ * iocb is submitted to the firmware. This function checks
+ * txq to flush the iocbs in txq to Firmware before
+ * submitting new iocbs to the Firmware.
+ * If there are iocbs in the txq which need to be submitted
+ * to firmware, lpfc_sli_next_iocb returns the first element
+ * of the txq after dequeuing it from txq.
+ * If there is no iocb in the txq then the function will return
+ * *piocb and *piocb is set to NULL. Caller needs to check
+ * *piocb to find if there are more commands in the txq.
+ **/
static struct lpfc_iocbq *
lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq **piocb)
@@ -3007,9 +3682,30 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return nextiocb;
}
-/*
- * Lockless version of lpfc_sli_issue_iocb.
- */
+/**
+ * __lpfc_sli_issue_iocb: Lockless version of lpfc_sli_issue_iocb.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
+ *
+ * __lpfc_sli_issue_iocb is used by other functions in the driver
+ * to issue an iocb command to the HBA. If the PCI slot is recovering
+ * from error state or if HBA is resetting or if LPFC_STOP_IOCB_EVENT
+ * flag is turned on, the function returns IOCB_ERROR.
+ * When the link is down, this function allows only iocbs for
+ * posting buffers.
+ * This function finds the next available slot in the command ring,
+ * posts the command to that slot, and writes the port
+ * attention register to request that the HBA start processing new iocbs.
+ * If there is no slot available in the ring and
+ * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the
+ * txq, otherwise the function returns IOCB_BUSY.
+ *
+ * This function is called with hbalock held.
+ * The function will return success after it successfully submits the
+ * iocb to the firmware or after adding it to the txq.
+ **/
static int
__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb, uint32_t flag)
@@ -3052,6 +3748,16 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* can be issued if the link is not up.
*/
switch (piocb->iocb.ulpCommand) {
+ case CMD_GEN_REQUEST64_CR:
+ case CMD_GEN_REQUEST64_CX:
+ if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
+ (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
+ FC_FCP_CMND) ||
+ (piocb->iocb.un.genreq64.w5.hcsw.Type !=
+ MENLO_TRANSPORT_TYPE))
+
+ goto iocb_busy;
+ break;
case CMD_QUE_RING_BUF_CN:
case CMD_QUE_RING_BUF64_CN:
/*
@@ -3106,6 +3812,19 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
+/**
+ * lpfc_sli_issue_iocb: Wrapper function for __lpfc_sli_issue_iocb.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
+ *
+ * lpfc_sli_issue_iocb is a wrapper around the __lpfc_sli_issue_iocb
+ * function. This function takes the hbalock, calls
+ * __lpfc_sli_issue_iocb, and returns the error returned
+ * by __lpfc_sli_issue_iocb. This wrapper is used by
+ * functions which do not hold the hbalock.
+ **/
int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb, uint32_t flag)
@@ -3120,6 +3839,17 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return rc;
}
+/**
+ * lpfc_extra_ring_setup: Extra ring setup function.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called while the driver attaches to the
+ * HBA to set up the extra ring. The extra ring is used
+ * only when the driver needs to support target mode functionality
+ * or IP over FC functionality.
+ *
+ * This function is called with no lock held.
+ **/
static int
lpfc_extra_ring_setup( struct lpfc_hba *phba)
{
@@ -3155,6 +3885,19 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
return 0;
}
+/**
+ * lpfc_sli_async_event_handler: ASYNC iocb handler function.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @iocbq: Pointer to iocb object.
+ *
+ * This function is called by the slow ring event handler
+ * function when there is an ASYNC event iocb in the ring.
+ * This function is called with no lock held.
+ * Currently this function handles only temperature-related
+ * ASYNC events. The function decodes the temperature sensor
+ * event message and posts events for the management applications.
+ **/
static void
lpfc_sli_async_event_handler(struct lpfc_hba * phba,
struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
@@ -3205,11 +3948,22 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba,
shost = lpfc_shost_from_vport(phba->pport);
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(temp_event_data), (char *) &temp_event_data,
- SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+ LPFC_NL_VENDOR_ID);
}
+/**
+ * lpfc_sli_setup: SLI ring setup function.
+ * @phba: Pointer to HBA context object.
+ *
+ * lpfc_sli_setup sets up rings of the SLI interface with
+ * the number of iocbs per ring and iotags. This function is
+ * called while the driver attaches to the HBA and before the
+ * interrupts are enabled, so there is no need for locking.
+ *
+ * This function always returns 0.
+ **/
int
lpfc_sli_setup(struct lpfc_hba *phba)
{
@@ -3321,6 +4075,17 @@ lpfc_sli_setup(struct lpfc_hba *phba)
return 0;
}
+/**
+ * lpfc_sli_queue_setup: Queue initialization function.
+ * @phba: Pointer to HBA context object.
+ *
+ * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each
+ * ring. This function also initializes ring indices of each ring.
+ * This function is called during the initialization of the SLI
+ * interface of an HBA.
+ * This function is called with no lock held and always returns 1.
+ **/
int
lpfc_sli_queue_setup(struct lpfc_hba *phba)
{
@@ -3349,6 +4114,23 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
return 1;
}
+/**
+ * lpfc_sli_host_down: Vport cleanup function.
+ * @vport: Pointer to virtual port object.
+ *
+ * lpfc_sli_host_down is called to clean up the resources
+ * associated with a vport before destroying virtual
+ * port data structures.
+ * This function performs the following operations:
+ * - Free discovery resources associated with this virtual
+ * port.
+ * - Free iocbs associated with this virtual port in
+ * the txq.
+ * - Send abort for all iocb commands associated with this
+ * vport in txcmplq.
+ *
+ * This function is called with no lock held and always returns 1.
+ **/
int
lpfc_sli_host_down(struct lpfc_vport *vport)
{
@@ -3411,6 +4193,21 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
return 1;
}
+/**
+ * lpfc_sli_hba_down: Resource cleanup function for the HBA.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function cleans up all iocbs, buffers, and mailbox commands
+ * while shutting down the HBA. This function is called with no
+ * lock held and always returns 1.
+ * This function does the following to cleanup driver resources:
+ * - Free discovery resources for each virtual port
+ * - Cleanup any pending fabric iocbs
+ * - Iterate through the iocb txq and free each entry
+ * in the list.
+ * - Free up any buffer posted to the HBA
+ * - Free mailbox commands in the mailbox queue.
+ **/
int
lpfc_sli_hba_down(struct lpfc_hba *phba)
{
@@ -3501,6 +4298,18 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
return 1;
}
+/**
+ * lpfc_sli_pcimem_bcopy: SLI memory copy function.
+ * @srcp: Source memory pointer.
+ * @destp: Destination memory pointer.
+ * @cnt: Number of words required to be copied.
+ *
+ * This function is used for copying data between driver memory
+ * and the SLI memory. This function also changes the endianness
+ * of each word if native endianness is different from SLI
+ * endianness. This function can be called with or without
+ * a lock held.
+ **/
void
lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
@@ -3518,6 +4327,17 @@ lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
}
}
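A user-space analogue of the word-by-word, endianness-converting copy makes the behavior easy to test. This sketch uses the glibc <endian.h> helpers rather than the kernel's le32_to_cpu, and treats cnt as a byte count for the purpose of the illustration:

#include <endian.h>
#include <stdint.h>
#include <string.h>

/* Copy cnt bytes as 32-bit words, converting from little-endian layout. */
static void bcopy_le32(const void *srcp, void *destp, uint32_t cnt)
{
	const uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t i, ldata;

	for (i = 0; i < cnt; i += sizeof(uint32_t)) {
		memcpy(&ldata, src++, sizeof(ldata));	/* alignment-safe load */
		ldata = le32toh(ldata);			/* swap if big-endian host */
		memcpy(dest++, &ldata, sizeof(ldata));
	}
}

int main(void)
{
	uint32_t src = 0x01020304, dst = 0;

	bcopy_le32(&src, &dst, sizeof(src));
	return dst != le32toh(src);	/* exit 0 on success */
}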
+
+/**
+ * lpfc_sli_ringpostbuf_put: Function to add a buffer to postbufq.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mp: Pointer to driver buffer object.
+ *
+ * This function is called with no lock held.
+ * It always returns zero after adding the buffer to the postbufq
+ * buffer list.
+ **/
int
lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_dmabuf *mp)
@@ -3531,6 +4351,18 @@ lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return 0;
}
+/**
+ * lpfc_sli_get_buffer_tag: Tag allocation function for a buffer posted
+ * using CMD_QUE_XRI64_CX iocb.
+ * @phba: Pointer to HBA context object.
+ *
+ * When HBQ is enabled, buffers are searched based on tags. This function
+ * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
+ * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
+ * does not conflict with tags of buffer posted for unsolicited events.
+ * The function returns the allocated tag. The function is called with
+ * no locks held.
+ **/
uint32_t
lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
{
@@ -3545,6 +4377,22 @@ lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
return phba->buffer_tag_count;
}
+/**
+ * lpfc_sli_ring_taggedbuf_get: Search HBQ buffer associated with
+ * posted using CMD_QUE_XRI64_CX iocb.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @tag: Buffer tag.
+ *
+ * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
+ * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
+ * iocb is posted to the response ring with the tag of the buffer.
+ * This function searches the pring->postbufq list using the tag
+ * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
+ * iocb. If the buffer is found, the lpfc_dmabuf object of the
+ * buffer is returned to the caller; else NULL is returned.
+ * This function is called with no lock held.
+ **/
struct lpfc_dmabuf *
lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
uint32_t tag)
@@ -3565,7 +4413,7 @@ lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0410 Cannot find virtual addr for buffer tag on "
+ "0402 Cannot find virtual addr for buffer tag on "
"ring %d Data x%lx x%p x%p x%x\n",
pring->ringno, (unsigned long) tag,
slp->next, slp->prev, pring->postbufq_cnt);
@@ -3573,6 +4421,23 @@ lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return NULL;
}
+/**
+ * lpfc_sli_ringpostbuf_get: SLI2 buffer search function for
+ * unsolicited ct and els events.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @phys: DMA address of the buffer.
+ *
+ * This function searches the buffer list using the dma_address
+ * of the unsolicited event to find the driver's lpfc_dmabuf object
+ * corresponding to the dma_address. The function returns the
+ * lpfc_dmabuf object if a buffer is found else it returns NULL.
+ * This function is called by the ct and els unsolicited event
+ * handlers to get the buffer associated with the unsolicited
+ * event.
+ *
+ * This function is called with no lock held.
+ **/
struct lpfc_dmabuf *
lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
dma_addr_t phys)
@@ -3600,6 +4465,17 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return NULL;
}
+/**
+ * lpfc_sli_abort_els_cmpl: Completion handler for the els abort iocbs.
+ * @phba: Pointer to HBA context object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ * @rspiocb: Pointer to driver response iocb object.
+ *
+ * This function is the completion handler for the abort iocbs for
+ * ELS commands. This function is called from the ELS ring event
+ * handler with no lock held. This function frees memory resources
+ * associated with the abort iocb.
+ **/
static void
lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -3665,6 +4541,17 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return;
}
+/**
+ * lpfc_ignore_els_cmpl: Completion handler for aborted ELS command.
+ * @phba: Pointer to HBA context object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ * @rspiocb: Pointer to driver response iocb object.
+ *
+ * The function is called from SLI ring event handler with no
+ * lock held. This function is the completion handler for ELS commands
+ * which are aborted. The function frees memory resources used for
+ * the aborted ELS commands.
+ **/
static void
lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -3673,7 +4560,7 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* ELS cmd tag <ulpIoTag> completes */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "0133 Ignoring ELS cmd tag x%x completion Data: "
+ "0139 Ignoring ELS cmd tag x%x completion Data: "
"x%x x%x x%x\n",
irsp->ulpIoTag, irsp->ulpStatus,
irsp->un.ulpWord[4], irsp->ulpTimeout);
@@ -3684,6 +4571,17 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return;
}
+/**
+ * lpfc_sli_issue_abort_iotag: Abort function for a command iocb.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ *
+ * This function issues an abort iocb for the provided command
+ * iocb. This function is called with hbalock held.
+ * The function returns 0 when it fails due to memory allocation
+ * failure or when the command iocb is an abort request.
+ **/
int
lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *cmdiocb)
@@ -3748,6 +4646,8 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
+ if (retval)
+ __lpfc_sli_release_iocbq(phba, abtsiocbp);
abort_iotag_exit:
/*
* Caller to this routine should check for IOCB_ERROR
@@ -3757,6 +4657,29 @@ abort_iotag_exit:
return retval;
}
+/**
+ * lpfc_sli_validate_fcp_iocb: Filtering function, used to find commands
+ * associated with a vport/SCSI target/lun.
+ * @iocbq: Pointer to driver iocb object.
+ * @vport: Pointer to driver virtual port object.
+ * @tgt_id: SCSI ID of the target.
+ * @lun_id: LUN ID of the scsi device.
+ * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
+ *
+ * This function acts as iocb filter for functions which abort or count
+ * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
+ * 0 if the filtering criteria are met for the given iocb and will return
+ * 1 if the filtering criteria are not met.
+ * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
+ * given iocb is for the SCSI device specified by vport, tgt_id and
+ * lun_id parameter.
+ * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
+ * given iocb is for the SCSI target specified by vport and tgt_id
+ * parameters.
+ * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
+ * given iocb is for the SCSI host associated with the given vport.
+ * This function is called with no locks held.
+ **/
static int
lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
uint16_t tgt_id, uint64_t lun_id,
@@ -3800,6 +4723,25 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
return rc;
}
+/**
+ * lpfc_sli_sum_iocb: Function to count the number of FCP iocbs pending.
+ * @vport: Pointer to virtual port.
+ * @tgt_id: SCSI ID of the target.
+ * @lun_id: LUN ID of the scsi device.
+ * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
+ *
+ * This function returns the number of FCP commands pending for the vport.
+ * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
+ * commands pending on the vport associated with the SCSI device specified
+ * by the tgt_id and lun_id parameters.
+ * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
+ * commands pending on the vport associated with the SCSI target specified
+ * by the tgt_id parameter.
+ * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
+ * commands pending on the vport.
+ * This function returns the number of iocbs which satisfy the filter.
+ * This function is called without any lock held.
+ **/
int
lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
lpfc_ctx_cmd ctx_cmd)
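
A minimal sketch of the polling pattern this counter enables, e.g. waiting for a LUN to drain after a reset; the retry budget and helper name are illustrative.

/* Hypothetical sketch, not from this patch. */
static int example_wait_lun_drain(struct lpfc_vport *vport,
				  uint16_t tgt_id, uint64_t lun_id)
{
	int retries = 60;

	/* Sleeps between polls, so process context only */
	while (lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN) &&
	       --retries)
		msleep(500);

	return retries ? 0 : -ETIMEDOUT;
}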
@@ -3819,6 +4761,17 @@ lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
return sum;
}
+/**
+ * lpfc_sli_abort_fcp_cmpl: Completion handler function for an aborted
+ * FCP iocb.
+ * @phba: Pointer to HBA context object
+ * @cmdiocb: Pointer to command iocb object.
+ * @rspiocb: Pointer to response iocb object.
+ *
+ * This function is called when an aborted FCP iocb completes. This
+ * function is called by the ring event handler with no lock held.
+ * This function frees the iocb.
+ **/
void
lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
@@ -3827,6 +4780,28 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return;
}
+/**
+ * lpfc_sli_abort_iocb: Issue an abort for all SCSI commands
+ * pending on a SCSI host (vport)/target/LUN.
+ * @vport: Pointer to virtual port.
+ * @pring: Pointer to driver SLI ring object.
+ * @tgt_id: SCSI ID of the target.
+ * @lun_id: LUN ID of the scsi device.
+ * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
+ *
+ * This function sends an abort command for every SCSI command
+ * associated with the given virtual port pending on the ring,
+ * filtered by the lpfc_sli_validate_fcp_iocb function.
+ * When abort_cmd == LPFC_CTX_LUN, the function sends an abort only to the
+ * FCP iocbs associated with the LUN specified by the tgt_id and lun_id
+ * parameters.
+ * When abort_cmd == LPFC_CTX_TGT, the function sends an abort only to the
+ * FCP iocbs associated with the SCSI target specified by the tgt_id
+ * parameter.
+ * When abort_cmd == LPFC_CTX_HOST, the function sends an abort to all
+ * FCP iocbs associated with the virtual port.
+ * This function returns the number of iocbs it failed to abort.
+ * This function is called with no locks held.
+ **/
int
lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
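
A minimal sketch of a target-level escalation using this routine; the ring selection via sli.fcp_ring follows the field declared in lpfc_sli.h, and the helper name is illustrative.

/* Hypothetical sketch, not from this patch. */
static void example_abort_target(struct lpfc_vport *vport, uint16_t tgt_id)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
	int errcnt;

	/* lun_id is ignored for LPFC_CTX_TGT, so 0 is fine here */
	errcnt = lpfc_sli_abort_iocb(vport, pring, tgt_id, 0, LPFC_CTX_TGT);
	if (errcnt)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"example: %d iocbs not aborted\n", errcnt);
}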
@@ -3878,6 +4853,24 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
return errcnt;
}
+/**
+ * lpfc_sli_wake_iocb_wait: iocb completion handler for iocbs issued
+ * using lpfc_sli_issue_iocb_wait.
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * the lpfc_sli_issue_iocb_wait function. It is called by the
+ * ring event handler function without any lock held. It can be
+ * called from both worker thread context and interrupt context,
+ * and also from other threads which clean up the SLI layer
+ * objects.
+ * This function copies the contents of the response iocb to the
+ * response iocb memory object provided by the caller of
+ * lpfc_sli_issue_iocb_wait and then wakes up the thread which
+ * sleeps for the iocb completion.
+ **/
static void
lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocbq,
@@ -3899,13 +4892,36 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
return;
}
-/*
- * Issue the caller's iocb and wait for its completion, but no longer than the
- * caller's timeout. Note that iocb_flags is cleared before the
- * lpfc_sli_issue_call since the wake routine sets a unique value and by
- * definition this is a wait function.
- */
-
+/**
+ * lpfc_sli_issue_iocb_wait: Synchronous function to issue iocb commands.
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to sli ring.
+ * @piocb: Pointer to command iocb.
+ * @prspiocbq: Pointer to response iocb.
+ * @timeout: Timeout in number of seconds.
+ *
+ * This function issues the iocb to firmware and waits for the
+ * iocb to complete. If the iocb command is not
+ * completed within timeout seconds, it returns IOCB_TIMEDOUT.
+ * Caller should not free the iocb resources if this function
+ * returns IOCB_TIMEDOUT.
+ * The function waits for the iocb completion using a
+ * non-interruptible wait.
+ * This function will sleep while waiting for iocb completion.
+ * So, this function should not be called from any context which
+ * does not allow sleeping. For the same reason, this function
+ * cannot be called with interrupts disabled.
+ * This function assumes that the iocb completions occur while
+ * this function sleeps. So, this function cannot be called from
+ * the thread which processes iocb completions for this ring.
+ * This function clears the iocb_flag of the iocb object before
+ * issuing the iocb and the iocb completion handler sets this
+ * flag and wakes this thread when the iocb completes.
+ * The contents of the response iocb will be copied to prspiocbq
+ * by the completion handler when the command completes.
+ * This function returns IOCB_SUCCESS on success.
+ * This function is called with no lock held.
+ **/
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring,
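
A minimal sketch of the synchronous pattern, assuming the command iocb was built by the caller; the 30-second timeout is illustrative, and the resource handling on IOCB_TIMEDOUT follows the rule in the kernel-doc above.

/* Hypothetical sketch, not from this patch. */
static int example_sync_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmd)
{
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct lpfc_iocbq *rsp = lpfc_sli_get_iocbq(phba);
	int rc;

	if (!rsp)
		return IOCB_ERROR;

	rc = lpfc_sli_issue_iocb_wait(phba, pring, cmd, rsp, 30);
	if (rc == IOCB_SUCCESS && rsp->iocb.ulpStatus != IOSTAT_SUCCESS)
		rc = IOCB_ERROR;	/* completed, but with an error */

	/* On IOCB_TIMEDOUT the completion path still owns the resources */
	if (rc != IOCB_TIMEDOUT)
		lpfc_sli_release_iocbq(phba, rsp);
	return rc;
}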
@@ -3963,7 +4979,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
}
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- ":0332 IOCB wait issue failed, Data x%x\n",
+ "0332 IOCB wait issue failed, Data x%x\n",
retval);
retval = IOCB_ERROR;
}
@@ -3983,6 +4999,32 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
return retval;
}
+/**
+ * lpfc_sli_issue_mbox_wait: Synchronous function to issue mailbox.
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to driver mailbox object.
+ * @timeout: Timeout in number of seconds.
+ *
+ * This function issues the mailbox to firmware and waits for the
+ * mailbox command to complete. If the mailbox command is not
+ * completed within timeout seconds, it returns MBX_TIMEOUT.
+ * The function waits for the mailbox completion using an
+ * interruptible wait. If the thread is woken up due to a
+ * signal, MBX_TIMEOUT error is returned to the caller. Caller
+ * should not free the mailbox resources, if this function returns
+ * MBX_TIMEOUT.
+ * This function will sleep while waiting for mailbox completion.
+ * So, this function should not be called from any context which
+ * does not allow sleeping. For the same reason, this function
+ * cannot be called with interrupts disabled.
+ * This function assumes that the mailbox completion occurs while
+ * this function sleeps. So, this function cannot be called from
+ * the worker thread which processes mailbox completions.
+ * This function is called in the context of HBA management
+ * applications.
+ * This function returns MBX_SUCCESS when successful.
+ * This function is called with no lock held.
+ **/
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
uint32_t timeout)
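
A minimal sketch of a management-path caller, assuming lpfc_read_rev() as the command builder; the key point is the MBX_TIMEOUT ownership rule from the kernel-doc above.

/* Hypothetical sketch, not from this patch. */
static int example_sync_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int rc;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	lpfc_read_rev(phba, pmboxq);	/* build the mailbox command */
	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, 30 /* seconds */);

	/* Do not free on MBX_TIMEOUT: the completion path still owns it */
	if (rc != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);

	return rc == MBX_SUCCESS ? 0 : -EIO;
}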
@@ -4027,6 +5069,18 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
return retval;
}
+/**
+ * lpfc_sli_flush_mbox_queue: mailbox queue cleanup function.
+ * @phba: Pointer to HBA context.
+ *
+ * This function is called to clean up any pending mailbox
+ * objects in the driver queue before bringing the HBA offline.
+ * This function is called while resetting the HBA.
+ * The function is called without any lock held. The function
+ * takes hbalock to update SLI data structures.
+ * This function returns 1 when there is an active mailbox
+ * command pending else returns 0.
+ **/
int
lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
{
@@ -4058,13 +5112,84 @@ lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;
}
+/**
+ * lpfc_sli_check_eratt: Check error attention events.
+ * @phba: Pointer to HBA context.
+ *
+ * This function is called from timer soft interrupt context to check HBA's
+ * error attention register bit for error attention events.
+ *
+ * This function returns 1 when there is an Error Attention in the Host
+ * Attention Register and returns 0 otherwise.
+ **/
+int
+lpfc_sli_check_eratt(struct lpfc_hba *phba)
+{
+ uint32_t ha_copy;
+
+ /* If PCI channel is offline, don't process it */
+ if (unlikely(pci_channel_offline(phba->pcidev)))
+ return 0;
+
+ /* If somebody is waiting to handle an eratt, don't process it
+ * here. The brdkill function will do this.
+ */
+ if (phba->link_flag & LS_IGNORE_ERATT)
+ return 0;
+
+ /* Check if interrupt handler handles this ERATT */
+ spin_lock_irq(&phba->hbalock);
+ if (phba->hba_flag & HBA_ERATT_HANDLED) {
+ /* Interrupt handler has handled ERATT */
+ spin_unlock_irq(&phba->hbalock);
+ return 0;
+ }
+
+ /* Read chip Host Attention (HA) register */
+ ha_copy = readl(phba->HAregaddr);
+ if (ha_copy & HA_ERATT) {
+ /* Read host status register to retrieve error event */
+ lpfc_sli_read_hs(phba);
+ /* Set the driver HA work bitmap */
+ phba->work_ha |= HA_ERATT;
+ /* Indicate polling handles this ERATT */
+ phba->hba_flag |= HBA_ERATT_HANDLED;
+ spin_unlock_irq(&phba->hbalock);
+ return 1;
+ }
+ spin_unlock_irq(&phba->hbalock);
+ return 0;
+}
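
A minimal sketch of the timer-side consumer this helper is written for; the wake-up follows the pattern the interrupt path uses below, and the helper name is illustrative.

/* Hypothetical sketch, not from this patch. */
static void example_eratt_poll(struct lpfc_hba *phba)
{
	/* Returns 1 only when it also set HA_ERATT in phba->work_ha
	 * and claimed the event via HBA_ERATT_HANDLED */
	if (lpfc_sli_check_eratt(phba))
		lpfc_worker_wake_up(phba);
}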
+
+/**
+ * lpfc_sp_intr_handler: The slow-path interrupt handler of lpfc driver.
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when the device is enabled with MSI-X multi-message
+ * interrupt mode and there are slow-path events in the HBA. However,
+ * when the device is enabled with either MSI or Pin-IRQ interrupt mode,
+ * this function is called as part of the device-level interrupt handler.
+ * When the PCI slot is in error recovery or the HBA is undergoing
+ * initialization, the interrupt handler will not process the interrupt.
+ * The link attention and ELS ring attention events are handled by the
+ * worker thread. The interrupt handler signals the worker thread and
+ * returns for these events. This function is called without any
+ * lock held. It gets the hbalock to access and update SLI data
+ * structures.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled else it
+ * returns IRQ_NONE.
+ **/
irqreturn_t
-lpfc_intr_handler(int irq, void *dev_id)
+lpfc_sp_intr_handler(int irq, void *dev_id)
{
struct lpfc_hba *phba;
uint32_t ha_copy;
uint32_t work_ha_copy;
unsigned long status;
+ unsigned long iflag;
uint32_t control;
MAILBOX_t *mbox, *pmbox;
@@ -4078,67 +5203,71 @@ lpfc_intr_handler(int irq, void *dev_id)
* Get the driver's phba structure from the dev_id and
* assume the HBA is not interrupting.
*/
- phba = (struct lpfc_hba *) dev_id;
+ phba = (struct lpfc_hba *)dev_id;
if (unlikely(!phba))
return IRQ_NONE;
- /* If the pci channel is offline, ignore all the interrupts. */
- if (unlikely(pci_channel_offline(phba->pcidev)))
- return IRQ_NONE;
-
- phba->sli.slistat.sli_intr++;
-
- /*
- * Call the HBA to see if it is interrupting. If not, don't claim
- * the interrupt
- */
-
- /* Ignore all interrupts during initialization. */
- if (unlikely(phba->link_state < LPFC_LINK_DOWN))
- return IRQ_NONE;
-
/*
- * Read host attention register to determine interrupt source
- * Clear Attention Sources, except Error Attention (to
- * preserve status) and Link Attention
- */
- spin_lock(&phba->hbalock);
- ha_copy = readl(phba->HAregaddr);
- /* If somebody is waiting to handle an eratt don't process it
- * here. The brdkill function will do this.
* Stuff needs to be attended to when this function is invoked as an
* individual interrupt handler in MSI-X multi-message interrupt mode
*/
- if (phba->link_flag & LS_IGNORE_ERATT)
- ha_copy &= ~HA_ERATT;
- writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
- readl(phba->HAregaddr); /* flush */
- spin_unlock(&phba->hbalock);
-
- if (unlikely(!ha_copy))
- return IRQ_NONE;
+ if (phba->intr_type == MSIX) {
+ /* If the pci channel is offline, ignore all the interrupts */
+ if (unlikely(pci_channel_offline(phba->pcidev)))
+ return IRQ_NONE;
+ /* Update device-level interrupt statistics */
+ phba->sli.slistat.sli_intr++;
+ /* Ignore all interrupts during initialization. */
+ if (unlikely(phba->link_state < LPFC_LINK_DOWN))
+ return IRQ_NONE;
+ /* Need to read HA REG for slow-path events */
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ ha_copy = readl(phba->HAregaddr);
+ /* If somebody is waiting to handle an eratt don't process it
+ * here. The brdkill function will do this.
+ */
+ if (phba->link_flag & LS_IGNORE_ERATT)
+ ha_copy &= ~HA_ERATT;
+ /* Check the need for handling ERATT in interrupt handler */
+ if (ha_copy & HA_ERATT) {
+ if (phba->hba_flag & HBA_ERATT_HANDLED)
+ /* ERATT polling has handled ERATT */
+ ha_copy &= ~HA_ERATT;
+ else
+ /* Indicate interrupt handler handles ERATT */
+ phba->hba_flag |= HBA_ERATT_HANDLED;
+ }
+ /* Clear up only attention source related to slow-path */
+ writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
+ phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ } else
+ ha_copy = phba->ha_copy;
work_ha_copy = ha_copy & phba->work_ha_mask;
- if (unlikely(work_ha_copy)) {
+ if (work_ha_copy) {
if (work_ha_copy & HA_LATT) {
if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
/*
* Turn off Link Attention interrupts
* until CLEAR_LA done
*/
- spin_lock(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflag);
phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
control = readl(phba->HCregaddr);
control &= ~HC_LAINT_ENA;
writel(control, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
- spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
}
else
work_ha_copy &= ~HA_LATT;
}
- if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) {
+ if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
/*
* Turn off Slow Rings interrupts, LPFC_ELS_RING is
* the only slow ring.
@@ -4147,7 +5276,7 @@ lpfc_intr_handler(int irq, void *dev_id)
(HA_RXMASK << (4*LPFC_ELS_RING)));
status >>= (4*LPFC_ELS_RING);
if (status & HA_RXMASK) {
- spin_lock(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflag);
control = readl(phba->HCregaddr);
lpfc_debugfs_slow_ring_trc(phba,
@@ -4176,40 +5305,22 @@ lpfc_intr_handler(int irq, void *dev_id)
(uint32_t)((unsigned long)
&phba->work_waitq));
}
- spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
}
}
-
- if (work_ha_copy & HA_ERATT) {
- /*
- * There was a link/board error. Read the
- * status register to retrieve the error event
- * and process it.
- */
- phba->sli.slistat.err_attn_event++;
- /* Save status info */
- phba->work_hs = readl(phba->HSregaddr);
- phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
- phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
-
- /* Clear Chip error bit */
- writel(HA_ERATT, phba->HAregaddr);
- readl(phba->HAregaddr); /* flush */
- phba->pport->stopped = 1;
- }
-
- spin_lock(&phba->hbalock);
- if ((work_ha_copy & HA_MBATT) &&
- (phba->sli.mbox_active)) {
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ if (work_ha_copy & HA_ERATT)
+ lpfc_sli_read_hs(phba);
+ if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
pmb = phba->sli.mbox_active;
pmbox = &pmb->mb;
- mbox = &phba->slim2p->mbx;
+ mbox = phba->mbox;
vport = pmb->vport;
/* First check out the status word */
lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
if (pmbox->mbxOwner != OWN_HOST) {
- spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
/*
* Stray Mailbox Interrupt, mbxCommand <cmd>
* mbxStatus <status>
@@ -4226,7 +5337,7 @@ lpfc_intr_handler(int irq, void *dev_id)
work_ha_copy &= ~HA_MBATT;
} else {
phba->sli.mbox_active = NULL;
- spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
phba->last_completion_time = jiffies;
del_timer(&phba->sli.mbox_tmo);
if (pmb->mbox_cmpl) {
@@ -4270,19 +5381,24 @@ lpfc_intr_handler(int irq, void *dev_id)
lpfc_printf_log(phba,
KERN_ERR,
LOG_MBOX | LOG_SLI,
- "0306 rc should have"
+ "0350 rc should have"
"been MBX_BUSY");
goto send_current_mbox;
}
}
- spin_lock(&phba->pport->work_port_lock);
+ spin_lock_irqsave(
+ &phba->pport->work_port_lock,
+ iflag);
phba->pport->work_port_events &=
~WORKER_MBOX_TMO;
- spin_unlock(&phba->pport->work_port_lock);
+ spin_unlock_irqrestore(
+ &phba->pport->work_port_lock,
+ iflag);
lpfc_mbox_cmpl_put(phba, pmb);
}
} else
- spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+
if ((work_ha_copy & HA_MBATT) &&
(phba->sli.mbox_active == NULL)) {
send_current_mbox:
@@ -4297,20 +5413,80 @@ send_current_mbox:
"MBX_SUCCESS");
}
- spin_lock(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflag);
phba->work_ha |= work_ha_copy;
- spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_worker_wake_up(phba);
}
+ return IRQ_HANDLED;
- ha_copy &= ~(phba->work_ha_mask);
+} /* lpfc_sp_intr_handler */
+
+/**
+ * lpfc_fp_intr_handler: The fast-path interrupt handler of lpfc driver.
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when the device is enabled with MSI-X multi-message
+ * interrupt mode and there is a fast-path FCP IOCB ring event in the
+ * HBA. However, when the device is enabled with either MSI or Pin-IRQ
+ * interrupt mode, this function is called as part of the device-level
+ * interrupt handler. When the PCI slot is in error recovery or the HBA
+ * is undergoing initialization, the interrupt handler will not process
+ * the interrupt. The SCSI FCP fast-path ring events are handled in the
+ * interrupt context. This function is called without any lock held. It
+ * gets the hbalock to access and update SLI data structures.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_fp_intr_handler(int irq, void *dev_id)
+{
+ struct lpfc_hba *phba;
+ uint32_t ha_copy;
+ unsigned long status;
+ unsigned long iflag;
+
+ /* Get the driver's phba structure from the dev_id and
+ * assume the HBA is not interrupting.
+ */
+ phba = (struct lpfc_hba *) dev_id;
+
+ if (unlikely(!phba))
+ return IRQ_NONE;
+
+ /*
+ * Stuff needs to be attended to when this function is invoked as an
+ * individual interrupt handler in MSI-X multi-message interrupt mode
+ */
+ if (phba->intr_type == MSIX) {
+ /* If pci channel is offline, ignore all the interrupts */
+ if (unlikely(pci_channel_offline(phba->pcidev)))
+ return IRQ_NONE;
+ /* Update device-level interrupt statistics */
+ phba->sli.slistat.sli_intr++;
+ /* Ignore all interrupts during initialization. */
+ if (unlikely(phba->link_state < LPFC_LINK_DOWN))
+ return IRQ_NONE;
+ /* Need to read HA REG for FCP ring and other ring events */
+ ha_copy = readl(phba->HAregaddr);
+ /* Clear up only attention source related to fast-path */
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
+ phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ } else
+ ha_copy = phba->ha_copy;
/*
- * Process all events on FCP ring. Take the optimized path for
- * FCP IO. Any other IO is slow path and is handled by
- * the worker thread.
+ * Process all events on FCP ring. Take the optimized path for FCP IO.
*/
- status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
+ ha_copy &= ~(phba->work_ha_mask);
+
+ status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
status >>= (4*LPFC_FCP_RING);
if (status & HA_RXMASK)
lpfc_sli_handle_fast_ring_event(phba,
@@ -4319,11 +5495,10 @@ send_current_mbox:
if (phba->cfg_multi_ring_support == 2) {
/*
- * Process all events on extra ring. Take the optimized path
- * for extra ring IO. Any other IO is slow path and is handled
- * by the worker thread.
+ * Process all events on extra ring. Take the optimized path
+ * for extra ring IO.
*/
- status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
+ status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
status >>= (4*LPFC_EXTRA_RING);
if (status & HA_RXMASK) {
lpfc_sli_handle_fast_ring_event(phba,
@@ -4332,5 +5507,106 @@ send_current_mbox:
}
}
return IRQ_HANDLED;
+} /* lpfc_fp_intr_handler */
+
+/**
+ * lpfc_intr_handler: The device-level interrupt handler of lpfc driver.
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is the device-level interrupt handler called from the PCI
+ * layer when either MSI or Pin-IRQ interrupt mode is enabled and there is
+ * an event in the HBA which requires driver attention. This function
+ * invokes the slow-path interrupt attention handling function and fast-path
+ * interrupt attention handling function in turn to process the relevant
+ * HBA attention events. This function is called without any lock held. It
+ * gets the hbalock to access and update SLI data structures.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled, else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_intr_handler(int irq, void *dev_id)
+{
+ struct lpfc_hba *phba;
+ irqreturn_t sp_irq_rc, fp_irq_rc;
+ unsigned long status1, status2;
+
+ /*
+ * Get the driver's phba structure from the dev_id and
+ * assume the HBA is not interrupting.
+ */
+ phba = (struct lpfc_hba *) dev_id;
+
+ if (unlikely(!phba))
+ return IRQ_NONE;
+
+ /* If the pci channel is offline, ignore all the interrupts. */
+ if (unlikely(pci_channel_offline(phba->pcidev)))
+ return IRQ_NONE;
+
+ /* Update device level interrupt statistics */
+ phba->sli.slistat.sli_intr++;
+
+ /* Ignore all interrupts during initialization. */
+ if (unlikely(phba->link_state < LPFC_LINK_DOWN))
+ return IRQ_NONE;
+
+ spin_lock(&phba->hbalock);
+ phba->ha_copy = readl(phba->HAregaddr);
+ if (unlikely(!phba->ha_copy)) {
+ spin_unlock(&phba->hbalock);
+ return IRQ_NONE;
+ } else if (phba->ha_copy & HA_ERATT) {
+ if (phba->hba_flag & HBA_ERATT_HANDLED)
+ /* ERATT polling has handled ERATT */
+ phba->ha_copy &= ~HA_ERATT;
+ else
+ /* Indicate interrupt handler handles ERATT */
+ phba->hba_flag |= HBA_ERATT_HANDLED;
+ }
+
+ /* Clear attention sources except link and error attentions */
+ writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+ spin_unlock(&phba->hbalock);
+
+ /*
+ * Invokes slow-path host attention interrupt handling as appropriate.
+ */
+
+ /* status of events with mailbox and link attention */
+ status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
+
+ /* status of events with ELS ring */
+ status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
+ status2 >>= (4*LPFC_ELS_RING);
+
+ if (status1 || (status2 & HA_RXMASK))
+ sp_irq_rc = lpfc_sp_intr_handler(irq, dev_id);
+ else
+ sp_irq_rc = IRQ_NONE;
+
+ /*
+ * Invoke fast-path host attention interrupt handling as appropriate.
+ */
+
+ /* status of events with FCP ring */
+ status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
+ status1 >>= (4*LPFC_FCP_RING);
+
+ /* status of events with extra ring */
+ if (phba->cfg_multi_ring_support == 2) {
+ status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
+ status2 >>= (4*LPFC_EXTRA_RING);
+ } else
+ status2 = 0;
+
+ if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
+ fp_irq_rc = lpfc_fp_intr_handler(irq, dev_id);
+ else
+ fp_irq_rc = IRQ_NONE;
-} /* lpfc_intr_handler */
+ /* Return device-level interrupt handling status */
+ return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
+} /* lpfc_intr_handler */
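
To show how the three handlers relate, a minimal sketch of the request_irq() wiring, assuming a two-entry msix_entries[] array on the hba; the real hookup lives in the driver's interrupt-setup code and may differ.

/* Hypothetical sketch, not from this patch. */
static int example_setup_irqs(struct lpfc_hba *phba)
{
	int rc;

	if (phba->intr_type == MSIX) {
		/* vector 0: mailbox/link/ELS slow-path events */
		rc = request_irq(phba->msix_entries[0].vector,
				 lpfc_sp_intr_handler, 0,
				 LPFC_SP_DRIVER_HANDLER_NAME, phba);
		if (rc)
			return rc;
		/* vector 1: FCP/extra ring fast-path events */
		rc = request_irq(phba->msix_entries[1].vector,
				 lpfc_fp_intr_handler, 0,
				 LPFC_FP_DRIVER_HANDLER_NAME, phba);
		if (rc)
			free_irq(phba->msix_entries[0].vector, phba);
		return rc;
	}

	/* MSI or INTx: one device-level handler demultiplexes both */
	return request_irq(phba->pcidev->irq, lpfc_intr_handler,
			   IRQF_SHARED, LPFC_DRIVER_NAME, phba);
}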
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 7249fd252cbb..883938652a6a 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -233,6 +233,7 @@ struct lpfc_sli {
#define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */
#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */
#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
+#define LPFC_MENLO_MAINT 0x1000 /* needed for Menlo fw download */
struct lpfc_sli_ring ring[LPFC_MAX_RING];
int fcp_ring; /* ring used for FCP initiator commands */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index ad24cacfbe10..7e32e95c5392 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,9 +18,11 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.2.7"
+#define LPFC_DRIVER_VERSION "8.3.0"
-#define LPFC_DRIVER_NAME "lpfc"
+#define LPFC_DRIVER_NAME "lpfc"
+#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
+#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 109f89d98830..63b54c66756c 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -34,6 +34,7 @@
#include <scsi/scsi_transport_fc.h>
#include "lpfc_hw.h"
#include "lpfc_sli.h"
+#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@@ -204,6 +205,77 @@ lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
return 1;
}
+/**
+ * lpfc_discovery_wait: Wait for driver discovery to quiesce.
+ * @vport: The virtual port for which this call is being executed.
+ *
+ * The driver calls this routine from lpfc_vport_delete to enforce
+ * synchronous execution of vport delete relative to discovery
+ * activities. The lpfc_vport_delete routine should not return until
+ * it can reasonably guarantee that discovery has quiesced.
+ * Post FDISC LOGO, the driver must wait until its SAN teardown is
+ * complete and all resources are recovered before allowing cleanup.
+ *
+ * This routine does not require any locks held.
+ **/
+static void lpfc_discovery_wait(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t wait_flags = 0;
+ unsigned long wait_time_max;
+ unsigned long start_time;
+
+ wait_flags = FC_RSCN_MODE | FC_RSCN_DISCOVERY | FC_NLP_MORE |
+ FC_RSCN_DEFERRED | FC_NDISC_ACTIVE | FC_DISC_TMO;
+
+ /*
+ * The time constraint on this loop is a balance between the
+ * fabric RA_TOV value and dev_loss tmo. The driver's
+ * devloss_tmo is 10 giving this loop a 3x multiplier minimally.
+ */
+ wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000);
+ wait_time_max += jiffies;
+ start_time = jiffies;
+ while (time_before(jiffies, wait_time_max)) {
+ if ((vport->num_disc_nodes > 0) ||
+ (vport->fc_flag & wait_flags) ||
+ ((vport->port_state > LPFC_VPORT_FAILED) &&
+ (vport->port_state < LPFC_VPORT_READY))) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_VPORT,
+ "1833 Vport discovery quiesce Wait:"
+ " vpi x%x state x%x fc_flags x%x"
+ " num_nodes x%x, waiting 1000 msecs"
+ " total wait msecs x%x\n",
+ vport->vpi, vport->port_state,
+ vport->fc_flag, vport->num_disc_nodes,
+ jiffies_to_msecs(jiffies - start_time));
+ msleep(1000);
+ } else {
+ /* Base case. Wait variants satisfied. Break out */
+ lpfc_printf_log(phba, KERN_INFO, LOG_VPORT,
+ "1834 Vport discovery quiesced:"
+ " vpi x%x state x%x fc_flags x%x"
+ " wait msecs x%x\n",
+ vport->vpi, vport->port_state,
+ vport->fc_flag,
+ jiffies_to_msecs(jiffies
+ - start_time));
+ break;
+ }
+ }
+
+ if (time_after(jiffies, wait_time_max))
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "1835 Vport discovery quiesce failed:"
+ " vpi x%x state x%x fc_flags x%x"
+ " wait msecs x%x\n",
+ vport->vpi, vport->port_state,
+ vport->fc_flag,
+ jiffies_to_msecs(jiffies - start_time));
+}
+
int
lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
{
@@ -216,10 +288,8 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
int vpi;
int rc = VPORT_ERROR;
int status;
- int size;
- if ((phba->sli_rev < 3) ||
- !(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
+ if ((phba->sli_rev < 3) || !(phba->cfg_enable_npiv)) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
"1808 Create VPORT failed: "
"NPIV is not enabled: SLImode:%d\n",
@@ -279,20 +349,6 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8);
memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8);
- size = strnlen(fc_vport->symbolic_name, LPFC_VNAME_LEN);
- if (size) {
- vport->vname = kzalloc(size+1, GFP_KERNEL);
- if (!vport->vname) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
- "1814 Create VPORT failed. "
- "vname allocation failed.\n");
- rc = VPORT_ERROR;
- lpfc_free_vpi(phba, vpi);
- destroy_port(vport);
- goto error_out;
- }
- memcpy(vport->vname, fc_vport->symbolic_name, size+1);
- }
if (fc_vport->node_name != 0)
u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
if (fc_vport->port_name != 0)
@@ -322,6 +378,9 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
goto error_out;
}
+ /* Create binary sysfs attribute for vport */
+ lpfc_alloc_sysfs_attr(vport);
+
*(struct lpfc_vport **)fc_vport->dd_data = vport;
vport->fc_vport = fc_vport;
@@ -333,6 +392,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
}
if (disable) {
+ lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
rc = VPORT_OK;
goto out;
}
@@ -506,13 +566,21 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
* initiated after we've disposed of all other resources associated
* with the port.
*/
- if (!scsi_host_get(shost) || !scsi_host_get(shost))
+ if (!scsi_host_get(shost))
return VPORT_INVAL;
+ if (!scsi_host_get(shost)) {
+ scsi_host_put(shost);
+ return VPORT_INVAL;
+ }
spin_lock_irq(&phba->hbalock);
vport->load_flag |= FC_UNLOADING;
spin_unlock_irq(&phba->hbalock);
- kfree(vport->vname);
+
+ lpfc_free_sysfs_attr(vport);
+
lpfc_debugfs_terminate(vport);
+
+ /* Remove FC host and then SCSI host with the vport */
fc_remove_host(lpfc_shost_from_vport(vport));
scsi_remove_host(lpfc_shost_from_vport(vport));
@@ -597,11 +665,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
}
vport->unreg_vpi_cmpl = VPORT_INVAL;
timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ goto skip_logo;
if (!lpfc_issue_els_npiv_logo(vport, ndlp))
while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
timeout = schedule_timeout(timeout);
}
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ lpfc_discovery_wait(vport);
+
skip_logo:
lpfc_cleanup(vport);
lpfc_sli_host_down(vport);
@@ -615,8 +688,10 @@ skip_logo:
* Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi)
* does the scsi_host_put() to release the vport.
*/
- lpfc_mbx_unreg_vpi(vport);
- }
+ if (lpfc_mbx_unreg_vpi(vport))
+ scsi_host_put(shost);
+ } else
+ scsi_host_put(shost);
lpfc_free_vpi(phba, vport->vpi);
vport->work_port_events = 0;
@@ -663,3 +738,82 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
scsi_host_put(lpfc_shost_from_vport(vports[i]));
kfree(vports);
}
+
+
+/**
+ * lpfc_vport_reset_stat_data: Reset the statistical data for the vport.
+ * @vport: Pointer to vport object.
+ *
+ * This function resets the statistical data for the vport. This function
+ * is called with the host_lock held.
+ **/
+void
+lpfc_vport_reset_stat_data(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
+ if (ndlp->lat_data)
+ memset(ndlp->lat_data, 0, LPFC_MAX_BUCKET_COUNT *
+ sizeof(struct lpfc_scsicmd_bkt));
+ }
+}
+
+
+/**
+ * lpfc_alloc_bucket: Allocate data buffer required for collecting
+ * statistical data.
+ * @vport: Pointer to vport object.
+ *
+ * This function allocates the data buffers required for all the FC
+ * nodes of the vport to collect statistical data.
+ **/
+void
+lpfc_alloc_bucket(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
+
+ kfree(ndlp->lat_data);
+ ndlp->lat_data = NULL;
+
+ if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
+ ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
+ sizeof(struct lpfc_scsicmd_bkt),
+ GFP_ATOMIC);
+
+ if (!ndlp->lat_data)
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ "0287 lpfc_alloc_bucket failed to "
+ "allocate statistical data buffer DID "
+ "0x%x\n", ndlp->nlp_DID);
+ }
+ }
+}
+
+/**
+ * lpfc_free_bucket: Free data buffer required for collecting
+ * statistical data.
+ * @vport: Pointer to vport object.
+ *
+ * This function frees the statistical data buffers of all the FC
+ * nodes of the vport.
+ **/
+void
+lpfc_free_bucket(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
+
+ kfree(ndlp->lat_data);
+ ndlp->lat_data = NULL;
+ }
+}
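
A minimal sketch of the lifecycle these three helpers imply; the trigger points are illustrative, and locking (the reset is documented to run under host_lock) is elided for brevity.

/* Hypothetical sketch, not from this patch. */
static void example_stat_lifecycle(struct lpfc_vport *vport)
{
	lpfc_alloc_bucket(vport);		/* per-node latency buffers */
	lpfc_vport_reset_stat_data(vport);	/* zero the counters */
	/* ... FCP completions fill ndlp->lat_data buckets ... */
	lpfc_free_bucket(vport);		/* release on teardown */
}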
diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h
index 96c445333b69..90828340acea 100644
--- a/drivers/scsi/lpfc/lpfc_vport.h
+++ b/drivers/scsi/lpfc/lpfc_vport.h
@@ -112,4 +112,8 @@ struct vport_cmd_tag {
void lpfc_vport_set_state(struct lpfc_vport *vport,
enum fc_vport_state new_state);
+void lpfc_vport_reset_stat_data(struct lpfc_vport *);
+void lpfc_alloc_bucket(struct lpfc_vport *);
+void lpfc_free_bucket(struct lpfc_vport *);
+
#endif /* H_LPFC_VPORT */
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 887682a24e36..c24e86f07804 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -53,7 +53,8 @@ struct mac_esp_priv {
void __iomem *pdma_io;
int error;
};
-static struct platform_device *internal_esp, *external_esp;
+static struct platform_device *internal_pdev, *external_pdev;
+static struct esp *esp_chips[2];
#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \
platform_get_drvdata((struct platform_device *) \
@@ -170,7 +171,7 @@ static inline int mac_esp_wait_for_dreq(struct esp *esp)
#define MAC_ESP_PDMA_LOOP(operands) \
asm volatile ( \
- " tstw %2 \n" \
+ " tstw %1 \n" \
" jbeq 20f \n" \
"1: movew " operands " \n" \
"2: movew " operands " \n" \
@@ -188,14 +189,14 @@ static inline int mac_esp_wait_for_dreq(struct esp *esp)
"14: movew " operands " \n" \
"15: movew " operands " \n" \
"16: movew " operands " \n" \
- " subqw #1,%2 \n" \
+ " subqw #1,%1 \n" \
" jbne 1b \n" \
- "20: tstw %3 \n" \
+ "20: tstw %2 \n" \
" jbeq 30f \n" \
"21: movew " operands " \n" \
- " subqw #1,%3 \n" \
+ " subqw #1,%2 \n" \
" jbne 21b \n" \
- "30: tstw %4 \n" \
+ "30: tstw %3 \n" \
" jbeq 40f \n" \
"31: moveb " operands " \n" \
"32: nop \n" \
@@ -223,8 +224,8 @@ static inline int mac_esp_wait_for_dreq(struct esp *esp)
" .long 31b,40b \n" \
" .long 32b,40b \n" \
" .previous \n" \
- : "+a" (addr) \
- : "a" (mep->pdma_io), "r" (count32), "r" (count2), "g" (esp_count))
+ : "+a" (addr), "+r" (count32), "+r" (count2) \
+ : "g" (count1), "a" (mep->pdma_io))
static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
u32 dma_count, int write, u8 cmd)
@@ -247,19 +248,20 @@ static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
do {
unsigned int count32 = esp_count >> 5;
unsigned int count2 = (esp_count & 0x1F) >> 1;
+ unsigned int count1 = esp_count & 1;
unsigned int start_addr = addr;
if (mac_esp_wait_for_dreq(esp))
break;
if (write) {
- MAC_ESP_PDMA_LOOP("%1@,%0@+");
+ MAC_ESP_PDMA_LOOP("%4@,%0@+");
esp_count -= addr - start_addr;
} else {
unsigned int n;
- MAC_ESP_PDMA_LOOP("%0@+,%1@");
+ MAC_ESP_PDMA_LOOP("%0@+,%4@");
if (mac_esp_wait_for_empty_fifo(esp))
break;
@@ -442,6 +444,32 @@ static u32 mac_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
return dma_len > 0xFFFF ? 0xFFFF : dma_len;
}
+static irqreturn_t mac_scsi_esp_intr(int irq, void *dev_id)
+{
+ int got_intr;
+
+ /*
+ * This is an edge-triggered IRQ, so we have to be careful to
+ * avoid missing a transition when it is shared by two ESP devices.
+ */
+
+ do {
+ got_intr = 0;
+ if (esp_chips[0] &&
+ (mac_esp_read8(esp_chips[0], ESP_STATUS) & ESP_STAT_INTR)) {
+ (void)scsi_esp_intr(irq, esp_chips[0]);
+ got_intr = 1;
+ }
+ if (esp_chips[1] &&
+ (mac_esp_read8(esp_chips[1], ESP_STATUS) & ESP_STAT_INTR)) {
+ (void)scsi_esp_intr(irq, esp_chips[1]);
+ got_intr = 1;
+ }
+ } while (got_intr);
+
+ return IRQ_HANDLED;
+}
+
static struct esp_driver_ops mac_esp_ops = {
.esp_write8 = mac_esp_write8,
.esp_read8 = mac_esp_read8,
@@ -556,10 +584,16 @@ static int __devinit esp_mac_probe(struct platform_device *dev)
}
host->irq = IRQ_MAC_SCSI;
- err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "Mac ESP",
- esp);
- if (err < 0)
- goto fail_free_priv;
+ esp_chips[dev->id] = esp;
+ mb();
+ if (esp_chips[!dev->id] == NULL) {
+ err = request_irq(host->irq, mac_scsi_esp_intr, 0,
+ "Mac ESP", NULL);
+ if (err < 0) {
+ esp_chips[dev->id] = NULL;
+ goto fail_free_priv;
+ }
+ }
err = scsi_esp_register(esp, &dev->dev);
if (err)
@@ -568,7 +602,8 @@ static int __devinit esp_mac_probe(struct platform_device *dev)
return 0;
fail_free_irq:
- free_irq(host->irq, esp);
+ if (esp_chips[!dev->id] == NULL)
+ free_irq(host->irq, esp);
fail_free_priv:
kfree(mep);
fail_free_command_block:
@@ -587,7 +622,9 @@ static int __devexit esp_mac_remove(struct platform_device *dev)
scsi_esp_unregister(esp);
- free_irq(irq, esp);
+ esp_chips[dev->id] = NULL;
+ if (!(esp_chips[0] || esp_chips[1]))
+ free_irq(irq, NULL);
kfree(mep);
@@ -614,19 +651,18 @@ static int __init mac_esp_init(void)
if (err)
return err;
- internal_esp = platform_device_alloc(DRV_MODULE_NAME, 0);
- if (internal_esp && platform_device_add(internal_esp)) {
- platform_device_put(internal_esp);
- internal_esp = NULL;
+ internal_pdev = platform_device_alloc(DRV_MODULE_NAME, 0);
+ if (internal_pdev && platform_device_add(internal_pdev)) {
+ platform_device_put(internal_pdev);
+ internal_pdev = NULL;
}
-
- external_esp = platform_device_alloc(DRV_MODULE_NAME, 1);
- if (external_esp && platform_device_add(external_esp)) {
- platform_device_put(external_esp);
- external_esp = NULL;
+ external_pdev = platform_device_alloc(DRV_MODULE_NAME, 1);
+ if (external_pdev && platform_device_add(external_pdev)) {
+ platform_device_put(external_pdev);
+ external_pdev = NULL;
}
- if (internal_esp || external_esp) {
+ if (internal_pdev || external_pdev) {
return 0;
} else {
platform_driver_unregister(&esp_mac_driver);
@@ -638,13 +674,13 @@ static void __exit mac_esp_exit(void)
{
platform_driver_unregister(&esp_mac_driver);
- if (internal_esp) {
- platform_device_unregister(internal_esp);
- internal_esp = NULL;
+ if (internal_pdev) {
+ platform_device_unregister(internal_pdev);
+ internal_pdev = NULL;
}
- if (external_esp) {
- platform_device_unregister(external_esp);
- external_esp = NULL;
+ if (external_pdev) {
+ platform_device_unregister(external_pdev);
+ external_pdev = NULL;
}
}
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index 0248919bc2df..bf2a1c516293 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -47,7 +47,6 @@
#include <asm/macintosh.h>
#include <asm/macints.h>
-#include <asm/machw.h>
#include <asm/mac_via.h>
#include "scsi.h"
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 28c9da7d4a5c..7dc62deb4087 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4402,6 +4402,10 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
scb_t *scb;
int rval;
+ scmd = scsi_allocate_command(GFP_KERNEL);
+ if (!scmd)
+ return -ENOMEM;
+
/*
* The internal commands share one command id and hence are
* serialized. This is so because we want to reserve maximum number of
@@ -4412,12 +4416,11 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
scb = &adapter->int_scb;
memset(scb, 0, sizeof(scb_t));
- scmd = &adapter->int_scmd;
- memset(scmd, 0, sizeof(Scsi_Cmnd));
-
sdev = kzalloc(sizeof(struct scsi_device), GFP_KERNEL);
scmd->device = sdev;
+ memset(adapter->int_cdb, 0, sizeof(adapter->int_cdb));
+ scmd->cmnd = adapter->int_cdb;
scmd->device->host = adapter->host;
scmd->host_scribble = (void *)scb;
scmd->cmnd[0] = MEGA_INTERNAL_CMD;
@@ -4456,6 +4459,8 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
mutex_unlock(&adapter->int_mtx);
+ scsi_free_command(GFP_KERNEL, scmd);
+
return rval;
}
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index ee70bd4ae4ba..795201fa0b48 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -888,8 +888,8 @@ typedef struct {
u8 sglen; /* f/w supported scatter-gather list length */
+ unsigned char int_cdb[MAX_COMMAND_SIZE];
scb_t int_scb;
- Scsi_Cmnd int_scmd;
struct mutex int_mtx; /* To synchronize the internal
commands */
struct completion int_waitq; /* wait queue for internal
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index afe1de998763..17ce7abe17ee 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -1016,7 +1016,8 @@ static int megasas_slave_configure(struct scsi_device *sdev)
* The RAID firmware may require extended timeouts.
*/
if (sdev->channel >= MEGASAS_MAX_PD_CHANNELS)
- sdev->timeout = MEGASAS_DEFAULT_CMD_TIMEOUT * HZ;
+ blk_queue_rq_timeout(sdev->request_queue,
+ MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
return 0;
}
@@ -2988,17 +2989,6 @@ static int megasas_mgmt_open(struct inode *inode, struct file *filep)
}
/**
- * megasas_mgmt_release - char node "release" entry point
- */
-static int megasas_mgmt_release(struct inode *inode, struct file *filep)
-{
- filep->private_data = NULL;
- fasync_helper(-1, filep, 0, &megasas_async_queue);
-
- return 0;
-}
-
-/**
* megasas_mgmt_fasync - Async notifier registration from applications
*
* This function adds the calling process to a driver global queue. When an
@@ -3345,7 +3335,6 @@ megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations megasas_mgmt_fops = {
.owner = THIS_MODULE,
.open = megasas_mgmt_open,
- .release = megasas_mgmt_release,
.fasync = megasas_mgmt_fasync,
.unlocked_ioctl = megasas_mgmt_ioctl,
#ifdef CONFIG_COMPAT
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 22052bb7becb..d06ec5aa6924 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -3401,8 +3401,7 @@ static int __devinit nsp32_probe(struct pci_dev *pdev, const struct pci_device_i
data->IrqNumber = pdev->irq;
data->BaseAddress = pci_resource_start(pdev, 0);
data->NumAddress = pci_resource_len (pdev, 0);
- data->MmioAddress = ioremap_nocache(pci_resource_start(pdev, 1),
- pci_resource_len (pdev, 1));
+ data->MmioAddress = pci_ioremap_bar(pdev, 1);
data->MmioLength = pci_resource_len (pdev, 1);
pci_set_master(pdev);
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 1c79f9794f4e..0ea78d9a37db 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -5708,7 +5708,8 @@ static int osst_sysfs_add(dev_t dev, struct device *device, struct osst_tape * S
struct device *osst_member;
int err;
- osst_member = device_create_drvdata(osst_sysfs_class, device, dev, STp, "%s", name);
+ osst_member = device_create(osst_sysfs_class, device, dev, STp,
+ "%s", name);
if (IS_ERR(osst_member)) {
printk(KERN_WARNING "osst :W: Unable to add sysfs class member %s\n", name);
return PTR_ERR(osst_member);
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index b6cd12b2e996..8cb9240596ab 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -4294,8 +4294,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
error = -ENODEV;
#if MEMORY_MAPPED_IO
- ha->mmpbase = ioremap(pci_resource_start(ha->pdev, 1),
- pci_resource_len(ha->pdev, 1));
+ ha->mmpbase = pci_ioremap_bar(ha->pdev, 1);
if (!ha->mmpbase) {
printk(KERN_INFO "qla1280: Unable to map I/O memory\n");
goto error_free_response_ring;
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 0ddfe7106b3b..cd53627cc761 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -19,8 +19,9 @@ qla2x00_sysfs_read_fw_dump(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
if (ha->fw_dump_reading == 0)
return 0;
@@ -34,8 +35,9 @@ qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
int reading;
if (off != 0)
@@ -48,7 +50,7 @@ qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
break;
qla_printk(KERN_INFO, ha,
- "Firmware dump cleared on (%ld).\n", ha->host_no);
+ "Firmware dump cleared on (%ld).\n", vha->host_no);
ha->fw_dump_reading = 0;
ha->fw_dumped = 0;
@@ -59,14 +61,14 @@ qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
qla_printk(KERN_INFO, ha,
"Raw firmware dump ready for read on (%ld).\n",
- ha->host_no);
+ vha->host_no);
}
break;
case 2:
- qla2x00_alloc_fw_dump(ha);
+ qla2x00_alloc_fw_dump(vha);
break;
case 3:
- qla2x00_system_error(ha);
+ qla2x00_system_error(vha);
break;
}
return (count);
@@ -87,8 +89,9 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
if (!capable(CAP_SYS_ADMIN))
return 0;
@@ -103,8 +106,9 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
uint16_t cnt;
if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size)
@@ -134,11 +138,11 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj,
}
/* Write NVRAM. */
- ha->isp_ops->write_nvram(ha, (uint8_t *)buf, ha->nvram_base, count);
- ha->isp_ops->read_nvram(ha, (uint8_t *)ha->nvram, ha->nvram_base,
+ ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
+ ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
count);
- set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
return (count);
}
@@ -158,8 +162,9 @@ qla2x00_sysfs_read_optrom(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
if (ha->optrom_state != QLA_SREADING)
return 0;
@@ -173,8 +178,9 @@ qla2x00_sysfs_write_optrom(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
if (ha->optrom_state != QLA_SWRITING)
return -EINVAL;
@@ -203,8 +209,10 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
+
uint32_t start = 0;
uint32_t size = ha->optrom_size;
int val, valid;
@@ -262,7 +270,7 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
ha->optrom_region_start, ha->optrom_region_size));
memset(ha->optrom_buffer, 0, ha->optrom_region_size);
- ha->isp_ops->read_optrom(ha, ha->optrom_buffer,
+ ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
ha->optrom_region_start, ha->optrom_region_size);
break;
case 2:
@@ -333,7 +341,7 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
"Writing flash region -- 0x%x/0x%x.\n",
ha->optrom_region_start, ha->optrom_region_size));
- ha->isp_ops->write_optrom(ha, ha->optrom_buffer,
+ ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
ha->optrom_region_start, ha->optrom_region_size);
break;
default:
@@ -356,8 +364,9 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
if (!capable(CAP_SYS_ADMIN))
return 0;
@@ -371,15 +380,16 @@ qla2x00_sysfs_write_vpd(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size)
return 0;
/* Write NVRAM. */
- ha->isp_ops->write_nvram(ha, (uint8_t *)buf, ha->vpd_base, count);
- ha->isp_ops->read_nvram(ha, (uint8_t *)ha->vpd, ha->vpd_base, count);
+ ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
+ ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
return count;
}
@@ -399,8 +409,9 @@ qla2x00_sysfs_read_sfp(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
uint16_t iter, addr, offset;
int rval;
@@ -429,7 +440,7 @@ do_read:
offset = 0;
}
- rval = qla2x00_read_sfp(ha, ha->sfp_data_dma, addr, offset,
+ rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, addr, offset,
SFP_BLOCK_SIZE);
if (rval != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
@@ -469,30 +480,31 @@ static struct sysfs_entry {
};
void
-qla2x00_alloc_sysfs_attr(scsi_qla_host_t *ha)
+qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
{
- struct Scsi_Host *host = ha->host;
+ struct Scsi_Host *host = vha->host;
struct sysfs_entry *iter;
int ret;
for (iter = bin_file_entries; iter->name; iter++) {
- if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
+ if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
continue;
ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
iter->attr);
if (ret)
- qla_printk(KERN_INFO, ha,
+ qla_printk(KERN_INFO, vha->hw,
"Unable to create sysfs %s binary attribute "
"(%d).\n", iter->name, ret);
}
}
void
-qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
+qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
{
- struct Scsi_Host *host = ha->host;
+ struct Scsi_Host *host = vha->host;
struct sysfs_entry *iter;
+ struct qla_hw_data *ha = vha->hw;
for (iter = bin_file_entries; iter->name; iter++) {
if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
@@ -503,7 +515,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
}
if (ha->beacon_blink_led == 1)
- ha->isp_ops->beacon_off(ha);
+ ha->isp_ops->beacon_off(vha);
}
/* Scsi_Host attributes. */
@@ -519,22 +531,24 @@ static ssize_t
qla2x00_fw_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
- char fw_str[30];
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+ char fw_str[128];
return snprintf(buf, PAGE_SIZE, "%s\n",
- ha->isp_ops->fw_version_str(ha, fw_str));
+ ha->isp_ops->fw_version_str(vha, fw_str));
}
static ssize_t
qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
uint32_t sn;
if (IS_FWI2_CAPABLE(ha)) {
- qla2xxx_get_vpd_field(ha, "SN", buf, PAGE_SIZE);
+ qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
return snprintf(buf, PAGE_SIZE, "%s\n", buf);
}
@@ -547,15 +561,16 @@ static ssize_t
qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
- return snprintf(buf, PAGE_SIZE, "ISP%04X\n", ha->pdev->device);
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
}
static ssize_t
qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
ha->product_id[0], ha->product_id[1], ha->product_id[2],
ha->product_id[3]);
@@ -565,43 +580,44 @@ static ssize_t
qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
- return snprintf(buf, PAGE_SIZE, "%s\n", ha->model_number);
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
}
static ssize_t
qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
return snprintf(buf, PAGE_SIZE, "%s\n",
- ha->model_desc ? ha->model_desc: "");
+ vha->hw->model_desc ? vha->hw->model_desc : "");
}
static ssize_t
qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
char pci_info[30];
return snprintf(buf, PAGE_SIZE, "%s\n",
- ha->isp_ops->pci_info_str(ha, pci_info));
+ vha->hw->isp_ops->pci_info_str(vha, pci_info));
}
static ssize_t
qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
int len = 0;
- if (atomic_read(&ha->loop_state) == LOOP_DOWN ||
- atomic_read(&ha->loop_state) == LOOP_DEAD)
+ if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
+ atomic_read(&vha->loop_state) == LOOP_DEAD)
len = snprintf(buf, PAGE_SIZE, "Link Down\n");
- else if (atomic_read(&ha->loop_state) != LOOP_READY ||
- test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) ||
- test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags))
+ else if (atomic_read(&vha->loop_state) != LOOP_READY ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
else {
len = snprintf(buf, PAGE_SIZE, "Link Up - ");
@@ -632,10 +648,10 @@ static ssize_t
qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
int len = 0;
- switch (ha->zio_mode) {
+ switch (vha->hw->zio_mode) {
case QLA_ZIO_MODE_6:
len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
break;
@@ -650,7 +666,8 @@ static ssize_t
qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
int val = 0;
uint16_t zio_mode;
@@ -668,7 +685,7 @@ qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
/* Update per-hba values and queue a reset. */
if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
ha->zio_mode = zio_mode;
- set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
}
return strlen(buf);
}
@@ -677,16 +694,16 @@ static ssize_t
qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- return snprintf(buf, PAGE_SIZE, "%d us\n", ha->zio_timer * 100);
+ return snprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
}
static ssize_t
qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
int val = 0;
uint16_t zio_timer;
@@ -696,7 +713,7 @@ qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
return -ERANGE;
zio_timer = (uint16_t)(val / 100);
- ha->zio_timer = zio_timer;
+ vha->hw->zio_timer = zio_timer;
return strlen(buf);
}
@@ -705,10 +722,10 @@ static ssize_t
qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
int len = 0;
- if (ha->beacon_blink_led)
+ if (vha->hw->beacon_blink_led)
len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
else
len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
@@ -719,14 +736,15 @@ static ssize_t
qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
int val = 0;
int rval;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return -EPERM;
- if (test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags)) {
+ if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
qla_printk(KERN_WARNING, ha,
"Abort ISP active -- ignoring beacon request.\n");
return -EBUSY;
@@ -736,9 +754,9 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
if (val)
- rval = ha->isp_ops->beacon_on(ha);
+ rval = ha->isp_ops->beacon_on(vha);
else
- rval = ha->isp_ops->beacon_off(ha);
+ rval = ha->isp_ops->beacon_off(vha);
if (rval != QLA_SUCCESS)
count = 0;
@@ -750,8 +768,8 @@ static ssize_t
qla2x00_optrom_bios_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
-
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
ha->bios_revision[0]);
}
@@ -760,8 +778,8 @@ static ssize_t
qla2x00_optrom_efi_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
-
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
ha->efi_revision[0]);
}
@@ -770,8 +788,8 @@ static ssize_t
qla2x00_optrom_fcode_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
-
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
ha->fcode_revision[0]);
}
@@ -780,8 +798,8 @@ static ssize_t
qla2x00_optrom_fw_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
-
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
ha->fw_revision[3]);
@@ -791,8 +809,8 @@ static ssize_t
qla2x00_total_isp_aborts_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
-
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
return snprintf(buf, PAGE_SIZE, "%d\n",
ha->qla_stats.total_isp_aborts);
}
@@ -848,16 +866,17 @@ struct device_attribute *qla2x00_host_attrs[] = {
static void
qla2x00_get_host_port_id(struct Scsi_Host *shost)
{
- scsi_qla_host_t *ha = shost_priv(shost);
+ scsi_qla_host_t *vha = shost_priv(shost);
- fc_host_port_id(shost) = ha->d_id.b.domain << 16 |
- ha->d_id.b.area << 8 | ha->d_id.b.al_pa;
+ fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
+ vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
}
static void
qla2x00_get_host_speed(struct Scsi_Host *shost)
{
- scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
+ struct qla_hw_data *ha = ((struct scsi_qla_host *)
+ (shost_priv(shost)))->hw;
u32 speed = FC_PORTSPEED_UNKNOWN;
switch (ha->link_data_rate) {
@@ -880,14 +899,14 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
static void
qla2x00_get_host_port_type(struct Scsi_Host *shost)
{
- scsi_qla_host_t *ha = shost_priv(shost);
+ scsi_qla_host_t *vha = shost_priv(shost);
uint32_t port_type = FC_PORTTYPE_UNKNOWN;
- if (ha->parent) {
+ if (vha->vp_idx) {
fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
return;
}
- switch (ha->current_topology) {
+ switch (vha->hw->current_topology) {
case ISP_CFG_NL:
port_type = FC_PORTTYPE_LPORT;
break;
@@ -908,11 +927,11 @@ static void
qla2x00_get_starget_node_name(struct scsi_target *starget)
{
struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
- scsi_qla_host_t *ha = shost_priv(host);
+ scsi_qla_host_t *vha = shost_priv(host);
fc_port_t *fcport;
u64 node_name = 0;
- list_for_each_entry(fcport, &ha->fcports, list) {
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->rport &&
starget->id == fcport->rport->scsi_target_id) {
node_name = wwn_to_u64(fcport->node_name);
@@ -927,11 +946,11 @@ static void
qla2x00_get_starget_port_name(struct scsi_target *starget)
{
struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
- scsi_qla_host_t *ha = shost_priv(host);
+ scsi_qla_host_t *vha = shost_priv(host);
fc_port_t *fcport;
u64 port_name = 0;
- list_for_each_entry(fcport, &ha->fcports, list) {
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->rport &&
starget->id == fcport->rport->scsi_target_id) {
port_name = wwn_to_u64(fcport->port_name);
@@ -946,11 +965,11 @@ static void
qla2x00_get_starget_port_id(struct scsi_target *starget)
{
struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
- scsi_qla_host_t *ha = shost_priv(host);
+ scsi_qla_host_t *vha = shost_priv(host);
fc_port_t *fcport;
uint32_t port_id = ~0U;
- list_for_each_entry(fcport, &ha->fcports, list) {
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->rport &&
starget->id == fcport->rport->scsi_target_id) {
port_id = fcport->d_id.b.domain << 16 |
@@ -999,29 +1018,30 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
* final cleanup of firmware resources (PCBs and XCBs).
*/
if (fcport->loop_id != FC_NO_LOOP_ID) {
- fcport->ha->isp_ops->fabric_logout(fcport->ha, fcport->loop_id,
- fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa);
+ fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
+ fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
fcport->loop_id = FC_NO_LOOP_ID;
}
qla2x00_abort_fcport_cmds(fcport);
- scsi_target_unblock(&rport->dev);
}
static int
qla2x00_issue_lip(struct Scsi_Host *shost)
{
- scsi_qla_host_t *ha = shost_priv(shost);
+ scsi_qla_host_t *vha = shost_priv(shost);
- qla2x00_loop_reset(ha);
+ qla2x00_loop_reset(vha);
return 0;
}
static struct fc_host_statistics *
qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
{
- scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
+ scsi_qla_host_t *vha = shost_priv(shost);
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
int rval;
struct link_statistics *stats;
dma_addr_t stats_dma;
@@ -1033,21 +1053,21 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
if (stats == NULL) {
DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
- __func__, ha->host_no));
+ __func__, base_vha->host_no));
goto done;
}
memset(stats, 0, DMA_POOL_SIZE);
rval = QLA_FUNCTION_FAILED;
if (IS_FWI2_CAPABLE(ha)) {
- rval = qla24xx_get_isp_stats(ha, stats, stats_dma);
- } else if (atomic_read(&ha->loop_state) == LOOP_READY &&
- !test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) &&
- !test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) &&
+ rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
+ } else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
+ !test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) &&
+ !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
!ha->dpc_active) {
/* Must be in a 'READY' state for statistics retrieval. */
- rval = qla2x00_get_link_status(ha, ha->loop_id, stats,
- stats_dma);
+ rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
+ stats, stats_dma);
}
if (rval != QLA_SUCCESS)
@@ -1078,29 +1098,29 @@ done:
static void
qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
{
- scsi_qla_host_t *ha = shost_priv(shost);
+ scsi_qla_host_t *vha = shost_priv(shost);
- qla2x00_get_sym_node_name(ha, fc_host_symbolic_name(shost));
+ qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost));
}
static void
qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
{
- scsi_qla_host_t *ha = shost_priv(shost);
+ scsi_qla_host_t *vha = shost_priv(shost);
- set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
+ set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
}
static void
qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
{
- scsi_qla_host_t *ha = shost_priv(shost);
+ scsi_qla_host_t *vha = shost_priv(shost);
u64 node_name;
- if (ha->device_flags & SWITCH_FOUND)
- node_name = wwn_to_u64(ha->fabric_node_name);
+ if (vha->device_flags & SWITCH_FOUND)
+ node_name = wwn_to_u64(vha->fabric_node_name);
else
- node_name = wwn_to_u64(ha->node_name);
+ node_name = wwn_to_u64(vha->node_name);
fc_host_fabric_name(shost) = node_name;
}
@@ -1108,11 +1128,12 @@ qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
static void
qla2x00_get_host_port_state(struct Scsi_Host *shost)
{
- scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
+ scsi_qla_host_t *vha = shost_priv(shost);
+ struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
- if (!ha->flags.online)
+ if (!base_vha->flags.online)
fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
- else if (atomic_read(&ha->loop_state) == LOOP_TIMEOUT)
+ else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
else
fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
@@ -1122,8 +1143,11 @@ static int
qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
{
int ret = 0;
- scsi_qla_host_t *ha = shost_priv(fc_vport->shost);
- scsi_qla_host_t *vha;
+ int cnt = 0;
+ uint8_t qos = QLA_DEFAULT_QUE_QOS;
+ scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
+ scsi_qla_host_t *vha = NULL;
+ struct qla_hw_data *ha = base_vha->hw;
ret = qla24xx_vport_create_req_sanity_check(fc_vport);
if (ret) {
@@ -1145,18 +1169,19 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
atomic_set(&vha->vp_state, VP_FAILED);
/* ready to create vport */
- qla_printk(KERN_INFO, vha, "VP entry id %d assigned.\n", vha->vp_idx);
+ qla_printk(KERN_INFO, vha->hw, "VP entry id %d assigned.\n",
+ vha->vp_idx);
/* initialized vport states */
atomic_set(&vha->loop_state, LOOP_DOWN);
vha->vp_err_state= VP_ERR_PORTDWN;
vha->vp_prev_err_state= VP_ERR_UNKWN;
/* Check if physical ha port is Up */
- if (atomic_read(&ha->loop_state) == LOOP_DOWN ||
- atomic_read(&ha->loop_state) == LOOP_DEAD) {
+ if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
+ atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
/* Don't retry or attempt login of this virtual port */
DEBUG15(printk ("scsi(%ld): pport loop_state is not UP.\n",
- vha->host_no));
+ base_vha->host_no));
atomic_set(&vha->loop_state, LOOP_DEAD);
if (!disable)
fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
@@ -1172,18 +1197,32 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
fc_host_supported_classes(vha->host) =
- fc_host_supported_classes(ha->host);
+ fc_host_supported_classes(base_vha->host);
fc_host_supported_speeds(vha->host) =
- fc_host_supported_speeds(ha->host);
+ fc_host_supported_speeds(base_vha->host);
qla24xx_vport_disable(fc_vport, disable);
+ /* Create a queue pair for the vport */
+ if (ha->mqenable) {
+ if (ha->npiv_info) {
+ for (; cnt < ha->nvram_npiv_size; cnt++) {
+ if (!memcmp(ha->npiv_info[cnt].port_name,
+ vha->port_name, WWN_SIZE) &&
+ !memcmp(ha->npiv_info[cnt].node_name,
+ vha->node_name, WWN_SIZE)) {
+ qos = ha->npiv_info[cnt].q_qos;
+ break;
+ }
+ }
+ }
+ qla25xx_create_queues(vha, qos);
+ }
+
return 0;
vport_create_failed_2:
qla24xx_disable_vp(vha);
qla24xx_deallocate_vp_id(vha);
- kfree(vha->port_name);
- kfree(vha->node_name);
scsi_host_put(vha->host);
return FC_VPORT_FAILED;
}
@@ -1192,17 +1231,34 @@ static int
qla24xx_vport_delete(struct fc_vport *fc_vport)
{
scsi_qla_host_t *vha = fc_vport->dd_data;
- scsi_qla_host_t *pha = to_qla_parent(vha);
+ fc_port_t *fcport, *tfcport;
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t id = vha->vp_idx;
while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
- test_bit(FCPORT_UPDATE_NEEDED, &pha->dpc_flags))
+ test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
msleep(1000);
+ if (ha->mqenable) {
+ if (qla25xx_delete_queues(vha, 0) != QLA_SUCCESS)
+ qla_printk(KERN_WARNING, ha,
+ "Queue delete failed.\n");
+ vha->req_ques[0] = ha->req_q_map[0]->id;
+ }
+
qla24xx_disable_vp(vha);
- qla24xx_deallocate_vp_id(vha);
- kfree(vha->node_name);
- kfree(vha->port_name);
+ fc_remove_host(vha->host);
+
+ scsi_remove_host(vha->host);
+
+ list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
+ list_del(&fcport->list);
+ kfree(fcport);
+ fcport = NULL;
+ }
+
+ qla24xx_deallocate_vp_id(vha);
if (vha->timer_active) {
qla2x00_vp_stop_timer(vha);
@@ -1211,12 +1267,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
vha->host_no, vha->vp_idx, vha));
}
- fc_remove_host(vha->host);
-
- scsi_remove_host(vha->host);
-
scsi_host_put(vha->host);
-
+ qla_printk(KERN_INFO, ha, "vport %d deleted\n", id);
return 0;
}
@@ -1319,15 +1371,16 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
};
void
-qla2x00_init_host_attr(scsi_qla_host_t *ha)
+qla2x00_init_host_attr(scsi_qla_host_t *vha)
{
+ struct qla_hw_data *ha = vha->hw;
u32 speed = FC_PORTSPEED_UNKNOWN;
- fc_host_node_name(ha->host) = wwn_to_u64(ha->node_name);
- fc_host_port_name(ha->host) = wwn_to_u64(ha->port_name);
- fc_host_supported_classes(ha->host) = FC_COS_CLASS3;
- fc_host_max_npiv_vports(ha->host) = ha->max_npiv_vports;;
- fc_host_npiv_vports_inuse(ha->host) = ha->cur_vport_count;
+ fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
+ fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
+ fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
+ fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
+ fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
if (IS_QLA25XX(ha))
speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
@@ -1339,5 +1392,5 @@ qla2x00_init_host_attr(scsi_qla_host_t *ha)
speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
else
speed = FC_PORTSPEED_1GBIT;
- fc_host_supported_speeds(ha->host) = speed;
+ fc_host_supported_speeds(vha->host) = speed;
}
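
The conversions above all follow one pattern: per-port state (loop_state, dpc_flags, d_id, the fcport list) stays on the scsi_qla_host that a sysfs or transport callback resolves via shost_priv(), while adapter-wide state is reached through vha->hw. A minimal sketch of the resulting accessor shape, assuming a hypothetical attribute backed by a qla_hw_data field:

	/* Sketch only: qla2x00_example_show is hypothetical; the
	 * shost_priv()/vha->hw split mirrors the handlers above. */
	static ssize_t
	qla2x00_example_show(struct device *dev,
	    struct device_attribute *attr, char *buf)
	{
		scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
		struct qla_hw_data *ha = vha->hw;	/* adapter-wide data */

		return snprintf(buf, PAGE_SIZE, "%d\n", ha->chip_revision);
	}
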
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 510ba64bc286..1cf77772623b 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -9,7 +9,7 @@
#include <linux/delay.h>
static inline void
-qla2xxx_prep_dump(scsi_qla_host_t *ha, struct qla2xxx_fw_dump *fw_dump)
+qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
{
fw_dump->fw_major_version = htonl(ha->fw_major_version);
fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
@@ -23,22 +23,24 @@ qla2xxx_prep_dump(scsi_qla_host_t *ha, struct qla2xxx_fw_dump *fw_dump)
}
static inline void *
-qla2xxx_copy_queues(scsi_qla_host_t *ha, void *ptr)
+qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
{
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
/* Request queue. */
- memcpy(ptr, ha->request_ring, ha->request_q_length *
+ memcpy(ptr, req->ring, req->length *
sizeof(request_t));
/* Response queue. */
- ptr += ha->request_q_length * sizeof(request_t);
- memcpy(ptr, ha->response_ring, ha->response_q_length *
+ ptr += req->length * sizeof(request_t);
+ memcpy(ptr, rsp->ring, rsp->length *
sizeof(response_t));
- return ptr + (ha->response_q_length * sizeof(response_t));
+ return ptr + (rsp->length * sizeof(response_t));
}
static int
-qla24xx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint32_t *ram,
+qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
uint32_t ram_dwords, void **nxt)
{
int rval;
@@ -112,7 +114,7 @@ qla24xx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint32_t *ram,
}
static int
-qla24xx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram,
+qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
uint32_t cram_size, void **nxt)
{
int rval;
@@ -163,7 +165,7 @@ qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
}
static int
-qla24xx_soft_reset(scsi_qla_host_t *ha)
+qla24xx_soft_reset(struct qla_hw_data *ha)
{
int rval = QLA_SUCCESS;
uint32_t cnt;
@@ -215,8 +217,8 @@ qla24xx_soft_reset(scsi_qla_host_t *ha)
}
static int
-qla2xxx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint16_t *ram,
- uint32_t ram_words, void **nxt)
+qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
+ uint16_t ram_words, void **nxt)
{
int rval;
uint32_t cnt, stat, timer, words, idx;
@@ -314,16 +316,17 @@ qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
* @hardware_locked: Called with the hardware_lock
*/
void
-qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
+qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
int rval;
uint32_t cnt;
-
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
uint16_t __iomem *dmp_reg;
unsigned long flags;
struct qla2300_fw_dump *fw;
void *nxt;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
flags = 0;
@@ -468,7 +471,7 @@ qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
} else {
qla_printk(KERN_INFO, ha,
"Firmware dump saved to temp buffer (%ld/%p).\n",
- ha->host_no, ha->fw_dump);
+ base_vha->host_no, ha->fw_dump);
ha->fw_dumped = 1;
}
@@ -483,16 +486,18 @@ qla2300_fw_dump_failed:
* @hardware_locked: Called with the hardware_lock
*/
void
-qla2100_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
+qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
int rval;
uint32_t cnt, timer;
uint16_t risc_address;
uint16_t mb0, mb2;
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
uint16_t __iomem *dmp_reg;
unsigned long flags;
struct qla2100_fw_dump *fw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
risc_address = 0;
mb0 = mb2 = 0;
@@ -673,7 +678,7 @@ qla2100_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
} else {
qla_printk(KERN_INFO, ha,
"Firmware dump saved to temp buffer (%ld/%p).\n",
- ha->host_no, ha->fw_dump);
+ base_vha->host_no, ha->fw_dump);
ha->fw_dumped = 1;
}
@@ -683,12 +688,12 @@ qla2100_fw_dump_failed:
}
void
-qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
+qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
int rval;
uint32_t cnt;
uint32_t risc_address;
-
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
uint32_t __iomem *dmp_reg;
uint32_t *iter_reg;
@@ -697,6 +702,7 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
struct qla24xx_fw_dump *fw;
uint32_t ext_mem_cnt;
void *nxt;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
risc_address = ext_mem_cnt = 0;
flags = 0;
@@ -919,7 +925,7 @@ qla24xx_fw_dump_failed_0:
} else {
qla_printk(KERN_INFO, ha,
"Firmware dump saved to temp buffer (%ld/%p).\n",
- ha->host_no, ha->fw_dump);
+ base_vha->host_no, ha->fw_dump);
ha->fw_dumped = 1;
}
@@ -929,13 +935,14 @@ qla24xx_fw_dump_failed:
}
void
-qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
+qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
int rval;
uint32_t cnt;
uint32_t risc_address;
-
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ struct device_reg_25xxmq __iomem *reg25;
uint32_t __iomem *dmp_reg;
uint32_t *iter_reg;
uint16_t __iomem *mbx_reg;
@@ -944,6 +951,11 @@ qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
uint32_t ext_mem_cnt;
void *nxt;
struct qla2xxx_fce_chain *fcec;
+ struct qla2xxx_mq_chain *mq = NULL;
+ uint32_t qreg_size;
+ uint8_t req_cnt, rsp_cnt, que_cnt;
+ uint32_t que_idx;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
risc_address = ext_mem_cnt = 0;
flags = 0;
@@ -988,6 +1000,29 @@ qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
+
+ /* Multi queue registers */
+ if (ha->mqenable) {
+ qreg_size = sizeof(struct qla2xxx_mq_chain);
+ mq = kzalloc(qreg_size, GFP_KERNEL);
+ if (!mq)
+ goto qla25xx_fw_dump_failed_0;
+ req_cnt = find_first_zero_bit(ha->req_qid_map, ha->max_queues);
+ rsp_cnt = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues);
+ que_cnt = req_cnt > rsp_cnt ? req_cnt : rsp_cnt;
+ mq->count = htonl(que_cnt);
+ mq->chain_size = htonl(qreg_size);
+ mq->type = __constant_htonl(DUMP_CHAIN_MQ);
+ for (cnt = 0; cnt < que_cnt; cnt++) {
+ reg25 = (struct device_reg_25xxmq __iomem *)((void __iomem *)
+ ha->mqiobase + cnt * QLA_QUE_PAGE);
+ que_idx = cnt * 4;
+ mq->qregs[que_idx] = htonl(reg25->req_q_in);
+ mq->qregs[que_idx+1] = htonl(reg25->req_q_out);
+ mq->qregs[que_idx+2] = htonl(reg25->rsp_q_in);
+ mq->qregs[que_idx+3] = htonl(reg25->rsp_q_out);
+ }
+ }
WRT_REG_DWORD(&reg->iobase_window, 0x00);
RD_REG_DWORD(&reg->iobase_window);
@@ -1225,7 +1260,14 @@ qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
- fcec = nxt + ntohl(ha->fw_dump->eft_size);
+ if (ha->mqenable) {
+ nxt = nxt + ntohl(ha->fw_dump->eft_size);
+ memcpy(nxt, mq, qreg_size);
+ kfree(mq);
+ fcec = nxt + qreg_size;
+ } else {
+ fcec = nxt + ntohl(ha->fw_dump->eft_size);
+ }
fcec->type = __constant_htonl(DUMP_CHAIN_FCE | DUMP_CHAIN_LAST);
fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
fce_calc_size(ha->fce_bufs));
@@ -1248,7 +1290,7 @@ qla25xx_fw_dump_failed_0:
} else {
qla_printk(KERN_INFO, ha,
"Firmware dump saved to temp buffer (%ld/%p).\n",
- ha->host_no, ha->fw_dump);
+ base_vha->host_no, ha->fw_dump);
ha->fw_dumped = 1;
}
@@ -1256,15 +1298,15 @@ qla25xx_fw_dump_failed:
if (!hardware_locked)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
-
/****************************************************************************/
/* Driver Debug Functions. */
/****************************************************************************/
void
-qla2x00_dump_regs(scsi_qla_host_t *ha)
+qla2x00_dump_regs(scsi_qla_host_t *vha)
{
int i;
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
uint16_t __iomem *mbx_reg;
@@ -1274,7 +1316,7 @@ qla2x00_dump_regs(scsi_qla_host_t *ha)
printk("Mailbox registers:\n");
for (i = 0; i < 6; i++)
- printk("scsi(%ld): mbox %d 0x%04x \n", ha->host_no, i,
+ printk("scsi(%ld): mbox %d 0x%04x \n", vha->host_no, i,
RD_REG_WORD(mbx_reg++));
}
@@ -1302,3 +1344,3 @@ qla2x00_dump_buffer(uint8_t * b, uint32_t size)
if (cnt % 16)
printk("\n");
}
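
Several of the rewritten dump routines need a host number for their log messages but are now entered with only hardware context; they recover the physical port's scsi_qla_host from the PCI drvdata, as the base_vha locals above show. The idiom, factored into a hypothetical helper:

	/* Sketch: map qla_hw_data back to the physical (base) port.
	 * Relies on the probe path storing the base vha in drvdata. */
	static inline scsi_qla_host_t *
	qla2x00_base_vha(struct qla_hw_data *ha)
	{
		return pci_get_drvdata(ha->pdev);
	}
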
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 2e9c0c097f5e..c1794a70a45f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -4,6 +4,9 @@
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
+
+#include "qla_def.h"
+
/*
* Driver debug definitions.
*/
@@ -23,6 +26,7 @@
/* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */
/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */
/* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */
+/* #define QL_DEBUG_LEVEL_17 */ /* Output MULTI-Q trace messages */
/*
* Macros use for debugging the driver.
@@ -43,6 +47,7 @@
#define DEBUG2_11(x) do { if (ql2xextended_error_logging) { x; } } while (0)
#define DEBUG2_13(x) do { if (ql2xextended_error_logging) { x; } } while (0)
#define DEBUG2_16(x) do { if (ql2xextended_error_logging) { x; } } while (0)
+#define DEBUG2_17(x) do { if (ql2xextended_error_logging) { x; } } while (0)
#if defined(QL_DEBUG_LEVEL_3)
#define DEBUG3(x) do {x;} while (0)
@@ -127,7 +132,6 @@
#else
#define DEBUG16(x) do {} while (0)
#endif
-
/*
* Firmware Dump structure definition
*/
@@ -266,8 +270,17 @@ struct qla2xxx_fce_chain {
uint32_t eregs[8];
};
+struct qla2xxx_mq_chain {
+ uint32_t type;
+ uint32_t chain_size;
+
+ uint32_t count;
+ uint32_t qregs[4 * QLA_MQ_SIZE];
+};
+
#define DUMP_CHAIN_VARIANT 0x80000000
#define DUMP_CHAIN_FCE 0x7FFFFAF0
+#define DUMP_CHAIN_MQ 0x7FFFFAF1
#define DUMP_CHAIN_LAST 0x80000000
struct qla2xxx_fw_dump {
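
Each queue recorded in the new qla2xxx_mq_chain contributes four big-endian dwords to qregs, in the order req-in, req-out, rsp-in, rsp-out, matching the fill loop in qla25xx_fw_dump() above. A sketch of how a dump post-processor might read one queue back, assuming mq points at a chain entry copied out of a saved dump:

	/* Sketch: decode one queue's ring pointers from an MQ chain. */
	static void
	qla_mq_chain_show(struct qla2xxx_mq_chain *mq, uint32_t que)
	{
		uint32_t base = que * 4;

		printk("que %u: req_in=%x req_out=%x rsp_in=%x rsp_out=%x\n",
		    que, ntohl(mq->qregs[base]), ntohl(mq->qregs[base + 1]),
		    ntohl(mq->qregs[base + 2]), ntohl(mq->qregs[base + 3]));
	}
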
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 83c819216771..a29c95204975 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -181,11 +181,14 @@
#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/
#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
+struct req_que;
+
/*
* SCSI Request Block
*/
typedef struct srb {
- struct scsi_qla_host *ha; /* HA the SP is queued on */
+ struct scsi_qla_host *vha; /* HA the SP is queued on */
+ struct req_que *que;
struct fc_port *fcport;
struct scsi_cmnd *cmd; /* Linux SCSI command pkt */
@@ -369,9 +372,17 @@ struct device_reg_2xxx {
} u_end;
};
+struct device_reg_25xxmq {
+ volatile uint32_t req_q_in;
+ volatile uint32_t req_q_out;
+ volatile uint32_t rsp_q_in;
+ volatile uint32_t rsp_q_out;
+};
+
typedef union {
struct device_reg_2xxx isp;
struct device_reg_24xx isp24;
+ struct device_reg_25xxmq isp25mq;
} device_reg_t;
#define ISP_REQ_Q_IN(ha, reg) \
@@ -1524,7 +1535,7 @@ typedef struct {
*/
typedef struct fc_port {
struct list_head list;
- struct scsi_qla_host *ha;
+ struct scsi_qla_host *vha;
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
@@ -1550,7 +1561,6 @@ typedef struct fc_port {
unsigned long last_queue_full;
unsigned long last_ramp_up;
- struct list_head vp_fcport;
uint16_t vp_idx;
} fc_port_t;
@@ -2037,6 +2047,8 @@ typedef struct vport_params {
#define VP_RET_CODE_NO_MEM 5
#define VP_RET_CODE_NOT_FOUND 6
+struct qla_hw_data;
+
/*
* ISP operations
*/
@@ -2055,10 +2067,11 @@ struct isp_operations {
char * (*fw_version_str) (struct scsi_qla_host *, char *);
irq_handler_t intr_handler;
- void (*enable_intrs) (struct scsi_qla_host *);
- void (*disable_intrs) (struct scsi_qla_host *);
+ void (*enable_intrs) (struct qla_hw_data *);
+ void (*disable_intrs) (struct qla_hw_data *);
- int (*abort_command) (struct scsi_qla_host *, srb_t *);
+ int (*abort_command) (struct scsi_qla_host *, srb_t *,
+ struct req_que *);
int (*target_reset) (struct fc_port *, unsigned int);
int (*lun_reset) (struct fc_port *, unsigned int);
int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t,
@@ -2089,6 +2102,10 @@ struct isp_operations {
uint32_t);
int (*get_flash_version) (struct scsi_qla_host *, void *);
+ int (*start_scsi) (srb_t *);
+ void (*wrt_req_reg) (struct qla_hw_data *, uint16_t, uint16_t);
+ void (*wrt_rsp_reg) (struct qla_hw_data *, uint16_t, uint16_t);
+ uint16_t (*rd_req_reg) (struct qla_hw_data *, uint16_t);
};
/* MSI-X Support *************************************************************/
@@ -2100,16 +2117,18 @@ struct isp_operations {
#define QLA_MSIX_DEFAULT 0x00
#define QLA_MSIX_RSP_Q 0x01
-#define QLA_MSIX_ENTRIES 2
#define QLA_MIDX_DEFAULT 0
#define QLA_MIDX_RSP_Q 1
+#define QLA_PCI_MSIX_CONTROL 0xa2
struct scsi_qla_host;
+struct rsp_que;
struct qla_msix_entry {
int have_irq;
- uint16_t msix_vector;
- uint16_t msix_entry;
+ uint32_t vector;
+ uint16_t entry;
+ struct rsp_que *rsp;
};
#define WATCH_INTERVAL 1 /* number of seconds */
@@ -2160,208 +2179,137 @@ struct qla_statistics {
uint64_t output_bytes;
};
-/*
- * Linux Host Adapter structure
- */
-typedef struct scsi_qla_host {
- struct list_head list;
+/* Multi queue support */
+#define MBC_INITIALIZE_MULTIQ 0x1f
+#define QLA_QUE_PAGE 0x1000
+#define QLA_MQ_SIZE 32
+#define QLA_MAX_HOST_QUES 16
+#define QLA_MAX_QUEUES 256
+#define ISP_QUE_REG(ha, id) \
+ ((ha->mqenable) ? \
+ ((void *)(ha->mqiobase) +\
+ (QLA_QUE_PAGE * id)) :\
+ ((void *)(ha->iobase)))
+#define QLA_REQ_QUE_ID(tag) \
+ ((tag < QLA_MAX_QUEUES && tag > 0) ? tag : 0)
+#define QLA_DEFAULT_QUE_QOS 5
+#define QLA_PRECONFIG_VPORTS 32
+#define QLA_MAX_VPORTS_QLA24XX 128
+#define QLA_MAX_VPORTS_QLA25XX 256
+/* Response queue data structure */
+struct rsp_que {
+ dma_addr_t dma;
+ response_t *ring;
+ response_t *ring_ptr;
+ uint16_t ring_index;
+ uint16_t out_ptr;
+ uint16_t length;
+ uint16_t options;
+ uint16_t rid;
+ uint16_t id;
+ uint16_t vp_idx;
+ struct qla_hw_data *hw;
+ struct qla_msix_entry *msix;
+ struct req_que *req;
+};
- /* Commonly used flags and state information. */
- struct Scsi_Host *host;
- struct pci_dev *pdev;
+/* Request queue data structure */
+struct req_que {
+ dma_addr_t dma;
+ request_t *ring;
+ request_t *ring_ptr;
+ uint16_t ring_index;
+ uint16_t in_ptr;
+ uint16_t cnt;
+ uint16_t length;
+ uint16_t options;
+ uint16_t rid;
+ uint16_t id;
+ uint16_t qos;
+ uint16_t vp_idx;
+ struct rsp_que *rsp;
+ srb_t *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
+ uint32_t current_outstanding_cmd;
+ int max_q_depth;
+};
- unsigned long host_no;
+/*
+ * Qlogic host adapter (hardware-wide) data structure.
+ */
+struct qla_hw_data {
+ struct pci_dev *pdev;
+ /* SRB cache. */
+#define SRB_MIN_REQ 128
+ mempool_t *srb_mempool;
volatile struct {
- uint32_t init_done :1;
- uint32_t online :1;
uint32_t mbox_int :1;
uint32_t mbox_busy :1;
- uint32_t rscn_queue_overflow :1;
- uint32_t reset_active :1;
-
- uint32_t management_server_logged_in :1;
- uint32_t process_response_queue :1;
uint32_t disable_risc_code_load :1;
uint32_t enable_64bit_addressing :1;
uint32_t enable_lip_reset :1;
- uint32_t enable_lip_full_login :1;
uint32_t enable_target_reset :1;
+ uint32_t enable_lip_full_login :1;
uint32_t enable_led_scheme :1;
uint32_t inta_enabled :1;
uint32_t msi_enabled :1;
uint32_t msix_enabled :1;
uint32_t disable_serdes :1;
uint32_t gpsc_supported :1;
- uint32_t vsan_enabled :1;
+ uint32_t vsan_enabled :1;
uint32_t npiv_supported :1;
uint32_t fce_enabled :1;
- uint32_t hw_event_marker_found :1;
+ uint32_t hw_event_marker_found:1;
} flags;
- atomic_t loop_state;
-#define LOOP_TIMEOUT 1
-#define LOOP_DOWN 2
-#define LOOP_UP 3
-#define LOOP_UPDATE 4
-#define LOOP_READY 5
-#define LOOP_DEAD 6
-
- unsigned long dpc_flags;
-#define RESET_MARKER_NEEDED 0 /* Send marker to ISP. */
-#define RESET_ACTIVE 1
-#define ISP_ABORT_NEEDED 2 /* Initiate ISP abort. */
-#define ABORT_ISP_ACTIVE 3 /* ISP abort in progress. */
-#define LOOP_RESYNC_NEEDED 4 /* Device Resync needed. */
-#define LOOP_RESYNC_ACTIVE 5
-#define LOCAL_LOOP_UPDATE 6 /* Perform a local loop update. */
-#define RSCN_UPDATE 7 /* Perform an RSCN update. */
-#define MAILBOX_RETRY 8
-#define ISP_RESET_NEEDED 9 /* Initiate a ISP reset. */
-#define FAILOVER_EVENT_NEEDED 10
-#define FAILOVER_EVENT 11
-#define FAILOVER_NEEDED 12
-#define SCSI_RESTART_NEEDED 13 /* Processes SCSI retry queue. */
-#define PORT_RESTART_NEEDED 14 /* Processes Retry queue. */
-#define RESTART_QUEUES_NEEDED 15 /* Restarts the Lun queue. */
-#define ABORT_QUEUES_NEEDED 16
-#define RELOGIN_NEEDED 17
-#define LOGIN_RETRY_NEEDED 18 /* Initiate required fabric logins. */
-#define REGISTER_FC4_NEEDED 19 /* SNS FC4 registration required. */
-#define ISP_ABORT_RETRY 20 /* ISP aborted. */
-#define FCPORT_RESCAN_NEEDED 21 /* IO descriptor processing needed */
-#define IODESC_PROCESS_NEEDED 22 /* IO descriptor processing needed */
-#define IOCTL_ERROR_RECOVERY 23
-#define LOOP_RESET_NEEDED 24
-#define BEACON_BLINK_NEEDED 25
-#define REGISTER_FDMI_NEEDED 26
-#define FCPORT_UPDATE_NEEDED 27
-#define VP_DPC_NEEDED 28 /* wake up for VP dpc handling */
-#define UNLOADING 29
-#define NPIV_CONFIG_NEEDED 30
-
- uint32_t device_flags;
-#define DFLG_LOCAL_DEVICES BIT_0
-#define DFLG_RETRY_LOCAL_DEVICES BIT_1
-#define DFLG_FABRIC_DEVICES BIT_2
-#define SWITCH_FOUND BIT_3
-#define DFLG_NO_CABLE BIT_4
-
-#define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532
-#define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432
- uint32_t device_type;
-#define DT_ISP2100 BIT_0
-#define DT_ISP2200 BIT_1
-#define DT_ISP2300 BIT_2
-#define DT_ISP2312 BIT_3
-#define DT_ISP2322 BIT_4
-#define DT_ISP6312 BIT_5
-#define DT_ISP6322 BIT_6
-#define DT_ISP2422 BIT_7
-#define DT_ISP2432 BIT_8
-#define DT_ISP5422 BIT_9
-#define DT_ISP5432 BIT_10
-#define DT_ISP2532 BIT_11
-#define DT_ISP8432 BIT_12
-#define DT_ISP_LAST (DT_ISP8432 << 1)
-
-#define DT_IIDMA BIT_26
-#define DT_FWI2 BIT_27
-#define DT_ZIO_SUPPORTED BIT_28
-#define DT_OEM_001 BIT_29
-#define DT_ISP2200A BIT_30
-#define DT_EXTENDED_IDS BIT_31
-
-#define DT_MASK(ha) ((ha)->device_type & (DT_ISP_LAST - 1))
-#define IS_QLA2100(ha) (DT_MASK(ha) & DT_ISP2100)
-#define IS_QLA2200(ha) (DT_MASK(ha) & DT_ISP2200)
-#define IS_QLA2300(ha) (DT_MASK(ha) & DT_ISP2300)
-#define IS_QLA2312(ha) (DT_MASK(ha) & DT_ISP2312)
-#define IS_QLA2322(ha) (DT_MASK(ha) & DT_ISP2322)
-#define IS_QLA6312(ha) (DT_MASK(ha) & DT_ISP6312)
-#define IS_QLA6322(ha) (DT_MASK(ha) & DT_ISP6322)
-#define IS_QLA2422(ha) (DT_MASK(ha) & DT_ISP2422)
-#define IS_QLA2432(ha) (DT_MASK(ha) & DT_ISP2432)
-#define IS_QLA5422(ha) (DT_MASK(ha) & DT_ISP5422)
-#define IS_QLA5432(ha) (DT_MASK(ha) & DT_ISP5432)
-#define IS_QLA2532(ha) (DT_MASK(ha) & DT_ISP2532)
-#define IS_QLA8432(ha) (DT_MASK(ha) & DT_ISP8432)
-
-#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
- IS_QLA6312(ha) || IS_QLA6322(ha))
-#define IS_QLA24XX(ha) (IS_QLA2422(ha) || IS_QLA2432(ha))
-#define IS_QLA54XX(ha) (IS_QLA5422(ha) || IS_QLA5432(ha))
-#define IS_QLA25XX(ha) (IS_QLA2532(ha))
-#define IS_QLA84XX(ha) (IS_QLA8432(ha))
-#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
- IS_QLA84XX(ha))
-
-#define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA)
-#define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2)
-#define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED)
-#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001)
-#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
-
- /* SRB cache. */
-#define SRB_MIN_REQ 128
- mempool_t *srb_mempool;
-
/* This spinlock is used to protect "io transactions", you must
- * acquire it before doing any IO to the card, eg with RD_REG*() and
- * WRT_REG*() for the duration of your entire commandtransaction.
- *
- * This spinlock is of lower priority than the io request lock.
- */
-
- spinlock_t hardware_lock ____cacheline_aligned;
+ * acquire it before doing any IO to the card, e.g. with RD_REG*() and
+ * WRT_REG*() for the duration of your entire command transaction.
+ *
+ * This spinlock is of lower priority than the io request lock.
+ */
+ spinlock_t hardware_lock ____cacheline_aligned;
int bars;
int mem_only;
- device_reg_t __iomem *iobase; /* Base I/O address */
+ device_reg_t __iomem *iobase; /* Base I/O address */
resource_size_t pio_address;
-#define MIN_IOBASE_LEN 0x100
-
- /* ISP ring lock, rings, and indexes */
- dma_addr_t request_dma; /* Physical address. */
- request_t *request_ring; /* Base virtual address */
- request_t *request_ring_ptr; /* Current address. */
- uint16_t req_ring_index; /* Current index. */
- uint16_t req_q_cnt; /* Number of available entries. */
- uint16_t request_q_length;
-
- dma_addr_t response_dma; /* Physical address. */
- response_t *response_ring; /* Base virtual address */
- response_t *response_ring_ptr; /* Current address. */
- uint16_t rsp_ring_index; /* Current index. */
- uint16_t response_q_length;
-
- struct isp_operations *isp_ops;
- /* Outstandings ISP commands. */
- srb_t *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
- uint32_t current_outstanding_cmd;
- srb_t *status_srb; /* Status continuation entry. */
+#define MIN_IOBASE_LEN 0x100
+/* Multi queue data structs */
+ device_reg_t *mqiobase;
+ uint16_t msix_count;
+ uint8_t mqenable;
+ struct req_que **req_q_map;
+ struct rsp_que **rsp_q_map;
+ unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
+ unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
+ uint16_t max_queues;
+ struct qla_npiv_entry *npiv_info;
+ uint16_t nvram_npiv_size;
+
+ uint16_t switch_cap;
+#define FLOGI_SEQ_DEL BIT_8
+#define FLOGI_MID_SUPPORT BIT_10
+#define FLOGI_VSAN_SUPPORT BIT_12
+#define FLOGI_SP_SUPPORT BIT_13
+ /* Timeout timers. */
+ uint8_t loop_down_abort_time; /* port down timer */
+ atomic_t loop_down_timer; /* loop down timer */
+ uint8_t link_down_timeout; /* link down timeout */
+ uint16_t max_loop_id;
- /* ISP configuration data. */
- uint16_t loop_id; /* Host adapter loop id */
- uint16_t switch_cap;
-#define FLOGI_SEQ_DEL BIT_8
-#define FLOGI_MID_SUPPORT BIT_10
-#define FLOGI_VSAN_SUPPORT BIT_12
-#define FLOGI_SP_SUPPORT BIT_13
uint16_t fb_rev;
-
- port_id_t d_id; /* Host adapter port id */
uint16_t max_public_loop_ids;
- uint16_t min_external_loopid; /* First external loop Id */
+ uint16_t min_external_loopid; /* First external loop Id */
#define PORT_SPEED_UNKNOWN 0xFFFF
-#define PORT_SPEED_1GB 0x00
-#define PORT_SPEED_2GB 0x01
-#define PORT_SPEED_4GB 0x03
-#define PORT_SPEED_8GB 0x04
- uint16_t link_data_rate; /* F/W operating speed */
+#define PORT_SPEED_1GB 0x00
+#define PORT_SPEED_2GB 0x01
+#define PORT_SPEED_4GB 0x03
+#define PORT_SPEED_8GB 0x04
+ uint16_t link_data_rate; /* F/W operating speed */
uint8_t current_topology;
uint8_t prev_topology;
@@ -2370,15 +2318,69 @@ typedef struct scsi_qla_host {
#define ISP_CFG_FL 4
#define ISP_CFG_F 8
- uint8_t operating_mode; /* F/W operating mode */
+ uint8_t operating_mode; /* F/W operating mode */
#define LOOP 0
#define P2P 1
#define LOOP_P2P 2
#define P2P_LOOP 3
-
- uint8_t marker_needed;
-
uint8_t interrupts_on;
+ uint32_t isp_abort_cnt;
+
+#define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532
+#define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432
+ uint32_t device_type;
+#define DT_ISP2100 BIT_0
+#define DT_ISP2200 BIT_1
+#define DT_ISP2300 BIT_2
+#define DT_ISP2312 BIT_3
+#define DT_ISP2322 BIT_4
+#define DT_ISP6312 BIT_5
+#define DT_ISP6322 BIT_6
+#define DT_ISP2422 BIT_7
+#define DT_ISP2432 BIT_8
+#define DT_ISP5422 BIT_9
+#define DT_ISP5432 BIT_10
+#define DT_ISP2532 BIT_11
+#define DT_ISP8432 BIT_12
+#define DT_ISP_LAST (DT_ISP8432 << 1)
+
+#define DT_IIDMA BIT_26
+#define DT_FWI2 BIT_27
+#define DT_ZIO_SUPPORTED BIT_28
+#define DT_OEM_001 BIT_29
+#define DT_ISP2200A BIT_30
+#define DT_EXTENDED_IDS BIT_31
+#define DT_MASK(ha) ((ha)->device_type & (DT_ISP_LAST - 1))
+#define IS_QLA2100(ha) (DT_MASK(ha) & DT_ISP2100)
+#define IS_QLA2200(ha) (DT_MASK(ha) & DT_ISP2200)
+#define IS_QLA2300(ha) (DT_MASK(ha) & DT_ISP2300)
+#define IS_QLA2312(ha) (DT_MASK(ha) & DT_ISP2312)
+#define IS_QLA2322(ha) (DT_MASK(ha) & DT_ISP2322)
+#define IS_QLA6312(ha) (DT_MASK(ha) & DT_ISP6312)
+#define IS_QLA6322(ha) (DT_MASK(ha) & DT_ISP6322)
+#define IS_QLA2422(ha) (DT_MASK(ha) & DT_ISP2422)
+#define IS_QLA2432(ha) (DT_MASK(ha) & DT_ISP2432)
+#define IS_QLA5422(ha) (DT_MASK(ha) & DT_ISP5422)
+#define IS_QLA5432(ha) (DT_MASK(ha) & DT_ISP5432)
+#define IS_QLA2532(ha) (DT_MASK(ha) & DT_ISP2532)
+#define IS_QLA8432(ha) (DT_MASK(ha) & DT_ISP8432)
+
+#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
+ IS_QLA6312(ha) || IS_QLA6322(ha))
+#define IS_QLA24XX(ha) (IS_QLA2422(ha) || IS_QLA2432(ha))
+#define IS_QLA54XX(ha) (IS_QLA5422(ha) || IS_QLA5432(ha))
+#define IS_QLA25XX(ha) (IS_QLA2532(ha))
+#define IS_QLA84XX(ha) (IS_QLA8432(ha))
+#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
+ IS_QLA84XX(ha))
+#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
+ IS_QLA25XX(ha))
+
+#define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA)
+#define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2)
+#define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED)
+#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001)
+#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
/* HBA serial number */
uint8_t serial0;
@@ -2386,8 +2388,8 @@ typedef struct scsi_qla_host {
uint8_t serial2;
/* NVRAM configuration data */
-#define MAX_NVRAM_SIZE 4096
-#define VPD_OFFSET MAX_NVRAM_SIZE / 2
+#define MAX_NVRAM_SIZE 4096
+#define VPD_OFFSET MAX_NVRAM_SIZE / 2
uint16_t nvram_size;
uint16_t nvram_base;
void *nvram;
@@ -2401,22 +2403,8 @@ typedef struct scsi_qla_host {
uint16_t r_a_tov;
int port_down_retry_count;
uint8_t mbx_count;
- uint16_t last_loop_id;
- uint16_t mgmt_svr_loop_id;
-
- uint32_t login_retry_count;
- int max_q_depth;
-
- struct list_head work_list;
-
- /* Fibre Channel Device List. */
- struct list_head fcports;
-
- /* RSCN queue. */
- uint32_t rscn_queue[MAX_RSCN_COUNT];
- uint8_t rscn_in_ptr;
- uint8_t rscn_out_ptr;
+ uint32_t login_retry_count;
/* SNS command interfaces. */
ms_iocb_entry_t *ms_iocb;
dma_addr_t ms_iocb_dma;
@@ -2426,28 +2414,20 @@ typedef struct scsi_qla_host {
struct sns_cmd_pkt *sns_cmd;
dma_addr_t sns_cmd_dma;
-#define SFP_DEV_SIZE 256
-#define SFP_BLOCK_SIZE 64
- void *sfp_data;
- dma_addr_t sfp_data_dma;
+#define SFP_DEV_SIZE 256
+#define SFP_BLOCK_SIZE 64
+ void *sfp_data;
+ dma_addr_t sfp_data_dma;
struct task_struct *dpc_thread;
uint8_t dpc_active; /* DPC routine is active */
- /* Timeout timers. */
- uint8_t loop_down_abort_time; /* port down timer */
- atomic_t loop_down_timer; /* loop down timer */
- uint8_t link_down_timeout; /* link down timeout */
-
- uint32_t timer_active;
- struct timer_list timer;
-
dma_addr_t gid_list_dma;
struct gid_list_info *gid_list;
int gid_list_info_size;
/* Small DMA pool allocations -- maximum 256 bytes in length. */
-#define DMA_POOL_SIZE 256
+#define DMA_POOL_SIZE 256
struct dma_pool *s_dma_pool;
dma_addr_t init_cb_dma;
@@ -2459,17 +2439,17 @@ typedef struct scsi_qla_host {
mbx_cmd_t *mcp;
unsigned long mbx_cmd_flags;
-#define MBX_INTERRUPT 1
-#define MBX_INTR_WAIT 2
+#define MBX_INTERRUPT 1
+#define MBX_INTR_WAIT 2
#define MBX_UPDATE_FLASH_ACTIVE 3
- struct mutex vport_lock; /* Virtual port synchronization */
- struct completion mbx_cmd_comp; /* Serialize mbx access */
+ struct mutex vport_lock; /* Virtual port synchronization */
+ struct completion mbx_cmd_comp; /* Serialize mbx access */
struct completion mbx_intr_comp; /* Used for completion notification */
uint32_t mbx_flags;
#define MBX_IN_PROGRESS BIT_0
-#define MBX_BUSY BIT_1 /* Got the Access */
+#define MBX_BUSY BIT_1 /* Got the Access */
#define MBX_SLEEPING_ON_SEM BIT_2
#define MBX_POLLING_FOR_COMP BIT_3
#define MBX_COMPLETED BIT_4
@@ -2488,7 +2468,7 @@ typedef struct scsi_qla_host {
#define RISC_START_ADDRESS_2300 0x800
#define RISC_START_ADDRESS_2400 0x100000
- uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */
+ uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */
uint8_t fw_seriallink_options[4];
uint16_t fw_seriallink_options24[4];
@@ -2509,10 +2489,10 @@ typedef struct scsi_qla_host {
uint64_t fce_wr, fce_rd;
struct mutex fce_mutex;
+ uint32_t hw_event_start;
uint32_t hw_event_ptr;
uint32_t hw_event_pause_errors;
- uint8_t host_str[16];
uint32_t pci_attr;
uint16_t chip_revision;
@@ -2523,11 +2503,6 @@ typedef struct scsi_qla_host {
char model_desc[80];
uint8_t adapter_id[16+1];
- uint8_t *node_name;
- uint8_t *port_name;
- uint8_t fabric_node_name[WWN_SIZE];
- uint32_t isp_abort_cnt;
-
/* Option ROM information. */
char *optrom_buffer;
uint32_t optrom_size;
@@ -2538,55 +2513,159 @@ typedef struct scsi_qla_host {
uint32_t optrom_region_start;
uint32_t optrom_region_size;
- /* PCI expansion ROM image information. */
+/* PCI expansion ROM image information. */
#define ROM_CODE_TYPE_BIOS 0
#define ROM_CODE_TYPE_FCODE 1
#define ROM_CODE_TYPE_EFI 3
- uint8_t bios_revision[2];
- uint8_t efi_revision[2];
- uint8_t fcode_revision[16];
+ uint8_t bios_revision[2];
+ uint8_t efi_revision[2];
+ uint8_t fcode_revision[16];
uint32_t fw_revision[4];
- uint16_t fdt_odd_index;
uint32_t fdt_wrt_disable;
uint32_t fdt_erase_cmd;
uint32_t fdt_block_size;
uint32_t fdt_unprotect_sec_cmd;
uint32_t fdt_protect_sec_cmd;
- uint32_t flt_region_flt;
- uint32_t flt_region_fdt;
- uint32_t flt_region_boot;
- uint32_t flt_region_fw;
- uint32_t flt_region_vpd_nvram;
- uint32_t flt_region_hw_event;
- uint32_t flt_region_npiv_conf;
+ uint32_t flt_region_flt;
+ uint32_t flt_region_fdt;
+ uint32_t flt_region_boot;
+ uint32_t flt_region_fw;
+ uint32_t flt_region_vpd_nvram;
+ uint32_t flt_region_hw_event;
+ uint32_t flt_region_npiv_conf;
/* Needed for BEACON */
- uint16_t beacon_blink_led;
- uint8_t beacon_color_state;
+ uint16_t beacon_blink_led;
+ uint8_t beacon_color_state;
#define QLA_LED_GRN_ON 0x01
#define QLA_LED_YLW_ON 0x02
#define QLA_LED_ABR_ON 0x04
#define QLA_LED_ALL_ON 0x07 /* yellow, green, amber. */
/* ISP2322: red, green, amber. */
-
- uint16_t zio_mode;
- uint16_t zio_timer;
+ uint16_t zio_mode;
+ uint16_t zio_timer;
struct fc_host_statistics fc_host_stat;
- struct qla_msix_entry msix_entries[QLA_MSIX_ENTRIES];
+ struct qla_msix_entry *msix_entries;
+
+ struct list_head vp_list; /* list of VP */
+ unsigned long vp_idx_map[(MAX_MULTI_ID_FABRIC / 8) /
+ sizeof(unsigned long)];
+ uint16_t num_vhosts; /* number of vports created */
+ uint16_t num_vsans; /* number of vsan created */
+ uint16_t max_npiv_vports; /* 63 or 125 per topology */
+ int cur_vport_count;
+
+ struct qla_chip_state_84xx *cs84xx;
+ struct qla_statistics qla_stats;
+ struct isp_operations *isp_ops;
+};
+
+/*
+ * Qlogic scsi host structure
+ */
+typedef struct scsi_qla_host {
+ struct list_head list;
+ struct list_head vp_fcports; /* list of fcports */
+ struct list_head work_list;
+ /* Commonly used flags and state information. */
+ struct Scsi_Host *host;
+ unsigned long host_no;
+ uint8_t host_str[16];
+
+ volatile struct {
+ uint32_t init_done :1;
+ uint32_t online :1;
+ uint32_t rscn_queue_overflow :1;
+ uint32_t reset_active :1;
+
+ uint32_t management_server_logged_in :1;
+ uint32_t process_response_queue :1;
+ } flags;
+
+ atomic_t loop_state;
+#define LOOP_TIMEOUT 1
+#define LOOP_DOWN 2
+#define LOOP_UP 3
+#define LOOP_UPDATE 4
+#define LOOP_READY 5
+#define LOOP_DEAD 6
+
+ unsigned long dpc_flags;
+#define RESET_MARKER_NEEDED 0 /* Send marker to ISP. */
+#define RESET_ACTIVE 1
+#define ISP_ABORT_NEEDED 2 /* Initiate ISP abort. */
+#define ABORT_ISP_ACTIVE 3 /* ISP abort in progress. */
+#define LOOP_RESYNC_NEEDED 4 /* Device Resync needed. */
+#define LOOP_RESYNC_ACTIVE 5
+#define LOCAL_LOOP_UPDATE 6 /* Perform a local loop update. */
+#define RSCN_UPDATE 7 /* Perform an RSCN update. */
+#define MAILBOX_RETRY 8
+#define ISP_RESET_NEEDED 9 /* Initiate a ISP reset. */
+#define FAILOVER_EVENT_NEEDED 10
+#define FAILOVER_EVENT 11
+#define FAILOVER_NEEDED 12
+#define SCSI_RESTART_NEEDED 13 /* Processes SCSI retry queue. */
+#define PORT_RESTART_NEEDED 14 /* Processes Retry queue. */
+#define RESTART_QUEUES_NEEDED 15 /* Restarts the Lun queue. */
+#define ABORT_QUEUES_NEEDED 16
+#define RELOGIN_NEEDED 17
+#define LOGIN_RETRY_NEEDED 18 /* Initiate required fabric logins. */
+#define REGISTER_FC4_NEEDED 19 /* SNS FC4 registration required. */
+#define ISP_ABORT_RETRY 20 /* ISP aborted. */
+#define FCPORT_RESCAN_NEEDED 21 /* IO descriptor processing needed */
+#define IODESC_PROCESS_NEEDED 22 /* IO descriptor processing needed */
+#define IOCTL_ERROR_RECOVERY 23
+#define LOOP_RESET_NEEDED 24
+#define BEACON_BLINK_NEEDED 25
+#define REGISTER_FDMI_NEEDED 26
+#define FCPORT_UPDATE_NEEDED 27
+#define VP_DPC_NEEDED 28 /* wake up for VP dpc handling */
+#define UNLOADING 29
+#define NPIV_CONFIG_NEEDED 30
+
+ uint32_t device_flags;
+#define DFLG_LOCAL_DEVICES BIT_0
+#define DFLG_RETRY_LOCAL_DEVICES BIT_1
+#define DFLG_FABRIC_DEVICES BIT_2
+#define SWITCH_FOUND BIT_3
+#define DFLG_NO_CABLE BIT_4
+
+ srb_t *status_srb; /* Status continuation entry. */
+
+ /* ISP configuration data. */
+ uint16_t loop_id; /* Host adapter loop id */
+
+ port_id_t d_id; /* Host adapter port id */
+ uint8_t marker_needed;
+ uint16_t mgmt_svr_loop_id;
+
+ /* RSCN queue. */
+ uint32_t rscn_queue[MAX_RSCN_COUNT];
+ uint8_t rscn_in_ptr;
+ uint8_t rscn_out_ptr;
+
+ /* Timeout timers. */
+ uint8_t loop_down_abort_time; /* port down timer */
+ atomic_t loop_down_timer; /* loop down timer */
+ uint8_t link_down_timeout; /* link down timeout */
+
+ uint32_t timer_active;
+ struct timer_list timer;
+
+ uint8_t node_name[WWN_SIZE];
+ uint8_t port_name[WWN_SIZE];
+ uint8_t fabric_node_name[WWN_SIZE];
+ uint32_t vp_abort_cnt;
- struct list_head vp_list; /* list of VP */
struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
- unsigned long vp_idx_map[(MAX_MULTI_ID_FABRIC / 8) / sizeof(unsigned long)];
- uint16_t num_vhosts; /* number of vports created */
- uint16_t num_vsans; /* number of vsan created */
uint16_t vp_idx; /* vport ID */
- struct scsi_qla_host *parent; /* holds pport */
unsigned long vp_flags;
- struct list_head vp_fcports; /* list of fcports */
#define VP_IDX_ACQUIRED 0 /* bit no 0 */
#define VP_CREATE_NEEDED 1
#define VP_BIND_NEEDED 2
@@ -2605,14 +2684,10 @@ typedef struct scsi_qla_host {
#define VP_ERR_FAB_NORESOURCES 3
#define VP_ERR_FAB_LOGOUT 4
#define VP_ERR_ADAP_NORESOURCES 5
- uint16_t max_npiv_vports; /* 63 or 125 per topoloty */
- int cur_vport_count;
-
- struct qla_chip_state_84xx *cs84xx;
- struct qla_statistics qla_stats;
+ struct qla_hw_data *hw;
+ int req_ques[QLA_MAX_HOST_QUES];
} scsi_qla_host_t;
-
/*
* Macros to help code, maintain, etc.
*/
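
The multi-queue register file introduced here is one QLA_QUE_PAGE (0x1000-byte) window per queue id off mqiobase, which is what the ISP_QUE_REG macro encodes; non-MQ adapters fall back to the legacy iobase. A sketch of the lookup as a typed helper, assuming the __iomem annotations the macro itself leaves implicit:

	/* Sketch: resolve the register window for queue 'id'. */
	static device_reg_t __iomem *
	qla_que_reg(struct qla_hw_data *ha, uint16_t id)
	{
		if (ha->mqenable)
			return (device_reg_t __iomem *)
			    ((void __iomem *)ha->mqiobase +
			    QLA_QUE_PAGE * id);
		return ha->iobase;	/* single-queue adapters */
	}
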
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 561a4411719d..0e366a1b44b3 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -15,10 +15,11 @@ static atomic_t qla2x00_dfs_root_count;
static int
qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
{
- scsi_qla_host_t *ha = s->private;
+ scsi_qla_host_t *vha = s->private;
uint32_t cnt;
uint32_t *fce;
uint64_t fce_start;
+ struct qla_hw_data *ha = vha->hw;
mutex_lock(&ha->fce_mutex);
@@ -51,7 +52,8 @@ qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
static int
qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
{
- scsi_qla_host_t *ha = inode->i_private;
+ scsi_qla_host_t *vha = inode->i_private;
+ struct qla_hw_data *ha = vha->hw;
int rval;
if (!ha->flags.fce_enabled)
@@ -60,7 +62,7 @@ qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
mutex_lock(&ha->fce_mutex);
/* Pause tracing to flush FCE buffers. */
- rval = qla2x00_disable_fce_trace(ha, &ha->fce_wr, &ha->fce_rd);
+ rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd);
if (rval)
qla_printk(KERN_WARNING, ha,
"DebugFS: Unable to disable FCE (%d).\n", rval);
@@ -75,7 +77,8 @@ out:
static int
qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
{
- scsi_qla_host_t *ha = inode->i_private;
+ scsi_qla_host_t *vha = inode->i_private;
+ struct qla_hw_data *ha = vha->hw;
int rval;
if (ha->flags.fce_enabled)
@@ -86,7 +89,7 @@ qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
/* Re-enable FCE tracing. */
ha->flags.fce_enabled = 1;
memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));
- rval = qla2x00_enable_fce_trace(ha, ha->fce_dma, ha->fce_bufs,
+ rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs,
ha->fce_mb, &ha->fce_bufs);
if (rval) {
qla_printk(KERN_WARNING, ha,
@@ -107,8 +110,9 @@ static const struct file_operations dfs_fce_ops = {
};
int
-qla2x00_dfs_setup(scsi_qla_host_t *ha)
+qla2x00_dfs_setup(scsi_qla_host_t *vha)
{
+ struct qla_hw_data *ha = vha->hw;
if (!IS_QLA25XX(ha))
goto out;
if (!ha->fce)
@@ -130,7 +134,7 @@ create_dir:
goto create_nodes;
mutex_init(&ha->fce_mutex);
- ha->dfs_dir = debugfs_create_dir(ha->host_str, qla2x00_dfs_root);
+ ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);
if (!ha->dfs_dir) {
qla_printk(KERN_NOTICE, ha,
"DebugFS: Unable to create ha directory.\n");
@@ -152,8 +156,9 @@ out:
}
int
-qla2x00_dfs_remove(scsi_qla_host_t *ha)
+qla2x00_dfs_remove(scsi_qla_host_t *vha)
{
+ struct qla_hw_data *ha = vha->hw;
if (ha->dfs_fce) {
debugfs_remove(ha->dfs_fce);
ha->dfs_fce = NULL;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index d1d14202575a..ee1f1e794c2d 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -299,7 +299,8 @@ struct init_cb_24xx {
uint32_t response_q_address[2];
uint32_t prio_request_q_address[2];
- uint8_t reserved_2[8];
+ uint16_t msix;
+ uint8_t reserved_2[6];
uint16_t atio_q_inpointer;
uint16_t atio_q_length;
@@ -372,8 +373,9 @@ struct init_cb_24xx {
* BIT 17-31 = Reserved
*/
uint32_t firmware_options_3;
-
- uint8_t reserved_3[24];
+ uint16_t qos;
+ uint16_t rid;
+ uint8_t reserved_3[20];
};
/*
@@ -754,7 +756,8 @@ struct abort_entry_24xx {
uint32_t handle_to_abort; /* System handle to abort. */
- uint8_t reserved_1[32];
+ uint16_t req_que_no;
+ uint8_t reserved_1[30];
uint8_t port_id[3]; /* PortID of destination port. */
uint8_t vp_index;
@@ -1258,7 +1261,8 @@ struct qla_npiv_header {
struct qla_npiv_entry {
uint16_t flags;
uint16_t vf_id;
- uint16_t qos;
+ uint8_t q_qos;
+ uint8_t f_qos;
uint16_t unused1;
uint8_t port_name[WWN_SIZE];
uint8_t node_name[WWN_SIZE];
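
qla24xx_vport_create() above consumes q_qos when it creates a vport's queue pair, matching NVRAM NPIV entries against the vport's WWPN/WWNN. That lookup, factored into a hypothetical helper using the memcmp-based WWN comparison from the patched hunk:

	/* Sketch: per-queue QoS for a vport, defaulting when no
	 * NVRAM NPIV entry matches its port/node names. */
	static uint8_t
	qla_npiv_q_qos(struct qla_hw_data *ha, scsi_qla_host_t *vha)
	{
		uint16_t cnt;

		for (cnt = 0; ha->npiv_info && cnt < ha->nvram_npiv_size;
		    cnt++)
			if (!memcmp(ha->npiv_info[cnt].port_name,
			    vha->port_name, WWN_SIZE) &&
			    !memcmp(ha->npiv_info[cnt].node_name,
			    vha->node_name, WWN_SIZE))
				return ha->npiv_info[cnt].q_qos;
		return QLA_DEFAULT_QUE_QOS;
	}
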
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 753dbe6cce6e..0011e31205db 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -63,6 +63,7 @@ extern int ql2xallocfwdump;
extern int ql2xextended_error_logging;
extern int ql2xqfullrampup;
extern int ql2xiidmaenable;
+extern int ql2xmaxqueues;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -72,7 +73,10 @@ extern int qla2x00_post_hwe_work(struct scsi_qla_host *, uint16_t , uint16_t,
uint16_t, uint16_t);
extern void qla2x00_abort_fcport_cmds(fc_port_t *);
-
+extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *,
+ struct qla_hw_data *);
+extern void qla2x00_free_host(struct scsi_qla_host *);
+extern void qla2x00_relogin(struct scsi_qla_host *);
/*
* Global Functions in qla_mid.c source file.
*/
@@ -94,7 +98,7 @@ extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *);
extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *);
-extern void qla2x00_sp_compl(scsi_qla_host_t *, srb_t *);
+extern void qla2x00_sp_compl(struct qla_hw_data *, srb_t *);
extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
@@ -105,10 +109,11 @@ extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *);
extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *);
-extern void qla2xxx_wake_dpc(scsi_qla_host_t *);
-extern void qla2x00_alert_all_vps(scsi_qla_host_t *, uint16_t *);
-extern void qla2x00_async_event(scsi_qla_host_t *, uint16_t *);
-extern void qla2x00_vp_abort_isp(scsi_qla_host_t *);
+extern void qla2xxx_wake_dpc(struct scsi_qla_host *);
+extern void qla2x00_alert_all_vps(struct rsp_que *, uint16_t *);
+extern void qla2x00_async_event(scsi_qla_host_t *, struct rsp_que *,
+ uint16_t *);
+extern int qla2x00_vp_abort_isp(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_iocb.c source file.
@@ -119,8 +124,10 @@ extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
extern void qla2x00_build_scsi_iocbs_64(srb_t *, cmd_entry_t *, uint16_t);
extern int qla2x00_start_scsi(srb_t *sp);
extern int qla24xx_start_scsi(srb_t *sp);
-int qla2x00_marker(scsi_qla_host_t *, uint16_t, uint16_t, uint8_t);
-int __qla2x00_marker(scsi_qla_host_t *, uint16_t, uint16_t, uint8_t);
+int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
+ uint16_t, uint16_t, uint8_t);
+int __qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
+ uint16_t, uint16_t, uint8_t);
/*
* Global Function Prototypes in qla_mbx.c source file.
@@ -154,7 +161,7 @@ extern int
qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t);
extern int
-qla2x00_abort_command(scsi_qla_host_t *, srb_t *);
+qla2x00_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *);
extern int
qla2x00_abort_target(struct fc_port *, unsigned int);
@@ -225,7 +232,7 @@ extern int
qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
dma_addr_t);
-extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *);
+extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *);
extern int qla24xx_abort_target(struct fc_port *, unsigned int);
extern int qla24xx_lun_reset(struct fc_port *, unsigned int);
@@ -264,10 +271,10 @@ extern int qla84xx_verify_chip(struct scsi_qla_host *, uint16_t *);
extern irqreturn_t qla2100_intr_handler(int, void *);
extern irqreturn_t qla2300_intr_handler(int, void *);
extern irqreturn_t qla24xx_intr_handler(int, void *);
-extern void qla2x00_process_response_queue(struct scsi_qla_host *);
-extern void qla24xx_process_response_queue(struct scsi_qla_host *);
+extern void qla2x00_process_response_queue(struct rsp_que *);
+extern void qla24xx_process_response_queue(struct rsp_que *);
-extern int qla2x00_request_irqs(scsi_qla_host_t *);
+extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
extern void qla2x00_free_irqs(scsi_qla_host_t *);
/*
@@ -367,4 +374,27 @@ extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
*/
extern int qla2x00_dfs_setup(scsi_qla_host_t *);
extern int qla2x00_dfs_remove(scsi_qla_host_t *);
+
+/* Global function prototypes for multi-q */
+extern int qla25xx_request_irq(struct rsp_que *);
+extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *,
+ uint8_t);
+extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *,
+ uint8_t);
+extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
+ uint16_t, uint8_t, uint8_t);
+extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t,
+ uint16_t);
+extern int qla25xx_update_req_que(struct scsi_qla_host *, uint8_t, uint8_t);
+extern void qla2x00_init_response_q_entries(struct rsp_que *);
+extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
+extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *);
+extern int qla25xx_create_queues(struct scsi_qla_host *, uint8_t);
+extern int qla25xx_delete_queues(struct scsi_qla_host *, uint8_t);
+extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t);
+extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t);
+extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
+extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
+extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
+extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
#endif /* _QLA_GBL_H */
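/*
 * Editor's sketch (not part of the patch): the prototype changes above
 * retarget the response-path helpers from a host pointer to a struct
 * rsp_que * because an HBA may now own several request/response queue
 * pairs (see the new qla25xx_create_req_que()/qla25xx_create_rsp_que()
 * prototypes), and an interrupt is then serviced per queue rather than
 * per port. Types and names below are simplified stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

struct rsp_que_demo {
        uint16_t id;
        uint16_t in_ptr;        /* producer index advanced by firmware */
        uint16_t out_ptr;       /* consumer index advanced by driver   */
};

static void demo_process_response_queue(struct rsp_que_demo *rsp)
{
        /* Drain only this queue; other queues have their own vectors. */
        while (rsp->out_ptr != rsp->in_ptr) {
                printf("rsp[%u]: completing entry %u\n",
                    (unsigned)rsp->id, (unsigned)rsp->out_ptr);
                rsp->out_ptr++;
        }
}

int main(void)
{
        struct rsp_que_demo q = { .id = 0, .in_ptr = 3, .out_ptr = 0 };

        demo_process_response_queue(&q);
        return 0;
}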
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index c2a4bfbcb05b..0a6f72973996 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -22,8 +22,9 @@ static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
* Returns a pointer to the @ha's ms_iocb.
*/
void *
-qla2x00_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size)
+qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
{
+ struct qla_hw_data *ha = vha->hw;
ms_iocb_entry_t *ms_pkt;
ms_pkt = ha->ms_iocb;
@@ -59,8 +60,9 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size)
* Returns a pointer to the @ha's ms_iocb.
*/
void *
-qla24xx_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size)
+qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
{
+ struct qla_hw_data *ha = vha->hw;
struct ct_entry_24xx *ct_pkt;
ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
@@ -82,7 +84,7 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size)
ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
- ct_pkt->vp_index = ha->vp_idx;
+ ct_pkt->vp_index = vha->vp_idx;
return (ct_pkt);
}
@@ -110,16 +112,17 @@ qla2x00_prep_ct_req(struct ct_sns_req *ct_req, uint16_t cmd, uint16_t rsp_size)
}
static int
-qla2x00_chk_ms_status(scsi_qla_host_t *ha, ms_iocb_entry_t *ms_pkt,
+qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
struct ct_sns_rsp *ct_rsp, const char *routine)
{
int rval;
uint16_t comp_status;
+ struct qla_hw_data *ha = vha->hw;
rval = QLA_FUNCTION_FAILED;
if (ms_pkt->entry_status != 0) {
DEBUG2_3(printk("scsi(%ld): %s failed, error status (%x).\n",
- ha->host_no, routine, ms_pkt->entry_status));
+ vha->host_no, routine, ms_pkt->entry_status));
} else {
if (IS_FWI2_CAPABLE(ha))
comp_status = le16_to_cpu(
@@ -133,7 +136,7 @@ qla2x00_chk_ms_status(scsi_qla_host_t *ha, ms_iocb_entry_t *ms_pkt,
if (ct_rsp->header.response !=
__constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) {
DEBUG2_3(printk("scsi(%ld): %s failed, "
- "rejected request:\n", ha->host_no,
+ "rejected request:\n", vha->host_no,
routine));
DEBUG2_3(qla2x00_dump_buffer(
(uint8_t *)&ct_rsp->header,
@@ -144,7 +147,7 @@ qla2x00_chk_ms_status(scsi_qla_host_t *ha, ms_iocb_entry_t *ms_pkt,
break;
default:
DEBUG2_3(printk("scsi(%ld): %s failed, completion "
- "status (%x).\n", ha->host_no, routine,
+ "status (%x).\n", vha->host_no, routine,
comp_status));
break;
}
@@ -160,21 +163,21 @@ qla2x00_chk_ms_status(scsi_qla_host_t *ha, ms_iocb_entry_t *ms_pkt,
* Returns 0 on success.
*/
int
-qla2x00_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
+qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
{
int rval;
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
+ struct qla_hw_data *ha = vha->hw;
- if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
- return (qla2x00_sns_ga_nxt(ha, fcport));
- }
+ if (IS_QLA2100(ha) || IS_QLA2200(ha))
+ return qla2x00_sns_ga_nxt(vha, fcport);
/* Issue GA_NXT */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GA_NXT_REQ_SIZE,
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GA_NXT_REQ_SIZE,
GA_NXT_RSP_SIZE);
/* Prepare CT request */
@@ -188,13 +191,13 @@ qla2x00_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
/* Execute MS IOCB */
- rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): GA_NXT issue IOCB failed (%d).\n",
- ha->host_no, rval));
- } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "GA_NXT") !=
+ vha->host_no, rval));
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
@@ -216,7 +219,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
"nn %02x%02x%02x%02x%02x%02x%02x%02x "
"pn %02x%02x%02x%02x%02x%02x%02x%02x "
"portid=%02x%02x%02x.\n",
- ha->host_no,
+ vha->host_no,
fcport->node_name[0], fcport->node_name[1],
fcport->node_name[2], fcport->node_name[3],
fcport->node_name[4], fcport->node_name[5],
@@ -242,7 +245,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
* Returns 0 on success.
*/
int
-qla2x00_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
+qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
{
int rval;
uint16_t i;
@@ -252,16 +255,16 @@ qla2x00_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
struct ct_sns_rsp *ct_rsp;
struct ct_sns_gid_pt_data *gid_data;
+ struct qla_hw_data *ha = vha->hw;
- if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
- return (qla2x00_sns_gid_pt(ha, list));
- }
+ if (IS_QLA2100(ha) || IS_QLA2200(ha))
+ return qla2x00_sns_gid_pt(vha, list);
gid_data = NULL;
/* Issue GID_PT */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GID_PT_REQ_SIZE,
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GID_PT_REQ_SIZE,
GID_PT_RSP_SIZE);
/* Prepare CT request */
@@ -273,13 +276,13 @@ qla2x00_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE;
/* Execute MS IOCB */
- rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): GID_PT issue IOCB failed (%d).\n",
- ha->host_no, rval));
- } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "GID_PT") !=
+ vha->host_no, rval));
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
@@ -320,7 +323,7 @@ qla2x00_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
* Returns 0 on success.
*/
int
-qla2x00_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
+qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
{
int rval;
uint16_t i;
@@ -328,15 +331,15 @@ qla2x00_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
+ struct qla_hw_data *ha = vha->hw;
- if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
- return (qla2x00_sns_gpn_id(ha, list));
- }
+ if (IS_QLA2100(ha) || IS_QLA2200(ha))
+ return qla2x00_sns_gpn_id(vha, list);
for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
/* Issue GPN_ID */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GPN_ID_REQ_SIZE,
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GPN_ID_REQ_SIZE,
GPN_ID_RSP_SIZE);
/* Prepare CT request */
@@ -350,13 +353,13 @@ qla2x00_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
/* Execute MS IOCB */
- rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): GPN_ID issue IOCB failed "
- "(%d).\n", ha->host_no, rval));
- } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp,
+ "(%d).\n", vha->host_no, rval));
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
"GPN_ID") != QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
@@ -381,23 +384,22 @@ qla2x00_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
* Returns 0 on success.
*/
int
-qla2x00_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
+qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
{
int rval;
uint16_t i;
-
+ struct qla_hw_data *ha = vha->hw;
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
- if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
- return (qla2x00_sns_gnn_id(ha, list));
- }
+ if (IS_QLA2100(ha) || IS_QLA2200(ha))
+ return qla2x00_sns_gnn_id(vha, list);
for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
/* Issue GNN_ID */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GNN_ID_REQ_SIZE,
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GNN_ID_REQ_SIZE,
GNN_ID_RSP_SIZE);
/* Prepare CT request */
@@ -411,13 +413,13 @@ qla2x00_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
/* Execute MS IOCB */
- rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): GNN_ID issue IOCB failed "
- "(%d).\n", ha->host_no, rval));
- } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp,
+ "(%d).\n", vha->host_no, rval));
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
"GNN_ID") != QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
@@ -429,7 +431,7 @@ qla2x00_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
"nn %02x%02x%02x%02x%02x%02x%02x%02x "
"pn %02x%02x%02x%02x%02x%02x%02x%02x "
"portid=%02x%02x%02x.\n",
- ha->host_no,
+ vha->host_no,
list[i].node_name[0], list[i].node_name[1],
list[i].node_name[2], list[i].node_name[3],
list[i].node_name[4], list[i].node_name[5],
@@ -457,21 +459,20 @@ qla2x00_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
* Returns 0 on success.
*/
int
-qla2x00_rft_id(scsi_qla_host_t *ha)
+qla2x00_rft_id(scsi_qla_host_t *vha)
{
int rval;
-
+ struct qla_hw_data *ha = vha->hw;
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
- if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
- return (qla2x00_sns_rft_id(ha));
- }
+ if (IS_QLA2100(ha) || IS_QLA2200(ha))
+ return qla2x00_sns_rft_id(vha);
/* Issue RFT_ID */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(ha, RFT_ID_REQ_SIZE,
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RFT_ID_REQ_SIZE,
RFT_ID_RSP_SIZE);
/* Prepare CT request */
@@ -480,25 +481,25 @@ qla2x00_rft_id(scsi_qla_host_t *ha)
ct_rsp = &ha->ct_sns->p.rsp;
/* Prepare CT arguments -- port_id, FC-4 types */
- ct_req->req.rft_id.port_id[0] = ha->d_id.b.domain;
- ct_req->req.rft_id.port_id[1] = ha->d_id.b.area;
- ct_req->req.rft_id.port_id[2] = ha->d_id.b.al_pa;
+ ct_req->req.rft_id.port_id[0] = vha->d_id.b.domain;
+ ct_req->req.rft_id.port_id[1] = vha->d_id.b.area;
+ ct_req->req.rft_id.port_id[2] = vha->d_id.b.al_pa;
ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
/* Execute MS IOCB */
- rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): RFT_ID issue IOCB failed (%d).\n",
- ha->host_no, rval));
- } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RFT_ID") !=
+ vha->host_no, rval));
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n",
- ha->host_no));
+ vha->host_no));
}
return (rval);
@@ -511,23 +512,23 @@ qla2x00_rft_id(scsi_qla_host_t *ha)
* Returns 0 on success.
*/
int
-qla2x00_rff_id(scsi_qla_host_t *ha)
+qla2x00_rff_id(scsi_qla_host_t *vha)
{
int rval;
-
+ struct qla_hw_data *ha = vha->hw;
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
DEBUG2(printk("scsi(%ld): RFF_ID call unsupported on "
- "ISP2100/ISP2200.\n", ha->host_no));
+ "ISP2100/ISP2200.\n", vha->host_no));
return (QLA_SUCCESS);
}
/* Issue RFF_ID */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(ha, RFF_ID_REQ_SIZE,
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RFF_ID_REQ_SIZE,
RFF_ID_RSP_SIZE);
/* Prepare CT request */
@@ -536,26 +537,26 @@ qla2x00_rff_id(scsi_qla_host_t *ha)
ct_rsp = &ha->ct_sns->p.rsp;
/* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
- ct_req->req.rff_id.port_id[0] = ha->d_id.b.domain;
- ct_req->req.rff_id.port_id[1] = ha->d_id.b.area;
- ct_req->req.rff_id.port_id[2] = ha->d_id.b.al_pa;
+ ct_req->req.rff_id.port_id[0] = vha->d_id.b.domain;
+ ct_req->req.rff_id.port_id[1] = vha->d_id.b.area;
+ ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa;
ct_req->req.rff_id.fc4_feature = BIT_1;
ct_req->req.rff_id.fc4_type = 0x08; /* SCSI - FCP */
/* Execute MS IOCB */
- rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): RFF_ID issue IOCB failed (%d).\n",
- ha->host_no, rval));
- } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RFF_ID") !=
+ vha->host_no, rval));
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
DEBUG2(printk("scsi(%ld): RFF_ID exiting normally.\n",
- ha->host_no));
+ vha->host_no));
}
return (rval);
@@ -568,21 +569,20 @@ qla2x00_rff_id(scsi_qla_host_t *ha)
* Returns 0 on success.
*/
int
-qla2x00_rnn_id(scsi_qla_host_t *ha)
+qla2x00_rnn_id(scsi_qla_host_t *vha)
{
int rval;
-
+ struct qla_hw_data *ha = vha->hw;
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
- if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
- return (qla2x00_sns_rnn_id(ha));
- }
+ if (IS_QLA2100(ha) || IS_QLA2200(ha))
+ return qla2x00_sns_rnn_id(vha);
/* Issue RNN_ID */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(ha, RNN_ID_REQ_SIZE,
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RNN_ID_REQ_SIZE,
RNN_ID_RSP_SIZE);
/* Prepare CT request */
@@ -591,33 +591,34 @@ qla2x00_rnn_id(scsi_qla_host_t *ha)
ct_rsp = &ha->ct_sns->p.rsp;
/* Prepare CT arguments -- port_id, node_name */
- ct_req->req.rnn_id.port_id[0] = ha->d_id.b.domain;
- ct_req->req.rnn_id.port_id[1] = ha->d_id.b.area;
- ct_req->req.rnn_id.port_id[2] = ha->d_id.b.al_pa;
+ ct_req->req.rnn_id.port_id[0] = vha->d_id.b.domain;
+ ct_req->req.rnn_id.port_id[1] = vha->d_id.b.area;
+ ct_req->req.rnn_id.port_id[2] = vha->d_id.b.al_pa;
- memcpy(ct_req->req.rnn_id.node_name, ha->node_name, WWN_SIZE);
+ memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
/* Execute MS IOCB */
- rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): RNN_ID issue IOCB failed (%d).\n",
- ha->host_no, rval));
- } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RNN_ID") !=
+ vha->host_no, rval));
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n",
- ha->host_no));
+ vha->host_no));
}
return (rval);
}
void
-qla2x00_get_sym_node_name(scsi_qla_host_t *ha, uint8_t *snn)
+qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn)
{
+ struct qla_hw_data *ha = vha->hw;
sprintf(snn, "%s FW:v%d.%02d.%02d DVR:v%s",ha->model_number,
ha->fw_major_version, ha->fw_minor_version,
ha->fw_subminor_version, qla2x00_version_str);
@@ -630,23 +631,24 @@ qla2x00_get_sym_node_name(scsi_qla_host_t *ha, uint8_t *snn)
* Returns 0 on success.
*/
int
-qla2x00_rsnn_nn(scsi_qla_host_t *ha)
+qla2x00_rsnn_nn(scsi_qla_host_t *vha)
{
int rval;
+ struct qla_hw_data *ha = vha->hw;
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
DEBUG2(printk("scsi(%ld): RSNN_ID call unsupported on "
- "ISP2100/ISP2200.\n", ha->host_no));
+ "ISP2100/ISP2200.\n", vha->host_no));
return (QLA_SUCCESS);
}
/* Issue RSNN_NN */
/* Prepare common MS IOCB */
/* Request size adjusted after CT preparation */
- ms_pkt = ha->isp_ops->prep_ms_iocb(ha, 0, RSNN_NN_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, 0, RSNN_NN_RSP_SIZE);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RSNN_NN_CMD,
@@ -654,10 +656,10 @@ qla2x00_rsnn_nn(scsi_qla_host_t *ha)
ct_rsp = &ha->ct_sns->p.rsp;
/* Prepare CT arguments -- node_name, symbolic node_name, size */
- memcpy(ct_req->req.rsnn_nn.node_name, ha->node_name, WWN_SIZE);
+ memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
/* Prepare the Symbolic Node Name */
- qla2x00_get_sym_node_name(ha, ct_req->req.rsnn_nn.sym_node_name);
+ qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name);
/* Calculate SNN length */
ct_req->req.rsnn_nn.name_len =
@@ -669,18 +671,18 @@ qla2x00_rsnn_nn(scsi_qla_host_t *ha)
ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
/* Execute MS IOCB */
- rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): RSNN_NN issue IOCB failed (%d).\n",
- ha->host_no, rval));
- } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RSNN_NN") !=
+ vha->host_no, rval));
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
DEBUG2(printk("scsi(%ld): RSNN_NN exiting normally.\n",
- ha->host_no));
+ vha->host_no));
}
return (rval);
@@ -696,11 +698,12 @@ qla2x00_rsnn_nn(scsi_qla_host_t *ha)
* Returns a pointer to the @ha's sns_cmd.
*/
static inline struct sns_cmd_pkt *
-qla2x00_prep_sns_cmd(scsi_qla_host_t *ha, uint16_t cmd, uint16_t scmd_len,
+qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
uint16_t data_size)
{
uint16_t wc;
struct sns_cmd_pkt *sns_cmd;
+ struct qla_hw_data *ha = vha->hw;
sns_cmd = ha->sns_cmd;
memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt));
@@ -726,15 +729,15 @@ qla2x00_prep_sns_cmd(scsi_qla_host_t *ha, uint16_t cmd, uint16_t scmd_len,
* Returns 0 on success.
*/
static int
-qla2x00_sns_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
+qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
{
int rval;
-
+ struct qla_hw_data *ha = vha->hw;
struct sns_cmd_pkt *sns_cmd;
/* Issue GA_NXT. */
/* Prepare SNS command request. */
- sns_cmd = qla2x00_prep_sns_cmd(ha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN,
+ sns_cmd = qla2x00_prep_sns_cmd(vha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN,
GA_NXT_SNS_DATA_SIZE);
/* Prepare SNS command arguments -- port_id. */
@@ -743,16 +746,16 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain;
/* Execute SNS command. */
- rval = qla2x00_send_sns(ha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2,
+ rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2,
sizeof(struct sns_cmd_pkt));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): GA_NXT Send SNS failed (%d).\n",
- ha->host_no, rval));
+ vha->host_no, rval));
} else if (sns_cmd->p.gan_data[8] != 0x80 ||
sns_cmd->p.gan_data[9] != 0x02) {
DEBUG2_3(printk("scsi(%ld): GA_NXT failed, rejected request, "
- "ga_nxt_rsp:\n", ha->host_no));
+ "ga_nxt_rsp:\n", vha->host_no));
DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gan_data, 16));
rval = QLA_FUNCTION_FAILED;
} else {
@@ -772,7 +775,7 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
"nn %02x%02x%02x%02x%02x%02x%02x%02x "
"pn %02x%02x%02x%02x%02x%02x%02x%02x "
"portid=%02x%02x%02x.\n",
- ha->host_no,
+ vha->host_no,
fcport->node_name[0], fcport->node_name[1],
fcport->node_name[2], fcport->node_name[3],
fcport->node_name[4], fcport->node_name[5],
@@ -800,33 +803,33 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
* Returns 0 on success.
*/
static int
-qla2x00_sns_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
+qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
{
int rval;
-
+ struct qla_hw_data *ha = vha->hw;
uint16_t i;
uint8_t *entry;
struct sns_cmd_pkt *sns_cmd;
/* Issue GID_PT. */
/* Prepare SNS command request. */
- sns_cmd = qla2x00_prep_sns_cmd(ha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
+ sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
GID_PT_SNS_DATA_SIZE);
/* Prepare SNS command arguments -- port_type. */
sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;
/* Execute SNS command. */
- rval = qla2x00_send_sns(ha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2,
+ rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2,
sizeof(struct sns_cmd_pkt));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): GID_PT Send SNS failed (%d).\n",
- ha->host_no, rval));
+ vha->host_no, rval));
} else if (sns_cmd->p.gid_data[8] != 0x80 ||
sns_cmd->p.gid_data[9] != 0x02) {
DEBUG2_3(printk("scsi(%ld): GID_PT failed, rejected request, "
- "gid_rsp:\n", ha->host_no));
+ "gid_rsp:\n", vha->host_no));
DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gid_data, 16));
rval = QLA_FUNCTION_FAILED;
} else {
@@ -867,17 +870,17 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
* Returns 0 on success.
*/
static int
-qla2x00_sns_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
+qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
{
int rval;
-
+ struct qla_hw_data *ha = vha->hw;
uint16_t i;
struct sns_cmd_pkt *sns_cmd;
for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
/* Issue GPN_ID */
/* Prepare SNS command request. */
- sns_cmd = qla2x00_prep_sns_cmd(ha, GPN_ID_CMD,
+ sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD,
GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE);
/* Prepare SNS command arguments -- port_id. */
@@ -886,16 +889,16 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
/* Execute SNS command. */
- rval = qla2x00_send_sns(ha, ha->sns_cmd_dma,
+ rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): GPN_ID Send SNS failed "
- "(%d).\n", ha->host_no, rval));
+ "(%d).\n", vha->host_no, rval));
} else if (sns_cmd->p.gpn_data[8] != 0x80 ||
sns_cmd->p.gpn_data[9] != 0x02) {
DEBUG2_3(printk("scsi(%ld): GPN_ID failed, rejected "
- "request, gpn_rsp:\n", ha->host_no));
+ "request, gpn_rsp:\n", vha->host_no));
DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gpn_data, 16));
rval = QLA_FUNCTION_FAILED;
} else {
@@ -922,17 +925,17 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
* Returns 0 on success.
*/
static int
-qla2x00_sns_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
+qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
{
int rval;
-
+ struct qla_hw_data *ha = vha->hw;
uint16_t i;
struct sns_cmd_pkt *sns_cmd;
for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
/* Issue GNN_ID */
/* Prepare SNS command request. */
- sns_cmd = qla2x00_prep_sns_cmd(ha, GNN_ID_CMD,
+ sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD,
GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE);
/* Prepare SNS command arguments -- port_id. */
@@ -941,16 +944,16 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
/* Execute SNS command. */
- rval = qla2x00_send_sns(ha, ha->sns_cmd_dma,
+ rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): GNN_ID Send SNS failed "
- "(%d).\n", ha->host_no, rval));
+ "(%d).\n", vha->host_no, rval));
} else if (sns_cmd->p.gnn_data[8] != 0x80 ||
sns_cmd->p.gnn_data[9] != 0x02) {
DEBUG2_3(printk("scsi(%ld): GNN_ID failed, rejected "
- "request, gnn_rsp:\n", ha->host_no));
+ "request, gnn_rsp:\n", vha->host_no));
DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gnn_data, 16));
rval = QLA_FUNCTION_FAILED;
} else {
@@ -962,7 +965,7 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
"nn %02x%02x%02x%02x%02x%02x%02x%02x "
"pn %02x%02x%02x%02x%02x%02x%02x%02x "
"portid=%02x%02x%02x.\n",
- ha->host_no,
+ vha->host_no,
list[i].node_name[0], list[i].node_name[1],
list[i].node_name[2], list[i].node_name[3],
list[i].node_name[4], list[i].node_name[5],
@@ -992,40 +995,40 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
* Returns 0 on success.
*/
static int
-qla2x00_sns_rft_id(scsi_qla_host_t *ha)
+qla2x00_sns_rft_id(scsi_qla_host_t *vha)
{
int rval;
-
+ struct qla_hw_data *ha = vha->hw;
struct sns_cmd_pkt *sns_cmd;
/* Issue RFT_ID. */
/* Prepare SNS command request. */
- sns_cmd = qla2x00_prep_sns_cmd(ha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN,
+ sns_cmd = qla2x00_prep_sns_cmd(vha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN,
RFT_ID_SNS_DATA_SIZE);
/* Prepare SNS command arguments -- port_id, FC-4 types */
- sns_cmd->p.cmd.param[0] = ha->d_id.b.al_pa;
- sns_cmd->p.cmd.param[1] = ha->d_id.b.area;
- sns_cmd->p.cmd.param[2] = ha->d_id.b.domain;
+ sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
+ sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
+ sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
sns_cmd->p.cmd.param[5] = 0x01; /* FCP-3 */
/* Execute SNS command. */
- rval = qla2x00_send_sns(ha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2,
+ rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2,
sizeof(struct sns_cmd_pkt));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): RFT_ID Send SNS failed (%d).\n",
- ha->host_no, rval));
+ vha->host_no, rval));
} else if (sns_cmd->p.rft_data[8] != 0x80 ||
sns_cmd->p.rft_data[9] != 0x02) {
DEBUG2_3(printk("scsi(%ld): RFT_ID failed, rejected request, "
- "rft_rsp:\n", ha->host_no));
+ "rft_rsp:\n", vha->host_no));
DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rft_data, 16));
rval = QLA_FUNCTION_FAILED;
} else {
DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n",
- ha->host_no));
+ vha->host_no));
}
return (rval);
@@ -1041,47 +1044,47 @@ qla2x00_sns_rft_id(scsi_qla_host_t *ha)
* Returns 0 on success.
*/
static int
-qla2x00_sns_rnn_id(scsi_qla_host_t *ha)
+qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
{
int rval;
-
+ struct qla_hw_data *ha = vha->hw;
struct sns_cmd_pkt *sns_cmd;
/* Issue RNN_ID. */
/* Prepare SNS command request. */
- sns_cmd = qla2x00_prep_sns_cmd(ha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN,
+ sns_cmd = qla2x00_prep_sns_cmd(vha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN,
RNN_ID_SNS_DATA_SIZE);
/* Prepare SNS command arguments -- port_id, nodename. */
- sns_cmd->p.cmd.param[0] = ha->d_id.b.al_pa;
- sns_cmd->p.cmd.param[1] = ha->d_id.b.area;
- sns_cmd->p.cmd.param[2] = ha->d_id.b.domain;
-
- sns_cmd->p.cmd.param[4] = ha->node_name[7];
- sns_cmd->p.cmd.param[5] = ha->node_name[6];
- sns_cmd->p.cmd.param[6] = ha->node_name[5];
- sns_cmd->p.cmd.param[7] = ha->node_name[4];
- sns_cmd->p.cmd.param[8] = ha->node_name[3];
- sns_cmd->p.cmd.param[9] = ha->node_name[2];
- sns_cmd->p.cmd.param[10] = ha->node_name[1];
- sns_cmd->p.cmd.param[11] = ha->node_name[0];
+ sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
+ sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
+ sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
+
+ sns_cmd->p.cmd.param[4] = vha->node_name[7];
+ sns_cmd->p.cmd.param[5] = vha->node_name[6];
+ sns_cmd->p.cmd.param[6] = vha->node_name[5];
+ sns_cmd->p.cmd.param[7] = vha->node_name[4];
+ sns_cmd->p.cmd.param[8] = vha->node_name[3];
+ sns_cmd->p.cmd.param[9] = vha->node_name[2];
+ sns_cmd->p.cmd.param[10] = vha->node_name[1];
+ sns_cmd->p.cmd.param[11] = vha->node_name[0];
/* Execute SNS command. */
- rval = qla2x00_send_sns(ha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2,
+ rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2,
sizeof(struct sns_cmd_pkt));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): RNN_ID Send SNS failed (%d).\n",
- ha->host_no, rval));
+ vha->host_no, rval));
} else if (sns_cmd->p.rnn_data[8] != 0x80 ||
sns_cmd->p.rnn_data[9] != 0x02) {
DEBUG2_3(printk("scsi(%ld): RNN_ID failed, rejected request, "
- "rnn_rsp:\n", ha->host_no));
+ "rnn_rsp:\n", vha->host_no));
DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rnn_data, 16));
rval = QLA_FUNCTION_FAILED;
} else {
DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n",
- ha->host_no));
+ vha->host_no));
}
return (rval);
@@ -1094,25 +1097,25 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *ha)
* Returns 0 on success.
*/
static int
-qla2x00_mgmt_svr_login(scsi_qla_host_t *ha)
+qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
{
int ret;
uint16_t mb[MAILBOX_REGISTER_COUNT];
-
+ struct qla_hw_data *ha = vha->hw;
ret = QLA_SUCCESS;
- if (ha->flags.management_server_logged_in)
+ if (vha->flags.management_server_logged_in)
return ret;
- ha->isp_ops->fabric_login(ha, ha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
+ ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
mb, BIT_1);
if (mb[0] != MBS_COMMAND_COMPLETE) {
DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: "
"loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n",
- __func__, ha->host_no, ha->mgmt_svr_loop_id, mb[0], mb[1],
+ __func__, vha->host_no, vha->mgmt_svr_loop_id, mb[0], mb[1],
mb[2], mb[6], mb[7]));
ret = QLA_FUNCTION_FAILED;
} else
- ha->flags.management_server_logged_in = 1;
+ vha->flags.management_server_logged_in = 1;
return ret;
}
@@ -1126,17 +1129,17 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *ha)
* Returns a pointer to the @ha's ms_iocb.
*/
void *
-qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
+qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
uint32_t rsp_size)
{
ms_iocb_entry_t *ms_pkt;
-
+ struct qla_hw_data *ha = vha->hw;
ms_pkt = ha->ms_iocb;
memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
ms_pkt->entry_type = MS_IOCB_TYPE;
ms_pkt->entry_count = 1;
- SET_TARGET_ID(ha, ms_pkt->loop_id, ha->mgmt_svr_loop_id);
+ SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id);
ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG);
ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
@@ -1164,17 +1167,18 @@ qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
* Returns a pointer to the @ha's ms_iocb.
*/
void *
-qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
+qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
uint32_t rsp_size)
{
struct ct_entry_24xx *ct_pkt;
+ struct qla_hw_data *ha = vha->hw;
ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
ct_pkt->entry_type = CT_IOCB_TYPE;
ct_pkt->entry_count = 1;
- ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id);
+ ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
@@ -1188,14 +1192,15 @@ qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
- ct_pkt->vp_index = ha->vp_idx;
+ ct_pkt->vp_index = vha->vp_idx;
return ct_pkt;
}
static inline ms_iocb_entry_t *
-qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size)
+qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
{
+ struct qla_hw_data *ha = vha->hw;
ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
@@ -1240,7 +1245,7 @@ qla2x00_prep_ct_fdmi_req(struct ct_sns_req *ct_req, uint16_t cmd,
* Returns 0 on success.
*/
static int
-qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
+qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
{
int rval, alen;
uint32_t size, sn;
@@ -1250,11 +1255,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
struct ct_sns_rsp *ct_rsp;
uint8_t *entries;
struct ct_fdmi_hba_attr *eiter;
+ struct qla_hw_data *ha = vha->hw;
/* Issue RHBA */
/* Prepare common MS IOCB */
/* Request size adjusted after CT preparation */
- ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(ha, 0, RHBA_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RHBA_CMD,
@@ -1262,9 +1268,9 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
ct_rsp = &ha->ct_sns->p.rsp;
/* Prepare FDMI command arguments -- attribute block, attributes. */
- memcpy(ct_req->req.rhba.hba_identifier, ha->port_name, WWN_SIZE);
+ memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, WWN_SIZE);
ct_req->req.rhba.entry_count = __constant_cpu_to_be32(1);
- memcpy(ct_req->req.rhba.port_name, ha->port_name, WWN_SIZE);
+ memcpy(ct_req->req.rhba.port_name, vha->port_name, WWN_SIZE);
size = 2 * WWN_SIZE + 4 + 4;
/* Attributes */
@@ -1276,11 +1282,11 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_HBA_NODE_NAME);
eiter->len = __constant_cpu_to_be16(4 + WWN_SIZE);
- memcpy(eiter->a.node_name, ha->node_name, WWN_SIZE);
+ memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
size += 4 + WWN_SIZE;
DEBUG13(printk("%s(%ld): NODENAME=%02x%02x%02x%02x%02x%02x%02x%02x.\n",
- __func__, ha->host_no,
+ __func__, vha->host_no,
eiter->a.node_name[0], eiter->a.node_name[1], eiter->a.node_name[2],
eiter->a.node_name[3], eiter->a.node_name[4], eiter->a.node_name[5],
eiter->a.node_name[6], eiter->a.node_name[7]));
@@ -1294,7 +1300,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): MANUFACTURER=%s.\n", __func__, ha->host_no,
+ DEBUG13(printk("%s(%ld): MANUFACTURER=%s.\n", __func__, vha->host_no,
eiter->a.manufacturer));
/* Serial number. */
@@ -1307,7 +1313,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): SERIALNO=%s.\n", __func__, ha->host_no,
+ DEBUG13(printk("%s(%ld): SERIALNO=%s.\n", __func__, vha->host_no,
eiter->a.serial_num));
/* Model name. */
@@ -1319,7 +1325,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): MODEL_NAME=%s.\n", __func__, ha->host_no,
+ DEBUG13(printk("%s(%ld): MODEL_NAME=%s.\n", __func__, vha->host_no,
eiter->a.model));
/* Model description. */
@@ -1332,7 +1338,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): MODEL_DESC=%s.\n", __func__, ha->host_no,
+ DEBUG13(printk("%s(%ld): MODEL_DESC=%s.\n", __func__, vha->host_no,
eiter->a.model_desc));
/* Hardware version. */
@@ -1344,7 +1350,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): HARDWAREVER=%s.\n", __func__, ha->host_no,
+ DEBUG13(printk("%s(%ld): HARDWAREVER=%s.\n", __func__, vha->host_no,
eiter->a.hw_version));
/* Driver version. */
@@ -1356,7 +1362,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): DRIVERVER=%s.\n", __func__, ha->host_no,
+ DEBUG13(printk("%s(%ld): DRIVERVER=%s.\n", __func__, vha->host_no,
eiter->a.driver_version));
/* Option ROM version. */
@@ -1368,27 +1374,27 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): OPTROMVER=%s.\n", __func__, ha->host_no,
+ DEBUG13(printk("%s(%ld): OPTROMVER=%s.\n", __func__, vha->host_no,
eiter->a.orom_version));
/* Firmware version */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
- ha->isp_ops->fw_version_str(ha, eiter->a.fw_version);
+ ha->isp_ops->fw_version_str(vha, eiter->a.fw_version);
alen = strlen(eiter->a.fw_version);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): FIRMWAREVER=%s.\n", __func__, ha->host_no,
+ DEBUG13(printk("%s(%ld): FIRMWAREVER=%s.\n", __func__, vha->host_no,
eiter->a.fw_version));
/* Update MS request size. */
- qla2x00_update_ms_fdmi_iocb(ha, size + 16);
+ qla2x00_update_ms_fdmi_iocb(vha, size + 16);
DEBUG13(printk("%s(%ld): RHBA identifier="
"%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__,
- ha->host_no, ct_req->req.rhba.hba_identifier[0],
+ vha->host_no, ct_req->req.rhba.hba_identifier[0],
ct_req->req.rhba.hba_identifier[1],
ct_req->req.rhba.hba_identifier[2],
ct_req->req.rhba.hba_identifier[3],
@@ -1399,25 +1405,25 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
DEBUG13(qla2x00_dump_buffer(entries, size));
/* Execute MS IOCB */
- rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): RHBA issue IOCB failed (%d).\n",
- ha->host_no, rval));
- } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RHBA") !=
+ vha->host_no, rval));
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
ct_rsp->header.explanation_code ==
CT_EXPL_ALREADY_REGISTERED) {
DEBUG2_13(printk("%s(%ld): HBA already registered.\n",
- __func__, ha->host_no));
+ __func__, vha->host_no));
rval = QLA_ALREADY_REGISTERED;
}
} else {
DEBUG2(printk("scsi(%ld): RHBA exiting normally.\n",
- ha->host_no));
+ vha->host_no));
}
return rval;
@@ -1430,17 +1436,17 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
* Returns 0 on success.
*/
static int
-qla2x00_fdmi_dhba(scsi_qla_host_t *ha)
+qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
{
int rval;
-
+ struct qla_hw_data *ha = vha->hw;
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
/* Issue DHBA */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(ha, DHBA_REQ_SIZE,
+ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE,
DHBA_RSP_SIZE);
/* Prepare CT request */
@@ -1449,28 +1455,28 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *ha)
ct_rsp = &ha->ct_sns->p.rsp;
/* Prepare FDMI command arguments -- portname. */
- memcpy(ct_req->req.dhba.port_name, ha->port_name, WWN_SIZE);
+ memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
DEBUG13(printk("%s(%ld): DHBA portname="
- "%02x%02x%02x%02x%02x%02x%02x%02x.\n", __func__, ha->host_no,
+ "%02x%02x%02x%02x%02x%02x%02x%02x.\n", __func__, vha->host_no,
ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1],
ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3],
ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5],
ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]));
/* Execute MS IOCB */
- rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): DHBA issue IOCB failed (%d).\n",
- ha->host_no, rval));
- } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "DHBA") !=
+ vha->host_no, rval));
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
DEBUG2(printk("scsi(%ld): DHBA exiting normally.\n",
- ha->host_no));
+ vha->host_no));
}
return rval;
@@ -1483,11 +1489,11 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *ha)
* Returns 0 on success.
*/
static int
-qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
+qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
{
int rval, alen;
uint32_t size, max_frame_size;
-
+ struct qla_hw_data *ha = vha->hw;
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
@@ -1498,7 +1504,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
/* Issue RPA */
/* Prepare common MS IOCB */
/* Request size adjusted after CT preparation */
- ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(ha, 0, RPA_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RPA_CMD,
@@ -1506,7 +1512,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
ct_rsp = &ha->ct_sns->p.rsp;
/* Prepare FDMI command arguments -- attribute block, attributes. */
- memcpy(ct_req->req.rpa.port_name, ha->port_name, WWN_SIZE);
+ memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
size = WWN_SIZE + 4;
/* Attributes */
@@ -1521,8 +1527,9 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
eiter->a.fc4_types[2] = 0x01;
size += 4 + 32;
- DEBUG13(printk("%s(%ld): FC4_TYPES=%02x %02x.\n", __func__, ha->host_no,
- eiter->a.fc4_types[2], eiter->a.fc4_types[1]));
+ DEBUG13(printk("%s(%ld): FC4_TYPES=%02x %02x.\n", __func__,
+ vha->host_no, eiter->a.fc4_types[2],
+ eiter->a.fc4_types[1]));
/* Supported speed. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1544,7 +1551,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
FDMI_PORT_SPEED_1GB);
size += 4 + 4;
- DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, ha->host_no,
+ DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, vha->host_no,
eiter->a.sup_speed));
/* Current speed. */
@@ -1575,7 +1582,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
}
size += 4 + 4;
- DEBUG13(printk("%s(%ld): CURRENT_SPEED=%x.\n", __func__, ha->host_no,
+ DEBUG13(printk("%s(%ld): CURRENT_SPEED=%x.\n", __func__, vha->host_no,
eiter->a.cur_speed));
/* Max frame size. */
@@ -1588,7 +1595,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
size += 4 + 4;
- DEBUG13(printk("%s(%ld): MAX_FRAME_SIZE=%x.\n", __func__, ha->host_no,
+ DEBUG13(printk("%s(%ld): MAX_FRAME_SIZE=%x.\n", __func__, vha->host_no,
eiter->a.max_frame_size));
/* OS device name. */
@@ -1600,32 +1607,32 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): OS_DEVICE_NAME=%s.\n", __func__, ha->host_no,
+ DEBUG13(printk("%s(%ld): OS_DEVICE_NAME=%s.\n", __func__, vha->host_no,
eiter->a.os_dev_name));
/* Hostname. */
- if (strlen(fc_host_system_hostname(ha->host))) {
+ if (strlen(fc_host_system_hostname(vha->host))) {
ct_req->req.rpa.attrs.count =
__constant_cpu_to_be32(FDMI_PORT_ATTR_COUNT);
eiter = (struct ct_fdmi_port_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_PORT_HOST_NAME);
snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
- "%s", fc_host_system_hostname(ha->host));
+ "%s", fc_host_system_hostname(vha->host));
alen = strlen(eiter->a.host_name);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): HOSTNAME=%s.\n", __func__,
- ha->host_no, eiter->a.host_name));
+ vha->host_no, eiter->a.host_name));
}
/* Update MS request size. */
- qla2x00_update_ms_fdmi_iocb(ha, size + 16);
+ qla2x00_update_ms_fdmi_iocb(vha, size + 16);
DEBUG13(printk("%s(%ld): RPA portname="
"%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__,
- ha->host_no, ct_req->req.rpa.port_name[0],
+ vha->host_no, ct_req->req.rpa.port_name[0],
ct_req->req.rpa.port_name[1], ct_req->req.rpa.port_name[2],
ct_req->req.rpa.port_name[3], ct_req->req.rpa.port_name[4],
ct_req->req.rpa.port_name[5], ct_req->req.rpa.port_name[6],
@@ -1633,18 +1640,18 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
DEBUG13(qla2x00_dump_buffer(entries, size));
/* Execute MS IOCB */
- rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): RPA issue IOCB failed (%d).\n",
- ha->host_no, rval));
- } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RPA") !=
+ vha->host_no, rval));
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
DEBUG2(printk("scsi(%ld): RPA exiting normally.\n",
- ha->host_no));
+ vha->host_no));
}
return rval;
@@ -1657,34 +1664,28 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
* Returns 0 on success.
*/
int
-qla2x00_fdmi_register(scsi_qla_host_t *ha)
+qla2x00_fdmi_register(scsi_qla_host_t *vha)
{
int rval;
- if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
- DEBUG2(printk("scsi(%ld): FDMI unsupported on "
- "ISP2100/ISP2200.\n", ha->host_no));
- return QLA_SUCCESS;
- }
-
- rval = qla2x00_mgmt_svr_login(ha);
+ rval = qla2x00_mgmt_svr_login(vha);
if (rval)
return rval;
- rval = qla2x00_fdmi_rhba(ha);
+ rval = qla2x00_fdmi_rhba(vha);
if (rval) {
if (rval != QLA_ALREADY_REGISTERED)
return rval;
- rval = qla2x00_fdmi_dhba(ha);
+ rval = qla2x00_fdmi_dhba(vha);
if (rval)
return rval;
- rval = qla2x00_fdmi_rhba(ha);
+ rval = qla2x00_fdmi_rhba(vha);
if (rval)
return rval;
}
- rval = qla2x00_fdmi_rpa(ha);
+ rval = qla2x00_fdmi_rpa(vha);
return rval;
}
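/*
 * Editor's sketch (not part of the patch): the control flow of
 * qla2x00_fdmi_register() above. RHBA may fail because a previous
 * incarnation of the HBA is still registered with the fabric's
 * management server; the driver then deregisters (DHBA) and retries
 * RHBA before registering port attributes (RPA). Return codes and
 * helpers below are stand-ins.
 */
#include <stdio.h>

enum { DEMO_OK, DEMO_FAIL, DEMO_ALREADY_REGISTERED };

static int demo_rhba(int *fabric_state)
{
        if (*fabric_state)
                return DEMO_ALREADY_REGISTERED; /* stale registration */
        *fabric_state = 1;
        return DEMO_OK;
}

static int demo_dhba(int *fabric_state)
{
        *fabric_state = 0;                      /* deregister */
        return DEMO_OK;
}

static int demo_rpa(void)
{
        return DEMO_OK;                         /* port attributes */
}

static int demo_fdmi_register(int *fabric_state)
{
        int rval = demo_rhba(fabric_state);

        if (rval == DEMO_ALREADY_REGISTERED) {
                if (demo_dhba(fabric_state) != DEMO_OK)
                        return DEMO_FAIL;
                if (demo_rhba(fabric_state) != DEMO_OK)
                        return DEMO_FAIL;
        } else if (rval != DEMO_OK) {
                return rval;
        }
        return demo_rpa();
}

int main(void)
{
        int fabric_state = 1;                   /* simulate stale entry */

        printf("fdmi_register -> %d\n", demo_fdmi_register(&fabric_state));
        return 0;
}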
@@ -1697,11 +1698,11 @@ qla2x00_fdmi_register(scsi_qla_host_t *ha)
* Returns 0 on success.
*/
int
-qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list)
+qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
{
int rval;
uint16_t i;
-
+ struct qla_hw_data *ha = vha->hw;
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
@@ -1712,7 +1713,7 @@ qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list)
for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
/* Issue GFPN_ID */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GFPN_ID_REQ_SIZE,
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GFPN_ID_REQ_SIZE,
GFPN_ID_RSP_SIZE);
/* Prepare CT request */
@@ -1726,13 +1727,13 @@ qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list)
ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
/* Execute MS IOCB */
- rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): GFPN_ID issue IOCB "
- "failed (%d).\n", ha->host_no, rval));
- } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp,
+ "failed (%d).\n", vha->host_no, rval));
+ } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
"GFPN_ID") != QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
@@ -1750,17 +1751,17 @@ qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list)
}
static inline void *
-qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *ha, uint32_t req_size,
+qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *vha, uint32_t req_size,
uint32_t rsp_size)
{
struct ct_entry_24xx *ct_pkt;
-
+ struct qla_hw_data *ha = vha->hw;
ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
ct_pkt->entry_type = CT_IOCB_TYPE;
ct_pkt->entry_count = 1;
- ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id);
+ ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
@@ -1774,7 +1775,7 @@ qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *ha, uint32_t req_size,
ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
- ct_pkt->vp_index = ha->vp_idx;
+ ct_pkt->vp_index = vha->vp_idx;
return ct_pkt;
}
@@ -1803,11 +1804,11 @@ qla24xx_prep_ct_fm_req(struct ct_sns_req *ct_req, uint16_t cmd,
* Returns 0 on success.
*/
int
-qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list)
+qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
{
int rval;
uint16_t i;
-
+ struct qla_hw_data *ha = vha->hw;
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
@@ -1817,14 +1818,14 @@ qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list)
if (!ha->flags.gpsc_supported)
return QLA_FUNCTION_FAILED;
- rval = qla2x00_mgmt_svr_login(ha);
+ rval = qla2x00_mgmt_svr_login(vha);
if (rval)
return rval;
for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
/* Issue GPSC */
/* Prepare common MS IOCB */
- ms_pkt = qla24xx_prep_ms_fm_iocb(ha, GPSC_REQ_SIZE,
+ ms_pkt = qla24xx_prep_ms_fm_iocb(vha, GPSC_REQ_SIZE,
GPSC_RSP_SIZE);
/* Prepare CT request */
@@ -1837,13 +1838,13 @@ qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list)
WWN_SIZE);
/* Execute MS IOCB */
- rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
+ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): GPSC issue IOCB "
- "failed (%d).\n", ha->host_no, rval));
- } else if ((rval = qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp,
+ "failed (%d).\n", vha->host_no, rval));
+ } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
"GPSC")) != QLA_SUCCESS) {
/* FM command unsupported? */
if (rval == QLA_INVALID_COMMAND &&
@@ -1853,7 +1854,7 @@ qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list)
CT_REASON_COMMAND_UNSUPPORTED)) {
DEBUG2(printk("scsi(%ld): GPSC command "
"unsupported, disabling query...\n",
- ha->host_no));
+ vha->host_no));
ha->flags.gpsc_supported = 0;
rval = QLA_FUNCTION_FAILED;
break;
@@ -1878,7 +1879,7 @@ qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list)
DEBUG2_3(printk("scsi(%ld): GPSC ext entry - "
"fpn %02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x "
- "speed=%04x.\n", ha->host_no,
+ "speed=%04x.\n", vha->host_no,
list[i].fabric_port_name[0],
list[i].fabric_port_name[1],
list[i].fabric_port_name[2],
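/*
 * Editor's sketch (not part of the patch): the pattern the qla_gs.c
 * hunks above repeat for every CT/SNS helper. Fabric identity (d_id,
 * node_name, port_name, vp_idx, host_no) is per-port and read from
 * vha; the DMA-able command buffers (ms_iocb, ct_sns, sns_cmd) remain
 * single shared HBA resources reached through vha->hw. Stand-in
 * types below, simplified for illustration.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct hw_demo {
        uint8_t ms_iocb[64];            /* shared request buffer */
};

struct vport_demo {
        uint8_t d_id[3];                /* per-port fabric address */
        struct hw_demo *hw;
};

static void demo_prep_ct_req(struct vport_demo *vha)
{
        struct hw_demo *ha = vha->hw;

        memset(ha->ms_iocb, 0, sizeof(ha->ms_iocb));    /* ha: buffer   */
        memcpy(&ha->ms_iocb[4], vha->d_id, 3);          /* vha: identity */
}

int main(void)
{
        struct hw_demo hw;
        struct vport_demo vha = { .d_id = { 0x01, 0x02, 0x03 }, .hw = &hw };

        demo_prep_ct_req(&vha);
        printf("port_id=%02x%02x%02x\n",
            hw.ms_iocb[4], hw.ms_iocb[5], hw.ms_iocb[6]);
        return 0;
}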
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index a470f2d3270d..52ed56ecf195 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -5,6 +5,7 @@
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
+#include "qla_gbl.h"
#include <linux/delay.h>
#include <linux/vmalloc.h>
@@ -21,7 +22,6 @@
static int qla2x00_isp_firmware(scsi_qla_host_t *);
static void qla2x00_resize_request_q(scsi_qla_host_t *);
static int qla2x00_setup_chip(scsi_qla_host_t *);
-static void qla2x00_init_response_q_entries(scsi_qla_host_t *);
static int qla2x00_init_rings(scsi_qla_host_t *);
static int qla2x00_fw_ready(scsi_qla_host_t *);
static int qla2x00_configure_hba(scsi_qla_host_t *);
@@ -35,10 +35,11 @@ static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
static int qla2x00_restart_isp(scsi_qla_host_t *);
-static int qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev);
+static int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);
static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
static int qla84xx_init_chip(scsi_qla_host_t *);
+static int qla25xx_init_queues(struct qla_hw_data *);
/****************************************************************************/
/* QLogic ISP2x00 Hardware Support Functions. */
@@ -55,77 +56,81 @@ static int qla84xx_init_chip(scsi_qla_host_t *);
* 0 = success
*/
int
-qla2x00_initialize_adapter(scsi_qla_host_t *ha)
+qla2x00_initialize_adapter(scsi_qla_host_t *vha)
{
int rval;
-
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
/* Clear adapter flags. */
- ha->flags.online = 0;
- ha->flags.reset_active = 0;
- atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
- atomic_set(&ha->loop_state, LOOP_DOWN);
- ha->device_flags = DFLG_NO_CABLE;
- ha->dpc_flags = 0;
- ha->flags.management_server_logged_in = 0;
- ha->marker_needed = 0;
+ vha->flags.online = 0;
+ vha->flags.reset_active = 0;
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ vha->device_flags = DFLG_NO_CABLE;
+ vha->dpc_flags = 0;
+ vha->flags.management_server_logged_in = 0;
+ vha->marker_needed = 0;
ha->mbx_flags = 0;
ha->isp_abort_cnt = 0;
ha->beacon_blink_led = 0;
- set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
+ set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
+
+ set_bit(0, ha->req_qid_map);
+ set_bit(0, ha->rsp_qid_map);
qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
- rval = ha->isp_ops->pci_config(ha);
+ rval = ha->isp_ops->pci_config(vha);
if (rval) {
DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n",
- ha->host_no));
+ vha->host_no));
return (rval);
}
- ha->isp_ops->reset_chip(ha);
+ ha->isp_ops->reset_chip(vha);
- rval = qla2xxx_get_flash_info(ha);
+ rval = qla2xxx_get_flash_info(vha);
if (rval) {
DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n",
- ha->host_no));
+ vha->host_no));
return (rval);
}
- ha->isp_ops->get_flash_version(ha, ha->request_ring);
+ ha->isp_ops->get_flash_version(vha, req->ring);
qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
- ha->isp_ops->nvram_config(ha);
+ ha->isp_ops->nvram_config(vha);
if (ha->flags.disable_serdes) {
/* Mask HBA via NVRAM settings? */
qla_printk(KERN_INFO, ha, "Masking HBA WWPN "
"%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
- ha->port_name[0], ha->port_name[1],
- ha->port_name[2], ha->port_name[3],
- ha->port_name[4], ha->port_name[5],
- ha->port_name[6], ha->port_name[7]);
+ vha->port_name[0], vha->port_name[1],
+ vha->port_name[2], vha->port_name[3],
+ vha->port_name[4], vha->port_name[5],
+ vha->port_name[6], vha->port_name[7]);
return QLA_FUNCTION_FAILED;
}
qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");
- if (qla2x00_isp_firmware(ha) != QLA_SUCCESS) {
- rval = ha->isp_ops->chip_diag(ha);
+ if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
+ rval = ha->isp_ops->chip_diag(vha);
if (rval)
return (rval);
- rval = qla2x00_setup_chip(ha);
+ rval = qla2x00_setup_chip(vha);
if (rval)
return (rval);
}
if (IS_QLA84XX(ha)) {
- ha->cs84xx = qla84xx_get_chip(ha);
+ ha->cs84xx = qla84xx_get_chip(vha);
if (!ha->cs84xx) {
qla_printk(KERN_ERR, ha,
"Unable to configure ISP84XX.\n");
return QLA_FUNCTION_FAILED;
}
}
- rval = qla2x00_init_rings(ha);
+ rval = qla2x00_init_rings(vha);
return (rval);
}
@@ -137,11 +142,11 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
* Returns 0 on success.
*/
int
-qla2100_pci_config(scsi_qla_host_t *ha)
+qla2100_pci_config(scsi_qla_host_t *vha)
{
uint16_t w;
- uint32_t d;
unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
pci_set_master(ha->pdev);
@@ -151,10 +156,7 @@ qla2100_pci_config(scsi_qla_host_t *ha)
w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
pci_write_config_word(ha->pdev, PCI_COMMAND, w);
- /* Reset expansion ROM address decode enable */
- pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
- d &= ~PCI_ROM_ADDRESS_ENABLE;
- pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
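+ /* pci_disable_rom() clears PCI_ROM_ADDRESS_ENABLE, replacing the open-coded sequence above. */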
+ pci_disable_rom(ha->pdev);
/* Get PCI bus information. */
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -171,12 +173,12 @@ qla2100_pci_config(scsi_qla_host_t *ha)
* Returns 0 on success.
*/
int
-qla2300_pci_config(scsi_qla_host_t *ha)
+qla2300_pci_config(scsi_qla_host_t *vha)
{
uint16_t w;
- uint32_t d;
unsigned long flags = 0;
uint32_t cnt;
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
pci_set_master(ha->pdev);
@@ -236,10 +238,7 @@ qla2300_pci_config(scsi_qla_host_t *ha)
pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
- /* Reset expansion ROM address decode enable */
- pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
- d &= ~PCI_ROM_ADDRESS_ENABLE;
- pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
+ pci_disable_rom(ha->pdev);
/* Get PCI bus information. */
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -256,11 +255,11 @@ qla2300_pci_config(scsi_qla_host_t *ha)
* Returns 0 on success.
*/
int
-qla24xx_pci_config(scsi_qla_host_t *ha)
+qla24xx_pci_config(scsi_qla_host_t *vha)
{
uint16_t w;
- uint32_t d;
unsigned long flags = 0;
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
pci_set_master(ha->pdev);
@@ -281,10 +280,7 @@ qla24xx_pci_config(scsi_qla_host_t *ha)
if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
pcie_set_readrq(ha->pdev, 2048);
- /* Reset expansion ROM address decode enable */
- pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
- d &= ~PCI_ROM_ADDRESS_ENABLE;
- pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
+ pci_disable_rom(ha->pdev);
ha->chip_revision = ha->pdev->revision;
@@ -303,10 +299,10 @@ qla24xx_pci_config(scsi_qla_host_t *ha)
* Returns 0 on success.
*/
int
-qla25xx_pci_config(scsi_qla_host_t *ha)
+qla25xx_pci_config(scsi_qla_host_t *vha)
{
uint16_t w;
- uint32_t d;
+ struct qla_hw_data *ha = vha->hw;
pci_set_master(ha->pdev);
pci_try_set_mwi(ha->pdev);
@@ -320,10 +316,7 @@ qla25xx_pci_config(scsi_qla_host_t *ha)
if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
pcie_set_readrq(ha->pdev, 2048);
- /* Reset expansion ROM address decode enable */
- pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
- d &= ~PCI_ROM_ADDRESS_ENABLE;
- pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
+ pci_disable_rom(ha->pdev);
ha->chip_revision = ha->pdev->revision;
@@ -337,32 +330,33 @@ qla25xx_pci_config(scsi_qla_host_t *ha)
* Returns 0 on success.
*/
static int
-qla2x00_isp_firmware(scsi_qla_host_t *ha)
+qla2x00_isp_firmware(scsi_qla_host_t *vha)
{
int rval;
uint16_t loop_id, topo, sw_cap;
uint8_t domain, area, al_pa;
+ struct qla_hw_data *ha = vha->hw;
/* Assume loading risc code */
rval = QLA_FUNCTION_FAILED;
if (ha->flags.disable_risc_code_load) {
DEBUG2(printk("scsi(%ld): RISC CODE NOT loaded\n",
- ha->host_no));
+ vha->host_no));
qla_printk(KERN_INFO, ha, "RISC CODE NOT loaded\n");
/* Verify checksum of loaded RISC code. */
- rval = qla2x00_verify_checksum(ha, ha->fw_srisc_address);
+ rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
if (rval == QLA_SUCCESS) {
/* And, verify we are not in ROM code. */
- rval = qla2x00_get_adapter_id(ha, &loop_id, &al_pa,
+ rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
&area, &domain, &topo, &sw_cap);
}
}
if (rval) {
DEBUG2_3(printk("scsi(%ld): **** Load RISC code ****\n",
- ha->host_no));
+ vha->host_no));
}
return (rval);
@@ -375,9 +369,10 @@ qla2x00_isp_firmware(scsi_qla_host_t *ha)
* Returns 0 on success.
*/
void
-qla2x00_reset_chip(scsi_qla_host_t *ha)
+qla2x00_reset_chip(scsi_qla_host_t *vha)
{
unsigned long flags = 0;
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
uint32_t cnt;
uint16_t cmd;
@@ -515,10 +510,11 @@ qla2x00_reset_chip(scsi_qla_host_t *ha)
* Returns 0 on success.
*/
static inline void
-qla24xx_reset_risc(scsi_qla_host_t *ha)
+qla24xx_reset_risc(scsi_qla_host_t *vha)
{
int hw_evt = 0;
unsigned long flags = 0;
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
uint32_t cnt, d2;
uint16_t wd;
@@ -557,7 +553,7 @@ qla24xx_reset_risc(scsi_qla_host_t *ha)
barrier();
}
if (cnt == 0 || hw_evt)
- qla2xxx_hw_event_log(ha, HW_EVENT_RESET_ERR,
+ qla2xxx_hw_event_log(vha, HW_EVENT_RESET_ERR,
RD_REG_WORD(&reg->mailbox1), RD_REG_WORD(&reg->mailbox2),
RD_REG_WORD(&reg->mailbox3));
@@ -587,12 +583,13 @@ qla24xx_reset_risc(scsi_qla_host_t *ha)
* Returns 0 on success.
*/
void
-qla24xx_reset_chip(scsi_qla_host_t *ha)
+qla24xx_reset_chip(scsi_qla_host_t *vha)
{
+ struct qla_hw_data *ha = vha->hw;
ha->isp_ops->disable_intrs(ha);
/* Perform RISC reset. */
- qla24xx_reset_risc(ha);
+ qla24xx_reset_risc(vha);
}
/**
@@ -602,20 +599,22 @@ qla24xx_reset_chip(scsi_qla_host_t *ha)
* Returns 0 on success.
*/
int
-qla2x00_chip_diag(scsi_qla_host_t *ha)
+qla2x00_chip_diag(scsi_qla_host_t *vha)
{
int rval;
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
unsigned long flags = 0;
uint16_t data;
uint32_t cnt;
uint16_t mb[5];
+ struct req_que *req = ha->req_q_map[0];
/* Assume a failed state */
rval = QLA_FUNCTION_FAILED;
DEBUG3(printk("scsi(%ld): Testing device at %lx.\n",
- ha->host_no, (u_long)&reg->flash_address));
+ vha->host_no, (u_long)&reg->flash_address));
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -678,17 +677,17 @@ qla2x00_chip_diag(scsi_qla_host_t *ha)
ha->product_id[3] = mb[4];
/* Adjust fw RISC transfer size */
- if (ha->request_q_length > 1024)
+ if (req->length > 1024)
ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
else
ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
- ha->request_q_length;
+ req->length;
if (IS_QLA2200(ha) &&
RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
/* Limit firmware transfer size with a 2200A */
DEBUG3(printk("scsi(%ld): Found QLA2200A chip.\n",
- ha->host_no));
+ vha->host_no));
ha->device_type |= DT_ISP2200A;
ha->fw_transfer_size = 128;
@@ -697,11 +696,11 @@ qla2x00_chip_diag(scsi_qla_host_t *ha)
/* Wrap Incoming Mailboxes Test. */
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", ha->host_no));
- rval = qla2x00_mbx_reg_test(ha);
+ DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", vha->host_no));
+ rval = qla2x00_mbx_reg_test(vha);
if (rval) {
DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
- ha->host_no));
+ vha->host_no));
qla_printk(KERN_WARNING, ha,
"Failed mailbox send register test\n");
}
@@ -714,7 +713,7 @@ qla2x00_chip_diag(scsi_qla_host_t *ha)
chip_diag_failed:
if (rval)
DEBUG2_3(printk("scsi(%ld): Chip diagnostics **** FAILED "
- "****\n", ha->host_no));
+ "****\n", vha->host_no));
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -728,19 +727,21 @@ chip_diag_failed:
* Returns 0 on success.
*/
int
-qla24xx_chip_diag(scsi_qla_host_t *ha)
+qla24xx_chip_diag(scsi_qla_host_t *vha)
{
int rval;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
/* Perform RISC reset. */
- qla24xx_reset_risc(ha);
+ qla24xx_reset_risc(vha);
- ha->fw_transfer_size = REQUEST_ENTRY_SIZE * ha->request_q_length;
+ ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
- rval = qla2x00_mbx_reg_test(ha);
+ rval = qla2x00_mbx_reg_test(vha);
if (rval) {
DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
- ha->host_no));
+ vha->host_no));
qla_printk(KERN_WARNING, ha,
"Failed mailbox send register test\n");
} else {
@@ -752,13 +753,16 @@ qla24xx_chip_diag(scsi_qla_host_t *ha)
}
void
-qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
+qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
{
int rval;
uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
- eft_size, fce_size;
+ eft_size, fce_size, mq_size;
dma_addr_t tc_dma;
void *tc;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
if (ha->fw_dump) {
qla_printk(KERN_WARNING, ha,
@@ -767,7 +771,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
}
ha->fw_dumped = 0;
- fixed_size = mem_size = eft_size = fce_size = 0;
+ fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
fixed_size = sizeof(struct qla2100_fw_dump);
} else if (IS_QLA23XX(ha)) {
@@ -776,10 +780,12 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
sizeof(uint16_t);
} else if (IS_FWI2_CAPABLE(ha)) {
fixed_size = IS_QLA25XX(ha) ?
- offsetof(struct qla25xx_fw_dump, ext_mem):
- offsetof(struct qla24xx_fw_dump, ext_mem);
+ offsetof(struct qla25xx_fw_dump, ext_mem) :
+ offsetof(struct qla24xx_fw_dump, ext_mem);
mem_size = (ha->fw_memory_size - 0x100000 + 1) *
sizeof(uint32_t);
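+ /* With multi-queue enabled, reserve room in the dump for the MQ register chain. */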
+ if (ha->mqenable)
+ mq_size = sizeof(struct qla2xxx_mq_chain);
/* Allocate memory for Fibre Channel Event Buffer. */
if (!IS_QLA25XX(ha))
@@ -794,7 +800,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
}
memset(tc, 0, FCE_SIZE);
- rval = qla2x00_enable_fce_trace(ha, tc_dma, FCE_NUM_BUFFERS,
+ rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
ha->fce_mb, &ha->fce_bufs);
if (rval) {
qla_printk(KERN_WARNING, ha, "Unable to initialize "
@@ -823,7 +829,7 @@ try_eft:
}
memset(tc, 0, EFT_SIZE);
- rval = qla2x00_enable_eft_trace(ha, tc_dma, EFT_NUM_BUFFERS);
+ rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
if (rval) {
qla_printk(KERN_WARNING, ha, "Unable to initialize "
"EFT (%d).\n", rval);
@@ -840,12 +846,12 @@ try_eft:
ha->eft = tc;
}
cont_alloc:
- req_q_size = ha->request_q_length * sizeof(request_t);
- rsp_q_size = ha->response_q_length * sizeof(response_t);
+ req_q_size = req->length * sizeof(request_t);
+ rsp_q_size = rsp->length * sizeof(response_t);
dump_size = offsetof(struct qla2xxx_fw_dump, isp);
dump_size += fixed_size + mem_size + req_q_size + rsp_q_size +
- eft_size + fce_size;
+ mq_size + eft_size + fce_size;
ha->fw_dump = vmalloc(dump_size);
if (!ha->fw_dump) {
@@ -860,7 +866,6 @@ cont_alloc:
}
return;
}
-
qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n",
dump_size / 1024);
@@ -891,27 +896,29 @@ cont_alloc:
* Returns 0 on success.
*/
static void
-qla2x00_resize_request_q(scsi_qla_host_t *ha)
+qla2x00_resize_request_q(scsi_qla_host_t *vha)
{
int rval;
uint16_t fw_iocb_cnt = 0;
uint16_t request_q_length = REQUEST_ENTRY_CNT_2XXX_EXT_MEM;
dma_addr_t request_dma;
request_t *request_ring;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
/* Valid only on recent ISPs. */
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return;
/* Retrieve IOCB counts available to the firmware. */
- rval = qla2x00_get_resource_cnts(ha, NULL, NULL, NULL, &fw_iocb_cnt,
- &ha->max_npiv_vports);
+ rval = qla2x00_get_resource_cnts(vha, NULL, NULL, NULL, &fw_iocb_cnt,
+ &ha->max_npiv_vports);
if (rval)
return;
/* No point in continuing if current settings are sufficient. */
if (fw_iocb_cnt < 1024)
return;
- if (ha->request_q_length >= request_q_length)
+ if (req->length >= request_q_length)
return;
/* Attempt to claim larger area for request queue. */
@@ -925,17 +932,17 @@ qla2x00_resize_request_q(scsi_qla_host_t *ha)
qla_printk(KERN_INFO, ha, "Extended memory detected (%d KB)...\n",
(ha->fw_memory_size + 1) / 1024);
qla_printk(KERN_INFO, ha, "Resizing request queue depth "
- "(%d -> %d)...\n", ha->request_q_length, request_q_length);
+ "(%d -> %d)...\n", req->length, request_q_length);
/* Clear old allocations. */
dma_free_coherent(&ha->pdev->dev,
- (ha->request_q_length + 1) * sizeof(request_t), ha->request_ring,
- ha->request_dma);
+ (req->length + 1) * sizeof(request_t), req->ring,
+ req->dma);
/* Begin using larger queue. */
- ha->request_q_length = request_q_length;
- ha->request_ring = request_ring;
- ha->request_dma = request_dma;
+ req->length = request_q_length;
+ req->ring = request_ring;
+ req->dma = request_dma;
}
/**
@@ -945,10 +952,11 @@ qla2x00_resize_request_q(scsi_qla_host_t *ha)
* Returns 0 on success.
*/
static int
-qla2x00_setup_chip(scsi_qla_host_t *ha)
+qla2x00_setup_chip(scsi_qla_host_t *vha)
{
int rval;
uint32_t srisc_address = 0;
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
unsigned long flags;
@@ -961,29 +969,27 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
}
/* Load firmware sequences */
- rval = ha->isp_ops->load_risc(ha, &srisc_address);
+ rval = ha->isp_ops->load_risc(vha, &srisc_address);
if (rval == QLA_SUCCESS) {
DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC "
- "code.\n", ha->host_no));
+ "code.\n", vha->host_no));
- rval = qla2x00_verify_checksum(ha, srisc_address);
+ rval = qla2x00_verify_checksum(vha, srisc_address);
if (rval == QLA_SUCCESS) {
/* Start firmware execution. */
DEBUG(printk("scsi(%ld): Checksum OK, start "
- "firmware.\n", ha->host_no));
+ "firmware.\n", vha->host_no));
- rval = qla2x00_execute_fw(ha, srisc_address);
+ rval = qla2x00_execute_fw(vha, srisc_address);
/* Retrieve firmware information. */
if (rval == QLA_SUCCESS && ha->fw_major_version == 0) {
- qla2x00_get_fw_version(ha,
+ qla2x00_get_fw_version(vha,
&ha->fw_major_version,
&ha->fw_minor_version,
&ha->fw_subminor_version,
&ha->fw_attributes, &ha->fw_memory_size);
- qla2x00_resize_request_q(ha);
ha->flags.npiv_supported = 0;
- if ((IS_QLA24XX(ha) || IS_QLA25XX(ha) ||
- IS_QLA84XX(ha)) &&
+ if (IS_QLA2XXX_MIDTYPE(ha) &&
(ha->fw_attributes & BIT_2)) {
ha->flags.npiv_supported = 1;
if ((!ha->max_npiv_vports) ||
@@ -992,14 +998,15 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
ha->max_npiv_vports =
MIN_MULTI_ID_FABRIC - 1;
}
+ qla2x00_resize_request_q(vha);
if (ql2xallocfwdump)
- qla2x00_alloc_fw_dump(ha);
+ qla2x00_alloc_fw_dump(vha);
}
} else {
DEBUG2(printk(KERN_INFO
"scsi(%ld): ISP Firmware failed checksum.\n",
- ha->host_no));
+ vha->host_no));
}
}
@@ -1018,7 +1025,7 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
if (rval) {
DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
- ha->host_no));
+ vha->host_no));
}
return (rval);
@@ -1033,14 +1040,14 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
*
* Returns 0 on success.
*/
-static void
-qla2x00_init_response_q_entries(scsi_qla_host_t *ha)
+void
+qla2x00_init_response_q_entries(struct rsp_que *rsp)
{
uint16_t cnt;
response_t *pkt;
- pkt = ha->response_ring_ptr;
- for (cnt = 0; cnt < ha->response_q_length; cnt++) {
+ pkt = rsp->ring_ptr;
+ for (cnt = 0; cnt < rsp->length; cnt++) {
pkt->signature = RESPONSE_PROCESSED;
pkt++;
}
@@ -1054,19 +1061,20 @@ qla2x00_init_response_q_entries(scsi_qla_host_t *ha)
* Returns 0 on success.
*/
void
-qla2x00_update_fw_options(scsi_qla_host_t *ha)
+qla2x00_update_fw_options(scsi_qla_host_t *vha)
{
uint16_t swing, emphasis, tx_sens, rx_sens;
+ struct qla_hw_data *ha = vha->hw;
memset(ha->fw_options, 0, sizeof(ha->fw_options));
- qla2x00_get_fw_options(ha, ha->fw_options);
+ qla2x00_get_fw_options(vha, ha->fw_options);
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return;
/* Serial Link options. */
DEBUG3(printk("scsi(%ld): Serial link options:\n",
- ha->host_no));
+ vha->host_no));
DEBUG3(qla2x00_dump_buffer((uint8_t *)&ha->fw_seriallink_options,
sizeof(ha->fw_seriallink_options)));
@@ -1124,19 +1132,20 @@ qla2x00_update_fw_options(scsi_qla_host_t *ha)
ha->fw_options[2] |= BIT_13;
/* Update firmware options. */
- qla2x00_set_fw_options(ha, ha->fw_options);
+ qla2x00_set_fw_options(vha, ha->fw_options);
}
void
-qla24xx_update_fw_options(scsi_qla_host_t *ha)
+qla24xx_update_fw_options(scsi_qla_host_t *vha)
{
int rval;
+ struct qla_hw_data *ha = vha->hw;
/* Update Serial Link options. */
if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
return;
- rval = qla2x00_set_serdes_params(ha,
+ rval = qla2x00_set_serdes_params(vha,
le16_to_cpu(ha->fw_seriallink_options24[1]),
le16_to_cpu(ha->fw_seriallink_options24[2]),
le16_to_cpu(ha->fw_seriallink_options24[3]));
@@ -1147,19 +1156,22 @@ qla24xx_update_fw_options(scsi_qla_host_t *ha)
}
void
-qla2x00_config_rings(struct scsi_qla_host *ha)
+qla2x00_config_rings(struct scsi_qla_host *vha)
{
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
/* Setup ring parameters in initialization control block. */
ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0);
ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0);
- ha->init_cb->request_q_length = cpu_to_le16(ha->request_q_length);
- ha->init_cb->response_q_length = cpu_to_le16(ha->response_q_length);
- ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(ha->request_dma));
- ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(ha->request_dma));
- ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(ha->response_dma));
- ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(ha->response_dma));
+ ha->init_cb->request_q_length = cpu_to_le16(req->length);
+ ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
+ ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
+ ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
+ ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
+ ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
@@ -1169,27 +1181,62 @@ qla2x00_config_rings(struct scsi_qla_host *ha)
}
void
-qla24xx_config_rings(struct scsi_qla_host *ha)
+qla24xx_config_rings(struct scsi_qla_host *vha)
{
- struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ struct qla_hw_data *ha = vha->hw;
+ device_reg_t __iomem *reg = ISP_QUE_REG(ha, 0);
+ struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
+ struct qla_msix_entry *msix;
struct init_cb_24xx *icb;
+ uint16_t rid = 0;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
- /* Setup ring parameters in initialization control block. */
+ /* Setup ring parameters in initialization control block. */
icb = (struct init_cb_24xx *)ha->init_cb;
icb->request_q_outpointer = __constant_cpu_to_le16(0);
icb->response_q_inpointer = __constant_cpu_to_le16(0);
- icb->request_q_length = cpu_to_le16(ha->request_q_length);
- icb->response_q_length = cpu_to_le16(ha->response_q_length);
- icb->request_q_address[0] = cpu_to_le32(LSD(ha->request_dma));
- icb->request_q_address[1] = cpu_to_le32(MSD(ha->request_dma));
- icb->response_q_address[0] = cpu_to_le32(LSD(ha->response_dma));
- icb->response_q_address[1] = cpu_to_le32(MSD(ha->response_dma));
-
- WRT_REG_DWORD(&reg->req_q_in, 0);
- WRT_REG_DWORD(&reg->req_q_out, 0);
- WRT_REG_DWORD(&reg->rsp_q_in, 0);
- WRT_REG_DWORD(&reg->rsp_q_out, 0);
- RD_REG_DWORD(&reg->rsp_q_out);
+ icb->request_q_length = cpu_to_le16(req->length);
+ icb->response_q_length = cpu_to_le16(rsp->length);
+ icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
+ icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
+ icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
+ icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
+
+ if (ha->mqenable) {
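+ /* Multi-queue: hand the firmware the QoS, routing ID and MSI-X vector for the base queue. */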
+ icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
+ icb->rid = __constant_cpu_to_le16(rid);
+ if (ha->flags.msix_enabled) {
+ msix = &ha->msix_entries[1];
+ DEBUG2_17(printk(KERN_INFO
+ "Reistering vector 0x%x for base que\n", msix->entry));
+ icb->msix = cpu_to_le16(msix->entry);
+ }
+ /* Use alternate PCI bus number */
+ if (MSB(rid))
+ icb->firmware_options_2 |=
+ __constant_cpu_to_le32(BIT_19);
+ /* Use alternate PCI devfn */
+ if (LSB(rid))
+ icb->firmware_options_2 |=
+ __constant_cpu_to_le32(BIT_18);
+
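+ /* Extra firmware options required while multi-queue is active. */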
+ icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22);
+ icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);
+ ha->rsp_q_map[0]->options = icb->firmware_options_2;
+
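+ /* Zero the base-queue in/out pointers through the MQ register window. */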
+ WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
+ WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
+ WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
+ WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
+ } else {
+ WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
+ WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
+ WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
+ WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
+ }
+ /* PCI posting */
+ RD_REG_DWORD(&ioreg->hccr);
}
/**
@@ -1202,11 +1249,14 @@ qla24xx_config_rings(struct scsi_qla_host *ha)
* Returns 0 on success.
*/
static int
-qla2x00_init_rings(scsi_qla_host_t *ha)
+qla2x00_init_rings(scsi_qla_host_t *vha)
{
int rval;
unsigned long flags = 0;
int cnt;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
struct mid_init_cb_24xx *mid_init_cb =
(struct mid_init_cb_24xx *) ha->init_cb;
@@ -1214,45 +1264,45 @@ qla2x00_init_rings(scsi_qla_host_t *ha)
/* Clear outstanding commands array. */
for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
- ha->outstanding_cmds[cnt] = NULL;
+ req->outstanding_cmds[cnt] = NULL;
- ha->current_outstanding_cmd = 0;
+ req->current_outstanding_cmd = 0;
/* Clear RSCN queue. */
- ha->rscn_in_ptr = 0;
- ha->rscn_out_ptr = 0;
+ vha->rscn_in_ptr = 0;
+ vha->rscn_out_ptr = 0;
/* Initialize firmware. */
- ha->request_ring_ptr = ha->request_ring;
- ha->req_ring_index = 0;
- ha->req_q_cnt = ha->request_q_length;
- ha->response_ring_ptr = ha->response_ring;
- ha->rsp_ring_index = 0;
+ req->ring_ptr = req->ring;
+ req->ring_index = 0;
+ req->cnt = req->length;
+ rsp->ring_ptr = rsp->ring;
+ rsp->ring_index = 0;
/* Initialize response queue entries */
- qla2x00_init_response_q_entries(ha);
+ qla2x00_init_response_q_entries(rsp);
- ha->isp_ops->config_rings(ha);
+ ha->isp_ops->config_rings(vha);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Update any ISP specific firmware options before initialization. */
- ha->isp_ops->update_fw_options(ha);
+ ha->isp_ops->update_fw_options(vha);
- DEBUG(printk("scsi(%ld): Issue init firmware.\n", ha->host_no));
+ DEBUG(printk("scsi(%ld): Issue init firmware.\n", vha->host_no));
if (ha->flags.npiv_supported)
mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
- rval = qla2x00_init_firmware(ha, ha->init_cb_size);
+ rval = qla2x00_init_firmware(vha, ha->init_cb_size);
if (rval) {
DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n",
- ha->host_no));
+ vha->host_no));
} else {
DEBUG3(printk("scsi(%ld): Init firmware -- success.\n",
- ha->host_no));
+ vha->host_no));
}
return (rval);
@@ -1265,13 +1315,14 @@ qla2x00_init_rings(scsi_qla_host_t *ha)
* Returns 0 on success.
*/
static int
-qla2x00_fw_ready(scsi_qla_host_t *ha)
+qla2x00_fw_ready(scsi_qla_host_t *vha)
{
int rval;
unsigned long wtime, mtime, cs84xx_time;
uint16_t min_wait; /* Minimum wait time if loop is down */
uint16_t wait_time; /* Wait time if loop is coming ready */
uint16_t state[3];
+ struct qla_hw_data *ha = vha->hw;
rval = QLA_SUCCESS;
@@ -1293,29 +1344,29 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
wtime = jiffies + (wait_time * HZ);
/* Wait for ISP to finish LIP */
- if (!ha->flags.init_done)
+ if (!vha->flags.init_done)
qla_printk(KERN_INFO, ha, "Waiting for LIP to complete...\n");
DEBUG3(printk("scsi(%ld): Waiting for LIP to complete...\n",
- ha->host_no));
+ vha->host_no));
do {
- rval = qla2x00_get_firmware_state(ha, state);
+ rval = qla2x00_get_firmware_state(vha, state);
if (rval == QLA_SUCCESS) {
if (state[0] < FSTATE_LOSS_OF_SYNC) {
- ha->device_flags &= ~DFLG_NO_CABLE;
+ vha->device_flags &= ~DFLG_NO_CABLE;
}
if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
DEBUG16(printk("scsi(%ld): fw_state=%x "
- "84xx=%x.\n", ha->host_no, state[0],
+ "84xx=%x.\n", vha->host_no, state[0],
state[2]));
if ((state[2] & FSTATE_LOGGED_IN) &&
(state[2] & FSTATE_WAITING_FOR_VERIFY)) {
DEBUG16(printk("scsi(%ld): Sending "
- "verify iocb.\n", ha->host_no));
+ "verify iocb.\n", vha->host_no));
cs84xx_time = jiffies;
- rval = qla84xx_init_chip(ha);
+ rval = qla84xx_init_chip(vha);
if (rval != QLA_SUCCESS)
break;
@@ -1325,13 +1376,13 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
mtime += cs84xx_time;
DEBUG16(printk("scsi(%ld): Increasing "
"wait time by %ld. New time %ld\n",
- ha->host_no, cs84xx_time, wtime));
+ vha->host_no, cs84xx_time, wtime));
}
} else if (state[0] == FSTATE_READY) {
DEBUG(printk("scsi(%ld): F/W Ready - OK \n",
- ha->host_no));
+ vha->host_no));
- qla2x00_get_retry_cnt(ha, &ha->retry_count,
+ qla2x00_get_retry_cnt(vha, &ha->retry_count,
&ha->login_timeout, &ha->r_a_tov);
rval = QLA_SUCCESS;
@@ -1340,7 +1391,7 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
rval = QLA_FUNCTION_FAILED;
- if (atomic_read(&ha->loop_down_timer) &&
+ if (atomic_read(&vha->loop_down_timer) &&
state[0] != FSTATE_READY) {
/* Loop down. Timeout on min_wait for states
* other than Wait for Login.
@@ -1349,7 +1400,7 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
qla_printk(KERN_INFO, ha,
"Cable is unplugged...\n");
- ha->device_flags |= DFLG_NO_CABLE;
+ vha->device_flags |= DFLG_NO_CABLE;
break;
}
}
@@ -1366,15 +1417,15 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
msleep(500);
DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
- ha->host_no, state[0], jiffies));
+ vha->host_no, state[0], jiffies));
} while (1);
DEBUG(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
- ha->host_no, state[0], jiffies));
+ vha->host_no, state[0], jiffies));
if (rval) {
DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n",
- ha->host_no));
+ vha->host_no));
}
return (rval);
@@ -1394,7 +1445,7 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
* Kernel context.
*/
static int
-qla2x00_configure_hba(scsi_qla_host_t *ha)
+qla2x00_configure_hba(scsi_qla_host_t *vha)
{
int rval;
uint16_t loop_id;
@@ -1404,19 +1455,20 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
uint8_t area;
uint8_t domain;
char connect_type[22];
+ struct qla_hw_data *ha = vha->hw;
/* Get host addresses. */
- rval = qla2x00_get_adapter_id(ha,
+ rval = qla2x00_get_adapter_id(vha,
&loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
if (rval != QLA_SUCCESS) {
- if (LOOP_TRANSITION(ha) || atomic_read(&ha->loop_down_timer) ||
+ if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
(rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
DEBUG2(printk("%s(%ld) Loop is in a transition state\n",
- __func__, ha->host_no));
+ __func__, vha->host_no));
} else {
qla_printk(KERN_WARNING, ha,
"ERROR -- Unable to get host loop ID.\n");
- set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
}
return (rval);
}
@@ -1427,7 +1479,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
return (QLA_FUNCTION_FAILED);
}
- ha->loop_id = loop_id;
+ vha->loop_id = loop_id;
/* initialize */
ha->min_external_loopid = SNS_FIRST_LOOP_ID;
@@ -1437,14 +1489,14 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
switch (topo) {
case 0:
DEBUG3(printk("scsi(%ld): HBA in NL topology.\n",
- ha->host_no));
+ vha->host_no));
ha->current_topology = ISP_CFG_NL;
strcpy(connect_type, "(Loop)");
break;
case 1:
DEBUG3(printk("scsi(%ld): HBA in FL topology.\n",
- ha->host_no));
+ vha->host_no));
ha->switch_cap = sw_cap;
ha->current_topology = ISP_CFG_FL;
strcpy(connect_type, "(FL_Port)");
@@ -1452,7 +1504,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
case 2:
DEBUG3(printk("scsi(%ld): HBA in N P2P topology.\n",
- ha->host_no));
+ vha->host_no));
ha->operating_mode = P2P;
ha->current_topology = ISP_CFG_N;
strcpy(connect_type, "(N_Port-to-N_Port)");
@@ -1460,7 +1512,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
case 3:
DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n",
- ha->host_no));
+ vha->host_no));
ha->switch_cap = sw_cap;
ha->operating_mode = P2P;
ha->current_topology = ISP_CFG_F;
@@ -1470,7 +1522,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
default:
DEBUG3(printk("scsi(%ld): HBA in unknown topology %x. "
"Using NL.\n",
- ha->host_no, topo));
+ vha->host_no, topo));
ha->current_topology = ISP_CFG_NL;
strcpy(connect_type, "(Loop)");
break;
@@ -1478,29 +1530,31 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
/* Save Host port and loop ID. */
/* byte order - Big Endian */
- ha->d_id.b.domain = domain;
- ha->d_id.b.area = area;
- ha->d_id.b.al_pa = al_pa;
+ vha->d_id.b.domain = domain;
+ vha->d_id.b.area = area;
+ vha->d_id.b.al_pa = al_pa;
- if (!ha->flags.init_done)
+ if (!vha->flags.init_done)
qla_printk(KERN_INFO, ha,
"Topology - %s, Host Loop address 0x%x\n",
- connect_type, ha->loop_id);
+ connect_type, vha->loop_id);
if (rval) {
- DEBUG2_3(printk("scsi(%ld): FAILED.\n", ha->host_no));
+ DEBUG2_3(printk("scsi(%ld): FAILED.\n", vha->host_no));
} else {
- DEBUG3(printk("scsi(%ld): exiting normally.\n", ha->host_no));
+ DEBUG3(printk("scsi(%ld): exiting normally.\n", vha->host_no));
}
return(rval);
}
static inline void
-qla2x00_set_model_info(scsi_qla_host_t *ha, uint8_t *model, size_t len, char *def)
+qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
+ char *def)
{
char *st, *en;
uint16_t index;
+ struct qla_hw_data *ha = vha->hw;
if (memcmp(model, BINZERO, len) != 0) {
strncpy(ha->model_number, model, len);
@@ -1532,16 +1586,17 @@ qla2x00_set_model_info(scsi_qla_host_t *ha, uint8_t *model, size_t len, char *de
}
}
if (IS_FWI2_CAPABLE(ha))
- qla2xxx_get_vpd_field(ha, "\x82", ha->model_desc,
+ qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
sizeof(ha->model_desc));
}
/* On sparc systems, obtain port and node WWN from firmware
* properties.
*/
-static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, nvram_t *nv)
+static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
{
#ifdef CONFIG_SPARC
+ struct qla_hw_data *ha = vha->hw;
struct pci_dev *pdev = ha->pdev;
struct device_node *dp = pci_device_to_OF_node(pdev);
const u8 *val;
@@ -1571,12 +1626,13 @@ static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, nvram_t *nv)
* 0 = success.
*/
int
-qla2x00_nvram_config(scsi_qla_host_t *ha)
+qla2x00_nvram_config(scsi_qla_host_t *vha)
{
int rval;
uint8_t chksum = 0;
uint16_t cnt;
uint8_t *dptr1, *dptr2;
+ struct qla_hw_data *ha = vha->hw;
init_cb_t *icb = ha->init_cb;
nvram_t *nv = ha->nvram;
uint8_t *ptr = ha->nvram;
@@ -1592,11 +1648,11 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
ha->nvram_base = 0x80;
/* Get NVRAM data and calculate checksum. */
- ha->isp_ops->read_nvram(ha, ptr, ha->nvram_base, ha->nvram_size);
+ ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
chksum += *ptr++;
- DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no));
+ DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
/* Bad NVRAM data, set default parameters. */
@@ -1610,7 +1666,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
"invalid -- WWPN) defaults.\n");
if (chksum)
- qla2xxx_hw_event_log(ha, HW_EVENT_NVRAM_CHKSUM_ERR, 0,
+ qla2xxx_hw_event_log(vha, HW_EVENT_NVRAM_CHKSUM_ERR, 0,
MSW(chksum), LSW(chksum));
/*
@@ -1647,7 +1703,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
nv->port_name[3] = 224;
nv->port_name[4] = 139;
- qla2xxx_nvram_wwn_from_ofw(ha, nv);
+ qla2xxx_nvram_wwn_from_ofw(vha, nv);
nv->login_timeout = 4;
@@ -1700,7 +1756,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
strcpy(ha->model_number, "QLA2300");
}
} else {
- qla2x00_set_model_info(ha, nv->model_number,
+ qla2x00_set_model_info(vha, nv->model_number,
sizeof(nv->model_number), "QLA23xx");
}
} else if (IS_QLA2200(ha)) {
@@ -1776,8 +1832,8 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
ha->serial0 = icb->port_name[5];
ha->serial1 = icb->port_name[6];
ha->serial2 = icb->port_name[7];
- ha->node_name = icb->node_name;
- ha->port_name = icb->port_name;
+ memcpy(vha->node_name, icb->node_name, WWN_SIZE);
+ memcpy(vha->port_name, icb->port_name, WWN_SIZE);
icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
@@ -1845,10 +1901,10 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
icb->response_accumulation_timer = 3;
icb->interrupt_delay_timer = 5;
- ha->flags.process_response_queue = 1;
+ vha->flags.process_response_queue = 1;
} else {
/* Enable ZIO. */
- if (!ha->flags.init_done) {
+ if (!vha->flags.init_done) {
ha->zio_mode = icb->add_firmware_options[0] &
(BIT_3 | BIT_2 | BIT_1 | BIT_0);
ha->zio_timer = icb->interrupt_delay_timer ?
@@ -1856,12 +1912,12 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
}
icb->add_firmware_options[0] &=
~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
- ha->flags.process_response_queue = 0;
+ vha->flags.process_response_queue = 0;
if (ha->zio_mode != QLA_ZIO_DISABLED) {
ha->zio_mode = QLA_ZIO_MODE_6;
DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer "
- "delay (%d us).\n", ha->host_no, ha->zio_mode,
+ "delay (%d us).\n", vha->host_no, ha->zio_mode,
ha->zio_timer * 100));
qla_printk(KERN_INFO, ha,
"ZIO mode %d enabled; timer delay (%d us).\n",
@@ -1869,13 +1925,13 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
- ha->flags.process_response_queue = 1;
+ vha->flags.process_response_queue = 1;
}
}
if (rval) {
DEBUG2_3(printk(KERN_WARNING
- "scsi(%ld): NVRAM configuration failed!\n", ha->host_no));
+ "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
}
return (rval);
}
@@ -1886,10 +1942,10 @@ qla2x00_rport_del(void *data)
fc_port_t *fcport = data;
struct fc_rport *rport;
- spin_lock_irq(fcport->ha->host->host_lock);
+ spin_lock_irq(fcport->vha->host->host_lock);
rport = fcport->drport;
fcport->drport = NULL;
- spin_unlock_irq(fcport->ha->host->host_lock);
+ spin_unlock_irq(fcport->vha->host->host_lock);
if (rport)
fc_remote_port_delete(rport);
}
@@ -1902,7 +1958,7 @@ qla2x00_rport_del(void *data)
* Returns a pointer to the allocated fcport, or NULL, if none available.
*/
static fc_port_t *
-qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
+qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
{
fc_port_t *fcport;
@@ -1911,8 +1967,8 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
return NULL;
/* Setup fcport template structure. */
- fcport->ha = ha;
- fcport->vp_idx = ha->vp_idx;
+ fcport->vha = vha;
+ fcport->vp_idx = vha->vp_idx;
fcport->port_type = FCT_UNKNOWN;
fcport->loop_id = FC_NO_LOOP_ID;
atomic_set(&fcport->state, FCS_UNCONFIGURED);
@@ -1935,101 +1991,97 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
* 2 = database was full and device was not configured.
*/
static int
-qla2x00_configure_loop(scsi_qla_host_t *ha)
+qla2x00_configure_loop(scsi_qla_host_t *vha)
{
int rval;
unsigned long flags, save_flags;
-
+ struct qla_hw_data *ha = vha->hw;
rval = QLA_SUCCESS;
/* Get Initiator ID */
- if (test_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags)) {
- rval = qla2x00_configure_hba(ha);
+ if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
+ rval = qla2x00_configure_hba(vha);
if (rval != QLA_SUCCESS) {
DEBUG(printk("scsi(%ld): Unable to configure HBA.\n",
- ha->host_no));
+ vha->host_no));
return (rval);
}
}
- save_flags = flags = ha->dpc_flags;
+ save_flags = flags = vha->dpc_flags;
DEBUG(printk("scsi(%ld): Configure loop -- dpc flags =0x%lx\n",
- ha->host_no, flags));
+ vha->host_no, flags));
/*
* If we have both an RSCN and PORT UPDATE pending then handle them
* both at the same time.
*/
- clear_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
- clear_bit(RSCN_UPDATE, &ha->dpc_flags);
+ clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ clear_bit(RSCN_UPDATE, &vha->dpc_flags);
/* Determine what we need to do */
if (ha->current_topology == ISP_CFG_FL &&
(test_bit(LOCAL_LOOP_UPDATE, &flags))) {
- ha->flags.rscn_queue_overflow = 1;
+ vha->flags.rscn_queue_overflow = 1;
set_bit(RSCN_UPDATE, &flags);
} else if (ha->current_topology == ISP_CFG_F &&
(test_bit(LOCAL_LOOP_UPDATE, &flags))) {
- ha->flags.rscn_queue_overflow = 1;
+ vha->flags.rscn_queue_overflow = 1;
set_bit(RSCN_UPDATE, &flags);
clear_bit(LOCAL_LOOP_UPDATE, &flags);
} else if (ha->current_topology == ISP_CFG_N) {
clear_bit(RSCN_UPDATE, &flags);
- } else if (!ha->flags.online ||
+ } else if (!vha->flags.online ||
(test_bit(ABORT_ISP_ACTIVE, &flags))) {
- ha->flags.rscn_queue_overflow = 1;
+ vha->flags.rscn_queue_overflow = 1;
set_bit(RSCN_UPDATE, &flags);
set_bit(LOCAL_LOOP_UPDATE, &flags);
}
if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
- if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) {
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
rval = QLA_FUNCTION_FAILED;
- } else {
- rval = qla2x00_configure_local_loop(ha);
- }
+ else
+ rval = qla2x00_configure_local_loop(vha);
}
if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
- if (LOOP_TRANSITION(ha)) {
+ if (LOOP_TRANSITION(vha))
rval = QLA_FUNCTION_FAILED;
- } else {
- rval = qla2x00_configure_fabric(ha);
- }
+ else
+ rval = qla2x00_configure_fabric(vha);
}
if (rval == QLA_SUCCESS) {
- if (atomic_read(&ha->loop_down_timer) ||
- test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) {
+ if (atomic_read(&vha->loop_down_timer) ||
+ test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
rval = QLA_FUNCTION_FAILED;
} else {
- atomic_set(&ha->loop_state, LOOP_READY);
+ atomic_set(&vha->loop_state, LOOP_READY);
- DEBUG(printk("scsi(%ld): LOOP READY\n", ha->host_no));
+ DEBUG(printk("scsi(%ld): LOOP READY\n", vha->host_no));
}
}
if (rval) {
DEBUG2_3(printk("%s(%ld): *** FAILED ***\n",
- __func__, ha->host_no));
+ __func__, vha->host_no));
} else {
DEBUG3(printk("%s: exiting normally\n", __func__));
}
/* Restore state if a resync event occurred during processing */
- if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) {
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
- set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
- if (test_bit(RSCN_UPDATE, &save_flags)) {
- ha->flags.rscn_queue_overflow = 1;
- set_bit(RSCN_UPDATE, &ha->dpc_flags);
- }
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ if (test_bit(RSCN_UPDATE, &save_flags))
+ set_bit(RSCN_UPDATE, &vha->dpc_flags);
}
return (rval);
@@ -2048,7 +2100,7 @@ qla2x00_configure_loop(scsi_qla_host_t *ha)
* 0 = success.
*/
static int
-qla2x00_configure_local_loop(scsi_qla_host_t *ha)
+qla2x00_configure_local_loop(scsi_qla_host_t *vha)
{
int rval, rval2;
int found_devs;
@@ -2060,18 +2112,18 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
char *id_iter;
uint16_t loop_id;
uint8_t domain, area, al_pa;
- scsi_qla_host_t *pha = to_qla_parent(ha);
+ struct qla_hw_data *ha = vha->hw;
found_devs = 0;
new_fcport = NULL;
entries = MAX_FIBRE_DEVICES;
- DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", ha->host_no));
- DEBUG3(qla2x00_get_fcal_position_map(ha, NULL));
+ DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", vha->host_no));
+ DEBUG3(qla2x00_get_fcal_position_map(vha, NULL));
/* Get list of logged in devices. */
memset(ha->gid_list, 0, GID_LIST_SIZE);
- rval = qla2x00_get_id_list(ha, ha->gid_list, ha->gid_list_dma,
+ rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
&entries);
if (rval != QLA_SUCCESS)
goto cleanup_allocation;
@@ -2082,7 +2134,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
entries * sizeof(struct gid_list_info)));
/* Allocate temporary fcport for any new fcports discovered. */
- new_fcport = qla2x00_alloc_fcport(ha, GFP_KERNEL);
+ new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (new_fcport == NULL) {
rval = QLA_MEMORY_ALLOC_FAILED;
goto cleanup_allocation;
@@ -2092,17 +2144,14 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
/*
* Mark local devices that were present with FCF_DEVICE_LOST for now.
*/
- list_for_each_entry(fcport, &pha->fcports, list) {
- if (fcport->vp_idx != ha->vp_idx)
- continue;
-
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (atomic_read(&fcport->state) == FCS_ONLINE &&
fcport->port_type != FCT_BROADCAST &&
(fcport->flags & FCF_FABRIC_DEVICE) == 0) {
DEBUG(printk("scsi(%ld): Marking port lost, "
"loop_id=0x%04x\n",
- ha->host_no, fcport->loop_id));
+ vha->host_no, fcport->loop_id));
atomic_set(&fcport->state, FCS_DEVICE_LOST);
fcport->flags &= ~FCF_FARP_DONE;
@@ -2129,7 +2178,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
/* Bypass if not same domain and area of adapter. */
if (area && domain &&
- (area != ha->d_id.b.area || domain != ha->d_id.b.domain))
+ (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
continue;
/* Bypass invalid local loop ID. */
@@ -2141,26 +2190,23 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
new_fcport->d_id.b.area = area;
new_fcport->d_id.b.al_pa = al_pa;
new_fcport->loop_id = loop_id;
- new_fcport->vp_idx = ha->vp_idx;
- rval2 = qla2x00_get_port_database(ha, new_fcport, 0);
+ new_fcport->vp_idx = vha->vp_idx;
+ rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
if (rval2 != QLA_SUCCESS) {
DEBUG2(printk("scsi(%ld): Failed to retrieve fcport "
"information -- get_port_database=%x, "
"loop_id=0x%04x\n",
- ha->host_no, rval2, new_fcport->loop_id));
+ vha->host_no, rval2, new_fcport->loop_id));
DEBUG2(printk("scsi(%ld): Scheduling resync...\n",
- ha->host_no));
- set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
+ vha->host_no));
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
continue;
}
/* Check for matching device in port list. */
found = 0;
fcport = NULL;
- list_for_each_entry(fcport, &pha->fcports, list) {
- if (fcport->vp_idx != ha->vp_idx)
- continue;
-
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (memcmp(new_fcport->port_name, fcport->port_name,
WWN_SIZE))
continue;
@@ -2180,17 +2226,15 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
if (!found) {
/* New device, add to fcports list. */
new_fcport->flags &= ~FCF_PERSISTENT_BOUND;
- if (ha->parent) {
- new_fcport->ha = ha;
- new_fcport->vp_idx = ha->vp_idx;
- list_add_tail(&new_fcport->vp_fcport,
- &ha->vp_fcports);
+ if (vha->vp_idx) {
+ new_fcport->vha = vha;
+ new_fcport->vp_idx = vha->vp_idx;
}
- list_add_tail(&new_fcport->list, &pha->fcports);
+ list_add_tail(&new_fcport->list, &vha->vp_fcports);
/* Allocate a new replacement fcport. */
fcport = new_fcport;
- new_fcport = qla2x00_alloc_fcport(ha, GFP_KERNEL);
+ new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (new_fcport == NULL) {
rval = QLA_MEMORY_ALLOC_FAILED;
goto cleanup_allocation;
@@ -2201,7 +2245,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
/* Base iIDMA settings on HBA port speed. */
fcport->fp_speed = ha->link_data_rate;
- qla2x00_update_fcport(ha, fcport);
+ qla2x00_update_fcport(vha, fcport);
found_devs++;
}
@@ -2211,24 +2255,25 @@ cleanup_allocation:
if (rval != QLA_SUCCESS) {
DEBUG2(printk("scsi(%ld): Configure local loop error exit: "
- "rval=%x\n", ha->host_no, rval));
+ "rval=%x\n", vha->host_no, rval));
}
if (found_devs) {
- ha->device_flags |= DFLG_LOCAL_DEVICES;
- ha->device_flags &= ~DFLG_RETRY_LOCAL_DEVICES;
+ vha->device_flags |= DFLG_LOCAL_DEVICES;
+ vha->device_flags &= ~DFLG_RETRY_LOCAL_DEVICES;
}
return (rval);
}
static void
-qla2x00_iidma_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
+qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
#define LS_UNKNOWN 2
static char *link_speeds[5] = { "1", "2", "?", "4", "8" };
int rval;
uint16_t mb[6];
+ struct qla_hw_data *ha = vha->hw;
if (!IS_IIDMA_CAPABLE(ha))
return;
@@ -2237,12 +2282,12 @@ qla2x00_iidma_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
fcport->fp_speed > ha->link_data_rate)
return;
- rval = qla2x00_set_idma_speed(ha, fcport->loop_id, fcport->fp_speed,
+ rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
mb);
if (rval != QLA_SUCCESS) {
DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA "
"%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n",
- ha->host_no, fcport->port_name[0], fcport->port_name[1],
+ vha->host_no, fcport->port_name[0], fcport->port_name[1],
fcport->port_name[2], fcport->port_name[3],
fcport->port_name[4], fcport->port_name[5],
fcport->port_name[6], fcport->port_name[7], rval,
@@ -2260,10 +2305,11 @@ qla2x00_iidma_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
}
static void
-qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
+qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
{
struct fc_rport_identifiers rport_ids;
struct fc_rport *rport;
+ struct qla_hw_data *ha = vha->hw;
if (fcport->drport)
qla2x00_rport_del(fcport);
@@ -2273,15 +2319,15 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
rport_ids.port_id = fcport->d_id.b.domain << 16 |
fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
- fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids);
+ fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
if (!rport) {
qla_printk(KERN_WARNING, ha,
"Unable to allocate fc remote port!\n");
return;
}
- spin_lock_irq(fcport->ha->host->host_lock);
+ spin_lock_irq(fcport->vha->host->host_lock);
*((fc_port_t **)rport->dd_data) = fcport;
- spin_unlock_irq(fcport->ha->host->host_lock);
+ spin_unlock_irq(fcport->vha->host->host_lock);
rport->supported_classes = fcport->supported_classes;
@@ -2309,23 +2355,23 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
* Kernel context.
*/
void
-qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
+qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
- scsi_qla_host_t *pha = to_qla_parent(ha);
+ struct qla_hw_data *ha = vha->hw;
- fcport->ha = ha;
+ fcport->vha = vha;
fcport->login_retry = 0;
- fcport->port_login_retry_count = pha->port_down_retry_count *
+ fcport->port_login_retry_count = ha->port_down_retry_count *
PORT_RETRY_TIME;
- atomic_set(&fcport->port_down_timer, pha->port_down_retry_count *
+ atomic_set(&fcport->port_down_timer, ha->port_down_retry_count *
PORT_RETRY_TIME);
fcport->flags &= ~FCF_LOGIN_NEEDED;
- qla2x00_iidma_fcport(ha, fcport);
+ qla2x00_iidma_fcport(vha, fcport);
atomic_set(&fcport->state, FCS_ONLINE);
- qla2x00_reg_remote_port(ha, fcport);
+ qla2x00_reg_remote_port(vha, fcport);
}
/*
@@ -2340,7 +2386,7 @@ qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
* BIT_0 = error
*/
static int
-qla2x00_configure_fabric(scsi_qla_host_t *ha)
+qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
int rval, rval2;
fc_port_t *fcport, *fcptemp;
@@ -2348,25 +2394,26 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
uint16_t mb[MAILBOX_REGISTER_COUNT];
uint16_t loop_id;
LIST_HEAD(new_fcports);
- scsi_qla_host_t *pha = to_qla_parent(ha);
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
/* If FL port exists, then SNS is present */
if (IS_FWI2_CAPABLE(ha))
loop_id = NPH_F_PORT;
else
loop_id = SNS_FL_PORT;
- rval = qla2x00_get_port_name(ha, loop_id, ha->fabric_node_name, 1);
+ rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
if (rval != QLA_SUCCESS) {
DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL "
- "Port\n", ha->host_no));
+ "Port\n", vha->host_no));
- ha->device_flags &= ~SWITCH_FOUND;
+ vha->device_flags &= ~SWITCH_FOUND;
return (QLA_SUCCESS);
}
- ha->device_flags |= SWITCH_FOUND;
+ vha->device_flags |= SWITCH_FOUND;
/* Mark devices that need re-synchronization. */
- rval2 = qla2x00_device_resync(ha);
+ rval2 = qla2x00_device_resync(vha);
if (rval2 == QLA_RSCNS_HANDLED) {
/* No point doing the scan, just continue. */
return (QLA_SUCCESS);
@@ -2374,15 +2421,15 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
do {
/* FDMI support. */
if (ql2xfdmienable &&
- test_and_clear_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags))
- qla2x00_fdmi_register(ha);
+ test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
+ qla2x00_fdmi_register(vha);
/* Ensure we are logged into the SNS. */
if (IS_FWI2_CAPABLE(ha))
loop_id = NPH_SNS;
else
loop_id = SIMPLE_NAME_SERVER;
- ha->isp_ops->fabric_login(ha, loop_id, 0xff, 0xff,
+ ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
0xfc, mb, BIT_1 | BIT_0);
if (mb[0] != MBS_COMMAND_COMPLETE) {
DEBUG2(qla_printk(KERN_INFO, ha,
@@ -2392,29 +2439,29 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
return (QLA_SUCCESS);
}
- if (test_and_clear_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags)) {
- if (qla2x00_rft_id(ha)) {
+ if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
+ if (qla2x00_rft_id(vha)) {
/* EMPTY */
DEBUG2(printk("scsi(%ld): Register FC-4 "
- "TYPE failed.\n", ha->host_no));
+ "TYPE failed.\n", vha->host_no));
}
- if (qla2x00_rff_id(ha)) {
+ if (qla2x00_rff_id(vha)) {
/* EMPTY */
DEBUG2(printk("scsi(%ld): Register FC-4 "
- "Features failed.\n", ha->host_no));
+ "Features failed.\n", vha->host_no));
}
- if (qla2x00_rnn_id(ha)) {
+ if (qla2x00_rnn_id(vha)) {
/* EMPTY */
DEBUG2(printk("scsi(%ld): Register Node Name "
- "failed.\n", ha->host_no));
- } else if (qla2x00_rsnn_nn(ha)) {
+ "failed.\n", vha->host_no));
+ } else if (qla2x00_rsnn_nn(vha)) {
/* EMPTY */
DEBUG2(printk("scsi(%ld): Register Symbolic "
- "Node Name failed.\n", ha->host_no));
+ "Node Name failed.\n", vha->host_no));
}
}
- rval = qla2x00_find_all_fabric_devs(ha, &new_fcports);
+ rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
if (rval != QLA_SUCCESS)
break;
@@ -2422,24 +2469,21 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
* Logout all previous fabric devices marked lost, except
* tape devices.
*/
- list_for_each_entry(fcport, &pha->fcports, list) {
- if (fcport->vp_idx !=ha->vp_idx)
- continue;
-
- if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
continue;
if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
- qla2x00_mark_device_lost(ha, fcport,
+ qla2x00_mark_device_lost(vha, fcport,
ql2xplogiabsentdevice, 0);
if (fcport->loop_id != FC_NO_LOOP_ID &&
(fcport->flags & FCF_TAPE_PRESENT) == 0 &&
fcport->port_type != FCT_INITIATOR &&
fcport->port_type != FCT_BROADCAST) {
- ha->isp_ops->fabric_logout(ha,
+ ha->isp_ops->fabric_logout(vha,
fcport->loop_id,
fcport->d_id.b.domain,
fcport->d_id.b.area,
@@ -2450,18 +2494,15 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
}
/* Starting free loop ID. */
- next_loopid = pha->min_external_loopid;
+ next_loopid = ha->min_external_loopid;
/*
* Scan through our port list and login entries that need to be
* logged in.
*/
- list_for_each_entry(fcport, &pha->fcports, list) {
- if (fcport->vp_idx != ha->vp_idx)
- continue;
-
- if (atomic_read(&ha->loop_down_timer) ||
- test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (atomic_read(&vha->loop_down_timer) ||
+ test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
@@ -2471,14 +2512,14 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
if (fcport->loop_id == FC_NO_LOOP_ID) {
fcport->loop_id = next_loopid;
rval = qla2x00_find_new_loop_id(
- to_qla_parent(ha), fcport);
+ base_vha, fcport);
if (rval != QLA_SUCCESS) {
/* Ran out of IDs to use */
break;
}
}
/* Login and update database */
- qla2x00_fabric_dev_login(ha, fcport, &next_loopid);
+ qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
}
/* Exit if out of loop IDs. */
@@ -2490,31 +2531,26 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
* Login and add the new devices to our port list.
*/
list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
- if (atomic_read(&ha->loop_down_timer) ||
- test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
+ if (atomic_read(&vha->loop_down_timer) ||
+ test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
/* Find a new loop ID to use. */
fcport->loop_id = next_loopid;
- rval = qla2x00_find_new_loop_id(to_qla_parent(ha),
- fcport);
+ rval = qla2x00_find_new_loop_id(base_vha, fcport);
if (rval != QLA_SUCCESS) {
/* Ran out of IDs to use */
break;
}
/* Login and update database */
- qla2x00_fabric_dev_login(ha, fcport, &next_loopid);
-
- if (ha->parent) {
- fcport->ha = ha;
- fcport->vp_idx = ha->vp_idx;
- list_add_tail(&fcport->vp_fcport,
- &ha->vp_fcports);
- list_move_tail(&fcport->list,
- &ha->parent->fcports);
- } else
- list_move_tail(&fcport->list, &ha->fcports);
+ qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
+
+ if (vha->vp_idx) {
+ fcport->vha = vha;
+ fcport->vp_idx = vha->vp_idx;
+ }
+ list_move_tail(&fcport->list, &vha->vp_fcports);
}
} while (0);
@@ -2526,7 +2562,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
if (rval) {
DEBUG2(printk("scsi(%ld): Configure fabric error exit: "
- "rval=%d\n", ha->host_no, rval));
+ "rval=%d\n", vha->host_no, rval));
}
return (rval);
@@ -2547,7 +2583,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
* Kernel context.
*/
static int
-qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
+qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
+ struct list_head *new_fcports)
{
int rval;
uint16_t loop_id;
@@ -2558,11 +2595,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
int swl_idx;
int first_dev, last_dev;
port_id_t wrap, nxt_d_id;
- int vp_index;
- int empty_vp_index;
- int found_vp;
- scsi_qla_host_t *vha;
- scsi_qla_host_t *pha = to_qla_parent(ha);
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
rval = QLA_SUCCESS;
@@ -2571,43 +2605,42 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
if (!swl) {
/*EMPTY*/
DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback "
- "on GA_NXT\n", ha->host_no));
+ "on GA_NXT\n", vha->host_no));
} else {
- if (qla2x00_gid_pt(ha, swl) != QLA_SUCCESS) {
+ if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
kfree(swl);
swl = NULL;
- } else if (qla2x00_gpn_id(ha, swl) != QLA_SUCCESS) {
+ } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
kfree(swl);
swl = NULL;
- } else if (qla2x00_gnn_id(ha, swl) != QLA_SUCCESS) {
+ } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
kfree(swl);
swl = NULL;
} else if (ql2xiidmaenable &&
- qla2x00_gfpn_id(ha, swl) == QLA_SUCCESS) {
- qla2x00_gpsc(ha, swl);
+ qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) {
+ qla2x00_gpsc(vha, swl);
}
}
swl_idx = 0;
/* Allocate temporary fcport for any new fcports discovered. */
- new_fcport = qla2x00_alloc_fcport(ha, GFP_KERNEL);
+ new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (new_fcport == NULL) {
kfree(swl);
return (QLA_MEMORY_ALLOC_FAILED);
}
new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
- new_fcport->vp_idx = ha->vp_idx;
/* Set start port ID scan at adapter ID. */
first_dev = 1;
last_dev = 0;
/* Starting free loop ID. */
- loop_id = pha->min_external_loopid;
- for (; loop_id <= ha->last_loop_id; loop_id++) {
- if (qla2x00_is_reserved_id(ha, loop_id))
+ loop_id = ha->min_external_loopid;
+ for (; loop_id <= ha->max_loop_id; loop_id++) {
+ if (qla2x00_is_reserved_id(vha, loop_id))
continue;
- if (atomic_read(&ha->loop_down_timer) || LOOP_TRANSITION(ha))
+ if (atomic_read(&vha->loop_down_timer) || LOOP_TRANSITION(vha))
break;
if (swl != NULL) {
@@ -2630,7 +2663,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
}
} else {
/* Send GA_NXT to the switch */
- rval = qla2x00_ga_nxt(ha, new_fcport);
+ rval = qla2x00_ga_nxt(vha, new_fcport);
if (rval != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
"SNS scan failed -- assuming zero-entry "
@@ -2651,44 +2684,31 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
first_dev = 0;
} else if (new_fcport->d_id.b24 == wrap.b24) {
DEBUG2(printk("scsi(%ld): device wrap (%02x%02x%02x)\n",
- ha->host_no, new_fcport->d_id.b.domain,
+ vha->host_no, new_fcport->d_id.b.domain,
new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa));
break;
}
/* Bypass if same physical adapter. */
- if (new_fcport->d_id.b24 == pha->d_id.b24)
+ if (new_fcport->d_id.b24 == base_vha->d_id.b24)
continue;
/* Bypass virtual ports of the same host. */
- if (pha->num_vhosts) {
- for_each_mapped_vp_idx(pha, vp_index) {
- empty_vp_index = 1;
- found_vp = 0;
- list_for_each_entry(vha, &pha->vp_list,
- vp_list) {
- if (vp_index == vha->vp_idx) {
- empty_vp_index = 0;
- found_vp = 1;
- break;
- }
- }
-
- if (empty_vp_index)
- continue;
-
- if (found_vp &&
- new_fcport->d_id.b24 == vha->d_id.b24)
+ found = 0;
+ if (ha->num_vhosts) {
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ if (new_fcport->d_id.b24 == vp->d_id.b24) {
+ found = 1;
break;
+ }
}
-
- if (vp_index <= pha->max_npiv_vports)
+ if (found)
continue;
}
/* Bypass if same domain and area of adapter. */
if (((new_fcport->d_id.b24 & 0xffff00) ==
- (ha->d_id.b24 & 0xffff00)) && ha->current_topology ==
+ (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
ISP_CFG_FL)
continue;
@@ -2698,9 +2718,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
/* Locate matching device in database. */
found = 0;
- list_for_each_entry(fcport, &pha->fcports, list) {
- if (new_fcport->vp_idx != fcport->vp_idx)
- continue;
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (memcmp(new_fcport->port_name, fcport->port_name,
WWN_SIZE))
continue;
@@ -2744,7 +2762,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
(fcport->flags & FCF_TAPE_PRESENT) == 0 &&
fcport->port_type != FCT_INITIATOR &&
fcport->port_type != FCT_BROADCAST) {
- ha->isp_ops->fabric_logout(ha, fcport->loop_id,
+ ha->isp_ops->fabric_logout(vha, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
fcport->loop_id = FC_NO_LOOP_ID;
@@ -2755,27 +2773,25 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
if (found)
continue;
-
/* If device was not in our fcports list, then add it. */
list_add_tail(&new_fcport->list, new_fcports);
/* Allocate a new replacement fcport. */
nxt_d_id.b24 = new_fcport->d_id.b24;
- new_fcport = qla2x00_alloc_fcport(ha, GFP_KERNEL);
+ new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (new_fcport == NULL) {
kfree(swl);
return (QLA_MEMORY_ALLOC_FAILED);
}
new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
new_fcport->d_id.b24 = nxt_d_id.b24;
- new_fcport->vp_idx = ha->vp_idx;
}
kfree(swl);
kfree(new_fcport);
if (!list_empty(new_fcports))
- ha->device_flags |= DFLG_FABRIC_DEVICES;
+ vha->device_flags |= DFLG_FABRIC_DEVICES;
return (rval);
}
@@ -2795,13 +2811,14 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
* Kernel context.
*/
static int
-qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
+qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
{
int rval;
int found;
fc_port_t *fcport;
uint16_t first_loop_id;
- scsi_qla_host_t *pha = to_qla_parent(ha);
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *vp;
rval = QLA_SUCCESS;
@@ -2810,17 +2827,15 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
for (;;) {
/* Skip loop ID if already used by adapter. */
- if (dev->loop_id == ha->loop_id) {
+ if (dev->loop_id == vha->loop_id)
dev->loop_id++;
- }
/* Skip reserved loop IDs. */
- while (qla2x00_is_reserved_id(ha, dev->loop_id)) {
+ while (qla2x00_is_reserved_id(vha, dev->loop_id))
dev->loop_id++;
- }
/* Reset loop ID if passed the end. */
- if (dev->loop_id > ha->last_loop_id) {
+ if (dev->loop_id > ha->max_loop_id) {
/* first loop ID. */
dev->loop_id = ha->min_external_loopid;
}
@@ -2828,12 +2843,17 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
/* Check for loop ID being already in use. */
found = 0;
fcport = NULL;
- list_for_each_entry(fcport, &pha->fcports, list) {
- if (fcport->loop_id == dev->loop_id && fcport != dev) {
- /* ID possibly in use */
- found++;
- break;
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ list_for_each_entry(fcport, &vp->vp_fcports, list) {
+ if (fcport->loop_id == dev->loop_id &&
+ fcport != dev) {
+ /* ID possibly in use */
+ found++;
+ break;
+ }
}
+ if (found)
+ break;
}
/* If not in use then it is free to use. */
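A sketch of the nested scan the hunk above introduces: instead of one parent fcports list, every vport's own list is walked when checking whether a candidate loop ID is taken. The types below are hypothetical simplified stand-ins, not the driver's scsi_qla_host/fc_port structures.

struct fcport_demo { unsigned short loop_id; struct fcport_demo *next; };
struct vport_demo  { struct fcport_demo *fcports; struct vport_demo *next; };

/* Nonzero if loop_id is used by any port on any vport, excluding 'dev'
 * itself (the port currently hunting for a free ID). */
static int loop_id_in_use(struct vport_demo *vp_list,
			  struct fcport_demo *dev, unsigned short loop_id)
{
	struct vport_demo *vp;
	struct fcport_demo *fc;

	for (vp = vp_list; vp; vp = vp->next)
		for (fc = vp->fcports; fc; fc = fc->next)
			if (fc->loop_id == loop_id && fc != dev)
				return 1;	/* ID possibly in use */
	return 0;
}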
@@ -2866,7 +2886,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
* Kernel context.
*/
static int
-qla2x00_device_resync(scsi_qla_host_t *ha)
+qla2x00_device_resync(scsi_qla_host_t *vha)
{
int rval;
uint32_t mask;
@@ -2875,14 +2895,13 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
uint8_t rscn_out_iter;
uint8_t format;
port_id_t d_id;
- scsi_qla_host_t *pha = to_qla_parent(ha);
rval = QLA_RSCNS_HANDLED;
- while (ha->rscn_out_ptr != ha->rscn_in_ptr ||
- ha->flags.rscn_queue_overflow) {
+ while (vha->rscn_out_ptr != vha->rscn_in_ptr ||
+ vha->flags.rscn_queue_overflow) {
- rscn_entry = ha->rscn_queue[ha->rscn_out_ptr];
+ rscn_entry = vha->rscn_queue[vha->rscn_out_ptr];
format = MSB(MSW(rscn_entry));
d_id.b.domain = LSB(MSW(rscn_entry));
d_id.b.area = MSB(LSW(rscn_entry));
@@ -2890,37 +2909,37 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
DEBUG(printk("scsi(%ld): RSCN queue entry[%d] = "
"[%02x/%02x%02x%02x].\n",
- ha->host_no, ha->rscn_out_ptr, format, d_id.b.domain,
+ vha->host_no, vha->rscn_out_ptr, format, d_id.b.domain,
d_id.b.area, d_id.b.al_pa));
- ha->rscn_out_ptr++;
- if (ha->rscn_out_ptr == MAX_RSCN_COUNT)
- ha->rscn_out_ptr = 0;
+ vha->rscn_out_ptr++;
+ if (vha->rscn_out_ptr == MAX_RSCN_COUNT)
+ vha->rscn_out_ptr = 0;
/* Skip duplicate entries. */
- for (rscn_out_iter = ha->rscn_out_ptr;
- !ha->flags.rscn_queue_overflow &&
- rscn_out_iter != ha->rscn_in_ptr;
+ for (rscn_out_iter = vha->rscn_out_ptr;
+ !vha->flags.rscn_queue_overflow &&
+ rscn_out_iter != vha->rscn_in_ptr;
rscn_out_iter = (rscn_out_iter ==
(MAX_RSCN_COUNT - 1)) ? 0: rscn_out_iter + 1) {
- if (rscn_entry != ha->rscn_queue[rscn_out_iter])
+ if (rscn_entry != vha->rscn_queue[rscn_out_iter])
break;
DEBUG(printk("scsi(%ld): Skipping duplicate RSCN queue "
- "entry found at [%d].\n", ha->host_no,
+ "entry found at [%d].\n", vha->host_no,
rscn_out_iter));
- ha->rscn_out_ptr = rscn_out_iter;
+ vha->rscn_out_ptr = rscn_out_iter;
}
/* Queue overflow, set switch default case. */
- if (ha->flags.rscn_queue_overflow) {
+ if (vha->flags.rscn_queue_overflow) {
DEBUG(printk("scsi(%ld): device_resync: rscn "
- "overflow.\n", ha->host_no));
+ "overflow.\n", vha->host_no));
format = 3;
- ha->flags.rscn_queue_overflow = 0;
+ vha->flags.rscn_queue_overflow = 0;
}
switch (format) {
@@ -2936,16 +2955,13 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
default:
mask = 0x0;
d_id.b24 = 0;
- ha->rscn_out_ptr = ha->rscn_in_ptr;
+ vha->rscn_out_ptr = vha->rscn_in_ptr;
break;
}
rval = QLA_SUCCESS;
- list_for_each_entry(fcport, &pha->fcports, list) {
- if (fcport->vp_idx != ha->vp_idx)
- continue;
-
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
(fcport->d_id.b24 & mask) != d_id.b24 ||
fcport->port_type == FCT_BROADCAST)
@@ -2954,7 +2970,7 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
if (atomic_read(&fcport->state) == FCS_ONLINE) {
if (format != 3 ||
fcport->port_type != FCT_INITIATOR) {
- qla2x00_mark_device_lost(ha, fcport,
+ qla2x00_mark_device_lost(vha, fcport,
0, 0);
}
}
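The rscn_queue consumed above is a fixed-size ring with a producer index (rscn_in_ptr) and a consumer index (rscn_out_ptr); after popping an entry, immediately following duplicates are skipped. A standalone sketch of that consume-and-dedup pattern follows; names are hypothetical, the overflow handling is omitted, and it drops duplicates outright where the driver leaves rscn_out_ptr on the last duplicate rather than past it.

#include <stdint.h>

#define RSCN_DEMO_COUNT 32	/* stand-in for the driver's MAX_RSCN_COUNT */

struct rscn_demo {
	uint32_t queue[RSCN_DEMO_COUNT];
	uint8_t in_ptr;		/* producer index */
	uint8_t out_ptr;	/* consumer index */
};

/* Pop one entry and collapse immediately following duplicates.
 * Returns 0 when the queue is empty. */
static int rscn_demo_pop(struct rscn_demo *q, uint32_t *entry)
{
	uint8_t iter;

	if (q->out_ptr == q->in_ptr)
		return 0;

	*entry = q->queue[q->out_ptr];
	q->out_ptr = (q->out_ptr + 1) % RSCN_DEMO_COUNT;

	for (iter = q->out_ptr; iter != q->in_ptr;
	     iter = (iter + 1) % RSCN_DEMO_COUNT) {
		if (q->queue[iter] != *entry)
			break;
		q->out_ptr = (iter + 1) % RSCN_DEMO_COUNT; /* skip duplicate */
	}
	return 1;
}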
@@ -2981,30 +2997,31 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
* Kernel context.
*/
static int
-qla2x00_fabric_dev_login(scsi_qla_host_t *ha, fc_port_t *fcport,
+qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
uint16_t *next_loopid)
{
int rval;
int retry;
uint8_t opts;
+ struct qla_hw_data *ha = vha->hw;
rval = QLA_SUCCESS;
retry = 0;
- rval = qla2x00_fabric_login(ha, fcport, next_loopid);
+ rval = qla2x00_fabric_login(vha, fcport, next_loopid);
if (rval == QLA_SUCCESS) {
/* Send an ADISC to tape devices.*/
opts = 0;
if (fcport->flags & FCF_TAPE_PRESENT)
opts |= BIT_1;
- rval = qla2x00_get_port_database(ha, fcport, opts);
+ rval = qla2x00_get_port_database(vha, fcport, opts);
if (rval != QLA_SUCCESS) {
- ha->isp_ops->fabric_logout(ha, fcport->loop_id,
+ ha->isp_ops->fabric_logout(vha, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
- qla2x00_mark_device_lost(ha, fcport, 1, 0);
+ qla2x00_mark_device_lost(vha, fcport, 1, 0);
} else {
- qla2x00_update_fcport(ha, fcport);
+ qla2x00_update_fcport(vha, fcport);
}
}
@@ -3026,13 +3043,14 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *ha, fc_port_t *fcport,
* 3 - Fatal error
*/
int
-qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
+qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
uint16_t *next_loopid)
{
int rval;
int retry;
uint16_t tmp_loopid;
uint16_t mb[MAILBOX_REGISTER_COUNT];
+ struct qla_hw_data *ha = vha->hw;
retry = 0;
tmp_loopid = 0;
@@ -3040,11 +3058,11 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
for (;;) {
DEBUG(printk("scsi(%ld): Trying Fabric Login w/loop id 0x%04x "
"for port %02x%02x%02x.\n",
- ha->host_no, fcport->loop_id, fcport->d_id.b.domain,
+ vha->host_no, fcport->loop_id, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa));
/* Login fcport on switch. */
- ha->isp_ops->fabric_login(ha, fcport->loop_id,
+ ha->isp_ops->fabric_login(vha, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa, mb, BIT_0);
if (mb[0] == MBS_PORT_ID_USED) {
@@ -3100,7 +3118,7 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
* Loop ID already used, try next loop ID.
*/
fcport->loop_id++;
- rval = qla2x00_find_new_loop_id(ha, fcport);
+ rval = qla2x00_find_new_loop_id(vha, fcport);
if (rval != QLA_SUCCESS) {
/* Ran out of loop IDs to use */
break;
@@ -3112,10 +3130,10 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
* dead.
*/
*next_loopid = fcport->loop_id;
- ha->isp_ops->fabric_logout(ha, fcport->loop_id,
+ ha->isp_ops->fabric_logout(vha, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
- qla2x00_mark_device_lost(ha, fcport, 1, 0);
+ qla2x00_mark_device_lost(vha, fcport, 1, 0);
rval = 1;
break;
@@ -3125,12 +3143,12 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
*/
DEBUG2(printk("%s(%ld): failed=%x port_id=%02x%02x%02x "
"loop_id=%x jiffies=%lx.\n",
- __func__, ha->host_no, mb[0],
+ __func__, vha->host_no, mb[0],
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa, fcport->loop_id, jiffies));
*next_loopid = fcport->loop_id;
- ha->isp_ops->fabric_logout(ha, fcport->loop_id,
+ ha->isp_ops->fabric_logout(vha, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
fcport->loop_id = FC_NO_LOOP_ID;
@@ -3158,13 +3176,13 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
* 3 - Fatal error
*/
int
-qla2x00_local_device_login(scsi_qla_host_t *ha, fc_port_t *fcport)
+qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
{
int rval;
uint16_t mb[MAILBOX_REGISTER_COUNT];
memset(mb, 0, sizeof(mb));
- rval = qla2x00_login_local_device(ha, fcport, mb, BIT_0);
+ rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
if (rval == QLA_SUCCESS) {
/* Interrogate mailbox registers for any errors */
if (mb[0] == MBS_COMMAND_ERROR)
@@ -3188,57 +3206,57 @@ qla2x00_local_device_login(scsi_qla_host_t *ha, fc_port_t *fcport)
* 0 = success
*/
int
-qla2x00_loop_resync(scsi_qla_host_t *ha)
+qla2x00_loop_resync(scsi_qla_host_t *vha)
{
- int rval;
+ int rval = QLA_SUCCESS;
uint32_t wait_time;
-
- rval = QLA_SUCCESS;
-
- atomic_set(&ha->loop_state, LOOP_UPDATE);
- clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
- if (ha->flags.online) {
- if (!(rval = qla2x00_fw_ready(ha))) {
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
+
+ atomic_set(&vha->loop_state, LOOP_UPDATE);
+ clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+ if (vha->flags.online) {
+ if (!(rval = qla2x00_fw_ready(vha))) {
/* Wait at most MAX_TARGET RSCNs for a stable link. */
wait_time = 256;
do {
- atomic_set(&ha->loop_state, LOOP_UPDATE);
+ atomic_set(&vha->loop_state, LOOP_UPDATE);
/* Issue a marker after FW becomes ready. */
- qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
- ha->marker_needed = 0;
+ qla2x00_marker(vha, req, rsp, 0, 0,
+ MK_SYNC_ALL);
+ vha->marker_needed = 0;
/* Remap devices on Loop. */
- clear_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
+ clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- qla2x00_configure_loop(ha);
+ qla2x00_configure_loop(vha);
wait_time--;
- } while (!atomic_read(&ha->loop_down_timer) &&
- !(test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) &&
- wait_time &&
- (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)));
+ } while (!atomic_read(&vha->loop_down_timer) &&
+ !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
+ && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
+ &vha->dpc_flags)));
}
}
- if (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) {
+ if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
return (QLA_FUNCTION_FAILED);
- }
- if (rval) {
+ if (rval)
DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
- }
return (rval);
}
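qla2x00_loop_resync() above bounds its reconfiguration retry at 256 passes and bails out early on loop-down or a pending ISP abort. A minimal sketch of that bounded-wait shape, with hypothetical callbacks standing in for the driver's flag and timer tests:

struct loop_wait_ops {
	int (*loop_down)(void *ctx);
	int (*abort_needed)(void *ctx);
	int (*resync_needed)(void *ctx);
	void (*reconfigure)(void *ctx);	/* clears, and may re-set, resync */
};

static int wait_for_stable_loop(const struct loop_wait_ops *ops, void *ctx)
{
	unsigned int wait_time = 256;	/* at most MAX_TARGET RSCNs */

	do {
		ops->reconfigure(ctx);
		wait_time--;
	} while (!ops->loop_down(ctx) && !ops->abort_needed(ctx) &&
		 wait_time && ops->resync_needed(ctx));

	return wait_time ? 0 : -1;	/* -1: link never settled */
}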
void
-qla2x00_update_fcports(scsi_qla_host_t *ha)
+qla2x00_update_fcports(scsi_qla_host_t *vha)
{
fc_port_t *fcport;
/* Go with deferred removal of rport references. */
- list_for_each_entry(fcport, &ha->fcports, list)
- if (fcport->drport &&
+ list_for_each_entry(fcport, &vha->vp_fcports, list)
+ if (fcport && fcport->drport &&
atomic_read(&fcport->state) != FCS_UNCONFIGURED)
qla2x00_rport_del(fcport);
}
@@ -3254,63 +3272,65 @@ qla2x00_update_fcports(scsi_qla_host_t *ha)
* 0 = success
*/
int
-qla2x00_abort_isp(scsi_qla_host_t *ha)
+qla2x00_abort_isp(scsi_qla_host_t *vha)
{
int rval;
uint8_t status = 0;
- scsi_qla_host_t *vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *vp;
+ struct req_que *req = ha->req_q_map[0];
- if (ha->flags.online) {
- ha->flags.online = 0;
- clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+ if (vha->flags.online) {
+ vha->flags.online = 0;
+ clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
ha->qla_stats.total_isp_aborts++;
qla_printk(KERN_INFO, ha,
"Performing ISP error recovery - ha= %p.\n", ha);
- ha->isp_ops->reset_chip(ha);
-
- atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
- if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
- atomic_set(&ha->loop_state, LOOP_DOWN);
- qla2x00_mark_all_devices_lost(ha, 0);
- list_for_each_entry(vha, &ha->vp_list, vp_list)
- qla2x00_mark_all_devices_lost(vha, 0);
+ ha->isp_ops->reset_chip(vha);
+
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ qla2x00_mark_all_devices_lost(vha, 0);
+ list_for_each_entry(vp, &ha->vp_list, list)
+ qla2x00_mark_all_devices_lost(vp, 0);
} else {
- if (!atomic_read(&ha->loop_down_timer))
- atomic_set(&ha->loop_down_timer,
+ if (!atomic_read(&vha->loop_down_timer))
+ atomic_set(&vha->loop_down_timer,
LOOP_DOWN_TIME);
}
/* Requeue all commands in outstanding command list. */
- qla2x00_abort_all_cmds(ha, DID_RESET << 16);
+ qla2x00_abort_all_cmds(vha, DID_RESET << 16);
- ha->isp_ops->get_flash_version(ha, ha->request_ring);
+ ha->isp_ops->get_flash_version(vha, req->ring);
- ha->isp_ops->nvram_config(ha);
+ ha->isp_ops->nvram_config(vha);
- if (!qla2x00_restart_isp(ha)) {
- clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
+ if (!qla2x00_restart_isp(vha)) {
+ clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
- if (!atomic_read(&ha->loop_down_timer)) {
+ if (!atomic_read(&vha->loop_down_timer)) {
/*
* Issue marker command only when we are going
* to start the I/O .
*/
- ha->marker_needed = 1;
+ vha->marker_needed = 1;
}
- ha->flags.online = 1;
+ vha->flags.online = 1;
ha->isp_ops->enable_intrs(ha);
ha->isp_abort_cnt = 0;
- clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
+ clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
if (ha->fce) {
ha->flags.fce_enabled = 1;
memset(ha->fce, 0,
fce_calc_size(ha->fce_bufs));
- rval = qla2x00_enable_fce_trace(ha,
+ rval = qla2x00_enable_fce_trace(vha,
ha->fce_dma, ha->fce_bufs, ha->fce_mb,
&ha->fce_bufs);
if (rval) {
@@ -3323,7 +3343,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
if (ha->eft) {
memset(ha->eft, 0, EFT_SIZE);
- rval = qla2x00_enable_eft_trace(ha,
+ rval = qla2x00_enable_eft_trace(vha,
ha->eft_dma, EFT_NUM_BUFFERS);
if (rval) {
qla_printk(KERN_WARNING, ha,
@@ -3332,8 +3352,8 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
}
}
} else { /* failed the ISP abort */
- ha->flags.online = 1;
- if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) {
+ vha->flags.online = 1;
+ if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
if (ha->isp_abort_cnt == 0) {
qla_printk(KERN_WARNING, ha,
"ISP error recovery failed - "
@@ -3342,37 +3362,41 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
* The next call disables the board
* completely.
*/
- ha->isp_ops->reset_adapter(ha);
- ha->flags.online = 0;
+ ha->isp_ops->reset_adapter(vha);
+ vha->flags.online = 0;
clear_bit(ISP_ABORT_RETRY,
- &ha->dpc_flags);
+ &vha->dpc_flags);
status = 0;
} else { /* schedule another ISP abort */
ha->isp_abort_cnt--;
DEBUG(printk("qla%ld: ISP abort - "
"retry remaining %d\n",
- ha->host_no, ha->isp_abort_cnt));
+ vha->host_no, ha->isp_abort_cnt));
status = 1;
}
} else {
ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
DEBUG(printk("qla2x00(%ld): ISP error recovery "
"- retrying (%d) more times\n",
- ha->host_no, ha->isp_abort_cnt));
- set_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
+ vha->host_no, ha->isp_abort_cnt));
+ set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
status = 1;
}
}
}
- if (status) {
+ if (!status) {
+ DEBUG(printk(KERN_INFO
+ "qla2x00_abort_isp(%ld): succeeded.\n",
+ vha->host_no));
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ if (vp->vp_idx)
+ qla2x00_vp_abort_isp(vp);
+ }
+ } else {
qla_printk(KERN_INFO, ha,
"qla2x00_abort_isp: **** FAILED ****\n");
- } else {
- DEBUG(printk(KERN_INFO
- "qla2x00_abort_isp(%ld): exiting.\n",
- ha->host_no));
}
return(status);
@@ -3389,42 +3413,50 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
* 0 = success
*/
static int
-qla2x00_restart_isp(scsi_qla_host_t *ha)
+qla2x00_restart_isp(scsi_qla_host_t *vha)
{
uint8_t status = 0;
uint32_t wait_time;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
/* If firmware needs to be loaded */
- if (qla2x00_isp_firmware(ha)) {
- ha->flags.online = 0;
- if (!(status = ha->isp_ops->chip_diag(ha)))
- status = qla2x00_setup_chip(ha);
+ if (qla2x00_isp_firmware(vha)) {
+ vha->flags.online = 0;
+ status = ha->isp_ops->chip_diag(vha);
+ if (!status)
+ status = qla2x00_setup_chip(vha);
}
- if (!status && !(status = qla2x00_init_rings(ha))) {
- clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
- if (!(status = qla2x00_fw_ready(ha))) {
+ if (!status && !(status = qla2x00_init_rings(vha))) {
+ clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+ /* Initialize the queues in use */
+ qla25xx_init_queues(ha);
+
+ status = qla2x00_fw_ready(vha);
+ if (!status) {
DEBUG(printk("%s(): Start configure loop, "
"status = %d\n", __func__, status));
/* Issue a marker after FW becomes ready. */
- qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
+ qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
- ha->flags.online = 1;
+ vha->flags.online = 1;
/* Wait at most MAX_TARGET RSCNs for a stable link. */
wait_time = 256;
do {
- clear_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
- qla2x00_configure_loop(ha);
+ clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ qla2x00_configure_loop(vha);
wait_time--;
- } while (!atomic_read(&ha->loop_down_timer) &&
- !(test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) &&
- wait_time &&
- (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)));
+ } while (!atomic_read(&vha->loop_down_timer) &&
+ !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
+ && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
+ &vha->dpc_flags)));
}
/* if no cable then assume it's good */
- if ((ha->device_flags & DFLG_NO_CABLE))
+ if ((vha->device_flags & DFLG_NO_CABLE))
status = 0;
DEBUG(printk("%s(): Configure loop done, status = 0x%x\n",
@@ -3434,6 +3466,46 @@ qla2x00_restart_isp(scsi_qla_host_t *ha)
return (status);
}
+static int
+qla25xx_init_queues(struct qla_hw_data *ha)
+{
+ struct rsp_que *rsp = NULL;
+ struct req_que *req = NULL;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ int ret = -1;
+ int i;
+
+ for (i = 1; i < ha->max_queues; i++) {
+ rsp = ha->rsp_q_map[i];
+ if (rsp) {
+ rsp->options &= ~BIT_0;
+ ret = qla25xx_init_rsp_que(base_vha, rsp, rsp->options);
+ if (ret != QLA_SUCCESS)
+ DEBUG2_17(printk(KERN_WARNING
+ "%s Rsp que:%d init failed\n", __func__,
+ rsp->id));
+ else
+ DEBUG2_17(printk(KERN_INFO
+ "%s Rsp que:%d inited\n", __func__,
+ rsp->id));
+ }
+ req = ha->req_q_map[i];
+ if (req) {
+ req->options &= ~BIT_0;
+ ret = qla25xx_init_req_que(base_vha, req, req->options);
+ if (ret != QLA_SUCCESS)
+ DEBUG2_17(printk(KERN_WARNING
+ "%s Req que:%d init failed\n", __func__,
+ req->id));
+ else
+				DEBUG2_17(printk(KERN_INFO
+					"%s Req que:%d inited\n", __func__,
+ req->id));
+ }
+ }
+ return ret;
+}
+
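The new qla25xx_init_queues() above walks queues 1..max_queues-1 (queue 0 is the base queue, brought up by the normal init path), clearing BIT_0 of each queue's saved options before re-enabling it. Restated as a generic sketch with hypothetical stand-in types for struct req_que/rsp_que and the init mailbox calls:

struct que_demo { int id; unsigned int options; };

typedef int (*que_init_demo_fn)(struct que_demo *q, unsigned int options);

static int reinit_queues_demo(struct que_demo **map, int max,
			      que_init_demo_fn init)
{
	int i, ret = -1;

	for (i = 1; i < max; i++) {	/* skip the base queue at index 0 */
		if (!map[i])
			continue;
		map[i]->options &= ~0x1u;	/* clear BIT_0 before re-init */
		ret = init(map[i], map[i]->options);
	}
	return ret;
}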
/*
* qla2x00_reset_adapter
* Reset adapter.
@@ -3442,12 +3514,13 @@ qla2x00_restart_isp(scsi_qla_host_t *ha)
* ha = adapter block pointer.
*/
void
-qla2x00_reset_adapter(scsi_qla_host_t *ha)
+qla2x00_reset_adapter(scsi_qla_host_t *vha)
{
unsigned long flags = 0;
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- ha->flags.online = 0;
+ vha->flags.online = 0;
ha->isp_ops->disable_intrs(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -3459,12 +3532,13 @@ qla2x00_reset_adapter(scsi_qla_host_t *ha)
}
void
-qla24xx_reset_adapter(scsi_qla_host_t *ha)
+qla24xx_reset_adapter(scsi_qla_host_t *vha)
{
unsigned long flags = 0;
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- ha->flags.online = 0;
+ vha->flags.online = 0;
ha->isp_ops->disable_intrs(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -3478,9 +3552,11 @@ qla24xx_reset_adapter(scsi_qla_host_t *ha)
/* On sparc systems, obtain port and node WWN from firmware
* properties.
*/
-static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, struct nvram_24xx *nv)
+static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
+ struct nvram_24xx *nv)
{
#ifdef CONFIG_SPARC
+ struct qla_hw_data *ha = vha->hw;
struct pci_dev *pdev = ha->pdev;
struct device_node *dp = pci_device_to_OF_node(pdev);
const u8 *val;
@@ -3497,7 +3573,7 @@ static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, struct nvram_24xx *n
}
int
-qla24xx_nvram_config(scsi_qla_host_t *ha)
+qla24xx_nvram_config(scsi_qla_host_t *vha)
{
int rval;
struct init_cb_24xx *icb;
@@ -3506,6 +3582,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
uint8_t *dptr1, *dptr2;
uint32_t chksum;
uint16_t cnt;
+ struct qla_hw_data *ha = vha->hw;
rval = QLA_SUCCESS;
icb = (struct init_cb_24xx *)ha->init_cb;
@@ -3523,12 +3600,12 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
/* Get VPD data into cache */
ha->vpd = ha->nvram + VPD_OFFSET;
- ha->isp_ops->read_nvram(ha, (uint8_t *)ha->vpd,
+ ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
/* Get NVRAM data into cache and calculate checksum. */
dptr = (uint32_t *)nv;
- ha->isp_ops->read_nvram(ha, (uint8_t *)dptr, ha->nvram_base,
+ ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
ha->nvram_size);
for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
chksum += le32_to_cpu(*dptr++);
@@ -3573,7 +3650,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
nv->node_name[5] = 0x1c;
nv->node_name[6] = 0x55;
nv->node_name[7] = 0x86;
- qla24xx_nvram_wwn_from_ofw(ha, nv);
+ qla24xx_nvram_wwn_from_ofw(vha, nv);
nv->login_retry_count = __constant_cpu_to_le16(8);
nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
nv->login_timeout = __constant_cpu_to_le16(0);
@@ -3593,7 +3670,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
}
/* Reset Initialization control block */
- memset(icb, 0, sizeof(struct init_cb_24xx));
+ memset(icb, 0, ha->init_cb_size);
/* Copy 1st segment. */
dptr1 = (uint8_t *)icb;
@@ -3616,7 +3693,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
/*
* Setup driver NVRAM options.
*/
- qla2x00_set_model_info(ha, nv->model_name, sizeof(nv->model_name),
+ qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
"QLA2462");
/* Use alternate WWN? */
@@ -3655,8 +3732,8 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
ha->serial0 = icb->port_name[5];
ha->serial1 = icb->port_name[6];
ha->serial2 = icb->port_name[7];
- ha->node_name = icb->node_name;
- ha->port_name = icb->port_name;
+ memcpy(vha->node_name, icb->node_name, WWN_SIZE);
+ memcpy(vha->port_name, icb->port_name, WWN_SIZE);
icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
@@ -3711,7 +3788,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
ha->login_retry_count = ql2xloginretrycount;
/* Enable ZIO. */
- if (!ha->flags.init_done) {
+ if (!vha->flags.init_done) {
ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
(BIT_3 | BIT_2 | BIT_1 | BIT_0);
ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
@@ -3719,12 +3796,12 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
}
icb->firmware_options_2 &= __constant_cpu_to_le32(
~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
- ha->flags.process_response_queue = 0;
+ vha->flags.process_response_queue = 0;
if (ha->zio_mode != QLA_ZIO_DISABLED) {
ha->zio_mode = QLA_ZIO_MODE_6;
DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
- "(%d us).\n", ha->host_no, ha->zio_mode,
+ "(%d us).\n", vha->host_no, ha->zio_mode,
ha->zio_timer * 100));
qla_printk(KERN_INFO, ha,
"ZIO mode %d enabled; timer delay (%d us).\n",
@@ -3733,36 +3810,37 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
icb->firmware_options_2 |= cpu_to_le32(
(uint32_t)ha->zio_mode);
icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
- ha->flags.process_response_queue = 1;
+ vha->flags.process_response_queue = 1;
}
if (rval) {
DEBUG2_3(printk(KERN_WARNING
- "scsi(%ld): NVRAM configuration failed!\n", ha->host_no));
+ "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
}
return (rval);
}
static int
-qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
+qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
- int rval;
+ int rval = QLA_SUCCESS;
int segments, fragment;
uint32_t faddr;
uint32_t *dcode, dlen;
uint32_t risc_addr;
uint32_t risc_size;
uint32_t i;
-
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
rval = QLA_SUCCESS;
segments = FA_RISC_CODE_SEGMENTS;
faddr = ha->flt_region_fw;
- dcode = (uint32_t *)ha->request_ring;
+ dcode = (uint32_t *)req->ring;
*srisc_addr = 0;
/* Validate firmware image by checking version. */
- qla24xx_read_flash_data(ha, dcode, faddr + 4, 4);
+ qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
for (i = 0; i < 4; i++)
dcode[i] = be32_to_cpu(dcode[i]);
if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
@@ -3780,7 +3858,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
while (segments && rval == QLA_SUCCESS) {
/* Read segment's load information. */
- qla24xx_read_flash_data(ha, dcode, faddr, 4);
+ qla24xx_read_flash_data(vha, dcode, faddr, 4);
risc_addr = be32_to_cpu(dcode[2]);
*srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
@@ -3794,17 +3872,17 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
"addr %x, number of dwords 0x%x, offset 0x%x.\n",
- ha->host_no, risc_addr, dlen, faddr));
+ vha->host_no, risc_addr, dlen, faddr));
- qla24xx_read_flash_data(ha, dcode, faddr, dlen);
+ qla24xx_read_flash_data(vha, dcode, faddr, dlen);
for (i = 0; i < dlen; i++)
dcode[i] = swab32(dcode[i]);
- rval = qla2x00_load_ram(ha, ha->request_dma, risc_addr,
+ rval = qla2x00_load_ram(vha, req->dma, risc_addr,
dlen);
if (rval) {
DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
- "segment %d of firmware\n", ha->host_no,
+ "segment %d of firmware\n", vha->host_no,
fragment));
qla_printk(KERN_WARNING, ha,
"[ERROR] Failed to load segment %d of "
@@ -3828,16 +3906,18 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
#define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/"
int
-qla2x00_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
+qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
int rval;
int i, fragment;
uint16_t *wcode, *fwcode;
uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
struct fw_blob *blob;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
/* Load firmware blob. */
- blob = qla2x00_request_firmware(ha);
+ blob = qla2x00_request_firmware(vha);
if (!blob) {
qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
@@ -3847,7 +3927,7 @@ qla2x00_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
rval = QLA_SUCCESS;
- wcode = (uint16_t *)ha->request_ring;
+ wcode = (uint16_t *)req->ring;
*srisc_addr = 0;
fwcode = (uint16_t *)blob->fw->data;
fwclen = 0;
@@ -3894,17 +3974,17 @@ qla2x00_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
wlen = risc_size;
DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
- "addr %x, number of words 0x%x.\n", ha->host_no,
+ "addr %x, number of words 0x%x.\n", vha->host_no,
risc_addr, wlen));
for (i = 0; i < wlen; i++)
wcode[i] = swab16(fwcode[i]);
- rval = qla2x00_load_ram(ha, ha->request_dma, risc_addr,
+ rval = qla2x00_load_ram(vha, req->dma, risc_addr,
wlen);
if (rval) {
DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
- "segment %d of firmware\n", ha->host_no,
+ "segment %d of firmware\n", vha->host_no,
fragment));
qla_printk(KERN_WARNING, ha,
"[ERROR] Failed to load segment %d of "
@@ -3928,7 +4008,7 @@ fail_fw_integrity:
}
int
-qla24xx_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
+qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
int rval;
int segments, fragment;
@@ -3938,9 +4018,11 @@ qla24xx_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
uint32_t i;
struct fw_blob *blob;
uint32_t *fwcode, fwclen;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
/* Load firmware blob. */
- blob = qla2x00_request_firmware(ha);
+ blob = qla2x00_request_firmware(vha);
if (!blob) {
qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
@@ -3949,13 +4031,13 @@ qla24xx_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
/* Try to load RISC code from flash. */
qla_printk(KERN_ERR, ha, "Attempting to load (potentially "
"outdated) firmware from flash.\n");
- return qla24xx_load_risc_flash(ha, srisc_addr);
+ return qla24xx_load_risc_flash(vha, srisc_addr);
}
rval = QLA_SUCCESS;
segments = FA_RISC_CODE_SEGMENTS;
- dcode = (uint32_t *)ha->request_ring;
+ dcode = (uint32_t *)req->ring;
*srisc_addr = 0;
fwcode = (uint32_t *)blob->fw->data;
fwclen = 0;
@@ -4003,17 +4085,17 @@ qla24xx_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
dlen = risc_size;
DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
- "addr %x, number of dwords 0x%x.\n", ha->host_no,
+ "addr %x, number of dwords 0x%x.\n", vha->host_no,
risc_addr, dlen));
for (i = 0; i < dlen; i++)
dcode[i] = swab32(fwcode[i]);
- rval = qla2x00_load_ram(ha, ha->request_dma, risc_addr,
+ rval = qla2x00_load_ram(vha, req->dma, risc_addr,
dlen);
if (rval) {
DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
- "segment %d of firmware\n", ha->host_no,
+ "segment %d of firmware\n", vha->host_no,
fragment));
qla_printk(KERN_WARNING, ha,
"[ERROR] Failed to load segment %d of "
@@ -4037,49 +4119,53 @@ fail_fw_integrity:
}
void
-qla2x00_try_to_stop_firmware(scsi_qla_host_t *ha)
+qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
{
int ret, retries;
+ struct qla_hw_data *ha = vha->hw;
if (!IS_FWI2_CAPABLE(ha))
return;
if (!ha->fw_major_version)
return;
- ret = qla2x00_stop_firmware(ha);
+ ret = qla2x00_stop_firmware(vha);
for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
retries ; retries--) {
- ha->isp_ops->reset_chip(ha);
- if (ha->isp_ops->chip_diag(ha) != QLA_SUCCESS)
+ ha->isp_ops->reset_chip(vha);
+ if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
continue;
- if (qla2x00_setup_chip(ha) != QLA_SUCCESS)
+ if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
continue;
qla_printk(KERN_INFO, ha,
"Attempting retry of stop-firmware command...\n");
- ret = qla2x00_stop_firmware(ha);
+ ret = qla2x00_stop_firmware(vha);
}
}
int
-qla24xx_configure_vhba(scsi_qla_host_t *ha)
+qla24xx_configure_vhba(scsi_qla_host_t *vha)
{
int rval = QLA_SUCCESS;
uint16_t mb[MAILBOX_REGISTER_COUNT];
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
- if (!ha->parent)
+ if (!vha->vp_idx)
return -EINVAL;
- rval = qla2x00_fw_ready(ha->parent);
+ rval = qla2x00_fw_ready(base_vha);
if (rval == QLA_SUCCESS) {
- clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
- qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
+ clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+ qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
}
- ha->flags.management_server_logged_in = 0;
+ vha->flags.management_server_logged_in = 0;
/* Login to SNS first */
- qla24xx_login_fabric(ha->parent, NPH_SNS, 0xff, 0xff, 0xfc,
- mb, BIT_1);
+ ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1);
if (mb[0] != MBS_COMMAND_COMPLETE) {
DEBUG15(qla_printk(KERN_INFO, ha,
"Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
@@ -4088,11 +4174,11 @@ qla24xx_configure_vhba(scsi_qla_host_t *ha)
return (QLA_FUNCTION_FAILED);
}
- atomic_set(&ha->loop_down_timer, 0);
- atomic_set(&ha->loop_state, LOOP_UP);
- set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
- set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
- rval = qla2x00_loop_resync(ha->parent);
+ atomic_set(&vha->loop_down_timer, 0);
+ atomic_set(&vha->loop_state, LOOP_UP);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ rval = qla2x00_loop_resync(base_vha);
return rval;
}
@@ -4103,9 +4189,10 @@ static LIST_HEAD(qla_cs84xx_list);
static DEFINE_MUTEX(qla_cs84xx_mutex);
static struct qla_chip_state_84xx *
-qla84xx_get_chip(struct scsi_qla_host *ha)
+qla84xx_get_chip(struct scsi_qla_host *vha)
{
struct qla_chip_state_84xx *cs84xx;
+ struct qla_hw_data *ha = vha->hw;
mutex_lock(&qla_cs84xx_mutex);
@@ -4145,21 +4232,23 @@ __qla84xx_chip_release(struct kref *kref)
}
void
-qla84xx_put_chip(struct scsi_qla_host *ha)
+qla84xx_put_chip(struct scsi_qla_host *vha)
{
+ struct qla_hw_data *ha = vha->hw;
if (ha->cs84xx)
kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
}
static int
-qla84xx_init_chip(scsi_qla_host_t *ha)
+qla84xx_init_chip(scsi_qla_host_t *vha)
{
int rval;
uint16_t status[2];
+ struct qla_hw_data *ha = vha->hw;
mutex_lock(&ha->cs84xx->fw_update_mutex);
- rval = qla84xx_verify_chip(ha, status);
+ rval = qla84xx_verify_chip(vha, status);
mutex_unlock(&ha->cs84xx->fw_update_mutex);
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index e90afad120ee..5e0a7095c9f2 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -32,47 +32,15 @@ qla2x00_debounce_register(volatile uint16_t __iomem *addr)
}
static inline void
-qla2x00_poll(scsi_qla_host_t *ha)
+qla2x00_poll(struct rsp_que *rsp)
{
unsigned long flags;
-
+ struct qla_hw_data *ha = rsp->hw;
local_irq_save(flags);
- ha->isp_ops->intr_handler(0, ha);
+ ha->isp_ops->intr_handler(0, rsp);
local_irq_restore(flags);
}
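With qla2x00_poll() retargeted above, polling dispatches the interrupt handler registered for one specific response queue rather than for the whole host. A sketch of the pattern, assuming a kernel build for local_irq_save(); struct rsp_demo is a hypothetical stand-in for struct rsp_que:

#include <linux/irqflags.h>

struct rsp_demo {
	void *hw;
	void (*intr_handler)(int irq, void *dev_id);
};

/* Poll by invoking the queue's own handler with local interrupts masked. */
static void rsp_demo_poll(struct rsp_demo *rsp)
{
	unsigned long flags;

	local_irq_save(flags);
	rsp->intr_handler(0, rsp);
	local_irq_restore(flags);
}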
-static __inline__ scsi_qla_host_t *
-to_qla_parent(scsi_qla_host_t *ha)
-{
- return ha->parent ? ha->parent : ha;
-}
-
-/**
- * qla2x00_issue_marker() - Issue a Marker IOCB if necessary.
- * @ha: HA context
- * @ha_locked: is function called with the hardware lock
- *
- * Returns non-zero if a failure occurred, else zero.
- */
-static inline int
-qla2x00_issue_marker(scsi_qla_host_t *ha, int ha_locked)
-{
- /* Send marker if required */
- if (ha->marker_needed != 0) {
- if (ha_locked) {
- if (__qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) !=
- QLA_SUCCESS)
- return (QLA_FUNCTION_FAILED);
- } else {
- if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) !=
- QLA_SUCCESS)
- return (QLA_FUNCTION_FAILED);
- }
- ha->marker_needed = 0;
- }
- return (QLA_SUCCESS);
-}
-
static inline uint8_t *
host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
{
@@ -87,11 +55,12 @@ host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
}
static inline int
-qla2x00_is_reserved_id(scsi_qla_host_t *ha, uint16_t loop_id)
+qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
{
+ struct qla_hw_data *ha = vha->hw;
if (IS_FWI2_CAPABLE(ha))
return (loop_id > NPH_LAST_HANDLE);
- return ((loop_id > ha->last_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
+ return ((loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST);
-};
+}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 85bc0a48598b..5bedc9d05942 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -11,8 +11,9 @@
#include <scsi/scsi_tcq.h>
-static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha);
-static void qla2x00_isp_cmd(scsi_qla_host_t *ha);
+static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
+ struct rsp_que *rsp);
+static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
/**
* qla2x00_get_cmd_direction() - Determine control_flag data direction.
@@ -30,11 +31,11 @@ qla2x00_get_cmd_direction(srb_t *sp)
/* Set transfer direction */
if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
cflags = CF_WRITE;
- sp->fcport->ha->qla_stats.output_bytes +=
+ sp->fcport->vha->hw->qla_stats.output_bytes +=
scsi_bufflen(sp->cmd);
} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
cflags = CF_READ;
- sp->fcport->ha->qla_stats.input_bytes +=
+ sp->fcport->vha->hw->qla_stats.input_bytes +=
scsi_bufflen(sp->cmd);
}
return (cflags);
@@ -91,20 +92,19 @@ qla2x00_calc_iocbs_64(uint16_t dsds)
* Returns a pointer to the Continuation Type 0 IOCB packet.
*/
static inline cont_entry_t *
-qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha)
+qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha)
{
cont_entry_t *cont_pkt;
-
/* Adjust ring index. */
- ha->req_ring_index++;
- if (ha->req_ring_index == ha->request_q_length) {
- ha->req_ring_index = 0;
- ha->request_ring_ptr = ha->request_ring;
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
} else {
- ha->request_ring_ptr++;
+ req->ring_ptr++;
}
- cont_pkt = (cont_entry_t *)ha->request_ring_ptr;
+ cont_pkt = (cont_entry_t *)req->ring_ptr;
/* Load packet defaults. */
*((uint32_t *)(&cont_pkt->entry_type)) =
@@ -120,20 +120,20 @@ qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha)
* Returns a pointer to the continuation type 1 IOCB packet.
*/
static inline cont_a64_entry_t *
-qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *ha)
+qla2x00_prep_cont_type1_iocb(struct req_que *req, scsi_qla_host_t *vha)
{
cont_a64_entry_t *cont_pkt;
/* Adjust ring index. */
- ha->req_ring_index++;
- if (ha->req_ring_index == ha->request_q_length) {
- ha->req_ring_index = 0;
- ha->request_ring_ptr = ha->request_ring;
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
} else {
- ha->request_ring_ptr++;
+ req->ring_ptr++;
}
- cont_pkt = (cont_a64_entry_t *)ha->request_ring_ptr;
+ cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
/* Load packet defaults. */
*((uint32_t *)(&cont_pkt->entry_type)) =
@@ -155,10 +155,11 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
{
uint16_t avail_dsds;
uint32_t *cur_dsd;
- scsi_qla_host_t *ha;
+ scsi_qla_host_t *vha;
struct scsi_cmnd *cmd;
struct scatterlist *sg;
int i;
+ struct req_que *req;
cmd = sp->cmd;
@@ -172,7 +173,8 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
return;
}
- ha = sp->ha;
+ vha = sp->vha;
+ req = sp->que;
cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
@@ -190,7 +192,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
* Seven DSDs are available in the Continuation
* Type 0 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
+ cont_pkt = qla2x00_prep_cont_type0_iocb(req, vha);
cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
avail_dsds = 7;
}
@@ -214,10 +216,11 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
{
uint16_t avail_dsds;
uint32_t *cur_dsd;
- scsi_qla_host_t *ha;
+ scsi_qla_host_t *vha;
struct scsi_cmnd *cmd;
struct scatterlist *sg;
int i;
+ struct req_que *req;
cmd = sp->cmd;
@@ -231,7 +234,8 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
return;
}
- ha = sp->ha;
+ vha = sp->vha;
+ req = sp->que;
cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
@@ -250,7 +254,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
avail_dsds = 5;
}
@@ -274,7 +278,7 @@ qla2x00_start_scsi(srb_t *sp)
{
int ret, nseg;
unsigned long flags;
- scsi_qla_host_t *ha;
+ scsi_qla_host_t *vha;
struct scsi_cmnd *cmd;
uint32_t *clr_ptr;
uint32_t index;
@@ -284,33 +288,39 @@ qla2x00_start_scsi(srb_t *sp)
uint16_t req_cnt;
uint16_t tot_dsds;
struct device_reg_2xxx __iomem *reg;
+ struct qla_hw_data *ha;
+ struct req_que *req;
+ struct rsp_que *rsp;
/* Setup device pointers. */
ret = 0;
- ha = sp->ha;
+ vha = sp->vha;
+ ha = vha->hw;
reg = &ha->iobase->isp;
cmd = sp->cmd;
+ req = ha->req_q_map[0];
+ rsp = ha->rsp_q_map[0];
/* So we know we haven't pci_map'ed anything yet */
tot_dsds = 0;
/* Send marker if required */
- if (ha->marker_needed != 0) {
- if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
+ != QLA_SUCCESS)
return (QLA_FUNCTION_FAILED);
- }
- ha->marker_needed = 0;
+ vha->marker_needed = 0;
}
/* Acquire ring specific lock */
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Check for room in outstanding command list. */
- handle = ha->current_outstanding_cmd;
+ handle = req->current_outstanding_cmd;
for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
handle++;
if (handle == MAX_OUTSTANDING_COMMANDS)
handle = 1;
- if (!ha->outstanding_cmds[handle])
+ if (!req->outstanding_cmds[handle])
break;
}
if (index == MAX_OUTSTANDING_COMMANDS)
@@ -329,25 +339,26 @@ qla2x00_start_scsi(srb_t *sp)
/* Calculate the number of request entries needed. */
req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
- if (ha->req_q_cnt < (req_cnt + 2)) {
+ if (req->cnt < (req_cnt + 2)) {
cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
- if (ha->req_ring_index < cnt)
- ha->req_q_cnt = cnt - ha->req_ring_index;
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
else
- ha->req_q_cnt = ha->request_q_length -
- (ha->req_ring_index - cnt);
+ req->cnt = req->length -
+ (req->ring_index - cnt);
}
- if (ha->req_q_cnt < (req_cnt + 2))
+ if (req->cnt < (req_cnt + 2))
goto queuing_error;
/* Build command packet */
- ha->current_outstanding_cmd = handle;
- ha->outstanding_cmds[handle] = sp;
- sp->ha = ha;
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->vha = vha;
+ sp->que = req;
sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
- ha->req_q_cnt -= req_cnt;
+ req->cnt -= req_cnt;
- cmd_pkt = (cmd_entry_t *)ha->request_ring_ptr;
+ cmd_pkt = (cmd_entry_t *)req->ring_ptr;
cmd_pkt->handle = handle;
/* Zero out remaining portion of packet. */
clr_ptr = (uint32_t *)cmd_pkt + 2;
@@ -373,23 +384,23 @@ qla2x00_start_scsi(srb_t *sp)
wmb();
/* Adjust ring index. */
- ha->req_ring_index++;
- if (ha->req_ring_index == ha->request_q_length) {
- ha->req_ring_index = 0;
- ha->request_ring_ptr = ha->request_ring;
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
} else
- ha->request_ring_ptr++;
+ req->ring_ptr++;
sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */
- WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index);
+ WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
/* Manage unprocessed RIO/ZIO commands in response queue. */
- if (ha->flags.process_response_queue &&
- ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
- qla2x00_process_response_queue(ha);
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla2x00_process_response_queue(rsp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return (QLA_SUCCESS);
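qla2x00_start_scsi() above advances req->ring_index and req->ring_ptr in lockstep, wrapping both when the end of the ring is reached; the continuation-IOCB helpers and qla2x00_isp_cmd() repeat the same motion. Restated as a standalone sketch, with hypothetical types in place of struct req_que:

#include <stddef.h>

struct ring_demo {
	unsigned int ring_index;	/* producer slot number */
	unsigned int length;		/* total slots in the ring */
	unsigned char *ring;		/* base of the ring buffer */
	unsigned char *ring_ptr;	/* current slot pointer */
	size_t entry_size;		/* bytes per IOCB slot */
};

/* Advance the producer one slot, wrapping index and pointer together. */
static void ring_demo_advance(struct ring_demo *r)
{
	r->ring_index++;
	if (r->ring_index == r->length) {
		r->ring_index = 0;
		r->ring_ptr = r->ring;
	} else {
		r->ring_ptr += r->entry_size;
	}
}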
@@ -415,18 +426,20 @@ queuing_error:
* Returns non-zero if a failure occurred, else zero.
*/
int
-__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
- uint8_t type)
+__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
+ struct rsp_que *rsp, uint16_t loop_id,
+ uint16_t lun, uint8_t type)
{
mrk_entry_t *mrk;
struct mrk_entry_24xx *mrk24;
- scsi_qla_host_t *pha = to_qla_parent(ha);
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
mrk24 = NULL;
- mrk = (mrk_entry_t *)qla2x00_req_pkt(pha);
+ mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp);
if (mrk == NULL) {
DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
- __func__, ha->host_no));
+ __func__, base_vha->host_no));
return (QLA_FUNCTION_FAILED);
}
@@ -440,7 +453,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
mrk24->lun[1] = LSB(lun);
mrk24->lun[2] = MSB(lun);
host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
- mrk24->vp_index = ha->vp_idx;
+ mrk24->vp_index = vha->vp_idx;
} else {
SET_TARGET_ID(ha, mrk->target, loop_id);
mrk->lun = cpu_to_le16(lun);
@@ -448,22 +461,22 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
}
wmb();
- qla2x00_isp_cmd(pha);
+ qla2x00_isp_cmd(vha, req);
return (QLA_SUCCESS);
}
int
-qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
- uint8_t type)
+qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
+ struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
+ uint8_t type)
{
int ret;
unsigned long flags = 0;
- scsi_qla_host_t *pha = to_qla_parent(ha);
- spin_lock_irqsave(&pha->hardware_lock, flags);
- ret = __qla2x00_marker(ha, loop_id, lun, type);
- spin_unlock_irqrestore(&pha->hardware_lock, flags);
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
return (ret);
}
@@ -477,9 +490,11 @@ qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
* Returns NULL if function failed, else, a pointer to the request packet.
*/
static request_t *
-qla2x00_req_pkt(scsi_qla_host_t *ha)
+qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
+ struct rsp_que *rsp)
{
- device_reg_t __iomem *reg = ha->iobase;
+ struct qla_hw_data *ha = vha->hw;
+ device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
request_t *pkt = NULL;
uint16_t cnt;
uint32_t *dword_ptr;
@@ -488,24 +503,29 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
/* Wait 1 second for slot. */
for (timer = HZ; timer; timer--) {
- if ((req_cnt + 2) >= ha->req_q_cnt) {
+ if ((req_cnt + 2) >= req->cnt) {
/* Calculate number of free request entries. */
- if (IS_FWI2_CAPABLE(ha))
- cnt = (uint16_t)RD_REG_DWORD(
- &reg->isp24.req_q_out);
- else
- cnt = qla2x00_debounce_register(
- ISP_REQ_Q_OUT(ha, &reg->isp));
- if (ha->req_ring_index < cnt)
- ha->req_q_cnt = cnt - ha->req_ring_index;
+ if (ha->mqenable)
+ cnt = (uint16_t)
+ RD_REG_DWORD(&reg->isp25mq.req_q_out);
+ else {
+ if (IS_FWI2_CAPABLE(ha))
+ cnt = (uint16_t)RD_REG_DWORD(
+ &reg->isp24.req_q_out);
+ else
+ cnt = qla2x00_debounce_register(
+ ISP_REQ_Q_OUT(ha, &reg->isp));
+ }
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
else
- ha->req_q_cnt = ha->request_q_length -
- (ha->req_ring_index - cnt);
+ req->cnt = req->length -
+ (req->ring_index - cnt);
}
/* If room for request in request ring. */
- if ((req_cnt + 2) < ha->req_q_cnt) {
- ha->req_q_cnt--;
- pkt = ha->request_ring_ptr;
+ if ((req_cnt + 2) < req->cnt) {
+ req->cnt--;
+ pkt = req->ring_ptr;
/* Zero out packet. */
dword_ptr = (uint32_t *)pkt;
@@ -513,7 +533,7 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
*dword_ptr++ = 0;
/* Set system defined field. */
- pkt->sys_define = (uint8_t)ha->req_ring_index;
+ pkt->sys_define = (uint8_t)req->ring_index;
/* Set entry count. */
pkt->entry_count = 1;
@@ -522,15 +542,14 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
}
/* Release ring specific lock */
- spin_unlock(&ha->hardware_lock);
+ spin_unlock_irq(&ha->hardware_lock);
udelay(2); /* 2 us */
/* Check for pending interrupts. */
/* During init we issue marker directly */
- if (!ha->marker_needed && !ha->flags.init_done)
- qla2x00_poll(ha);
-
+ if (!vha->marker_needed && !vha->flags.init_done)
+ qla2x00_poll(rsp);
spin_lock_irq(&ha->hardware_lock);
}
if (!pkt) {
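The free-entry computation above (and in both start_scsi paths) treats the request ring as circular: when the producer index has wrapped past the chip's out-pointer, the free span runs to the end of the ring and around. As a one-line sketch with a worked example:

/* 'in' is the driver's producer ring index, 'out' the consumer index read
 * back from the chip, 'length' the ring size in slots. */
static unsigned int ring_free_entries(unsigned int in, unsigned int out,
				      unsigned int length)
{
	return (in < out) ? out - in : length - (in - out);
}
/* e.g. length = 128, in = 120, out = 10: 128 - (120 - 10) = 18 free slots. */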
@@ -547,29 +566,38 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
* Note: The caller must hold the hardware lock before calling this routine.
*/
static void
-qla2x00_isp_cmd(scsi_qla_host_t *ha)
+qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
- device_reg_t __iomem *reg = ha->iobase;
+ struct qla_hw_data *ha = vha->hw;
+ device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
+ struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
DEBUG5(printk("%s(): IOCB data:\n", __func__));
DEBUG5(qla2x00_dump_buffer(
- (uint8_t *)ha->request_ring_ptr, REQUEST_ENTRY_SIZE));
+ (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));
/* Adjust ring index. */
- ha->req_ring_index++;
- if (ha->req_ring_index == ha->request_q_length) {
- ha->req_ring_index = 0;
- ha->request_ring_ptr = ha->request_ring;
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
} else
- ha->request_ring_ptr++;
+ req->ring_ptr++;
/* Set chip new ring index. */
- if (IS_FWI2_CAPABLE(ha)) {
- WRT_REG_DWORD(&reg->isp24.req_q_in, ha->req_ring_index);
- RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
- } else {
- WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), ha->req_ring_index);
- RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+ if (ha->mqenable) {
+ WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
+ RD_REG_DWORD(&ioreg->hccr);
+	} else {
+ if (IS_FWI2_CAPABLE(ha)) {
+ WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
+ RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+ } else {
+ WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
+ req->ring_index);
+ RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+ }
}
}
@@ -610,10 +638,11 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
{
uint16_t avail_dsds;
uint32_t *cur_dsd;
- scsi_qla_host_t *ha;
+ scsi_qla_host_t *vha;
struct scsi_cmnd *cmd;
struct scatterlist *sg;
int i;
+ struct req_que *req;
cmd = sp->cmd;
@@ -627,18 +656,19 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
return;
}
- ha = sp->ha;
+ vha = sp->vha;
+ req = sp->que;
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_WRITE_DATA);
- sp->fcport->ha->qla_stats.output_bytes +=
+ sp->fcport->vha->hw->qla_stats.output_bytes +=
scsi_bufflen(sp->cmd);
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_READ_DATA);
- sp->fcport->ha->qla_stats.input_bytes +=
+ sp->fcport->vha->hw->qla_stats.input_bytes +=
scsi_bufflen(sp->cmd);
}
@@ -658,7 +688,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
avail_dsds = 5;
}
@@ -683,8 +713,6 @@ qla24xx_start_scsi(srb_t *sp)
{
int ret, nseg;
unsigned long flags;
- scsi_qla_host_t *ha, *pha;
- struct scsi_cmnd *cmd;
uint32_t *clr_ptr;
uint32_t index;
uint32_t handle;
@@ -692,35 +720,45 @@ qla24xx_start_scsi(srb_t *sp)
uint16_t cnt;
uint16_t req_cnt;
uint16_t tot_dsds;
- struct device_reg_24xx __iomem *reg;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
+ struct scsi_cmnd *cmd = sp->cmd;
+ struct scsi_qla_host *vha = sp->vha;
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t que_id;
/* Setup device pointers. */
ret = 0;
- ha = sp->ha;
- pha = to_qla_parent(ha);
- reg = &ha->iobase->isp24;
- cmd = sp->cmd;
+ que_id = vha->req_ques[0];
+
+ req = ha->req_q_map[que_id];
+ sp->que = req;
+
+ if (req->rsp)
+ rsp = req->rsp;
+ else
+ rsp = ha->rsp_q_map[que_id];
/* So we know we haven't pci_map'ed anything yet */
tot_dsds = 0;
/* Send marker if required */
- if (ha->marker_needed != 0) {
- if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
+ != QLA_SUCCESS)
return QLA_FUNCTION_FAILED;
- }
- ha->marker_needed = 0;
+ vha->marker_needed = 0;
}
/* Acquire ring specific lock */
- spin_lock_irqsave(&pha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
/* Check for room in outstanding command list. */
- handle = ha->current_outstanding_cmd;
+ handle = req->current_outstanding_cmd;
for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
handle++;
if (handle == MAX_OUTSTANDING_COMMANDS)
handle = 1;
- if (!ha->outstanding_cmds[handle])
+ if (!req->outstanding_cmds[handle])
break;
}
if (index == MAX_OUTSTANDING_COMMANDS)
@@ -738,25 +776,26 @@ qla24xx_start_scsi(srb_t *sp)
tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(tot_dsds);
- if (ha->req_q_cnt < (req_cnt + 2)) {
- cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out);
- if (ha->req_ring_index < cnt)
- ha->req_q_cnt = cnt - ha->req_ring_index;
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = ha->isp_ops->rd_req_reg(ha, req->id);
+
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
else
- ha->req_q_cnt = ha->request_q_length -
- (ha->req_ring_index - cnt);
+ req->cnt = req->length -
+ (req->ring_index - cnt);
}
- if (ha->req_q_cnt < (req_cnt + 2))
+ if (req->cnt < (req_cnt + 2))
goto queuing_error;
/* Build command packet. */
- ha->current_outstanding_cmd = handle;
- ha->outstanding_cmds[handle] = sp;
- sp->ha = ha;
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->vha = vha;
sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
- ha->req_q_cnt -= req_cnt;
+ req->cnt -= req_cnt;
- cmd_pkt = (struct cmd_type_7 *)ha->request_ring_ptr;
+ cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
cmd_pkt->handle = handle;
/* Zero out remaining portion of packet. */
@@ -789,32 +828,63 @@ qla24xx_start_scsi(srb_t *sp)
wmb();
/* Adjust ring index. */
- ha->req_ring_index++;
- if (ha->req_ring_index == ha->request_q_length) {
- ha->req_ring_index = 0;
- ha->request_ring_ptr = ha->request_ring;
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
} else
- ha->request_ring_ptr++;
+ req->ring_ptr++;
sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */
- WRT_REG_DWORD(&reg->req_q_in, ha->req_ring_index);
- RD_REG_DWORD_RELAXED(&reg->req_q_in); /* PCI Posting. */
+ ha->isp_ops->wrt_req_reg(ha, req->id, req->ring_index);
/* Manage unprocessed RIO/ZIO commands in response queue. */
- if (ha->flags.process_response_queue &&
- ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
- qla24xx_process_response_queue(ha);
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(rsp);
- spin_unlock_irqrestore(&pha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
queuing_error:
if (tot_dsds)
scsi_dma_unmap(cmd);
- spin_unlock_irqrestore(&pha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_FUNCTION_FAILED;
}
+
+uint16_t
+qla24xx_rd_req_reg(struct qla_hw_data *ha, uint16_t id)
+{
+ device_reg_t __iomem *reg = (void *) ha->iobase;
+ return RD_REG_DWORD_RELAXED(&reg->isp24.req_q_out);
+}
+
+uint16_t
+qla25xx_rd_req_reg(struct qla_hw_data *ha, uint16_t id)
+{
+ device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
+ return RD_REG_DWORD_RELAXED(&reg->isp25mq.req_q_out);
+}
+
+void
+qla24xx_wrt_req_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
+{
+ device_reg_t __iomem *reg = (void *) ha->iobase;
+ WRT_REG_DWORD(&reg->isp24.req_q_in, index);
+ RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+}
+
+void
+qla25xx_wrt_req_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
+{
+ device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
+ struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
+ WRT_REG_DWORD(&reg->isp25mq.req_q_in, index);
+ RD_REG_DWORD(&ioreg->hccr); /* PCI posting */
+}
+
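The 25xx accessors above locate each queue's doorbell registers at a fixed page offset from the multiqueue BAR (ha->mqiobase + QLA_QUE_PAGE * id), while the 24xx accessors keep using the single legacy register window. A sketch of that per-queue addressing; QLA_QUE_PAGE_DEMO is an assumed value, not the driver's constant:

#include <stddef.h>

#define QLA_QUE_PAGE_DEMO 0x1000u	/* hypothetical per-queue window size */

/* Queue N's register block sits N pages into the multiqueue BAR. */
static void *mq_que_reg_base(void *mqiobase, unsigned int id)
{
	return (unsigned char *)mqiobase + (size_t)QLA_QUE_PAGE_DEMO * id;
}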
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index fc4bfa7f839c..d5fb79a88001 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -10,10 +10,13 @@
#include <scsi/scsi_tcq.h>
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
-static void qla2x00_process_completed_request(struct scsi_qla_host *, uint32_t);
-static void qla2x00_status_entry(scsi_qla_host_t *, void *);
+static void qla2x00_process_completed_request(struct scsi_qla_host *,
+ struct req_que *, uint32_t);
+static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
-static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *);
+static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
+ sts_entry_t *);
+static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *);
/**
* qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -27,24 +30,28 @@ static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *);
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
- scsi_qla_host_t *ha;
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
struct device_reg_2xxx __iomem *reg;
int status;
unsigned long iter;
uint16_t hccr;
uint16_t mb[4];
+ struct rsp_que *rsp;
- ha = (scsi_qla_host_t *) dev_id;
- if (!ha) {
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
printk(KERN_INFO
- "%s(): NULL host pointer\n", __func__);
+ "%s(): NULL response queue pointer\n", __func__);
return (IRQ_NONE);
}
+ ha = rsp->hw;
reg = &ha->iobase->isp;
status = 0;
spin_lock(&ha->hardware_lock);
+ vha = qla2x00_get_rsp_host(rsp);
for (iter = 50; iter--; ) {
hccr = RD_REG_WORD(&reg->hccr);
if (hccr & HCCR_RISC_PAUSE) {
@@ -59,8 +66,8 @@ qla2100_intr_handler(int irq, void *dev_id)
WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
RD_REG_WORD(&reg->hccr);
- ha->isp_ops->fw_dump(ha, 1);
- set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+ ha->isp_ops->fw_dump(vha, 1);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
break;
@@ -72,24 +79,24 @@ qla2100_intr_handler(int irq, void *dev_id)
/* Get mailbox data. */
mb[0] = RD_MAILBOX_REG(ha, reg, 0);
if (mb[0] > 0x3fff && mb[0] < 0x8000) {
- qla2x00_mbx_completion(ha, mb[0]);
+ qla2x00_mbx_completion(vha, mb[0]);
status |= MBX_INTERRUPT;
} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
mb[1] = RD_MAILBOX_REG(ha, reg, 1);
mb[2] = RD_MAILBOX_REG(ha, reg, 2);
mb[3] = RD_MAILBOX_REG(ha, reg, 3);
- qla2x00_async_event(ha, mb);
+ qla2x00_async_event(vha, rsp, mb);
} else {
/*EMPTY*/
DEBUG2(printk("scsi(%ld): Unrecognized "
"interrupt type (%d).\n",
- ha->host_no, mb[0]));
+ vha->host_no, mb[0]));
}
/* Release mailbox registers. */
WRT_REG_WORD(&reg->semaphore, 0);
RD_REG_WORD(&reg->semaphore);
} else {
- qla2x00_process_response_queue(ha);
+ qla2x00_process_response_queue(rsp);
WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
RD_REG_WORD(&reg->hccr);
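
This hunk shows the core of the interrupt-path conversion: dev_id now carries a struct rsp_que * instead of the host, so the handler derives the hardware context from rsp->hw and the issuing host from qla2x00_get_rsp_host(). The registration side follows the same convention, as in this sketch (mirroring the request_irq() call made later in qla2x00_request_irqs()):

/* Sketch: register an interrupt handler whose dev_id is the
 * response queue; the same pointer must be handed to free_irq(). */
static int register_rsp_irq_sketch(struct qla_hw_data *ha,
    struct rsp_que *rsp, irq_handler_t handler)
{
	return request_irq(ha->pdev->irq, handler,
	    IRQF_DISABLED | IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
}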
@@ -118,25 +125,29 @@ qla2100_intr_handler(int irq, void *dev_id)
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
- scsi_qla_host_t *ha;
+ scsi_qla_host_t *vha;
struct device_reg_2xxx __iomem *reg;
int status;
unsigned long iter;
uint32_t stat;
uint16_t hccr;
uint16_t mb[4];
+ struct rsp_que *rsp;
+ struct qla_hw_data *ha;
- ha = (scsi_qla_host_t *) dev_id;
- if (!ha) {
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
printk(KERN_INFO
- "%s(): NULL host pointer\n", __func__);
+ "%s(): NULL response queue pointer\n", __func__);
return (IRQ_NONE);
}
+ ha = rsp->hw;
reg = &ha->iobase->isp;
status = 0;
spin_lock(&ha->hardware_lock);
+ vha = qla2x00_get_rsp_host(rsp);
for (iter = 50; iter--; ) {
stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
if (stat & HSR_RISC_PAUSED) {
@@ -159,8 +170,8 @@ qla2300_intr_handler(int irq, void *dev_id)
WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
RD_REG_WORD(&reg->hccr);
- ha->isp_ops->fw_dump(ha, 1);
- set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+ ha->isp_ops->fw_dump(vha, 1);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
} else if ((stat & HSR_RISC_INT) == 0)
break;
@@ -170,7 +181,7 @@ qla2300_intr_handler(int irq, void *dev_id)
case 0x2:
case 0x10:
case 0x11:
- qla2x00_mbx_completion(ha, MSW(stat));
+ qla2x00_mbx_completion(vha, MSW(stat));
status |= MBX_INTERRUPT;
/* Release mailbox registers. */
@@ -181,26 +192,26 @@ qla2300_intr_handler(int irq, void *dev_id)
mb[1] = RD_MAILBOX_REG(ha, reg, 1);
mb[2] = RD_MAILBOX_REG(ha, reg, 2);
mb[3] = RD_MAILBOX_REG(ha, reg, 3);
- qla2x00_async_event(ha, mb);
+ qla2x00_async_event(vha, rsp, mb);
break;
case 0x13:
- qla2x00_process_response_queue(ha);
+ qla2x00_process_response_queue(rsp);
break;
case 0x15:
mb[0] = MBA_CMPLT_1_16BIT;
mb[1] = MSW(stat);
- qla2x00_async_event(ha, mb);
+ qla2x00_async_event(vha, rsp, mb);
break;
case 0x16:
mb[0] = MBA_SCSI_COMPLETION;
mb[1] = MSW(stat);
mb[2] = RD_MAILBOX_REG(ha, reg, 2);
- qla2x00_async_event(ha, mb);
+ qla2x00_async_event(vha, rsp, mb);
break;
default:
DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
"(%d).\n",
- ha->host_no, stat & 0xff));
+ vha->host_no, stat & 0xff));
break;
}
WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
@@ -223,10 +234,11 @@ qla2300_intr_handler(int irq, void *dev_id)
* @mb0: Mailbox0 register
*/
static void
-qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
+qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
uint16_t cnt;
uint16_t __iomem *wptr;
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
/* Load return mailbox registers. */
@@ -247,10 +259,10 @@ qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
if (ha->mcp) {
DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
- __func__, ha->host_no, ha->mcp->mb[0]));
+ __func__, vha->host_no, ha->mcp->mb[0]));
} else {
DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
- __func__, ha->host_no));
+ __func__, vha->host_no));
}
}
@@ -260,7 +272,7 @@ qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
* @mb: Mailbox registers (0 - 3)
*/
void
-qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
+qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
#define LS_UNKNOWN 2
static char *link_speeds[5] = { "1", "2", "?", "4", "8" };
@@ -268,6 +280,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
uint16_t handle_cnt;
uint16_t cnt;
uint32_t handles[5];
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
uint32_t rscn_entry, host_pid;
uint8_t rscn_queue_index;
@@ -329,17 +342,19 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
switch (mb[0]) {
case MBA_SCSI_COMPLETION: /* Fast Post */
- if (!ha->flags.online)
+ if (!vha->flags.online)
break;
for (cnt = 0; cnt < handle_cnt; cnt++)
- qla2x00_process_completed_request(ha, handles[cnt]);
+ qla2x00_process_completed_request(vha, rsp->req,
+ handles[cnt]);
break;
case MBA_RESET: /* Reset */
- DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", ha->host_no));
+ DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n",
+ vha->host_no));
- set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
+ set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
break;
case MBA_SYSTEM_ERR: /* System Error */
@@ -347,70 +362,70 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
"ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
mb[1], mb[2], mb[3]);
- qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]);
- ha->isp_ops->fw_dump(ha, 1);
+ qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
+ ha->isp_ops->fw_dump(vha, 1);
if (IS_FWI2_CAPABLE(ha)) {
if (mb[1] == 0 && mb[2] == 0) {
qla_printk(KERN_ERR, ha,
"Unrecoverable Hardware Error: adapter "
"marked OFFLINE!\n");
- ha->flags.online = 0;
+ vha->flags.online = 0;
} else
- set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
} else if (mb[1] == 0) {
qla_printk(KERN_INFO, ha,
"Unrecoverable Hardware Error: adapter marked "
"OFFLINE!\n");
- ha->flags.online = 0;
+ vha->flags.online = 0;
} else
- set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
- ha->host_no));
+ vha->host_no));
qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
- qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]);
- set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+ qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
- ha->host_no));
+ vha->host_no));
qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
- qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]);
- set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+ qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
- ha->host_no));
+ vha->host_no));
break;
case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
- DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", ha->host_no,
+ DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no,
mb[1]));
qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
- if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
- atomic_set(&ha->loop_state, LOOP_DOWN);
- atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
- qla2x00_mark_all_devices_lost(ha, 1);
+ if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ qla2x00_mark_all_devices_lost(vha, 1);
}
- if (ha->parent) {
- atomic_set(&ha->vp_state, VP_FAILED);
- fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
+ if (vha->vp_idx) {
+ atomic_set(&vha->vp_state, VP_FAILED);
+ fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
}
- set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
- set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
+ set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
+ set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
- ha->flags.management_server_logged_in = 0;
- qla2x00_post_aen_work(ha, FCH_EVT_LIP, mb[1]);
+ vha->flags.management_server_logged_in = 0;
+ qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
break;
case MBA_LOOP_UP: /* Loop Up Event */
@@ -425,59 +440,59 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
}
DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
- ha->host_no, link_speed));
+ vha->host_no, link_speed));
qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
link_speed);
- ha->flags.management_server_logged_in = 0;
- qla2x00_post_aen_work(ha, FCH_EVT_LINKUP, ha->link_data_rate);
+ vha->flags.management_server_logged_in = 0;
+ qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
break;
case MBA_LOOP_DOWN: /* Loop Down Event */
DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
- "(%x %x %x).\n", ha->host_no, mb[1], mb[2], mb[3]));
+ "(%x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3]));
qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n",
mb[1], mb[2], mb[3]);
- if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
- atomic_set(&ha->loop_state, LOOP_DOWN);
- atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
- ha->device_flags |= DFLG_NO_CABLE;
- qla2x00_mark_all_devices_lost(ha, 1);
+ if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ vha->device_flags |= DFLG_NO_CABLE;
+ qla2x00_mark_all_devices_lost(vha, 1);
}
- if (ha->parent) {
- atomic_set(&ha->vp_state, VP_FAILED);
- fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
+ if (vha->vp_idx) {
+ atomic_set(&vha->vp_state, VP_FAILED);
+ fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
}
- ha->flags.management_server_logged_in = 0;
+ vha->flags.management_server_logged_in = 0;
ha->link_data_rate = PORT_SPEED_UNKNOWN;
- qla2x00_post_aen_work(ha, FCH_EVT_LINKDOWN, 0);
+ qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
break;
case MBA_LIP_RESET: /* LIP reset occurred */
DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
- ha->host_no, mb[1]));
+ vha->host_no, mb[1]));
qla_printk(KERN_INFO, ha,
"LIP reset occurred (%x).\n", mb[1]);
- if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
- atomic_set(&ha->loop_state, LOOP_DOWN);
- atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
- qla2x00_mark_all_devices_lost(ha, 1);
+ if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ qla2x00_mark_all_devices_lost(vha, 1);
}
- if (ha->parent) {
- atomic_set(&ha->vp_state, VP_FAILED);
- fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
+ if (vha->vp_idx) {
+ atomic_set(&vha->vp_state, VP_FAILED);
+ fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
}
- set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
+ set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
ha->operating_mode = LOOP;
- ha->flags.management_server_logged_in = 0;
- qla2x00_post_aen_work(ha, FCH_EVT_LIPRESET, mb[1]);
+ vha->flags.management_server_logged_in = 0;
+ qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
break;
case MBA_POINT_TO_POINT: /* Point-to-Point */
@@ -485,33 +500,33 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
break;
DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n",
- ha->host_no));
+ vha->host_no));
/*
* Until there's a transition from loop down to loop up, treat
* this as loop down only.
*/
- if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
- atomic_set(&ha->loop_state, LOOP_DOWN);
- if (!atomic_read(&ha->loop_down_timer))
- atomic_set(&ha->loop_down_timer,
+ if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ if (!atomic_read(&vha->loop_down_timer))
+ atomic_set(&vha->loop_down_timer,
LOOP_DOWN_TIME);
- qla2x00_mark_all_devices_lost(ha, 1);
+ qla2x00_mark_all_devices_lost(vha, 1);
}
- if (ha->parent) {
- atomic_set(&ha->vp_state, VP_FAILED);
- fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
+ if (vha->vp_idx) {
+ atomic_set(&vha->vp_state, VP_FAILED);
+ fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
}
- if (!(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) {
- set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
- }
- set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
- set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
+ if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
+ set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
+
+ set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
+ set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
ha->flags.gpsc_supported = 1;
- ha->flags.management_server_logged_in = 0;
+ vha->flags.management_server_logged_in = 0;
break;
case MBA_CHG_IN_CONNECTION: /* Change in connection mode */
@@ -520,134 +535,137 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
"received.\n",
- ha->host_no));
+ vha->host_no));
qla_printk(KERN_INFO, ha,
"Configuration change detected: value=%x.\n", mb[1]);
- if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
- atomic_set(&ha->loop_state, LOOP_DOWN);
- if (!atomic_read(&ha->loop_down_timer))
- atomic_set(&ha->loop_down_timer,
+ if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ if (!atomic_read(&vha->loop_down_timer))
+ atomic_set(&vha->loop_down_timer,
LOOP_DOWN_TIME);
- qla2x00_mark_all_devices_lost(ha, 1);
+ qla2x00_mark_all_devices_lost(vha, 1);
}
- if (ha->parent) {
- atomic_set(&ha->vp_state, VP_FAILED);
- fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
+ if (vha->vp_idx) {
+ atomic_set(&vha->vp_state, VP_FAILED);
+ fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
}
- set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
- set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
break;
case MBA_PORT_UPDATE: /* Port database update */
+ /* Only handle SCNs for our Vport index. */
+ if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
+ break;
+
/*
* If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
* event etc. earlier indicating loop is down) then process
* it. Otherwise ignore it and Wait for RSCN to come in.
*/
- atomic_set(&ha->loop_down_timer, 0);
- if (atomic_read(&ha->loop_state) != LOOP_DOWN &&
- atomic_read(&ha->loop_state) != LOOP_DEAD) {
+ atomic_set(&vha->loop_down_timer, 0);
+ if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
+ atomic_read(&vha->loop_state) != LOOP_DEAD) {
DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
- "ignored %04x/%04x/%04x.\n", ha->host_no, mb[1],
+ "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1],
mb[2], mb[3]));
break;
}
DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
- ha->host_no));
+ vha->host_no));
DEBUG(printk(KERN_INFO
"scsi(%ld): Port database changed %04x %04x %04x.\n",
- ha->host_no, mb[1], mb[2], mb[3]));
+ vha->host_no, mb[1], mb[2], mb[3]));
/*
* Mark all devices as missing so we will login again.
*/
- atomic_set(&ha->loop_state, LOOP_UP);
+ atomic_set(&vha->loop_state, LOOP_UP);
- qla2x00_mark_all_devices_lost(ha, 1);
+ qla2x00_mark_all_devices_lost(vha, 1);
- ha->flags.rscn_queue_overflow = 1;
+ vha->flags.rscn_queue_overflow = 1;
- set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
- set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
break;
case MBA_RSCN_UPDATE: /* State Change Registration */
/* Check if the Vport has issued a SCR */
- if (ha->parent && test_bit(VP_SCR_NEEDED, &ha->vp_flags))
+ if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
break;
/* Only handle SCNs for our Vport index. */
- if (ha->parent && ha->vp_idx != (mb[3] & 0xff))
+ if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
break;
-
DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n",
- ha->host_no));
+ vha->host_no));
DEBUG(printk(KERN_INFO
"scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
- ha->host_no, mb[1], mb[2], mb[3]));
+ vha->host_no, mb[1], mb[2], mb[3]));
rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
- host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) |
- ha->d_id.b.al_pa;
+ host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
+ | vha->d_id.b.al_pa;
if (rscn_entry == host_pid) {
DEBUG(printk(KERN_INFO
"scsi(%ld): Ignoring RSCN update to local host "
"port ID (%06x)\n",
- ha->host_no, host_pid));
+ vha->host_no, host_pid));
break;
}
/* Ignore reserved bits from RSCN-payload. */
rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
- rscn_queue_index = ha->rscn_in_ptr + 1;
+ rscn_queue_index = vha->rscn_in_ptr + 1;
if (rscn_queue_index == MAX_RSCN_COUNT)
rscn_queue_index = 0;
- if (rscn_queue_index != ha->rscn_out_ptr) {
- ha->rscn_queue[ha->rscn_in_ptr] = rscn_entry;
- ha->rscn_in_ptr = rscn_queue_index;
+ if (rscn_queue_index != vha->rscn_out_ptr) {
+ vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
+ vha->rscn_in_ptr = rscn_queue_index;
} else {
- ha->flags.rscn_queue_overflow = 1;
+ vha->flags.rscn_queue_overflow = 1;
}
- atomic_set(&ha->loop_state, LOOP_UPDATE);
- atomic_set(&ha->loop_down_timer, 0);
- ha->flags.management_server_logged_in = 0;
+ atomic_set(&vha->loop_state, LOOP_UPDATE);
+ atomic_set(&vha->loop_down_timer, 0);
+ vha->flags.management_server_logged_in = 0;
- set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
- set_bit(RSCN_UPDATE, &ha->dpc_flags);
- qla2x00_post_aen_work(ha, FCH_EVT_RSCN, rscn_entry);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(RSCN_UPDATE, &vha->dpc_flags);
+ qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
break;
/* case MBA_RIO_RESPONSE: */
case MBA_ZIO_RESPONSE:
DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n",
- ha->host_no));
+ vha->host_no));
DEBUG(printk(KERN_INFO
"scsi(%ld): [R|Z]IO update completion.\n",
- ha->host_no));
+ vha->host_no));
if (IS_FWI2_CAPABLE(ha))
- qla24xx_process_response_queue(ha);
+ qla24xx_process_response_queue(rsp);
else
- qla2x00_process_response_queue(ha);
+ qla2x00_process_response_queue(rsp);
break;
case MBA_DISCARD_RND_FRAME:
DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
- "%04x.\n", ha->host_no, mb[1], mb[2], mb[3]));
+ "%04x.\n", vha->host_no, mb[1], mb[2], mb[3]));
break;
case MBA_TRACE_NOTIFICATION:
DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
- ha->host_no, mb[1], mb[2]));
+ vha->host_no, mb[1], mb[2]));
break;
case MBA_ISP84XX_ALERT:
DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
- "%04x %04x %04x\n", ha->host_no, mb[1], mb[2], mb[3]));
+ "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
switch (mb[1]) {
@@ -682,16 +700,22 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
break;
}
- if (!ha->parent && ha->num_vhosts)
- qla2x00_alert_all_vps(ha, mb);
+ if (!vha->vp_idx && ha->num_vhosts)
+ qla2x00_alert_all_vps(rsp, mb);
}
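
Two of the cases above (PORT UPDATE and RSCN) now filter notifications by vport: the firmware reports the affected vport index in the low byte of mb[3], and a vport (vp_idx != 0) drops events addressed to a different index. The test both cases apply reduces to:

/* Sketch: true when a state-change notification targets some other
 * vport; mb[3] & 0xff is the vport index reported by firmware. */
static bool scn_is_for_other_vport(struct scsi_qla_host *vha, uint16_t *mb)
{
	return vha->vp_idx && vha->vp_idx != (mb[3] & 0xff);
}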
static void
qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
{
fc_port_t *fcport = data;
+ struct scsi_qla_host *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = NULL;
- if (fcport->ha->max_q_depth <= sdev->queue_depth)
+ req = ha->req_q_map[vha->req_ques[0]];
+ if (!req)
+ return;
+ if (req->max_q_depth <= sdev->queue_depth)
return;
if (sdev->ordered_tags)
@@ -703,9 +727,9 @@ qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
fcport->last_ramp_up = jiffies;
- DEBUG2(qla_printk(KERN_INFO, fcport->ha,
+ DEBUG2(qla_printk(KERN_INFO, ha,
"scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
- fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun,
+ fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
sdev->queue_depth));
}
@@ -717,20 +741,21 @@ qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
return;
- DEBUG2(qla_printk(KERN_INFO, fcport->ha,
+ DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
"scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
- fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun,
+ fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
sdev->queue_depth));
}
static inline void
-qla2x00_ramp_up_queue_depth(scsi_qla_host_t *ha, srb_t *sp)
+qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
+ srb_t *sp)
{
fc_port_t *fcport;
struct scsi_device *sdev;
sdev = sp->cmd->device;
- if (sdev->queue_depth >= ha->max_q_depth)
+ if (sdev->queue_depth >= req->max_q_depth)
return;
fcport = sp->fcport;
@@ -751,25 +776,27 @@ qla2x00_ramp_up_queue_depth(scsi_qla_host_t *ha, srb_t *sp)
* @index: SRB index
*/
static void
-qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index)
+qla2x00_process_completed_request(struct scsi_qla_host *vha,
+ struct req_que *req, uint32_t index)
{
srb_t *sp;
+ struct qla_hw_data *ha = vha->hw;
/* Validate handle. */
if (index >= MAX_OUTSTANDING_COMMANDS) {
DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
- ha->host_no, index));
+ vha->host_no, index));
qla_printk(KERN_WARNING, ha,
"Invalid SCSI completion handle %d.\n", index);
- set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
return;
}
- sp = ha->outstanding_cmds[index];
+ sp = req->outstanding_cmds[index];
if (sp) {
/* Free outstanding command slot. */
- ha->outstanding_cmds[index] = NULL;
+ req->outstanding_cmds[index] = NULL;
CMD_COMPL_STATUS(sp->cmd) = 0L;
CMD_SCSI_STATUS(sp->cmd) = 0L;
@@ -777,15 +804,15 @@ qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index)
/* Save ISP completion status */
sp->cmd->result = DID_OK << 16;
- qla2x00_ramp_up_queue_depth(ha, sp);
+ qla2x00_ramp_up_queue_depth(vha, req, sp);
qla2x00_sp_compl(ha, sp);
} else {
DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
- ha->host_no));
+ vha->host_no));
qla_printk(KERN_WARNING, ha,
"Invalid ISP SCSI completion handle\n");
- set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
}
}
@@ -794,32 +821,36 @@ qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index)
* @ha: SCSI driver HA context
*/
void
-qla2x00_process_response_queue(struct scsi_qla_host *ha)
+qla2x00_process_response_queue(struct rsp_que *rsp)
{
+ struct scsi_qla_host *vha;
+ struct qla_hw_data *ha = rsp->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
sts_entry_t *pkt;
uint16_t handle_cnt;
uint16_t cnt;
- if (!ha->flags.online)
+ vha = qla2x00_get_rsp_host(rsp);
+
+ if (!vha->flags.online)
return;
- while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
- pkt = (sts_entry_t *)ha->response_ring_ptr;
+ while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
+ pkt = (sts_entry_t *)rsp->ring_ptr;
- ha->rsp_ring_index++;
- if (ha->rsp_ring_index == ha->response_q_length) {
- ha->rsp_ring_index = 0;
- ha->response_ring_ptr = ha->response_ring;
+ rsp->ring_index++;
+ if (rsp->ring_index == rsp->length) {
+ rsp->ring_index = 0;
+ rsp->ring_ptr = rsp->ring;
} else {
- ha->response_ring_ptr++;
+ rsp->ring_ptr++;
}
if (pkt->entry_status != 0) {
DEBUG3(printk(KERN_INFO
- "scsi(%ld): Process error entry.\n", ha->host_no));
+ "scsi(%ld): Process error entry.\n", vha->host_no));
- qla2x00_error_entry(ha, pkt);
+ qla2x00_error_entry(vha, rsp, pkt);
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
wmb();
continue;
@@ -827,31 +858,31 @@ qla2x00_process_response_queue(struct scsi_qla_host *ha)
switch (pkt->entry_type) {
case STATUS_TYPE:
- qla2x00_status_entry(ha, pkt);
+ qla2x00_status_entry(vha, rsp, pkt);
break;
case STATUS_TYPE_21:
handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
for (cnt = 0; cnt < handle_cnt; cnt++) {
- qla2x00_process_completed_request(ha,
+ qla2x00_process_completed_request(vha, rsp->req,
((sts21_entry_t *)pkt)->handle[cnt]);
}
break;
case STATUS_TYPE_22:
handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
for (cnt = 0; cnt < handle_cnt; cnt++) {
- qla2x00_process_completed_request(ha,
+ qla2x00_process_completed_request(vha, rsp->req,
((sts22_entry_t *)pkt)->handle[cnt]);
}
break;
case STATUS_CONT_TYPE:
- qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt);
+ qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
break;
default:
/* Type Not Supported. */
DEBUG4(printk(KERN_WARNING
"scsi(%ld): Received unknown response pkt type %x "
"entry status=%x.\n",
- ha->host_no, pkt->entry_type, pkt->entry_status));
+ vha->host_no, pkt->entry_type, pkt->entry_status));
break;
}
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -859,7 +890,7 @@ qla2x00_process_response_queue(struct scsi_qla_host *ha)
}
/* Adjust ring index */
- WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), ha->rsp_ring_index);
+ WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}
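
The consumer loop above detects work by signature rather than by comparing producer/consumer indices: every handled entry is stamped RESPONSE_PROCESSED, so the driver walks the ring until it meets an entry it already consumed and only then publishes its out-pointer. Condensed to the bare idiom (dispatch elided), the loop is:

/* Sketch: drain a response ring by signature, then publish the
 * consumer index -- the shape of the function above. */
static void rsp_ring_drain_sketch(struct rsp_que *rsp)
{
	response_t *pkt;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = rsp->ring_ptr;

		/* Advance the consumer position, wrapping at the end. */
		if (++rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else
			rsp->ring_ptr++;

		/* ... dispatch pkt by entry_type here ... */

		pkt->signature = RESPONSE_PROCESSED;
		wmb();	/* stamp must land before the index update */
	}
}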
static inline void
@@ -881,10 +912,10 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
sp->request_sense_ptr += sense_len;
sp->request_sense_length -= sense_len;
if (sp->request_sense_length != 0)
- sp->fcport->ha->status_srb = sp;
+ sp->fcport->vha->status_srb = sp;
DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
- "cmd=%p pid=%ld\n", __func__, sp->fcport->ha->host_no,
+ "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
cp->device->channel, cp->device->id, cp->device->lun, cp,
cp->serial_number));
if (sense_len)
@@ -898,7 +929,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
* @pkt: Entry pointer
*/
static void
-qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
+qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
srb_t *sp;
fc_port_t *fcport;
@@ -911,6 +942,8 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
int32_t resid;
uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len;
uint8_t *rsp_info, *sense_data;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = rsp->req;
sts = (sts_entry_t *) pkt;
sts24 = (struct sts_entry_24xx *) pkt;
@@ -924,31 +957,31 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
/* Fast path completion. */
if (comp_status == CS_COMPLETE && scsi_status == 0) {
- qla2x00_process_completed_request(ha, sts->handle);
+ qla2x00_process_completed_request(vha, req, sts->handle);
return;
}
/* Validate handle. */
if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
- sp = ha->outstanding_cmds[sts->handle];
- ha->outstanding_cmds[sts->handle] = NULL;
+ sp = req->outstanding_cmds[sts->handle];
+ req->outstanding_cmds[sts->handle] = NULL;
} else
sp = NULL;
if (sp == NULL) {
DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
- ha->host_no));
+ vha->host_no));
qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");
- set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
- qla2xxx_wake_dpc(ha);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
return;
}
cp = sp->cmd;
if (cp == NULL) {
DEBUG2(printk("scsi(%ld): Command already returned back to OS "
- "pkt->handle=%d sp=%p.\n", ha->host_no, sts->handle, sp));
+ "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp));
qla_printk(KERN_WARNING, ha,
"Command is NULL: already returned to OS (sp=%p)\n", sp);
@@ -987,7 +1020,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
if (rsp_info_len > 3 && rsp_info[3]) {
DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
"failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
- "retrying command\n", ha->host_no,
+ "retrying command\n", vha->host_no,
cp->device->channel, cp->device->id,
cp->device->lun, rsp_info_len, rsp_info[0],
rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
@@ -1025,7 +1058,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
qla_printk(KERN_INFO, ha,
"scsi(%ld:%d:%d:%d): Mid-layer underflow "
"detected (%x of %x bytes)...returning "
- "error status.\n", ha->host_no,
+ "error status.\n", vha->host_no,
cp->device->channel, cp->device->id,
cp->device->lun, resid,
scsi_bufflen(cp));
@@ -1039,7 +1072,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
DEBUG2(printk(KERN_INFO
"scsi(%ld): QUEUE FULL status detected "
- "0x%x-0x%x.\n", ha->host_no, comp_status,
+ "0x%x-0x%x.\n", vha->host_no, comp_status,
scsi_status));
/* Adjust queue depth for all luns on the port. */
@@ -1078,7 +1111,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
DEBUG2(printk(KERN_INFO
"scsi(%ld:%d:%d) UNDERRUN status detected "
"0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
- "os_underflow=0x%x\n", ha->host_no,
+ "os_underflow=0x%x\n", vha->host_no,
cp->device->id, cp->device->lun, comp_status,
scsi_status, resid_len, resid, cp->cmnd[0],
cp->underflow));
@@ -1095,7 +1128,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
DEBUG2(printk(KERN_INFO
"scsi(%ld): QUEUE FULL status detected "
- "0x%x-0x%x.\n", ha->host_no, comp_status,
+ "0x%x-0x%x.\n", vha->host_no, comp_status,
scsi_status));
/*
@@ -1125,10 +1158,10 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
if (!(scsi_status & SS_RESIDUAL_UNDER)) {
DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
"frame(s) detected (%x of %x bytes)..."
- "retrying command.\n", ha->host_no,
- cp->device->channel, cp->device->id,
- cp->device->lun, resid,
- scsi_bufflen(cp)));
+ "retrying command.\n",
+ vha->host_no, cp->device->channel,
+ cp->device->id, cp->device->lun, resid,
+ scsi_bufflen(cp)));
cp->result = DID_BUS_BUSY << 16;
break;
@@ -1140,7 +1173,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
qla_printk(KERN_INFO, ha,
"scsi(%ld:%d:%d:%d): Mid-layer underflow "
"detected (%x of %x bytes)...returning "
- "error status.\n", ha->host_no,
+ "error status.\n", vha->host_no,
cp->device->channel, cp->device->id,
cp->device->lun, resid,
scsi_bufflen(cp));
@@ -1157,7 +1190,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
case CS_DATA_OVERRUN:
DEBUG2(printk(KERN_INFO
"scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
- ha->host_no, cp->device->id, cp->device->lun, comp_status,
+ vha->host_no, cp->device->id, cp->device->lun, comp_status,
scsi_status));
DEBUG2(printk(KERN_INFO
"CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
@@ -1183,19 +1216,24 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
*/
DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
"pid=%ld, compl status=0x%x, port state=0x%x\n",
- ha->host_no, cp->device->id, cp->device->lun,
+ vha->host_no, cp->device->id, cp->device->lun,
cp->serial_number, comp_status,
atomic_read(&fcport->state)));
- cp->result = DID_BUS_BUSY << 16;
+ /*
+ * We are going to have the fc class block the rport while we
+ * try to recover, so instruct the mid-layer to requeue until
+ * the class decides how to handle this.
+ */
+ cp->result = DID_TRANSPORT_DISRUPTED << 16;
if (atomic_read(&fcport->state) == FCS_ONLINE)
- qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1);
+ qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
break;
case CS_RESET:
DEBUG2(printk(KERN_INFO
"scsi(%ld): RESET status detected 0x%x-0x%x.\n",
- ha->host_no, comp_status, scsi_status));
+ vha->host_no, comp_status, scsi_status));
cp->result = DID_RESET << 16;
break;
@@ -1208,36 +1246,41 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
*/
DEBUG2(printk(KERN_INFO
"scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
- ha->host_no, comp_status, scsi_status));
+ vha->host_no, comp_status, scsi_status));
cp->result = DID_RESET << 16;
break;
case CS_TIMEOUT:
- cp->result = DID_BUS_BUSY << 16;
+ /*
+ * We are going to have the fc class block the rport while we
+ * try to recover, so instruct the mid-layer to requeue until
+ * the class decides how to handle this.
+ */
+ cp->result = DID_TRANSPORT_DISRUPTED << 16;
if (IS_FWI2_CAPABLE(ha)) {
DEBUG2(printk(KERN_INFO
"scsi(%ld:%d:%d:%d): TIMEOUT status detected "
- "0x%x-0x%x\n", ha->host_no, cp->device->channel,
+ "0x%x-0x%x\n", vha->host_no, cp->device->channel,
cp->device->id, cp->device->lun, comp_status,
scsi_status));
break;
}
DEBUG2(printk(KERN_INFO
"scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
- "sflags=%x.\n", ha->host_no, cp->device->channel,
+ "sflags=%x.\n", vha->host_no, cp->device->channel,
cp->device->id, cp->device->lun, comp_status, scsi_status,
le16_to_cpu(sts->status_flags)));
/* Check to see if logout occurred. */
if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
- qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1);
+ qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
break;
default:
DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
- "0x%x-0x%x.\n", ha->host_no, comp_status, scsi_status));
+ "0x%x-0x%x.\n", vha->host_no, comp_status, scsi_status));
qla_printk(KERN_INFO, ha,
"Unknown status detected 0x%x-0x%x.\n",
comp_status, scsi_status);
@@ -1247,7 +1290,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
}
/* Place command on done queue. */
- if (ha->status_srb == NULL)
+ if (vha->status_srb == NULL)
qla2x00_sp_compl(ha, sp);
}
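
The CS_PORT_* and CS_TIMEOUT arms above replace DID_BUS_BUSY with DID_TRANSPORT_DISRUPTED, so the mid-layer requeues the command while the FC transport class blocks the rport and decides its fate. The DID_* code is the host byte of scmd->result, which occupies bits 16-23; hence the shift by 16 on every completion path in this function:

/* Sketch: the host byte occupies bits 16-23 of scmd->result. */
cp->result = DID_TRANSPORT_DISRUPTED << 16;	/* transport hiccup */
/* ...whereas a clean completion also carries the status byte: */
cp->result = (DID_OK << 16) | SAM_STAT_GOOD;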
@@ -1259,10 +1302,11 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
* Extended sense data.
*/
static void
-qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
+qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
{
uint8_t sense_sz = 0;
- srb_t *sp = ha->status_srb;
+ struct qla_hw_data *ha = vha->hw;
+ srb_t *sp = vha->status_srb;
struct scsi_cmnd *cp;
if (sp != NULL && sp->request_sense_length != 0) {
@@ -1274,7 +1318,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
"cmd is NULL: already returned to OS (sp=%p)\n",
sp);
- ha->status_srb = NULL;
+ vha->status_srb = NULL;
return;
}
@@ -1295,7 +1339,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
/* Place command on done queue. */
if (sp->request_sense_length == 0) {
- ha->status_srb = NULL;
+ vha->status_srb = NULL;
qla2x00_sp_compl(ha, sp);
}
}
@@ -1307,10 +1351,11 @@ qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
* @pkt: Entry pointer
*/
static void
-qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
+qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
srb_t *sp;
-
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = rsp->req;
#if defined(QL_DEBUG_LEVEL_2)
if (pkt->entry_status & RF_INV_E_ORDER)
qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
@@ -1329,13 +1374,13 @@ qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
/* Validate handle. */
if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
- sp = ha->outstanding_cmds[pkt->handle];
+ sp = req->outstanding_cmds[pkt->handle];
else
sp = NULL;
if (sp) {
/* Free outstanding command slot. */
- ha->outstanding_cmds[pkt->handle] = NULL;
+ req->outstanding_cmds[pkt->handle] = NULL;
/* Bad payload or header */
if (pkt->entry_status &
@@ -1352,12 +1397,12 @@ qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
} else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
- ha->host_no));
+ vha->host_no));
qla_printk(KERN_WARNING, ha,
"Error entry - invalid handle\n");
- set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
- qla2xxx_wake_dpc(ha);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
}
}
@@ -1367,10 +1412,11 @@ qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
* @mb0: Mailbox0 register
*/
static void
-qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
+qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
uint16_t cnt;
uint16_t __iomem *wptr;
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
/* Load return mailbox registers. */
@@ -1385,10 +1431,10 @@ qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
if (ha->mcp) {
DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
- __func__, ha->host_no, ha->mcp->mb[0]));
+ __func__, vha->host_no, ha->mcp->mb[0]));
} else {
DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
- __func__, ha->host_no));
+ __func__, vha->host_no));
}
}
@@ -1397,30 +1443,33 @@ qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
* @ha: SCSI driver HA context
*/
void
-qla24xx_process_response_queue(struct scsi_qla_host *ha)
+qla24xx_process_response_queue(struct rsp_que *rsp)
{
- struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ struct qla_hw_data *ha = rsp->hw;
struct sts_entry_24xx *pkt;
+ struct scsi_qla_host *vha;
+
+ vha = qla2x00_get_rsp_host(rsp);
- if (!ha->flags.online)
+ if (!vha->flags.online)
return;
- while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
- pkt = (struct sts_entry_24xx *)ha->response_ring_ptr;
+ while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
+ pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
- ha->rsp_ring_index++;
- if (ha->rsp_ring_index == ha->response_q_length) {
- ha->rsp_ring_index = 0;
- ha->response_ring_ptr = ha->response_ring;
+ rsp->ring_index++;
+ if (rsp->ring_index == rsp->length) {
+ rsp->ring_index = 0;
+ rsp->ring_ptr = rsp->ring;
} else {
- ha->response_ring_ptr++;
+ rsp->ring_ptr++;
}
if (pkt->entry_status != 0) {
DEBUG3(printk(KERN_INFO
- "scsi(%ld): Process error entry.\n", ha->host_no));
+ "scsi(%ld): Process error entry.\n", vha->host_no));
- qla2x00_error_entry(ha, (sts_entry_t *) pkt);
+ qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
wmb();
continue;
@@ -1428,13 +1477,13 @@ qla24xx_process_response_queue(struct scsi_qla_host *ha)
switch (pkt->entry_type) {
case STATUS_TYPE:
- qla2x00_status_entry(ha, pkt);
+ qla2x00_status_entry(vha, rsp, pkt);
break;
case STATUS_CONT_TYPE:
- qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt);
+ qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
break;
case VP_RPT_ID_IOCB_TYPE:
- qla24xx_report_id_acquisition(ha,
+ qla24xx_report_id_acquisition(vha,
(struct vp_rpt_id_entry_24xx *)pkt);
break;
default:
@@ -1442,7 +1491,7 @@ qla24xx_process_response_queue(struct scsi_qla_host *ha)
DEBUG4(printk(KERN_WARNING
"scsi(%ld): Received unknown response pkt type %x "
"entry status=%x.\n",
- ha->host_no, pkt->entry_type, pkt->entry_status));
+ vha->host_no, pkt->entry_type, pkt->entry_status));
break;
}
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -1450,14 +1499,15 @@ qla24xx_process_response_queue(struct scsi_qla_host *ha)
}
/* Adjust ring index */
- WRT_REG_DWORD(&reg->rsp_q_out, ha->rsp_ring_index);
+ ha->isp_ops->wrt_rsp_reg(ha, rsp->id, rsp->ring_index);
}
static void
-qla2xxx_check_risc_status(scsi_qla_host_t *ha)
+qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
int rval;
uint32_t cnt;
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
if (!IS_QLA25XX(ha))
@@ -1511,25 +1561,29 @@ done:
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
- scsi_qla_host_t *ha;
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
struct device_reg_24xx __iomem *reg;
int status;
unsigned long iter;
uint32_t stat;
uint32_t hccr;
uint16_t mb[4];
+ struct rsp_que *rsp;
- ha = (scsi_qla_host_t *) dev_id;
- if (!ha) {
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
printk(KERN_INFO
- "%s(): NULL host pointer\n", __func__);
+ "%s(): NULL response queue pointer\n", __func__);
return IRQ_NONE;
}
+ ha = rsp->hw;
reg = &ha->iobase->isp24;
status = 0;
spin_lock(&ha->hardware_lock);
+ vha = qla2x00_get_rsp_host(rsp);
for (iter = 50; iter--; ) {
stat = RD_REG_DWORD(&reg->host_status);
if (stat & HSRX_RISC_PAUSED) {
@@ -1537,7 +1591,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
break;
if (ha->hw_event_pause_errors == 0)
- qla2x00_post_hwe_work(ha, HW_EVENT_PARITY_ERR,
+ qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR,
0, MSW(stat), LSW(stat));
else if (ha->hw_event_pause_errors < 0xffffffff)
ha->hw_event_pause_errors++;
@@ -1547,10 +1601,10 @@ qla24xx_intr_handler(int irq, void *dev_id)
qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
"Dumping firmware!\n", hccr);
- qla2xxx_check_risc_status(ha);
+ qla2xxx_check_risc_status(vha);
- ha->isp_ops->fw_dump(ha, 1);
- set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+ ha->isp_ops->fw_dump(vha, 1);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
} else if ((stat & HSRX_RISC_INT) == 0)
break;
@@ -1560,7 +1614,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
case 0x2:
case 0x10:
case 0x11:
- qla24xx_mbx_completion(ha, MSW(stat));
+ qla24xx_mbx_completion(vha, MSW(stat));
status |= MBX_INTERRUPT;
break;
@@ -1569,15 +1623,16 @@ qla24xx_intr_handler(int irq, void *dev_id)
mb[1] = RD_REG_WORD(&reg->mailbox1);
mb[2] = RD_REG_WORD(&reg->mailbox2);
mb[3] = RD_REG_WORD(&reg->mailbox3);
- qla2x00_async_event(ha, mb);
+ qla2x00_async_event(vha, rsp, mb);
break;
case 0x13:
- qla24xx_process_response_queue(ha);
+ case 0x14:
+ qla24xx_process_response_queue(rsp);
break;
default:
DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
"(%d).\n",
- ha->host_no, stat & 0xff));
+ vha->host_no, stat & 0xff));
break;
}
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
@@ -1597,15 +1652,22 @@ qla24xx_intr_handler(int irq, void *dev_id)
static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
- scsi_qla_host_t *ha;
+ struct qla_hw_data *ha;
+ struct rsp_que *rsp;
struct device_reg_24xx __iomem *reg;
- ha = dev_id;
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ printk(KERN_INFO
+ "%s(): NULL response queue pointer\n", __func__);
+ return IRQ_NONE;
+ }
+ ha = rsp->hw;
reg = &ha->iobase->isp24;
spin_lock_irq(&ha->hardware_lock);
- qla24xx_process_response_queue(ha);
+ qla24xx_process_response_queue(rsp);
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
spin_unlock_irq(&ha->hardware_lock);
@@ -1614,20 +1676,64 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
}
static irqreturn_t
+qla25xx_msix_rsp_q(int irq, void *dev_id)
+{
+ struct qla_hw_data *ha;
+ struct rsp_que *rsp;
+ struct device_reg_24xx __iomem *reg;
+ uint16_t msix_disabled_hccr = 0;
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ printk(KERN_INFO
+ "%s(): NULL response queue pointer\n", __func__);
+ return IRQ_NONE;
+ }
+ ha = rsp->hw;
+ reg = &ha->iobase->isp24;
+
+ spin_lock_irq(&ha->hardware_lock);
+
+ msix_disabled_hccr = rsp->options;
+ if (!rsp->id)
+ msix_disabled_hccr &= __constant_cpu_to_le32(BIT_22);
+ else
+ msix_disabled_hccr &= __constant_cpu_to_le32(BIT_6);
+
+ qla24xx_process_response_queue(rsp);
+
+ if (!msix_disabled_hccr)
+ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+
+ spin_unlock_irq(&ha->hardware_lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
- scsi_qla_host_t *ha;
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct rsp_que *rsp;
struct device_reg_24xx __iomem *reg;
int status;
uint32_t stat;
uint32_t hccr;
uint16_t mb[4];
- ha = dev_id;
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ DEBUG(printk(
+ "%s(): NULL response queue pointer\n", __func__));
+ return IRQ_NONE;
+ }
+ ha = rsp->hw;
reg = &ha->iobase->isp24;
status = 0;
spin_lock_irq(&ha->hardware_lock);
+ vha = qla2x00_get_rsp_host(rsp);
do {
stat = RD_REG_DWORD(&reg->host_status);
if (stat & HSRX_RISC_PAUSED) {
@@ -1635,7 +1741,7 @@ qla24xx_msix_default(int irq, void *dev_id)
break;
if (ha->hw_event_pause_errors == 0)
- qla2x00_post_hwe_work(ha, HW_EVENT_PARITY_ERR,
+ qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR,
0, MSW(stat), LSW(stat));
else if (ha->hw_event_pause_errors < 0xffffffff)
ha->hw_event_pause_errors++;
@@ -1645,10 +1751,10 @@ qla24xx_msix_default(int irq, void *dev_id)
qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
"Dumping firmware!\n", hccr);
- qla2xxx_check_risc_status(ha);
+ qla2xxx_check_risc_status(vha);
- ha->isp_ops->fw_dump(ha, 1);
- set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+ ha->isp_ops->fw_dump(vha, 1);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
} else if ((stat & HSRX_RISC_INT) == 0)
break;
@@ -1658,7 +1764,7 @@ qla24xx_msix_default(int irq, void *dev_id)
case 0x2:
case 0x10:
case 0x11:
- qla24xx_mbx_completion(ha, MSW(stat));
+ qla24xx_mbx_completion(vha, MSW(stat));
status |= MBX_INTERRUPT;
break;
@@ -1667,15 +1773,16 @@ qla24xx_msix_default(int irq, void *dev_id)
mb[1] = RD_REG_WORD(&reg->mailbox1);
mb[2] = RD_REG_WORD(&reg->mailbox2);
mb[3] = RD_REG_WORD(&reg->mailbox3);
- qla2x00_async_event(ha, mb);
+ qla2x00_async_event(vha, rsp, mb);
break;
case 0x13:
- qla24xx_process_response_queue(ha);
+ case 0x14:
+ qla24xx_process_response_queue(rsp);
break;
default:
DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
"(%d).\n",
- ha->host_no, stat & 0xff));
+ vha->host_no, stat & 0xff));
break;
}
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
@@ -1700,70 +1807,138 @@ struct qla_init_msix_entry {
irq_handler_t handler;
};
-static struct qla_init_msix_entry imsix_entries[QLA_MSIX_ENTRIES] = {
- { QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT,
- "qla2xxx (default)", qla24xx_msix_default },
+static struct qla_init_msix_entry base_queue = {
+ .entry = 0,
+ .index = 0,
+ .name = "qla2xxx (default)",
+ .handler = qla24xx_msix_default,
+};
- { QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q,
- "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
+static struct qla_init_msix_entry base_rsp_queue = {
+ .entry = 1,
+ .index = 1,
+ .name = "qla2xxx (rsp_q)",
+ .handler = qla24xx_msix_rsp_q,
+};
+
+static struct qla_init_msix_entry multi_rsp_queue = {
+ .entry = 1,
+ .index = 1,
+ .name = "qla2xxx (multi_q)",
+ .handler = qla25xx_msix_rsp_q,
};
static void
-qla24xx_disable_msix(scsi_qla_host_t *ha)
+qla24xx_disable_msix(struct qla_hw_data *ha)
{
int i;
struct qla_msix_entry *qentry;
- for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
- qentry = &ha->msix_entries[imsix_entries[i].index];
+ for (i = 0; i < ha->msix_count; i++) {
+ qentry = &ha->msix_entries[i];
if (qentry->have_irq)
- free_irq(qentry->msix_vector, ha);
+ free_irq(qentry->vector, qentry->rsp);
}
pci_disable_msix(ha->pdev);
+ kfree(ha->msix_entries);
+ ha->msix_entries = NULL;
+ ha->flags.msix_enabled = 0;
}
static int
-qla24xx_enable_msix(scsi_qla_host_t *ha)
+qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
int i, ret;
- struct msix_entry entries[QLA_MSIX_ENTRIES];
+ struct msix_entry *entries;
struct qla_msix_entry *qentry;
+ struct qla_init_msix_entry *msix_queue;
+
+ entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
+ GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
- for (i = 0; i < QLA_MSIX_ENTRIES; i++)
- entries[i].entry = imsix_entries[i].entry;
+ for (i = 0; i < ha->msix_count; i++)
+ entries[i].entry = i;
- ret = pci_enable_msix(ha->pdev, entries, ARRAY_SIZE(entries));
+ ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
if (ret) {
qla_printk(KERN_WARNING, ha,
- "MSI-X: Failed to enable support -- %d/%d\n",
- QLA_MSIX_ENTRIES, ret);
+ "MSI-X: Failed to enable support -- %d/%d\n"
+ " Retry with %d vectors\n", ha->msix_count, ret, ret);
+ ha->msix_count = ret;
+ ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
+ if (ret) {
+ qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable"
+ " support, giving up -- %d/%d\n",
+ ha->msix_count, ret);
+ goto msix_out;
+ }
+ ha->max_queues = ha->msix_count - 1;
+ }
+ ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
+ ha->msix_count, GFP_KERNEL);
+ if (!ha->msix_entries) {
+ ret = -ENOMEM;
goto msix_out;
}
ha->flags.msix_enabled = 1;
- for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
- qentry = &ha->msix_entries[imsix_entries[i].index];
- qentry->msix_vector = entries[i].vector;
- qentry->msix_entry = entries[i].entry;
+ for (i = 0; i < ha->msix_count; i++) {
+ qentry = &ha->msix_entries[i];
+ qentry->vector = entries[i].vector;
+ qentry->entry = entries[i].entry;
qentry->have_irq = 0;
- ret = request_irq(qentry->msix_vector,
- imsix_entries[i].handler, 0, imsix_entries[i].name, ha);
- if (ret) {
- qla_printk(KERN_WARNING, ha,
- "MSI-X: Unable to register handler -- %x/%d.\n",
- imsix_entries[i].index, ret);
- qla24xx_disable_msix(ha);
- goto msix_out;
- }
- qentry->have_irq = 1;
+ qentry->rsp = NULL;
+ }
+
+ /* Enable MSI-X for AENs on queue 0 */
+ qentry = &ha->msix_entries[0];
+ ret = request_irq(qentry->vector, base_queue.handler, 0,
+ base_queue.name, rsp);
+ if (ret) {
+ qla_printk(KERN_WARNING, ha,
+ "MSI-X: Unable to register handler -- %x/%d.\n",
+ qentry->vector, ret);
+ qla24xx_disable_msix(ha);
+ goto msix_out;
+ }
+ qentry->have_irq = 1;
+ qentry->rsp = rsp;
+
+ /* Enable the MSI-X vector for response queue updates on queue 0 */
+ if (ha->max_queues > 1 && ha->mqiobase) {
+ ha->mqenable = 1;
+ msix_queue = &multi_rsp_queue;
+ qla_printk(KERN_INFO, ha,
+ "MQ enabled, Number of Queue Resources: %d \n",
+ ha->max_queues);
+ } else {
+ ha->mqenable = 0;
+ msix_queue = &base_rsp_queue;
+ }
+
+ qentry = &ha->msix_entries[1];
+ ret = request_irq(qentry->vector, msix_queue->handler, 0,
+ msix_queue->name, rsp);
+ if (ret) {
+ qla_printk(KERN_WARNING, ha,
+ "MSI-X: Unable to register handler -- %x/%d.\n",
+ qentry->vector, ret);
+ qla24xx_disable_msix(ha);
+ ha->mqenable = 0;
+ goto msix_out;
}
+ qentry->have_irq = 1;
+ qentry->rsp = rsp;
msix_out:
+ kfree(entries);
return ret;
}
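
qla24xx_enable_msix() leans on pci_enable_msix()'s negotiation contract: zero means success, a negative value is a hard failure, and a positive value is the number of vectors the platform could actually supply, so the function retries once with that count and sizes max_queues to what it got. The retry idiom in isolation (error paths trimmed, one vector reserved for default/AEN handling as above):

/* Sketch: negotiate the MSI-X vector count down on partial failure. */
ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
if (ret > 0) {
	ha->msix_count = ret;		/* what the platform offered */
	ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
	if (!ret)
		ha->max_queues = ha->msix_count - 1;
}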
int
-qla2x00_request_irqs(scsi_qla_host_t *ha)
+qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
int ret;
device_reg_t __iomem *reg = ha->iobase;
@@ -1772,11 +1947,11 @@ qla2x00_request_irqs(scsi_qla_host_t *ha)
if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
goto skip_msix;
- if (IS_QLA2432(ha) && (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX ||
- !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
+ if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
+ !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
DEBUG2(qla_printk(KERN_WARNING, ha,
- "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
- ha->chip_revision, ha->fw_attributes));
+ "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
+ ha->pdev->revision, ha->fw_attributes));
goto skip_msix;
}
@@ -1793,7 +1968,7 @@ qla2x00_request_irqs(scsi_qla_host_t *ha)
goto skip_msi;
}
- ret = qla24xx_enable_msix(ha);
+ ret = qla24xx_enable_msix(ha, rsp);
if (!ret) {
DEBUG2(qla_printk(KERN_INFO, ha,
"MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
@@ -1815,7 +1990,7 @@ skip_msix:
skip_msi:
ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
- IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha);
+ IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
if (ret) {
qla_printk(KERN_WARNING, ha,
"Failed to reserve interrupt %d already in use.\n",
@@ -1823,10 +1998,8 @@ skip_msi:
goto fail;
}
ha->flags.inta_enabled = 1;
- ha->host->irq = ha->pdev->irq;
clear_risc_ints:
- ha->isp_ops->disable_intrs(ha);
spin_lock_irq(&ha->hardware_lock);
if (IS_FWI2_CAPABLE(ha)) {
WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
@@ -1843,13 +2016,74 @@ fail:
}
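
qla2x00_request_irqs() keeps the usual three-step fallback -- MSI-X where the chip and firmware allow it, then single-message MSI, then a shared legacy line -- and in every mode the base response queue is the dev_id. Heavily condensed, with the chip checks and error paths elided:

/* Sketch: interrupt-mode fallback, condensed from the function above. */
ret = qla24xx_enable_msix(ha, rsp);
if (!ret)
	goto clear_risc_ints;		/* MSI-X is up */

pci_enable_msi(ha->pdev);		/* best-effort; INTx otherwise */

ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
    IRQF_DISABLED | IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
if (!ret)
	ha->flags.inta_enabled = 1;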
void
-qla2x00_free_irqs(scsi_qla_host_t *ha)
+qla2x00_free_irqs(scsi_qla_host_t *vha)
{
+ struct qla_hw_data *ha = vha->hw;
+ struct rsp_que *rsp = ha->rsp_q_map[0];
if (ha->flags.msix_enabled)
qla24xx_disable_msix(ha);
else if (ha->flags.inta_enabled) {
- free_irq(ha->host->irq, ha);
+ free_irq(ha->pdev->irq, rsp);
pci_disable_msi(ha->pdev);
}
}
+
+static struct scsi_qla_host *
+qla2x00_get_rsp_host(struct rsp_que *rsp)
+{
+ srb_t *sp;
+ struct qla_hw_data *ha = rsp->hw;
+ struct scsi_qla_host *vha = NULL;
+ struct sts_entry_24xx *pkt;
+ struct req_que *req;
+
+ if (rsp->id) {
+ pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
+ req = rsp->req;
+ if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
+ sp = req->outstanding_cmds[pkt->handle];
+ if (sp)
+ vha = sp->vha;
+ }
+ }
+ if (!vha)
+ /* handle it in base queue */
+ vha = pci_get_drvdata(ha->pdev);
+
+ return vha;
+}
+
+int qla25xx_request_irq(struct rsp_que *rsp)
+{
+ struct qla_hw_data *ha = rsp->hw;
+ struct qla_init_msix_entry *intr = &multi_rsp_queue;
+ struct qla_msix_entry *msix = rsp->msix;
+ int ret;
+
+ ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
+ if (ret) {
+ qla_printk(KERN_WARNING, ha,
+ "MSI-X: Unable to register handler -- %x/%d.\n",
+ msix->vector, ret);
+ return ret;
+ }
+ msix->have_irq = 1;
+ msix->rsp = rsp;
+ return ret;
+}
+
+void
+qla25xx_wrt_rsp_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
+{
+ device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
+ WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, index);
+}
+
+void
+qla24xx_wrt_rsp_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
+{
+ device_reg_t __iomem *reg = (void *) ha->iobase;
+ WRT_REG_DWORD(&reg->isp24.rsp_q_out, index);
+}
+
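
The rsp_q_out writers above complete the picture begun in qla_iocb.c: on multiqueue parts every queue owns a page-sized register window, found by offsetting mqiobase by QLA_QUE_PAGE times the queue id. Computing that window once is enough to reach any per-queue register:

/* Sketch: locate queue `id`'s register window on an ISP25xx-style
 * part; QLA_QUE_PAGE is the per-queue window stride. */
static inline device_reg_t __iomem *
mq_reg_window_sketch(struct qla_hw_data *ha, uint16_t id)
{
	return (void __iomem *)ha->mqiobase + QLA_QUE_PAGE * id;
}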
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 36bc6851e23d..a99976f5fabd 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -29,7 +29,7 @@
* Kernel context.
*/
static int
-qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
+qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
{
int rval;
unsigned long flags = 0;
@@ -42,15 +42,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
uint32_t cnt;
uint32_t mboxes;
unsigned long wait_time;
- scsi_qla_host_t *ha = to_qla_parent(pvha);
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
reg = ha->iobase;
- io_lock_on = ha->flags.init_done;
+ io_lock_on = base_vha->flags.init_done;
rval = QLA_SUCCESS;
- abort_active = test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
+ abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
- DEBUG11(printk("%s(%ld): entered.\n", __func__, pvha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, base_vha->host_no));
/*
* Wait for active mailbox commands to finish by waiting at most tov
@@ -62,7 +63,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
mcp->tov * HZ)) {
/* Timeout occurred. Return error. */
DEBUG2_3_11(printk("%s(%ld): cmd access timeout. "
- "Exiting.\n", __func__, ha->host_no));
+ "Exiting.\n", __func__, base_vha->host_no));
return QLA_FUNCTION_TIMEOUT;
}
}
@@ -72,7 +73,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
ha->mcp = mcp;
DEBUG11(printk("scsi(%ld): prepare to issue mbox cmd=0x%x.\n",
- ha->host_no, mcp->mb[0]));
+ base_vha->host_no, mcp->mb[0]));
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -100,15 +101,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
#if defined(QL_DEBUG_LEVEL_1)
printk("%s(%ld): Loaded MBX registers (displayed in bytes) = \n",
- __func__, ha->host_no);
+ __func__, base_vha->host_no);
qla2x00_dump_buffer((uint8_t *)mcp->mb, 16);
printk("\n");
qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x10), 16);
printk("\n");
qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x20), 8);
printk("\n");
- printk("%s(%ld): I/O address = %p.\n", __func__, ha->host_no, optr);
- qla2x00_dump_regs(ha);
+ printk("%s(%ld): I/O address = %p.\n", __func__, base_vha->host_no,
+ optr);
+ qla2x00_dump_regs(base_vha);
#endif
/* Issue set host interrupt command to send cmd out. */
@@ -117,7 +119,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
/* Unlock mbx registers and wait for interrupt */
DEBUG11(printk("%s(%ld): going to unlock irq & waiting for interrupt. "
- "jiffies=%lx.\n", __func__, ha->host_no, jiffies));
+ "jiffies=%lx.\n", __func__, base_vha->host_no, jiffies));
/* Wait for mbx cmd completion until timeout */
@@ -137,7 +139,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
} else {
DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__,
- ha->host_no, command));
+ base_vha->host_no, command));
if (IS_FWI2_CAPABLE(ha))
WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
@@ -151,7 +153,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
break;
/* Check for pending interrupts. */
- qla2x00_poll(ha);
+ qla2x00_poll(ha->rsp_q_map[0]);
if (command != MBC_LOAD_RISC_RAM_EXTENDED &&
!ha->flags.mbox_int)
@@ -164,7 +166,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
uint16_t *iptr2;
DEBUG3_11(printk("%s(%ld): cmd %x completed.\n", __func__,
- ha->host_no, command));
+ base_vha->host_no, command));
/* Got interrupt. Clear the flag. */
ha->flags.mbox_int = 0;
@@ -200,12 +202,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
ictrl = RD_REG_WORD(&reg->isp.ictrl);
}
printk("%s(%ld): **** MB Command Timeout for cmd %x ****\n",
- __func__, ha->host_no, command);
+ __func__, base_vha->host_no, command);
printk("%s(%ld): icontrol=%x jiffies=%lx\n", __func__,
- ha->host_no, ictrl, jiffies);
+ base_vha->host_no, ictrl, jiffies);
printk("%s(%ld): *** mailbox[0] = 0x%x ***\n", __func__,
- ha->host_no, mb0);
- qla2x00_dump_regs(ha);
+ base_vha->host_no, mb0);
+ qla2x00_dump_regs(base_vha);
#endif
rval = QLA_FUNCTION_TIMEOUT;
@@ -218,10 +220,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
if (abort_active || !io_lock_on) {
DEBUG11(printk("%s(%ld): checking for additional resp "
- "interrupt.\n", __func__, ha->host_no));
+ "interrupt.\n", __func__, base_vha->host_no));
/* polling mode for non isp_abort commands. */
- qla2x00_poll(ha);
+ qla2x00_poll(ha->rsp_q_map[0]);
}
if (rval == QLA_FUNCTION_TIMEOUT &&
@@ -229,35 +231,37 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
if (!io_lock_on || (mcp->flags & IOCTL_CMD)) {
/* not in dpc. schedule it for dpc to take over. */
DEBUG(printk("%s(%ld): timeout schedule "
- "isp_abort_needed.\n", __func__, ha->host_no));
+ "isp_abort_needed.\n", __func__,
+ base_vha->host_no));
DEBUG2_3_11(printk("%s(%ld): timeout schedule "
- "isp_abort_needed.\n", __func__, ha->host_no));
+ "isp_abort_needed.\n", __func__,
+ base_vha->host_no));
qla_printk(KERN_WARNING, ha,
"Mailbox command timeout occurred. Scheduling ISP "
"abort.\n");
- set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
- qla2xxx_wake_dpc(ha);
+ set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
} else if (!abort_active) {
/* call abort directly since we are in the DPC thread */
DEBUG(printk("%s(%ld): timeout calling abort_isp\n",
- __func__, ha->host_no));
+ __func__, base_vha->host_no));
DEBUG2_3_11(printk("%s(%ld): timeout calling "
- "abort_isp\n", __func__, ha->host_no));
+ "abort_isp\n", __func__, base_vha->host_no));
qla_printk(KERN_WARNING, ha,
"Mailbox command timeout occurred. Issuing ISP "
"abort.\n");
- set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
- clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
- if (qla2x00_abort_isp(ha)) {
+ set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+ clear_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
+ if (qla2x00_abort_isp(base_vha)) {
/* Failed. retry later. */
- set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+ set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
}
- clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
+ clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
DEBUG(printk("%s(%ld): finished abort_isp\n", __func__,
- ha->host_no));
+ base_vha->host_no));
DEBUG2_3_11(printk("%s(%ld): finished abort_isp\n",
- __func__, ha->host_no));
+ __func__, base_vha->host_no));
}
}
@@ -267,24 +271,26 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
if (rval) {
DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, "
- "mbx2=%x, cmd=%x ****\n", __func__, ha->host_no,
+ "mbx2=%x, cmd=%x ****\n", __func__, base_vha->host_no,
mcp->mb[0], mcp->mb[1], mcp->mb[2], command));
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__,
+ base_vha->host_no));
}
return rval;
}
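[Note: the hunks above carry the core of the conversion. qla2x00_mailbox_command() now takes a (possibly virtual) port, reaches adapter-wide state through vha->hw, and recovers the physical port, which owns the init and DPC flags, from the PCI device. A minimal sketch of the lookup pattern repeated throughout this file:

    static int example_ctx(scsi_qla_host_t *vha)
    {
            struct qla_hw_data *ha = vha->hw;                      /* shared hardware state */
            scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); /* physical (base) port  */

            /* Recovery and initialization are tracked on the base port... */
            if (test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags))
                    return QLA_FUNCTION_FAILED;

            /* ...while per-port identity (vp_idx, host_no) stays on vha. */
            return base_vha->flags.init_done ? QLA_SUCCESS : QLA_FUNCTION_FAILED;
    }
]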
int
-qla2x00_load_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t risc_addr,
+qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
uint32_t risc_code_size)
{
int rval;
+ struct qla_hw_data *ha = vha->hw;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
@@ -312,13 +318,13 @@ qla2x00_load_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t risc_addr,
mcp->in_mb = MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
- ha->host_no, rval, mcp->mb[0]));
+ vha->host_no, rval, mcp->mb[0]));
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
}
return rval;
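[Note: every wrapper that follows has the same shape: fill an on-stack mbx_cmd_t with the opcode and register masks, issue it with qla2x00_mailbox_command(vha, mcp), and log through the DEBUG macros. A hedged skeleton (the opcode is one used elsewhere in this file; the wrapper itself is illustrative):

    int example_wrapper(scsi_qla_host_t *vha, uint16_t *state)
    {
            int rval;
            mbx_cmd_t mc;
            mbx_cmd_t *mcp = &mc;

            mcp->mb[0] = MBC_GET_FIRMWARE_STATE; /* command opcode          */
            mcp->out_mb = MBX_0;                 /* mailboxes loaded to fw  */
            mcp->in_mb = MBX_1|MBX_0;            /* mailboxes read back     */
            mcp->tov = MBX_TOV_SECONDS;
            mcp->flags = 0;
            rval = qla2x00_mailbox_command(vha, mcp);
            if (rval == QLA_SUCCESS)
                    *state = mcp->mb[1];         /* returned firmware state */
            return rval;
    }
]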
@@ -340,13 +346,14 @@ qla2x00_load_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t risc_addr,
* Kernel context.
*/
int
-qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr)
+qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
{
int rval;
+ struct qla_hw_data *ha = vha->hw;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
mcp->out_mb = MBX_0;
@@ -369,18 +376,18 @@ qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr)
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
- ha->host_no, rval, mcp->mb[0]));
+ vha->host_no, rval, mcp->mb[0]));
} else {
if (IS_FWI2_CAPABLE(ha)) {
DEBUG11(printk("%s(%ld): done exchanges=%x.\n",
- __func__, ha->host_no, mcp->mb[1]));
+ __func__, vha->host_no, mcp->mb[1]));
} else {
DEBUG11(printk("%s(%ld): done.\n", __func__,
- ha->host_no));
+ vha->host_no));
}
}
@@ -404,28 +411,28 @@ qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr)
* Kernel context.
*/
void
-qla2x00_get_fw_version(scsi_qla_host_t *ha, uint16_t *major, uint16_t *minor,
+qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
uint16_t *subminor, uint16_t *attributes, uint32_t *memory)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
mcp->out_mb = MBX_0;
mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->flags = 0;
mcp->tov = MBX_TOV_SECONDS;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
/* Return mailbox data. */
*major = mcp->mb[1];
*minor = mcp->mb[2];
*subminor = mcp->mb[3];
*attributes = mcp->mb[6];
- if (IS_QLA2100(ha) || IS_QLA2200(ha))
+ if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
*memory = 0x1FFFF; /* Defaults to 128KB. */
else
*memory = (mcp->mb[5] << 16) | mcp->mb[4];
@@ -433,10 +440,10 @@ qla2x00_get_fw_version(scsi_qla_host_t *ha, uint16_t *major, uint16_t *minor,
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- ha->host_no, rval));
+ vha->host_no, rval));
} else {
/*EMPTY*/
- DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
}
}
@@ -455,32 +462,32 @@ qla2x00_get_fw_version(scsi_qla_host_t *ha, uint16_t *major, uint16_t *minor,
* Kernel context.
*/
int
-qla2x00_get_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
+qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
mcp->out_mb = MBX_0;
mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- ha->host_no, rval));
+ vha->host_no, rval));
} else {
fwopts[0] = mcp->mb[0];
fwopts[1] = mcp->mb[1];
fwopts[2] = mcp->mb[2];
fwopts[3] = mcp->mb[3];
- DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
}
return rval;
@@ -502,13 +509,13 @@ qla2x00_get_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
* Kernel context.
*/
int
-qla2x00_set_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
+qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
mcp->mb[1] = fwopts[1];
@@ -516,7 +523,7 @@ qla2x00_set_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
mcp->mb[3] = fwopts[3];
mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_0;
- if (IS_FWI2_CAPABLE(ha)) {
+ if (IS_FWI2_CAPABLE(vha->hw)) {
mcp->in_mb |= MBX_1;
} else {
mcp->mb[10] = fwopts[10];
@@ -526,17 +533,17 @@ qla2x00_set_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
}
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
fwopts[0] = mcp->mb[0];
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3_11(printk("%s(%ld): failed=%x (%x/%x).\n", __func__,
- ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
} else {
/*EMPTY*/
- DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
}
return rval;
@@ -558,13 +565,14 @@ qla2x00_set_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
* Kernel context.
*/
int
-qla2x00_mbx_reg_test(scsi_qla_host_t *ha)
+qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
{
int rval;
+ struct qla_hw_data *ha = vha->hw;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("qla2x00_mbx_reg_test(%ld): entered.\n", ha->host_no));
+ DEBUG11(printk("qla2x00_mbx_reg_test(%ld): entered.\n", vha->host_no));
mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
mcp->mb[1] = 0xAAAA;
@@ -578,7 +586,7 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *ha)
mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval == QLA_SUCCESS) {
if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
@@ -591,7 +599,7 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *ha)
struct device_reg_24xx __iomem *reg =
&ha->iobase->isp24;
- qla2xxx_hw_event_log(ha, HW_EVENT_ISP_ERR, 0,
+ qla2xxx_hw_event_log(vha, HW_EVENT_ISP_ERR, 0,
LSW(RD_REG_DWORD(&reg->hccr)),
LSW(RD_REG_DWORD(&reg->istatus)));
}
@@ -600,11 +608,11 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *ha)
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3_11(printk("qla2x00_mbx_reg_test(%ld): failed=%x.\n",
- ha->host_no, rval));
+ vha->host_no, rval));
} else {
/*EMPTY*/
DEBUG11(printk("qla2x00_mbx_reg_test(%ld): done.\n",
- ha->host_no));
+ vha->host_no));
}
return rval;
@@ -626,18 +634,18 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *ha)
* Kernel context.
*/
int
-qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
+qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_VERIFY_CHECKSUM;
mcp->out_mb = MBX_0;
mcp->in_mb = MBX_0;
- if (IS_FWI2_CAPABLE(ha)) {
+ if (IS_FWI2_CAPABLE(vha->hw)) {
mcp->mb[1] = MSW(risc_addr);
mcp->mb[2] = LSW(risc_addr);
mcp->out_mb |= MBX_2|MBX_1;
@@ -650,14 +658,14 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed=%x chk sum=%x.\n", __func__,
- ha->host_no, rval, IS_FWI2_CAPABLE(ha) ?
+ vha->host_no, rval, IS_FWI2_CAPABLE(vha->hw) ?
(mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1]));
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
}
return rval;
@@ -682,7 +690,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
* Kernel context.
*/
static int
-qla2x00_issue_iocb_timeout(scsi_qla_host_t *ha, void *buffer,
+qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
dma_addr_t phys_addr, size_t size, uint32_t tov)
{
int rval;
@@ -699,30 +707,28 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *ha, void *buffer,
mcp->in_mb = MBX_2|MBX_0;
mcp->tov = tov;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n",
- ha->host_no, rval));
- DEBUG2(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n",
- ha->host_no, rval));
+ vha->host_no, rval));
} else {
sts_entry_t *sts_entry = (sts_entry_t *) buffer;
/* Mask reserved bits. */
sts_entry->entry_status &=
- IS_FWI2_CAPABLE(ha) ? RF_MASK_24XX :RF_MASK;
+ IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
}
return rval;
}
int
-qla2x00_issue_iocb(scsi_qla_host_t *ha, void *buffer, dma_addr_t phys_addr,
+qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
size_t size)
{
- return qla2x00_issue_iocb_timeout(ha, buffer, phys_addr, size,
+ return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
MBX_TOV_SECONDS);
}
@@ -741,22 +747,23 @@ qla2x00_issue_iocb(scsi_qla_host_t *ha, void *buffer, dma_addr_t phys_addr,
* Kernel context.
*/
int
-qla2x00_abort_command(scsi_qla_host_t *ha, srb_t *sp)
+qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
{
unsigned long flags = 0;
fc_port_t *fcport;
int rval;
- uint32_t handle;
+ uint32_t handle = 0;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
- DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", ha->host_no));
+ DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no));
fcport = sp->fcport;
spin_lock_irqsave(&ha->hardware_lock, flags);
for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
- if (ha->outstanding_cmds[handle] == sp)
+ if (req->outstanding_cmds[handle] == sp)
break;
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -778,14 +785,14 @@ qla2x00_abort_command(scsi_qla_host_t *ha, srb_t *sp)
mcp->in_mb = MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("qla2x00_abort_command(%ld): failed=%x.\n",
- ha->host_no, rval));
+ vha->host_no, rval));
} else {
DEBUG11(printk("qla2x00_abort_command(%ld): done.\n",
- ha->host_no));
+ vha->host_no));
}
return rval;
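[Note: with multiple request queues, the outstanding-command table moves from the host structure into struct req_que, so qla2x00_abort_command() gains a req argument identifying the queue the srb was issued on. Its handle lookup, shown in isolation (fields exactly as in the hunk above):

    uint32_t handle = 0;
    unsigned long flags;

    spin_lock_irqsave(&ha->hardware_lock, flags);
    for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++)
            if (req->outstanding_cmds[handle] == sp)
                    break;
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    if (handle == MAX_OUTSTANDING_COMMANDS)
            return QLA_FUNCTION_FAILED;     /* sp was not issued on this queue */
]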
@@ -797,40 +804,45 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l)
int rval, rval2;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- scsi_qla_host_t *ha;
+ scsi_qla_host_t *vha;
+ struct req_que *req;
+ struct rsp_que *rsp;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
l = l;
- ha = fcport->ha;
+ vha = fcport->vha;
+ req = vha->hw->req_q_map[0];
+ rsp = vha->hw->rsp_q_map[0];
mcp->mb[0] = MBC_ABORT_TARGET;
mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
- if (HAS_EXTENDED_IDS(ha)) {
+ if (HAS_EXTENDED_IDS(vha->hw)) {
mcp->mb[1] = fcport->loop_id;
mcp->mb[10] = 0;
mcp->out_mb |= MBX_10;
} else {
mcp->mb[1] = fcport->loop_id << 8;
}
- mcp->mb[2] = ha->loop_reset_delay;
- mcp->mb[9] = ha->vp_idx;
+ mcp->mb[2] = vha->hw->loop_reset_delay;
+ mcp->mb[9] = vha->vp_idx;
mcp->in_mb = MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- ha->host_no, rval));
+ vha->host_no, rval));
}
/* Issue marker IOCB. */
- rval2 = qla2x00_marker(ha, fcport->loop_id, 0, MK_SYNC_ID);
+ rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
+ MK_SYNC_ID);
if (rval2 != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
- "(%x).\n", __func__, ha->host_no, rval2));
+ "(%x).\n", __func__, vha->host_no, rval2));
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
}
return rval;
@@ -842,37 +854,42 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l)
int rval, rval2;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- scsi_qla_host_t *ha;
+ scsi_qla_host_t *vha;
+ struct req_que *req;
+ struct rsp_que *rsp;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
- ha = fcport->ha;
+ vha = fcport->vha;
+ req = vha->hw->req_q_map[0];
+ rsp = vha->hw->rsp_q_map[0];
mcp->mb[0] = MBC_LUN_RESET;
mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
- if (HAS_EXTENDED_IDS(ha))
+ if (HAS_EXTENDED_IDS(vha->hw))
mcp->mb[1] = fcport->loop_id;
else
mcp->mb[1] = fcport->loop_id << 8;
mcp->mb[2] = l;
mcp->mb[3] = 0;
- mcp->mb[9] = ha->vp_idx;
+ mcp->mb[9] = vha->vp_idx;
mcp->in_mb = MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- ha->host_no, rval));
+ vha->host_no, rval));
}
/* Issue marker IOCB. */
- rval2 = qla2x00_marker(ha, fcport->loop_id, l, MK_SYNC_ID_LUN);
+ rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
+ MK_SYNC_ID_LUN);
if (rval2 != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
- "(%x).\n", __func__, ha->host_no, rval2));
+ "(%x).\n", __func__, vha->host_no, rval2));
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
}
return rval;
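[Note: marker IOCBs become queue-aware as well. For comparison, the two call shapes (illustrative; req and rsp are the base queues, fetched exactly as in the hunks above):

    /* before: rval2 = qla2x00_marker(ha, fcport->loop_id, l, MK_SYNC_ID_LUN); */
    struct req_que *req = vha->hw->req_q_map[0];
    struct rsp_que *rsp = vha->hw->rsp_q_map[0];

    rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
        MK_SYNC_ID_LUN);
]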
@@ -899,7 +916,7 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l)
* Kernel context.
*/
int
-qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
+qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
{
int rval;
@@ -907,15 +924,15 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
mbx_cmd_t *mcp = &mc;
DEBUG11(printk("qla2x00_get_adapter_id(%ld): entered.\n",
- ha->host_no));
+ vha->host_no));
mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
- mcp->mb[9] = ha->vp_idx;
+ mcp->mb[9] = vha->vp_idx;
mcp->out_mb = MBX_9|MBX_0;
mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (mcp->mb[0] == MBS_COMMAND_ERROR)
rval = QLA_COMMAND_ERROR;
else if (mcp->mb[0] == MBS_INVALID_COMMAND)
@@ -932,11 +949,11 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n",
- ha->host_no, rval));
+ vha->host_no, rval));
} else {
/*EMPTY*/
DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n",
- ha->host_no));
+ vha->host_no));
}
return rval;
@@ -958,7 +975,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
* Kernel context.
*/
int
-qla2x00_get_retry_cnt(scsi_qla_host_t *ha, uint8_t *retry_cnt, uint8_t *tov,
+qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
uint16_t *r_a_tov)
{
int rval;
@@ -967,19 +984,19 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *ha, uint8_t *retry_cnt, uint8_t *tov,
mbx_cmd_t *mcp = &mc;
DEBUG11(printk("qla2x00_get_retry_cnt(%ld): entered.\n",
- ha->host_no));
+ vha->host_no));
mcp->mb[0] = MBC_GET_RETRY_COUNT;
mcp->out_mb = MBX_0;
mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3_11(printk("qla2x00_get_retry_cnt(%ld): failed = %x.\n",
- ha->host_no, mcp->mb[0]));
+ vha->host_no, mcp->mb[0]));
} else {
/* Convert returned data and check our values. */
*r_a_tov = mcp->mb[3] / 2;
@@ -991,7 +1008,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *ha, uint8_t *retry_cnt, uint8_t *tov,
}
DEBUG11(printk("qla2x00_get_retry_cnt(%ld): done. mb3=%d "
- "ratov=%d.\n", ha->host_no, mcp->mb[3], ratov));
+ "ratov=%d.\n", vha->host_no, mcp->mb[3], ratov));
}
return rval;
@@ -1015,14 +1032,15 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *ha, uint8_t *retry_cnt, uint8_t *tov,
* Kernel context.
*/
int
-qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size)
+qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n",
- ha->host_no));
+ vha->host_no));
if (ha->flags.npiv_supported)
mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
@@ -1040,17 +1058,17 @@ qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size)
mcp->buf_size = size;
mcp->flags = MBX_DMA_OUT;
mcp->tov = MBX_TOV_SECONDS;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3_11(printk("qla2x00_init_firmware(%ld): failed=%x "
"mb0=%x.\n",
- ha->host_no, rval, mcp->mb[0]));
+ vha->host_no, rval, mcp->mb[0]));
} else {
/*EMPTY*/
DEBUG11(printk("qla2x00_init_firmware(%ld): done.\n",
- ha->host_no));
+ vha->host_no));
}
return rval;
@@ -1073,7 +1091,7 @@ qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size)
* Kernel context.
*/
int
-qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
+qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
{
int rval;
mbx_cmd_t mc;
@@ -1081,14 +1099,15 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
port_database_t *pd;
struct port_database_24xx *pd24;
dma_addr_t pd_dma;
+ struct qla_hw_data *ha = vha->hw;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
pd24 = NULL;
pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
if (pd == NULL) {
DEBUG2_3(printk("%s(%ld): failed to allocate Port Database "
- "structure.\n", __func__, ha->host_no));
+ "structure.\n", __func__, vha->host_no));
return QLA_MEMORY_ALLOC_FAILED;
}
memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
@@ -1100,7 +1119,7 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
mcp->mb[3] = LSW(pd_dma);
mcp->mb[6] = MSW(MSD(pd_dma));
mcp->mb[7] = LSW(MSD(pd_dma));
- mcp->mb[9] = ha->vp_idx;
+ mcp->mb[9] = vha->vp_idx;
mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
mcp->in_mb = MBX_0;
if (IS_FWI2_CAPABLE(ha)) {
@@ -1120,7 +1139,7 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
mcp->flags = MBX_DMA_IN;
mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS)
goto gpd_error_out;
@@ -1132,7 +1151,7 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
pd24->last_login_state != PDS_PRLI_COMPLETE) {
DEBUG2(printk("%s(%ld): Unable to verify "
"login-state (%x/%x) for loop_id %x\n",
- __func__, ha->host_no,
+ __func__, vha->host_no,
pd24->current_login_state,
pd24->last_login_state, fcport->loop_id));
rval = QLA_FUNCTION_FAILED;
@@ -1192,9 +1211,9 @@ gpd_error_out:
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
- __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
}
return rval;
@@ -1217,21 +1236,21 @@ gpd_error_out:
* Kernel context.
*/
int
-qla2x00_get_firmware_state(scsi_qla_host_t *ha, uint16_t *states)
+qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
DEBUG11(printk("qla2x00_get_firmware_state(%ld): entered.\n",
- ha->host_no));
+ vha->host_no));
mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
mcp->out_mb = MBX_0;
mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
/* Return firmware states. */
states[0] = mcp->mb[1];
@@ -1241,11 +1260,11 @@ qla2x00_get_firmware_state(scsi_qla_host_t *ha, uint16_t *states)
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3_11(printk("qla2x00_get_firmware_state(%ld): "
- "failed=%x.\n", ha->host_no, rval));
+ "failed=%x.\n", vha->host_no, rval));
} else {
/*EMPTY*/
DEBUG11(printk("qla2x00_get_firmware_state(%ld): done.\n",
- ha->host_no));
+ vha->host_no));
}
return rval;
@@ -1270,7 +1289,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *ha, uint16_t *states)
* Kernel context.
*/
int
-qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
+qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
uint8_t opt)
{
int rval;
@@ -1278,12 +1297,12 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
mbx_cmd_t *mcp = &mc;
DEBUG11(printk("qla2x00_get_port_name(%ld): entered.\n",
- ha->host_no));
+ vha->host_no));
mcp->mb[0] = MBC_GET_PORT_NAME;
- mcp->mb[9] = ha->vp_idx;
+ mcp->mb[9] = vha->vp_idx;
mcp->out_mb = MBX_9|MBX_1|MBX_0;
- if (HAS_EXTENDED_IDS(ha)) {
+ if (HAS_EXTENDED_IDS(vha->hw)) {
mcp->mb[1] = loop_id;
mcp->mb[10] = opt;
mcp->out_mb |= MBX_10;
@@ -1294,12 +1313,12 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3_11(printk("qla2x00_get_port_name(%ld): failed=%x.\n",
- ha->host_no, rval));
+ vha->host_no, rval));
} else {
if (name != NULL) {
/* This function returns name in big endian. */
@@ -1314,7 +1333,7 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
}
DEBUG11(printk("qla2x00_get_port_name(%ld): done.\n",
- ha->host_no));
+ vha->host_no));
}
return rval;
@@ -1336,45 +1355,45 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
* Kernel context.
*/
int
-qla2x00_lip_reset(scsi_qla_host_t *ha)
+qla2x00_lip_reset(scsi_qla_host_t *vha)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
- if (IS_FWI2_CAPABLE(ha)) {
+ if (IS_FWI2_CAPABLE(vha->hw)) {
mcp->mb[0] = MBC_LIP_FULL_LOGIN;
mcp->mb[1] = BIT_6;
mcp->mb[2] = 0;
- mcp->mb[3] = ha->loop_reset_delay;
+ mcp->mb[3] = vha->hw->loop_reset_delay;
mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
} else {
mcp->mb[0] = MBC_LIP_RESET;
mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
- if (HAS_EXTENDED_IDS(ha)) {
+ if (HAS_EXTENDED_IDS(vha->hw)) {
mcp->mb[1] = 0x00ff;
mcp->mb[10] = 0;
mcp->out_mb |= MBX_10;
} else {
mcp->mb[1] = 0xff00;
}
- mcp->mb[2] = ha->loop_reset_delay;
+ mcp->mb[2] = vha->hw->loop_reset_delay;
mcp->mb[3] = 0;
}
mcp->in_mb = MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3_11(printk("%s(%ld): failed=%x.\n",
- __func__, ha->host_no, rval));
+ __func__, vha->host_no, rval));
} else {
/*EMPTY*/
- DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
}
return rval;
@@ -1399,7 +1418,7 @@ qla2x00_lip_reset(scsi_qla_host_t *ha)
* Kernel context.
*/
int
-qla2x00_send_sns(scsi_qla_host_t *ha, dma_addr_t sns_phys_address,
+qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
uint16_t cmd_size, size_t buf_size)
{
int rval;
@@ -1407,10 +1426,11 @@ qla2x00_send_sns(scsi_qla_host_t *ha, dma_addr_t sns_phys_address,
mbx_cmd_t *mcp = &mc;
DEBUG11(printk("qla2x00_send_sns(%ld): entered.\n",
- ha->host_no));
+ vha->host_no));
DEBUG11(printk("qla2x00_send_sns: retry cnt=%d ratov=%d total "
- "tov=%d.\n", ha->retry_count, ha->login_timeout, mcp->tov));
+ "tov=%d.\n", vha->hw->retry_count, vha->hw->login_timeout,
+ mcp->tov));
mcp->mb[0] = MBC_SEND_SNS_COMMAND;
mcp->mb[1] = cmd_size;
@@ -1422,25 +1442,25 @@ qla2x00_send_sns(scsi_qla_host_t *ha, dma_addr_t sns_phys_address,
mcp->in_mb = MBX_0|MBX_1;
mcp->buf_size = buf_size;
mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
- mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
- rval = qla2x00_mailbox_command(ha, mcp);
+ mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x "
- "mb[1]=%x.\n", ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ "mb[1]=%x.\n", vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
DEBUG2_3_11(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x "
- "mb[1]=%x.\n", ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ "mb[1]=%x.\n", vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
} else {
/*EMPTY*/
- DEBUG11(printk("qla2x00_send_sns(%ld): done.\n", ha->host_no));
+ DEBUG11(printk("qla2x00_send_sns(%ld): done.\n", vha->host_no));
}
return rval;
}
int
-qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
+qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
{
int rval;
@@ -1448,13 +1468,14 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
struct logio_entry_24xx *lg;
dma_addr_t lg_dma;
uint32_t iop[2];
+ struct qla_hw_data *ha = vha->hw;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
if (lg == NULL) {
DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n",
- __func__, ha->host_no));
+ __func__, vha->host_no));
return QLA_MEMORY_ALLOC_FAILED;
}
memset(lg, 0, sizeof(struct logio_entry_24xx));
@@ -1470,14 +1491,14 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
lg->port_id[0] = al_pa;
lg->port_id[1] = area;
lg->port_id[2] = domain;
- lg->vp_index = ha->vp_idx;
- rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0);
+ lg->vp_index = vha->vp_idx;
+ rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB "
- "(%x).\n", __func__, ha->host_no, rval));
+ "(%x).\n", __func__, vha->host_no, rval));
} else if (lg->entry_status != 0) {
DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- error status (%x).\n", __func__, ha->host_no,
+ "-- error status (%x).\n", __func__, vha->host_no,
lg->entry_status));
rval = QLA_FUNCTION_FAILED;
} else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
@@ -1486,7 +1507,7 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
"-- completion status (%x) ioparam=%x/%x.\n", __func__,
- ha->host_no, le16_to_cpu(lg->comp_status), iop[0],
+ vha->host_no, le16_to_cpu(lg->comp_status), iop[0],
iop[1]));
switch (iop[0]) {
@@ -1515,7 +1536,7 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
break;
}
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
iop[0] = le32_to_cpu(lg->io_parameter[0]);
@@ -1562,14 +1583,15 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
* Kernel context.
*/
int
-qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
+qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
- DEBUG11(printk("qla2x00_login_fabric(%ld): entered.\n", ha->host_no));
+ DEBUG11(printk("qla2x00_login_fabric(%ld): entered.\n", vha->host_no));
mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -1586,7 +1608,7 @@ qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
/* Return mailbox statuses. */
if (mb != NULL) {
@@ -1613,12 +1635,12 @@ qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
/*EMPTY*/
DEBUG2_3_11(printk("qla2x00_login_fabric(%ld): failed=%x "
- "mb[0]=%x mb[1]=%x mb[2]=%x.\n", ha->host_no, rval,
+ "mb[0]=%x mb[1]=%x mb[2]=%x.\n", vha->host_no, rval,
mcp->mb[0], mcp->mb[1], mcp->mb[2]));
} else {
/*EMPTY*/
DEBUG11(printk("qla2x00_login_fabric(%ld): done.\n",
- ha->host_no));
+ vha->host_no));
}
return rval;
@@ -1641,19 +1663,20 @@ qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
*
*/
int
-qla2x00_login_local_device(scsi_qla_host_t *ha, fc_port_t *fcport,
+qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
uint16_t *mb_ret, uint8_t opt)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
if (IS_FWI2_CAPABLE(ha))
- return qla24xx_login_fabric(ha, fcport->loop_id,
+ return qla24xx_login_fabric(vha, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa, mb_ret, opt);
- DEBUG3(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG3(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
if (HAS_EXTENDED_IDS(ha))
@@ -1665,7 +1688,7 @@ qla2x00_login_local_device(scsi_qla_host_t *ha, fc_port_t *fcport,
mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
/* Return mailbox statuses. */
if (mb_ret != NULL) {
@@ -1686,33 +1709,34 @@ qla2x00_login_local_device(scsi_qla_host_t *ha, fc_port_t *fcport,
rval = QLA_SUCCESS;
DEBUG(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x "
- "mb[6]=%x mb[7]=%x.\n", __func__, ha->host_no, rval,
+ "mb[6]=%x mb[7]=%x.\n", __func__, vha->host_no, rval,
mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]));
DEBUG2_3(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x "
- "mb[6]=%x mb[7]=%x.\n", __func__, ha->host_no, rval,
+ "mb[6]=%x mb[7]=%x.\n", __func__, vha->host_no, rval,
mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]));
} else {
/*EMPTY*/
- DEBUG3(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG3(printk("%s(%ld): done.\n", __func__, vha->host_no));
}
return (rval);
}
int
-qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
+qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
uint8_t area, uint8_t al_pa)
{
int rval;
struct logio_entry_24xx *lg;
dma_addr_t lg_dma;
+ struct qla_hw_data *ha = vha->hw;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
if (lg == NULL) {
DEBUG2_3(printk("%s(%ld): failed to allocate Logout IOCB.\n",
- __func__, ha->host_no));
+ __func__, vha->host_no));
return QLA_MEMORY_ALLOC_FAILED;
}
memset(lg, 0, sizeof(struct logio_entry_24xx));
@@ -1725,25 +1749,26 @@ qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
lg->port_id[0] = al_pa;
lg->port_id[1] = area;
lg->port_id[2] = domain;
- lg->vp_index = ha->vp_idx;
- rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0);
+ lg->vp_index = vha->vp_idx;
+
+ rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB "
- "(%x).\n", __func__, ha->host_no, rval));
+ "(%x).\n", __func__, vha->host_no, rval));
} else if (lg->entry_status != 0) {
DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- error status (%x).\n", __func__, ha->host_no,
+ "-- error status (%x).\n", __func__, vha->host_no,
lg->entry_status));
rval = QLA_FUNCTION_FAILED;
} else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
+ DEBUG2_3_11(printk("%s(%ld %d): failed to complete IOCB "
"-- completion status (%x) ioparam=%x/%x.\n", __func__,
- ha->host_no, le16_to_cpu(lg->comp_status),
+ vha->host_no, vha->vp_idx, le16_to_cpu(lg->comp_status),
le32_to_cpu(lg->io_parameter[0]),
le32_to_cpu(lg->io_parameter[1])));
} else {
/*EMPTY*/
- DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
}
dma_pool_free(ha->s_dma_pool, lg, lg_dma);
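[Note: the 24xx login/logout paths build their IOCBs from the adapter's DMA pool, which now hangs off struct qla_hw_data rather than the host. The lifecycle, reduced to its steps (a condensed sketch of what the surrounding hunks do):

    struct logio_entry_24xx *lg;
    dma_addr_t lg_dma;

    lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
    if (lg == NULL)
            return QLA_MEMORY_ALLOC_FAILED;
    memset(lg, 0, sizeof(struct logio_entry_24xx));

    /* fill entry type, nport handle, port_id bytes, then: */
    lg->vp_index = vha->vp_idx;

    rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
    /* check rval, lg->entry_status and lg->comp_status, then: */
    dma_pool_free(ha->s_dma_pool, lg, lg_dma);
]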
@@ -1768,7 +1793,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
* Kernel context.
*/
int
-qla2x00_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
+qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
uint8_t area, uint8_t al_pa)
{
int rval;
@@ -1776,11 +1801,11 @@ qla2x00_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
mbx_cmd_t *mcp = &mc;
DEBUG11(printk("qla2x00_fabric_logout(%ld): entered.\n",
- ha->host_no));
+ vha->host_no));
mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
mcp->out_mb = MBX_1|MBX_0;
- if (HAS_EXTENDED_IDS(ha)) {
+ if (HAS_EXTENDED_IDS(vha->hw)) {
mcp->mb[1] = loop_id;
mcp->mb[10] = 0;
mcp->out_mb |= MBX_10;
@@ -1791,16 +1816,16 @@ qla2x00_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
mcp->in_mb = MBX_1|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3_11(printk("qla2x00_fabric_logout(%ld): failed=%x "
- "mbx1=%x.\n", ha->host_no, rval, mcp->mb[1]));
+ "mbx1=%x.\n", vha->host_no, rval, mcp->mb[1]));
} else {
/*EMPTY*/
DEBUG11(printk("qla2x00_fabric_logout(%ld): done.\n",
- ha->host_no));
+ vha->host_no));
}
return rval;
@@ -1822,33 +1847,33 @@ qla2x00_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
* Kernel context.
*/
int
-qla2x00_full_login_lip(scsi_qla_host_t *ha)
+qla2x00_full_login_lip(scsi_qla_host_t *vha)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n",
- ha->host_no));
+ vha->host_no));
mcp->mb[0] = MBC_LIP_FULL_LOGIN;
- mcp->mb[1] = IS_FWI2_CAPABLE(ha) ? BIT_3: 0;
+ mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
mcp->mb[2] = 0;
mcp->mb[3] = 0;
mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3_11(printk("qla2x00_full_login_lip(%ld): failed=%x.\n",
- ha->host_no, rval));
+ vha->host_no, rval));
} else {
/*EMPTY*/
DEBUG11(printk("qla2x00_full_login_lip(%ld): done.\n",
- ha->host_no));
+ vha->host_no));
}
return rval;
@@ -1867,7 +1892,7 @@ qla2x00_full_login_lip(scsi_qla_host_t *ha)
* Kernel context.
*/
int
-qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
+qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
uint16_t *entries)
{
int rval;
@@ -1875,20 +1900,20 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
mbx_cmd_t *mcp = &mc;
DEBUG11(printk("qla2x00_get_id_list(%ld): entered.\n",
- ha->host_no));
+ vha->host_no));
if (id_list == NULL)
return QLA_FUNCTION_FAILED;
mcp->mb[0] = MBC_GET_ID_LIST;
mcp->out_mb = MBX_0;
- if (IS_FWI2_CAPABLE(ha)) {
+ if (IS_FWI2_CAPABLE(vha->hw)) {
mcp->mb[2] = MSW(id_list_dma);
mcp->mb[3] = LSW(id_list_dma);
mcp->mb[6] = MSW(MSD(id_list_dma));
mcp->mb[7] = LSW(MSD(id_list_dma));
mcp->mb[8] = 0;
- mcp->mb[9] = ha->vp_idx;
+ mcp->mb[9] = vha->vp_idx;
mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
} else {
mcp->mb[1] = MSW(id_list_dma);
@@ -1900,16 +1925,16 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
mcp->in_mb = MBX_1|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3_11(printk("qla2x00_get_id_list(%ld): failed=%x.\n",
- ha->host_no, rval));
+ vha->host_no, rval));
} else {
*entries = mcp->mb[1];
DEBUG11(printk("qla2x00_get_id_list(%ld): done.\n",
- ha->host_no));
+ vha->host_no));
}
return rval;
@@ -1929,7 +1954,7 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
* Kernel context.
*/
int
-qla2x00_get_resource_cnts(scsi_qla_host_t *ha, uint16_t *cur_xchg_cnt,
+qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
uint16_t *orig_xchg_cnt, uint16_t *cur_iocb_cnt,
uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports)
{
@@ -1937,22 +1962,22 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *ha, uint16_t *cur_xchg_cnt,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
mcp->out_mb = MBX_0;
mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3_11(printk("%s(%ld): failed = %x.\n", __func__,
- ha->host_no, mcp->mb[0]));
+ vha->host_no, mcp->mb[0]));
} else {
DEBUG11(printk("%s(%ld): done. mb1=%x mb2=%x mb3=%x mb6=%x "
- "mb7=%x mb10=%x mb11=%x.\n", __func__, ha->host_no,
+ "mb7=%x mb10=%x mb11=%x.\n", __func__, vha->host_no,
mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[6], mcp->mb[7],
mcp->mb[10], mcp->mb[11]));
@@ -1964,7 +1989,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *ha, uint16_t *cur_xchg_cnt,
*cur_iocb_cnt = mcp->mb[7];
if (orig_iocb_cnt)
*orig_iocb_cnt = mcp->mb[10];
- if (max_npiv_vports)
+ if (vha->hw->flags.npiv_supported && max_npiv_vports)
*max_npiv_vports = mcp->mb[11];
}
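[Note the tightened guard above: mailbox 11 (maximum NPIV vports) is only meaningful when the firmware advertises NPIV support, so it is now copied out conditionally. As with the existing orig_iocb_cnt check, callers with no NPIV interest can simply pass NULL, assuming the remaining out-parameters are guarded the same way (sketch):

    uint16_t cur_xchg, orig_xchg, cur_iocb, orig_iocb;
    int rval;

    /* No NPIV interest: the mb[11] report is skipped entirely. */
    rval = qla2x00_get_resource_cnts(vha, &cur_xchg, &orig_xchg,
        &cur_iocb, &orig_iocb, NULL);
]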
@@ -1987,18 +2012,19 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *ha, uint16_t *cur_xchg_cnt,
* Kernel context.
*/
int
-qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
+qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
char *pmap;
dma_addr_t pmap_dma;
+ struct qla_hw_data *ha = vha->hw;
pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
if (pmap == NULL) {
DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****",
- __func__, ha->host_no));
+ __func__, vha->host_no));
return QLA_MEMORY_ALLOC_FAILED;
}
memset(pmap, 0, FCAL_MAP_SIZE);
@@ -2013,11 +2039,11 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
mcp->buf_size = FCAL_MAP_SIZE;
mcp->flags = MBX_DMA_IN;
mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval == QLA_SUCCESS) {
DEBUG11(printk("%s(%ld): (mb0=%x/mb1=%x) FC/AL Position Map "
- "size (%x)\n", __func__, ha->host_no, mcp->mb[0],
+ "size (%x)\n", __func__, vha->host_no, mcp->mb[0],
mcp->mb[1], (unsigned)pmap[0]));
DEBUG11(qla2x00_dump_buffer(pmap, pmap[0] + 1));
@@ -2028,9 +2054,9 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- ha->host_no, rval));
+ vha->host_no, rval));
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
}
return rval;
@@ -2051,15 +2077,16 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
* BIT_1 = mailbox error.
*/
int
-qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
+qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
struct link_statistics *stats, dma_addr_t stats_dma)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
uint32_t *siter, *diter, dwords;
+ struct qla_hw_data *ha = vha->hw;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_GET_LINK_STATUS;
mcp->mb[2] = MSW(stats_dma);
@@ -2084,12 +2111,12 @@ qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
}
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = IOCTL_CMD;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval == QLA_SUCCESS) {
if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n",
- __func__, ha->host_no, mcp->mb[0]));
+ __func__, vha->host_no, mcp->mb[0]));
rval = QLA_FUNCTION_FAILED;
} else {
/* Copy over data -- firmware data is LE. */
@@ -2101,14 +2128,14 @@ qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
} else {
/* Failed. */
DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- ha->host_no, rval));
+ vha->host_no, rval));
}
return rval;
}
int
-qla24xx_get_isp_stats(scsi_qla_host_t *ha, struct link_statistics *stats,
+qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
dma_addr_t stats_dma)
{
int rval;
@@ -2116,7 +2143,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *ha, struct link_statistics *stats,
mbx_cmd_t *mcp = &mc;
uint32_t *siter, *diter, dwords;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
mcp->mb[2] = MSW(stats_dma);
@@ -2124,18 +2151,18 @@ qla24xx_get_isp_stats(scsi_qla_host_t *ha, struct link_statistics *stats,
mcp->mb[6] = MSW(MSD(stats_dma));
mcp->mb[7] = LSW(MSD(stats_dma));
mcp->mb[8] = sizeof(struct link_statistics) / 4;
- mcp->mb[9] = ha->vp_idx;
+ mcp->mb[9] = vha->vp_idx;
mcp->mb[10] = 0;
mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
mcp->in_mb = MBX_2|MBX_1|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = IOCTL_CMD;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval == QLA_SUCCESS) {
if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n",
- __func__, ha->host_no, mcp->mb[0]));
+ __func__, vha->host_no, mcp->mb[0]));
rval = QLA_FUNCTION_FAILED;
} else {
/* Copy over data -- firmware data is LE. */
@@ -2147,14 +2174,14 @@ qla24xx_get_isp_stats(scsi_qla_host_t *ha, struct link_statistics *stats,
} else {
/* Failed. */
DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- ha->host_no, rval));
+ vha->host_no, rval));
}
return rval;
}
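[Note: both statistics paths above end with the byte-order fixup flagged by the "firmware data is LE" comments: the returned buffer is walked as 32-bit words and converted in place. A minimal sketch, assuming struct link_statistics begins with link_fail_cnt as in qla_def.h:

    dwords = sizeof(struct link_statistics) / 4;
    siter = diter = &stats->link_fail_cnt;
    while (dwords--)
            *diter++ = le32_to_cpu(*siter++);
]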
int
-qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
+qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
{
int rval;
fc_port_t *fcport;
@@ -2163,18 +2190,18 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
struct abort_entry_24xx *abt;
dma_addr_t abt_dma;
uint32_t handle;
- scsi_qla_host_t *pha = to_qla_parent(ha);
+ struct qla_hw_data *ha = vha->hw;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
fcport = sp->fcport;
- spin_lock_irqsave(&pha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
- if (pha->outstanding_cmds[handle] == sp)
+ if (req->outstanding_cmds[handle] == sp)
break;
}
- spin_unlock_irqrestore(&pha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (handle == MAX_OUTSTANDING_COMMANDS) {
/* Command not found. */
return QLA_FUNCTION_FAILED;
@@ -2183,7 +2210,7 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
if (abt == NULL) {
DEBUG2_3(printk("%s(%ld): failed to allocate Abort IOCB.\n",
- __func__, ha->host_no));
+ __func__, vha->host_no));
return QLA_MEMORY_ALLOC_FAILED;
}
memset(abt, 0, sizeof(struct abort_entry_24xx));
@@ -2196,22 +2223,25 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
abt->port_id[1] = fcport->d_id.b.area;
abt->port_id[2] = fcport->d_id.b.domain;
abt->vp_index = fcport->vp_idx;
- rval = qla2x00_issue_iocb(ha, abt, abt_dma, 0);
+
+ abt->req_que_no = cpu_to_le16(req->id);
+
+ rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed to issue IOCB (%x).\n",
- __func__, ha->host_no, rval));
+ __func__, vha->host_no, rval));
} else if (abt->entry_status != 0) {
DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- error status (%x).\n", __func__, ha->host_no,
+ "-- error status (%x).\n", __func__, vha->host_no,
abt->entry_status));
rval = QLA_FUNCTION_FAILED;
} else if (abt->nport_handle != __constant_cpu_to_le16(0)) {
DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- completion status (%x).\n", __func__, ha->host_no,
+ "-- completion status (%x).\n", __func__, vha->host_no,
le16_to_cpu(abt->nport_handle)));
rval = QLA_FUNCTION_FAILED;
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
}
dma_pool_free(ha->s_dma_pool, abt, abt_dma);
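[Note: the abort entry itself also grows a queue tag: the firmware needs to know which request queue the command went out on, hence the new req_que_no field set from req->id. In context (handle_to_abort is recalled from qla_fw.h and is an assumption here, unlike req_que_no, which the hunk above adds):

    abt->handle_to_abort = handle;           /* slot found in req->outstanding_cmds */
    abt->req_que_no = cpu_to_le16(req->id);  /* originating request queue, LE       */
]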
@@ -2233,16 +2263,21 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
int rval, rval2;
struct tsk_mgmt_cmd *tsk;
dma_addr_t tsk_dma;
- scsi_qla_host_t *ha, *pha;
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct req_que *req;
+ struct rsp_que *rsp;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
- ha = fcport->ha;
- pha = to_qla_parent(ha);
- tsk = dma_pool_alloc(pha->s_dma_pool, GFP_KERNEL, &tsk_dma);
+ vha = fcport->vha;
+ ha = vha->hw;
+ req = ha->req_q_map[0];
+ rsp = ha->rsp_q_map[0];
+ tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
if (tsk == NULL) {
DEBUG2_3(printk("%s(%ld): failed to allocate Task Management "
- "IOCB.\n", __func__, ha->host_no));
+ "IOCB.\n", __func__, vha->host_no));
return QLA_MEMORY_ALLOC_FAILED;
}
memset(tsk, 0, sizeof(struct tsk_mgmt_cmd));
@@ -2262,34 +2297,34 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
sizeof(tsk->p.tsk.lun));
}
- rval = qla2x00_issue_iocb(ha, tsk, tsk_dma, 0);
+ rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed to issue %s Reset IOCB "
- "(%x).\n", __func__, ha->host_no, name, rval));
+ "(%x).\n", __func__, vha->host_no, name, rval));
} else if (tsk->p.sts.entry_status != 0) {
DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- error status (%x).\n", __func__, ha->host_no,
+ "-- error status (%x).\n", __func__, vha->host_no,
tsk->p.sts.entry_status));
rval = QLA_FUNCTION_FAILED;
} else if (tsk->p.sts.comp_status !=
__constant_cpu_to_le16(CS_COMPLETE)) {
DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
"-- completion status (%x).\n", __func__,
- ha->host_no, le16_to_cpu(tsk->p.sts.comp_status)));
+ vha->host_no, le16_to_cpu(tsk->p.sts.comp_status)));
rval = QLA_FUNCTION_FAILED;
}
/* Issue marker IOCB. */
- rval2 = qla2x00_marker(ha, fcport->loop_id, l,
+ rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
if (rval2 != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
- "(%x).\n", __func__, ha->host_no, rval2));
+ "(%x).\n", __func__, vha->host_no, rval2));
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
}
- dma_pool_free(pha->s_dma_pool, tsk, tsk_dma);
+ dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
return rval;
}
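[Note: the fc_port_t bookkeeping changes in step: its back-pointer is renamed from ha to vha, and the old to_qla_parent() indirection disappears because shared state is reachable through vha->hw. The task-management prologue, before and after (condensed from the hunk above):

    /* old: ha = fcport->ha; pha = to_qla_parent(ha); */
    vha = fcport->vha;
    ha = vha->hw;
    req = ha->req_q_map[0];   /* base queues, used for the trailing marker */
    rsp = ha->rsp_q_map[0];
]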
@@ -2307,29 +2342,30 @@ qla24xx_lun_reset(struct fc_port *fcport, unsigned int l)
}
int
-qla2x00_system_error(scsi_qla_host_t *ha)
+qla2x00_system_error(scsi_qla_host_t *vha)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
mcp->out_mb = MBX_0;
mcp->in_mb = MBX_0;
mcp->tov = 5;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- ha->host_no, rval));
+ vha->host_no, rval));
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
}
return rval;
@@ -2342,14 +2378,14 @@ qla2x00_system_error(scsi_qla_host_t *ha)
* Returns
*/
int
-qla2x00_set_serdes_params(scsi_qla_host_t *ha, uint16_t sw_em_1g,
+qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
uint16_t sw_em_2g, uint16_t sw_em_4g)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_SERDES_PARAMS;
mcp->mb[1] = BIT_0;
@@ -2360,61 +2396,61 @@ qla2x00_set_serdes_params(scsi_qla_host_t *ha, uint16_t sw_em_1g,
mcp->in_mb = MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
- ha->host_no, rval, mcp->mb[0]));
+ vha->host_no, rval, mcp->mb[0]));
} else {
/*EMPTY*/
- DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
}
return rval;
}
int
-qla2x00_stop_firmware(scsi_qla_host_t *ha)
+qla2x00_stop_firmware(scsi_qla_host_t *vha)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_FWI2_CAPABLE(ha))
+ if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_STOP_FIRMWARE;
mcp->out_mb = MBX_0;
mcp->in_mb = MBX_0;
mcp->tov = 5;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- ha->host_no, rval));
+ vha->host_no, rval));
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
}
return rval;
}
int
-qla2x00_enable_eft_trace(scsi_qla_host_t *ha, dma_addr_t eft_dma,
+qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
uint16_t buffers)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_FWI2_CAPABLE(ha))
+ if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_TRACE_CONTROL;
mcp->mb[1] = TC_EFT_ENABLE;
@@ -2428,28 +2464,28 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *ha, dma_addr_t eft_dma,
mcp->in_mb = MBX_1|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
- __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
}
return rval;
}
int
-qla2x00_disable_eft_trace(scsi_qla_host_t *ha)
+qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_FWI2_CAPABLE(ha))
+ if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_TRACE_CONTROL;
mcp->mb[1] = TC_EFT_DISABLE;
@@ -2457,29 +2493,29 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *ha)
mcp->in_mb = MBX_1|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
- __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
}
return rval;
}
int
-qla2x00_enable_fce_trace(scsi_qla_host_t *ha, dma_addr_t fce_dma,
+qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
uint16_t buffers, uint16_t *mb, uint32_t *dwords)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA25XX(ha))
+ if (!IS_QLA25XX(vha->hw))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_TRACE_CONTROL;
mcp->mb[1] = TC_FCE_ENABLE;
@@ -2497,12 +2533,12 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *ha, dma_addr_t fce_dma,
mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
- __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
if (mb)
memcpy(mb, mcp->mb, 8 * sizeof(*mb));
@@ -2514,16 +2550,16 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *ha, dma_addr_t fce_dma,
}
int
-qla2x00_disable_fce_trace(scsi_qla_host_t *ha, uint64_t *wr, uint64_t *rd)
+qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_FWI2_CAPABLE(ha))
+ if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_TRACE_CONTROL;
mcp->mb[1] = TC_FCE_DISABLE;
@@ -2533,12 +2569,12 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *ha, uint64_t *wr, uint64_t *rd)
MBX_1|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
- __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
if (wr)
*wr = (uint64_t) mcp->mb[5] << 48 |
@@ -2556,17 +2592,17 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *ha, uint64_t *wr, uint64_t *rd)
}
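
The FCE pointers handed back on disable are 64-bit values delivered four 16-bit mailbox registers at a time; the driver open-codes the shift-and-or reassembly shown truncated above. The same composition as a helper (a sketch, assuming the mb[5..2] layout of the hunk):

/* Reassemble one 64-bit value from four 16-bit mailbox registers,
 * high word first (mb[5] down to mb[2], as in the hunk above). */
static inline uint64_t
mbx_regs_to_u64(const uint16_t *mb)
{
	return (uint64_t)mb[5] << 48 | (uint64_t)mb[4] << 32 |
	       (uint64_t)mb[3] << 16 | (uint64_t)mb[2];
}
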
int
-qla2x00_read_sfp(scsi_qla_host_t *ha, dma_addr_t sfp_dma, uint16_t addr,
+qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint16_t addr,
uint16_t off, uint16_t count)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_FWI2_CAPABLE(ha))
+ if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_READ_SFP;
mcp->mb[1] = addr;
@@ -2581,30 +2617,30 @@ qla2x00_read_sfp(scsi_qla_host_t *ha, dma_addr_t sfp_dma, uint16_t addr,
mcp->in_mb = MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
- ha->host_no, rval, mcp->mb[0]));
+ vha->host_no, rval, mcp->mb[0]));
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
}
return rval;
}
int
-qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
+qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
uint16_t port_speed, uint16_t *mb)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_IIDMA_CAPABLE(ha))
+ if (!IS_IIDMA_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_PORT_PARAMS;
mcp->mb[1] = loop_id;
@@ -2615,7 +2651,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
/* Return mailbox statuses. */
if (mb != NULL) {
@@ -2628,28 +2664,29 @@ qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- ha->host_no, rval));
+ vha->host_no, rval));
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
}
return rval;
}
void
-qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
+qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
struct vp_rpt_id_entry_24xx *rptid_entry)
{
uint8_t vp_idx;
uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
- scsi_qla_host_t *vha;
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *vp;
if (rptid_entry->entry_status != 0)
return;
if (rptid_entry->format == 0) {
DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d,"
- " number of VPs acquired %d\n", __func__, ha->host_no,
+ " number of VPs acquired %d\n", __func__, vha->host_no,
MSB(rptid_entry->vp_count), LSB(rptid_entry->vp_count)));
DEBUG15(printk("%s primary port id %02x%02x%02x\n", __func__,
rptid_entry->port_id[2], rptid_entry->port_id[1],
@@ -2658,7 +2695,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
vp_idx = LSB(stat);
DEBUG15(printk("%s:format 1: scsi(%ld): VP[%d] enabled "
"- status %d - "
- "with port id %02x%02x%02x\n",__func__,ha->host_no,
+ "with port id %02x%02x%02x\n", __func__, vha->host_no,
vp_idx, MSB(stat),
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]));
@@ -2668,25 +2705,24 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
if (MSB(stat) == 1)
return;
- list_for_each_entry(vha, &ha->vp_list, vp_list)
- if (vp_idx == vha->vp_idx)
+ list_for_each_entry(vp, &ha->vp_list, list)
+ if (vp_idx == vp->vp_idx)
break;
-
- if (!vha)
+ if (!vp)
return;
- vha->d_id.b.domain = rptid_entry->port_id[2];
- vha->d_id.b.area = rptid_entry->port_id[1];
- vha->d_id.b.al_pa = rptid_entry->port_id[0];
+ vp->d_id.b.domain = rptid_entry->port_id[2];
+ vp->d_id.b.area = rptid_entry->port_id[1];
+ vp->d_id.b.al_pa = rptid_entry->port_id[0];
/*
* Cannot configure here as we are still sitting on the
* response queue. Handle it in dpc context.
*/
- set_bit(VP_IDX_ACQUIRED, &vha->vp_flags);
- set_bit(VP_DPC_NEEDED, &ha->dpc_flags);
+ set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
+ set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(ha);
+ qla2xxx_wake_dpc(vha);
}
}
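
One caveat in the reworked lookup above: when list_for_each_entry() runs off the end of ha->vp_list without a match, the iterator is left pointing at the list head's containing address, never NULL, so the `if (!vp)` guard cannot catch a missing vport. A safer lookup returns the match explicitly (a hedged sketch; qla2xxx_find_vp_by_idx is a hypothetical helper, not part of this patch):

/* Hypothetical helper: return the vport with a matching index, or NULL. */
static scsi_qla_host_t *
qla2xxx_find_vp_by_idx(struct qla_hw_data *ha, uint8_t vp_idx)
{
	scsi_qla_host_t *vp;

	list_for_each_entry(vp, &ha->vp_list, list)
		if (vp->vp_idx == vp_idx)
			return vp;	/* found: safe to dereference */
	return NULL;			/* ran off the end: no match */
}
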
@@ -2709,15 +2745,15 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
int rval;
struct vp_config_entry_24xx *vpmod;
dma_addr_t vpmod_dma;
- scsi_qla_host_t *pha;
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
/* This can be called by the parent */
- pha = to_qla_parent(vha);
- vpmod = dma_pool_alloc(pha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
+ vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
if (!vpmod) {
DEBUG2_3(printk("%s(%ld): failed to allocate Modify VP "
- "IOCB.\n", __func__, pha->host_no));
+ "IOCB.\n", __func__, vha->host_no));
return QLA_MEMORY_ALLOC_FAILED;
}
@@ -2732,26 +2768,27 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
vpmod->entry_count = 1;
- rval = qla2x00_issue_iocb(pha, vpmod, vpmod_dma, 0);
+ rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed to issue VP config IOCB"
- "(%x).\n", __func__, pha->host_no, rval));
+ "(%x).\n", __func__, base_vha->host_no, rval));
} else if (vpmod->comp_status != 0) {
DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- error status (%x).\n", __func__, pha->host_no,
+ "-- error status (%x).\n", __func__, base_vha->host_no,
vpmod->comp_status));
rval = QLA_FUNCTION_FAILED;
} else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- completion status (%x).\n", __func__, pha->host_no,
+ "-- completion status (%x).\n", __func__, base_vha->host_no,
le16_to_cpu(vpmod->comp_status)));
rval = QLA_FUNCTION_FAILED;
} else {
/* EMPTY */
- DEBUG11(printk("%s(%ld): done.\n", __func__, pha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__,
+ base_vha->host_no));
fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
}
- dma_pool_free(pha->s_dma_pool, vpmod, vpmod_dma);
+ dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
return rval;
}
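
qla24xx_modify_vp_config() is the template for the IOCB-from-pool pattern used throughout these files: carve an entry from the per-adapter s_dma_pool, fill it, issue it through the base vha, fold both mailbox and completion status into one return code, and always give the entry back to the pool. Condensed (a sketch with the fill step elided):

static int
qla2xxx_pool_iocb_sketch(struct scsi_qla_host *base_vha,
    struct qla_hw_data *ha)
{
	struct vp_config_entry_24xx *iocb;
	dma_addr_t iocb_dma;
	int rval;

	iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &iocb_dma);
	if (!iocb)
		return QLA_MEMORY_ALLOC_FAILED;
	memset(iocb, 0, sizeof(*iocb));
	/* ... fill entry_type, entry_count, WWNs ... */

	rval = qla2x00_issue_iocb(base_vha, iocb, iocb_dma, 0);
	if (rval == QLA_SUCCESS &&
	    iocb->comp_status != __constant_cpu_to_le16(CS_COMPLETE))
		rval = QLA_FUNCTION_FAILED;	/* firmware rejected it */

	dma_pool_free(ha->s_dma_pool, iocb, iocb_dma);
	return rval;
}
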
@@ -2778,11 +2815,12 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
int map, pos;
struct vp_ctrl_entry_24xx *vce;
dma_addr_t vce_dma;
- scsi_qla_host_t *ha = vha->parent;
+ struct qla_hw_data *ha = vha->hw;
int vp_index = vha->vp_idx;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
DEBUG11(printk("%s(%ld): entered. Enabling index %d\n", __func__,
- ha->host_no, vp_index));
+ vha->host_no, vp_index));
if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
return QLA_PARAMETER_ERROR;
@@ -2791,7 +2829,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
if (!vce) {
DEBUG2_3(printk("%s(%ld): "
"failed to allocate VP Control IOCB.\n", __func__,
- ha->host_no));
+ base_vha->host_no));
return QLA_MEMORY_ALLOC_FAILED;
}
memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
@@ -2810,30 +2848,30 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
vce->vp_idx_map[map] |= 1 << pos;
mutex_unlock(&ha->vport_lock);
- rval = qla2x00_issue_iocb(ha, vce, vce_dma, 0);
+ rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed to issue VP control IOCB"
- "(%x).\n", __func__, ha->host_no, rval));
+ "(%x).\n", __func__, base_vha->host_no, rval));
printk("%s(%ld): failed to issue VP control IOCB"
- "(%x).\n", __func__, ha->host_no, rval);
+ "(%x).\n", __func__, base_vha->host_no, rval);
} else if (vce->entry_status != 0) {
DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- error status (%x).\n", __func__, ha->host_no,
+ "-- error status (%x).\n", __func__, base_vha->host_no,
vce->entry_status));
printk("%s(%ld): failed to complete IOCB "
- "-- error status (%x).\n", __func__, ha->host_no,
+ "-- error status (%x).\n", __func__, base_vha->host_no,
vce->entry_status);
rval = QLA_FUNCTION_FAILED;
} else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- completion status (%x).\n", __func__, ha->host_no,
+ "-- completion status (%x).\n", __func__, base_vha->host_no,
le16_to_cpu(vce->comp_status)));
printk("%s(%ld): failed to complete IOCB "
- "-- completion status (%x).\n", __func__, ha->host_no,
+ "-- completion status (%x).\n", __func__, base_vha->host_no,
le16_to_cpu(vce->comp_status));
rval = QLA_FUNCTION_FAILED;
} else {
- DEBUG2(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG2(printk("%s(%ld): done.\n", __func__, base_vha->host_no));
}
dma_pool_free(ha->s_dma_pool, vce, vce_dma);
@@ -2863,7 +2901,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
*/
int
-qla2x00_send_change_request(scsi_qla_host_t *ha, uint16_t format,
+qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
uint16_t vp_idx)
{
int rval;
@@ -2884,7 +2922,7 @@ qla2x00_send_change_request(scsi_qla_host_t *ha, uint16_t format,
mcp->in_mb = MBX_0|MBX_1;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval == QLA_SUCCESS) {
if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
@@ -2897,16 +2935,16 @@ qla2x00_send_change_request(scsi_qla_host_t *ha, uint16_t format,
}
int
-qla2x00_dump_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t addr,
+qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
uint32_t size)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
- if (MSW(addr) || IS_FWI2_CAPABLE(ha)) {
+ if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
mcp->mb[8] = MSW(addr);
mcp->out_mb = MBX_8|MBX_0;
@@ -2920,7 +2958,7 @@ qla2x00_dump_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t addr,
mcp->mb[6] = MSW(MSD(req_dma));
mcp->mb[7] = LSW(MSD(req_dma));
mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
- if (IS_FWI2_CAPABLE(ha)) {
+ if (IS_FWI2_CAPABLE(vha->hw)) {
mcp->mb[4] = MSW(size);
mcp->mb[5] = LSW(size);
mcp->out_mb |= MBX_5|MBX_4;
@@ -2932,13 +2970,13 @@ qla2x00_dump_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t addr,
mcp->in_mb = MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
- ha->host_no, rval, mcp->mb[0]));
+ vha->host_no, rval, mcp->mb[0]));
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
}
return rval;
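
The dump-RAM setup splits the 64-bit DMA address of the host buffer across 16-bit mailbox registers with the MSW/LSW/MSD/LSD macros. Spelled out as plain shifts (a sketch of what the macro arithmetic computes):

/* How a dma_addr_t maps onto four 16-bit mailbox registers
 * (sketch of the MSW/LSW/MSD/LSD decomposition). */
static void
split_dma_addr(dma_addr_t dma, uint16_t mb[4])
{
	uint64_t a = (uint64_t)dma;	/* widen before shifting */

	mb[0] = a & 0xffff;		/* LSW(LSD(dma)) */
	mb[1] = (a >> 16) & 0xffff;	/* MSW(LSD(dma)) */
	mb[2] = (a >> 32) & 0xffff;	/* LSW(MSD(dma)) */
	mb[3] = (a >> 48) & 0xffff;	/* MSW(MSD(dma)) */
}
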
@@ -2954,20 +2992,21 @@ struct cs84xx_mgmt_cmd {
};
int
-qla84xx_verify_chip(struct scsi_qla_host *ha, uint16_t *status)
+qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
{
int rval, retry;
struct cs84xx_mgmt_cmd *mn;
dma_addr_t mn_dma;
uint16_t options;
unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
- DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+ DEBUG16(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
if (mn == NULL) {
DEBUG2_3(printk("%s(%ld): failed to allocate Verify ISP84XX "
- "IOCB.\n", __func__, ha->host_no));
+ "IOCB.\n", __func__, vha->host_no));
return QLA_MEMORY_ALLOC_FAILED;
}
@@ -2986,19 +3025,19 @@ qla84xx_verify_chip(struct scsi_qla_host *ha, uint16_t *status)
mn->p.req.options = cpu_to_le16(options);
DEBUG16(printk("%s(%ld): Dump of Verify Request.\n", __func__,
- ha->host_no));
+ vha->host_no));
DEBUG16(qla2x00_dump_buffer((uint8_t *)mn,
sizeof(*mn)));
- rval = qla2x00_issue_iocb_timeout(ha, mn, mn_dma, 0, 120);
+ rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
if (rval != QLA_SUCCESS) {
DEBUG2_16(printk("%s(%ld): failed to issue Verify "
- "IOCB (%x).\n", __func__, ha->host_no, rval));
+ "IOCB (%x).\n", __func__, vha->host_no, rval));
goto verify_done;
}
DEBUG16(printk("%s(%ld): Dump of Verify Response.\n", __func__,
- ha->host_no));
+ vha->host_no));
DEBUG16(qla2x00_dump_buffer((uint8_t *)mn,
sizeof(*mn)));
@@ -3006,21 +3045,21 @@ qla84xx_verify_chip(struct scsi_qla_host *ha, uint16_t *status)
status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
le16_to_cpu(mn->p.rsp.failure_code) : 0;
DEBUG2_16(printk("%s(%ld): cs=%x fc=%x\n", __func__,
- ha->host_no, status[0], status[1]));
+ vha->host_no, status[0], status[1]));
if (status[0] != CS_COMPLETE) {
rval = QLA_FUNCTION_FAILED;
if (!(options & VCO_DONT_UPDATE_FW)) {
DEBUG2_16(printk("%s(%ld): Firmware update "
"failed. Retrying without update "
- "firmware.\n", __func__, ha->host_no));
+ "firmware.\n", __func__, vha->host_no));
options |= VCO_DONT_UPDATE_FW;
options &= ~VCO_FORCE_UPDATE;
retry = 1;
}
} else {
DEBUG2_16(printk("%s(%ld): firmware updated to %x.\n",
- __func__, ha->host_no,
+ __func__, vha->host_no,
le32_to_cpu(mn->p.rsp.fw_ver)));
/* NOTE: we only update OP firmware. */
@@ -3037,10 +3076,115 @@ verify_done:
if (rval != QLA_SUCCESS) {
DEBUG2_16(printk("%s(%ld): failed=%x.\n", __func__,
- ha->host_no, rval));
+ vha->host_no, rval));
} else {
- DEBUG16(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ DEBUG16(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ }
+
+ return rval;
+}
+
+int
+qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req,
+ uint8_t options)
+{
+ int rval;
+ unsigned long flags;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct device_reg_25xxmq __iomem *reg;
+ struct qla_hw_data *ha = vha->hw;
+
+ mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
+ mcp->mb[1] = options;
+ mcp->mb[2] = MSW(LSD(req->dma));
+ mcp->mb[3] = LSW(LSD(req->dma));
+ mcp->mb[6] = MSW(MSD(req->dma));
+ mcp->mb[7] = LSW(MSD(req->dma));
+ mcp->mb[5] = req->length;
+ if (req->rsp)
+ mcp->mb[10] = req->rsp->id;
+ mcp->mb[12] = req->qos;
+ mcp->mb[11] = req->vp_idx;
+ mcp->mb[13] = req->rid;
+
+ reg = (struct device_reg_25xxmq __iomem *)((uint8_t __iomem *)
+ ha->mqiobase + QLA_QUE_PAGE * req->id);
+
+ mcp->mb[4] = req->id;
+ /* que in ptr index */
+ mcp->mb[8] = 0;
+ /* que out ptr index */
+ mcp->mb[9] = 0;
+ mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
+ MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->flags = MBX_DMA_OUT;
+ mcp->tov = 60;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (!(options & BIT_0)) {
+ WRT_REG_DWORD(&reg->req_q_in, 0);
+ WRT_REG_DWORD(&reg->req_q_out, 0);
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS)
+ DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x mb0=%x.\n",
+ __func__, vha->host_no, rval, mcp->mb[0]));
+ return rval;
+}
+
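
qla25xx_init_req_que() introduces the per-queue register window: each queue owns one QLA_QUE_PAGE-sized slice of the multiqueue BAR starting at mqiobase, and the in/out doorbells live inside that slice. The address computation isolated, with the __iomem annotation preserved (a sketch):

/* Locate queue N's register window: QLA_QUE_PAGE * N bytes past the
 * start of the multiqueue BAR. */
static struct device_reg_25xxmq __iomem *
mq_reg_window(struct qla_hw_data *ha, uint16_t que_id)
{
	return (struct device_reg_25xxmq __iomem *)
	    ((uint8_t __iomem *)ha->mqiobase + QLA_QUE_PAGE * que_id);
}

Note also the BIT_0 convention: when the caller sets it (as the delete paths in qla_mid.c do below), the init routine leaves the queue's in/out doorbells untouched instead of zeroing them.
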
+int
+qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp,
+ uint8_t options)
+{
+ int rval;
+ unsigned long flags;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct device_reg_25xxmq __iomem *reg;
+ struct qla_hw_data *ha = vha->hw;
+
+ mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
+ mcp->mb[1] = options;
+ mcp->mb[2] = MSW(LSD(rsp->dma));
+ mcp->mb[3] = LSW(LSD(rsp->dma));
+ mcp->mb[6] = MSW(MSD(rsp->dma));
+ mcp->mb[7] = LSW(MSD(rsp->dma));
+ mcp->mb[5] = rsp->length;
+ mcp->mb[11] = rsp->vp_idx;
+ mcp->mb[14] = rsp->msix->vector;
+ mcp->mb[13] = rsp->rid;
+
+ reg = (struct device_reg_25xxmq __iomem *)((uint8_t __iomem *)
+ ha->mqiobase + QLA_QUE_PAGE * rsp->id);
+
+ mcp->mb[4] = rsp->id;
+ /* que in ptr index */
+ mcp->mb[8] = 0;
+ /* que out ptr index */
+ mcp->mb[9] = 0;
+ mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7
+ |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->flags = MBX_DMA_OUT;
+ mcp->tov = 60;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (!(options & BIT_0)) {
+ WRT_REG_DWORD(&reg->rsp_q_out, 0);
+ WRT_REG_DWORD(&reg->rsp_q_in, 0);
}
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS)
+ DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x "
+ "mb0=%x.\n", __func__,
+ vha->host_no, rval, mcp->mb[0]));
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 93560cd72784..386ffeae5b5a 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -5,6 +5,7 @@
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
+#include "qla_gbl.h"
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
@@ -18,7 +19,7 @@
void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
- if (vha->parent && vha->timer_active) {
+ if (vha->vp_idx && vha->timer_active) {
del_timer_sync(&vha->timer);
vha->timer_active = 0;
}
@@ -28,7 +29,7 @@ static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
uint32_t vp_id;
- scsi_qla_host_t *ha = vha->parent;
+ struct qla_hw_data *ha = vha->hw;
/* Find an empty slot and assign an vp_id */
mutex_lock(&ha->vport_lock);
@@ -44,7 +45,7 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
ha->num_vhosts++;
ha->cur_vport_count++;
vha->vp_idx = vp_id;
- list_add_tail(&vha->vp_list, &ha->vp_list);
+ list_add_tail(&vha->list, &ha->vp_list);
mutex_unlock(&ha->vport_lock);
return vp_id;
}
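
vp_idx allocation is a bitmap scan under vport_lock; the same claim-a-free-bit shape reappears later in this patch for the req/rsp queue-id maps. Generalized (a sketch):

/* Generic id-bitmap claim, as used for vp_idx and the queue-id maps:
 * scan, claim, and publish under one mutex so ids stay unique. */
static int
claim_id(unsigned long *map, unsigned long max, struct mutex *lock)
{
	unsigned long id;

	mutex_lock(lock);
	id = find_first_zero_bit(map, max);
	if (id >= max) {
		mutex_unlock(lock);
		return -1;		/* table full */
	}
	set_bit(id, map);
	mutex_unlock(lock);
	return id;
}
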
@@ -53,24 +54,24 @@ void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
uint16_t vp_id;
- scsi_qla_host_t *ha = vha->parent;
+ struct qla_hw_data *ha = vha->hw;
mutex_lock(&ha->vport_lock);
vp_id = vha->vp_idx;
ha->num_vhosts--;
ha->cur_vport_count--;
clear_bit(vp_id, ha->vp_idx_map);
- list_del(&vha->vp_list);
+ list_del(&vha->list);
mutex_unlock(&ha->vport_lock);
}
static scsi_qla_host_t *
-qla24xx_find_vhost_by_name(scsi_qla_host_t *ha, uint8_t *port_name)
+qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
scsi_qla_host_t *vha;
/* Locate matching device in database. */
- list_for_each_entry(vha, &ha->vp_list, vp_list) {
+ list_for_each_entry(vha, &ha->vp_list, list) {
if (!memcmp(port_name, vha->port_name, WWN_SIZE))
return vha;
}
@@ -94,16 +95,13 @@ static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
fc_port_t *fcport;
- scsi_qla_host_t *pha = to_qla_parent(vha);
-
- list_for_each_entry(fcport, &pha->fcports, list) {
- if (fcport->vp_idx != vha->vp_idx)
- continue;
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
DEBUG15(printk("scsi(%ld): Marking port dead, "
"loop_id=0x%04x :%x\n",
vha->host_no, fcport->loop_id, fcport->vp_idx));
+ atomic_set(&fcport->state, FCS_DEVICE_DEAD);
qla2x00_mark_device_lost(vha, fcport, 0, 0);
atomic_set(&fcport->state, FCS_UNCONFIGURED);
}
@@ -118,7 +116,6 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
- /* Delete all vp's fcports from parent's list */
qla2x00_mark_vp_devices_dead(vha);
atomic_set(&vha->vp_state, VP_FAILED);
vha->flags.management_server_logged_in = 0;
@@ -135,11 +132,12 @@ int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
int ret;
- scsi_qla_host_t *ha = vha->parent;
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
/* Check if physical ha port is Up */
- if (atomic_read(&ha->loop_state) == LOOP_DOWN ||
- atomic_read(&ha->loop_state) == LOOP_DEAD ) {
+ if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
+ atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
vha->vp_err_state = VP_ERR_PORTDWN;
fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
goto enable_failed;
@@ -177,8 +175,8 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
vha->host_no, __func__));
ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
if (ret != QLA_SUCCESS) {
- DEBUG15(qla_printk(KERN_ERR, vha, "Failed to enable receiving"
- " of RSCN requests: 0x%x\n", ret));
+ DEBUG15(qla_printk(KERN_ERR, vha->hw, "Failed to enable "
+ "receiving of RSCN requests: 0x%x\n", ret));
return;
} else {
/* Corresponds to SCR enabled */
@@ -194,25 +192,14 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
}
void
-qla2x00_alert_all_vps(scsi_qla_host_t *ha, uint16_t *mb)
+qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
- int i, vp_idx_matched;
scsi_qla_host_t *vha;
+ struct qla_hw_data *ha = rsp->hw;
+ int i = 0;
- if (ha->parent)
- return;
-
- for_each_mapped_vp_idx(ha, i) {
- vp_idx_matched = 0;
-
- list_for_each_entry(vha, &ha->vp_list, vp_list) {
- if (i == vha->vp_idx) {
- vp_idx_matched = 1;
- break;
- }
- }
-
- if (vp_idx_matched) {
+ list_for_each_entry(vha, &ha->vp_list, list) {
+ if (vha->vp_idx) {
switch (mb[0]) {
case MBA_LIP_OCCURRED:
case MBA_LOOP_UP:
@@ -223,16 +210,17 @@ qla2x00_alert_all_vps(scsi_qla_host_t *ha, uint16_t *mb)
case MBA_PORT_UPDATE:
case MBA_RSCN_UPDATE:
DEBUG15(printk("scsi(%ld)%s: Async_event for"
- " VP[%d], mb = 0x%x, vha=%p\n",
- vha->host_no, __func__,i, *mb, vha));
- qla2x00_async_event(vha, mb);
+ " VP[%d], mb = 0x%x, vha=%p\n",
+ vha->host_no, __func__, i, *mb, vha));
+ qla2x00_async_event(vha, rsp, mb);
break;
}
}
+ i++;
}
}
-void
+int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
/*
@@ -247,38 +235,56 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
}
+ /* To exclusively reset vport, we need to log it out first.*/
+ if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
+ qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
+
DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n",
vha->host_no, vha->vp_idx));
- qla24xx_enable_vp(vha);
+ return qla24xx_enable_vp(vha);
}
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
- scsi_qla_host_t *ha = vha->parent;
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
/* VP acquired. complete port configuration */
- if (atomic_read(&ha->loop_state) == LOOP_READY) {
+ if (atomic_read(&base_vha->loop_state) == LOOP_READY) {
qla24xx_configure_vp(vha);
} else {
set_bit(VP_IDX_ACQUIRED, &vha->vp_flags);
- set_bit(VP_DPC_NEEDED, &ha->dpc_flags);
+ set_bit(VP_DPC_NEEDED, &base_vha->dpc_flags);
}
return 0;
}
- if (test_and_clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
- qla2x00_vp_abort_isp(vha);
+ if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
+ qla2x00_update_fcports(vha);
+ clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
+ }
+
+ if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
+ !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
+ atomic_read(&vha->loop_state) != LOOP_DOWN) {
+
+ DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
+ vha->host_no));
+ qla2x00_relogin(vha);
+
+ DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
+ vha->host_no));
+ }
if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
(!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
clear_bit(RESET_ACTIVE, &vha->dpc_flags);
}
- if (atomic_read(&vha->vp_state) == VP_ACTIVE &&
- test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
+ if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
qla2x00_loop_resync(vha);
clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
@@ -289,38 +295,30 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
}
void
-qla2x00_do_dpc_all_vps(scsi_qla_host_t *ha)
+qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
int ret;
- int i, vp_idx_matched;
- scsi_qla_host_t *vha;
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *vp;
- if (ha->parent)
+ if (vha->vp_idx)
return;
if (list_empty(&ha->vp_list))
return;
- clear_bit(VP_DPC_NEEDED, &ha->dpc_flags);
-
- for_each_mapped_vp_idx(ha, i) {
- vp_idx_matched = 0;
-
- list_for_each_entry(vha, &ha->vp_list, vp_list) {
- if (i == vha->vp_idx) {
- vp_idx_matched = 1;
- break;
- }
- }
+ clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);
- if (vp_idx_matched)
- ret = qla2x00_do_dpc_vp(vha);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ if (vp->vp_idx)
+ ret = qla2x00_do_dpc_vp(vp);
}
}
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
- scsi_qla_host_t *ha = shost_priv(fc_vport->shost);
+ scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
+ struct qla_hw_data *ha = base_vha->hw;
scsi_qla_host_t *vha;
uint8_t port_name[WWN_SIZE];
@@ -337,7 +335,7 @@ qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
/* Check up unique WWPN */
u64_to_wwn(fc_vport->port_name, port_name);
- if (!memcmp(port_name, ha->port_name, WWN_SIZE))
+ if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
return VPCERR_BAD_WWN;
vha = qla24xx_find_vhost_by_name(ha, port_name);
if (vha)
@@ -346,7 +344,7 @@ qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
/* Check up max-npiv-supports */
if (ha->num_vhosts > ha->max_npiv_vports) {
DEBUG15(printk("scsi(%ld): num_vhosts %ud is bigger than "
- "max_npv_vports %ud.\n", ha->host_no,
+ "max_npv_vports %ud.\n", base_vha->host_no,
ha->num_vhosts, ha->max_npiv_vports));
return VPCERR_UNSUPPORTED;
}
@@ -356,59 +354,34 @@ qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
- scsi_qla_host_t *ha = shost_priv(fc_vport->shost);
+ scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
+ struct qla_hw_data *ha = base_vha->hw;
scsi_qla_host_t *vha;
+ struct scsi_host_template *sht = &qla24xx_driver_template;
struct Scsi_Host *host;
- host = scsi_host_alloc(&qla24xx_driver_template,
- sizeof(scsi_qla_host_t));
- if (!host) {
- printk(KERN_WARNING
- "qla2xxx: scsi_host_alloc() failed for vport\n");
+ vha = qla2x00_create_host(sht, ha);
+ if (!vha) {
+ DEBUG(printk("qla2xxx: scsi_host_alloc() failed for vport\n"));
return(NULL);
}
- vha = shost_priv(host);
-
- /* clone the parent hba */
- memcpy(vha, ha, sizeof (scsi_qla_host_t));
-
+ host = vha->host;
fc_vport->dd_data = vha;
-
- vha->node_name = kmalloc(WWN_SIZE * sizeof(char), GFP_KERNEL);
- if (!vha->node_name)
- goto create_vhost_failed_1;
-
- vha->port_name = kmalloc(WWN_SIZE * sizeof(char), GFP_KERNEL);
- if (!vha->port_name)
- goto create_vhost_failed_2;
-
/* New host info */
u64_to_wwn(fc_vport->node_name, vha->node_name);
u64_to_wwn(fc_vport->port_name, vha->port_name);
- vha->host = host;
- vha->host_no = host->host_no;
- vha->parent = ha;
vha->fc_vport = fc_vport;
vha->device_flags = 0;
vha->vp_idx = qla24xx_allocate_vp_id(vha);
if (vha->vp_idx > ha->max_npiv_vports) {
DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n",
vha->host_no));
- goto create_vhost_failed_3;
+ goto create_vhost_failed;
}
vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
- init_completion(&vha->mbx_cmd_comp);
- complete(&vha->mbx_cmd_comp);
- init_completion(&vha->mbx_intr_comp);
-
- INIT_LIST_HEAD(&vha->list);
- INIT_LIST_HEAD(&vha->fcports);
- INIT_LIST_HEAD(&vha->vp_fcports);
- INIT_LIST_HEAD(&vha->work_list);
-
vha->dpc_flags = 0L;
set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
@@ -423,7 +396,9 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);
- host->can_queue = vha->request_q_length + 128;
+ memset(vha->req_ques, 0, sizeof(vha->req_ques));
+ vha->req_ques[0] = ha->req_q_map[0]->id;
+ host->can_queue = ha->req_q_map[0]->length + 128;
host->this_id = 255;
host->cmd_per_lun = 3;
host->max_cmd_len = MAX_CMDSZ;
@@ -440,12 +415,341 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
return vha;
-create_vhost_failed_3:
- kfree(vha->port_name);
+create_vhost_failed:
+ return NULL;
+}
-create_vhost_failed_2:
- kfree(vha->node_name);
+static void
+qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t que_id = req->id;
+
+ dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
+ sizeof(request_t), req->ring, req->dma);
+ req->ring = NULL;
+ req->dma = 0;
+ if (que_id) {
+ ha->req_q_map[que_id] = NULL;
+ mutex_lock(&ha->vport_lock);
+ clear_bit(que_id, ha->req_qid_map);
+ mutex_unlock(&ha->vport_lock);
+ }
+ kfree(req);
+ req = NULL;
+}
-create_vhost_failed_1:
- return NULL;
+static void
+qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t que_id = rsp->id;
+
+ if (rsp->msix && rsp->msix->have_irq) {
+ free_irq(rsp->msix->vector, rsp);
+ rsp->msix->have_irq = 0;
+ rsp->msix->rsp = NULL;
+ }
+ dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
+ sizeof(response_t), rsp->ring, rsp->dma);
+ rsp->ring = NULL;
+ rsp->dma = 0;
+ if (que_id) {
+ ha->rsp_q_map[que_id] = NULL;
+ mutex_lock(&ha->vport_lock);
+ clear_bit(que_id, ha->rsp_qid_map);
+ mutex_unlock(&ha->vport_lock);
+ }
+ kfree(rsp);
+ rsp = NULL;
+}
+
+int
+qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
+{
+ int ret = -1;
+
+ if (req) {
+ req->options |= BIT_0;
+ ret = qla25xx_init_req_que(vha, req, req->options);
+ }
+ if (ret == QLA_SUCCESS)
+ qla25xx_free_req_que(vha, req);
+
+ return ret;
+}
+
+int
+qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
+{
+ int ret = -1;
+
+ if (rsp) {
+ rsp->options |= BIT_0;
+ ret = qla25xx_init_rsp_que(vha, rsp, rsp->options);
+ }
+ if (ret == QLA_SUCCESS)
+ qla25xx_free_rsp_que(vha, rsp);
+
+ return ret;
+}
+
+int qla25xx_update_req_que(struct scsi_qla_host *vha, uint8_t que, uint8_t qos)
+{
+ int ret = 0;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[que];
+
+ req->options |= BIT_3;
+ req->qos = qos;
+ ret = qla25xx_init_req_que(vha, req, req->options);
+ if (ret != QLA_SUCCESS)
+ DEBUG2_17(printk(KERN_WARNING "%s failed\n", __func__));
+ /* restore options bit */
+ req->options &= ~BIT_3;
+ return ret;
+}
+
+
+/* Delete all queues for a given vhost */
+int
+qla25xx_delete_queues(struct scsi_qla_host *vha, uint8_t que_no)
+{
+ int cnt, ret = 0;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (que_no) {
+ /* Delete request queue */
+ req = ha->req_q_map[que_no];
+ if (req) {
+ rsp = req->rsp;
+ ret = qla25xx_delete_req_que(vha, req);
+ if (ret != QLA_SUCCESS) {
+ qla_printk(KERN_WARNING, ha,
+ "Couldn't delete req que %d\n", req->id);
+ return ret;
+ }
+ /* Delete associated response queue */
+ if (rsp) {
+ ret = qla25xx_delete_rsp_que(vha, rsp);
+ if (ret != QLA_SUCCESS) {
+ qla_printk(KERN_WARNING, ha,
+ "Couldn't delete rsp que %d\n",
+ rsp->id);
+ return ret;
+ }
+ }
+ }
+ } else { /* delete all queues of this host */
+ for (cnt = 0; cnt < QLA_MAX_HOST_QUES; cnt++) {
+ /* Delete request queues */
+ req = ha->req_q_map[vha->req_ques[cnt]];
+ if (req && req->id) {
+ rsp = req->rsp;
+ ret = qla25xx_delete_req_que(vha, req);
+ if (ret != QLA_SUCCESS) {
+ qla_printk(KERN_WARNING, ha,
+ "Couldn't delete req que %d\n",
+ vha->req_ques[cnt]);
+ return ret;
+ }
+ vha->req_ques[cnt] = ha->req_q_map[0]->id;
+ /* Delete associated response queue */
+ if (rsp && rsp->id) {
+ ret = qla25xx_delete_rsp_que(vha, rsp);
+ if (ret != QLA_SUCCESS) {
+ qla_printk(KERN_WARNING, ha,
+ "Couldn't delete rsp que %d\n",
+ rsp->id);
+ return ret;
+ }
+ }
+ }
+ }
+ }
+ qla_printk(KERN_INFO, ha, "Queues deleted for vport:%d\n",
+ vha->vp_idx);
+ return ret;
+}
+
+int
+qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
+ uint8_t vp_idx, uint16_t rid, uint8_t rsp_que, uint8_t qos)
+{
+ int ret = 0;
+ struct req_que *req = NULL;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ uint16_t que_id = 0;
+
+ req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
+ if (req == NULL) {
+ qla_printk(KERN_WARNING, ha, "could not allocate memory"
+ "for request que\n");
+ goto que_failed;
+ }
+
+ req->length = REQUEST_ENTRY_CNT_24XX;
+ req->ring = dma_alloc_coherent(&ha->pdev->dev,
+ (req->length + 1) * sizeof(request_t),
+ &req->dma, GFP_KERNEL);
+ if (req->ring == NULL) {
+ qla_printk(KERN_WARNING, ha,
+ "Memory Allocation failed - request_ring\n");
+ goto que_failed;
+ }
+
+ mutex_lock(&ha->vport_lock);
+ que_id = find_first_zero_bit(ha->req_qid_map, ha->max_queues);
+ if (que_id >= ha->max_queues) {
+ mutex_unlock(&ha->vport_lock);
+ qla_printk(KERN_INFO, ha, "No resources to create "
+ "additional request queue\n");
+ goto que_failed;
+ }
+ set_bit(que_id, ha->req_qid_map);
+ ha->req_q_map[que_id] = req;
+ req->rid = rid;
+ req->vp_idx = vp_idx;
+ req->qos = qos;
+
+ if (ha->rsp_q_map[rsp_que])
+ req->rsp = ha->rsp_q_map[rsp_que];
+ /* Use alternate PCI bus number */
+ if (MSB(req->rid))
+ options |= BIT_4;
+ /* Use alternate PCI devfn */
+ if (LSB(req->rid))
+ options |= BIT_5;
+ req->options = options;
+ req->ring_ptr = req->ring;
+ req->ring_index = 0;
+ req->cnt = req->length;
+ req->id = que_id;
+ mutex_unlock(&ha->vport_lock);
+
+ ret = qla25xx_init_req_que(base_vha, req, options);
+ if (ret != QLA_SUCCESS) {
+ qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
+ mutex_lock(&ha->vport_lock);
+ clear_bit(que_id, ha->req_qid_map);
+ mutex_unlock(&ha->vport_lock);
+ goto que_failed;
+ }
+
+ return req->id;
+
+que_failed:
+ qla25xx_free_req_que(base_vha, req);
+ return 0;
+}
+
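
One fragile spot in the error handling above: que_failed can be reached with req still NULL (the kzalloc failure) or with req->ring unallocated, yet qla25xx_free_req_que() dereferences req unconditionally. A defensive variant would guard each stage (a hedged sketch, not what this patch ships):

static void
qla25xx_free_req_que_safe(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;

	if (!req)
		return;			/* kzalloc itself failed */
	if (req->ring)
		dma_free_coherent(&ha->pdev->dev,
		    (req->length + 1) * sizeof(request_t),
		    req->ring, req->dma);
	if (req->id) {			/* queue was published */
		mutex_lock(&ha->vport_lock);
		clear_bit(req->id, ha->req_qid_map);
		ha->req_q_map[req->id] = NULL;
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req);
}

qla25xx_create_rsp_que() below shares the same shape, so the same guard applies to its que_failed path.
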
+/* create response queue */
+int
+qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
+ uint8_t vp_idx, uint16_t rid)
+{
+ int ret = 0;
+ struct rsp_que *rsp = NULL;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ uint16_t que_id = 0;
+
+ rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
+ if (rsp == NULL) {
+ qla_printk(KERN_WARNING, ha, "could not allocate memory for"
+ " response que\n");
+ goto que_failed;
+ }
+
+ rsp->length = RESPONSE_ENTRY_CNT_2300;
+ rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
+ (rsp->length + 1) * sizeof(response_t),
+ &rsp->dma, GFP_KERNEL);
+ if (rsp->ring == NULL) {
+ qla_printk(KERN_WARNING, ha,
+ "Memory Allocation failed - response_ring\n");
+ goto que_failed;
+ }
+
+ mutex_lock(&ha->vport_lock);
+ que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues);
+ if (que_id >= ha->max_queues) {
+ mutex_unlock(&ha->vport_lock);
+ qla_printk(KERN_INFO, ha, "No resources to create "
+ "additional response queue\n");
+ goto que_failed;
+ }
+ set_bit(que_id, ha->rsp_qid_map);
+
+ if (ha->flags.msix_enabled)
+ rsp->msix = &ha->msix_entries[que_id + 1];
+ else
+ qla_printk(KERN_WARNING, ha, "msix not enabled\n");
+
+ ha->rsp_q_map[que_id] = rsp;
+ rsp->rid = rid;
+ rsp->vp_idx = vp_idx;
+ rsp->hw = ha;
+ /* Use alternate PCI bus number */
+ if (MSB(rsp->rid))
+ options |= BIT_4;
+ /* Use alternate PCI devfn */
+ if (LSB(rsp->rid))
+ options |= BIT_5;
+ rsp->options = options;
+ rsp->ring_ptr = rsp->ring;
+ rsp->ring_index = 0;
+ rsp->id = que_id;
+ mutex_unlock(&ha->vport_lock);
+
+ ret = qla25xx_request_irq(rsp);
+ if (ret)
+ goto que_failed;
+
+ ret = qla25xx_init_rsp_que(base_vha, rsp, options);
+ if (ret != QLA_SUCCESS) {
+ qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
+ mutex_lock(&ha->vport_lock);
+ clear_bit(que_id, ha->rsp_qid_map);
+ mutex_unlock(&ha->vport_lock);
+ goto que_failed;
+ }
+
+ qla2x00_init_response_q_entries(rsp);
+
+ return rsp->id;
+
+que_failed:
+ qla25xx_free_rsp_que(base_vha, rsp);
+ return 0;
+}
+
+int
+qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos)
+{
+ uint16_t options = 0;
+ uint8_t ret = 0;
+ struct qla_hw_data *ha = vha->hw;
+
+ options |= BIT_1;
+ ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0);
+ if (!ret) {
+ qla_printk(KERN_WARNING, ha, "Response Que create failed\n");
+ return ret;
+ } else
+ qla_printk(KERN_INFO, ha, "Response Que:%d created.\n", ret);
+
+ options = 0;
+ if (qos & BIT_7)
+ options |= BIT_8;
+ ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, ret,
+ qos & ~BIT_7);
+ if (ret) {
+ vha->req_ques[0] = ret;
+ qla_printk(KERN_INFO, ha, "Request Que:%d created.\n", ret);
+ } else
+ qla_printk(KERN_WARNING, ha, "Request Que create failed\n");
+
+ return ret;
}
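
Putting the two creators together: a vport obtains a private queue pair from qla25xx_create_queues(), which first creates the response queue (options BIT_1) and then a request queue bound to it by id, mapping the high QoS bit onto the request-queue options. A usage sketch (vport_setup_queues is a hypothetical caller):

/* Hypothetical caller: give a vport its own queue pair at a given QoS,
 * falling back to the base adapter queues when MQ resources run out. */
static void
vport_setup_queues(struct scsi_qla_host *vha, uint8_t qos)
{
	if (!qla25xx_create_queues(vha, qos))
		qla_printk(KERN_WARNING, vha->hw,
		    "vport %d using base adapter queues\n", vha->vp_idx);
}
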
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 3433441b956a..8ea927788b3f 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -92,7 +92,12 @@ MODULE_PARM_DESC(ql2xiidmaenable,
"Enables iIDMA settings "
"Default is 1 - perform iIDMA. 0 - no iIDMA.");
-
+int ql2xmaxqueues = 1;
+module_param(ql2xmaxqueues, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xmaxqueues,
+ "Enables MQ settings "
+ "Default is 1 for single queue. Set it to number \
+ of queues in MQ mode.");
/*
* SCSI host template entry points
*/
@@ -183,42 +188,108 @@ struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
*/
__inline__ void
-qla2x00_start_timer(scsi_qla_host_t *ha, void *func, unsigned long interval)
+qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
{
- init_timer(&ha->timer);
- ha->timer.expires = jiffies + interval * HZ;
- ha->timer.data = (unsigned long)ha;
- ha->timer.function = (void (*)(unsigned long))func;
- add_timer(&ha->timer);
- ha->timer_active = 1;
+ init_timer(&vha->timer);
+ vha->timer.expires = jiffies + interval * HZ;
+ vha->timer.data = (unsigned long)vha;
+ vha->timer.function = (void (*)(unsigned long))func;
+ add_timer(&vha->timer);
+ vha->timer_active = 1;
}
static inline void
-qla2x00_restart_timer(scsi_qla_host_t *ha, unsigned long interval)
+qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
{
- mod_timer(&ha->timer, jiffies + interval * HZ);
+ mod_timer(&vha->timer, jiffies + interval * HZ);
}
static __inline__ void
-qla2x00_stop_timer(scsi_qla_host_t *ha)
+qla2x00_stop_timer(scsi_qla_host_t *vha)
{
- del_timer_sync(&ha->timer);
- ha->timer_active = 0;
+ del_timer_sync(&vha->timer);
+ vha->timer_active = 0;
}
static int qla2x00_do_dpc(void *data);
static void qla2x00_rst_aen(scsi_qla_host_t *);
-static int qla2x00_mem_alloc(scsi_qla_host_t *);
-static void qla2x00_mem_free(scsi_qla_host_t *ha);
-static void qla2x00_sp_free_dma(scsi_qla_host_t *, srb_t *);
+static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
+ struct req_que **, struct rsp_que **);
+static void qla2x00_mem_free(struct qla_hw_data *);
+static void qla2x00_sp_free_dma(srb_t *);
/* -------------------------------------------------------------------------- */
+static int qla2x00_alloc_queues(struct qla_hw_data *ha)
+{
+ ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_queues,
+ GFP_KERNEL);
+ if (!ha->req_q_map) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to allocate memory for request queue ptrs\n");
+ goto fail_req_map;
+ }
+
+ ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_queues,
+ GFP_KERNEL);
+ if (!ha->rsp_q_map) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to allocate memory for response queue ptrs\n");
+ goto fail_rsp_map;
+ }
+ set_bit(0, ha->rsp_qid_map);
+ set_bit(0, ha->req_qid_map);
+ return 1;
+
+fail_rsp_map:
+ kfree(ha->req_q_map);
+ ha->req_q_map = NULL;
+fail_req_map:
+ return -ENOMEM;
+}
+
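
qla2x00_alloc_queues() uses the kernel's canonical goto-unwind error style: each allocation that fails jumps to a label that releases everything acquired before it, in reverse order, so there is one success return and no duplicated cleanup. The shape in miniature (a sketch):

/* Canonical goto-unwind shape (sketch): unwind labels release earlier
 * allocations in reverse order of acquisition. */
static int
alloc_two(void **a, void **b)
{
	*a = kzalloc(64, GFP_KERNEL);
	if (!*a)
		goto fail_a;
	*b = kzalloc(64, GFP_KERNEL);
	if (!*b)
		goto fail_b;
	return 0;

fail_b:
	kfree(*a);
	*a = NULL;
fail_a:
	return -ENOMEM;
}
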
+static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req,
+ struct rsp_que *rsp)
+{
+ if (rsp && rsp->ring)
+ dma_free_coherent(&ha->pdev->dev,
+ (rsp->length + 1) * sizeof(response_t),
+ rsp->ring, rsp->dma);
+
+ kfree(rsp);
+ rsp = NULL;
+ if (req && req->ring)
+ dma_free_coherent(&ha->pdev->dev,
+ (req->length + 1) * sizeof(request_t),
+ req->ring, req->dma);
+
+ kfree(req);
+ req = NULL;
+}
+
+static void qla2x00_free_queues(struct qla_hw_data *ha)
+{
+ struct req_que *req;
+ struct rsp_que *rsp;
+ int cnt;
+
+ for (cnt = 0; cnt < ha->max_queues; cnt++) {
+ rsp = ha->rsp_q_map[cnt];
+ req = ha->req_q_map[cnt];
+ qla2x00_free_que(ha, req, rsp);
+ }
+ kfree(ha->rsp_q_map);
+ ha->rsp_q_map = NULL;
+
+ kfree(ha->req_q_map);
+ ha->req_q_map = NULL;
+}
static char *
-qla2x00_pci_info_str(struct scsi_qla_host *ha, char *str)
+qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str)
{
+ struct qla_hw_data *ha = vha->hw;
static char *pci_bus_modes[] = {
"33", "66", "100", "133",
};
@@ -240,9 +311,10 @@ qla2x00_pci_info_str(struct scsi_qla_host *ha, char *str)
}
static char *
-qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str)
+qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
{
static char *pci_bus_modes[] = { "33", "66", "100", "133", };
+ struct qla_hw_data *ha = vha->hw;
uint32_t pci_bus;
int pcie_reg;
@@ -290,9 +362,10 @@ qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str)
}
static char *
-qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str)
+qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str)
{
char un_str[10];
+ struct qla_hw_data *ha = vha->hw;
sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
ha->fw_minor_version,
@@ -328,8 +401,9 @@ qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str)
}
static char *
-qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str)
+qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
{
+ struct qla_hw_data *ha = vha->hw;
sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
ha->fw_minor_version,
ha->fw_subminor_version);
@@ -354,18 +428,20 @@ qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str)
}
static inline srb_t *
-qla2x00_get_new_sp(scsi_qla_host_t *ha, fc_port_t *fcport,
+qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
srb_t *sp;
+ struct qla_hw_data *ha = vha->hw;
sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
if (!sp)
return sp;
- sp->ha = ha;
+ sp->vha = vha;
sp->fcport = fcport;
sp->cmd = cmd;
+ sp->que = ha->req_q_map[0];
sp->flags = 0;
CMD_SP(cmd) = (void *)sp;
cmd->scsi_done = done;
@@ -376,9 +452,10 @@ qla2x00_get_new_sp(scsi_qla_host_t *ha, fc_port_t *fcport,
static int
qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
- scsi_qla_host_t *ha = shost_priv(cmd->device->host);
+ scsi_qla_host_t *vha = shost_priv(cmd->device->host);
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
+ struct qla_hw_data *ha = vha->hw;
srb_t *sp;
int rval;
@@ -394,44 +471,43 @@ qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
}
/* Close window on fcport/rport state-transitioning. */
- if (fcport->drport) {
- cmd->result = DID_IMM_RETRY << 16;
- goto qc_fail_command;
- }
+ if (fcport->drport)
+ goto qc_target_busy;
if (atomic_read(&fcport->state) != FCS_ONLINE) {
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
- atomic_read(&ha->loop_state) == LOOP_DEAD) {
+ atomic_read(&vha->loop_state) == LOOP_DEAD) {
cmd->result = DID_NO_CONNECT << 16;
goto qc_fail_command;
}
- goto qc_host_busy;
+ goto qc_target_busy;
}
- spin_unlock_irq(ha->host->host_lock);
+ spin_unlock_irq(vha->host->host_lock);
- sp = qla2x00_get_new_sp(ha, fcport, cmd, done);
+ sp = qla2x00_get_new_sp(vha, fcport, cmd, done);
if (!sp)
goto qc_host_busy_lock;
- rval = qla2x00_start_scsi(sp);
+ rval = ha->isp_ops->start_scsi(sp);
if (rval != QLA_SUCCESS)
goto qc_host_busy_free_sp;
- spin_lock_irq(ha->host->host_lock);
+ spin_lock_irq(vha->host->host_lock);
return 0;
qc_host_busy_free_sp:
- qla2x00_sp_free_dma(ha, sp);
+ qla2x00_sp_free_dma(sp);
mempool_free(sp, ha->srb_mempool);
qc_host_busy_lock:
- spin_lock_irq(ha->host->host_lock);
-
-qc_host_busy:
+ spin_lock_irq(vha->host->host_lock);
return SCSI_MLQUEUE_HOST_BUSY;
+qc_target_busy:
+ return SCSI_MLQUEUE_TARGET_BUSY;
+
qc_fail_command:
done(cmd);
@@ -442,14 +518,15 @@ qc_fail_command:
static int
qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
- scsi_qla_host_t *ha = shost_priv(cmd->device->host);
+ scsi_qla_host_t *vha = shost_priv(cmd->device->host);
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
srb_t *sp;
int rval;
- scsi_qla_host_t *pha = to_qla_parent(ha);
- if (unlikely(pci_channel_offline(pha->pdev))) {
+ if (unlikely(pci_channel_offline(ha->pdev))) {
cmd->result = DID_REQUEUE << 16;
goto qc24_fail_command;
}
@@ -461,44 +538,43 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
}
/* Close window on fcport/rport state-transitioning. */
- if (fcport->drport) {
- cmd->result = DID_IMM_RETRY << 16;
- goto qc24_fail_command;
- }
+ if (fcport->drport)
+ goto qc24_target_busy;
if (atomic_read(&fcport->state) != FCS_ONLINE) {
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
- atomic_read(&pha->loop_state) == LOOP_DEAD) {
+ atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
cmd->result = DID_NO_CONNECT << 16;
goto qc24_fail_command;
}
- goto qc24_host_busy;
+ goto qc24_target_busy;
}
- spin_unlock_irq(ha->host->host_lock);
+ spin_unlock_irq(vha->host->host_lock);
- sp = qla2x00_get_new_sp(pha, fcport, cmd, done);
+ sp = qla2x00_get_new_sp(base_vha, fcport, cmd, done);
if (!sp)
goto qc24_host_busy_lock;
- rval = qla24xx_start_scsi(sp);
+ rval = ha->isp_ops->start_scsi(sp);
if (rval != QLA_SUCCESS)
goto qc24_host_busy_free_sp;
- spin_lock_irq(ha->host->host_lock);
+ spin_lock_irq(vha->host->host_lock);
return 0;
qc24_host_busy_free_sp:
- qla2x00_sp_free_dma(pha, sp);
- mempool_free(sp, pha->srb_mempool);
+ qla2x00_sp_free_dma(sp);
+ mempool_free(sp, ha->srb_mempool);
qc24_host_busy_lock:
- spin_lock_irq(ha->host->host_lock);
-
-qc24_host_busy:
+ spin_lock_irq(vha->host->host_lock);
return SCSI_MLQUEUE_HOST_BUSY;
+qc24_target_busy:
+ return SCSI_MLQUEUE_TARGET_BUSY;
+
qc24_fail_command:
done(cmd);
@@ -512,17 +588,14 @@ qc24_fail_command:
* max time.
*
* Input:
- * ha = actual ha whose done queue will contain the command
- * returned by firmware.
* cmd = Scsi Command to wait on.
- * flag = Abort/Reset(Bus or Device Reset)
*
* Return:
* Not Found : 0
* Found : 1
*/
static int
-qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd)
+qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
{
#define ABORT_POLLING_PERIOD 1000
#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD))
@@ -559,21 +632,22 @@ qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd)
* Failed (Adapter is offline/disabled) : 1
*/
int
-qla2x00_wait_for_hba_online(scsi_qla_host_t *ha)
+qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
{
int return_status;
unsigned long wait_online;
- scsi_qla_host_t *pha = to_qla_parent(ha);
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
- while (((test_bit(ISP_ABORT_NEEDED, &pha->dpc_flags)) ||
- test_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &pha->dpc_flags) ||
- pha->dpc_active) && time_before(jiffies, wait_online)) {
+ while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
+ test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
+ ha->dpc_active) && time_before(jiffies, wait_online)) {
msleep(1000);
}
- if (pha->flags.online)
+ if (base_vha->flags.online)
return_status = QLA_SUCCESS;
else
return_status = QLA_FUNCTION_FAILED;
@@ -598,19 +672,20 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *ha)
* Failed (LOOP_NOT_READY) : 1
*/
static inline int
-qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
+qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
{
int return_status = QLA_SUCCESS;
unsigned long loop_timeout ;
- scsi_qla_host_t *pha = to_qla_parent(ha);
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
/* wait for 5 min at the max for loop to be ready */
loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
- while ((!atomic_read(&pha->loop_down_timer) &&
- atomic_read(&pha->loop_state) == LOOP_DOWN) ||
- atomic_read(&pha->loop_state) != LOOP_READY) {
- if (atomic_read(&pha->loop_state) == LOOP_DEAD) {
+ while ((!atomic_read(&base_vha->loop_down_timer) &&
+ atomic_read(&base_vha->loop_state) == LOOP_DOWN) ||
+ atomic_read(&base_vha->loop_state) != LOOP_READY) {
+ if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
return_status = QLA_FUNCTION_FAILED;
break;
}
@@ -626,35 +701,42 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
void
qla2x00_abort_fcport_cmds(fc_port_t *fcport)
{
- int cnt;
+ int cnt, que, id;
unsigned long flags;
srb_t *sp;
- scsi_qla_host_t *ha = fcport->ha;
- scsi_qla_host_t *pha = to_qla_parent(ha);
+ scsi_qla_host_t *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req;
- spin_lock_irqsave(&pha->hardware_lock, flags);
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
- sp = pha->outstanding_cmds[cnt];
- if (!sp)
- continue;
- if (sp->fcport != fcport)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (que = 0; que < QLA_MAX_HOST_QUES; que++) {
+ id = vha->req_ques[que];
+ req = ha->req_q_map[id];
+ if (!req)
continue;
+ for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if (!sp)
+ continue;
+ if (sp->fcport != fcport)
+ continue;
- spin_unlock_irqrestore(&pha->hardware_lock, flags);
- if (ha->isp_ops->abort_command(ha, sp)) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "Abort failed -- %lx\n", sp->cmd->serial_number));
- } else {
- if (qla2x00_eh_wait_on_command(ha, sp->cmd) !=
- QLA_SUCCESS)
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (ha->isp_ops->abort_command(vha, sp, req)) {
DEBUG2(qla_printk(KERN_WARNING, ha,
- "Abort failed while waiting -- %lx\n",
- sp->cmd->serial_number));
-
+ "Abort failed -- %lx\n",
+ sp->cmd->serial_number));
+ } else {
+ if (qla2x00_eh_wait_on_command(sp->cmd) !=
+ QLA_SUCCESS)
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "Abort failed while waiting -- %lx\n",
+ sp->cmd->serial_number));
+ }
+ spin_lock_irqsave(&ha->hardware_lock, flags);
}
- spin_lock_irqsave(&pha->hardware_lock, flags);
}
- spin_unlock_irqrestore(&pha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void
@@ -692,14 +774,16 @@ qla2x00_block_error_handler(struct scsi_cmnd *cmnd)
static int
qla2xxx_eh_abort(struct scsi_cmnd *cmd)
{
- scsi_qla_host_t *ha = shost_priv(cmd->device->host);
+ scsi_qla_host_t *vha = shost_priv(cmd->device->host);
srb_t *sp;
int ret, i;
unsigned int id, lun;
unsigned long serial;
unsigned long flags;
int wait = 0;
- scsi_qla_host_t *pha = to_qla_parent(ha);
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req;
+ srb_t *spt;
qla2x00_block_error_handler(cmd);
@@ -711,11 +795,15 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
id = cmd->device->id;
lun = cmd->device->lun;
serial = cmd->serial_number;
+ spt = (srb_t *) CMD_SP(cmd);
+ if (!spt)
+ return SUCCESS;
+ req = spt->que;
/* Check active list for command command. */
- spin_lock_irqsave(&pha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
- sp = pha->outstanding_cmds[i];
+ sp = req->outstanding_cmds[i];
if (sp == NULL)
continue;
@@ -723,37 +811,36 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
if (sp->cmd != cmd)
continue;
- DEBUG2(printk("%s(%ld): aborting sp %p from RISC. pid=%ld.\n",
- __func__, ha->host_no, sp, serial));
+ DEBUG2(printk("%s(%ld): aborting sp %p from RISC."
+ " pid=%ld.\n", __func__, vha->host_no, sp, serial));
- spin_unlock_irqrestore(&pha->hardware_lock, flags);
- if (ha->isp_ops->abort_command(ha, sp)) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (ha->isp_ops->abort_command(vha, sp, req)) {
DEBUG2(printk("%s(%ld): abort_command "
- "mbx failed.\n", __func__, ha->host_no));
+ "mbx failed.\n", __func__, vha->host_no));
} else {
DEBUG3(printk("%s(%ld): abort_command "
- "mbx success.\n", __func__, ha->host_no));
+ "mbx success.\n", __func__, vha->host_no));
wait = 1;
}
- spin_lock_irqsave(&pha->hardware_lock, flags);
-
+ spin_lock_irqsave(&ha->hardware_lock, flags);
break;
}
- spin_unlock_irqrestore(&pha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Wait for the command to be returned. */
if (wait) {
- if (qla2x00_eh_wait_on_command(ha, cmd) != QLA_SUCCESS) {
+ if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
qla_printk(KERN_ERR, ha,
"scsi(%ld:%d:%d): Abort handler timed out -- %lx "
- "%x.\n", ha->host_no, id, lun, serial, ret);
+ "%x.\n", vha->host_no, id, lun, serial, ret);
ret = FAILED;
}
}
qla_printk(KERN_INFO, ha,
"scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n",
- ha->host_no, id, lun, wait, serial, ret);
+ vha->host_no, id, lun, wait, serial, ret);
return ret;
}
@@ -765,23 +852,27 @@ enum nexus_wait_type {
};
static int
-qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha, unsigned int t,
- unsigned int l, enum nexus_wait_type type)
+qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
+ unsigned int l, srb_t *sp, enum nexus_wait_type type)
{
int cnt, match, status;
- srb_t *sp;
unsigned long flags;
- scsi_qla_host_t *pha = to_qla_parent(ha);
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req;
status = QLA_SUCCESS;
- spin_lock_irqsave(&pha->hardware_lock, flags);
- for (cnt = 1; status == QLA_SUCCESS && cnt < MAX_OUTSTANDING_COMMANDS;
- cnt++) {
- sp = pha->outstanding_cmds[cnt];
+ if (!sp)
+ return status;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ req = sp->que;
+ for (cnt = 1; status == QLA_SUCCESS &&
+ cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ sp = req->outstanding_cmds[cnt];
if (!sp)
continue;
- if (ha->vp_idx != sp->fcport->ha->vp_idx)
+ if (vha->vp_idx != sp->fcport->vha->vp_idx)
continue;
match = 0;
switch (type) {
@@ -793,17 +884,17 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha, unsigned int t,
break;
case WAIT_LUN:
match = (sp->cmd->device->id == t &&
- sp->cmd->device->lun == l);
+ sp->cmd->device->lun == l);
break;
}
if (!match)
continue;
- spin_unlock_irqrestore(&pha->hardware_lock, flags);
- status = qla2x00_eh_wait_on_command(ha, sp->cmd);
- spin_lock_irqsave(&pha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ status = qla2x00_eh_wait_on_command(sp->cmd);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
}
- spin_unlock_irqrestore(&pha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
return status;
}
@@ -819,7 +910,7 @@ static int
__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int))
{
- scsi_qla_host_t *ha = shost_priv(cmd->device->host);
+ scsi_qla_host_t *vha = shost_priv(cmd->device->host);
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
int err;
@@ -828,31 +919,31 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
if (!fcport)
return FAILED;
- qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET ISSUED.\n",
- ha->host_no, cmd->device->id, cmd->device->lun, name);
+ qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n",
+ vha->host_no, cmd->device->id, cmd->device->lun, name);
err = 0;
- if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS)
+ if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
goto eh_reset_failed;
err = 1;
- if (qla2x00_wait_for_loop_ready(ha) != QLA_SUCCESS)
+ if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS)
goto eh_reset_failed;
err = 2;
if (do_reset(fcport, cmd->device->lun) != QLA_SUCCESS)
goto eh_reset_failed;
err = 3;
- if (qla2x00_eh_wait_for_pending_commands(ha, cmd->device->id,
- cmd->device->lun, type) != QLA_SUCCESS)
+ if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
+ cmd->device->lun, (srb_t *) CMD_SP(cmd), type) != QLA_SUCCESS)
goto eh_reset_failed;
- qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n",
- ha->host_no, cmd->device->id, cmd->device->lun, name);
+ qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n",
+ vha->host_no, cmd->device->id, cmd->device->lun, name);
return SUCCESS;
eh_reset_failed:
- qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n",
- ha->host_no, cmd->device->id, cmd->device->lun, name,
+ qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n",
+ vha->host_no, cmd->device->id, cmd->device->lun, name,
reset_errors[err]);
return FAILED;
}
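__qla2xxx_eh_generic_reset is one body shared by the DEVICE and TARGET handlers; only the do_reset callback differs, and the err counter is bumped before each stage so the failure message can name the stage that broke. A compressed sketch of that callback-plus-stage-counter shape (the stage strings are stand-ins for the driver's reset_errors[], and the loop-ready stage is elided):

#include <stdio.h>

static const char *stage_name[] = {
	"HBA not online", "loop not ready",
	"reset callback failed", "pending commands timed out",
};

static int generic_reset(int (*wait_online)(void),
			 int (*do_reset)(unsigned int lun),
			 int (*wait_pending)(void), unsigned int lun)
{
	int err = 0;

	if (wait_online())
		goto failed;
	err = 1;	/* loop-ready stage elided in this sketch */
	err = 2;
	if (do_reset(lun))
		goto failed;
	err = 3;
	if (wait_pending())
		goto failed;
	return 0;

failed:
	fprintf(stderr, "RESET FAILED: %s\n", stage_name[err]);
	return -1;
}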
@@ -860,7 +951,8 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
static int
qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
- scsi_qla_host_t *ha = shost_priv(cmd->device->host);
+ scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+ struct qla_hw_data *ha = vha->hw;
return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
ha->isp_ops->lun_reset);
@@ -869,7 +961,8 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
static int
qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
{
- scsi_qla_host_t *ha = shost_priv(cmd->device->host);
+ scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+ struct qla_hw_data *ha = vha->hw;
return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
ha->isp_ops->target_reset);
@@ -893,12 +986,12 @@ qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
static int
qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
{
- scsi_qla_host_t *ha = shost_priv(cmd->device->host);
- scsi_qla_host_t *pha = to_qla_parent(ha);
+ scsi_qla_host_t *vha = shost_priv(cmd->device->host);
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
int ret = FAILED;
unsigned int id, lun;
unsigned long serial;
+ srb_t *sp = (srb_t *) CMD_SP(cmd);
qla2x00_block_error_handler(cmd);
@@ -909,28 +1002,28 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
if (!fcport)
return ret;
- qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d): LOOP RESET ISSUED.\n", ha->host_no, id, lun);
+ qla_printk(KERN_INFO, vha->hw,
+ "scsi(%ld:%d:%d): BUS RESET ISSUED.\n", vha->host_no, id, lun);
- if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) {
+ if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
DEBUG2(printk("%s failed:board disabled\n",__func__));
goto eh_bus_reset_done;
}
- if (qla2x00_wait_for_loop_ready(ha) == QLA_SUCCESS) {
- if (qla2x00_loop_reset(ha) == QLA_SUCCESS)
+ if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) {
+ if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
ret = SUCCESS;
}
if (ret == FAILED)
goto eh_bus_reset_done;
/* Flush outstanding commands. */
- if (qla2x00_eh_wait_for_pending_commands(pha, 0, 0, WAIT_HOST) !=
+ if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, sp, WAIT_HOST) !=
QLA_SUCCESS)
ret = FAILED;
eh_bus_reset_done:
- qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
+ qla_printk(KERN_INFO, vha->hw, "%s: reset %s\n", __func__,
(ret == FAILED) ? "failed" : "succeded");
return ret;
@@ -954,12 +1047,14 @@ eh_bus_reset_done:
static int
qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
- scsi_qla_host_t *ha = shost_priv(cmd->device->host);
+ scsi_qla_host_t *vha = shost_priv(cmd->device->host);
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
+ struct qla_hw_data *ha = vha->hw;
int ret = FAILED;
unsigned int id, lun;
unsigned long serial;
- scsi_qla_host_t *pha = to_qla_parent(ha);
+ srb_t *sp = (srb_t *) CMD_SP(cmd);
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
qla2x00_block_error_handler(cmd);
@@ -971,9 +1066,9 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
return ret;
qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", ha->host_no, id, lun);
+ "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun);
- if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS)
+ if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
goto eh_host_reset_lock;
/*
@@ -984,26 +1079,28 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
* devices as lost kicking off the port_down_timer
* while dpc is stuck for the mailbox to complete.
*/
- qla2x00_wait_for_loop_ready(ha);
- set_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags);
- if (qla2x00_abort_isp(pha)) {
- clear_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags);
- /* failed. schedule dpc to try */
- set_bit(ISP_ABORT_NEEDED, &pha->dpc_flags);
-
- if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS)
+ qla2x00_wait_for_loop_ready(vha);
+ if (vha != base_vha) {
+ if (qla2x00_vp_abort_isp(vha))
goto eh_host_reset_lock;
+ } else {
+ set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+ if (qla2x00_abort_isp(base_vha)) {
+ clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+ /* failed. schedule dpc to try */
+ set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
+
+ if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
+ goto eh_host_reset_lock;
+ }
+ clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
}
- clear_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags);
- /* Waiting for our command in done_queue to be returned to OS.*/
- if (qla2x00_eh_wait_for_pending_commands(pha, 0, 0, WAIT_HOST) ==
- QLA_SUCCESS)
+ /* Waiting for command to be returned to OS. */
+ if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, sp, WAIT_HOST) ==
+ QLA_SUCCESS)
ret = SUCCESS;
- if (ha->parent)
- qla2x00_vp_abort_isp(ha);
-
eh_host_reset_lock:
qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
(ret == FAILED) ? "failed" : "succeded");
@@ -1022,35 +1119,36 @@ eh_host_reset_lock:
* 0 = success
*/
int
-qla2x00_loop_reset(scsi_qla_host_t *ha)
+qla2x00_loop_reset(scsi_qla_host_t *vha)
{
int ret;
struct fc_port *fcport;
+ struct qla_hw_data *ha = vha->hw;
- if (ha->flags.enable_lip_full_login) {
- ret = qla2x00_full_login_lip(ha);
+ if (ha->flags.enable_lip_full_login && !vha->vp_idx) {
+ ret = qla2x00_full_login_lip(vha);
if (ret != QLA_SUCCESS) {
- DEBUG2_3(printk("%s(%ld): bus_reset failed: "
- "full_login_lip=%d.\n", __func__, ha->host_no,
+ DEBUG2_3(printk("%s(%ld): failed: "
+ "full_login_lip=%d.\n", __func__, vha->host_no,
ret));
}
- atomic_set(&ha->loop_state, LOOP_DOWN);
- atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
- qla2x00_mark_all_devices_lost(ha, 0);
- qla2x00_wait_for_loop_ready(ha);
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ qla2x00_mark_all_devices_lost(vha, 0);
+ qla2x00_wait_for_loop_ready(vha);
}
- if (ha->flags.enable_lip_reset) {
- ret = qla2x00_lip_reset(ha);
+ if (ha->flags.enable_lip_reset && !vha->vp_idx) {
+ ret = qla2x00_lip_reset(vha);
if (ret != QLA_SUCCESS) {
- DEBUG2_3(printk("%s(%ld): bus_reset failed: "
- "lip_reset=%d.\n", __func__, ha->host_no, ret));
- }
- qla2x00_wait_for_loop_ready(ha);
+ DEBUG2_3(printk("%s(%ld): failed: "
+ "lip_reset=%d.\n", __func__, vha->host_no, ret));
+ } else
+ qla2x00_wait_for_loop_ready(vha);
}
if (ha->flags.enable_target_reset) {
- list_for_each_entry(fcport, &ha->fcports, list) {
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->port_type != FCT_TARGET)
continue;
@@ -1058,31 +1156,37 @@ qla2x00_loop_reset(scsi_qla_host_t *ha)
if (ret != QLA_SUCCESS) {
DEBUG2_3(printk("%s(%ld): bus_reset failed: "
"target_reset=%d d_id=%x.\n", __func__,
- ha->host_no, ret, fcport->d_id.b24));
+ vha->host_no, ret, fcport->d_id.b24));
}
}
}
-
/* Issue marker command only when we are going to start the I/O */
- ha->marker_needed = 1;
+ vha->marker_needed = 1;
return QLA_SUCCESS;
}
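qla2x00_loop_reset now gates the two LIP variants on !vha->vp_idx, so only the physical port can disturb the whole loop, while per-target resets still run for any vport over vha->vp_fcports. The escalation order, as a skeleton (field names are stand-ins):

struct loop_reset_flags {
	int full_lip;		/* flags.enable_lip_full_login */
	int lip;		/* flags.enable_lip_reset */
	int target_reset;	/* flags.enable_target_reset */
	int is_vport;		/* vha->vp_idx != 0 */
};

static void loop_reset_order(struct loop_reset_flags f)
{
	if (f.full_lip && !f.is_vport) {
		/* 1. Full-login LIP: take the loop down, mark all
		 *    devices lost, wait for the loop to return. */
	}
	if (f.lip && !f.is_vport) {
		/* 2. Plain LIP reset; per the hunk, wait for
		 *    loop-ready only if the mailbox call succeeded. */
	}
	if (f.target_reset) {
		/* 3. Reset each FCT_TARGET port on vp_fcports;
		 *    this part is allowed on vports too. */
	}
	/* Finally flag a marker IOCB before the next I/O starts. */
}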
void
-qla2x00_abort_all_cmds(scsi_qla_host_t *ha, int res)
+qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
{
- int cnt;
+ int que, cnt;
unsigned long flags;
srb_t *sp;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req;
spin_lock_irqsave(&ha->hardware_lock, flags);
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
- sp = ha->outstanding_cmds[cnt];
- if (sp) {
- ha->outstanding_cmds[cnt] = NULL;
- sp->cmd->result = res;
- qla2x00_sp_compl(ha, sp);
+ for (que = 0; que < QLA_MAX_HOST_QUES; que++) {
+ req = ha->req_q_map[vha->req_ques[que]];
+ if (!req)
+ continue;
+ for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if (sp && sp->vha == vha) {
+ req->outstanding_cmds[cnt] = NULL;
+ sp->cmd->result = res;
+ qla2x00_sp_compl(ha, sp);
+ }
}
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1104,13 +1208,15 @@ qla2xxx_slave_alloc(struct scsi_device *sdev)
static int
qla2xxx_slave_configure(struct scsi_device *sdev)
{
- scsi_qla_host_t *ha = shost_priv(sdev->host);
+ scsi_qla_host_t *vha = shost_priv(sdev->host);
+ struct qla_hw_data *ha = vha->hw;
struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
+ struct req_que *req = ha->req_q_map[0];
if (sdev->tagged_supported)
- scsi_activate_tcq(sdev, ha->max_q_depth);
+ scsi_activate_tcq(sdev, req->max_q_depth);
else
- scsi_deactivate_tcq(sdev, ha->max_q_depth);
+ scsi_deactivate_tcq(sdev, req->max_q_depth);
rport->dev_loss_tmo = ha->port_down_retry_count;
@@ -1153,8 +1259,9 @@ qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
* supported addressing method.
*/
static void
-qla2x00_config_dma_addressing(scsi_qla_host_t *ha)
+qla2x00_config_dma_addressing(scsi_qla_host_t *vha)
{
+ struct qla_hw_data *ha = vha->hw;
/* Assume a 32bit DMA mask. */
ha->flags.enable_64bit_addressing = 0;
@@ -1175,7 +1282,7 @@ qla2x00_config_dma_addressing(scsi_qla_host_t *ha)
}
static void
-qla2x00_enable_intrs(scsi_qla_host_t *ha)
+qla2x00_enable_intrs(struct qla_hw_data *ha)
{
unsigned long flags = 0;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1190,7 +1297,7 @@ qla2x00_enable_intrs(scsi_qla_host_t *ha)
}
static void
-qla2x00_disable_intrs(scsi_qla_host_t *ha)
+qla2x00_disable_intrs(struct qla_hw_data *ha)
{
unsigned long flags = 0;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1204,7 +1311,7 @@ qla2x00_disable_intrs(scsi_qla_host_t *ha)
}
static void
-qla24xx_enable_intrs(scsi_qla_host_t *ha)
+qla24xx_enable_intrs(struct qla_hw_data *ha)
{
unsigned long flags = 0;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
@@ -1217,7 +1324,7 @@ qla24xx_enable_intrs(scsi_qla_host_t *ha)
}
static void
-qla24xx_disable_intrs(scsi_qla_host_t *ha)
+qla24xx_disable_intrs(struct qla_hw_data *ha)
{
unsigned long flags = 0;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
@@ -1261,6 +1368,10 @@ static struct isp_operations qla2100_isp_ops = {
.read_optrom = qla2x00_read_optrom_data,
.write_optrom = qla2x00_write_optrom_data,
.get_flash_version = qla2x00_get_flash_version,
+ .start_scsi = qla2x00_start_scsi,
+ .wrt_req_reg = NULL,
+ .wrt_rsp_reg = NULL,
+ .rd_req_reg = NULL,
};
static struct isp_operations qla2300_isp_ops = {
@@ -1295,6 +1406,10 @@ static struct isp_operations qla2300_isp_ops = {
.read_optrom = qla2x00_read_optrom_data,
.write_optrom = qla2x00_write_optrom_data,
.get_flash_version = qla2x00_get_flash_version,
+ .start_scsi = qla2x00_start_scsi,
+ .wrt_req_reg = NULL,
+ .wrt_rsp_reg = NULL,
+ .rd_req_reg = NULL,
};
static struct isp_operations qla24xx_isp_ops = {
@@ -1329,6 +1444,10 @@ static struct isp_operations qla24xx_isp_ops = {
.read_optrom = qla24xx_read_optrom_data,
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
+ .start_scsi = qla24xx_start_scsi,
+ .wrt_req_reg = qla24xx_wrt_req_reg,
+ .wrt_rsp_reg = qla24xx_wrt_rsp_reg,
+ .rd_req_reg = qla24xx_rd_req_reg,
};
static struct isp_operations qla25xx_isp_ops = {
@@ -1363,10 +1482,14 @@ static struct isp_operations qla25xx_isp_ops = {
.read_optrom = qla25xx_read_optrom_data,
.write_optrom = qla24xx_write_optrom_data,
.get_flash_version = qla24xx_get_flash_version,
+ .start_scsi = qla24xx_start_scsi,
+ .wrt_req_reg = qla24xx_wrt_req_reg,
+ .wrt_rsp_reg = qla24xx_wrt_rsp_reg,
+ .rd_req_reg = qla24xx_rd_req_reg,
};
static inline void
-qla2x00_set_isp_flags(scsi_qla_host_t *ha)
+qla2x00_set_isp_flags(struct qla_hw_data *ha)
{
ha->device_type = DT_EXTENDED_IDS;
switch (ha->pdev->device) {
@@ -1448,9 +1571,10 @@ qla2x00_set_isp_flags(scsi_qla_host_t *ha)
}
static int
-qla2x00_iospace_config(scsi_qla_host_t *ha)
+qla2x00_iospace_config(struct qla_hw_data *ha)
{
resource_size_t pio;
+ uint16_t msix;
if (pci_request_selected_regions(ha->pdev, ha->bars,
QLA2XXX_DRIVER_NAME)) {
@@ -1503,6 +1627,30 @@ skip_pio:
goto iospace_error_exit;
}
+ /* Determine queue resources */
+ ha->max_queues = 1;
+ if (ql2xmaxqueues <= 1 || !IS_QLA25XX(ha))
+ goto mqiobase_exit;
+ ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
+ pci_resource_len(ha->pdev, 3));
+ if (ha->mqiobase) {
+ /* Read MSIX vector size of the board */
+ pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
+ ha->msix_count = msix;
+ /* Max queues are bounded by available msix vectors */
+ /* queue 0 uses two msix vectors */
+ if (ha->msix_count - 1 < ql2xmaxqueues)
+ ha->max_queues = ha->msix_count - 1;
+ else if (ql2xmaxqueues > QLA_MQ_SIZE)
+ ha->max_queues = QLA_MQ_SIZE;
+ else
+ ha->max_queues = ql2xmaxqueues;
+ qla_printk(KERN_INFO, ha,
+ "MSI-X vector count: %d\n", msix);
+ }
+
+mqiobase_exit:
+ ha->msix_count = ha->max_queues + 1;
return (0);
iospace_error_exit:
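The MSI-X sizing above boils down to a clamp: queue 0 consumes two vectors, each extra queue one more, so at most msix_count - 1 queues fit, further bounded by the ql2xmaxqueues module parameter and the QLA_MQ_SIZE cap; afterwards msix_count is rewritten to max_queues + 1. The same arithmetic as one function (the QLA_MQ_SIZE value below is illustrative, the real cap lives in the driver headers):

#define QLA_MQ_SIZE 32	/* illustrative value */

static int clamp_queues(int msix_count, int requested)
{
	if (msix_count - 1 < requested)	/* queue 0 eats two vectors */
		return msix_count - 1;
	if (requested > QLA_MQ_SIZE)
		return QLA_MQ_SIZE;
	return requested;		/* vectors used = result + 1 */
}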
@@ -1512,25 +1660,25 @@ iospace_error_exit:
static void
qla2xxx_scan_start(struct Scsi_Host *shost)
{
- scsi_qla_host_t *ha = shost_priv(shost);
+ scsi_qla_host_t *vha = shost_priv(shost);
- set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
- set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
- set_bit(RSCN_UPDATE, &ha->dpc_flags);
- set_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(RSCN_UPDATE, &vha->dpc_flags);
+ set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
}
static int
qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
- scsi_qla_host_t *ha = shost_priv(shost);
+ scsi_qla_host_t *vha = shost_priv(shost);
- if (!ha->host)
+ if (!vha->host)
return 1;
- if (time > ha->loop_reset_delay * HZ)
+ if (time > vha->hw->loop_reset_delay * HZ)
return 1;
- return atomic_read(&ha->loop_state) == LOOP_READY;
+ return atomic_read(&vha->loop_state) == LOOP_READY;
}
/*
@@ -1541,11 +1689,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
int ret = -ENODEV;
struct Scsi_Host *host;
- scsi_qla_host_t *ha;
+ scsi_qla_host_t *base_vha = NULL;
+ struct qla_hw_data *ha;
char pci_info[30];
char fw_str[30];
struct scsi_host_template *sht;
- int bars, mem_only = 0;
+ int bars, max_id, mem_only = 0;
+ uint16_t req_length = 0, rsp_length = 0;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
sht = &qla2x00_driver_template;
@@ -1568,37 +1720,27 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto probe_out;
}
- if (pci_find_aer_capability(pdev))
- if (pci_enable_pcie_error_reporting(pdev))
- goto probe_out;
+ /* This may fail but that's ok */
+ pci_enable_pcie_error_reporting(pdev);
- host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
- if (host == NULL) {
- printk(KERN_WARNING
- "qla2xxx: Couldn't allocate host from scsi layer!\n");
- goto probe_disable_device;
+ ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
+ if (!ha) {
+ DEBUG(printk("Unable to allocate memory for ha\n"));
+ goto probe_out;
}
+ ha->pdev = pdev;
/* Clear our data area */
- ha = shost_priv(host);
- memset(ha, 0, sizeof(scsi_qla_host_t));
-
- ha->pdev = pdev;
- ha->host = host;
- ha->host_no = host->host_no;
- sprintf(ha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, ha->host_no);
- ha->parent = NULL;
ha->bars = bars;
ha->mem_only = mem_only;
spin_lock_init(&ha->hardware_lock);
/* Set ISP-type information. */
qla2x00_set_isp_flags(ha);
-
/* Configure PCI I/O space */
ret = qla2x00_iospace_config(ha);
if (ret)
- goto probe_failed;
+ goto probe_hw_failed;
qla_printk(KERN_INFO, ha,
"Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq,
@@ -1606,95 +1748,137 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->prev_topology = 0;
ha->init_cb_size = sizeof(init_cb_t);
- ha->mgmt_svr_loop_id = MANAGEMENT_SERVER + ha->vp_idx;
ha->link_data_rate = PORT_SPEED_UNKNOWN;
ha->optrom_size = OPTROM_SIZE_2300;
- ha->max_q_depth = MAX_Q_DEPTH;
- if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
- ha->max_q_depth = ql2xmaxqdepth;
-
/* Assign ISP specific operations. */
+ max_id = MAX_TARGETS_2200;
if (IS_QLA2100(ha)) {
- host->max_id = MAX_TARGETS_2100;
+ max_id = MAX_TARGETS_2100;
ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
- ha->request_q_length = REQUEST_ENTRY_CNT_2100;
- ha->response_q_length = RESPONSE_ENTRY_CNT_2100;
- ha->last_loop_id = SNS_LAST_LOOP_ID_2100;
- host->sg_tablesize = 32;
+ req_length = REQUEST_ENTRY_CNT_2100;
+ rsp_length = RESPONSE_ENTRY_CNT_2100;
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
ha->gid_list_info_size = 4;
ha->isp_ops = &qla2100_isp_ops;
} else if (IS_QLA2200(ha)) {
- host->max_id = MAX_TARGETS_2200;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
- ha->request_q_length = REQUEST_ENTRY_CNT_2200;
- ha->response_q_length = RESPONSE_ENTRY_CNT_2100;
- ha->last_loop_id = SNS_LAST_LOOP_ID_2100;
+ req_length = REQUEST_ENTRY_CNT_2200;
+ rsp_length = RESPONSE_ENTRY_CNT_2100;
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
ha->gid_list_info_size = 4;
ha->isp_ops = &qla2100_isp_ops;
} else if (IS_QLA23XX(ha)) {
- host->max_id = MAX_TARGETS_2200;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
- ha->request_q_length = REQUEST_ENTRY_CNT_2200;
- ha->response_q_length = RESPONSE_ENTRY_CNT_2300;
- ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
+ req_length = REQUEST_ENTRY_CNT_2200;
+ rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
ha->gid_list_info_size = 6;
if (IS_QLA2322(ha) || IS_QLA6322(ha))
ha->optrom_size = OPTROM_SIZE_2322;
ha->isp_ops = &qla2300_isp_ops;
} else if (IS_QLA24XX_TYPE(ha)) {
- host->max_id = MAX_TARGETS_2200;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
- ha->request_q_length = REQUEST_ENTRY_CNT_24XX;
- ha->response_q_length = RESPONSE_ENTRY_CNT_2300;
- ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
+ req_length = REQUEST_ENTRY_CNT_24XX;
+ rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
- ha->mgmt_svr_loop_id = 10 + ha->vp_idx;
ha->gid_list_info_size = 8;
ha->optrom_size = OPTROM_SIZE_24XX;
+ ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
ha->isp_ops = &qla24xx_isp_ops;
} else if (IS_QLA25XX(ha)) {
- host->max_id = MAX_TARGETS_2200;
ha->mbx_count = MAILBOX_REGISTER_COUNT;
- ha->request_q_length = REQUEST_ENTRY_CNT_24XX;
- ha->response_q_length = RESPONSE_ENTRY_CNT_2300;
- ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
+ req_length = REQUEST_ENTRY_CNT_24XX;
+ rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
- ha->mgmt_svr_loop_id = 10 + ha->vp_idx;
ha->gid_list_info_size = 8;
ha->optrom_size = OPTROM_SIZE_25XX;
+ ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
ha->isp_ops = &qla25xx_isp_ops;
}
- host->can_queue = ha->request_q_length + 128;
mutex_init(&ha->vport_lock);
init_completion(&ha->mbx_cmd_comp);
complete(&ha->mbx_cmd_comp);
init_completion(&ha->mbx_intr_comp);
- INIT_LIST_HEAD(&ha->list);
- INIT_LIST_HEAD(&ha->fcports);
- INIT_LIST_HEAD(&ha->vp_list);
- INIT_LIST_HEAD(&ha->work_list);
-
set_bit(0, (unsigned long *) ha->vp_idx_map);
- qla2x00_config_dma_addressing(ha);
- if (qla2x00_mem_alloc(ha)) {
+ ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
+ if (!ret) {
qla_printk(KERN_WARNING, ha,
"[ERROR] Failed to allocate memory for adapter\n");
+ goto probe_hw_failed;
+ }
+
+ req->max_q_depth = MAX_Q_DEPTH;
+ if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
+ req->max_q_depth = ql2xmaxqdepth;
+
+
+ base_vha = qla2x00_create_host(sht, ha);
+ if (!base_vha) {
+ qla_printk(KERN_WARNING, ha,
+ "[ERROR] Failed to allocate memory for scsi_host\n");
+
ret = -ENOMEM;
+ goto probe_hw_failed;
+ }
+
+ pci_set_drvdata(pdev, base_vha);
+
+ qla2x00_config_dma_addressing(base_vha);
+
+ host = base_vha->host;
+ base_vha->req_ques[0] = req->id;
+ host->can_queue = req->length + 128;
+ if (IS_QLA2XXX_MIDTYPE(ha))
+ base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
+ else
+ base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
+ base_vha->vp_idx;
+ if (IS_QLA2100(ha))
+ host->sg_tablesize = 32;
+ host->max_id = max_id;
+ host->this_id = 255;
+ host->cmd_per_lun = 3;
+ host->unique_id = host->host_no;
+ host->max_cmd_len = MAX_CMDSZ;
+ host->max_channel = MAX_BUSES - 1;
+ host->max_lun = MAX_LUNS;
+ host->transportt = qla2xxx_transport_template;
+
+ /* Set up the irqs */
+ ret = qla2x00_request_irqs(ha, rsp);
+ if (ret)
goto probe_failed;
+
+ /* Alloc arrays of request and response ring ptrs */
+ if (!qla2x00_alloc_queues(ha)) {
+ qla_printk(KERN_WARNING, ha,
+ "[ERROR] Failed to allocate memory for queue"
+ " pointers\n");
+ goto probe_failed;
+ }
+ ha->rsp_q_map[0] = rsp;
+ ha->req_q_map[0] = req;
+
+ if (ha->mqenable) {
+ ha->isp_ops->wrt_req_reg = qla25xx_wrt_req_reg;
+ ha->isp_ops->wrt_rsp_reg = qla25xx_wrt_rsp_reg;
+ ha->isp_ops->rd_req_reg = qla25xx_rd_req_reg;
}
- if (qla2x00_initialize_adapter(ha)) {
+ if (qla2x00_initialize_adapter(base_vha)) {
qla_printk(KERN_WARNING, ha,
"Failed to initialize adapter\n");
DEBUG2(printk("scsi(%ld): Failed to initialize adapter - "
"Adapter flags %x.\n",
- ha->host_no, ha->device_flags));
+ base_vha->host_no, base_vha->device_flags));
ret = -ENODEV;
goto probe_failed;
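Taken together, the probe hunks order the bring-up as: allocate the shared qla_hw_data, allocate one request/response ring pair, create the base vport and its Scsi_Host, request IRQs, allocate the queue-pointer maps, then initialize the adapter, with each failure unwinding through a goto label. A compressed skeleton of that shape (every function here is a stand-in stub, not a qla2xxx call):

#include <stdlib.h>

static void *alloc_hw(void)            { return malloc(1); }
static void *alloc_rings(void *hw)     { (void)hw; return malloc(1); }
static void *create_host(void *hw)     { (void)hw; return malloc(1); }
static int   request_irqs(void *hw)    { (void)hw; return 0; }
static int   alloc_queue_maps(void *h) { (void)h;  return 0; }
static int   init_adapter(void *host)  { (void)host; return 0; }

static int probe_skeleton(void)
{
	void *hw, *rings, *host;

	if (!(hw = alloc_hw()))         goto out;        /* qla_hw_data  */
	if (!(rings = alloc_rings(hw))) goto free_hw;    /* req+rsp ring */
	if (!(host = create_host(hw)))  goto free_rings; /* base vport   */
	if (request_irqs(hw))           goto free_host;
	if (alloc_queue_maps(hw))       goto free_host;  /* q_map arrays */
	if (init_adapter(host))         goto free_host;
	return 0;

free_host:  free(host);
free_rings: free(rings);
free_hw:    free(hw);
out:        return -1;
}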
@@ -1704,7 +1888,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
* Startup the kernel thread for this host adapter
*/
ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
- "%s_dpc", ha->host_str);
+ "%s_dpc", base_vha->host_str);
if (IS_ERR(ha->dpc_thread)) {
qla_printk(KERN_WARNING, ha,
"Unable to start DPC thread!\n");
@@ -1712,28 +1896,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto probe_failed;
}
- host->this_id = 255;
- host->cmd_per_lun = 3;
- host->unique_id = host->host_no;
- host->max_cmd_len = MAX_CMDSZ;
- host->max_channel = MAX_BUSES - 1;
- host->max_lun = MAX_LUNS;
- host->transportt = qla2xxx_transport_template;
-
- ret = qla2x00_request_irqs(ha);
- if (ret)
- goto probe_failed;
+ list_add_tail(&base_vha->list, &ha->vp_list);
+ base_vha->host->irq = ha->pdev->irq;
/* Initialize the timer */
- qla2x00_start_timer(ha, qla2x00_timer, WATCH_INTERVAL);
+ qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
- ha->host_no, ha));
-
- pci_set_drvdata(pdev, ha);
+ base_vha->host_no, ha));
- ha->flags.init_done = 1;
- ha->flags.online = 1;
+ base_vha->flags.init_done = 1;
+ base_vha->flags.online = 1;
ret = scsi_add_host(host, &pdev->dev);
if (ret)
@@ -1743,76 +1916,98 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
scsi_scan_host(host);
- qla2x00_alloc_sysfs_attr(ha);
+ qla2x00_alloc_sysfs_attr(base_vha);
- qla2x00_init_host_attr(ha);
+ qla2x00_init_host_attr(base_vha);
- qla2x00_dfs_setup(ha);
+ qla2x00_dfs_setup(base_vha);
qla_printk(KERN_INFO, ha, "\n"
" QLogic Fibre Channel HBA Driver: %s\n"
" QLogic %s - %s\n"
" ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n",
qla2x00_version_str, ha->model_number,
- ha->model_desc ? ha->model_desc: "", pdev->device,
- ha->isp_ops->pci_info_str(ha, pci_info), pci_name(pdev),
- ha->flags.enable_64bit_addressing ? '+': '-', ha->host_no,
- ha->isp_ops->fw_version_str(ha, fw_str));
+ ha->model_desc ? ha->model_desc : "", pdev->device,
+ ha->isp_ops->pci_info_str(base_vha, pci_info), pci_name(pdev),
+ ha->flags.enable_64bit_addressing ? '+' : '-', base_vha->host_no,
+ ha->isp_ops->fw_version_str(base_vha, fw_str));
return 0;
probe_failed:
- qla2x00_free_device(ha);
+ qla2x00_free_que(ha, req, rsp);
+ qla2x00_free_device(base_vha);
- scsi_host_put(host);
+ scsi_host_put(base_vha->host);
-probe_disable_device:
- pci_disable_device(pdev);
+probe_hw_failed:
+ if (ha->iobase)
+ iounmap(ha->iobase);
+
+ pci_release_selected_regions(ha->pdev, ha->bars);
+ kfree(ha);
+ ha = NULL;
probe_out:
+ pci_disable_device(pdev);
return ret;
}
static void
qla2x00_remove_one(struct pci_dev *pdev)
{
- scsi_qla_host_t *ha, *vha, *temp;
+ scsi_qla_host_t *base_vha, *vha, *temp;
+ struct qla_hw_data *ha;
- ha = pci_get_drvdata(pdev);
+ base_vha = pci_get_drvdata(pdev);
+ ha = base_vha->hw;
- list_for_each_entry_safe(vha, temp, &ha->vp_list, vp_list)
- fc_vport_terminate(vha->fc_vport);
+ list_for_each_entry_safe(vha, temp, &ha->vp_list, list) {
+ if (vha && vha->fc_vport)
+ fc_vport_terminate(vha->fc_vport);
+ }
- set_bit(UNLOADING, &ha->dpc_flags);
+ set_bit(UNLOADING, &base_vha->dpc_flags);
- qla2x00_dfs_remove(ha);
+ qla2x00_dfs_remove(base_vha);
- qla84xx_put_chip(ha);
+ qla84xx_put_chip(base_vha);
- qla2x00_free_sysfs_attr(ha);
+ qla2x00_free_sysfs_attr(base_vha);
- fc_remove_host(ha->host);
+ fc_remove_host(base_vha->host);
- scsi_remove_host(ha->host);
+ scsi_remove_host(base_vha->host);
- qla2x00_free_device(ha);
+ qla2x00_free_device(base_vha);
- scsi_host_put(ha->host);
+ scsi_host_put(base_vha->host);
+
+ if (ha->iobase)
+ iounmap(ha->iobase);
+
+ if (ha->mqiobase)
+ iounmap(ha->mqiobase);
+
+ pci_release_selected_regions(ha->pdev, ha->bars);
+ kfree(ha);
+ ha = NULL;
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
static void
-qla2x00_free_device(scsi_qla_host_t *ha)
+qla2x00_free_device(scsi_qla_host_t *vha)
{
- qla2x00_abort_all_cmds(ha, DID_NO_CONNECT << 16);
+ struct qla_hw_data *ha = vha->hw;
+ qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
/* Disable timer */
- if (ha->timer_active)
- qla2x00_stop_timer(ha);
+ if (vha->timer_active)
+ qla2x00_stop_timer(vha);
- ha->flags.online = 0;
+ vha->flags.online = 0;
/* Kill the kernel thread for this host */
if (ha->dpc_thread) {
@@ -1827,45 +2022,41 @@ qla2x00_free_device(scsi_qla_host_t *ha)
}
if (ha->flags.fce_enabled)
- qla2x00_disable_fce_trace(ha, NULL, NULL);
+ qla2x00_disable_fce_trace(vha, NULL, NULL);
if (ha->eft)
- qla2x00_disable_eft_trace(ha);
+ qla2x00_disable_eft_trace(vha);
/* Stop currently executing firmware. */
- qla2x00_try_to_stop_firmware(ha);
+ qla2x00_try_to_stop_firmware(vha);
/* turn-off interrupts on the card */
if (ha->interrupts_on)
ha->isp_ops->disable_intrs(ha);
- qla2x00_mem_free(ha);
+ qla2x00_free_irqs(vha);
- qla2x00_free_irqs(ha);
+ qla2x00_mem_free(ha);
- /* release io space registers */
- if (ha->iobase)
- iounmap(ha->iobase);
- pci_release_selected_regions(ha->pdev, ha->bars);
+ qla2x00_free_queues(ha);
}
static inline void
-qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport,
+qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
int defer)
{
struct fc_rport *rport;
- scsi_qla_host_t *pha = to_qla_parent(ha);
if (!fcport->rport)
return;
rport = fcport->rport;
if (defer) {
- spin_lock_irq(ha->host->host_lock);
+ spin_lock_irq(vha->host->host_lock);
fcport->drport = rport;
- spin_unlock_irq(ha->host->host_lock);
- set_bit(FCPORT_UPDATE_NEEDED, &pha->dpc_flags);
- qla2xxx_wake_dpc(pha);
+ spin_unlock_irq(vha->host->host_lock);
+ set_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
} else
fc_remote_port_delete(rport);
}
@@ -1879,13 +2070,14 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport,
*
* Context:
*/
-void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport,
+void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
int do_login, int defer)
{
if (atomic_read(&fcport->state) == FCS_ONLINE &&
- ha->vp_idx == fcport->vp_idx)
- qla2x00_schedule_rport_del(ha, fcport, defer);
-
+ vha->vp_idx == fcport->vp_idx) {
+ atomic_set(&fcport->state, FCS_DEVICE_LOST);
+ qla2x00_schedule_rport_del(vha, fcport, defer);
+ }
/*
* We may need to retry the login, so don't change the state of the
* port but do the retries.
@@ -1897,13 +2089,13 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport,
return;
if (fcport->login_retry == 0) {
- fcport->login_retry = ha->login_retry_count;
- set_bit(RELOGIN_NEEDED, &ha->dpc_flags);
+ fcport->login_retry = vha->hw->login_retry_count;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
DEBUG(printk("scsi(%ld): Port login retry: "
"%02x%02x%02x%02x%02x%02x%02x%02x, "
"id = 0x%04x retry cnt=%d\n",
- ha->host_no,
+ vha->host_no,
fcport->port_name[0],
fcport->port_name[1],
fcport->port_name[2],
@@ -1931,13 +2123,12 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport,
* Context:
*/
void
-qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
+qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
{
fc_port_t *fcport;
- scsi_qla_host_t *pha = to_qla_parent(ha);
- list_for_each_entry(fcport, &pha->fcports, list) {
- if (ha->vp_idx != fcport->vp_idx)
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (vha->vp_idx != fcport->vp_idx)
continue;
/*
* No point in marking the device as lost, if the device is
@@ -1945,9 +2136,11 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
*/
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
continue;
- if (atomic_read(&fcport->state) == FCS_ONLINE)
- qla2x00_schedule_rport_del(ha, fcport, defer);
- atomic_set(&fcport->state, FCS_DEVICE_LOST);
+ if (atomic_read(&fcport->state) == FCS_ONLINE) {
+ atomic_set(&fcport->state, FCS_DEVICE_LOST);
+ qla2x00_schedule_rport_del(vha, fcport, defer);
+ } else
+ atomic_set(&fcport->state, FCS_DEVICE_LOST);
}
}
@@ -1960,105 +2153,153 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
* !0 = failure.
*/
static int
-qla2x00_mem_alloc(scsi_qla_host_t *ha)
+qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
+ struct req_que **req, struct rsp_que **rsp)
{
char name[16];
- ha->request_ring = dma_alloc_coherent(&ha->pdev->dev,
- (ha->request_q_length + 1) * sizeof(request_t), &ha->request_dma,
- GFP_KERNEL);
- if (!ha->request_ring)
- goto fail;
-
- ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
- (ha->response_q_length + 1) * sizeof(response_t),
- &ha->response_dma, GFP_KERNEL);
- if (!ha->response_ring)
- goto fail_free_request_ring;
-
- ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
- &ha->gid_list_dma, GFP_KERNEL);
- if (!ha->gid_list)
- goto fail_free_response_ring;
+ ha->init_cb_size = sizeof(init_cb_t);
+ if (IS_QLA2XXX_MIDTYPE(ha))
+ ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
- &ha->init_cb_dma, GFP_KERNEL);
+ &ha->init_cb_dma, GFP_KERNEL);
if (!ha->init_cb)
- goto fail_free_gid_list;
+ goto fail;
- snprintf(name, sizeof(name), "%s_%ld", QLA2XXX_DRIVER_NAME,
- ha->host_no);
- ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
- DMA_POOL_SIZE, 8, 0);
- if (!ha->s_dma_pool)
+ ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
+ &ha->gid_list_dma, GFP_KERNEL);
+ if (!ha->gid_list)
goto fail_free_init_cb;
ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
if (!ha->srb_mempool)
- goto fail_free_s_dma_pool;
+ goto fail_free_gid_list;
/* Get memory for cached NVRAM */
ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
if (!ha->nvram)
goto fail_free_srb_mempool;
+ snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
+ ha->pdev->device);
+ ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
+ DMA_POOL_SIZE, 8, 0);
+ if (!ha->s_dma_pool)
+ goto fail_free_nvram;
+
/* Allocate memory for SNS commands */
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
- /* Get consistent memory allocated for SNS commands */
+ /* Get consistent memory allocated for SNS commands */
ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
+ sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
if (!ha->sns_cmd)
- goto fail_free_nvram;
+ goto fail_dma_pool;
} else {
- /* Get consistent memory allocated for MS IOCB */
+ /* Get consistent memory allocated for MS IOCB */
ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
- &ha->ms_iocb_dma);
+ &ha->ms_iocb_dma);
if (!ha->ms_iocb)
- goto fail_free_nvram;
-
- /* Get consistent memory allocated for CT SNS commands */
+ goto fail_dma_pool;
+ /* Get consistent memory allocated for CT SNS commands */
ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
+ sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
if (!ha->ct_sns)
goto fail_free_ms_iocb;
}
- return 0;
+ /* Allocate memory for request ring */
+ *req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
+ if (!*req) {
+ DEBUG(printk("Unable to allocate memory for req\n"));
+ goto fail_req;
+ }
+ (*req)->length = req_len;
+ (*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
+ ((*req)->length + 1) * sizeof(request_t),
+ &(*req)->dma, GFP_KERNEL);
+ if (!(*req)->ring) {
+ DEBUG(printk("Unable to allocate memory for req_ring\n"));
+ goto fail_req_ring;
+ }
+ /* Allocate memory for response ring */
+ *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
+ if (!*rsp) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to allocate memory for rsp\n");
+ goto fail_rsp;
+ }
+ (*rsp)->hw = ha;
+ (*rsp)->length = rsp_len;
+ (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev,
+ ((*rsp)->length + 1) * sizeof(response_t),
+ &(*rsp)->dma, GFP_KERNEL);
+ if (!(*rsp)->ring) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to allocate memory for rsp_ring\n");
+ goto fail_rsp_ring;
+ }
+ (*req)->rsp = *rsp;
+ (*rsp)->req = *req;
+ /* Allocate memory for NVRAM data for vports */
+ if (ha->nvram_npiv_size) {
+ ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) *
+ ha->nvram_npiv_size, GFP_KERNEL);
+ if (!ha->npiv_info) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to allocate memory for npiv info\n");
+ goto fail_npiv_info;
+ }
+ } else
+ ha->npiv_info = NULL;
+ INIT_LIST_HEAD(&ha->vp_list);
+ return 1;
+
+fail_npiv_info:
+ dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) *
+ sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
+ (*rsp)->ring = NULL;
+ (*rsp)->dma = 0;
+fail_rsp_ring:
+ kfree(*rsp);
+fail_rsp:
+ dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
+ sizeof(request_t), (*req)->ring, (*req)->dma);
+ (*req)->ring = NULL;
+ (*req)->dma = 0;
+fail_req_ring:
+ kfree(*req);
+fail_req:
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
+ ha->ct_sns, ha->ct_sns_dma);
+ ha->ct_sns = NULL;
+ ha->ct_sns_dma = 0;
fail_free_ms_iocb:
dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
ha->ms_iocb = NULL;
ha->ms_iocb_dma = 0;
+fail_dma_pool:
+ dma_pool_destroy(ha->s_dma_pool);
+ ha->s_dma_pool = NULL;
fail_free_nvram:
kfree(ha->nvram);
ha->nvram = NULL;
fail_free_srb_mempool:
mempool_destroy(ha->srb_mempool);
ha->srb_mempool = NULL;
-fail_free_s_dma_pool:
- dma_pool_destroy(ha->s_dma_pool);
- ha->s_dma_pool = NULL;
-fail_free_init_cb:
- dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
- ha->init_cb_dma);
- ha->init_cb = NULL;
- ha->init_cb_dma = 0;
fail_free_gid_list:
dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
- ha->gid_list_dma);
+ ha->gid_list_dma);
ha->gid_list = NULL;
ha->gid_list_dma = 0;
-fail_free_response_ring:
- dma_free_coherent(&ha->pdev->dev, (ha->response_q_length + 1) *
- sizeof(response_t), ha->response_ring, ha->response_dma);
- ha->response_ring = NULL;
- ha->response_dma = 0;
-fail_free_request_ring:
- dma_free_coherent(&ha->pdev->dev, (ha->request_q_length + 1) *
- sizeof(request_t), ha->request_ring, ha->request_dma);
- ha->request_ring = NULL;
- ha->request_dma = 0;
+fail_free_init_cb:
+ dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
+ ha->init_cb_dma);
+ ha->init_cb = NULL;
+ ha->init_cb_dma = 0;
fail:
+ DEBUG(printk("%s: Memory allocation failure\n", __func__));
return -ENOMEM;
}
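The rewritten qla2x00_mem_alloc keeps the classic goto-unwind ladder: allocate in order, and on failure fall through labels that release everything acquired so far, newest first. Note the convention visible above: it returns 1 on success, and the probe path tests if (!ret). A tiny compilable model of the ladder:

#include <stdlib.h>

/* Stand-in resources model init_cb / gid_list / rings. */
static int mem_alloc_sketch(void **a, void **b, void **c)
{
	if (!(*a = malloc(16)))
		goto fail;
	if (!(*b = malloc(16)))
		goto fail_a;
	if (!(*c = malloc(16)))
		goto fail_b;
	return 1;		/* success convention from the hunk */

fail_b:	free(*b); *b = NULL;
fail_a:	free(*a); *a = NULL;
fail:	return -1;
}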
@@ -2070,32 +2311,29 @@ fail:
* ha = adapter block pointer.
*/
static void
-qla2x00_mem_free(scsi_qla_host_t *ha)
+qla2x00_mem_free(struct qla_hw_data *ha)
{
- struct list_head *fcpl, *fcptemp;
- fc_port_t *fcport;
-
if (ha->srb_mempool)
mempool_destroy(ha->srb_mempool);
if (ha->fce)
dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
- ha->fce_dma);
+ ha->fce_dma);
if (ha->fw_dump) {
if (ha->eft)
dma_free_coherent(&ha->pdev->dev,
- ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
+ ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
vfree(ha->fw_dump);
}
if (ha->sns_cmd)
dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
- ha->sns_cmd, ha->sns_cmd_dma);
+ ha->sns_cmd, ha->sns_cmd_dma);
if (ha->ct_sns)
dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
- ha->ct_sns, ha->ct_sns_dma);
+ ha->ct_sns, ha->ct_sns_dma);
if (ha->sfp_data)
dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);
@@ -2106,23 +2344,18 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
if (ha->s_dma_pool)
dma_pool_destroy(ha->s_dma_pool);
- if (ha->init_cb)
- dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
- ha->init_cb, ha->init_cb_dma);
if (ha->gid_list)
dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
- ha->gid_list_dma);
+ ha->gid_list_dma);
- if (ha->response_ring)
- dma_free_coherent(&ha->pdev->dev,
- (ha->response_q_length + 1) * sizeof(response_t),
- ha->response_ring, ha->response_dma);
- if (ha->request_ring)
- dma_free_coherent(&ha->pdev->dev,
- (ha->request_q_length + 1) * sizeof(request_t),
- ha->request_ring, ha->request_dma);
+ if (ha->init_cb)
+ dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
+ ha->init_cb, ha->init_cb_dma);
+ vfree(ha->optrom_buffer);
+ kfree(ha->nvram);
+ kfree(ha->npiv_info);
ha->srb_mempool = NULL;
ha->eft = NULL;
@@ -2141,30 +2374,45 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
ha->gid_list = NULL;
ha->gid_list_dma = 0;
- ha->response_ring = NULL;
- ha->response_dma = 0;
- ha->request_ring = NULL;
- ha->request_dma = 0;
+ ha->fw_dump = NULL;
+ ha->fw_dumped = 0;
+ ha->fw_dump_reading = 0;
+}
- list_for_each_safe(fcpl, fcptemp, &ha->fcports) {
- fcport = list_entry(fcpl, fc_port_t, list);
+struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
+ struct qla_hw_data *ha)
+{
+ struct Scsi_Host *host;
+ struct scsi_qla_host *vha = NULL;
- /* fc ports */
- list_del_init(&fcport->list);
- kfree(fcport);
+ host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
+ if (host == NULL) {
+ printk(KERN_WARNING
+ "qla2xxx: Couldn't allocate host from scsi layer!\n");
+ goto fail;
}
- INIT_LIST_HEAD(&ha->fcports);
- ha->fw_dump = NULL;
- ha->fw_dumped = 0;
- ha->fw_dump_reading = 0;
+ /* Clear our data area */
+ vha = shost_priv(host);
+ memset(vha, 0, sizeof(scsi_qla_host_t));
- vfree(ha->optrom_buffer);
- kfree(ha->nvram);
+ vha->host = host;
+ vha->host_no = host->host_no;
+ vha->hw = ha;
+
+ INIT_LIST_HEAD(&vha->vp_fcports);
+ INIT_LIST_HEAD(&vha->work_list);
+ INIT_LIST_HEAD(&vha->list);
+
+ sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
+ return vha;
+
+fail:
+ return vha;
}
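qla2x00_create_host is where the split becomes concrete: per-vport state lives in the Scsi_Host private area (shost_priv) and carries a vha->hw back-pointer to the shared qla_hw_data, so adapter-wide fields are reached through one extra hop. A stand-alone model of that container split (stub structs; the name format string is a stand-in):

#include <stdio.h>
#include <string.h>

struct hw_data { int msix_count; };	/* shared, one per PCI function */

struct vport {				/* per-Scsi_Host private area */
	long host_no;
	struct hw_data *hw;		/* back-pointer, i.e. vha->hw */
	char host_str[24];
};

static void create_vport(struct vport *vha, struct hw_data *hw, long no)
{
	memset(vha, 0, sizeof(*vha));	/* "clear our data area" */
	vha->hw = hw;
	vha->host_no = no;
	snprintf(vha->host_str, sizeof(vha->host_str), "qla2xxx_%ld", no);
}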
static struct qla_work_evt *
-qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
+qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type,
int locked)
{
struct qla_work_evt *e;
@@ -2181,42 +2429,42 @@ qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
}
static int
-qla2x00_post_work(struct scsi_qla_host *ha, struct qla_work_evt *e, int locked)
+qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e, int locked)
{
unsigned long uninitialized_var(flags);
- scsi_qla_host_t *pha = to_qla_parent(ha);
+ struct qla_hw_data *ha = vha->hw;
if (!locked)
- spin_lock_irqsave(&pha->hardware_lock, flags);
- list_add_tail(&e->list, &ha->work_list);
- qla2xxx_wake_dpc(ha);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ list_add_tail(&e->list, &vha->work_list);
+ qla2xxx_wake_dpc(vha);
if (!locked)
- spin_unlock_irqrestore(&pha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
}
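Posting deferred work is now: take the hw-wide hardware_lock unless the caller already holds it, queue the event on the vport's work_list, and wake the DPC thread. A pthread model of the same shape (the driver appends at the tail; LIFO is close enough for a sketch):

#include <pthread.h>

struct evt { struct evt *next; int type; };
struct worklist {
	pthread_mutex_t lock;	/* models ha->hardware_lock */
	pthread_cond_t  wake;	/* models qla2xxx_wake_dpc() */
	struct evt *head;
};

static void post_work(struct worklist *wl, struct evt *e, int locked)
{
	if (!locked)
		pthread_mutex_lock(&wl->lock);
	e->next = wl->head;
	wl->head = e;
	pthread_cond_signal(&wl->wake);
	if (!locked)
		pthread_mutex_unlock(&wl->lock);
}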
int
-qla2x00_post_aen_work(struct scsi_qla_host *ha, enum fc_host_event_code code,
+qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
u32 data)
{
struct qla_work_evt *e;
- e = qla2x00_alloc_work(ha, QLA_EVT_AEN, 1);
+ e = qla2x00_alloc_work(vha, QLA_EVT_AEN, 1);
if (!e)
return QLA_FUNCTION_FAILED;
e->u.aen.code = code;
e->u.aen.data = data;
- return qla2x00_post_work(ha, e, 1);
+ return qla2x00_post_work(vha, e, 1);
}
int
-qla2x00_post_hwe_work(struct scsi_qla_host *ha, uint16_t code, uint16_t d1,
+qla2x00_post_hwe_work(struct scsi_qla_host *vha, uint16_t code, uint16_t d1,
uint16_t d2, uint16_t d3)
{
struct qla_work_evt *e;
- e = qla2x00_alloc_work(ha, QLA_EVT_HWE_LOG, 1);
+ e = qla2x00_alloc_work(vha, QLA_EVT_HWE_LOG, 1);
if (!e)
return QLA_FUNCTION_FAILED;
@@ -2224,36 +2472,95 @@ qla2x00_post_hwe_work(struct scsi_qla_host *ha, uint16_t code, uint16_t d1,
e->u.hwe.d1 = d1;
e->u.hwe.d2 = d2;
e->u.hwe.d3 = d3;
- return qla2x00_post_work(ha, e, 1);
+ return qla2x00_post_work(vha, e, 1);
}
static void
-qla2x00_do_work(struct scsi_qla_host *ha)
+qla2x00_do_work(struct scsi_qla_host *vha)
{
struct qla_work_evt *e;
- scsi_qla_host_t *pha = to_qla_parent(ha);
+ struct qla_hw_data *ha = vha->hw;
- spin_lock_irq(&pha->hardware_lock);
- while (!list_empty(&ha->work_list)) {
- e = list_entry(ha->work_list.next, struct qla_work_evt, list);
+ spin_lock_irq(&ha->hardware_lock);
+ while (!list_empty(&vha->work_list)) {
+ e = list_entry(vha->work_list.next, struct qla_work_evt, list);
list_del_init(&e->list);
- spin_unlock_irq(&pha->hardware_lock);
+ spin_unlock_irq(&ha->hardware_lock);
switch (e->type) {
case QLA_EVT_AEN:
- fc_host_post_event(ha->host, fc_get_event_number(),
+ fc_host_post_event(vha->host, fc_get_event_number(),
e->u.aen.code, e->u.aen.data);
break;
case QLA_EVT_HWE_LOG:
- qla2xxx_hw_event_log(ha, e->u.hwe.code, e->u.hwe.d1,
+ qla2xxx_hw_event_log(vha, e->u.hwe.code, e->u.hwe.d1,
e->u.hwe.d2, e->u.hwe.d3);
break;
}
if (e->flags & QLA_EVT_FLAG_FREE)
kfree(e);
- spin_lock_irq(&pha->hardware_lock);
+ spin_lock_irq(&ha->hardware_lock);
+ }
+ spin_unlock_irq(&ha->hardware_lock);
+}
+/* Relogins all the fcports of a vport
+ * Context: dpc thread
+ */
+void qla2x00_relogin(struct scsi_qla_host *vha)
+{
+ fc_port_t *fcport;
+ uint8_t status;
+ uint16_t next_loopid = 0;
+ struct qla_hw_data *ha = vha->hw;
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ /*
+ * If the port is not ONLINE then try to login
+ * to it if we haven't run out of retries.
+ */
+ if (atomic_read(&fcport->state) !=
+ FCS_ONLINE && fcport->login_retry) {
+
+ if (fcport->flags & FCF_FABRIC_DEVICE) {
+ if (fcport->flags & FCF_TAPE_PRESENT)
+ ha->isp_ops->fabric_logout(vha,
+ fcport->loop_id,
+ fcport->d_id.b.domain,
+ fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
+
+ status = qla2x00_fabric_login(vha, fcport,
+ &next_loopid);
+ } else
+ status = qla2x00_local_device_login(vha,
+ fcport);
+
+ fcport->login_retry--;
+ if (status == QLA_SUCCESS) {
+ fcport->old_loop_id = fcport->loop_id;
+
+ DEBUG(printk("scsi(%ld): port login OK: logged "
+ "in ID 0x%x\n", vha->host_no, fcport->loop_id));
+
+ qla2x00_update_fcport(vha, fcport);
+
+ } else if (status == 1) {
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ /* retry the login again */
+ DEBUG(printk("scsi(%ld): Retrying"
+ " %d login again loop_id 0x%x\n",
+ vha->host_no, fcport->login_retry,
+ fcport->loop_id));
+ } else {
+ fcport->login_retry = 0;
+ }
+
+ if (fcport->login_retry == 0 && status != QLA_SUCCESS)
+ fcport->loop_id = FC_NO_LOOP_ID;
+ }
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
}
- spin_unlock_irq(&pha->hardware_lock);
}
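The retry policy that moved out of the DPC loop into qla2x00_relogin: each attempt decrements login_retry; success refreshes the fcport, status 1 re-arms RELOGIN_NEEDED for another pass, any other status stops retrying; and a port that exhausts its retries without logging in gives back its loop ID. The same policy as a small step function (names and return codes are stand-ins):

enum login_status { LOGIN_OK = 0, LOGIN_RETRY = 1, LOGIN_FAIL = 2 };

struct port { int retries; int loop_id; int online; };
#define NO_LOOP_ID (-1)

/* Returns 1 when the caller should schedule another relogin pass. */
static int relogin_step(struct port *p, enum login_status status)
{
	int rearm = 0;

	p->retries--;
	if (status == LOGIN_OK)
		p->online = 1;		/* qla2x00_update_fcport()       */
	else if (status == LOGIN_RETRY)
		rearm = 1;		/* set RELOGIN_NEEDED, try again */
	else
		p->retries = 0;		/* hard failure: stop trying     */

	if (p->retries == 0 && status != LOGIN_OK)
		p->loop_id = NO_LOOP_ID; /* give the loop ID back        */
	return rearm;
}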
/**************************************************************************
@@ -2273,15 +2580,11 @@ static int
qla2x00_do_dpc(void *data)
{
int rval;
- scsi_qla_host_t *ha;
- fc_port_t *fcport;
- uint8_t status;
- uint16_t next_loopid;
- struct scsi_qla_host *vha;
- int i;
+ scsi_qla_host_t *base_vha;
+ struct qla_hw_data *ha;
-
- ha = (scsi_qla_host_t *)data;
+ ha = (struct qla_hw_data *)data;
+ base_vha = pci_get_drvdata(ha->pdev);
set_user_nice(current, -20);
@@ -2295,10 +2598,10 @@ qla2x00_do_dpc(void *data)
DEBUG3(printk("qla2x00: DPC handler waking up\n"));
/* Initialization not yet finished. Don't do anything yet. */
- if (!ha->flags.init_done)
+ if (!base_vha->flags.init_done)
continue;
- DEBUG3(printk("scsi(%ld): DPC handler\n", ha->host_no));
+ DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
ha->dpc_active = 1;
@@ -2307,149 +2610,98 @@ qla2x00_do_dpc(void *data)
continue;
}
- qla2x00_do_work(ha);
+ qla2x00_do_work(base_vha);
- if (test_and_clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) {
+ if (test_and_clear_bit(ISP_ABORT_NEEDED,
+ &base_vha->dpc_flags)) {
DEBUG(printk("scsi(%ld): dpc: sched "
"qla2x00_abort_isp ha = %p\n",
- ha->host_no, ha));
+ base_vha->host_no, ha));
if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
- &ha->dpc_flags))) {
+ &base_vha->dpc_flags))) {
- if (qla2x00_abort_isp(ha)) {
+ if (qla2x00_abort_isp(base_vha)) {
/* failed. retry later */
set_bit(ISP_ABORT_NEEDED,
- &ha->dpc_flags);
- }
- clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
- }
-
- for_each_mapped_vp_idx(ha, i) {
- list_for_each_entry(vha, &ha->vp_list,
- vp_list) {
- if (i == vha->vp_idx) {
- set_bit(ISP_ABORT_NEEDED,
- &vha->dpc_flags);
- break;
- }
+ &base_vha->dpc_flags);
}
+ clear_bit(ABORT_ISP_ACTIVE,
+ &base_vha->dpc_flags);
}
DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n",
- ha->host_no));
+ base_vha->host_no));
}
- if (test_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags)) {
- qla2x00_update_fcports(ha);
- clear_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags);
+ if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
+ qla2x00_update_fcports(base_vha);
+ clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
}
- if (test_and_clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) &&
- (!(test_and_set_bit(RESET_ACTIVE, &ha->dpc_flags)))) {
+ if (test_and_clear_bit(RESET_MARKER_NEEDED,
+ &base_vha->dpc_flags) &&
+ (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n",
- ha->host_no));
+ base_vha->host_no));
- qla2x00_rst_aen(ha);
- clear_bit(RESET_ACTIVE, &ha->dpc_flags);
+ qla2x00_rst_aen(base_vha);
+ clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
}
/* Retry each device up to login retry count */
- if ((test_and_clear_bit(RELOGIN_NEEDED, &ha->dpc_flags)) &&
- !test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) &&
- atomic_read(&ha->loop_state) != LOOP_DOWN) {
+ if ((test_and_clear_bit(RELOGIN_NEEDED,
+ &base_vha->dpc_flags)) &&
+ !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
+ atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
- ha->host_no));
-
- next_loopid = 0;
- list_for_each_entry(fcport, &ha->fcports, list) {
- /*
- * If the port is not ONLINE then try to login
- * to it if we haven't run out of retries.
- */
- if (atomic_read(&fcport->state) != FCS_ONLINE &&
- fcport->login_retry) {
-
- if (fcport->flags & FCF_FABRIC_DEVICE) {
- if (fcport->flags &
- FCF_TAPE_PRESENT)
- ha->isp_ops->fabric_logout(
- ha, fcport->loop_id,
- fcport->d_id.b.domain,
- fcport->d_id.b.area,
- fcport->d_id.b.al_pa);
- status = qla2x00_fabric_login(
- ha, fcport, &next_loopid);
- } else
- status =
- qla2x00_local_device_login(
- ha, fcport);
-
- fcport->login_retry--;
- if (status == QLA_SUCCESS) {
- fcport->old_loop_id = fcport->loop_id;
-
- DEBUG(printk("scsi(%ld): port login OK: logged in ID 0x%x\n",
- ha->host_no, fcport->loop_id));
-
- qla2x00_update_fcport(ha,
- fcport);
- } else if (status == 1) {
- set_bit(RELOGIN_NEEDED, &ha->dpc_flags);
- /* retry the login again */
- DEBUG(printk("scsi(%ld): Retrying %d login again loop_id 0x%x\n",
- ha->host_no,
- fcport->login_retry, fcport->loop_id));
- } else {
- fcport->login_retry = 0;
- }
- if (fcport->login_retry == 0 && status != QLA_SUCCESS)
- fcport->loop_id = FC_NO_LOOP_ID;
- }
- if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
- break;
- }
+ base_vha->host_no));
+ qla2x00_relogin(base_vha);
+
DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
- ha->host_no));
+ base_vha->host_no));
}
- if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) {
+ if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
+ &base_vha->dpc_flags)) {
DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n",
- ha->host_no));
+ base_vha->host_no));
if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
- &ha->dpc_flags))) {
+ &base_vha->dpc_flags))) {
- rval = qla2x00_loop_resync(ha);
+ rval = qla2x00_loop_resync(base_vha);
- clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
+ clear_bit(LOOP_RESYNC_ACTIVE,
+ &base_vha->dpc_flags);
}
DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n",
- ha->host_no));
+ base_vha->host_no));
}
- if (test_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags) &&
- atomic_read(&ha->loop_state) == LOOP_READY) {
- clear_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags);
- qla2xxx_flash_npiv_conf(ha);
+ if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
+ atomic_read(&base_vha->loop_state) == LOOP_READY) {
+ clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
+ qla2xxx_flash_npiv_conf(base_vha);
}
if (!ha->interrupts_on)
ha->isp_ops->enable_intrs(ha);
- if (test_and_clear_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags))
- ha->isp_ops->beacon_blink(ha);
+ if (test_and_clear_bit(BEACON_BLINK_NEEDED,
+ &base_vha->dpc_flags))
+ ha->isp_ops->beacon_blink(base_vha);
- qla2x00_do_dpc_all_vps(ha);
+ qla2x00_do_dpc_all_vps(base_vha);
ha->dpc_active = 0;
} /* End of while(1) */
- DEBUG(printk("scsi(%ld): DPC handler exiting\n", ha->host_no));
+ DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));
/*
* Make sure that nobody tries to wake us up again.
@@ -2460,11 +2712,12 @@ qla2x00_do_dpc(void *data)
}
void
-qla2xxx_wake_dpc(scsi_qla_host_t *ha)
+qla2xxx_wake_dpc(struct scsi_qla_host *vha)
{
+ struct qla_hw_data *ha = vha->hw;
struct task_struct *t = ha->dpc_thread;
- if (!test_bit(UNLOADING, &ha->dpc_flags) && t)
+ if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
wake_up_process(t);
}
@@ -2476,26 +2729,26 @@ qla2xxx_wake_dpc(scsi_qla_host_t *ha)
* ha = adapter block pointer.
*/
static void
-qla2x00_rst_aen(scsi_qla_host_t *ha)
+qla2x00_rst_aen(scsi_qla_host_t *vha)
{
- if (ha->flags.online && !ha->flags.reset_active &&
- !atomic_read(&ha->loop_down_timer) &&
- !(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) {
+ if (vha->flags.online && !vha->flags.reset_active &&
+ !atomic_read(&vha->loop_down_timer) &&
+ !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) {
do {
- clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
+ clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
/*
* Issue marker command only when we are going to start
* the I/O.
*/
- ha->marker_needed = 1;
- } while (!atomic_read(&ha->loop_down_timer) &&
- (test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags)));
+ vha->marker_needed = 1;
+ } while (!atomic_read(&vha->loop_down_timer) &&
+ (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)));
}
}
static void
-qla2x00_sp_free_dma(scsi_qla_host_t *ha, srb_t *sp)
+qla2x00_sp_free_dma(srb_t *sp)
{
struct scsi_cmnd *cmd = sp->cmd;
@@ -2507,11 +2760,11 @@ qla2x00_sp_free_dma(scsi_qla_host_t *ha, srb_t *sp)
}
void
-qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *sp)
+qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
{
struct scsi_cmnd *cmd = sp->cmd;
- qla2x00_sp_free_dma(ha, sp);
+ qla2x00_sp_free_dma(sp);
mempool_free(sp, ha->srb_mempool);
@@ -2527,7 +2780,7 @@ qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *sp)
* Context: Interrupt
***************************************************************************/
void
-qla2x00_timer(scsi_qla_host_t *ha)
+qla2x00_timer(scsi_qla_host_t *vha)
{
unsigned long cpu_flags = 0;
fc_port_t *fcport;
@@ -2535,8 +2788,8 @@ qla2x00_timer(scsi_qla_host_t *ha)
int index;
srb_t *sp;
int t;
- scsi_qla_host_t *pha = to_qla_parent(ha);
-
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req;
/*
* Ports - Port down timer.
*
@@ -2545,7 +2798,7 @@ qla2x00_timer(scsi_qla_host_t *ha)
* the port it marked DEAD.
*/
t = 0;
- list_for_each_entry(fcport, &ha->fcports, list) {
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->port_type != FCT_TARGET)
continue;
@@ -2559,7 +2812,7 @@ qla2x00_timer(scsi_qla_host_t *ha)
DEBUG(printk("scsi(%ld): fcport-%d - port retry count: "
"%d remaining\n",
- ha->host_no,
+ vha->host_no,
t, atomic_read(&fcport->port_down_timer)));
}
t++;
@@ -2567,30 +2820,32 @@ qla2x00_timer(scsi_qla_host_t *ha)
/* Loop down handler. */
- if (atomic_read(&ha->loop_down_timer) > 0 &&
- !(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags)) && ha->flags.online) {
+ if (atomic_read(&vha->loop_down_timer) > 0 &&
+ !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
+ && vha->flags.online) {
- if (atomic_read(&ha->loop_down_timer) ==
- ha->loop_down_abort_time) {
+ if (atomic_read(&vha->loop_down_timer) ==
+ vha->loop_down_abort_time) {
DEBUG(printk("scsi(%ld): Loop Down - aborting the "
"queues before time expire\n",
- ha->host_no));
+ vha->host_no));
- if (!IS_QLA2100(ha) && ha->link_down_timeout)
- atomic_set(&ha->loop_state, LOOP_DEAD);
+ if (!IS_QLA2100(ha) && vha->link_down_timeout)
+ atomic_set(&vha->loop_state, LOOP_DEAD);
/* Schedule an ISP abort to return any tape commands. */
/* NPIV - scan physical port only */
- if (!ha->parent) {
+ if (!vha->vp_idx) {
spin_lock_irqsave(&ha->hardware_lock,
cpu_flags);
+ req = ha->req_q_map[0];
for (index = 1;
index < MAX_OUTSTANDING_COMMANDS;
index++) {
fc_port_t *sfcp;
- sp = ha->outstanding_cmds[index];
+ sp = req->outstanding_cmds[index];
if (!sp)
continue;
sfcp = sp->fcport;
@@ -2598,63 +2853,63 @@ qla2x00_timer(scsi_qla_host_t *ha)
continue;
set_bit(ISP_ABORT_NEEDED,
- &ha->dpc_flags);
+ &vha->dpc_flags);
break;
}
spin_unlock_irqrestore(&ha->hardware_lock,
- cpu_flags);
+ cpu_flags);
}
- set_bit(ABORT_QUEUES_NEEDED, &ha->dpc_flags);
+ set_bit(ABORT_QUEUES_NEEDED, &vha->dpc_flags);
start_dpc++;
}
/* if the loop has been down for 4 minutes, reinit adapter */
- if (atomic_dec_and_test(&ha->loop_down_timer) != 0) {
+ if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
DEBUG(printk("scsi(%ld): Loop down exceed 4 mins - "
"restarting queues.\n",
- ha->host_no));
+ vha->host_no));
- set_bit(RESTART_QUEUES_NEEDED, &ha->dpc_flags);
+ set_bit(RESTART_QUEUES_NEEDED, &vha->dpc_flags);
start_dpc++;
- if (!(ha->device_flags & DFLG_NO_CABLE) &&
- !ha->parent) {
+ if (!(vha->device_flags & DFLG_NO_CABLE) &&
+ !vha->vp_idx) {
DEBUG(printk("scsi(%ld): Loop down - "
"aborting ISP.\n",
- ha->host_no));
+ vha->host_no));
qla_printk(KERN_WARNING, ha,
"Loop down - aborting ISP.\n");
- set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
}
}
DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n",
- ha->host_no,
- atomic_read(&ha->loop_down_timer)));
+ vha->host_no,
+ atomic_read(&vha->loop_down_timer)));
}
/* Check if beacon LED needs to be blinked */
if (ha->beacon_blink_led == 1) {
- set_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags);
+ set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
start_dpc++;
}
/* Process any deferred work. */
- if (!list_empty(&ha->work_list))
+ if (!list_empty(&vha->work_list))
start_dpc++;
/* Schedule the DPC routine if needed */
- if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) ||
- test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) ||
- test_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags) ||
+ if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
+ test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) ||
start_dpc ||
- test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) ||
- test_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags) ||
- test_bit(VP_DPC_NEEDED, &ha->dpc_flags) ||
- test_bit(RELOGIN_NEEDED, &ha->dpc_flags)))
- qla2xxx_wake_dpc(pha);
+ test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
+ test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
+ test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
+ test_bit(RELOGIN_NEEDED, &vha->dpc_flags)))
+ qla2xxx_wake_dpc(vha);
- qla2x00_restart_timer(ha, WATCH_INTERVAL);
+ qla2x00_restart_timer(vha, WATCH_INTERVAL);
}
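
For orientation, the loop-down handling above is a per-tick countdown; the following comment block is an illustrative summary (a sketch, not part of the patch):

/*
 * Sketch (not in the patch): loop_down_timer is armed when the loop
 * drops and decremented once per qla2x00_timer() tick.
 *
 *   timer == loop_down_abort_time -> abort the queues early; an
 *       outstanding tape command on the physical port (vp_idx == 0)
 *       also forces ISP_ABORT_NEEDED
 *   timer reaches 0 (~4 minutes)  -> RESTART_QUEUES_NEEDED, plus an
 *       ISP abort when a cable is present and this is the physical
 *       port
 *
 * Each branch bumps start_dpc, and the tick ends by waking the DPC
 * thread via qla2xxx_wake_dpc(vha) when any work bit is set.
 */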
/* Firmware interface routines. */
@@ -2686,8 +2941,9 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
};
struct fw_blob *
-qla2x00_request_firmware(scsi_qla_host_t *ha)
+qla2x00_request_firmware(scsi_qla_host_t *vha)
{
+ struct qla_hw_data *ha = vha->hw;
struct fw_blob *blob;
blob = NULL;
@@ -2711,7 +2967,7 @@ qla2x00_request_firmware(scsi_qla_host_t *ha)
if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
DEBUG2(printk("scsi(%ld): Failed to load firmware image "
- "(%s).\n", ha->host_no, blob->name));
+ "(%s).\n", vha->host_no, blob->name));
blob->fw = NULL;
blob = NULL;
goto out;
@@ -2756,7 +3012,8 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
int risc_paused = 0;
uint32_t stat;
unsigned long flags;
- scsi_qla_host_t *ha = pci_get_drvdata(pdev);
+ scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
+ struct qla_hw_data *ha = base_vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
@@ -2779,7 +3036,7 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
if (risc_paused) {
qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, "
"Dumping firmware!\n");
- ha->isp_ops->fw_dump(ha, 0);
+ ha->isp_ops->fw_dump(base_vha, 0);
return PCI_ERS_RESULT_NEED_RESET;
} else
@@ -2790,7 +3047,8 @@ static pci_ers_result_t
qla2xxx_pci_slot_reset(struct pci_dev *pdev)
{
pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
- scsi_qla_host_t *ha = pci_get_drvdata(pdev);
+ scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
+ struct qla_hw_data *ha = base_vha->hw;
int rc;
if (ha->mem_only)
@@ -2806,13 +3064,13 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
}
pci_set_master(pdev);
- if (ha->isp_ops->pci_config(ha))
+ if (ha->isp_ops->pci_config(base_vha))
return ret;
- set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
- if (qla2x00_abort_isp(ha)== QLA_SUCCESS)
+ set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+ if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS)
ret = PCI_ERS_RESULT_RECOVERED;
- clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
+ clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
return ret;
}
@@ -2820,10 +3078,11 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
static void
qla2xxx_pci_resume(struct pci_dev *pdev)
{
- scsi_qla_host_t *ha = pci_get_drvdata(pdev);
+ scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
+ struct qla_hw_data *ha = base_vha->hw;
int ret;
- ret = qla2x00_wait_for_hba_online(ha);
+ ret = qla2x00_wait_for_hba_online(base_vha);
if (ret != QLA_SUCCESS) {
qla_printk(KERN_ERR, ha,
"the device failed to resume I/O "
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 90a13211717f..c538ee1b1a31 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -10,10 +10,6 @@
#include <linux/vmalloc.h>
#include <asm/uaccess.h>
-static uint16_t qla2x00_nvram_request(scsi_qla_host_t *, uint32_t);
-static void qla2x00_nv_deselect(scsi_qla_host_t *);
-static void qla2x00_nv_write(scsi_qla_host_t *, uint16_t);
-
/*
* NVRAM support routines
*/
@@ -23,7 +19,7 @@ static void qla2x00_nv_write(scsi_qla_host_t *, uint16_t);
* @ha: HA context
*/
static void
-qla2x00_lock_nvram_access(scsi_qla_host_t *ha)
+qla2x00_lock_nvram_access(struct qla_hw_data *ha)
{
uint16_t data;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -56,7 +52,7 @@ qla2x00_lock_nvram_access(scsi_qla_host_t *ha)
* @ha: HA context
*/
static void
-qla2x00_unlock_nvram_access(scsi_qla_host_t *ha)
+qla2x00_unlock_nvram_access(struct qla_hw_data *ha)
{
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -67,6 +63,84 @@ qla2x00_unlock_nvram_access(scsi_qla_host_t *ha)
}
/**
+ * qla2x00_nv_write() - Prepare for NVRAM read/write operation.
+ * @ha: HA context
+ * @data: Serial interface selector
+ */
+static void
+qla2x00_nv_write(struct qla_hw_data *ha, uint16_t data)
+{
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+ WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
+ RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ NVRAM_DELAY();
+ WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_CLOCK |
+ NVR_WRT_ENABLE);
+ RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ NVRAM_DELAY();
+ WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
+ RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ NVRAM_DELAY();
+}
+
+/**
+ * qla2x00_nvram_request() - Sends read command to NVRAM and gets data from
+ * NVRAM.
+ * @ha: HA context
+ * @nv_cmd: NVRAM command
+ *
+ * Bit definitions for NVRAM command:
+ *
+ * Bit 26 = start bit
+ * Bit 25, 24 = opcode
+ * Bit 23-16 = address
+ * Bit 15-0 = write data
+ *
+ * Returns the word read from nvram @addr.
+ */
+static uint16_t
+qla2x00_nvram_request(struct qla_hw_data *ha, uint32_t nv_cmd)
+{
+ uint8_t cnt;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ uint16_t data = 0;
+ uint16_t reg_data;
+
+ /* Send command to NVRAM. */
+ nv_cmd <<= 5;
+ for (cnt = 0; cnt < 11; cnt++) {
+ if (nv_cmd & BIT_31)
+ qla2x00_nv_write(ha, NVR_DATA_OUT);
+ else
+ qla2x00_nv_write(ha, 0);
+ nv_cmd <<= 1;
+ }
+
+ /* Read data from NVRAM. */
+ for (cnt = 0; cnt < 16; cnt++) {
+ WRT_REG_WORD(&reg->nvram, NVR_SELECT | NVR_CLOCK);
+ RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ NVRAM_DELAY();
+ data <<= 1;
+ reg_data = RD_REG_WORD(&reg->nvram);
+ if (reg_data & NVR_DATA_IN)
+ data |= BIT_0;
+ WRT_REG_WORD(&reg->nvram, NVR_SELECT);
+ RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ NVRAM_DELAY();
+ }
+
+ /* Deselect chip. */
+ WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
+ RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ NVRAM_DELAY();
+
+ return data;
+}
+
+
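
As a worked example of the bit layout documented above (a sketch only; NV_READ_OP_SKETCH is a hypothetical stand-in for the driver's read-opcode constant):

/* Sketch -- not part of this patch. Build the serial command for
 * reading NVRAM word 0x12: bit 26 = start, bits 25:24 = opcode
 * ("10" for a read), bits 23:16 = address. */
#define NV_READ_OP_SKETCH ((1U << 26) | (1U << 25))

uint32_t nv_cmd = (0x12 << 16) | NV_READ_OP_SKETCH;

/* qla2x00_nvram_request() shifts this left by 5 so bit 26 lands on
 * bit 31, clocks out the top 11 bits (start + opcode + address)
 * MSB-first via qla2x00_nv_write(), then clocks in 16 data bits. */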
+/**
* qla2x00_get_nvram_word() - Calculates word position in NVRAM and calls the
* request routine to get the word from NVRAM.
* @ha: HA context
@@ -75,7 +149,7 @@ qla2x00_unlock_nvram_access(scsi_qla_host_t *ha)
* Returns the word read from nvram @addr.
*/
static uint16_t
-qla2x00_get_nvram_word(scsi_qla_host_t *ha, uint32_t addr)
+qla2x00_get_nvram_word(struct qla_hw_data *ha, uint32_t addr)
{
uint16_t data;
uint32_t nv_cmd;
@@ -88,13 +162,27 @@ qla2x00_get_nvram_word(scsi_qla_host_t *ha, uint32_t addr)
}
/**
+ * qla2x00_nv_deselect() - Deselect NVRAM operations.
+ * @ha: HA context
+ */
+static void
+qla2x00_nv_deselect(struct qla_hw_data *ha)
+{
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+
+ WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
+ RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ NVRAM_DELAY();
+}
+
+/**
* qla2x00_write_nvram_word() - Write NVRAM data.
* @ha: HA context
* @addr: Address in NVRAM to write
* @data: word to program
*/
static void
-qla2x00_write_nvram_word(scsi_qla_host_t *ha, uint32_t addr, uint16_t data)
+qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
{
int count;
uint16_t word;
@@ -132,7 +220,7 @@ qla2x00_write_nvram_word(scsi_qla_host_t *ha, uint32_t addr, uint16_t data)
do {
if (!--wait_cnt) {
DEBUG9_10(printk("%s(%ld): NVRAM didn't go ready...\n",
- __func__, ha->host_no));
+ __func__, vha->host_no));
break;
}
NVRAM_DELAY();
@@ -150,8 +238,8 @@ qla2x00_write_nvram_word(scsi_qla_host_t *ha, uint32_t addr, uint16_t data)
}
static int
-qla2x00_write_nvram_word_tmo(scsi_qla_host_t *ha, uint32_t addr, uint16_t data,
- uint32_t tmo)
+qla2x00_write_nvram_word_tmo(struct qla_hw_data *ha, uint32_t addr,
+ uint16_t data, uint32_t tmo)
{
int ret, count;
uint16_t word;
@@ -209,102 +297,11 @@ qla2x00_write_nvram_word_tmo(scsi_qla_host_t *ha, uint32_t addr, uint16_t data,
}
/**
- * qla2x00_nvram_request() - Sends read command to NVRAM and gets data from
- * NVRAM.
- * @ha: HA context
- * @nv_cmd: NVRAM command
- *
- * Bit definitions for NVRAM command:
- *
- * Bit 26 = start bit
- * Bit 25, 24 = opcode
- * Bit 23-16 = address
- * Bit 15-0 = write data
- *
- * Returns the word read from nvram @addr.
- */
-static uint16_t
-qla2x00_nvram_request(scsi_qla_host_t *ha, uint32_t nv_cmd)
-{
- uint8_t cnt;
- struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- uint16_t data = 0;
- uint16_t reg_data;
-
- /* Send command to NVRAM. */
- nv_cmd <<= 5;
- for (cnt = 0; cnt < 11; cnt++) {
- if (nv_cmd & BIT_31)
- qla2x00_nv_write(ha, NVR_DATA_OUT);
- else
- qla2x00_nv_write(ha, 0);
- nv_cmd <<= 1;
- }
-
- /* Read data from NVRAM. */
- for (cnt = 0; cnt < 16; cnt++) {
- WRT_REG_WORD(&reg->nvram, NVR_SELECT | NVR_CLOCK);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
- NVRAM_DELAY();
- data <<= 1;
- reg_data = RD_REG_WORD(&reg->nvram);
- if (reg_data & NVR_DATA_IN)
- data |= BIT_0;
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
- NVRAM_DELAY();
- }
-
- /* Deselect chip. */
- WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
- NVRAM_DELAY();
-
- return (data);
-}
-
-/**
- * qla2x00_nv_write() - Clean NVRAM operations.
- * @ha: HA context
- */
-static void
-qla2x00_nv_deselect(scsi_qla_host_t *ha)
-{
- struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
-
- WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
- NVRAM_DELAY();
-}
-
-/**
- * qla2x00_nv_write() - Prepare for NVRAM read/write operation.
- * @ha: HA context
- * @data: Serial interface selector
- */
-static void
-qla2x00_nv_write(scsi_qla_host_t *ha, uint16_t data)
-{
- struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
-
- WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
- NVRAM_DELAY();
- WRT_REG_WORD(&reg->nvram, data | NVR_SELECT| NVR_CLOCK |
- NVR_WRT_ENABLE);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
- NVRAM_DELAY();
- WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
- NVRAM_DELAY();
-}
-
-/**
* qla2x00_clear_nvram_protection() -
 * Clear NVRAM write-protection, returning the prior protection state.
* @ha: HA context
*/
static int
-qla2x00_clear_nvram_protection(scsi_qla_host_t *ha)
+qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
{
int ret, stat;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -352,9 +349,8 @@ qla2x00_clear_nvram_protection(scsi_qla_host_t *ha)
wait_cnt = NVR_WAIT_CNT;
do {
if (!--wait_cnt) {
- DEBUG9_10(printk("%s(%ld): NVRAM didn't go "
- "ready...\n", __func__,
- ha->host_no));
+ DEBUG9_10(qla_printk(KERN_DEBUG, ha,
+ "NVRAM didn't go ready...\n"));
break;
}
NVRAM_DELAY();
@@ -370,7 +366,7 @@ qla2x00_clear_nvram_protection(scsi_qla_host_t *ha)
}
static void
-qla2x00_set_nvram_protection(scsi_qla_host_t *ha, int stat)
+qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
{
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
uint32_t word, wait_cnt;
@@ -412,8 +408,7 @@ qla2x00_set_nvram_protection(scsi_qla_host_t *ha, int stat)
wait_cnt = NVR_WAIT_CNT;
do {
if (!--wait_cnt) {
- DEBUG9_10(printk("%s(%ld): NVRAM didn't go ready...\n",
- __func__, ha->host_no));
+ DEBUG9_10(qla_printk(KERN_DEBUG, ha, "NVRAM didn't go ready...\n"));
break;
}
NVRAM_DELAY();
@@ -454,7 +449,7 @@ nvram_data_to_access_addr(uint32_t naddr)
}
static uint32_t
-qla24xx_read_flash_dword(scsi_qla_host_t *ha, uint32_t addr)
+qla24xx_read_flash_dword(struct qla_hw_data *ha, uint32_t addr)
{
int rval;
uint32_t cnt, data;
@@ -482,21 +477,20 @@ qla24xx_read_flash_dword(scsi_qla_host_t *ha, uint32_t addr)
}
uint32_t *
-qla24xx_read_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
+qla24xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
uint32_t dwords)
{
uint32_t i;
-
/* Dword reads to flash. */
for (i = 0; i < dwords; i++, faddr++)
- dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
+ dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(vha->hw,
flash_data_to_access_addr(faddr)));
return dwptr;
}
static int
-qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data)
+qla24xx_write_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t data)
{
int rval;
uint32_t cnt;
@@ -519,7 +513,7 @@ qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data)
}
static void
-qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
+qla24xx_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id,
uint8_t *flash_id)
{
uint32_t ids;
@@ -544,7 +538,7 @@ qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
}
static int
-qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start)
+qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
{
const char *loc, *locations[] = { "DEF", "PCI" };
uint32_t pcihdr, pcids;
@@ -552,6 +546,8 @@ qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start)
uint8_t *buf, *bcode, last_image;
uint16_t cnt, chksum, *wptr;
struct qla_flt_location *fltl;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
/*
* FLT-location structure resides after the last PCI region.
@@ -563,20 +559,20 @@ qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start)
FA_FLASH_LAYOUT_ADDR;
/* Begin with first PCI expansion ROM header. */
- buf = (uint8_t *)ha->request_ring;
- dcode = (uint32_t *)ha->request_ring;
+ buf = (uint8_t *)req->ring;
+ dcode = (uint32_t *)req->ring;
pcihdr = 0;
last_image = 1;
do {
/* Verify PCI expansion ROM header. */
- qla24xx_read_flash_data(ha, dcode, pcihdr >> 2, 0x20);
+ qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
bcode = buf + (pcihdr % 4);
if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa)
goto end;
/* Locate PCI data structure. */
pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
- qla24xx_read_flash_data(ha, dcode, pcids >> 2, 0x20);
+ qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20);
bcode = buf + (pcihdr % 4);
/* Validate signature of PCI data structure. */
@@ -591,14 +587,14 @@ qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start)
} while (!last_image);
/* Now verify FLT-location structure. */
- fltl = (struct qla_flt_location *)ha->request_ring;
- qla24xx_read_flash_data(ha, dcode, pcihdr >> 2,
+ fltl = (struct qla_flt_location *)req->ring;
+ qla24xx_read_flash_data(vha, dcode, pcihdr >> 2,
sizeof(struct qla_flt_location) >> 2);
if (fltl->sig[0] != 'Q' || fltl->sig[1] != 'F' ||
fltl->sig[2] != 'L' || fltl->sig[3] != 'T')
goto end;
- wptr = (uint16_t *)ha->request_ring;
+ wptr = (uint16_t *)req->ring;
cnt = sizeof(struct qla_flt_location) >> 1;
for (chksum = 0; cnt; cnt--)
chksum += le16_to_cpu(*wptr++);
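
The checksum loop above uses the usual additive scheme; a sketch of the invariant (the stored checksum word is included in the sum, so a valid structure sums to zero -- the zero test itself presumably sits just past this hunk):

/* Sketch: validity test for the FLT-location structure.
 *
 *   chksum = sum of all le16 words, including the checksum field;
 *   chksum == 0 -> structure accepted
 *   chksum != 0 -> the default flash-layout address is used instead
 */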
@@ -619,7 +615,7 @@ end:
}
static void
-qla2xxx_get_flt_info(scsi_qla_host_t *ha, uint32_t flt_addr)
+qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
{
const char *loc, *locations[] = { "DEF", "FLT" };
uint16_t *wptr;
@@ -627,12 +623,14 @@ qla2xxx_get_flt_info(scsi_qla_host_t *ha, uint32_t flt_addr)
uint32_t start;
struct qla_flt_header *flt;
struct qla_flt_region *region;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
ha->flt_region_flt = flt_addr;
- wptr = (uint16_t *)ha->request_ring;
- flt = (struct qla_flt_header *)ha->request_ring;
+ wptr = (uint16_t *)req->ring;
+ flt = (struct qla_flt_header *)req->ring;
region = (struct qla_flt_region *)&flt[1];
- ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring,
+ ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
flt_addr << 2, OPTROM_BURST_SIZE);
if (*wptr == __constant_cpu_to_le16(0xffff))
goto no_flash_data;
@@ -720,8 +718,9 @@ done:
}
static void
-qla2xxx_get_fdt_info(scsi_qla_host_t *ha)
+qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
{
+#define FLASH_BLK_SIZE_4K 0x1000
#define FLASH_BLK_SIZE_32K 0x8000
#define FLASH_BLK_SIZE_64K 0x10000
const char *loc, *locations[] = { "MID", "FDT" };
@@ -730,10 +729,12 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *ha)
struct qla_fdt_layout *fdt;
uint8_t man_id, flash_id;
uint16_t mid, fid;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
- wptr = (uint16_t *)ha->request_ring;
- fdt = (struct qla_fdt_layout *)ha->request_ring;
- ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring,
+ wptr = (uint16_t *)req->ring;
+ fdt = (struct qla_fdt_layout *)req->ring;
+ ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
ha->flt_region_fdt << 2, OPTROM_BURST_SIZE);
if (*wptr == __constant_cpu_to_le16(0xffff))
goto no_flash_data;
@@ -755,7 +756,6 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *ha)
loc = locations[1];
mid = le16_to_cpu(fdt->man_id);
fid = le16_to_cpu(fdt->id);
- ha->fdt_odd_index = mid == 0x1f;
ha->fdt_wrt_disable = fdt->wrt_disable_bits;
ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0300 | fdt->erase_cmd);
ha->fdt_block_size = le32_to_cpu(fdt->block_size);
@@ -788,8 +788,7 @@ no_flash_data:
ha->fdt_block_size = FLASH_BLK_SIZE_64K;
break;
case 0x1f: /* Atmel 26DF081A. */
- ha->fdt_odd_index = 1;
- ha->fdt_block_size = FLASH_BLK_SIZE_64K;
+ ha->fdt_block_size = FLASH_BLK_SIZE_4K;
ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0320);
ha->fdt_unprotect_sec_cmd = flash_conf_to_access_addr(0x0339);
ha->fdt_protect_sec_cmd = flash_conf_to_access_addr(0x0336);
@@ -801,45 +800,48 @@ no_flash_data:
}
done:
DEBUG2(qla_printk(KERN_DEBUG, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x "
- "pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
+ "pro=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd,
- ha->fdt_unprotect_sec_cmd, ha->fdt_odd_index, ha->fdt_wrt_disable,
+ ha->fdt_unprotect_sec_cmd, ha->fdt_wrt_disable,
ha->fdt_block_size));
}
int
-qla2xxx_get_flash_info(scsi_qla_host_t *ha)
+qla2xxx_get_flash_info(scsi_qla_host_t *vha)
{
int ret;
uint32_t flt_addr;
+ struct qla_hw_data *ha = vha->hw;
if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
return QLA_SUCCESS;
- ret = qla2xxx_find_flt_start(ha, &flt_addr);
+ ret = qla2xxx_find_flt_start(vha, &flt_addr);
if (ret != QLA_SUCCESS)
return ret;
- qla2xxx_get_flt_info(ha, flt_addr);
- qla2xxx_get_fdt_info(ha);
+ qla2xxx_get_flt_info(vha, flt_addr);
+ qla2xxx_get_fdt_info(vha);
return QLA_SUCCESS;
}
void
-qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha)
+qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
{
#define NPIV_CONFIG_SIZE (16*1024)
void *data;
uint16_t *wptr;
uint16_t cnt, chksum;
+ int i;
struct qla_npiv_header hdr;
struct qla_npiv_entry *entry;
+ struct qla_hw_data *ha = vha->hw;
if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
return;
- ha->isp_ops->read_optrom(ha, (uint8_t *)&hdr,
+ ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
if (hdr.version == __constant_cpu_to_le16(0xffff))
return;
@@ -858,7 +860,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha)
return;
}
- ha->isp_ops->read_optrom(ha, (uint8_t *)data,
+ ha->isp_ops->read_optrom(vha, (uint8_t *)data,
ha->flt_region_npiv_conf << 2, NPIV_CONFIG_SIZE);
cnt = (sizeof(struct qla_npiv_header) + le16_to_cpu(hdr.entries) *
@@ -875,7 +877,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha)
entry = data + sizeof(struct qla_npiv_header);
cnt = le16_to_cpu(hdr.entries);
- for ( ; cnt; cnt--, entry++) {
+ for (i = 0; cnt; cnt--, entry++, i++) {
uint16_t flags;
struct fc_vport_identifiers vid;
struct fc_vport *vport;
@@ -893,25 +895,29 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha)
vid.port_name = wwn_to_u64(entry->port_name);
vid.node_name = wwn_to_u64(entry->node_name);
+ memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry));
+
DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx "
- "wwnn=%llx vf_id=0x%x qos=0x%x.\n", cnt,
- (unsigned long long)vid.port_name,
- (unsigned long long)vid.node_name,
- le16_to_cpu(entry->vf_id), le16_to_cpu(entry->qos)));
-
- vport = fc_vport_create(ha->host, 0, &vid);
- if (!vport)
- qla_printk(KERN_INFO, ha, "NPIV-Config: Failed to "
- "create vport [%02x]: wwpn=%llx wwnn=%llx.\n", cnt,
- (unsigned long long)vid.port_name,
- (unsigned long long)vid.node_name);
+ "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
+ vid.port_name, vid.node_name, le16_to_cpu(entry->vf_id),
+ entry->q_qos, entry->f_qos));
+
+ if (i < QLA_PRECONFIG_VPORTS) {
+ vport = fc_vport_create(vha->host, 0, &vid);
+ if (!vport)
+ qla_printk(KERN_INFO, ha,
+ "NPIV-Config: Failed to create vport [%02x]: "
+ "wwpn=%llx wwnn=%llx.\n", cnt,
+ vid.port_name, vid.node_name);
+ }
}
done:
kfree(data);
+ ha->npiv_info = NULL;
}
static void
-qla24xx_unprotect_flash(scsi_qla_host_t *ha)
+qla24xx_unprotect_flash(struct qla_hw_data *ha)
{
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
@@ -930,7 +936,7 @@ qla24xx_unprotect_flash(scsi_qla_host_t *ha)
}
static void
-qla24xx_protect_flash(scsi_qla_host_t *ha)
+qla24xx_protect_flash(struct qla_hw_data *ha)
{
uint32_t cnt;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
@@ -956,7 +962,7 @@ skip_wrt_protect:
}
static int
-qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
+qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
uint32_t dwords)
{
int ret;
@@ -966,6 +972,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
dma_addr_t optrom_dma;
void *optrom = NULL;
uint32_t *s, *d;
+ struct qla_hw_data *ha = vha->hw;
ret = QLA_SUCCESS;
@@ -987,13 +994,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
qla24xx_unprotect_flash(ha);
for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) {
- if (ha->fdt_odd_index) {
- findex = faddr << 2;
- fdata = findex & sec_mask;
- } else {
- findex = faddr;
- fdata = (findex & sec_mask) << 2;
- }
+
+ findex = faddr;
+ fdata = (findex & sec_mask) << 2;
/* Are we at the beginning of a sector? */
if ((findex & rest_addr) == 0) {
@@ -1007,9 +1010,8 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
(fdata & 0xff00) |((fdata << 16) &
0xff0000) | ((fdata >> 16) & 0xff));
if (ret != QLA_SUCCESS) {
- DEBUG9(printk("%s(%ld) Unable to flash "
- "sector: address=%x.\n", __func__,
- ha->host_no, faddr));
+ DEBUG9(qla_printk(KERN_DEBUG, ha, "Unable to flash "
+ "sector: address=%x.\n", faddr));
break;
}
}
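
The sector arithmetic above is easier to see with concrete numbers; this sketch assumes a 64 KB erase block, and the mask derivation is an assumption for illustration, not the driver's exact code:

uint32_t block_size = 0x10000;               /* FLASH_BLK_SIZE_64K */
uint32_t rest_addr  = (block_size >> 2) - 1; /* 0x3fff: dword offset inside a sector */
uint32_t sec_mask   = ~rest_addr;            /* masks faddr down to its sector base */

/* (findex & rest_addr) == 0 only on a sector boundary, which is when
 * the erase command for fdata = (findex & sec_mask) << 2 is issued. */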
@@ -1021,7 +1023,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
miter < OPTROM_BURST_DWORDS; miter++, s++, d++)
*s = cpu_to_le32(*d);
- ret = qla2x00_load_ram(ha, optrom_dma,
+ ret = qla2x00_load_ram(vha, optrom_dma,
flash_data_to_access_addr(faddr),
OPTROM_BURST_DWORDS);
if (ret != QLA_SUCCESS) {
@@ -1049,7 +1051,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
if (ret != QLA_SUCCESS) {
DEBUG9(printk("%s(%ld) Unable to program flash "
"address=%x data=%x.\n", __func__,
- ha->host_no, faddr, *dwptr));
+ vha->host_no, faddr, *dwptr));
break;
}
@@ -1072,11 +1074,12 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
}
uint8_t *
-qla2x00_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
+qla2x00_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
uint32_t bytes)
{
uint32_t i;
uint16_t *wptr;
+ struct qla_hw_data *ha = vha->hw;
/* Word reads to NVRAM via registers. */
wptr = (uint16_t *)buf;
@@ -1090,7 +1093,7 @@ qla2x00_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
}
uint8_t *
-qla24xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
+qla24xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
uint32_t bytes)
{
uint32_t i;
@@ -1099,20 +1102,21 @@ qla24xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
/* Dword reads to flash. */
dwptr = (uint32_t *)buf;
for (i = 0; i < bytes >> 2; i++, naddr++)
- dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
+ dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(vha->hw,
nvram_data_to_access_addr(naddr)));
return buf;
}
int
-qla2x00_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
+qla2x00_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
uint32_t bytes)
{
int ret, stat;
uint32_t i;
uint16_t *wptr;
unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
ret = QLA_SUCCESS;
@@ -1139,12 +1143,13 @@ qla2x00_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
}
int
-qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
+qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
uint32_t bytes)
{
int ret;
uint32_t i;
uint32_t *dwptr;
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
ret = QLA_SUCCESS;
@@ -1167,9 +1172,8 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
nvram_data_to_access_addr(naddr),
cpu_to_le32(*dwptr));
if (ret != QLA_SUCCESS) {
- DEBUG9(printk("%s(%ld) Unable to program "
- "nvram address=%x data=%x.\n", __func__,
- ha->host_no, naddr, *dwptr));
+ DEBUG9(qla_printk(KERN_DEBUG, ha, "Unable to program "
+ "nvram address=%x data=%x.\n", naddr, *dwptr));
break;
}
}
@@ -1187,11 +1191,12 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
}
uint8_t *
-qla25xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
+qla25xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
uint32_t bytes)
{
uint32_t i;
uint32_t *dwptr;
+ struct qla_hw_data *ha = vha->hw;
/* Dword reads to flash. */
dwptr = (uint32_t *)buf;
@@ -1204,19 +1209,20 @@ qla25xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
}
int
-qla25xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
+qla25xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
uint32_t bytes)
{
+ struct qla_hw_data *ha = vha->hw;
#define RMW_BUFFER_SIZE (64 * 1024)
uint8_t *dbuf;
dbuf = vmalloc(RMW_BUFFER_SIZE);
if (!dbuf)
return QLA_MEMORY_ALLOC_FAILED;
- ha->isp_ops->read_optrom(ha, dbuf, ha->flt_region_vpd_nvram << 2,
+ ha->isp_ops->read_optrom(vha, dbuf, ha->flt_region_vpd_nvram << 2,
RMW_BUFFER_SIZE);
memcpy(dbuf + (naddr << 2), buf, bytes);
- ha->isp_ops->write_optrom(ha, dbuf, ha->flt_region_vpd_nvram << 2,
+ ha->isp_ops->write_optrom(vha, dbuf, ha->flt_region_vpd_nvram << 2,
RMW_BUFFER_SIZE);
vfree(dbuf);
@@ -1224,7 +1230,7 @@ qla25xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
}
static inline void
-qla2x00_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
+qla2x00_flip_colors(struct qla_hw_data *ha, uint16_t *pflags)
{
if (IS_QLA2322(ha)) {
/* Flip all colors. */
@@ -1254,12 +1260,13 @@ qla2x00_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
#define PIO_REG(h, r) ((h)->pio_address + offsetof(struct device_reg_2xxx, r))
void
-qla2x00_beacon_blink(struct scsi_qla_host *ha)
+qla2x00_beacon_blink(struct scsi_qla_host *vha)
{
uint16_t gpio_enable;
uint16_t gpio_data;
uint16_t led_color = 0;
unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1303,17 +1310,18 @@ qla2x00_beacon_blink(struct scsi_qla_host *ha)
}
int
-qla2x00_beacon_on(struct scsi_qla_host *ha)
+qla2x00_beacon_on(struct scsi_qla_host *vha)
{
uint16_t gpio_enable;
uint16_t gpio_data;
unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
ha->fw_options[1] |= FO1_DISABLE_GPIO6_7;
- if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) {
+ if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
"Unable to update fw options (beacon on).\n");
return QLA_FUNCTION_FAILED;
@@ -1359,9 +1367,10 @@ qla2x00_beacon_on(struct scsi_qla_host *ha)
}
int
-qla2x00_beacon_off(struct scsi_qla_host *ha)
+qla2x00_beacon_off(struct scsi_qla_host *vha)
{
int rval = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
ha->beacon_blink_led = 0;
@@ -1371,12 +1380,12 @@ qla2x00_beacon_off(struct scsi_qla_host *ha)
else
ha->beacon_color_state = QLA_LED_GRN_ON;
- ha->isp_ops->beacon_blink(ha); /* This turns green LED off */
+ ha->isp_ops->beacon_blink(vha); /* This turns green LED off */
ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
ha->fw_options[1] &= ~FO1_DISABLE_GPIO6_7;
- rval = qla2x00_set_fw_options(ha, ha->fw_options);
+ rval = qla2x00_set_fw_options(vha, ha->fw_options);
if (rval != QLA_SUCCESS)
qla_printk(KERN_WARNING, ha,
"Unable to update fw options (beacon off).\n");
@@ -1385,7 +1394,7 @@ qla2x00_beacon_off(struct scsi_qla_host *ha)
static inline void
-qla24xx_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
+qla24xx_flip_colors(struct qla_hw_data *ha, uint16_t *pflags)
{
/* Flip all colors. */
if (ha->beacon_color_state == QLA_LED_ALL_ON) {
@@ -1400,11 +1409,12 @@ qla24xx_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
}
void
-qla24xx_beacon_blink(struct scsi_qla_host *ha)
+qla24xx_beacon_blink(struct scsi_qla_host *vha)
{
uint16_t led_color = 0;
uint32_t gpio_data;
unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
/* Save the Original GPIOD. */
@@ -1433,20 +1443,21 @@ qla24xx_beacon_blink(struct scsi_qla_host *ha)
}
int
-qla24xx_beacon_on(struct scsi_qla_host *ha)
+qla24xx_beacon_on(struct scsi_qla_host *vha)
{
uint32_t gpio_data;
unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
if (ha->beacon_blink_led == 0) {
/* Enable firmware for update */
ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL;
- if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS)
+ if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS)
return QLA_FUNCTION_FAILED;
- if (qla2x00_get_fw_options(ha, ha->fw_options) !=
+ if (qla2x00_get_fw_options(vha, ha->fw_options) !=
QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
"Unable to update fw options (beacon on).\n");
@@ -1474,16 +1485,17 @@ qla24xx_beacon_on(struct scsi_qla_host *ha)
}
int
-qla24xx_beacon_off(struct scsi_qla_host *ha)
+qla24xx_beacon_off(struct scsi_qla_host *vha)
{
uint32_t gpio_data;
unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
ha->beacon_blink_led = 0;
ha->beacon_color_state = QLA_LED_ALL_ON;
- ha->isp_ops->beacon_blink(ha); /* Will flip to all off. */
+ ha->isp_ops->beacon_blink(vha); /* Will flip to all off. */
/* Give control back to firmware. */
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1497,13 +1509,13 @@ qla24xx_beacon_off(struct scsi_qla_host *ha)
ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL;
- if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) {
+ if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
"Unable to update fw options (beacon off).\n");
return QLA_FUNCTION_FAILED;
}
- if (qla2x00_get_fw_options(ha, ha->fw_options) != QLA_SUCCESS) {
+ if (qla2x00_get_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
"Unable to get fw options (beacon off).\n");
return QLA_FUNCTION_FAILED;
@@ -1522,7 +1534,7 @@ qla24xx_beacon_off(struct scsi_qla_host *ha)
* @ha: HA context
*/
static void
-qla2x00_flash_enable(scsi_qla_host_t *ha)
+qla2x00_flash_enable(struct qla_hw_data *ha)
{
uint16_t data;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1538,7 +1550,7 @@ qla2x00_flash_enable(scsi_qla_host_t *ha)
* @ha: HA context
*/
static void
-qla2x00_flash_disable(scsi_qla_host_t *ha)
+qla2x00_flash_disable(struct qla_hw_data *ha)
{
uint16_t data;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1559,7 +1571,7 @@ qla2x00_flash_disable(scsi_qla_host_t *ha)
* Returns the byte read from flash @addr.
*/
static uint8_t
-qla2x00_read_flash_byte(scsi_qla_host_t *ha, uint32_t addr)
+qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr)
{
uint16_t data;
uint16_t bank_select;
@@ -1620,7 +1632,7 @@ qla2x00_read_flash_byte(scsi_qla_host_t *ha, uint32_t addr)
* @data: Data to write
*/
static void
-qla2x00_write_flash_byte(scsi_qla_host_t *ha, uint32_t addr, uint8_t data)
+qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data)
{
uint16_t bank_select;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1683,7 +1695,7 @@ qla2x00_write_flash_byte(scsi_qla_host_t *ha, uint32_t addr, uint8_t data)
* Returns 0 on success, else non-zero.
*/
static int
-qla2x00_poll_flash(scsi_qla_host_t *ha, uint32_t addr, uint8_t poll_data,
+qla2x00_poll_flash(struct qla_hw_data *ha, uint32_t addr, uint8_t poll_data,
uint8_t man_id, uint8_t flash_id)
{
int status;
@@ -1723,8 +1735,8 @@ qla2x00_poll_flash(scsi_qla_host_t *ha, uint32_t addr, uint8_t poll_data,
* Returns 0 on success, else non-zero.
*/
static int
-qla2x00_program_flash_address(scsi_qla_host_t *ha, uint32_t addr, uint8_t data,
- uint8_t man_id, uint8_t flash_id)
+qla2x00_program_flash_address(struct qla_hw_data *ha, uint32_t addr,
+ uint8_t data, uint8_t man_id, uint8_t flash_id)
{
/* Write Program Command Sequence. */
if (IS_OEM_001(ha)) {
@@ -1760,7 +1772,7 @@ qla2x00_program_flash_address(scsi_qla_host_t *ha, uint32_t addr, uint8_t data,
* Returns 0 on success, else non-zero.
*/
static int
-qla2x00_erase_flash(scsi_qla_host_t *ha, uint8_t man_id, uint8_t flash_id)
+qla2x00_erase_flash(struct qla_hw_data *ha, uint8_t man_id, uint8_t flash_id)
{
/* Individual Sector Erase Command Sequence */
if (IS_OEM_001(ha)) {
@@ -1796,7 +1808,7 @@ qla2x00_erase_flash(scsi_qla_host_t *ha, uint8_t man_id, uint8_t flash_id)
* Returns 0 on success, else non-zero.
*/
static int
-qla2x00_erase_flash_sector(scsi_qla_host_t *ha, uint32_t addr,
+qla2x00_erase_flash_sector(struct qla_hw_data *ha, uint32_t addr,
uint32_t sec_mask, uint8_t man_id, uint8_t flash_id)
{
/* Individual Sector Erase Command Sequence */
@@ -1822,7 +1834,7 @@ qla2x00_erase_flash_sector(scsi_qla_host_t *ha, uint32_t addr,
* @flash_id: Flash ID
*/
static void
-qla2x00_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
+qla2x00_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id,
uint8_t *flash_id)
{
qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
@@ -1836,8 +1848,8 @@ qla2x00_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
}
static void
-qla2x00_read_flash_data(scsi_qla_host_t *ha, uint8_t *tmp_buf, uint32_t saddr,
- uint32_t length)
+qla2x00_read_flash_data(struct qla_hw_data *ha, uint8_t *tmp_buf,
+ uint32_t saddr, uint32_t length)
{
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
uint32_t midpoint, ilength;
@@ -1861,14 +1873,15 @@ qla2x00_read_flash_data(scsi_qla_host_t *ha, uint8_t *tmp_buf, uint32_t saddr,
}
static inline void
-qla2x00_suspend_hba(struct scsi_qla_host *ha)
+qla2x00_suspend_hba(struct scsi_qla_host *vha)
{
int cnt;
unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
/* Suspend HBA. */
- scsi_block_requests(ha->host);
+ scsi_block_requests(vha->host);
ha->isp_ops->disable_intrs(ha);
set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
@@ -1889,26 +1902,29 @@ qla2x00_suspend_hba(struct scsi_qla_host *ha)
}
static inline void
-qla2x00_resume_hba(struct scsi_qla_host *ha)
+qla2x00_resume_hba(struct scsi_qla_host *vha)
{
+ struct qla_hw_data *ha = vha->hw;
+
/* Resume HBA. */
clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
- set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
- qla2xxx_wake_dpc(ha);
- qla2x00_wait_for_hba_online(ha);
- scsi_unblock_requests(ha->host);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_hba_online(vha);
+ scsi_unblock_requests(vha->host);
}
uint8_t *
-qla2x00_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
+qla2x00_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
uint32_t offset, uint32_t length)
{
uint32_t addr, midpoint;
uint8_t *data;
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
/* Suspend HBA. */
- qla2x00_suspend_hba(ha);
+ qla2x00_suspend_hba(vha);
/* Go with read. */
midpoint = ha->optrom_size / 2;
@@ -1927,13 +1943,13 @@ qla2x00_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
qla2x00_flash_disable(ha);
/* Resume HBA. */
- qla2x00_resume_hba(ha);
+ qla2x00_resume_hba(vha);
return buf;
}
int
-qla2x00_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
+qla2x00_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
uint32_t offset, uint32_t length)
{
@@ -1941,10 +1957,11 @@ qla2x00_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
uint8_t man_id, flash_id, sec_number, data;
uint16_t wd;
uint32_t addr, liter, sec_mask, rest_addr;
+ struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
/* Suspend HBA. */
- qla2x00_suspend_hba(ha);
+ qla2x00_suspend_hba(vha);
rval = QLA_SUCCESS;
sec_number = 0;
@@ -2144,55 +2161,58 @@ update_flash:
qla2x00_flash_disable(ha);
/* Resume HBA. */
- qla2x00_resume_hba(ha);
+ qla2x00_resume_hba(vha);
return rval;
}
uint8_t *
-qla24xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
+qla24xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
uint32_t offset, uint32_t length)
{
+ struct qla_hw_data *ha = vha->hw;
+
/* Suspend HBA. */
- scsi_block_requests(ha->host);
+ scsi_block_requests(vha->host);
set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
/* Go with read. */
- qla24xx_read_flash_data(ha, (uint32_t *)buf, offset >> 2, length >> 2);
+ qla24xx_read_flash_data(vha, (uint32_t *)buf, offset >> 2, length >> 2);
/* Resume HBA. */
clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
- scsi_unblock_requests(ha->host);
+ scsi_unblock_requests(vha->host);
return buf;
}
int
-qla24xx_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
+qla24xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
uint32_t offset, uint32_t length)
{
int rval;
+ struct qla_hw_data *ha = vha->hw;
/* Suspend HBA. */
- scsi_block_requests(ha->host);
+ scsi_block_requests(vha->host);
set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
/* Go with write. */
- rval = qla24xx_write_flash_data(ha, (uint32_t *)buf, offset >> 2,
+ rval = qla24xx_write_flash_data(vha, (uint32_t *)buf, offset >> 2,
length >> 2);
/* Resume HBA -- RISC reset needed. */
clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
- set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
- qla2xxx_wake_dpc(ha);
- qla2x00_wait_for_hba_online(ha);
- scsi_unblock_requests(ha->host);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_hba_online(vha);
+ scsi_unblock_requests(vha->host);
return rval;
}
uint8_t *
-qla25xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
+qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
uint32_t offset, uint32_t length)
{
int rval;
@@ -2200,6 +2220,7 @@ qla25xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
void *optrom;
uint8_t *pbuf;
uint32_t faddr, left, burst;
+ struct qla_hw_data *ha = vha->hw;
if (offset & 0xfff)
goto slow_read;
@@ -2224,7 +2245,7 @@ qla25xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
if (burst > left)
burst = left;
- rval = qla2x00_dump_ram(ha, optrom_dma,
+ rval = qla2x00_dump_ram(vha, optrom_dma,
flash_data_to_access_addr(faddr), burst);
if (rval) {
qla_printk(KERN_WARNING, ha,
@@ -2253,7 +2274,7 @@ qla25xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
return buf;
slow_read:
- return qla24xx_read_optrom_data(ha, buf, offset, length);
+ return qla24xx_read_optrom_data(vha, buf, offset, length);
}
/**
@@ -2275,7 +2296,7 @@ slow_read:
* Returns QLA_SUCCESS on successful retrieval of version.
*/
static void
-qla2x00_get_fcode_version(scsi_qla_host_t *ha, uint32_t pcids)
+qla2x00_get_fcode_version(struct qla_hw_data *ha, uint32_t pcids)
{
int ret = QLA_FUNCTION_FAILED;
uint32_t istart, iend, iter, vend;
@@ -2349,13 +2370,14 @@ qla2x00_get_fcode_version(scsi_qla_host_t *ha, uint32_t pcids)
}
int
-qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
+qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
{
int ret = QLA_SUCCESS;
uint8_t code_type, last_image;
uint32_t pcihdr, pcids;
uint8_t *dbyte;
uint16_t *dcode;
+ struct qla_hw_data *ha = vha->hw;
if (!ha->pio_address || !mbuf)
return QLA_FUNCTION_FAILED;
@@ -2375,8 +2397,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 ||
qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) {
/* No signature */
- DEBUG2(printk("scsi(%ld): No matching ROM "
- "signature.\n", ha->host_no));
+ DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM "
+ "signature.\n"));
ret = QLA_FUNCTION_FAILED;
break;
}
@@ -2392,8 +2414,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' ||
qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') {
/* Incorrect header. */
- DEBUG2(printk("%s(): PCI data struct not found "
- "pcir_adr=%x.\n", __func__, pcids));
+ DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not "
+ "found pcir_adr=%x.\n", pcids));
ret = QLA_FUNCTION_FAILED;
break;
}
@@ -2407,7 +2429,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
qla2x00_read_flash_byte(ha, pcids + 0x12);
ha->bios_revision[1] =
qla2x00_read_flash_byte(ha, pcids + 0x13);
- DEBUG3(printk("%s(): read BIOS %d.%d.\n", __func__,
+ DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n",
ha->bios_revision[1], ha->bios_revision[0]));
break;
case ROM_CODE_TYPE_FCODE:
@@ -2421,12 +2443,12 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
qla2x00_read_flash_byte(ha, pcids + 0x12);
ha->efi_revision[1] =
qla2x00_read_flash_byte(ha, pcids + 0x13);
- DEBUG3(printk("%s(): read EFI %d.%d.\n", __func__,
+ DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n",
ha->efi_revision[1], ha->efi_revision[0]));
break;
default:
- DEBUG2(printk("%s(): Unrecognized code type %x at "
- "pcids %x.\n", __func__, code_type, pcids));
+ DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code "
+ "type %x at pcids %x.\n", code_type, pcids));
break;
}
@@ -2446,16 +2468,16 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10,
8);
- DEBUG3(printk("%s(%ld): dumping fw ver from flash:\n",
- __func__, ha->host_no));
+ DEBUG3(qla_printk(KERN_DEBUG, ha, "dumping fw ver from "
+ "flash:\n"));
DEBUG3(qla2x00_dump_buffer((uint8_t *)dbyte, 8));
if ((dcode[0] == 0xffff && dcode[1] == 0xffff &&
dcode[2] == 0xffff && dcode[3] == 0xffff) ||
(dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
dcode[3] == 0)) {
- DEBUG2(printk("%s(): Unrecognized fw revision at "
- "%x.\n", __func__, ha->flt_region_fw * 4));
+ DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw "
+ "revision at %x.\n", ha->flt_region_fw * 4));
} else {
/* values are in big endian */
ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1];
@@ -2470,7 +2492,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
}
int
-qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
+qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
{
int ret = QLA_SUCCESS;
uint32_t pcihdr, pcids;
@@ -2478,6 +2500,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
uint8_t *bcode;
uint8_t code_type, last_image;
int i;
+ struct qla_hw_data *ha = vha->hw;
if (!mbuf)
return QLA_FUNCTION_FAILED;
@@ -2494,12 +2517,12 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
last_image = 1;
do {
/* Verify PCI expansion ROM header. */
- qla24xx_read_flash_data(ha, dcode, pcihdr >> 2, 0x20);
+ qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
bcode = mbuf + (pcihdr % 4);
if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
/* No signature */
- DEBUG2(printk("scsi(%ld): No matching ROM "
- "signature.\n", ha->host_no));
+ DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM "
+ "signature.\n"));
ret = QLA_FUNCTION_FAILED;
break;
}
@@ -2507,15 +2530,15 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
/* Locate PCI data structure. */
pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
- qla24xx_read_flash_data(ha, dcode, pcids >> 2, 0x20);
+ qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20);
bcode = mbuf + (pcihdr % 4);
/* Validate signature of PCI data structure. */
if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
bcode[0x2] != 'I' || bcode[0x3] != 'R') {
/* Incorrect header. */
- DEBUG2(printk("%s(): PCI data struct not found "
- "pcir_adr=%x.\n", __func__, pcids));
+ DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not "
+ "found pcir_adr=%x.\n", pcids));
ret = QLA_FUNCTION_FAILED;
break;
}
@@ -2527,26 +2550,26 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
/* Intel x86, PC-AT compatible. */
ha->bios_revision[0] = bcode[0x12];
ha->bios_revision[1] = bcode[0x13];
- DEBUG3(printk("%s(): read BIOS %d.%d.\n", __func__,
+ DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n",
ha->bios_revision[1], ha->bios_revision[0]));
break;
case ROM_CODE_TYPE_FCODE:
/* Open Firmware standard for PCI (FCode). */
ha->fcode_revision[0] = bcode[0x12];
ha->fcode_revision[1] = bcode[0x13];
- DEBUG3(printk("%s(): read FCODE %d.%d.\n", __func__,
+ DEBUG3(qla_printk(KERN_DEBUG, ha, "read FCODE %d.%d.\n",
ha->fcode_revision[1], ha->fcode_revision[0]));
break;
case ROM_CODE_TYPE_EFI:
/* Extensible Firmware Interface (EFI). */
ha->efi_revision[0] = bcode[0x12];
ha->efi_revision[1] = bcode[0x13];
- DEBUG3(printk("%s(): read EFI %d.%d.\n", __func__,
+ DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n",
ha->efi_revision[1], ha->efi_revision[0]));
break;
default:
- DEBUG2(printk("%s(): Unrecognized code type %x at "
- "pcids %x.\n", __func__, code_type, pcids));
+ DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code "
+ "type %x at pcids %x.\n", code_type, pcids));
break;
}
@@ -2560,7 +2583,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
dcode = mbuf;
- qla24xx_read_flash_data(ha, dcode, ha->flt_region_fw + 4, 4);
+ qla24xx_read_flash_data(vha, dcode, ha->flt_region_fw + 4, 4);
for (i = 0; i < 4; i++)
dcode[i] = be32_to_cpu(dcode[i]);
@@ -2568,8 +2591,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
(dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
dcode[3] == 0)) {
- DEBUG2(printk("%s(): Unrecognized fw version at %x.\n",
- __func__, ha->flt_region_fw));
+ DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw "
+ "revision at %x.\n", ha->flt_region_fw * 4));
} else {
ha->fw_revision[0] = dcode[0];
ha->fw_revision[1] = dcode[1];
@@ -2598,8 +2621,9 @@ qla2xxx_is_vpd_valid(uint8_t *pos, uint8_t *end)
}
int
-qla2xxx_get_vpd_field(scsi_qla_host_t *ha, char *key, char *str, size_t size)
+qla2xxx_get_vpd_field(scsi_qla_host_t *vha, char *key, char *str, size_t size)
{
+ struct qla_hw_data *ha = vha->hw;
uint8_t *pos = ha->vpd;
uint8_t *end = pos + ha->vpd_size;
int len = 0;
@@ -2626,9 +2650,10 @@ qla2xxx_get_vpd_field(scsi_qla_host_t *ha, char *key, char *str, size_t size)
}
static int
-qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata)
+qla2xxx_hw_event_store(scsi_qla_host_t *vha, uint32_t *fdata)
{
uint32_t d[2], faddr;
+ struct qla_hw_data *ha = vha->hw;
/* Locate first empty entry. */
for (;;) {
@@ -2639,7 +2664,7 @@ qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata)
return QLA_MEMORY_ALLOC_FAILED;
}
- qla24xx_read_flash_data(ha, d, ha->hw_event_ptr, 2);
+ qla24xx_read_flash_data(vha, d, ha->hw_event_ptr, 2);
faddr = flash_data_to_access_addr(ha->hw_event_ptr);
ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE;
if (d[0] == __constant_cpu_to_le32(0xffffffff) &&
@@ -2660,12 +2685,12 @@ qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata)
}
int
-qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
+qla2xxx_hw_event_log(scsi_qla_host_t *vha, uint16_t code, uint16_t d1,
uint16_t d2, uint16_t d3)
{
#define QMARK(a, b, c, d) \
cpu_to_le32(LSB(a) << 24 | LSB(b) << 16 | LSB(c) << 8 | LSB(d))
-
+ struct qla_hw_data *ha = vha->hw;
int rval;
uint32_t marker[2], fdata[4];
@@ -2686,7 +2711,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
/* Locate marker. */
ha->hw_event_ptr = ha->flt_region_hw_event;
for (;;) {
- qla24xx_read_flash_data(ha, fdata, ha->hw_event_ptr,
+ qla24xx_read_flash_data(vha, fdata, ha->hw_event_ptr,
4);
if (fdata[0] == __constant_cpu_to_le32(0xffffffff) &&
fdata[1] == __constant_cpu_to_le32(0xffffffff))
@@ -2705,7 +2730,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
}
/* No marker, write it. */
if (!ha->flags.hw_event_marker_found) {
- rval = qla2xxx_hw_event_store(ha, marker);
+ rval = qla2xxx_hw_event_store(vha, marker);
if (rval != QLA_SUCCESS) {
DEBUG2(qla_printk(KERN_WARNING, ha,
"HW event -- Failed marker write=%x.!\n",
@@ -2719,7 +2744,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
/* Store error. */
fdata[0] = cpu_to_le32(code << 16 | d1);
fdata[1] = cpu_to_le32(d2 << 16 | d3);
- rval = qla2xxx_hw_event_store(ha, fdata);
+ rval = qla2xxx_hw_event_store(vha, fdata);
if (rval != QLA_SUCCESS) {
DEBUG2(qla_printk(KERN_WARNING, ha,
"HW event -- Failed error write=%x.!\n",
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index be5e299df528..be22f3a09f8d 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.02.01-k8"
+#define QLA2XXX_VERSION "8.02.03-k1"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 2
-#define QLA_DRIVER_PATCH_VER 1
+#define QLA_DRIVER_PATCH_VER 3
#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index a91a57c57bff..799120fcb9be 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -139,7 +139,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
ha->host_no, cmd->device->channel,
cmd->device->id, cmd->device->lun));
- cmd->result = DID_BUS_BUSY << 16;
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16;
/*
* Mark device missing so that we won't continue to send
@@ -243,7 +243,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
qla4xxx_mark_device_missing(ha, ddb_entry);
- cmd->result = DID_BUS_BUSY << 16;
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16;
break;
case SCS_QUEUE_FULL:
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index de8279ad7d89..eb3a414b189a 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -206,8 +206,7 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
break;
case ISCSI_PARAM_CONN_ADDRESS:
/* TODO: what are the ipv6 bits */
- len = sprintf(buf, "%u.%u.%u.%u\n",
- NIPQUAD(ddb_entry->ip_addr));
+ len = sprintf(buf, "%pI4\n", &ddb_entry->ip_addr);
break;
default:
return -ENOSYS;
@@ -353,7 +352,7 @@ void qla4xxx_mark_device_missing(struct scsi_qla_host *ha,
ha->host_no, ddb_entry->bus, ddb_entry->target,
ddb_entry->fw_ddb_index));
iscsi_block_session(ddb_entry->sess);
- iscsi_conn_error(ddb_entry->conn, ISCSI_ERR_CONN_FAILED);
+ iscsi_conn_error_event(ddb_entry->conn, ISCSI_ERR_CONN_FAILED);
}
static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
@@ -439,7 +438,7 @@ static int qla4xxx_queuecommand(struct scsi_cmnd *cmd,
cmd->result = DID_NO_CONNECT << 16;
goto qc_fail_command;
}
- goto qc_host_busy;
+ return SCSI_MLQUEUE_TARGET_BUSY;
}
if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags))
diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c
index de7b3bc2cbc9..1ad51552d6b1 100644
--- a/drivers/scsi/qlogicfas408.c
+++ b/drivers/scsi/qlogicfas408.c
@@ -23,7 +23,7 @@
Functions as standalone, loadable, and PCMCIA driver, the latter from
Dave Hinds' PCMCIA package.
- Cleaned up 26/10/2002 by Alan Cox <alan@redhat.com> as part of the 2.5
+ Cleaned up 26/10/2002 by Alan Cox <alan@lxorguk.ukuu.org.uk> as part of the 2.5
SCSI driver cleanup and audit. This driver still needs work on the
following
- Non terminating hardware waits
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 2ac3cb2b9081..f8b79d401d58 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -754,8 +754,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
}
spin_unlock_irqrestore(host->host_lock, flags);
if (rtn) {
- scsi_queue_insert(cmd, (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
- rtn : SCSI_MLQUEUE_HOST_BUSY);
+ if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
+ rtn != SCSI_MLQUEUE_TARGET_BUSY)
+ rtn = SCSI_MLQUEUE_HOST_BUSY;
+
+ scsi_queue_insert(cmd, rtn);
+
SCSI_LOG_MLQUEUE(3,
printk("queuecommand : request rejected\n"));
}
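
The rtn handling above tightens the queuecommand contract: only the SCSI_MLQUEUE_*_BUSY codes are passed through, and anything else is coerced to host-busy before the requeue. A sketch of a driver honoring that contract (example_queuecommand and both helper predicates are hypothetical):

static int example_queuecommand(struct scsi_cmnd *cmd,
				void (*done)(struct scsi_cmnd *))
{
	if (example_port_logged_out(cmd))	/* hypothetical helper */
		/* stall only this target; siblings keep dispatching */
		return SCSI_MLQUEUE_TARGET_BUSY;
	if (example_adapter_full(cmd))		/* hypothetical helper */
		/* stall the whole host until something completes */
		return SCSI_MLQUEUE_HOST_BUSY;
	/* ... hand the command to hardware; call done(cmd) later ... */
	return 0;
}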
@@ -800,6 +804,7 @@ static struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
void scsi_finish_command(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
+ struct scsi_target *starget = scsi_target(sdev);
struct Scsi_Host *shost = sdev->host;
struct scsi_driver *drv;
unsigned int good_bytes;
@@ -815,6 +820,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
* XXX(hch): What about locking?
*/
shost->host_blocked = 0;
+ starget->target_blocked = 0;
sdev->device_blocked = 0;
/*
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index fecefa05cb62..381838ebd460 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -136,7 +136,7 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
else
eh_timed_out = NULL;
- if (eh_timed_out)
+ if (eh_timed_out) {
rtn = eh_timed_out(scmd);
switch (rtn) {
case BLK_EH_NOT_HANDLED:
@@ -144,6 +144,7 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
default:
return rtn;
}
+ }
if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
scmd->result |= DID_TIME_OUT << 16;
@@ -932,8 +933,7 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
int i, rtn = NEEDS_RETRY;
for (i = 0; rtn == NEEDS_RETRY && i < 2; i++)
- rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
- scmd->device->timeout, 0);
+ rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, scmd->device->request_queue->rq_timeout, 0);
if (rtn == SUCCESS)
return 0;
@@ -1065,10 +1065,10 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
struct list_head *done_q)
{
struct scsi_cmnd *scmd, *tgtr_scmd, *next;
- unsigned int id;
+ unsigned int id = 0;
int rtn;
- for (id = 0; id <= shost->max_id; id++) {
+ do {
tgtr_scmd = NULL;
list_for_each_entry(scmd, work_q, eh_entry) {
if (id == scmd_id(scmd)) {
@@ -1076,8 +1076,18 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
break;
}
}
+ if (!tgtr_scmd) {
+ /* not one exactly equal; find the next highest */
+ list_for_each_entry(scmd, work_q, eh_entry) {
+ if (scmd_id(scmd) > id &&
+ (!tgtr_scmd ||
+ scmd_id(tgtr_scmd) > scmd_id(scmd)))
+ tgtr_scmd = scmd;
+ }
+ }
if (!tgtr_scmd)
- continue;
+ /* no more commands, that's it */
+ break;
+
+ id = scmd_id(tgtr_scmd);
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending target reset "
"to target %d\n",
@@ -1096,7 +1106,8 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
" failed target: "
"%d\n",
current->comm, id));
- }
+ id++;
+ } while (id != 0);
return list_empty(work_q);
}
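
A worked pass through the rewritten loop above, assuming work_q holds commands for target ids {2, 5, 9} (a sketch):

/*
 *   id = 0  -> no exact match; next highest is 2 -> reset target 2,
 *              id becomes 3
 *   id = 3  -> no exact match; next highest is 5 -> reset target 5,
 *              id becomes 6
 *   id = 6  -> no exact match; next highest is 9 -> reset target 9,
 *              id becomes 10
 *   id = 10 -> nothing equal or higher -> break
 *
 * The "while (id != 0)" guard only matters when the highest target
 * id is UINT_MAX, where id++ wraps to 0 and must end the loop.
 */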
@@ -1219,6 +1230,40 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q,
}
/**
+ * scsi_noretry_cmd - determine if command should be failed fast
+ * @scmd: SCSI cmd to examine.
+ */
+int scsi_noretry_cmd(struct scsi_cmnd *scmd)
+{
+ switch (host_byte(scmd->result)) {
+ case DID_OK:
+ break;
+ case DID_BUS_BUSY:
+ return blk_failfast_transport(scmd->request);
+ case DID_PARITY:
+ return blk_failfast_dev(scmd->request);
+ case DID_ERROR:
+ if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
+ status_byte(scmd->result) == RESERVATION_CONFLICT)
+ return 0;
+ /* fall through */
+ case DID_SOFT_ERROR:
+ return blk_failfast_driver(scmd->request);
+ }
+
+ switch (status_byte(scmd->result)) {
+ case CHECK_CONDITION:
+ /*
+ * assume caller has checked sense and determined
+ * the check condition was retryable.
+ */
+ return blk_failfast_dev(scmd->request);
+ }
+
+ return 0;
+}
+
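
Summarizing the mapping above (a sketch; the blk_failfast_* helpers test the corresponding REQ_FAILFAST_* flags on the request):

/*
 *   DID_BUS_BUSY                -> fail fast if REQ_FAILFAST_TRANSPORT
 *   DID_PARITY                  -> fail fast if REQ_FAILFAST_DEV
 *   DID_ERROR (non-reservation),
 *   DID_SOFT_ERROR              -> fail fast if REQ_FAILFAST_DRIVER
 *   retryable CHECK_CONDITION   -> fail fast if REQ_FAILFAST_DEV
 *   everything else             -> 0 (normal retry logic applies)
 */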
+/**
* scsi_decide_disposition - Disposition a cmd on return from LLD.
* @scmd: SCSI cmd to examine.
*
@@ -1290,7 +1335,21 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
case DID_REQUEUE:
return ADD_TO_MLQUEUE;
-
+ case DID_TRANSPORT_DISRUPTED:
+ /*
+ * LLD/transport was disrupted during processing of the IO.
+ * The transport class is now blocked/blocking,
+ * and the transport will decide what to do with the IO
+ * based on its timers and recovery capabilities if
+ * there are enough retries.
+ */
+ goto maybe_retry;
+ case DID_TRANSPORT_FAILFAST:
+ /*
+ * The transport decided to failfast the IO (most likely
+ * the fast io fail tmo fired), so send IO directly upwards.
+ */
+ return SUCCESS;
case DID_ERROR:
if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
status_byte(scmd->result) == RESERVATION_CONFLICT)
@@ -1347,8 +1406,9 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
return ADD_TO_MLQUEUE;
case GOOD:
case COMMAND_TERMINATED:
- case TASK_ABORTED:
return SUCCESS;
+ case TASK_ABORTED:
+ goto maybe_retry;
case CHECK_CONDITION:
rtn = scsi_check_sense(scmd);
if (rtn == NEEDS_RETRY)
@@ -1383,7 +1443,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
* even if the request is marked fast fail, we still requeue
* for queue congestion conditions (QUEUE_FULL or BUSY) */
if ((++scmd->retries) <= scmd->allowed
- && !blk_noretry_request(scmd->request)) {
+ && !scsi_noretry_cmd(scmd)) {
return NEEDS_RETRY;
} else {
/*
@@ -1508,7 +1568,7 @@ void scsi_eh_flush_done_q(struct list_head *done_q)
list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
list_del_init(&scmd->eh_entry);
if (scsi_device_online(scmd->device) &&
- !blk_noretry_request(scmd->request) &&
+ !scsi_noretry_cmd(scmd) &&
(++scmd->retries <= scmd->allowed)) {
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush"
" retry cmd: %p\n",
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 28b19ef26309..2ae4f8fc5831 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -94,7 +94,7 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
SCSI_LOG_IOCTL(1, printk("Trying ioctl with scsi command %d\n", *cmd));
result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0,
- &sshdr, timeout, retries);
+ &sshdr, timeout, retries, NULL);
SCSI_LOG_IOCTL(2, printk("Ioctl returned 0x%x\n", result));
@@ -237,7 +237,7 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
case SCSI_IOCTL_SEND_COMMAND:
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
- return sg_scsi_ioctl(NULL, sdev->request_queue, NULL, arg);
+ return sg_scsi_ioctl(sdev->request_queue, NULL, 0, arg);
case SCSI_IOCTL_DOORLOCK:
return scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
case SCSI_IOCTL_DOORUNLOCK:
@@ -270,21 +270,21 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
EXPORT_SYMBOL(scsi_ioctl);
/**
- * scsi_nonblock_ioctl() - Handle SG_SCSI_RESET
+ * scsi_nonblockable_ioctl() - Handle SG_SCSI_RESET
* @sdev: scsi device receiving ioctl
 * @cmd: Must be SG_SCSI_RESET
* @arg: pointer to int containing SG_SCSI_RESET_{DEVICE,BUS,HOST}
- * @filp: either NULL or a &struct file which must have the O_NONBLOCK flag.
+ * @ndelay: file mode O_NDELAY flag
*/
int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd,
- void __user *arg, struct file *filp)
+ void __user *arg, int ndelay)
{
int val, result;
 /* The first set of ioctls may be executed even if we're doing
* error processing, as long as the device was opened
* non-blocking */
- if (filp && (filp->f_flags & O_NONBLOCK)) {
+ if (ndelay) {
if (scsi_host_in_recovery(sdev->host))
return -ENODEV;
} else if (!scsi_block_when_processing_errors(sdev))
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 98ee55ced592..f2f51e0333eb 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -114,6 +114,7 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
struct Scsi_Host *host = cmd->device->host;
struct scsi_device *device = cmd->device;
+ struct scsi_target *starget = scsi_target(device);
struct request_queue *q = device->request_queue;
unsigned long flags;
@@ -133,10 +134,17 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
* if a command is requeued with no other commands outstanding
* either for the device or for the host.
*/
- if (reason == SCSI_MLQUEUE_HOST_BUSY)
+ switch (reason) {
+ case SCSI_MLQUEUE_HOST_BUSY:
host->host_blocked = host->max_host_blocked;
- else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
+ break;
+ case SCSI_MLQUEUE_DEVICE_BUSY:
device->device_blocked = device->max_device_blocked;
+ break;
+ case SCSI_MLQUEUE_TARGET_BUSY:
+ starget->target_blocked = starget->max_target_blocked;
+ break;
+ }
/*
* Decrement the counters, since these commands are no longer
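The new SCSI_MLQUEUE_TARGET_BUSY reason lets a driver throttle a single target without blocking the whole host the way HOST_BUSY does. A sketch of a queuecommand path using it; demo_target_full() is a hypothetical driver-private check:

        static int demo_queuecommand(struct scsi_cmnd *cmd,
                                     void (*done)(struct scsi_cmnd *))
        {
                struct scsi_target *starget = scsi_target(cmd->device);

                /* Requeue; scsi_queue_insert() will set target_blocked. */
                if (demo_target_full(starget))
                        return SCSI_MLQUEUE_TARGET_BUSY;

                /* ... issue the command to hardware, then done(cmd) ... */
                return 0;
        }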
@@ -175,13 +183,15 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
* @timeout: request timeout in seconds
* @retries: number of times to retry request
 * @flags: flags to OR into the request flags;
+ * @resid: optional residual length
*
* returns the req->errors value which is the scsi_cmnd result
* field.
*/
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
- unsigned char *sense, int timeout, int retries, int flags)
+ unsigned char *sense, int timeout, int retries, int flags,
+ int *resid)
{
struct request *req;
int write = (data_direction == DMA_TO_DEVICE);
@@ -216,6 +226,8 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
if (unlikely(req->data_len > 0 && req->data_len <= bufflen))
memset(buffer + (bufflen - req->data_len), 0, req->data_len);
+ if (resid)
+ *resid = req->data_len;
ret = req->errors;
out:
blk_put_request(req);
@@ -227,7 +239,8 @@ EXPORT_SYMBOL(scsi_execute);
int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
- struct scsi_sense_hdr *sshdr, int timeout, int retries)
+ struct scsi_sense_hdr *sshdr, int timeout, int retries,
+ int *resid)
{
char *sense = NULL;
int result;
@@ -238,7 +251,7 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
return DRIVER_ERROR << 24;
}
result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
- sense, timeout, retries, 0);
+ sense, timeout, retries, 0, resid);
if (sshdr)
scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
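Callers that care about short transfers can now read the residual directly instead of digging it out of the request. A sketch, assuming an existing sdev and an INQUIRY into a 96-byte buffer:

        unsigned char inq[96];
        unsigned char cmd[6] = { INQUIRY, 0, 0, 0, sizeof(inq), 0 };
        struct scsi_sense_hdr sshdr;
        int resid, result;

        result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, inq,
                                  sizeof(inq), &sshdr, 5 * HZ, 3, &resid);
        if (result == 0) {
                int got = sizeof(inq) - resid;  /* bytes actually transferred */
                /* ... */
        }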
@@ -460,10 +473,12 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
void scsi_device_unbusy(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
+ struct scsi_target *starget = scsi_target(sdev);
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
shost->host_busy--;
+ starget->target_busy--;
if (unlikely(scsi_host_in_recovery(shost) &&
(shost->host_failed || shost->host_eh_scheduled)))
scsi_eh_wakeup(shost);
@@ -519,6 +534,30 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
spin_unlock_irqrestore(shost->host_lock, flags);
}
+static inline int scsi_device_is_busy(struct scsi_device *sdev)
+{
+ if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
+ return 1;
+
+ return 0;
+}
+
+static inline int scsi_target_is_busy(struct scsi_target *starget)
+{
+ return ((starget->can_queue > 0 &&
+ starget->target_busy >= starget->can_queue) ||
+ starget->target_blocked);
+}
+
+static inline int scsi_host_is_busy(struct Scsi_Host *shost)
+{
+ if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
+ shost->host_blocked || shost->host_self_blocked)
+ return 1;
+
+ return 0;
+}
+
/*
* Function: scsi_run_queue()
*
@@ -535,17 +574,16 @@ static void scsi_run_queue(struct request_queue *q)
{
struct scsi_device *sdev = q->queuedata;
struct Scsi_Host *shost = sdev->host;
+ LIST_HEAD(starved_list);
unsigned long flags;
if (scsi_target(sdev)->single_lun)
scsi_single_lun_run(sdev);
spin_lock_irqsave(shost->host_lock, flags);
- while (!list_empty(&shost->starved_list) &&
- !shost->host_blocked && !shost->host_self_blocked &&
- !((shost->can_queue > 0) &&
- (shost->host_busy >= shost->can_queue))) {
+ list_splice_init(&shost->starved_list, &starved_list);
+ while (!list_empty(&starved_list)) {
int flagset;
/*
@@ -558,9 +596,18 @@ static void scsi_run_queue(struct request_queue *q)
* scsi_request_fn must get the host_lock before checking
* or modifying starved_list or starved_entry.
*/
- sdev = list_entry(shost->starved_list.next,
- struct scsi_device, starved_entry);
+ if (scsi_host_is_busy(shost))
+ break;
+
+ sdev = list_entry(starved_list.next,
+ struct scsi_device, starved_entry);
list_del_init(&sdev->starved_entry);
+ if (scsi_target_is_busy(scsi_target(sdev))) {
+ list_move_tail(&sdev->starved_entry,
+ &shost->starved_list);
+ continue;
+ }
+
spin_unlock(shost->host_lock);
spin_lock(sdev->request_queue->queue_lock);
@@ -575,14 +622,9 @@ static void scsi_run_queue(struct request_queue *q)
spin_unlock(sdev->request_queue->queue_lock);
spin_lock(shost->host_lock);
- if (unlikely(!list_empty(&sdev->starved_entry)))
- /*
- * sdev lost a race, and was put back on the
- * starved list. This is unlikely but without this
- * in theory we could loop forever.
- */
- break;
}
+ /* put any unprocessed entries back */
+ list_splice(&starved_list, &shost->starved_list);
spin_unlock_irqrestore(shost->host_lock, flags);
blk_run_queue(q);
@@ -611,8 +653,8 @@ static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
struct request *req = cmd->request;
unsigned long flags;
- scsi_unprep_request(req);
spin_lock_irqsave(q->queue_lock, flags);
+ scsi_unprep_request(req);
blk_requeue_request(q, req);
spin_unlock_irqrestore(q->queue_lock, flags);
@@ -681,7 +723,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
leftover = req->data_len;
 /* kill remainder if no retries */
- if (error && blk_noretry_request(req))
+ if (error && scsi_noretry_cmd(cmd))
blk_end_request(req, error, leftover);
else {
if (requeue) {
@@ -838,16 +880,24 @@ static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
* (the normal case for most drivers), we don't need
* the logic to deal with cleaning up afterwards.
*
- * We must do one of several things here:
+ * We must call scsi_end_request(). This will finish off
+ * the specified number of sectors. If we are done, the
+ * command block will be released and the queue function
+ * will be goosed. If we are not done then we have to
+ * figure out what to do next:
+ *
+ * a) We can call scsi_requeue_command(). The request
+ * will be unprepared and put back on the queue. Then
+ * a new command will be created for it. This should
+ * be used if we made forward progress, or if we want
+ * to switch from READ(10) to READ(6) for example.
*
- * a) Call scsi_end_request. This will finish off the
- * specified number of sectors. If we are done, the
- * command block will be released, and the queue
- * function will be goosed. If we are not done, then
- * scsi_end_request will directly goose the queue.
+ * b) We can call scsi_queue_insert(). The request will
+ * be put back on the queue and retried using the same
+ * command as before, possibly after a delay.
*
- * b) We can just use scsi_requeue_command() here. This would
- * be used if we just wanted to retry, for example.
+ * c) We can call blk_end_request() with -EIO to fail
+ * the remainder of the request.
*/
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
@@ -859,6 +909,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
struct scsi_sense_hdr sshdr;
int sense_valid = 0;
int sense_deferred = 0;
+ enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
+ ACTION_DELAYED_RETRY} action;
+ char *description = NULL;
if (result) {
sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
@@ -910,10 +963,13 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
return;
this_count = blk_rq_bytes(req);
- /* good_bytes = 0, or (inclusive) there were leftovers and
- * result = 0, so scsi_end_request couldn't retry.
- */
- if (sense_valid && !sense_deferred) {
+ if (host_byte(result) == DID_RESET) {
+ /* Third party bus reset or reset for error recovery
+ * reasons. Just retry the command and see what
+ * happens.
+ */
+ action = ACTION_RETRY;
+ } else if (sense_valid && !sense_deferred) {
switch (sshdr.sense_key) {
case UNIT_ATTENTION:
if (cmd->device->removable) {
@@ -921,16 +977,15 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* and quietly refuse further access.
*/
cmd->device->changed = 1;
- scsi_end_request(cmd, -EIO, this_count, 1);
- return;
+ description = "Media Changed";
+ action = ACTION_FAIL;
} else {
/* Must have been a power glitch, or a
* bus reset. Could not have been a
* media change, so we just retry the
- * request and see what happens.
+ * command and see what happens.
*/
- scsi_requeue_command(q, cmd);
- return;
+ action = ACTION_RETRY;
}
break;
case ILLEGAL_REQUEST:
@@ -946,21 +1001,18 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
(cmd->cmnd[0] == READ_10 ||
cmd->cmnd[0] == WRITE_10)) {
+ /* This will issue a new 6-byte command. */
cmd->device->use_10_for_rw = 0;
- /* This will cause a retry with a
- * 6-byte command.
- */
- scsi_requeue_command(q, cmd);
- } else if (sshdr.asc == 0x10) /* DIX */
- scsi_end_request(cmd, -EIO, this_count, 0);
- else
- scsi_end_request(cmd, -EIO, this_count, 1);
- return;
+ action = ACTION_REPREP;
+ } else
+ action = ACTION_FAIL;
+ break;
case ABORTED_COMMAND:
if (sshdr.asc == 0x10) { /* DIF */
- scsi_end_request(cmd, -EIO, this_count, 0);
- return;
- }
+ action = ACTION_FAIL;
+ description = "Data Integrity Failure";
+ } else
+ action = ACTION_RETRY;
break;
case NOT_READY:
/* If the device is in the process of becoming
@@ -975,49 +1027,57 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
case 0x07: /* operation in progress */
case 0x08: /* Long write in progress */
case 0x09: /* self test in progress */
- scsi_requeue_command(q, cmd);
- return;
- default:
+ action = ACTION_DELAYED_RETRY;
break;
}
+ } else {
+ description = "Device not ready";
+ action = ACTION_FAIL;
}
- if (!(req->cmd_flags & REQ_QUIET))
- scsi_cmd_print_sense_hdr(cmd,
- "Device not ready",
- &sshdr);
-
- scsi_end_request(cmd, -EIO, this_count, 1);
- return;
+ break;
case VOLUME_OVERFLOW:
- if (!(req->cmd_flags & REQ_QUIET)) {
- scmd_printk(KERN_INFO, cmd,
- "Volume overflow, CDB: ");
- __scsi_print_command(cmd->cmnd);
- scsi_print_sense("", cmd);
- }
/* See SSC3rXX or current. */
- scsi_end_request(cmd, -EIO, this_count, 1);
- return;
+ action = ACTION_FAIL;
+ break;
default:
+ description = "Unhandled sense code";
+ action = ACTION_FAIL;
break;
}
+ } else {
+ description = "Unhandled error code";
+ action = ACTION_FAIL;
}
- if (host_byte(result) == DID_RESET) {
- /* Third party bus reset or reset for error recovery
- * reasons. Just retry the request and see what
- * happens.
- */
- scsi_requeue_command(q, cmd);
- return;
- }
- if (result) {
+
+ switch (action) {
+ case ACTION_FAIL:
+ /* Give up and fail the remainder of the request */
if (!(req->cmd_flags & REQ_QUIET)) {
+ if (description)
+ scmd_printk(KERN_INFO, cmd, "%s",
+ description);
scsi_print_result(cmd);
if (driver_byte(result) & DRIVER_SENSE)
scsi_print_sense("", cmd);
}
+ blk_end_request(req, -EIO, blk_rq_bytes(req));
+ scsi_next_command(cmd);
+ break;
+ case ACTION_REPREP:
+ /* Unprep the request and put it back at the head of the queue.
+ * A new command will be prepared and issued.
+ */
+ scsi_requeue_command(q, cmd);
+ break;
+ case ACTION_RETRY:
+ /* Retry the same command immediately */
+ scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
+ break;
+ case ACTION_DELAYED_RETRY:
+ /* Retry the same command after a delay */
+ scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
+ break;
}
- scsi_end_request(cmd, -EIO, this_count, !result);
}
static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
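For reference, the four dispositions introduced in the hunk above collapse to the following (a summary of the code, not new policy):

        /*
         * ACTION_FAIL           blk_end_request(req, -EIO, ...) + scsi_next_command()
         * ACTION_REPREP         scsi_requeue_command() - a fresh CDB is prepared
         * ACTION_RETRY          scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY)
         * ACTION_DELAYED_RETRY  scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY)
         */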
@@ -1323,8 +1383,6 @@ int scsi_prep_fn(struct request_queue *q, struct request *req)
static inline int scsi_dev_queue_ready(struct request_queue *q,
struct scsi_device *sdev)
{
- if (sdev->device_busy >= sdev->queue_depth)
- return 0;
if (sdev->device_busy == 0 && sdev->device_blocked) {
/*
* unblock after device_blocked iterates to zero
@@ -1338,12 +1396,58 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
return 0;
}
}
- if (sdev->device_blocked)
+ if (scsi_device_is_busy(sdev))
return 0;
return 1;
}
+
+/*
+ * scsi_target_queue_ready: checks whether we can send commands to the target
+ * @sdev: scsi device on starget to check.
+ *
+ * Called with the host lock held.
+ */
+static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
+ struct scsi_device *sdev)
+{
+ struct scsi_target *starget = scsi_target(sdev);
+
+ if (starget->single_lun) {
+ if (starget->starget_sdev_user &&
+ starget->starget_sdev_user != sdev)
+ return 0;
+ starget->starget_sdev_user = sdev;
+ }
+
+ if (starget->target_busy == 0 && starget->target_blocked) {
+ /*
+ * unblock after target_blocked iterates to zero
+ */
+ if (--starget->target_blocked == 0) {
+ SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
+ "unblocking target at zero depth\n"));
+ } else {
+ blk_plug_device(sdev->request_queue);
+ return 0;
+ }
+ }
+
+ if (scsi_target_is_busy(starget)) {
+ if (list_empty(&sdev->starved_entry)) {
+ list_add_tail(&sdev->starved_entry,
+ &shost->starved_list);
+ return 0;
+ }
+ }
+
+ /* We're OK to process the command, so we can't be starved */
+ if (!list_empty(&sdev->starved_entry))
+ list_del_init(&sdev->starved_entry);
+ return 1;
+}
+
/*
* scsi_host_queue_ready: if we can send requests to shost, return 1 else
* return 0. We must end up running the queue again whenever 0 is
@@ -1369,8 +1473,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
return 0;
}
}
- if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
- shost->host_blocked || shost->host_self_blocked) {
+ if (scsi_host_is_busy(shost)) {
if (list_empty(&sdev->starved_entry))
list_add_tail(&sdev->starved_entry, &shost->starved_list);
return 0;
@@ -1384,12 +1487,44 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
}
/*
+ * Busy state exporting function for request stacking drivers.
+ *
+ * For efficiency, no lock is taken to check the busy state of
+ * shost/starget/sdev, since the returned value is only a hint and
+ * may change right after a request stacking driver calls this
+ * function, whether or not a lock is held.
+ *
+ * When scsi can't dispatch I/Os anymore and needs to kill I/Os
+ * (e.g. !sdev), scsi needs to return 'not busy'.
+ * Otherwise, request stacking drivers may hold requests forever.
+ */
+static int scsi_lld_busy(struct request_queue *q)
+{
+ struct scsi_device *sdev = q->queuedata;
+ struct Scsi_Host *shost;
+ struct scsi_target *starget;
+
+ if (!sdev)
+ return 0;
+
+ shost = sdev->host;
+ starget = scsi_target(sdev);
+
+ if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
+ scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
+ return 1;
+
+ return 0;
+}
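The hook is wired up via blk_queue_lld_busy() a few hunks below and consumed through blk_lld_busy(). A sketch of how a request-stacking driver (request-based dm, say) might use it, with bottom_q standing for the underlying SCSI device's queue:

        /* Sketch: defer dispatch while the lower device reports busy. */
        static int demo_dispatch(struct request_queue *bottom_q,
                                 struct request *rq)
        {
                if (blk_lld_busy(bottom_q))
                        return 1;       /* leave rq queued; retry later */

                /* ... clone rq and submit it to bottom_q ... */
                return 0;
        }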
+
+/*
* Kill a request for a dead device
*/
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
struct scsi_cmnd *cmd = req->special;
struct scsi_device *sdev = cmd->device;
+ struct scsi_target *starget = scsi_target(sdev);
struct Scsi_Host *shost = sdev->host;
blkdev_dequeue_request(req);
@@ -1413,6 +1548,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
spin_unlock(sdev->request_queue->queue_lock);
spin_lock(shost->host_lock);
shost->host_busy++;
+ starget->target_busy++;
spin_unlock(shost->host_lock);
spin_lock(sdev->request_queue->queue_lock);
@@ -1550,14 +1686,13 @@ static void scsi_request_fn(struct request_queue *q)
goto not_ready;
}
+ if (!scsi_target_queue_ready(shost, sdev))
+ goto not_ready;
+
if (!scsi_host_queue_ready(q, shost, sdev))
goto not_ready;
- if (scsi_target(sdev)->single_lun) {
- if (scsi_target(sdev)->starget_sdev_user &&
- scsi_target(sdev)->starget_sdev_user != sdev)
- goto not_ready;
- scsi_target(sdev)->starget_sdev_user = sdev;
- }
+
+ scsi_target(sdev)->target_busy++;
shost->host_busy++;
/*
@@ -1685,6 +1820,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
blk_queue_prep_rq(q, scsi_prep_fn);
blk_queue_softirq_done(q, scsi_softirq_done);
blk_queue_rq_timed_out(q, scsi_times_out);
+ blk_queue_lld_busy(q, scsi_lld_busy);
return q;
}
@@ -1885,7 +2021,7 @@ scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
}
ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
- sshdr, timeout, retries);
+ sshdr, timeout, retries, NULL);
kfree(real_buffer);
return ret;
}
@@ -1950,7 +2086,7 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
memset(buffer, 0, len);
result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
- sshdr, timeout, retries);
+ sshdr, timeout, retries, NULL);
/* This code looks awful: what it's doing is making sure an
* ILLEGAL REQUEST sense return identifies the actual command
@@ -2032,23 +2168,22 @@ scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
/* try to eat the UNIT_ATTENTION if there are enough retries */
do {
result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
- timeout, retries);
- } while ((driver_byte(result) & DRIVER_SENSE) &&
- sshdr && sshdr->sense_key == UNIT_ATTENTION &&
- --retries);
+ timeout, retries, NULL);
+ if (sdev->removable && scsi_sense_valid(sshdr) &&
+ sshdr->sense_key == UNIT_ATTENTION)
+ sdev->changed = 1;
+ } while (scsi_sense_valid(sshdr) &&
+ sshdr->sense_key == UNIT_ATTENTION && --retries);
if (!sshdr)
/* could not allocate sense buffer, so can't process it */
return result;
- if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {
-
- if ((scsi_sense_valid(sshdr)) &&
- ((sshdr->sense_key == UNIT_ATTENTION) ||
- (sshdr->sense_key == NOT_READY))) {
- sdev->changed = 1;
- result = 0;
- }
+ if (sdev->removable && scsi_sense_valid(sshdr) &&
+ (sshdr->sense_key == UNIT_ATTENTION ||
+ sshdr->sense_key == NOT_READY)) {
+ sdev->changed = 1;
+ result = 0;
}
if (!sshdr_external)
kfree(sshdr);
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index b37e133de805..723fdecd91bd 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -205,16 +205,13 @@ static struct notifier_block scsi_netlink_notifier = {
};
-/**
+/*
* GENERIC SCSI transport receive and event handlers
- **/
+ */
/**
- * scsi_generic_msg_handler - receive message handler for GENERIC transport
- * messages
- *
+ * scsi_generic_msg_handler - receive message handler for GENERIC transport messages
* @skb: socket receive buffer
- *
**/
static int
scsi_generic_msg_handler(struct sk_buff *skb)
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 6cddd5dd323c..e1850904ff73 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -59,6 +59,7 @@ void scsi_eh_ready_devs(struct Scsi_Host *shost,
struct list_head *done_q);
int scsi_eh_get_sense(struct list_head *work_q,
struct list_head *done_q);
+int scsi_noretry_cmd(struct scsi_cmnd *scmd);
/* scsi_lib.c */
extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 334862e26a1b..18486b51668d 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -216,7 +216,7 @@ static void scsi_unlock_floptical(struct scsi_device *sdev,
scsi_cmd[4] = 0x2a; /* size */
scsi_cmd[5] = 0;
scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, result, 0x2a, NULL,
- SCSI_TIMEOUT, 3);
+ SCSI_TIMEOUT, 3, NULL);
}
/**
@@ -419,6 +419,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
dev->type = &scsi_target_type;
starget->id = id;
starget->channel = channel;
+ starget->can_queue = 0;
INIT_LIST_HEAD(&starget->siblings);
INIT_LIST_HEAD(&starget->devices);
starget->state = STARGET_CREATED;
@@ -572,6 +573,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
/* Each pass gets up to three chances to ignore Unit Attention */
for (count = 0; count < 3; ++count) {
+ int resid;
+
memset(scsi_cmd, 0, 6);
scsi_cmd[0] = INQUIRY;
scsi_cmd[4] = (unsigned char) try_inquiry_len;
@@ -580,7 +583,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
inq_result, try_inquiry_len, &sshdr,
- HZ / 2 + HZ * scsi_inq_timeout, 3);
+ HZ / 2 + HZ * scsi_inq_timeout, 3,
+ &resid);
SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: INQUIRY %s "
"with code 0x%x\n",
@@ -601,6 +605,14 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
(sshdr.ascq == 0))
continue;
}
+ } else {
+ /*
+ * if nothing was transferred, we try
+ * again. It's a workaround for some USB
+ * devices.
+ */
+ if (resid == try_inquiry_len)
+ continue;
}
break;
}
@@ -1389,7 +1401,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
lun_data, length, &sshdr,
- SCSI_TIMEOUT + 4 * HZ, 3);
+ SCSI_TIMEOUT + 4 * HZ, 3, NULL);
SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: REPORT LUNS"
" %s (try %d) result 0x%x\n", result
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index d5f7653bb94b..062304de4854 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -2133,8 +2133,7 @@ fc_attach_transport(struct fc_function_template *ft)
SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles);
SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_state);
SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id);
- if (ft->terminate_rport_io)
- SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo);
+ SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo);
BUG_ON(count > FC_RPORT_NUM_ATTRS);
@@ -2328,6 +2327,22 @@ fc_remove_host(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(fc_remove_host);
+static void fc_terminate_rport_io(struct fc_rport *rport)
+{
+ struct Scsi_Host *shost = rport_to_shost(rport);
+ struct fc_internal *i = to_fc_internal(shost->transportt);
+
+ /* Involve the LLDD if possible to terminate all io on the rport. */
+ if (i->f->terminate_rport_io)
+ i->f->terminate_rport_io(rport);
+
+ /*
+ * must unblock to flush queued IO. The caller will have set
+ * the port_state or flags, so that fc_remote_port_chkready will
+ * fail IO.
+ */
+ scsi_target_unblock(&rport->dev);
+}
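Once FC_RPORT_FAST_FAIL_TIMEDOUT is set and the target is unblocked, queued and newly issued I/O is failed by fc_remote_port_chkready() rather than by the LLDD. The blocked-port branch of that check looks roughly like this (condensed from the header change that accompanies this series):

        case FC_PORTSTATE_BLOCKED:
                if (rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)
                        result = DID_TRANSPORT_FAILFAST << 16; /* fail fast */
                else
                        result = DID_IMM_RETRY << 16;  /* wait out dev_loss_tmo */
                break;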
/**
 * fc_starget_delete - called to delete the scsi descendants of an rport
@@ -2340,13 +2355,8 @@ fc_starget_delete(struct work_struct *work)
{
struct fc_rport *rport =
container_of(work, struct fc_rport, stgt_delete_work);
- struct Scsi_Host *shost = rport_to_shost(rport);
- struct fc_internal *i = to_fc_internal(shost->transportt);
-
- /* Involve the LLDD if possible to terminate all io on the rport. */
- if (i->f->terminate_rport_io)
- i->f->terminate_rport_io(rport);
+ fc_terminate_rport_io(rport);
scsi_remove_target(&rport->dev);
}
@@ -2372,10 +2382,7 @@ fc_rport_final_delete(struct work_struct *work)
if (rport->flags & FC_RPORT_SCAN_PENDING)
scsi_flush_work(shost);
- /* involve the LLDD to terminate all pending i/o */
- if (i->f->terminate_rport_io)
- i->f->terminate_rport_io(rport);
-
+ fc_terminate_rport_io(rport);
/*
* Cancel any outstanding timers. These should really exist
* only when rmmod'ing the LLDD and we're asking for
@@ -2639,7 +2646,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
spin_lock_irqsave(shost->host_lock, flags);
- rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
+ rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
+ FC_RPORT_DEVLOSS_PENDING);
/* if target, initiate a scan */
if (rport->scsi_target_id != -1) {
@@ -2702,6 +2710,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
rport->port_id = ids->port_id;
rport->roles = ids->roles;
rport->port_state = FC_PORTSTATE_ONLINE;
+ rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
if (fci->f->dd_fcrport_size)
memset(rport->dd_data, 0,
@@ -2784,7 +2793,6 @@ void
fc_remote_port_delete(struct fc_rport *rport)
{
struct Scsi_Host *shost = rport_to_shost(rport);
- struct fc_internal *i = to_fc_internal(shost->transportt);
int timeout = rport->dev_loss_tmo;
unsigned long flags;
@@ -2830,7 +2838,7 @@ fc_remote_port_delete(struct fc_rport *rport)
/* see if we need to kill io faster than waiting for device loss */
if ((rport->fast_io_fail_tmo != -1) &&
- (rport->fast_io_fail_tmo < timeout) && (i->f->terminate_rport_io))
+ (rport->fast_io_fail_tmo < timeout))
fc_queue_devloss_work(shost, &rport->fail_io_work,
rport->fast_io_fail_tmo * HZ);
@@ -2906,7 +2914,8 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
fc_flush_devloss(shost);
spin_lock_irqsave(shost->host_lock, flags);
- rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
+ rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
+ FC_RPORT_DEVLOSS_PENDING);
spin_unlock_irqrestore(shost->host_lock, flags);
/* ensure any stgt delete functions are done */
@@ -3001,6 +3010,17 @@ fc_timeout_deleted_rport(struct work_struct *work)
rport->supported_classes = FC_COS_UNSPECIFIED;
rport->roles = FC_PORT_ROLE_UNKNOWN;
rport->port_state = FC_PORTSTATE_NOTPRESENT;
+ rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
+
+ /*
+ * Pre-emptively kill I/O rather than waiting for the work queue
+ * item to tear down the starget. (FCOE libFC folks prefer this
+ * and to have the rport_port_id still set when it's done).
+ */
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ fc_terminate_rport_io(rport);
+
+ BUG_ON(rport->port_state != FC_PORTSTATE_NOTPRESENT);
 /* remove the identifiers that aren't used in the consistent binding */
switch (fc_host->tgtid_bind_type) {
@@ -3025,9 +3045,6 @@ fc_timeout_deleted_rport(struct work_struct *work)
* went away and didn't come back - we'll remove
* all attached scsi devices.
*/
- spin_unlock_irqrestore(shost->host_lock, flags);
-
- scsi_target_unblock(&rport->dev);
fc_queue_work(shost, &rport->stgt_delete_work);
}
@@ -3043,13 +3060,12 @@ fc_timeout_fail_rport_io(struct work_struct *work)
{
struct fc_rport *rport =
container_of(work, struct fc_rport, fail_io_work.work);
- struct Scsi_Host *shost = rport_to_shost(rport);
- struct fc_internal *i = to_fc_internal(shost->transportt);
if (rport->port_state != FC_PORTSTATE_BLOCKED)
return;
- i->f->terminate_rport_io(rport);
+ rport->flags |= FC_RPORT_FAST_FAIL_TIMEDOUT;
+ fc_terminate_rport_io(rport);
}
/**
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 0ce5f7cdfe2a..4a803ebaf508 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -138,7 +138,7 @@ static ssize_t
show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
- return sprintf(buf, "%u\n", ep->id);
+ return sprintf(buf, "%llu\n", (unsigned long long) ep->id);
}
static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
@@ -156,7 +156,7 @@ static struct attribute_group iscsi_endpoint_group = {
static int iscsi_match_epid(struct device *dev, void *data)
{
struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
- unsigned int *epid = (unsigned int *) data;
+ uint64_t *epid = (uint64_t *) data;
return *epid == ep->id;
}
@@ -166,7 +166,7 @@ iscsi_create_endpoint(int dd_size)
{
struct device *dev;
struct iscsi_endpoint *ep;
- unsigned int id;
+ uint64_t id;
int err;
for (id = 1; id < ISCSI_MAX_EPID; id++) {
@@ -187,7 +187,8 @@ iscsi_create_endpoint(int dd_size)
ep->id = id;
ep->dev.class = &iscsi_endpoint_class;
- snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%u", id);
+ snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%llu",
+ (unsigned long long) id);
err = device_register(&ep->dev);
if (err)
goto free_ep;
@@ -374,10 +375,10 @@ int iscsi_session_chkready(struct iscsi_cls_session *session)
err = 0;
break;
case ISCSI_SESSION_FAILED:
- err = DID_IMM_RETRY << 16;
+ err = DID_TRANSPORT_DISRUPTED << 16;
break;
case ISCSI_SESSION_FREE:
- err = DID_NO_CONNECT << 16;
+ err = DID_TRANSPORT_FAILFAST << 16;
break;
default:
err = DID_NO_CONNECT << 16;
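An iSCSI LLD consults this mapping at queuecommand time; a non-zero value is stuffed into the command result and completed immediately. A sketch: demo_cmd_to_session() is hypothetical, and the done callback is the one passed to queuecommand:

        static int demo_queuecommand(struct scsi_cmnd *cmd,
                                     void (*done)(struct scsi_cmnd *))
        {
                struct iscsi_cls_session *session = demo_cmd_to_session(cmd);
                int reason = iscsi_session_chkready(session);

                if (reason) {   /* DID_TRANSPORT_DISRUPTED/_FAILFAST << 16 */
                        cmd->result = reason;
                        done(cmd);
                        return 0;
                }
                /* ... normal submission ... */
                return 0;
        }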
@@ -1010,7 +1011,7 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb) {
- iscsi_conn_error(conn, ISCSI_ERR_CONN_FAILED);
+ iscsi_conn_error_event(conn, ISCSI_ERR_CONN_FAILED);
iscsi_cls_conn_printk(KERN_ERR, conn, "can not deliver "
"control PDU: OOM\n");
return -ENOMEM;
@@ -1031,7 +1032,7 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
}
EXPORT_SYMBOL_GPL(iscsi_recv_pdu);
-void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
+void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
{
struct nlmsghdr *nlh;
struct sk_buff *skb;
@@ -1063,7 +1064,7 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n",
error);
}
-EXPORT_SYMBOL_GPL(iscsi_conn_error);
+EXPORT_SYMBOL_GPL(iscsi_conn_error_event);
static int
iscsi_if_send_reply(int pid, int seq, int type, int done, int multi,
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index b29360ed0bdc..f49f55c6bfc8 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -109,8 +109,11 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
for(i = 0; i < DV_RETRIES; i++) {
result = scsi_execute(sdev, cmd, dir, buffer, bufflen,
sense, DV_TIMEOUT, /* retries */ 1,
- REQ_FAILFAST);
- if (result & DRIVER_SENSE) {
+ REQ_FAILFAST_DEV |
+ REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER,
+ NULL);
+ if (driver_byte(result) & DRIVER_SENSE) {
struct scsi_sense_hdr sshdr_tmp;
if (!sshdr)
sshdr = &sshdr_tmp;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a7b53be63367..62b28d58e65e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -384,7 +384,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
sector_t block = rq->sector;
sector_t threshold;
unsigned int this_count = rq->nr_sectors;
- int ret;
+ int ret, host_dif;
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
ret = scsi_setup_blk_pc_cmnd(sdp, rq);
@@ -515,7 +515,8 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
rq->nr_sectors));
/* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
- if (scsi_host_dif_capable(sdp->host, sdkp->protection_type))
+ host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
+ if (host_dif)
SCpnt->cmnd[1] = 1 << 5;
else
SCpnt->cmnd[1] = 0;
@@ -573,8 +574,9 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
SCpnt->sdb.length = this_count * sdp->sector_size;
/* If DIF or DIX is enabled, tell HBA how to handle request */
- if (sdkp->protection_type || scsi_prot_sg_count(SCpnt))
- sd_dif_op(SCpnt, sdkp->protection_type, scsi_prot_sg_count(SCpnt));
+ if (host_dif || scsi_prot_sg_count(SCpnt))
+ sd_dif_op(SCpnt, host_dif, scsi_prot_sg_count(SCpnt),
+ sdkp->protection_type);
/*
* We shouldn't disconnect in the middle of a sector, so with a dumb
@@ -607,17 +609,15 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
* In the latter case @inode and @filp carry an abridged amount
* of information as noted above.
**/
-static int sd_open(struct inode *inode, struct file *filp)
+static int sd_open(struct block_device *bdev, fmode_t mode)
{
- struct gendisk *disk = inode->i_bdev->bd_disk;
- struct scsi_disk *sdkp;
+ struct scsi_disk *sdkp = scsi_disk_get(bdev->bd_disk);
struct scsi_device *sdev;
int retval;
- if (!(sdkp = scsi_disk_get(disk)))
+ if (!sdkp)
return -ENXIO;
-
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));
sdev = sdkp->device;
@@ -631,14 +631,13 @@ static int sd_open(struct inode *inode, struct file *filp)
goto error_out;
if (sdev->removable || sdkp->write_prot)
- check_disk_change(inode->i_bdev);
+ check_disk_change(bdev);
/*
* If the drive is empty, just let the open fail.
*/
retval = -ENOMEDIUM;
- if (sdev->removable && !sdkp->media_present &&
- !(filp->f_flags & O_NDELAY))
+ if (sdev->removable && !sdkp->media_present && !(mode & FMODE_NDELAY))
goto error_out;
/*
@@ -646,7 +645,7 @@ static int sd_open(struct inode *inode, struct file *filp)
* if the user expects to be able to write to the thing.
*/
retval = -EROFS;
- if (sdkp->write_prot && (filp->f_mode & FMODE_WRITE))
+ if (sdkp->write_prot && (mode & FMODE_WRITE))
goto error_out;
/*
@@ -682,9 +681,8 @@ error_out:
* Note: may block (uninterruptible) if error recovery is underway
* on this disk.
**/
-static int sd_release(struct inode *inode, struct file *filp)
+static int sd_release(struct gendisk *disk, fmode_t mode)
{
- struct gendisk *disk = inode->i_bdev->bd_disk;
struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdev = sdkp->device;
@@ -741,10 +739,9 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
* Note: most ioctls are forward onto the block subsystem or further
* down in the scsi subsystem.
**/
-static int sd_ioctl(struct inode * inode, struct file * filp,
+static int sd_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct block_device *bdev = inode->i_bdev;
struct gendisk *disk = bdev->bd_disk;
struct scsi_device *sdp = scsi_disk(disk)->device;
void __user *p = (void __user *)arg;
@@ -759,7 +756,8 @@ static int sd_ioctl(struct inode * inode, struct file * filp,
* may try and take the device offline, in which case all further
* access to the device is prohibited.
*/
- error = scsi_nonblockable_ioctl(sdp, cmd, p, filp);
+ error = scsi_nonblockable_ioctl(sdp, cmd, p,
+ (mode & FMODE_NDELAY) != 0);
if (!scsi_block_when_processing_errors(sdp) || !error)
return error;
@@ -773,7 +771,7 @@ static int sd_ioctl(struct inode * inode, struct file * filp,
case SCSI_IOCTL_GET_BUS_NUMBER:
return scsi_ioctl(sdp, cmd, p);
default:
- error = scsi_cmd_ioctl(filp, disk->queue, disk, cmd, p);
+ error = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, p);
if (error != -ENOTTY)
return error;
}
@@ -886,7 +884,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
* flush everything.
*/
res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
- SD_TIMEOUT, SD_MAX_RETRIES);
+ SD_TIMEOUT, SD_MAX_RETRIES, NULL);
if (res == 0)
break;
}
@@ -926,11 +924,10 @@ static void sd_rescan(struct device *dev)
* This gets directly called from VFS. When the ioctl
* is not recognized we go back to the other translation paths.
*/
-static long sd_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
{
- struct block_device *bdev = file->f_path.dentry->d_inode->i_bdev;
- struct gendisk *disk = bdev->bd_disk;
- struct scsi_device *sdev = scsi_disk(disk)->device;
+ struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
/*
* If we are in the middle of error recovery, don't let anyone
@@ -960,7 +957,7 @@ static struct block_device_operations sd_fops = {
.owner = THIS_MODULE,
.open = sd_open,
.release = sd_release,
- .ioctl = sd_ioctl,
+ .locked_ioctl = sd_ioctl,
.getgeo = sd_getgeo,
#ifdef CONFIG_COMPAT
.compat_ioctl = sd_compat_ioctl,
@@ -1052,7 +1049,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
good_bytes = sd_completed_bytes(SCpnt);
break;
case RECOVERED_ERROR:
- case NO_SENSE:
/* Inform the user, but make sure that it's not treated
* as a hard error.
*/
@@ -1061,6 +1057,15 @@ static int sd_done(struct scsi_cmnd *SCpnt)
memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
good_bytes = scsi_bufflen(SCpnt);
break;
+ case NO_SENSE:
+ /* This indicates a false check condition, so ignore it. An
+ * unknown amount of data was transferred so treat it as an
+ * error.
+ */
+ scsi_print_sense("sd", SCpnt);
+ SCpnt->result = 0;
+ memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ break;
case ABORTED_COMMAND:
if (sshdr.asc == 0x10) { /* DIF: Disk detected corruption */
scsi_print_result(SCpnt);
@@ -1074,15 +1079,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
scsi_print_sense("sd", SCpnt);
good_bytes = sd_completed_bytes(SCpnt);
}
- if (!scsi_device_protection(SCpnt->device) &&
- SCpnt->device->use_10_for_rw &&
- (SCpnt->cmnd[0] == READ_10 ||
- SCpnt->cmnd[0] == WRITE_10))
- SCpnt->device->use_10_for_rw = 0;
- if (SCpnt->device->use_10_for_ms &&
- (SCpnt->cmnd[0] == MODE_SENSE_10 ||
- SCpnt->cmnd[0] == MODE_SELECT_10))
- SCpnt->device->use_10_for_ms = 0;
break;
default:
break;
@@ -1138,7 +1134,7 @@ sd_spinup_disk(struct scsi_disk *sdkp)
the_result = scsi_execute_req(sdkp->device, cmd,
DMA_NONE, NULL, 0,
&sshdr, SD_TIMEOUT,
- SD_MAX_RETRIES);
+ SD_MAX_RETRIES, NULL);
/*
* If the drive has indicated to us that it
@@ -1196,7 +1192,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
cmd[4] |= 1 << 4;
scsi_execute_req(sdkp->device, cmd, DMA_NONE,
NULL, 0, &sshdr,
- SD_TIMEOUT, SD_MAX_RETRIES);
+ SD_TIMEOUT, SD_MAX_RETRIES,
+ NULL);
spintime_expire = jiffies + 100 * HZ;
spintime = 1;
}
@@ -1252,14 +1249,12 @@ void sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
else
type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
+ sdkp->protection_type = type;
+
switch (type) {
case SD_DIF_TYPE0_PROTECTION:
- sdkp->protection_type = 0;
- break;
-
case SD_DIF_TYPE1_PROTECTION:
case SD_DIF_TYPE3_PROTECTION:
- sdkp->protection_type = type;
break;
case SD_DIF_TYPE2_PROTECTION:
@@ -1277,7 +1272,6 @@ void sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
return;
disable:
- sdkp->protection_type = 0;
sdkp->capacity = 0;
}
@@ -1313,7 +1307,7 @@ repeat:
the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
buffer, longrc ? 13 : 8, &sshdr,
- SD_TIMEOUT, SD_MAX_RETRIES);
+ SD_TIMEOUT, SD_MAX_RETRIES, NULL);
if (media_not_present(sdkp, &sshdr))
return;
@@ -1438,7 +1432,7 @@ got_data:
{
char cap_str_2[10], cap_str_10[10];
- u64 sz = sdkp->capacity << ffz(~sector_size);
+ u64 sz = (u64)sdkp->capacity << ilog2(sector_size);
string_get_size(sz, STRING_UNITS_2, cap_str_2,
sizeof(cap_str_2));
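ffz(~512) and ilog2(512) both evaluate to 9, so the change is purely about width: the old expression shifted in sector_t precision before widening, which truncates on a 32-bit sector_t for any disk of 4 GiB or more (capacity >= 2^23 sectors). A worked example in user-space types:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint64_t capacity = 4294967296ULL;      /* sectors, 2 TiB disk */
                uint64_t sz = capacity << 9;            /* ilog2(512) == 9 */

                /* prints 2199023255552; a 32-bit shift would have given 0 */
                printf("%llu bytes\n", (unsigned long long)sz);
                return 0;
        }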
@@ -1993,7 +1987,7 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
return -ENODEV;
res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
- SD_TIMEOUT, SD_MAX_RETRIES);
+ SD_TIMEOUT, SD_MAX_RETRIES, NULL);
if (res) {
sd_printk(KERN_WARNING, sdkp, "START_STOP FAILED\n");
sd_print_result(sdkp, res);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 95b9f06534d5..75638e7d3f66 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -97,19 +97,28 @@ struct sd_dif_tuple {
__be32 ref_tag; /* Target LBA or indirect LBA */
};
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
+#ifdef CONFIG_BLK_DEV_INTEGRITY
-extern void sd_dif_op(struct scsi_cmnd *, unsigned int, unsigned int);
+extern void sd_dif_op(struct scsi_cmnd *, unsigned int, unsigned int, unsigned int);
extern void sd_dif_config_host(struct scsi_disk *);
extern int sd_dif_prepare(struct request *rq, sector_t, unsigned int);
extern void sd_dif_complete(struct scsi_cmnd *, unsigned int);
#else /* CONFIG_BLK_DEV_INTEGRITY */
-#define sd_dif_op(a, b, c) do { } while (0)
-#define sd_dif_config_host(a) do { } while (0)
-#define sd_dif_prepare(a, b, c) (0)
-#define sd_dif_complete(a, b) (0)
+static inline void sd_dif_op(struct scsi_cmnd *cmd, unsigned int a, unsigned int b, unsigned int c)
+{
+}
+static inline void sd_dif_config_host(struct scsi_disk *disk)
+{
+}
+static inline int sd_dif_prepare(struct request *rq, sector_t s, unsigned int a)
+{
+ return 0;
+}
+static inline void sd_dif_complete(struct scsi_cmnd *cmd, unsigned int a)
+{
+}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 4d17f3d35aac..3ebb1f289490 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -311,25 +311,26 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
struct scsi_device *sdp = sdkp->device;
struct gendisk *disk = sdkp->disk;
u8 type = sdkp->protection_type;
+ int dif, dix;
- /* If this HBA doesn't support DIX, resort to normal I/O or DIF */
- if (scsi_host_dix_capable(sdp->host, type) == 0) {
+ dif = scsi_host_dif_capable(sdp->host, type);
+ dix = scsi_host_dix_capable(sdp->host, type);
- if (type == SD_DIF_TYPE0_PROTECTION)
- return;
-
- if (scsi_host_dif_capable(sdp->host, type) == 0) {
- sd_printk(KERN_INFO, sdkp, "Type %d protection " \
- "unsupported by HBA. Disabling DIF.\n", type);
- sdkp->protection_type = 0;
- return;
- }
+ if (!dix && scsi_host_dix_capable(sdp->host, 0)) {
+ dif = 0; dix = 1;
+ }
- sd_printk(KERN_INFO, sdkp, "Enabling DIF Type %d protection\n",
- type);
+ if (type) {
+ if (dif)
+ sd_printk(KERN_NOTICE, sdkp,
+ "Enabling DIF Type %d protection\n", type);
+ else
+ sd_printk(KERN_NOTICE, sdkp,
+ "Disabling DIF Type %d protection\n", type);
+ }
+ if (!dix)
return;
- }
/* Enable DMA of protection information */
if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP)
@@ -343,17 +344,17 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
else
blk_integrity_register(disk, &dif_type1_integrity_crc);
- sd_printk(KERN_INFO, sdkp,
- "Enabling %s integrity protection\n", disk->integrity->name);
+ sd_printk(KERN_NOTICE, sdkp,
+ "Enabling DIX %s protection\n", disk->integrity->name);
/* Signal to block layer that we support sector tagging */
- if (type && sdkp->ATO) {
+ if (dif && type && sdkp->ATO) {
if (type == SD_DIF_TYPE3_PROTECTION)
disk->integrity->tag_size = sizeof(u16) + sizeof(u32);
else
disk->integrity->tag_size = sizeof(u16);
- sd_printk(KERN_INFO, sdkp, "DIF application tag size %u\n",
+ sd_printk(KERN_NOTICE, sdkp, "DIF application tag size %u\n",
disk->integrity->tag_size);
}
}
@@ -361,7 +362,7 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
/*
* DIF DMA operation magic decoder ring.
*/
-void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix)
+void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix, unsigned int type)
{
int csum_convert, prot_op;
@@ -406,7 +407,8 @@ void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix)
}
scsi_set_prot_op(scmd, prot_op);
- scsi_set_prot_type(scmd, dif);
+ if (dif)
+ scsi_set_prot_type(scmd, type);
}
/*
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 1bcf3c33d7ff..7f0df29f3a64 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -77,7 +77,7 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
};
return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
- NULL, SES_TIMEOUT, SES_RETRIES);
+ NULL, SES_TIMEOUT, SES_RETRIES, NULL);
}
static int ses_send_diag(struct scsi_device *sdev, int page_code,
@@ -95,7 +95,7 @@ static int ses_send_diag(struct scsi_device *sdev, int page_code,
};
result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,
- NULL, SES_TIMEOUT, SES_RETRIES);
+ NULL, SES_TIMEOUT, SES_RETRIES, NULL);
if (result)
sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n",
result);
@@ -369,7 +369,8 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
return;
if (scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf,
- VPD_INQUIRY_SIZE, NULL, SES_TIMEOUT, SES_RETRIES))
+ VPD_INQUIRY_SIZE, NULL, SES_TIMEOUT, SES_RETRIES,
+ NULL))
goto free;
vpd_len = (buf[2] << 8) + buf[3];
@@ -380,7 +381,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
cmd[3] = vpd_len >> 8;
cmd[4] = vpd_len & 0xff;
if (scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf,
- vpd_len, NULL, SES_TIMEOUT, SES_RETRIES))
+ vpd_len, NULL, SES_TIMEOUT, SES_RETRIES, NULL))
goto free;
desc = buf + 4;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index ba9b9bbd4e73..5103855242ae 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -327,7 +327,6 @@ sg_release(struct inode *inode, struct file *filp)
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
- sg_fasync(-1, filp, 0); /* remove filp from async notification list */
if (0 == sg_remove_sfp(sdp, sfp)) { /* Returns 1 when sdp gone */
if (!sdp->detached) {
scsi_device_put(sdp->device);
@@ -1059,7 +1058,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
if (sg_allow_access(filp, &opcode))
return -EPERM;
}
- return sg_scsi_ioctl(filp, sdp->device->request_queue, NULL, p);
+ return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
case SG_SET_DEBUG:
result = get_user(val, ip);
if (result)
@@ -1450,12 +1449,10 @@ sg_add(struct device *cl_dev, struct class_interface *cl_intf)
if (sg_sysfs_valid) {
struct device *sg_class_member;
- sg_class_member = device_create_drvdata(sg_sysfs_class,
- cl_dev->parent,
- MKDEV(SCSI_GENERIC_MAJOR,
- sdp->index),
- sdp,
- "%s", disk->disk_name);
+ sg_class_member = device_create(sg_sysfs_class, cl_dev->parent,
+ MKDEV(SCSI_GENERIC_MAJOR,
+ sdp->index),
+ sdp, "%s", disk->disk_name);
if (IS_ERR(sg_class_member)) {
printk(KERN_ERR "sg_add: "
"device_create failed\n");
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 0f17009c99d2..e7fa3caead79 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -177,7 +177,7 @@ int sr_test_unit_ready(struct scsi_device *sdev, struct scsi_sense_hdr *sshdr)
do {
the_result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL,
0, sshdr, SR_TIMEOUT,
- retries--);
+ retries--, NULL);
if (scsi_sense_valid(sshdr) &&
sshdr->sense_key == UNIT_ATTENTION)
sdev->changed = 1;
@@ -471,38 +471,31 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
return scsi_prep_return(q, rq, ret);
}
-static int sr_block_open(struct inode *inode, struct file *file)
+static int sr_block_open(struct block_device *bdev, fmode_t mode)
{
- struct gendisk *disk = inode->i_bdev->bd_disk;
- struct scsi_cd *cd;
- int ret = 0;
-
- if(!(cd = scsi_cd_get(disk)))
- return -ENXIO;
-
- if((ret = cdrom_open(&cd->cdi, inode, file)) != 0)
- scsi_cd_put(cd);
+ struct scsi_cd *cd = scsi_cd_get(bdev->bd_disk);
+ int ret = -ENXIO;
+ if (cd) {
+ ret = cdrom_open(&cd->cdi, bdev, mode);
+ if (ret)
+ scsi_cd_put(cd);
+ }
return ret;
}
-static int sr_block_release(struct inode *inode, struct file *file)
+static int sr_block_release(struct gendisk *disk, fmode_t mode)
{
- int ret;
- struct scsi_cd *cd = scsi_cd(inode->i_bdev->bd_disk);
- ret = cdrom_release(&cd->cdi, file);
- if(ret)
- return ret;
-
+ struct scsi_cd *cd = scsi_cd(disk);
+ cdrom_release(&cd->cdi, mode);
scsi_cd_put(cd);
-
return 0;
}
-static int sr_block_ioctl(struct inode *inode, struct file *file, unsigned cmd,
+static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
unsigned long arg)
{
- struct scsi_cd *cd = scsi_cd(inode->i_bdev->bd_disk);
+ struct scsi_cd *cd = scsi_cd(bdev->bd_disk);
struct scsi_device *sdev = cd->device;
void __user *argp = (void __user *)arg;
int ret;
@@ -517,7 +510,7 @@ static int sr_block_ioctl(struct inode *inode, struct file *file, unsigned cmd,
return scsi_ioctl(sdev, cmd, argp);
}
- ret = cdrom_ioctl(file, &cd->cdi, inode, cmd, arg);
+ ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
if (ret != -ENOSYS)
return ret;
@@ -527,7 +520,8 @@ static int sr_block_ioctl(struct inode *inode, struct file *file, unsigned cmd,
 * case fall through to scsi_ioctl, which will return ENODEV again
* if it doesn't recognise the ioctl
*/
- ret = scsi_nonblockable_ioctl(sdev, cmd, argp, NULL);
+ ret = scsi_nonblockable_ioctl(sdev, cmd, argp,
+ (mode & FMODE_NDELAY) != 0);
if (ret != -ENODEV)
return ret;
return scsi_ioctl(sdev, cmd, argp);
@@ -544,7 +538,7 @@ static struct block_device_operations sr_bdops =
.owner = THIS_MODULE,
.open = sr_block_open,
.release = sr_block_release,
- .ioctl = sr_block_ioctl,
+ .locked_ioctl = sr_block_ioctl,
.media_changed = sr_block_media_changed,
/*
* No compat_ioctl for now because sr_block_ioctl never
@@ -687,7 +681,7 @@ static void get_sectorsize(struct scsi_cd *cd)
/* Do the command and wait.. */
the_result = scsi_execute_req(cd->device, cmd, DMA_FROM_DEVICE,
buffer, sizeof(buffer), NULL,
- SR_TIMEOUT, MAX_RETRIES);
+ SR_TIMEOUT, MAX_RETRIES, NULL);
retries--;
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index ae87d08df588..d92ff512d213 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -207,7 +207,7 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
memset(sense, 0, sizeof(*sense));
result = scsi_execute(SDev, cgc->cmd, cgc->data_direction,
cgc->buffer, cgc->buflen, (char *)sense,
- cgc->timeout, IOCTL_RETRIES, 0);
+ cgc->timeout, IOCTL_RETRIES, 0, NULL);
scsi_normalize_sense((char *)sense, sizeof(*sense), &sshdr);
diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c
index 4eb3da996b36..4ad3e017213f 100644
--- a/drivers/scsi/sr_vendor.c
+++ b/drivers/scsi/sr_vendor.c
@@ -223,9 +223,9 @@ int sr_cd_check(struct cdrom_device_info *cdi)
no_multi = 1;
break;
}
- min = BCD2BIN(buffer[15]);
- sec = BCD2BIN(buffer[16]);
- frame = BCD2BIN(buffer[17]);
+ min = bcd2bin(buffer[15]);
+ sec = bcd2bin(buffer[16]);
+ frame = bcd2bin(buffer[17]);
sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;
break;
}
@@ -252,9 +252,9 @@ int sr_cd_check(struct cdrom_device_info *cdi)
}
if (rc != 0)
break;
- min = BCD2BIN(buffer[1]);
- sec = BCD2BIN(buffer[2]);
- frame = BCD2BIN(buffer[3]);
+ min = bcd2bin(buffer[1]);
+ sec = bcd2bin(buffer[2]);
+ frame = bcd2bin(buffer[3]);
sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame;
if (sector)
sector -= CD_MSF_OFFSET;
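The MSF fields on these discs are binary-coded decimal, one decimal digit per nibble; bcd2bin() from <linux/bcd.h> is a typed function replacing the old BCD2BIN macro. The conversion it performs is simply:

        /* Sketch of the conversion: 0x59 -> 5 * 10 + 9 = 59 */
        static inline unsigned int demo_bcd2bin(unsigned char val)
        {
                return (val >> 4) * 10 + (val & 0x0f);
        }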
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index c2bb53e3d941..7f3f317ee6ca 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -451,9 +451,23 @@ static void st_sleep_done(void *data, char *sense, int result, int resid)
complete(SRpnt->waiting);
}
-static struct st_request *st_allocate_request(void)
+static struct st_request *st_allocate_request(struct scsi_tape *stp)
{
- return kzalloc(sizeof(struct st_request), GFP_KERNEL);
+ struct st_request *streq;
+
+ streq = kzalloc(sizeof(*streq), GFP_KERNEL);
+ if (streq)
+ streq->stp = stp;
+ else {
+ DEBC(printk(KERN_ERR "%s: Can't get SCSI request.\n",
+ tape_name(stp)););
+ if (signal_pending(current))
+ stp->buffer->syscall_result = -EINTR;
+ else
+ stp->buffer->syscall_result = -EBUSY;
+ }
+
+ return streq;
}
static void st_release_request(struct st_request *streq)
@@ -481,18 +495,10 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
return NULL;
}
- if (SRpnt == NULL) {
- SRpnt = st_allocate_request();
- if (SRpnt == NULL) {
- DEBC( printk(KERN_ERR "%s: Can't get SCSI request.\n",
- tape_name(STp)); );
- if (signal_pending(current))
- (STp->buffer)->syscall_result = (-EINTR);
- else
- (STp->buffer)->syscall_result = (-EBUSY);
+ if (!SRpnt) {
+ SRpnt = st_allocate_request(STp);
+ if (!SRpnt)
return NULL;
- }
- SRpnt->stp = STp;
}
/* If async IO, set last_SRpnt. This ptr tells write_behind_check
@@ -527,6 +533,28 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
return SRpnt;
}
+static int st_scsi_kern_execute(struct st_request *streq,
+ const unsigned char *cmd, int data_direction,
+ void *buffer, unsigned bufflen, int timeout,
+ int retries)
+{
+ struct scsi_tape *stp = streq->stp;
+ int ret, resid;
+
+ stp->buffer->cmdstat.have_sense = 0;
+ memcpy(streq->cmd, cmd, sizeof(streq->cmd));
+
+ ret = scsi_execute(stp->device, cmd, data_direction, buffer, bufflen,
+ streq->sense, timeout, retries, 0, &resid);
+ if (driver_byte(ret) & DRIVER_ERROR)
+ return -EBUSY;
+
+ stp->buffer->cmdstat.midlevel_result = streq->result = ret;
+ stp->buffer->cmdstat.residual = resid;
+ stp->buffer->syscall_result = st_chk_result(stp, streq);
+
+ return 0;
+}
/* Handle the write-behind checking (waits for completion). Returns -ENOSPC if
write has been correct but EOM early warning reached, -EIO if write ended in
@@ -599,6 +627,7 @@ static int cross_eof(struct scsi_tape * STp, int forward)
{
struct st_request *SRpnt;
unsigned char cmd[MAX_COMMAND_SIZE];
+ int ret;
cmd[0] = SPACE;
cmd[1] = 0x01; /* Space FileMarks */
@@ -612,19 +641,26 @@ static int cross_eof(struct scsi_tape * STp, int forward)
DEBC(printk(ST_DEB_MSG "%s: Stepping over filemark %s.\n",
tape_name(STp), forward ? "forward" : "backward"));
- SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
- STp->device->timeout, MAX_RETRIES, 1);
+ SRpnt = st_allocate_request(STp);
if (!SRpnt)
- return (STp->buffer)->syscall_result;
+ return STp->buffer->syscall_result;
- st_release_request(SRpnt);
- SRpnt = NULL;
+ ret = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0,
+ STp->device->request_queue->rq_timeout,
+ MAX_RETRIES);
+ if (ret)
+ goto out;
+
+ ret = STp->buffer->syscall_result;
if ((STp->buffer)->cmdstat.midlevel_result != 0)
printk(KERN_ERR "%s: Stepping over filemark %s failed.\n",
tape_name(STp), forward ? "forward" : "backward");
- return (STp->buffer)->syscall_result;
+out:
+ st_release_request(SRpnt);
+
+ return ret;
}
@@ -657,7 +693,8 @@ static int st_flush_write_buffer(struct scsi_tape * STp)
cmd[4] = blks;
SRpnt = st_do_scsi(NULL, STp, cmd, transfer, DMA_TO_DEVICE,
- STp->device->timeout, MAX_WRITE_RETRIES, 1);
+ STp->device->request_queue->rq_timeout,
+ MAX_WRITE_RETRIES, 1);
if (!SRpnt)
return (STp->buffer)->syscall_result;
@@ -844,21 +881,24 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
int attentions, waits, max_wait, scode;
int retval = CHKRES_READY, new_session = 0;
unsigned char cmd[MAX_COMMAND_SIZE];
- struct st_request *SRpnt = NULL;
+ struct st_request *SRpnt;
struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
+ SRpnt = st_allocate_request(STp);
+ if (!SRpnt)
+ return STp->buffer->syscall_result;
+
max_wait = do_wait ? ST_BLOCK_SECONDS : 0;
for (attentions=waits=0; ; ) {
memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE);
cmd[0] = TEST_UNIT_READY;
- SRpnt = st_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE,
- STp->long_timeout, MAX_READY_RETRIES, 1);
- if (!SRpnt) {
- retval = (STp->buffer)->syscall_result;
+ retval = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0,
+ STp->long_timeout,
+ MAX_READY_RETRIES);
+ if (retval)
break;
- }
if (cmdstatp->have_sense) {
@@ -902,8 +942,8 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
break;
}
- if (SRpnt != NULL)
- st_release_request(SRpnt);
+ st_release_request(SRpnt);
+
return retval;
}
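
A detail worth noting in test_ready(): the request is now allocated once, outside the TEST UNIT READY retry loop, and reused on every pass; st_scsi_kern_execute() re-fills streq->cmd and the sense buffer each time, so reuse is safe, and the old NULL-guarded release collapses into a single unconditional st_release_request(). Schematically (a sketch of the loop shape, not the verbatim function):

        SRpnt = st_allocate_request(STp);       /* once, before the loop */
        if (!SRpnt)
                return STp->buffer->syscall_result;

        for (attentions = waits = 0; ; ) {
                memset(cmd, 0, MAX_COMMAND_SIZE);
                cmd[0] = TEST_UNIT_READY;
                retval = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0,
                                              STp->long_timeout,
                                              MAX_READY_RETRIES);
                if (retval)
                        break;
                /* ... inspect sense data, wait and retry as needed ... */
        }

        st_release_request(SRpnt);      /* SRpnt cannot be NULL here */
        return retval;
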
@@ -980,16 +1020,24 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
}
}
+ SRpnt = st_allocate_request(STp);
+ if (!SRpnt) {
+ retval = STp->buffer->syscall_result;
+ goto err_out;
+ }
+
if (STp->omit_blklims)
STp->min_block = STp->max_block = (-1);
else {
memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE);
cmd[0] = READ_BLOCK_LIMITS;
- SRpnt = st_do_scsi(SRpnt, STp, cmd, 6, DMA_FROM_DEVICE,
- STp->device->timeout, MAX_READY_RETRIES, 1);
- if (!SRpnt) {
- retval = (STp->buffer)->syscall_result;
+ retval = st_scsi_kern_execute(SRpnt, cmd, DMA_FROM_DEVICE,
+ STp->buffer->b_data, 6,
+ STp->device->request_queue->rq_timeout,
+ MAX_READY_RETRIES);
+ if (retval) {
+ st_release_request(SRpnt);
goto err_out;
}
@@ -1013,10 +1061,12 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
cmd[0] = MODE_SENSE;
cmd[4] = 12;
- SRpnt = st_do_scsi(SRpnt, STp, cmd, 12, DMA_FROM_DEVICE,
- STp->device->timeout, MAX_READY_RETRIES, 1);
- if (!SRpnt) {
- retval = (STp->buffer)->syscall_result;
+ retval = st_scsi_kern_execute(SRpnt, cmd, DMA_FROM_DEVICE,
+ STp->buffer->b_data, 12,
+ STp->device->request_queue->rq_timeout,
+ MAX_READY_RETRIES);
+ if (retval) {
+ st_release_request(SRpnt);
goto err_out;
}
@@ -1246,10 +1296,17 @@ static int st_flush(struct file *filp, fl_owner_t id)
cmd[0] = WRITE_FILEMARKS;
cmd[4] = 1 + STp->two_fm;
- SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
- STp->device->timeout, MAX_WRITE_RETRIES, 1);
+ SRpnt = st_allocate_request(STp);
if (!SRpnt) {
- result = (STp->buffer)->syscall_result;
+ result = STp->buffer->syscall_result;
+ goto out;
+ }
+
+ result = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0,
+ STp->device->request_queue->rq_timeout,
+ MAX_WRITE_RETRIES);
+ if (result) {
+ st_release_request(SRpnt);
goto out;
}
@@ -1634,7 +1691,8 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
cmd[4] = blks;
SRpnt = st_do_scsi(SRpnt, STp, cmd, transfer, DMA_TO_DEVICE,
- STp->device->timeout, MAX_WRITE_RETRIES, !async_write);
+ STp->device->request_queue->rq_timeout,
+ MAX_WRITE_RETRIES, !async_write);
if (!SRpnt) {
retval = STbp->syscall_result;
goto out;
@@ -1804,7 +1862,8 @@ static long read_tape(struct scsi_tape *STp, long count,
SRpnt = *aSRpnt;
SRpnt = st_do_scsi(SRpnt, STp, cmd, bytes, DMA_FROM_DEVICE,
- STp->device->timeout, MAX_RETRIES, 1);
+ STp->device->request_queue->rq_timeout,
+ MAX_RETRIES, 1);
release_buffering(STp, 1);
*aSRpnt = SRpnt;
if (!SRpnt)
@@ -2213,7 +2272,8 @@ static int st_set_options(struct scsi_tape *STp, long options)
DEBC( printk(KERN_INFO "%s: Long timeout set to %d seconds.\n", name,
(value & ~MT_ST_SET_LONG_TIMEOUT)));
} else {
- STp->device->timeout = value * HZ;
+ blk_queue_rq_timeout(STp->device->request_queue,
+ value * HZ);
DEBC( printk(KERN_INFO "%s: Normal timeout set to %d seconds.\n",
name, value) );
}
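
All of the STp->device->timeout substitutions in this patch track the block-layer timeout rework: the per-command timer now lives on the request queue as q->rq_timeout (in jiffies), set through blk_queue_rq_timeout(), and struct scsi_device no longer carries its own timeout field. MT_ST_SET_TIMEOUT above therefore becomes, in sketch form:

        /* store: was "STp->device->timeout = value * HZ;" */
        blk_queue_rq_timeout(STp->device->request_queue, value * HZ);

        /* load: was "timeout = STp->device->timeout;" */
        timeout = STp->device->request_queue->rq_timeout;

The same mechanical substitution repeats in do_load_unload(), st_int_ioctl(), set_location() and st_probe() below.
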
@@ -2311,7 +2371,8 @@ static int st_set_options(struct scsi_tape *STp, long options)
static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs)
{
unsigned char cmd[MAX_COMMAND_SIZE];
- struct st_request *SRpnt = NULL;
+ struct st_request *SRpnt;
+ int ret;
memset(cmd, 0, MAX_COMMAND_SIZE);
cmd[0] = MODE_SENSE;
@@ -2320,14 +2381,17 @@ static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs)
cmd[2] = page;
cmd[4] = 255;
- SRpnt = st_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_FROM_DEVICE,
- STp->device->timeout, 0, 1);
- if (SRpnt == NULL)
- return (STp->buffer)->syscall_result;
+ SRpnt = st_allocate_request(STp);
+ if (!SRpnt)
+ return STp->buffer->syscall_result;
+ ret = st_scsi_kern_execute(SRpnt, cmd, DMA_FROM_DEVICE,
+ STp->buffer->b_data, cmd[4],
+ STp->device->request_queue->rq_timeout,
+ MAX_RETRIES);
st_release_request(SRpnt);
- return (STp->buffer)->syscall_result;
+ return ret ? : STp->buffer->syscall_result;
}
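
The "ret ? :" in the new return statement is the GNU C conditional with an omitted middle operand, common in kernel code: the first operand is evaluated once and reused as the result when non-zero, i.e. it is equivalent to

        return ret ? ret : STp->buffer->syscall_result;

so a transport-level error from st_scsi_kern_execute() takes precedence, and the decoded SCSI status is returned otherwise.
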
@@ -2335,9 +2399,9 @@ static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs)
in the buffer is correctly formatted. The long timeout is used if slow is non-zero. */
static int write_mode_page(struct scsi_tape *STp, int page, int slow)
{
- int pgo;
+ int pgo, timeout, ret = 0;
unsigned char cmd[MAX_COMMAND_SIZE];
- struct st_request *SRpnt = NULL;
+ struct st_request *SRpnt;
memset(cmd, 0, MAX_COMMAND_SIZE);
cmd[0] = MODE_SELECT;
@@ -2351,14 +2415,21 @@ static int write_mode_page(struct scsi_tape *STp, int page, int slow)
(STp->buffer)->b_data[MH_OFF_DEV_SPECIFIC] &= ~MH_BIT_WP;
(STp->buffer)->b_data[pgo + MP_OFF_PAGE_NBR] &= MP_MSK_PAGE_NBR;
- SRpnt = st_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_TO_DEVICE,
- (slow ? STp->long_timeout : STp->device->timeout), 0, 1);
- if (SRpnt == NULL)
- return (STp->buffer)->syscall_result;
+ SRpnt = st_allocate_request(STp);
+ if (!SRpnt)
+ return ret;
+
+ timeout = slow ? STp->long_timeout :
+ STp->device->request_queue->rq_timeout;
+
+ ret = st_scsi_kern_execute(SRpnt, cmd, DMA_TO_DEVICE,
+ STp->buffer->b_data, cmd[4], timeout, 0);
+ if (!ret)
+ ret = STp->buffer->syscall_result;
st_release_request(SRpnt);
- return (STp->buffer)->syscall_result;
+ return ret;
}
@@ -2464,7 +2535,7 @@ static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_cod
}
if (STp->immediate) {
cmd[1] = 1; /* Don't wait for completion */
- timeout = STp->device->timeout;
+ timeout = STp->device->request_queue->rq_timeout;
}
else
timeout = STp->long_timeout;
@@ -2476,13 +2547,16 @@ static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_cod
printk(ST_DEB_MSG "%s: Loading tape.\n", name);
);
- SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
- timeout, MAX_RETRIES, 1);
+ SRpnt = st_allocate_request(STp);
if (!SRpnt)
- return (STp->buffer)->syscall_result;
+ return STp->buffer->syscall_result;
+
+ retval = st_scsi_kern_execute(SRpnt, cmd, DMA_NONE, NULL, 0, timeout,
+ MAX_RETRIES);
+ if (retval)
+ goto out;
retval = (STp->buffer)->syscall_result;
- st_release_request(SRpnt);
if (!retval) { /* SCSI command successful */
@@ -2501,6 +2575,8 @@ static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_cod
STps = &(STp->ps[STp->partition]);
STps->drv_file = STps->drv_block = (-1);
}
+out:
+ st_release_request(SRpnt);
return retval;
}
@@ -2638,7 +2714,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
cmd[2] = (arg >> 16);
cmd[3] = (arg >> 8);
cmd[4] = arg;
- timeout = STp->device->timeout;
+ timeout = STp->device->request_queue->rq_timeout;
DEBC(
if (cmd_in == MTWEOF)
printk(ST_DEB_MSG "%s: Writing %d filemarks.\n", name,
@@ -2656,7 +2732,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
cmd[0] = REZERO_UNIT;
if (STp->immediate) {
cmd[1] = 1; /* Don't wait for completion */
- timeout = STp->device->timeout;
+ timeout = STp->device->request_queue->rq_timeout;
}
DEBC(printk(ST_DEB_MSG "%s: Rewinding tape.\n", name));
fileno = blkno = at_sm = 0;
@@ -2669,7 +2745,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
cmd[0] = START_STOP;
if (STp->immediate) {
cmd[1] = 1; /* Don't wait for completion */
- timeout = STp->device->timeout;
+ timeout = STp->device->request_queue->rq_timeout;
}
cmd[4] = 3;
DEBC(printk(ST_DEB_MSG "%s: Retensioning tape.\n", name));
@@ -2702,7 +2778,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
cmd[1] = (arg ? 1 : 0); /* Long erase with non-zero argument */
if (STp->immediate) {
cmd[1] |= 2; /* Don't wait for completion */
- timeout = STp->device->timeout;
+ timeout = STp->device->request_queue->rq_timeout;
}
else
timeout = STp->long_timeout * 8;
@@ -2754,7 +2830,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
(STp->buffer)->b_data[9] = (ltmp >> 16);
(STp->buffer)->b_data[10] = (ltmp >> 8);
(STp->buffer)->b_data[11] = ltmp;
- timeout = STp->device->timeout;
+ timeout = STp->device->request_queue->rq_timeout;
DEBC(
if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK)
printk(ST_DEB_MSG
@@ -2776,12 +2852,15 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
return (-ENOSYS);
}
- SRpnt = st_do_scsi(NULL, STp, cmd, datalen, direction,
- timeout, MAX_RETRIES, 1);
+ SRpnt = st_allocate_request(STp);
if (!SRpnt)
return (STp->buffer)->syscall_result;
- ioctl_result = (STp->buffer)->syscall_result;
+ ioctl_result = st_scsi_kern_execute(SRpnt, cmd, direction,
+ STp->buffer->b_data, datalen,
+ timeout, MAX_RETRIES);
+ if (!ioctl_result)
+ ioctl_result = (STp->buffer)->syscall_result;
if (!ioctl_result) { /* SCSI command successful */
st_release_request(SRpnt);
@@ -2943,10 +3022,17 @@ static int get_location(struct scsi_tape *STp, unsigned int *block, int *partiti
if (!logical && !STp->scsi2_logical)
scmd[1] = 1;
}
- SRpnt = st_do_scsi(NULL, STp, scmd, 20, DMA_FROM_DEVICE,
- STp->device->timeout, MAX_READY_RETRIES, 1);
+
+ SRpnt = st_allocate_request(STp);
if (!SRpnt)
- return (STp->buffer)->syscall_result;
+ return STp->buffer->syscall_result;
+
+ result = st_scsi_kern_execute(SRpnt, scmd, DMA_FROM_DEVICE,
+ STp->buffer->b_data, 20,
+ STp->device->request_queue->rq_timeout,
+ MAX_READY_RETRIES);
+ if (result)
+ goto out;
if ((STp->buffer)->syscall_result != 0 ||
(STp->device->scsi_level >= SCSI_2 &&
@@ -2974,6 +3060,7 @@ static int get_location(struct scsi_tape *STp, unsigned int *block, int *partiti
DEBC(printk(ST_DEB_MSG "%s: Got tape pos. blk %d part %d.\n", name,
*block, *partition));
}
+out:
st_release_request(SRpnt);
SRpnt = NULL;
@@ -3045,13 +3132,17 @@ static int set_location(struct scsi_tape *STp, unsigned int block, int partition
}
if (STp->immediate) {
scmd[1] |= 1; /* Don't wait for completion */
- timeout = STp->device->timeout;
+ timeout = STp->device->request_queue->rq_timeout;
}
- SRpnt = st_do_scsi(NULL, STp, scmd, 0, DMA_NONE,
- timeout, MAX_READY_RETRIES, 1);
+ SRpnt = st_allocate_request(STp);
if (!SRpnt)
- return (STp->buffer)->syscall_result;
+ return STp->buffer->syscall_result;
+
+ result = st_scsi_kern_execute(SRpnt, scmd, DMA_NONE, NULL, 0,
+ timeout, MAX_READY_RETRIES);
+ if (result)
+ goto out;
STps->drv_block = STps->drv_file = (-1);
STps->eof = ST_NOEOF;
@@ -3076,7 +3167,7 @@ static int set_location(struct scsi_tape *STp, unsigned int block, int partition
STps->drv_block = STps->drv_file = 0;
result = 0;
}
-
+out:
st_release_request(SRpnt);
SRpnt = NULL;
@@ -3263,7 +3354,8 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
* may try and take the device offline, in which case all further
* access to the device is prohibited.
*/
- retval = scsi_nonblockable_ioctl(STp->device, cmd_in, p, file);
+ retval = scsi_nonblockable_ioctl(STp->device, cmd_in, p,
+ file->f_flags & O_NDELAY);
if (!scsi_block_when_processing_errors(STp->device) || retval != -ENODEV)
goto out;
retval = 0;
@@ -3567,8 +3659,8 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
!capable(CAP_SYS_RAWIO))
i = -EPERM;
else
- i = scsi_cmd_ioctl(file, STp->disk->queue,
- STp->disk, cmd_in, p);
+ i = scsi_cmd_ioctl(STp->disk->queue, STp->disk,
+ file->f_mode, cmd_in, p);
if (i != -ENOTTY)
return i;
break;
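
Both ioctl hunks track API changes that removed struct file from the block and SCSI ioctl paths. The prototypes these calls are written against are, paraphrased from include/scsi/scsi_ioctl.h and include/linux/blkdev.h of this series:

        int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd,
                                    void __user *arg, int ndelay);
        int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk,
                           fmode_t mode, unsigned int cmd, void __user *arg);

hence passing file->f_flags & O_NDELAY and file->f_mode rather than the file pointer itself.
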
@@ -4028,7 +4120,7 @@ static int st_probe(struct device *dev)
tpnt->partition = 0;
tpnt->new_partition = 0;
tpnt->nbr_partitions = 0;
- tpnt->device->timeout = ST_TIMEOUT;
+ blk_queue_rq_timeout(tpnt->device->request_queue, ST_TIMEOUT);
tpnt->long_timeout = ST_LONG_TIMEOUT;
tpnt->try_dio = try_direct_io && !SDp->host->unchecked_isa_dma;
@@ -4428,13 +4520,10 @@ static int do_create_class_files(struct scsi_tape *STp, int dev_num, int mode)
snprintf(name, 10, "%s%s%s", rew ? "n" : "",
STp->disk->disk_name, st_formats[i]);
st_class_member =
- device_create_drvdata(st_sysfs_class,
- &STp->device->sdev_gendev,
- MKDEV(SCSI_TAPE_MAJOR,
- TAPE_MINOR(dev_num,
- mode, rew)),
- &STp->modes[mode],
- "%s", name);
+ device_create(st_sysfs_class, &STp->device->sdev_gendev,
+ MKDEV(SCSI_TAPE_MAJOR,
+ TAPE_MINOR(dev_num, mode, rew)),
+ &STp->modes[mode], "%s", name);
if (IS_ERR(st_class_member)) {
printk(KERN_WARNING "st%d: device_create failed\n",
dev_num);
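
device_create_drvdata() was a transitional name: once device_create() itself gained the drvdata argument (around 2.6.27), the _drvdata alias was slated for removal, so this hunk is a straight rename against the signature

        struct device *device_create(struct class *class, struct device *parent,
                                     dev_t devt, void *drvdata,
                                     const char *fmt, ...);

with no behavioral change.
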
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 3790906a77d1..a3a18ad73125 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -477,7 +477,7 @@ stex_slave_config(struct scsi_device *sdev)
{
sdev->use_10_for_rw = 1;
sdev->use_10_for_ms = 1;
- sdev->timeout = 60 * HZ;
+ blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
sdev->tagged_supported = 1;
return 0;
@@ -1108,8 +1108,7 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_scsi_host_put;
}
- hba->mmio_base = ioremap_nocache(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
+ hba->mmio_base = pci_ioremap_bar(pdev, 0);
if ( !hba->mmio_base) {
printk(KERN_ERR DRV_NAME "(%s): memory map failed\n",
pci_name(pdev));
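
pci_ioremap_bar(), new in this series, folds the start/length bookkeeping into one call and adds a sanity check. Its implementation in drivers/pci/pci.c is essentially:

        void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
        {
                /* refuse to map an I/O-port BAR as if it were memory */
                if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
                        WARN_ON(1);
                        return NULL;
                }
                return ioremap_nocache(pci_resource_start(pdev, bar),
                                       pci_resource_len(pdev, bar));
        }

so for a memory BAR the conversion is behavior-preserving.
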
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 7514b3a0390e..34a99620e5bd 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -213,7 +213,7 @@ static int __devinit esp_sun3x_probe(struct platform_device *dev)
esp->ops = &sun3x_esp_ops;
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res && !res->start)
+ if (!res || !res->start)
goto fail_unlink;
esp->regs = ioremap_nocache(res->start, 0x20);
@@ -221,7 +221,7 @@ static int __devinit esp_sun3x_probe(struct platform_device *dev)
goto fail_unmap_regs;
res = platform_get_resource(dev, IORESOURCE_MEM, 1);
- if (!res && !res->start)
+ if (!res || !res->start)
goto fail_unmap_regs;
esp->dma_regs = ioremap_nocache(res->start, 0x10);
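
The sun3x_esp hunks fix a genuine logic bug rather than a style issue. With &&, short-circuit evaluation reaches !res->start exactly when res is NULL, so the old test oopses in the failure case it was meant to catch, and can never trigger when res is valid:

        if (!res && !res->start)        /* old: NULL dereference when res == NULL,
                                           never true when res != NULL */
        if (!res || !res->start)        /* new: bails out before the dereference,
                                           and still catches a zero start */
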
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c
index f7d279542fa5..e5c369bb568f 100644
--- a/drivers/scsi/sym53c416.c
+++ b/drivers/scsi/sym53c416.c
@@ -6,7 +6,7 @@
* Changes :
*
* Marcelo Tosatti <marcelo@conectiva.com.br> : Added io_request_lock locking
- * Alan Cox <alan@redhat.com> : Cleaned up code formatting
+ * Alan Cox <alan@lxorguk.ukuu.org.uk> : Cleaned up code formatting
* Fixed an irq locking bug
* Added ISAPnP support
* Bjoern A. Zeeb <bzeeb@zabbadoz.net> : Initial irq locking updates
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 69ac6e590f1d..9a4273445c0d 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -2572,9 +2572,10 @@ static struct pci_driver dc390_driver = {
static int __init dc390_module_init(void)
{
- if (!disable_clustering)
+ if (!disable_clustering) {
printk(KERN_INFO "DC390: clustering now enabled by default. If you get problems load\n");
printk(KERN_INFO " with \"disable_clustering=1\" and report to maintainers\n");
+ }
if (tmscsim[0] == -1 || tmscsim[0] > 15) {
tmscsim[0] = 7;
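
The tmscsim hunk is the classic dangling-statement fix: without braces only the first printk() was governed by the if, so the second half of the message was printed even when clustering had been explicitly disabled:

        if (!disable_clustering)
                printk(KERN_INFO "DC390: clustering now enabled by default...\n");
        printk(KERN_INFO "       with \"disable_clustering=1\"...\n");
                /* ^ ran unconditionally before the braces were added */
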
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 329eb8780e74..601e95141cbe 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1111,7 +1111,8 @@ static int u14_34f_detect(struct scsi_host_template *tpnt) {
static void map_dma(unsigned int i, unsigned int j) {
unsigned int data_len = 0;
- unsigned int k, count, pci_dir;
+ unsigned int k, pci_dir;
+ int count;
struct scatterlist *sg;
struct mscp *cpp;
struct scsi_cmnd *SCpnt;
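
The u14-34f change looks cosmetic but is presumably a signedness fix: if map_dma() follows the usual pattern of storing the scsi_dma_map() return value in count, that value is an int which can be negative on a mapping failure, and any negativity check against an unsigned count would be dead code. A sketch of the assumed pattern:

        int count;

        count = scsi_dma_map(SCpnt);    /* number of mapped entries, or < 0 */
        BUG_ON(count < 0);              /* always false if count were unsigned */
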
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index d4c13561f4a6..093610bcfcce 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -146,13 +146,13 @@
*
* use host->host_lock, not io_request_lock, cleanups
*
- * 2002/10/04 - Alan Cox <alan@redhat.com>
+ * 2002/10/04 - Alan Cox <alan@lxorguk.ukuu.org.uk>
*
* Use dev_id for interrupts, kill __func__ pasting
* Add a lock for the scb pool, clean up all other cli/sti usage stuff
* Use the adapter lock for the other places we had the cli's
*
- * 2002/10/06 - Alan Cox <alan@redhat.com>
+ * 2002/10/06 - Alan Cox <alan@lxorguk.ukuu.org.uk>
*
* Switch to new style error handling
* Clean up delay to udelay, and yielding sleeps