author     Ingo Molnar <mingo@elte.hu>  2008-07-19 00:50:34 +0400
committer  Ingo Molnar <mingo@elte.hu>  2008-07-19 00:50:34 +0400
commit     a208f37a465e222218974ab20a31b42b7b4893b2 (patch)
tree       77c6acdd4be32024330a14f2618b814126ce7a20 /drivers/scsi
parent     511d9d34183662aada3890883e860b151d707e22 (diff)
parent     5b664cb235e97afbf34db9c4d77f08ebd725335e (diff)
Merge branch 'linus' into x86/x2apic
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/3w-9xxx.c | 3
-rw-r--r--  drivers/scsi/3w-xxxx.c | 3
-rw-r--r--  drivers/scsi/Kconfig | 27
-rw-r--r--  drivers/scsi/Makefile | 2
-rw-r--r--  drivers/scsi/a100u2w.c | 49
-rw-r--r--  drivers/scsi/aacraid/commctrl.c | 33
-rw-r--r--  drivers/scsi/aacraid/linit.c | 5
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_sds.c | 12
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_sds.h | 4
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_seq.c | 7
-rw-r--r--  drivers/scsi/arm/Kconfig | 2
-rw-r--r--  drivers/scsi/arm/acornscsi-io.S | 15
-rw-r--r--  drivers/scsi/arm/acornscsi.c | 426
-rw-r--r--  drivers/scsi/arm/acornscsi.h | 9
-rw-r--r--  drivers/scsi/ch.c | 4
-rw-r--r--  drivers/scsi/device_handler/Kconfig | 32
-rw-r--r--  drivers/scsi/device_handler/Makefile | 7
-rw-r--r--  drivers/scsi/device_handler/scsi_dh.c | 162
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_emc.c | 504
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_hp_sw.c | 207
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 696
-rw-r--r--  drivers/scsi/dpt_i2o.c | 5
-rw-r--r--  drivers/scsi/esp_scsi.c | 24
-rw-r--r--  drivers/scsi/gdth.c | 3
-rw-r--r--  drivers/scsi/hosts.c | 2
-rw-r--r--  drivers/scsi/ibmvscsi/Makefile | 1
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 3910
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.h | 682
-rw-r--r--  drivers/scsi/ide-scsi.c | 312
-rw-r--r--  drivers/scsi/ipr.c | 6
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 514
-rw-r--r--  drivers/scsi/iscsi_tcp.h | 7
-rw-r--r--  drivers/scsi/libiscsi.c | 1359
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 21
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 22
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 181
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 120
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 34
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 145
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 232
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 49
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 16
-rw-r--r--  drivers/scsi/megaraid.c | 5
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.c | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.c | 2
-rw-r--r--  drivers/scsi/mesh.c | 8
-rw-r--r--  drivers/scsi/osst.c | 15
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 8
-rw-r--r--  drivers/scsi/scsi.c | 9
-rw-r--r--  drivers/scsi/scsi_debug.c | 110
-rw-r--r--  drivers/scsi/scsi_error.c | 11
-rw-r--r--  drivers/scsi/scsi_lib.c | 44
-rw-r--r--  drivers/scsi/scsi_scan.c | 2
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 1
-rw-r--r--  drivers/scsi/scsi_tgt_if.c | 2
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 395
-rw-r--r--  drivers/scsi/sd.c | 12
-rw-r--r--  drivers/scsi/sd.h | 62
-rw-r--r--  drivers/scsi/sg.c | 63
-rw-r--r--  drivers/scsi/sr.c | 20
-rw-r--r--  drivers/scsi/st.c | 11
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_misc.h | 4
65 files changed, 8518 insertions(+), 2130 deletions(-)
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 867f6fd5c2c0..7045511f9ad2 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -84,6 +84,7 @@
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/mutex.h>
+#include <linux/smp_lock.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
@@ -862,11 +863,13 @@ out:
} /* End twa_chrdev_ioctl() */
/* This function handles open for the character device */
+/* NOTE that this function will race with remove. */
static int twa_chrdev_open(struct inode *inode, struct file *file)
{
unsigned int minor_number;
int retval = TW_IOCTL_ERROR_OS_ENODEV;
+ cycle_kernel_lock();
minor_number = iminor(inode);
if (minor_number >= twa_device_extension_count)
goto out;
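
Note on the cycle_kernel_lock() call added above: this is the BKL-pushdown idiom. Character-device open() used to run with the Big Kernel Lock held; now that the VFS no longer takes it, a driver that relied on that serialization takes and immediately drops the BKL itself. A minimal sketch of the pattern, assuming a hypothetical mydev driver:

#include <linux/fs.h>
#include <linux/smp_lock.h>

#define MYDEV_MAX 4
static void *mydev_state[MYDEV_MAX];	/* hypothetical per-minor state */

static int mydev_open(struct inode *inode, struct file *file)
{
	/* open() is no longer called under the BKL; take and release it
	 * once so we still serialize against any old-style BKL holder. */
	cycle_kernel_lock();

	if (iminor(inode) >= MYDEV_MAX)
		return -ENODEV;
	file->private_data = mydev_state[iminor(inode)];
	return 0;
}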
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 8c22329aa85e..a0537f09aa21 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -198,6 +198,7 @@
#include <linux/module.h>
#include <linux/reboot.h>
+#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
@@ -1027,10 +1028,12 @@ out:
} /* End tw_chrdev_ioctl() */
/* This function handles open for the character device */
+/* NOTE that this function races with remove. */
static int tw_chrdev_open(struct inode *inode, struct file *file)
{
unsigned int minor_number;
+ cycle_kernel_lock();
dprintk(KERN_WARNING "3w-xxxx: tw_ioctl_open()\n");
minor_number = iminor(inode);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 81ccbd7f9e34..26be540d1dd3 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -888,6 +888,25 @@ config SCSI_IBMVSCSIS
To compile this driver as a module, choose M here: the
module will be called ibmvstgt.
+config SCSI_IBMVFC
+ tristate "IBM Virtual FC support"
+ depends on PPC_PSERIES && SCSI
+ select SCSI_FC_ATTRS
+ help
+ This is the IBM POWER Virtual FC Client
+
+ To compile this driver as a module, choose M here: the
+ module will be called ibmvfc.
+
+config SCSI_IBMVFC_TRACE
+ bool "enable driver internal trace"
+ depends on SCSI_IBMVFC
+ default y
+ help
+ If you say Y here, the driver will trace all commands issued
+ to the adapter. Performance impact is minimal. Trace can be
+ dumped using /sys/class/scsi_host/hostXX/trace.
+
config SCSI_INITIO
tristate "Initio 9100U(W) support"
depends on PCI && SCSI
@@ -1738,10 +1757,12 @@ config SCSI_SUNESP
select SCSI_SPI_ATTRS
help
This is the driver for the Sun ESP SCSI host adapter. The ESP
- chipset is present in most SPARC SBUS-based computers.
+ chipset is present in most SPARC SBUS-based computers and
+ supports the Emulex family of ESP SCSI chips (esp100, esp100A,
+ esp236, fas101, fas236) as well as the Qlogic fas366 SCSI chip.
To compile this driver as a module, choose M here: the
- module will be called esp.
+ module will be called sun_esp.
config ZFCP
tristate "FCP host bus adapter driver for IBM eServer zSeries"
@@ -1771,4 +1792,6 @@ endif # SCSI_LOWLEVEL
source "drivers/scsi/pcmcia/Kconfig"
+source "drivers/scsi/device_handler/Kconfig"
+
endmenu
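
For context, a bool option like SCSI_IBMVFC_TRACE above usually gates driver code through the generated CONFIG_ macro. A hedged sketch of the compile-time switch (the my_trace_cmd() helper and its argument types are hypothetical, not the ibmvfc driver's actual API):

struct my_host;
struct my_event;

#ifdef CONFIG_SCSI_IBMVFC_TRACE
static void my_trace_cmd(struct my_host *host, struct my_event *evt)
{
	/* record the command into a ring buffer, later dumped through
	 * /sys/class/scsi_host/hostXX/trace as the help text says */
}
#else
/* compiles away entirely when the option is disabled */
static inline void my_trace_cmd(struct my_host *host, struct my_event *evt) { }
#endif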
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 6c775e350c98..a8149677de23 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_SCSI_ISCSI_ATTRS) += scsi_transport_iscsi.o
obj-$(CONFIG_SCSI_SAS_ATTRS) += scsi_transport_sas.o
obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas/
obj-$(CONFIG_SCSI_SRP_ATTRS) += scsi_transport_srp.o
+obj-$(CONFIG_SCSI_DH) += device_handler/
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o
obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
@@ -118,6 +119,7 @@ obj-$(CONFIG_SCSI_IPR) += ipr.o
obj-$(CONFIG_SCSI_SRP) += libsrp.o
obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/
obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/
+obj-$(CONFIG_SCSI_IBMVFC) += ibmvscsi/
obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
obj-$(CONFIG_SCSI_STEX) += stex.o
obj-$(CONFIG_SCSI_MVSAS) += mvsas.o
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index ced3eebe252c..84bb61628372 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -389,7 +389,7 @@ static u8 orc_load_firmware(struct orc_host * host)
outb(PRGMRST | DOWNLOAD, host->base + ORC_RISCCTL); /* Enable SRAM programming */
data32_ptr = (u8 *) & data32;
- data32 = 0; /* Initial FW address to 0 */
+ data32 = cpu_to_le32(0); /* Initial FW address to 0 */
outw(0x0010, host->base + ORC_EBIOSADR0);
*data32_ptr = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
outw(0x0011, host->base + ORC_EBIOSADR0);
@@ -397,18 +397,19 @@ static u8 orc_load_firmware(struct orc_host * host)
outw(0x0012, host->base + ORC_EBIOSADR0);
*(data32_ptr + 2) = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
outw(*(data32_ptr + 2), host->base + ORC_EBIOSADR2);
- outl(data32, host->base + ORC_FWBASEADR); /* Write FW address */
+ outl(le32_to_cpu(data32), host->base + ORC_FWBASEADR); /* Write FW address */
/* Copy the code from the BIOS to the SRAM */
- bios_addr = (u16) data32; /* FW code locate at BIOS address + ? */
+ udelay(500); /* Required on Sun Ultra 5 ... 350 -> failures */
+ bios_addr = (u16) le32_to_cpu(data32); /* FW code locate at BIOS address + ? */
for (i = 0, data32_ptr = (u8 *) & data32; /* Download the code */
i < 0x1000; /* Firmware code size = 4K */
i++, bios_addr++) {
outw(bios_addr, host->base + ORC_EBIOSADR0);
*data32_ptr++ = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
if ((i % 4) == 3) {
- outl(data32, host->base + ORC_RISCRAM); /* Write every 4 bytes */
+ outl(le32_to_cpu(data32), host->base + ORC_RISCRAM); /* Write every 4 bytes */
data32_ptr = (u8 *) & data32;
}
}
@@ -423,7 +424,7 @@ static u8 orc_load_firmware(struct orc_host * host)
outw(bios_addr, host->base + ORC_EBIOSADR0);
*data32_ptr++ = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
if ((i % 4) == 3) {
- if (inl(host->base + ORC_RISCRAM) != data32) {
+ if (inl(host->base + ORC_RISCRAM) != le32_to_cpu(data32)) {
outb(PRGMRST, host->base + ORC_RISCCTL); /* Reset program to 0 */
outb(data, host->base + ORC_GCFG); /*Disable EEPROM programming */
return 0;
@@ -459,8 +460,8 @@ static void setup_SCBs(struct orc_host * host)
for (i = 0; i < ORC_MAXQUEUE; i++) {
escb_phys = (host->escb_phys + (sizeof(struct orc_extended_scb) * i));
- scb->sg_addr = (u32) escb_phys;
- scb->sense_addr = (u32) escb_phys;
+ scb->sg_addr = cpu_to_le32((u32) escb_phys);
+ scb->sense_addr = cpu_to_le32((u32) escb_phys);
scb->escb = escb;
scb->scbidx = i;
scb++;
@@ -642,8 +643,8 @@ static int orc_device_reset(struct orc_host * host, struct scsi_cmnd *cmd, unsig
scb->link = 0xFF;
scb->reserved0 = 0;
scb->reserved1 = 0;
- scb->xferlen = 0;
- scb->sg_len = 0;
+ scb->xferlen = cpu_to_le32(0);
+ scb->sg_len = cpu_to_le32(0);
escb->srb = NULL;
escb->srb = cmd;
@@ -839,7 +840,7 @@ static irqreturn_t orc_interrupt(struct orc_host * host)
* Build a host adapter control block from the SCSI mid layer command
*/
-static void inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struct scsi_cmnd * cmd)
+static int inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struct scsi_cmnd * cmd)
{ /* Create corresponding SCB */
struct scatterlist *sg;
struct orc_sgent *sgent; /* Pointer to SG list */
@@ -858,28 +859,30 @@ static void inia100_build_scb(struct orc_host * host, struct orc_scb * scb, stru
scb->lun = cmd->device->lun;
scb->reserved0 = 0;
scb->reserved1 = 0;
- scb->sg_len = 0;
+ scb->sg_len = cpu_to_le32(0);
- scb->xferlen = (u32) scsi_bufflen(cmd);
+ scb->xferlen = cpu_to_le32((u32) scsi_bufflen(cmd));
sgent = (struct orc_sgent *) & escb->sglist[0];
count_sg = scsi_dma_map(cmd);
- BUG_ON(count_sg < 0);
+ if (count_sg < 0)
+ return count_sg;
+ BUG_ON(count_sg > TOTAL_SG_ENTRY);
/* Build the scatter gather lists */
if (count_sg) {
- scb->sg_len = (u32) (count_sg * 8);
+ scb->sg_len = cpu_to_le32((u32) (count_sg * 8));
scsi_for_each_sg(cmd, sg, count_sg, i) {
- sgent->base = (u32) sg_dma_address(sg);
- sgent->length = (u32) sg_dma_len(sg);
+ sgent->base = cpu_to_le32((u32) sg_dma_address(sg));
+ sgent->length = cpu_to_le32((u32) sg_dma_len(sg));
sgent++;
}
} else {
- scb->sg_len = 0;
- sgent->base = 0;
- sgent->length = 0;
+ scb->sg_len = cpu_to_le32(0);
+ sgent->base = cpu_to_le32(0);
+ sgent->length = cpu_to_le32(0);
}
- scb->sg_addr = (u32) scb->sense_addr;
+ scb->sg_addr = (u32) scb->sense_addr; /* sense_addr is already little endian */
scb->hastat = 0;
scb->tastat = 0;
scb->link = 0xFF;
@@ -896,6 +899,7 @@ static void inia100_build_scb(struct orc_host * host, struct orc_scb * scb, stru
scb->tag_msg = 0; /* No tag support */
}
memcpy(scb->cdb, cmd->cmnd, scb->cdb_len);
+ return 0;
}
/**
@@ -919,7 +923,10 @@ static int inia100_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd
if ((scb = orc_alloc_scb(host)) == NULL)
return SCSI_MLQUEUE_HOST_BUSY;
- inia100_build_scb(host, scb, cmd);
+ if (inia100_build_scb(host, scb, cmd)) {
+ orc_release_scb(host, scb);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
orc_exec_scb(host, scb); /* Start execute SCB */
return 0;
}
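
The a100u2w changes above apply the standard rule for DMA-visible structures: the controller consumes little-endian values, so host code stores fields with cpu_to_le32() and reads them back with le32_to_cpu(), which are no-ops on little-endian CPUs and byte swaps on big-endian ones. A minimal sketch of the idiom, using a hypothetical descriptor:

#include <linux/types.h>
#include <asm/byteorder.h>

/* A scatter-gather entry the controller DMAs from host memory; it
 * expects little-endian fields regardless of CPU byte order, so the
 * fields are declared __le32 and filled via cpu_to_le32(). */
struct my_sg_entry {
	__le32 base;	/* bus address of the segment */
	__le32 length;	/* segment length in bytes */
};

static void my_fill_sg(struct my_sg_entry *ent, u32 addr, u32 len)
{
	ent->base = cpu_to_le32(addr);	/* swapped on big-endian hosts */
	ent->length = cpu_to_le32(len);	/* no-op on little-endian hosts */
}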
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 5fd83deab36c..a7355260cfcf 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -41,6 +41,7 @@
#include <linux/kthread.h>
#include <linux/semaphore.h>
#include <asm/uaccess.h>
+#include <scsi/scsi_host.h>
#include "aacraid.h"
@@ -581,6 +582,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
for (i = 0; i < upsg->count; i++) {
u64 addr;
void* p;
+ if (upsg->sg[i].count >
+ (dev->adapter_info.options &
+ AAC_OPT_NEW_COMM) ?
+ (dev->scsi_host_ptr->max_sectors << 9) :
+ 65536) {
+ rcode = -EINVAL;
+ goto cleanup;
+ }
/* Does this really need to be GFP_DMA? */
p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA);
if(!p) {
@@ -625,6 +634,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
for (i = 0; i < usg->count; i++) {
u64 addr;
void* p;
+ if (usg->sg[i].count >
+ (dev->adapter_info.options &
+ AAC_OPT_NEW_COMM) ?
+ (dev->scsi_host_ptr->max_sectors << 9) :
+ 65536) {
+ rcode = -EINVAL;
+ goto cleanup;
+ }
/* Does this really need to be GFP_DMA? */
p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
if(!p) {
@@ -667,6 +684,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
for (i = 0; i < upsg->count; i++) {
uintptr_t addr;
void* p;
+ if (usg->sg[i].count >
+ (dev->adapter_info.options &
+ AAC_OPT_NEW_COMM) ?
+ (dev->scsi_host_ptr->max_sectors << 9) :
+ 65536) {
+ rcode = -EINVAL;
+ goto cleanup;
+ }
/* Does this really need to be GFP_DMA? */
p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
if(!p) {
@@ -698,6 +723,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
for (i = 0; i < upsg->count; i++) {
dma_addr_t addr;
void* p;
+ if (upsg->sg[i].count >
+ (dev->adapter_info.options &
+ AAC_OPT_NEW_COMM) ?
+ (dev->scsi_host_ptr->max_sectors << 9) :
+ 65536) {
+ rcode = -EINVAL;
+ goto cleanup;
+ }
p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
if (!p) {
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
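
A side note on the sg-count checks added above: in C, '>' binds tighter than '?:', so "count > flag ? max : 65536" parses as "(count > flag) ? max : 65536". A hedged sketch of the presumably intended comparison, factored into a hypothetical helper with the ternary parenthesized:

/* Hypothetical helper; aac_dev, AAC_OPT_NEW_COMM and max_sectors come
 * from the driver above. The parentheses around the ternary matter:
 * without them the limit expression is never actually applied. */
static int aac_check_sg_count(struct aac_dev *dev, unsigned int count)
{
	unsigned int limit = (dev->adapter_info.options & AAC_OPT_NEW_COMM) ?
			(dev->scsi_host_ptr->max_sectors << 9) : 65536;

	return count > limit ? -EINVAL : 0;
}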
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 1f7c83607f84..9aa301c1ed07 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -38,6 +38,7 @@
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/slab.h>
+#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
@@ -667,6 +668,7 @@ static int aac_cfg_open(struct inode *inode, struct file *file)
unsigned minor_number = iminor(inode);
int err = -ENODEV;
+ lock_kernel(); /* BKL pushdown: nothing else protects this list */
list_for_each_entry(aac, &aac_devices, entry) {
if (aac->id == minor_number) {
file->private_data = aac;
@@ -674,6 +676,7 @@ static int aac_cfg_open(struct inode *inode, struct file *file)
break;
}
}
+ unlock_kernel();
return err;
}
@@ -862,7 +865,7 @@ static ssize_t aac_show_bios_version(struct device *device,
return len;
}
-ssize_t aac_show_serial_number(struct device *device,
+static ssize_t aac_show_serial_number(struct device *device,
struct device_attribute *attr, char *buf)
{
struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
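
Making aac_show_serial_number static above is the usual shape for a sysfs show routine that is referenced only through a device_attribute table. A generic sketch with hypothetical names:

#include <linux/device.h>
#include <linux/stat.h>

static ssize_t my_show_serial(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	/* show routines return the number of bytes placed in buf */
	return snprintf(buf, PAGE_SIZE, "%s\n", "0042");
}

/* No external linkage needed: only this table refers to the function. */
static struct device_attribute my_attr_serial = {
	.attr = { .name = "serial_number", .mode = S_IRUGO },
	.show = my_show_serial,
};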
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c
index 4446e3d584dc..8630a75b2872 100644
--- a/drivers/scsi/aic94xx/aic94xx_sds.c
+++ b/drivers/scsi/aic94xx/aic94xx_sds.c
@@ -1093,9 +1093,9 @@ out:
* @bytes_to_verify: total bytes to verify
*/
int asd_verify_flash_seg(struct asd_ha_struct *asd_ha,
- void *src, u32 dest_offset, u32 bytes_to_verify)
+ const void *src, u32 dest_offset, u32 bytes_to_verify)
{
- u8 *src_buf;
+ const u8 *src_buf;
u8 flash_char;
int err;
u32 nv_offset, reg, i;
@@ -1105,7 +1105,7 @@ int asd_verify_flash_seg(struct asd_ha_struct *asd_ha,
err = FLASH_OK;
nv_offset = dest_offset;
- src_buf = (u8 *)src;
+ src_buf = (const u8 *)src;
for (i = 0; i < bytes_to_verify; i++) {
flash_char = asd_read_reg_byte(asd_ha, reg + nv_offset + i);
if (flash_char != src_buf[i]) {
@@ -1124,9 +1124,9 @@ int asd_verify_flash_seg(struct asd_ha_struct *asd_ha,
* @bytes_to_write: total bytes to write
*/
int asd_write_flash_seg(struct asd_ha_struct *asd_ha,
- void *src, u32 dest_offset, u32 bytes_to_write)
+ const void *src, u32 dest_offset, u32 bytes_to_write)
{
- u8 *src_buf;
+ const u8 *src_buf;
u32 nv_offset, reg, i;
int err;
@@ -1153,7 +1153,7 @@ int asd_write_flash_seg(struct asd_ha_struct *asd_ha,
return err;
}
- src_buf = (u8 *)src;
+ src_buf = (const u8 *)src;
for (i = 0; i < bytes_to_write; i++) {
/* Setup program command sequence */
switch (asd_ha->hw_prof.flash.method) {
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.h b/drivers/scsi/aic94xx/aic94xx_sds.h
index bb9795a04dc3..a06dc0114b8c 100644
--- a/drivers/scsi/aic94xx/aic94xx_sds.h
+++ b/drivers/scsi/aic94xx/aic94xx_sds.h
@@ -110,9 +110,9 @@ struct bios_file_header {
};
int asd_verify_flash_seg(struct asd_ha_struct *asd_ha,
- void *src, u32 dest_offset, u32 bytes_to_verify);
+ const void *src, u32 dest_offset, u32 bytes_to_verify);
int asd_write_flash_seg(struct asd_ha_struct *asd_ha,
- void *src, u32 dest_offset, u32 bytes_to_write);
+ const void *src, u32 dest_offset, u32 bytes_to_write);
int asd_chk_write_status(struct asd_ha_struct *asd_ha,
u32 sector_addr, u8 erase_flag);
int asd_check_flash_type(struct asd_ha_struct *asd_ha);
diff --git a/drivers/scsi/aic94xx/aic94xx_seq.c b/drivers/scsi/aic94xx/aic94xx_seq.c
index f4272ac4c685..8f98e33155e9 100644
--- a/drivers/scsi/aic94xx/aic94xx_seq.c
+++ b/drivers/scsi/aic94xx/aic94xx_seq.c
@@ -46,7 +46,7 @@
static const struct firmware *sequencer_fw;
static u16 cseq_vecs[CSEQ_NUM_VECS], lseq_vecs[LSEQ_NUM_VECS], mode2_task,
cseq_idle_loop, lseq_idle_loop;
-static u8 *cseq_code, *lseq_code;
+static const u8 *cseq_code, *lseq_code;
static u32 cseq_code_size, lseq_code_size;
static u16 first_scb_site_no = 0xFFFF;
@@ -1235,7 +1235,8 @@ int asd_release_firmware(void)
static int asd_request_firmware(struct asd_ha_struct *asd_ha)
{
int err, i;
- struct sequencer_file_header header, *hdr_ptr;
+ struct sequencer_file_header header;
+ const struct sequencer_file_header *hdr_ptr;
u32 csum = 0;
u16 *ptr_cseq_vecs, *ptr_lseq_vecs;
@@ -1249,7 +1250,7 @@ static int asd_request_firmware(struct asd_ha_struct *asd_ha)
if (err)
return err;
- hdr_ptr = (struct sequencer_file_header *)sequencer_fw->data;
+ hdr_ptr = (const struct sequencer_file_header *)sequencer_fw->data;
header.csum = le32_to_cpu(hdr_ptr->csum);
header.major = le32_to_cpu(hdr_ptr->major);
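
The const-propagation through aic94xx_sds and aic94xx_seq follows from the firmware loader's contract: request_firmware() returns read-only image data (fw->data is const u8 *), so every consumer down the call chain should take const pointers. A minimal usage sketch; the firmware file name here is an assumption:

#include <linux/firmware.h>
#include <linux/device.h>

static int my_load_sequencer(struct device *dev)
{
	const struct firmware *fw;
	const u8 *code;		/* const, hence the const-correct helpers
				 * in the patch above */
	int err;

	err = request_firmware(&fw, "aic94xx-seq.fw", dev);
	if (err)
		return err;

	code = fw->data;
	/* parse/checksum/download code[0 .. fw->size - 1], read-only */

	release_firmware(fw);
	return 0;
}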
diff --git a/drivers/scsi/arm/Kconfig b/drivers/scsi/arm/Kconfig
index 7236143941f3..a8587f1f5e7e 100644
--- a/drivers/scsi/arm/Kconfig
+++ b/drivers/scsi/arm/Kconfig
@@ -3,7 +3,7 @@
#
config SCSI_ACORNSCSI_3
tristate "Acorn SCSI card (aka30) support"
- depends on ARCH_ACORN && SCSI && BROKEN
+ depends on ARCH_ACORN && SCSI
select SCSI_SPI_ATTRS
help
This enables support for the Acorn SCSI card (aka30). If you have an
diff --git a/drivers/scsi/arm/acornscsi-io.S b/drivers/scsi/arm/acornscsi-io.S
index 93467e6ac923..5cebe3105260 100644
--- a/drivers/scsi/arm/acornscsi-io.S
+++ b/drivers/scsi/arm/acornscsi-io.S
@@ -10,17 +10,10 @@
#include <asm/assembler.h>
#include <asm/hardware.h>
-#if (IO_BASE == (PCIO_BASE & 0xff000000))
-#define ADDR(off,reg) \
- tst off, $0x80000000 ;\
- mov reg, $IO_BASE ;\
- orreq reg, reg, $(PCIO_BASE & 0x00ff0000)
-#else
-#define ADDR(off,reg) \
- tst off, $0x80000000 ;\
- movne reg, $IO_BASE ;\
- moveq reg, $(PCIO_BASE & 0xff000000) ;\
- orreq reg, reg, $(PCIO_BASE & 0x00ff0000)
+#if defined(__APCS_32__)
+#define LOADREGS(t,r,l...) ldm##t r, l
+#elif defined(__APCS_26__)
+#define LOADREGS(t,r,l...) ldm##t r, l##^
#endif
@ Purpose: transfer a block of data from the acorn scsi card to memory
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 8e53f02cc311..918ccf818757 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -123,12 +123,6 @@
#define DBG(cmd,xxx...) xxx
#endif
-#ifndef STRINGIFY
-#define STRINGIFY(x) #x
-#endif
-#define STRx(x) STRINGIFY(x)
-#define NO_WRITE_STR STRx(NO_WRITE)
-
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
@@ -141,9 +135,10 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/bitops.h>
+#include <linux/stringify.h>
+#include <linux/io.h>
#include <asm/system.h>
-#include <asm/io.h>
#include <asm/ecard.h>
#include "../scsi.h"
@@ -203,44 +198,46 @@ static void acornscsi_abortcmd(AS_Host *host, unsigned char tag);
* Miscellaneous
*/
-static inline void
-sbic_arm_write(unsigned int io_port, int reg, int value)
+/* Offsets from MEMC base */
+#define SBIC_REGIDX 0x2000
+#define SBIC_REGVAL 0x2004
+#define DMAC_OFFSET 0x3000
+
+/* Offsets from FAST IOC base */
+#define INT_REG 0x2000
+#define PAGE_REG 0x3000
+
+static inline void sbic_arm_write(AS_Host *host, unsigned int reg, unsigned int value)
{
- __raw_writeb(reg, io_port);
- __raw_writeb(value, io_port + 4);
+ writeb(reg, host->base + SBIC_REGIDX);
+ writeb(value, host->base + SBIC_REGVAL);
}
-#define sbic_arm_writenext(io,val) \
- __raw_writeb((val), (io) + 4)
-
-static inline
-int sbic_arm_read(unsigned int io_port, int reg)
+static inline int sbic_arm_read(AS_Host *host, unsigned int reg)
{
if(reg == SBIC_ASR)
- return __raw_readl(io_port) & 255;
- __raw_writeb(reg, io_port);
- return __raw_readl(io_port + 4) & 255;
+ return readl(host->base + SBIC_REGIDX) & 255;
+ writeb(reg, host->base + SBIC_REGIDX);
+ return readl(host->base + SBIC_REGVAL) & 255;
}
-#define sbic_arm_readnext(io) \
- __raw_readb((io) + 4)
+#define sbic_arm_writenext(host, val) writeb((val), (host)->base + SBIC_REGVAL)
+#define sbic_arm_readnext(host) readb((host)->base + SBIC_REGVAL)
#ifdef USE_DMAC
-#define dmac_read(io_port,reg) \
- inb((io_port) + (reg))
+#define dmac_read(host,reg) \
+ readb((host)->base + DMAC_OFFSET + ((reg) << 2))
-#define dmac_write(io_port,reg,value) \
- ({ outb((value), (io_port) + (reg)); })
+#define dmac_write(host,reg,value) \
+ ({ writeb((value), (host)->base + DMAC_OFFSET + ((reg) << 2)); })
-#define dmac_clearintr(io_port) \
- ({ outb(0, (io_port)); })
+#define dmac_clearintr(host) writeb(0, (host)->fast + INT_REG)
-static inline
-unsigned int dmac_address(unsigned int io_port)
+static inline unsigned int dmac_address(AS_Host *host)
{
- return dmac_read(io_port, DMAC_TXADRHI) << 16 |
- dmac_read(io_port, DMAC_TXADRMD) << 8 |
- dmac_read(io_port, DMAC_TXADRLO);
+ return dmac_read(host, DMAC_TXADRHI) << 16 |
+ dmac_read(host, DMAC_TXADRMD) << 8 |
+ dmac_read(host, DMAC_TXADRLO);
}
static
@@ -248,15 +245,15 @@ void acornscsi_dumpdma(AS_Host *host, char *where)
{
unsigned int mode, addr, len;
- mode = dmac_read(host->dma.io_port, DMAC_MODECON);
- addr = dmac_address(host->dma.io_port);
- len = dmac_read(host->dma.io_port, DMAC_TXCNTHI) << 8 |
- dmac_read(host->dma.io_port, DMAC_TXCNTLO);
+ mode = dmac_read(host, DMAC_MODECON);
+ addr = dmac_address(host);
+ len = dmac_read(host, DMAC_TXCNTHI) << 8 |
+ dmac_read(host, DMAC_TXCNTLO);
printk("scsi%d: %s: DMAC %02x @%06x+%04x msk %02x, ",
host->host->host_no, where,
mode, addr, (len + 1) & 0xffff,
- dmac_read(host->dma.io_port, DMAC_MASKREG));
+ dmac_read(host, DMAC_MASKREG));
printk("DMA @%06x, ", host->dma.start_addr);
printk("BH @%p +%04x, ", host->scsi.SCp.ptr,
@@ -272,9 +269,9 @@ unsigned long acornscsi_sbic_xfcount(AS_Host *host)
{
unsigned long length;
- length = sbic_arm_read(host->scsi.io_port, SBIC_TRANSCNTH) << 16;
- length |= sbic_arm_readnext(host->scsi.io_port) << 8;
- length |= sbic_arm_readnext(host->scsi.io_port);
+ length = sbic_arm_read(host, SBIC_TRANSCNTH) << 16;
+ length |= sbic_arm_readnext(host) << 8;
+ length |= sbic_arm_readnext(host);
return length;
}
@@ -285,7 +282,7 @@ acornscsi_sbic_wait(AS_Host *host, int stat_mask, int stat, int timeout, char *m
int asr;
do {
- asr = sbic_arm_read(host->scsi.io_port, SBIC_ASR);
+ asr = sbic_arm_read(host, SBIC_ASR);
if ((asr & stat_mask) == stat)
return 0;
@@ -304,7 +301,7 @@ int acornscsi_sbic_issuecmd(AS_Host *host, int command)
if (acornscsi_sbic_wait(host, ASR_CIP, 0, 1000, "issuing command"))
return -1;
- sbic_arm_write(host->scsi.io_port, SBIC_CMND, command);
+ sbic_arm_write(host, SBIC_CMND, command);
return 0;
}
@@ -331,20 +328,20 @@ void acornscsi_resetcard(AS_Host *host)
/* assert reset line */
host->card.page_reg = 0x80;
- outb(host->card.page_reg, host->card.io_page);
+ writeb(host->card.page_reg, host->fast + PAGE_REG);
/* wait 3 cs. SCSI standard says 25ms. */
acornscsi_csdelay(3);
host->card.page_reg = 0;
- outb(host->card.page_reg, host->card.io_page);
+ writeb(host->card.page_reg, host->fast + PAGE_REG);
/*
* Should get a reset from the card
*/
timeout = 1000;
do {
- if (inb(host->card.io_intr) & 8)
+ if (readb(host->fast + INT_REG) & 8)
break;
udelay(1);
} while (--timeout);
@@ -353,19 +350,19 @@ void acornscsi_resetcard(AS_Host *host)
printk("scsi%d: timeout while resetting card\n",
host->host->host_no);
- sbic_arm_read(host->scsi.io_port, SBIC_ASR);
- sbic_arm_read(host->scsi.io_port, SBIC_SSR);
+ sbic_arm_read(host, SBIC_ASR);
+ sbic_arm_read(host, SBIC_SSR);
/* setup sbic - WD33C93A */
- sbic_arm_write(host->scsi.io_port, SBIC_OWNID, OWNID_EAF | host->host->this_id);
- sbic_arm_write(host->scsi.io_port, SBIC_CMND, CMND_RESET);
+ sbic_arm_write(host, SBIC_OWNID, OWNID_EAF | host->host->this_id);
+ sbic_arm_write(host, SBIC_CMND, CMND_RESET);
/*
* Command should cause a reset interrupt
*/
timeout = 1000;
do {
- if (inb(host->card.io_intr) & 8)
+ if (readb(host->fast + INT_REG) & 8)
break;
udelay(1);
} while (--timeout);
@@ -374,26 +371,26 @@ void acornscsi_resetcard(AS_Host *host)
printk("scsi%d: timeout while resetting card\n",
host->host->host_no);
- sbic_arm_read(host->scsi.io_port, SBIC_ASR);
- if (sbic_arm_read(host->scsi.io_port, SBIC_SSR) != 0x01)
+ sbic_arm_read(host, SBIC_ASR);
+ if (sbic_arm_read(host, SBIC_SSR) != 0x01)
printk(KERN_CRIT "scsi%d: WD33C93A didn't give enhanced reset interrupt\n",
host->host->host_no);
- sbic_arm_write(host->scsi.io_port, SBIC_CTRL, INIT_SBICDMA | CTRL_IDI);
- sbic_arm_write(host->scsi.io_port, SBIC_TIMEOUT, TIMEOUT_TIME);
- sbic_arm_write(host->scsi.io_port, SBIC_SYNCHTRANSFER, SYNCHTRANSFER_2DBA);
- sbic_arm_write(host->scsi.io_port, SBIC_SOURCEID, SOURCEID_ER | SOURCEID_DSP);
+ sbic_arm_write(host, SBIC_CTRL, INIT_SBICDMA | CTRL_IDI);
+ sbic_arm_write(host, SBIC_TIMEOUT, TIMEOUT_TIME);
+ sbic_arm_write(host, SBIC_SYNCHTRANSFER, SYNCHTRANSFER_2DBA);
+ sbic_arm_write(host, SBIC_SOURCEID, SOURCEID_ER | SOURCEID_DSP);
host->card.page_reg = 0x40;
- outb(host->card.page_reg, host->card.io_page);
+ writeb(host->card.page_reg, host->fast + PAGE_REG);
/* setup dmac - uPC71071 */
- dmac_write(host->dma.io_port, DMAC_INIT, 0);
+ dmac_write(host, DMAC_INIT, 0);
#ifdef USE_DMAC
- dmac_write(host->dma.io_port, DMAC_INIT, INIT_8BIT);
- dmac_write(host->dma.io_port, DMAC_CHANNEL, CHANNEL_0);
- dmac_write(host->dma.io_port, DMAC_DEVCON0, INIT_DEVCON0);
- dmac_write(host->dma.io_port, DMAC_DEVCON1, INIT_DEVCON1);
+ dmac_write(host, DMAC_INIT, INIT_8BIT);
+ dmac_write(host, DMAC_CHANNEL, CHANNEL_0);
+ dmac_write(host, DMAC_DEVCON0, INIT_DEVCON0);
+ dmac_write(host, DMAC_DEVCON1, INIT_DEVCON1);
#endif
host->SCpnt = NULL;
@@ -741,9 +738,9 @@ intr_ret_t acornscsi_kick(AS_Host *host)
* If we have an interrupt pending, then we may have been reselected.
* In this case, we don't want to write to the registers
*/
- if (!(sbic_arm_read(host->scsi.io_port, SBIC_ASR) & (ASR_INT|ASR_BSY|ASR_CIP))) {
- sbic_arm_write(host->scsi.io_port, SBIC_DESTID, SCpnt->device->id);
- sbic_arm_write(host->scsi.io_port, SBIC_CMND, CMND_SELWITHATN);
+ if (!(sbic_arm_read(host, SBIC_ASR) & (ASR_INT|ASR_BSY|ASR_CIP))) {
+ sbic_arm_write(host, SBIC_DESTID, SCpnt->device->id);
+ sbic_arm_write(host, SBIC_CMND, CMND_SELWITHATN);
}
/*
@@ -807,7 +804,7 @@ static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp,
struct scsi_cmnd *SCpnt = *SCpntp;
/* clean up */
- sbic_arm_write(host->scsi.io_port, SBIC_SOURCEID, SOURCEID_ER | SOURCEID_DSP);
+ sbic_arm_write(host, SBIC_SOURCEID, SOURCEID_ER | SOURCEID_DSP);
host->stats.fins += 1;
@@ -918,13 +915,13 @@ static
void acornscsi_data_read(AS_Host *host, char *ptr,
unsigned int start_addr, unsigned int length)
{
- extern void __acornscsi_in(int port, char *buf, int len);
+ extern void __acornscsi_in(void __iomem *, char *buf, int len);
unsigned int page, offset, len = length;
page = (start_addr >> 12);
offset = start_addr & ((1 << 12) - 1);
- outb((page & 0x3f) | host->card.page_reg, host->card.io_page);
+ writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG);
while (len > 0) {
unsigned int this_len;
@@ -934,7 +931,7 @@ void acornscsi_data_read(AS_Host *host, char *ptr,
else
this_len = len;
- __acornscsi_in(host->card.io_ram + (offset << 1), ptr, this_len);
+ __acornscsi_in(host->base + (offset << 1), ptr, this_len);
offset += this_len;
ptr += this_len;
@@ -943,10 +940,10 @@ void acornscsi_data_read(AS_Host *host, char *ptr,
if (offset == (1 << 12)) {
offset = 0;
page ++;
- outb((page & 0x3f) | host->card.page_reg, host->card.io_page);
+ writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG);
}
}
- outb(host->card.page_reg, host->card.io_page);
+ writeb(host->card.page_reg, host->fast + PAGE_REG);
}
/*
@@ -963,13 +960,13 @@ static
void acornscsi_data_write(AS_Host *host, char *ptr,
unsigned int start_addr, unsigned int length)
{
- extern void __acornscsi_out(int port, char *buf, int len);
+ extern void __acornscsi_out(void __iomem *, char *buf, int len);
unsigned int page, offset, len = length;
page = (start_addr >> 12);
offset = start_addr & ((1 << 12) - 1);
- outb((page & 0x3f) | host->card.page_reg, host->card.io_page);
+ writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG);
while (len > 0) {
unsigned int this_len;
@@ -979,7 +976,7 @@ void acornscsi_data_write(AS_Host *host, char *ptr,
else
this_len = len;
- __acornscsi_out(host->card.io_ram + (offset << 1), ptr, this_len);
+ __acornscsi_out(host->base + (offset << 1), ptr, this_len);
offset += this_len;
ptr += this_len;
@@ -988,10 +985,10 @@ void acornscsi_data_write(AS_Host *host, char *ptr,
if (offset == (1 << 12)) {
offset = 0;
page ++;
- outb((page & 0x3f) | host->card.page_reg, host->card.io_page);
+ writeb((page & 0x3f) | host->card.page_reg, host->fast + PAGE_REG);
}
}
- outb(host->card.page_reg, host->card.io_page);
+ writeb(host->card.page_reg, host->fast + PAGE_REG);
}
/* =========================================================================================
@@ -1008,8 +1005,8 @@ void acornscsi_data_write(AS_Host *host, char *ptr,
static inline
void acornscsi_dma_stop(AS_Host *host)
{
- dmac_write(host->dma.io_port, DMAC_MASKREG, MASK_ON);
- dmac_clearintr(host->dma.io_intr_clear);
+ dmac_write(host, DMAC_MASKREG, MASK_ON);
+ dmac_clearintr(host);
#if (DEBUG & DEBUG_DMA)
DBG(host->SCpnt, acornscsi_dumpdma(host, "stop"));
@@ -1031,7 +1028,7 @@ void acornscsi_dma_setup(AS_Host *host, dmadir_t direction)
host->dma.direction = direction;
- dmac_write(host->dma.io_port, DMAC_MASKREG, MASK_ON);
+ dmac_write(host, DMAC_MASKREG, MASK_ON);
if (direction == DMA_OUT) {
#if (DEBUG & DEBUG_NO_WRITE)
@@ -1062,13 +1059,13 @@ void acornscsi_dma_setup(AS_Host *host, dmadir_t direction)
length);
length -= 1;
- dmac_write(host->dma.io_port, DMAC_TXCNTLO, length);
- dmac_write(host->dma.io_port, DMAC_TXCNTHI, length >> 8);
- dmac_write(host->dma.io_port, DMAC_TXADRLO, address);
- dmac_write(host->dma.io_port, DMAC_TXADRMD, address >> 8);
- dmac_write(host->dma.io_port, DMAC_TXADRHI, 0);
- dmac_write(host->dma.io_port, DMAC_MODECON, mode);
- dmac_write(host->dma.io_port, DMAC_MASKREG, MASK_OFF);
+ dmac_write(host, DMAC_TXCNTLO, length);
+ dmac_write(host, DMAC_TXCNTHI, length >> 8);
+ dmac_write(host, DMAC_TXADRLO, address);
+ dmac_write(host, DMAC_TXADRMD, address >> 8);
+ dmac_write(host, DMAC_TXADRHI, 0);
+ dmac_write(host, DMAC_MODECON, mode);
+ dmac_write(host, DMAC_MASKREG, MASK_OFF);
#if (DEBUG & DEBUG_DMA)
DBG(host->SCpnt, acornscsi_dumpdma(host, "strt"));
@@ -1088,8 +1085,8 @@ void acornscsi_dma_setup(AS_Host *host, dmadir_t direction)
static
void acornscsi_dma_cleanup(AS_Host *host)
{
- dmac_write(host->dma.io_port, DMAC_MASKREG, MASK_ON);
- dmac_clearintr(host->dma.io_intr_clear);
+ dmac_write(host, DMAC_MASKREG, MASK_ON);
+ dmac_clearintr(host);
/*
* Check for a pending transfer
@@ -1116,7 +1113,7 @@ void acornscsi_dma_cleanup(AS_Host *host)
/*
* Calculate number of bytes transferred from DMA.
*/
- transferred = dmac_address(host->dma.io_port) - host->dma.start_addr;
+ transferred = dmac_address(host) - host->dma.start_addr;
host->dma.transferred += transferred;
if (host->dma.direction == DMA_IN)
@@ -1152,13 +1149,13 @@ void acornscsi_dma_intr(AS_Host *host)
DBG(host->SCpnt, acornscsi_dumpdma(host, "inti"));
#endif
- dmac_write(host->dma.io_port, DMAC_MASKREG, MASK_ON);
- dmac_clearintr(host->dma.io_intr_clear);
+ dmac_write(host, DMAC_MASKREG, MASK_ON);
+ dmac_clearintr(host);
/*
* Calculate amount transferred via DMA
*/
- transferred = dmac_address(host->dma.io_port) - host->dma.start_addr;
+ transferred = dmac_address(host) - host->dma.start_addr;
host->dma.transferred += transferred;
/*
@@ -1190,12 +1187,12 @@ void acornscsi_dma_intr(AS_Host *host)
length);
length -= 1;
- dmac_write(host->dma.io_port, DMAC_TXCNTLO, length);
- dmac_write(host->dma.io_port, DMAC_TXCNTHI, length >> 8);
- dmac_write(host->dma.io_port, DMAC_TXADRLO, address);
- dmac_write(host->dma.io_port, DMAC_TXADRMD, address >> 8);
- dmac_write(host->dma.io_port, DMAC_TXADRHI, 0);
- dmac_write(host->dma.io_port, DMAC_MASKREG, MASK_OFF);
+ dmac_write(host, DMAC_TXCNTLO, length);
+ dmac_write(host, DMAC_TXCNTHI, length >> 8);
+ dmac_write(host, DMAC_TXADRLO, address);
+ dmac_write(host, DMAC_TXADRMD, address >> 8);
+ dmac_write(host, DMAC_TXADRHI, 0);
+ dmac_write(host, DMAC_MASKREG, MASK_OFF);
#if (DEBUG & DEBUG_DMA)
DBG(host->SCpnt, acornscsi_dumpdma(host, "into"));
@@ -1209,15 +1206,15 @@ void acornscsi_dma_intr(AS_Host *host)
* attention condition. We continue giving one byte until
* the device recognises the attention.
*/
- if (dmac_read(host->dma.io_port, DMAC_STATUS) & STATUS_RQ0) {
+ if (dmac_read(host, DMAC_STATUS) & STATUS_RQ0) {
acornscsi_abortcmd(host, host->SCpnt->tag);
- dmac_write(host->dma.io_port, DMAC_TXCNTLO, 0);
- dmac_write(host->dma.io_port, DMAC_TXCNTHI, 0);
- dmac_write(host->dma.io_port, DMAC_TXADRLO, 0);
- dmac_write(host->dma.io_port, DMAC_TXADRMD, 0);
- dmac_write(host->dma.io_port, DMAC_TXADRHI, 0);
- dmac_write(host->dma.io_port, DMAC_MASKREG, MASK_OFF);
+ dmac_write(host, DMAC_TXCNTLO, 0);
+ dmac_write(host, DMAC_TXCNTHI, 0);
+ dmac_write(host, DMAC_TXADRLO, 0);
+ dmac_write(host, DMAC_TXADRMD, 0);
+ dmac_write(host, DMAC_TXADRHI, 0);
+ dmac_write(host, DMAC_MASKREG, MASK_OFF);
}
#endif
}
@@ -1271,9 +1268,9 @@ void acornscsi_dma_adjust(AS_Host *host)
host->dma.xfer_setup = 0;
else {
transferred += host->dma.start_addr;
- dmac_write(host->dma.io_port, DMAC_TXADRLO, transferred);
- dmac_write(host->dma.io_port, DMAC_TXADRMD, transferred >> 8);
- dmac_write(host->dma.io_port, DMAC_TXADRHI, transferred >> 16);
+ dmac_write(host, DMAC_TXADRLO, transferred);
+ dmac_write(host, DMAC_TXADRMD, transferred >> 8);
+ dmac_write(host, DMAC_TXADRHI, transferred >> 16);
#if (DEBUG & (DEBUG_DMA|DEBUG_WRITE))
DBG(host->SCpnt, acornscsi_dumpdma(host, "adjo"));
#endif
@@ -1292,12 +1289,12 @@ acornscsi_write_pio(AS_Host *host, char *bytes, int *ptr, int len, unsigned int
int my_ptr = *ptr;
while (my_ptr < len) {
- asr = sbic_arm_read(host->scsi.io_port, SBIC_ASR);
+ asr = sbic_arm_read(host, SBIC_ASR);
if (asr & ASR_DBR) {
timeout = max_timeout;
- sbic_arm_write(host->scsi.io_port, SBIC_DATA, bytes[my_ptr++]);
+ sbic_arm_write(host, SBIC_DATA, bytes[my_ptr++]);
} else if (asr & ASR_INT)
break;
else if (--timeout == 0)
@@ -1320,9 +1317,9 @@ acornscsi_sendcommand(AS_Host *host)
{
struct scsi_cmnd *SCpnt = host->SCpnt;
- sbic_arm_write(host->scsi.io_port, SBIC_TRANSCNTH, 0);
- sbic_arm_writenext(host->scsi.io_port, 0);
- sbic_arm_writenext(host->scsi.io_port, SCpnt->cmd_len - host->scsi.SCp.sent_command);
+ sbic_arm_write(host, SBIC_TRANSCNTH, 0);
+ sbic_arm_writenext(host, 0);
+ sbic_arm_writenext(host, SCpnt->cmd_len - host->scsi.SCp.sent_command);
acornscsi_sbic_issuecmd(host, CMND_XFERINFO);
@@ -1351,7 +1348,7 @@ void acornscsi_sendmessage(AS_Host *host)
acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "sending message 1");
- sbic_arm_write(host->scsi.io_port, SBIC_DATA, NOP);
+ sbic_arm_write(host, SBIC_DATA, NOP);
host->scsi.last_message = NOP;
#if (DEBUG & DEBUG_MESSAGES)
@@ -1365,7 +1362,7 @@ void acornscsi_sendmessage(AS_Host *host)
acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "sending message 2");
- sbic_arm_write(host->scsi.io_port, SBIC_DATA, msg->msg[0]);
+ sbic_arm_write(host, SBIC_DATA, msg->msg[0]);
host->scsi.last_message = msg->msg[0];
#if (DEBUG & DEBUG_MESSAGES)
@@ -1382,9 +1379,9 @@ void acornscsi_sendmessage(AS_Host *host)
* initiator. This provides an interlock so that the
* initiator can determine which message byte is rejected.
*/
- sbic_arm_write(host->scsi.io_port, SBIC_TRANSCNTH, 0);
- sbic_arm_writenext(host->scsi.io_port, 0);
- sbic_arm_writenext(host->scsi.io_port, message_length);
+ sbic_arm_write(host, SBIC_TRANSCNTH, 0);
+ sbic_arm_writenext(host, 0);
+ sbic_arm_writenext(host, message_length);
acornscsi_sbic_issuecmd(host, CMND_XFERINFO);
msgnr = 0;
@@ -1421,7 +1418,7 @@ void acornscsi_readstatusbyte(AS_Host *host)
{
acornscsi_sbic_issuecmd(host, CMND_XFERINFO|CMND_SBT);
acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "reading status byte");
- host->scsi.SCp.Status = sbic_arm_read(host->scsi.io_port, SBIC_DATA);
+ host->scsi.SCp.Status = sbic_arm_read(host, SBIC_DATA);
}
/*
@@ -1438,12 +1435,12 @@ unsigned char acornscsi_readmessagebyte(AS_Host *host)
acornscsi_sbic_wait(host, ASR_DBR, ASR_DBR, 1000, "for message byte");
- message = sbic_arm_read(host->scsi.io_port, SBIC_DATA);
+ message = sbic_arm_read(host, SBIC_DATA);
/* wait for MSGIN-XFER-PAUSED */
acornscsi_sbic_wait(host, ASR_INT, ASR_INT, 1000, "for interrupt after message byte");
- sbic_arm_read(host->scsi.io_port, SBIC_SSR);
+ sbic_arm_read(host, SBIC_SSR);
return message;
}
@@ -1480,7 +1477,7 @@ void acornscsi_message(AS_Host *host)
/* wait for next msg-in */
acornscsi_sbic_wait(host, ASR_INT, ASR_INT, 1000, "for interrupt after negate ack");
- sbic_arm_read(host->scsi.io_port, SBIC_SSR);
+ sbic_arm_read(host, SBIC_SSR);
}
} while (msgidx < msglen);
@@ -1602,7 +1599,7 @@ void acornscsi_message(AS_Host *host)
host->host->host_no, acornscsi_target(host));
host->device[host->SCpnt->device->id].sync_xfer = SYNCHTRANSFER_2DBA;
host->device[host->SCpnt->device->id].sync_state = SYNC_ASYNCHRONOUS;
- sbic_arm_write(host->scsi.io_port, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer);
+ sbic_arm_write(host, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer);
break;
default:
@@ -1652,7 +1649,7 @@ void acornscsi_message(AS_Host *host)
host->device[host->SCpnt->device->id].sync_xfer =
calc_sync_xfer(period * 4, length);
}
- sbic_arm_write(host->scsi.io_port, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer);
+ sbic_arm_write(host, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer);
break;
#else
/* We do not accept synchronous transfers. Respond with a
@@ -1792,10 +1789,10 @@ int acornscsi_starttransfer(AS_Host *host)
residual = scsi_bufflen(host->SCpnt) - host->scsi.SCp.scsi_xferred;
- sbic_arm_write(host->scsi.io_port, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer);
- sbic_arm_writenext(host->scsi.io_port, residual >> 16);
- sbic_arm_writenext(host->scsi.io_port, residual >> 8);
- sbic_arm_writenext(host->scsi.io_port, residual);
+ sbic_arm_write(host, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer);
+ sbic_arm_writenext(host, residual >> 16);
+ sbic_arm_writenext(host, residual >> 8);
+ sbic_arm_writenext(host, residual);
acornscsi_sbic_issuecmd(host, CMND_XFERINFO);
return 1;
}
@@ -1816,7 +1813,7 @@ int acornscsi_reconnect(AS_Host *host)
{
unsigned int target, lun, ok = 0;
- target = sbic_arm_read(host->scsi.io_port, SBIC_SOURCEID);
+ target = sbic_arm_read(host, SBIC_SOURCEID);
if (!(target & 8))
printk(KERN_ERR "scsi%d: invalid source id after reselection "
@@ -1832,7 +1829,7 @@ int acornscsi_reconnect(AS_Host *host)
host->SCpnt = NULL;
}
- lun = sbic_arm_read(host->scsi.io_port, SBIC_DATA) & 7;
+ lun = sbic_arm_read(host, SBIC_DATA) & 7;
host->scsi.reconnected.target = target;
host->scsi.reconnected.lun = lun;
@@ -1952,7 +1949,7 @@ static
void acornscsi_abortcmd(AS_Host *host, unsigned char tag)
{
host->scsi.phase = PHASE_ABORTED;
- sbic_arm_write(host->scsi.io_port, SBIC_CMND, CMND_ASSERTATN);
+ sbic_arm_write(host, SBIC_CMND, CMND_ASSERTATN);
msgqueue_flush(&host->scsi.msgs);
#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
@@ -1979,11 +1976,11 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
{
unsigned int asr, ssr;
- asr = sbic_arm_read(host->scsi.io_port, SBIC_ASR);
+ asr = sbic_arm_read(host, SBIC_ASR);
if (!(asr & ASR_INT))
return INTR_IDLE;
- ssr = sbic_arm_read(host->scsi.io_port, SBIC_SSR);
+ ssr = sbic_arm_read(host, SBIC_SSR);
#if (DEBUG & DEBUG_PHASES)
print_sbic_status(asr, ssr, host->scsi.phase);
@@ -1999,15 +1996,15 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
printk(KERN_ERR "scsi%d: reset in standard mode but wanted advanced mode.\n",
host->host->host_no);
/* setup sbic - WD33C93A */
- sbic_arm_write(host->scsi.io_port, SBIC_OWNID, OWNID_EAF | host->host->this_id);
- sbic_arm_write(host->scsi.io_port, SBIC_CMND, CMND_RESET);
+ sbic_arm_write(host, SBIC_OWNID, OWNID_EAF | host->host->this_id);
+ sbic_arm_write(host, SBIC_CMND, CMND_RESET);
return INTR_IDLE;
case 0x01: /* reset state - advanced */
- sbic_arm_write(host->scsi.io_port, SBIC_CTRL, INIT_SBICDMA | CTRL_IDI);
- sbic_arm_write(host->scsi.io_port, SBIC_TIMEOUT, TIMEOUT_TIME);
- sbic_arm_write(host->scsi.io_port, SBIC_SYNCHTRANSFER, SYNCHTRANSFER_2DBA);
- sbic_arm_write(host->scsi.io_port, SBIC_SOURCEID, SOURCEID_ER | SOURCEID_DSP);
+ sbic_arm_write(host, SBIC_CTRL, INIT_SBICDMA | CTRL_IDI);
+ sbic_arm_write(host, SBIC_TIMEOUT, TIMEOUT_TIME);
+ sbic_arm_write(host, SBIC_SYNCHTRANSFER, SYNCHTRANSFER_2DBA);
+ sbic_arm_write(host, SBIC_SOURCEID, SOURCEID_ER | SOURCEID_DSP);
msgqueue_flush(&host->scsi.msgs);
return INTR_IDLE;
@@ -2025,10 +2022,10 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
msgqueue_flush(&host->scsi.msgs);
host->dma.transferred = host->scsi.SCp.scsi_xferred;
/* 33C93 gives next interrupt indicating bus phase */
- asr = sbic_arm_read(host->scsi.io_port, SBIC_ASR);
+ asr = sbic_arm_read(host, SBIC_ASR);
if (!(asr & ASR_INT))
break;
- ssr = sbic_arm_read(host->scsi.io_port, SBIC_SSR);
+ ssr = sbic_arm_read(host, SBIC_SSR);
ADD_STATUS(8, ssr, host->scsi.phase, 1);
ADD_STATUS(host->SCpnt->device->id, ssr, host->scsi.phase, 1);
goto connected;
@@ -2476,11 +2473,11 @@ acornscsi_intr(int irq, void *dev_id)
do {
ret = INTR_IDLE;
- iostatus = inb(host->card.io_intr);
+ iostatus = readb(host->fast + INT_REG);
if (iostatus & 2) {
acornscsi_dma_intr(host);
- iostatus = inb(host->card.io_intr);
+ iostatus = readb(host->fast + INT_REG);
}
if (iostatus & 8)
@@ -2655,7 +2652,7 @@ static enum res_abort acornscsi_do_abort(AS_Host *host, struct scsi_cmnd *SCpnt)
* busylun bit.
*/
case PHASE_CONNECTED:
- sbic_arm_write(host->scsi.io_port, SBIC_CMND, CMND_DISCONNECT);
+ sbic_arm_write(host, SBIC_CMND, CMND_DISCONNECT);
host->SCpnt = NULL;
res = res_success_clear;
break;
@@ -2699,8 +2696,8 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt)
#if (DEBUG & DEBUG_ABORT)
{
int asr, ssr;
- asr = sbic_arm_read(host->scsi.io_port, SBIC_ASR);
- ssr = sbic_arm_read(host->scsi.io_port, SBIC_SSR);
+ asr = sbic_arm_read(host, SBIC_ASR);
+ ssr = sbic_arm_read(host, SBIC_SSR);
printk(KERN_WARNING "acornscsi_abort: ");
print_sbic_status(asr, ssr, host->scsi.phase);
@@ -2731,9 +2728,7 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt)
//#if (DEBUG & DEBUG_ABORT)
printk("success\n");
//#endif
- SCpnt->result = DID_ABORT << 16;
- SCpnt->scsi_done(SCpnt);
- result = SCSI_ABORT_SUCCESS;
+ result = SUCCESS;
break;
/*
@@ -2745,7 +2740,7 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt)
//#if (DEBUG & DEBUG_ABORT)
printk("snooze\n");
//#endif
- result = SCSI_ABORT_SNOOZE;
+ result = FAILED;
break;
/*
@@ -2755,11 +2750,7 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt)
default:
case res_not_running:
acornscsi_dumplog(host, SCpnt->device->id);
-#if (DEBUG & DEBUG_ABORT)
- result = SCSI_ABORT_SNOOZE;
-#else
- result = SCSI_ABORT_NOT_RUNNING;
-#endif
+ result = FAILED;
//#if (DEBUG & DEBUG_ABORT)
printk("not running\n");
//#endif
@@ -2770,13 +2761,12 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt)
}
/*
- * Prototype: int acornscsi_reset(struct scsi_cmnd *SCpnt, unsigned int reset_flags)
+ * Prototype: int acornscsi_reset(struct scsi_cmnd *SCpnt)
* Purpose : reset a command on this host/reset this host
* Params : SCpnt - command causing reset
- * result - what type of reset to perform
* Returns : one of SCSI_RESET_ macros
*/
-int acornscsi_reset(struct scsi_cmnd *SCpnt, unsigned int reset_flags)
+int acornscsi_bus_reset(struct scsi_cmnd *SCpnt)
{
AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata;
struct scsi_cmnd *SCptr;
@@ -2787,8 +2777,8 @@ int acornscsi_reset(struct scsi_cmnd *SCpnt, unsigned int reset_flags)
{
int asr, ssr;
- asr = sbic_arm_read(host->scsi.io_port, SBIC_ASR);
- ssr = sbic_arm_read(host->scsi.io_port, SBIC_SSR);
+ asr = sbic_arm_read(host, SBIC_ASR);
+ ssr = sbic_arm_read(host, SBIC_SSR);
printk(KERN_WARNING "acornscsi_reset: ");
print_sbic_status(asr, ssr, host->scsi.phase);
@@ -2798,28 +2788,16 @@ int acornscsi_reset(struct scsi_cmnd *SCpnt, unsigned int reset_flags)
acornscsi_dma_stop(host);
- SCptr = host->SCpnt;
-
/*
* do hard reset. This resets all devices on this host, and so we
* must set the reset status on all commands.
*/
acornscsi_resetcard(host);
- /*
- * report reset on commands current connected/disconnected
- */
- acornscsi_reportstatus(&host->SCpnt, &SCptr, DID_RESET);
-
while ((SCptr = queue_remove(&host->queues.disconnected)) != NULL)
- acornscsi_reportstatus(&SCptr, &SCpnt, DID_RESET);
-
- if (SCpnt) {
- SCpnt->result = DID_RESET << 16;
- SCpnt->scsi_done(SCpnt);
- }
+ ;
- return SCSI_RESET_BUS_RESET | SCSI_RESET_HOST_RESET | SCSI_RESET_SUCCESS;
+ return SUCCESS;
}
/*==============================================================================================
@@ -2850,7 +2828,7 @@ char *acornscsi_info(struct Scsi_Host *host)
" LINK"
#endif
#if (DEBUG & DEBUG_NO_WRITE)
- " NOWRITE ("NO_WRITE_STR")"
+ " NOWRITE (" __stringify(NO_WRITE) ")"
#endif
, host->hostt->name, host->io_port, host->irq,
VER_MAJOR, VER_MINOR, VER_PATCH);
@@ -2881,15 +2859,15 @@ int acornscsi_proc_info(struct Scsi_Host *instance, char *buffer, char **start,
" LINK"
#endif
#if (DEBUG & DEBUG_NO_WRITE)
- " NOWRITE ("NO_WRITE_STR")"
+ " NOWRITE (" __stringify(NO_WRITE) ")"
#endif
"\n\n", VER_MAJOR, VER_MINOR, VER_PATCH);
- p += sprintf(p, "SBIC: WD33C93A Address: %08X IRQ : %d\n",
- host->scsi.io_port, host->scsi.irq);
+ p += sprintf(p, "SBIC: WD33C93A Address: %p IRQ : %d\n",
+ host->base + SBIC_REGIDX, host->scsi.irq);
#ifdef USE_DMAC
- p += sprintf(p, "DMAC: uPC71071 Address: %08X IRQ : %d\n\n",
- host->dma.io_port, host->scsi.irq);
+ p += sprintf(p, "DMAC: uPC71071 Address: %p IRQ : %d\n\n",
+ host->base + DMAC_OFFSET, host->scsi.irq);
#endif
p += sprintf(p, "Statistics:\n"
@@ -2976,9 +2954,8 @@ static struct scsi_host_template acornscsi_template = {
.name = "AcornSCSI",
.info = acornscsi_info,
.queuecommand = acornscsi_queuecmd,
-#warning fixme
- .abort = acornscsi_abort,
- .reset = acornscsi_reset,
+ .eh_abort_handler = acornscsi_abort,
+ .eh_bus_reset_handler = acornscsi_bus_reset,
.can_queue = 16,
.this_id = 7,
.sg_tablesize = SG_ALL,
@@ -2992,48 +2969,37 @@ acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
{
struct Scsi_Host *host;
AS_Host *ashost;
- int ret = -ENOMEM;
+ int ret;
- host = scsi_host_alloc(&acornscsi_template, sizeof(AS_Host));
- if (!host)
+ ret = ecard_request_resources(ec);
+ if (ret)
goto out;
+ host = scsi_host_alloc(&acornscsi_template, sizeof(AS_Host));
+ if (!host) {
+ ret = -ENOMEM;
+ goto out_release;
+ }
+
ashost = (AS_Host *)host->hostdata;
- host->io_port = ecard_address(ec, ECARD_MEMC, 0);
- host->irq = ec->irq;
+ ashost->base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
+ ashost->fast = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
+ if (!ashost->base || !ashost->fast)
+ goto out_put;
- ashost->host = host;
- ashost->scsi.io_port = ioaddr(host->io_port + 0x800);
- ashost->scsi.irq = host->irq;
- ashost->card.io_intr = POD_SPACE(host->io_port) + 0x800;
- ashost->card.io_page = POD_SPACE(host->io_port) + 0xc00;
- ashost->card.io_ram = ioaddr(host->io_port);
- ashost->dma.io_port = host->io_port + 0xc00;
- ashost->dma.io_intr_clear = POD_SPACE(host->io_port) + 0x800;
+ host->irq = ec->irq;
+ ashost->host = host;
+ ashost->scsi.irq = host->irq;
- ec->irqaddr = (char *)ioaddr(ashost->card.io_intr);
+ ec->irqaddr = ashost->fast + INT_REG;
ec->irqmask = 0x0a;
- ret = -EBUSY;
- if (!request_region(host->io_port + 0x800, 2, "acornscsi(sbic)"))
- goto err_1;
- if (!request_region(ashost->card.io_intr, 1, "acornscsi(intr)"))
- goto err_2;
- if (!request_region(ashost->card.io_page, 1, "acornscsi(page)"))
- goto err_3;
-#ifdef USE_DMAC
- if (!request_region(ashost->dma.io_port, 256, "acornscsi(dmac)"))
- goto err_4;
-#endif
- if (!request_region(host->io_port, 2048, "acornscsi(ram)"))
- goto err_5;
-
ret = request_irq(host->irq, acornscsi_intr, IRQF_DISABLED, "acornscsi", ashost);
if (ret) {
printk(KERN_CRIT "scsi%d: IRQ%d not free: %d\n",
host->host_no, ashost->scsi.irq, ret);
- goto err_6;
+ goto out_put;
}
memset(&ashost->stats, 0, sizeof (ashost->stats));
@@ -3045,27 +3011,22 @@ acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
ret = scsi_add_host(host, &ec->dev);
if (ret)
- goto err_7;
+ goto out_irq;
scsi_scan_host(host);
goto out;
- err_7:
+ out_irq:
free_irq(host->irq, ashost);
- err_6:
- release_region(host->io_port, 2048);
- err_5:
-#ifdef USE_DMAC
- release_region(ashost->dma.io_port, 256);
-#endif
- err_4:
- release_region(ashost->card.io_page, 1);
- err_3:
- release_region(ashost->card.io_intr, 1);
- err_2:
- release_region(host->io_port + 0x800, 2);
- err_1:
+ msgqueue_free(&ashost->scsi.msgs);
+ queue_free(&ashost->queues.disconnected);
+ queue_free(&ashost->queues.issue);
+ out_put:
+ ecardm_iounmap(ec, ashost->fast);
+ ecardm_iounmap(ec, ashost->base);
scsi_host_put(host);
+ out_release:
+ ecard_release_resources(ec);
out:
return ret;
}
@@ -3081,20 +3042,17 @@ static void __devexit acornscsi_remove(struct expansion_card *ec)
/*
* Put card into RESET state
*/
- outb(0x80, ashost->card.io_page);
+ writeb(0x80, ashost->fast + PAGE_REG);
free_irq(host->irq, ashost);
- release_region(host->io_port + 0x800, 2);
- release_region(ashost->card.io_intr, 1);
- release_region(ashost->card.io_page, 1);
- release_region(ashost->dma.io_port, 256);
- release_region(host->io_port, 2048);
-
msgqueue_free(&ashost->scsi.msgs);
queue_free(&ashost->queues.disconnected);
queue_free(&ashost->queues.issue);
+ ecardm_iounmap(ec, ashost->fast);
+ ecardm_iounmap(ec, ashost->base);
scsi_host_put(host);
+ ecard_release_resources(ec);
}
static const struct ecard_id acornscsi_cids[] = {
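
The bulk of the acornscsi conversion above replaces computed port numbers and inb()/outb() with ioremapped void __iomem * bases accessed through readb()/writeb(), with the card's register layout expressed as named offsets. A condensed sketch of the accessor pattern (names simplified from the driver):

#include <linux/io.h>

#define SBIC_REGIDX	0x2000	/* register-index latch (offset from base) */
#define SBIC_REGVAL	0x2004	/* register-value window */

struct my_host {
	void __iomem *base;	/* from ecardm_iomap()/ioremap() at probe */
};

static void my_sbic_write(struct my_host *host, unsigned int reg,
			  unsigned int value)
{
	writeb(reg, host->base + SBIC_REGIDX);	/* select the register */
	writeb(value, host->base + SBIC_REGVAL);/* then write its value */
}

static unsigned int my_sbic_read(struct my_host *host, unsigned int reg)
{
	writeb(reg, host->base + SBIC_REGIDX);
	return readb(host->base + SBIC_REGVAL);
}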
diff --git a/drivers/scsi/arm/acornscsi.h b/drivers/scsi/arm/acornscsi.h
index d11424b89f42..8d2172a0b351 100644
--- a/drivers/scsi/arm/acornscsi.h
+++ b/drivers/scsi/arm/acornscsi.h
@@ -179,7 +179,6 @@
/* miscellaneous internal variables */
-#define POD_SPACE(x) ((x) + 0xd0000)
#define MASK_ON (MASKREG_M3|MASKREG_M2|MASKREG_M1|MASKREG_M0)
#define MASK_OFF (MASKREG_M3|MASKREG_M2|MASKREG_M1)
@@ -279,10 +278,11 @@ typedef struct acornscsi_hostdata {
struct Scsi_Host *host; /* host */
struct scsi_cmnd *SCpnt; /* currently processing command */
struct scsi_cmnd *origSCpnt; /* original connecting command */
+ void __iomem *base; /* memc base address */
+ void __iomem *fast; /* fast ioc base address */
/* driver information */
struct {
- unsigned int io_port; /* base address of WD33C93 */
unsigned int irq; /* interrupt */
phase_t phase; /* current phase */
@@ -329,8 +329,6 @@ typedef struct acornscsi_hostdata {
/* DMA info */
struct {
- unsigned int io_port; /* base address of DMA controller */
- unsigned int io_intr_clear; /* address of DMA interrupt clear */
unsigned int free_addr; /* next free address */
unsigned int start_addr; /* start address of current transfer */
dmadir_t direction; /* dma direction */
@@ -345,9 +343,6 @@ typedef struct acornscsi_hostdata {
/* card info */
struct {
- unsigned int io_intr; /* base address of interrupt id reg */
- unsigned int io_page; /* base address of page reg */
- unsigned int io_ram; /* base address of RAM access */
unsigned char page_reg; /* current setting of page reg */
} card;
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index c4b938bc30d3..aa2011b64683 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -22,6 +22,7 @@
#include <linux/chio.h> /* here are all the ioctls */
#include <linux/mutex.h>
#include <linux/idr.h>
+#include <linux/smp_lock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -571,16 +572,19 @@ ch_open(struct inode *inode, struct file *file)
scsi_changer *ch;
int minor = iminor(inode);
+ lock_kernel();
spin_lock(&ch_index_lock);
ch = idr_find(&ch_index_idr, minor);
if (NULL == ch || scsi_device_get(ch->device)) {
spin_unlock(&ch_index_lock);
+ unlock_kernel();
return -ENXIO;
}
spin_unlock(&ch_index_lock);
file->private_data = ch;
+ unlock_kernel();
return 0;
}
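
The ch_open() path above shows the lookup-then-reference pattern: the idr lookup and the scsi_device_get() both happen under ch_index_lock, so the device cannot go away between the two. A generic sketch of the same shape, using a kref for the reference count:

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/kref.h>

struct my_obj {
	struct kref ref;
};

static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_idr_lock);

static struct my_obj *my_lookup_get(int minor)
{
	struct my_obj *obj;

	spin_lock(&my_idr_lock);
	obj = idr_find(&my_idr, minor);
	if (obj)
		kref_get(&obj->ref);	/* pin it before dropping the lock */
	spin_unlock(&my_idr_lock);
	return obj;			/* NULL if no such minor */
}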
diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
new file mode 100644
index 000000000000..2adc0f666b68
--- /dev/null
+++ b/drivers/scsi/device_handler/Kconfig
@@ -0,0 +1,32 @@
+#
+# SCSI Device Handler configuration
+#
+
+menuconfig SCSI_DH
+ tristate "SCSI Device Handlers"
+ depends on SCSI
+ default n
+ help
+ SCSI Device Handlers provide device specific support for
+ devices utilized in multipath configurations. Say Y here to
+ select support for specific hardware.
+
+config SCSI_DH_RDAC
+ tristate "LSI RDAC Device Handler"
+ depends on SCSI_DH
+ help
+ If you have a LSI RDAC select y. Otherwise, say N.
+
+config SCSI_DH_HP_SW
+ tristate "HP/COMPAQ MSA Device Handler"
+ depends on SCSI_DH
+ help
+ If you have a HP/COMPAQ MSA device that requires START_STOP to
+ be sent to start it and cannot upgrade the firmware then select y.
+ Otherwise, say N.
+
+config SCSI_DH_EMC
+ tristate "EMC CLARiiON Device Handler"
+ depends on SCSI_DH
+ help
+ If you have a EMC CLARiiON select y. Otherwise, say N.
diff --git a/drivers/scsi/device_handler/Makefile b/drivers/scsi/device_handler/Makefile
new file mode 100644
index 000000000000..35272e93b1c8
--- /dev/null
+++ b/drivers/scsi/device_handler/Makefile
@@ -0,0 +1,7 @@
+#
+# SCSI Device Handler
+#
+obj-$(CONFIG_SCSI_DH) += scsi_dh.o
+obj-$(CONFIG_SCSI_DH_RDAC) += scsi_dh_rdac.o
+obj-$(CONFIG_SCSI_DH_HP_SW) += scsi_dh_hp_sw.o
+obj-$(CONFIG_SCSI_DH_EMC) += scsi_dh_emc.o
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
new file mode 100644
index 000000000000..ab6c21cd9689
--- /dev/null
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -0,0 +1,162 @@
+/*
+ * SCSI device handler infrastructure.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2007
+ * Authors:
+ * Chandra Seetharaman <sekharan@us.ibm.com>
+ * Mike Anderson <andmike@linux.vnet.ibm.com>
+ */
+
+#include <scsi/scsi_dh.h>
+#include "../scsi_priv.h"
+
+static DEFINE_SPINLOCK(list_lock);
+static LIST_HEAD(scsi_dh_list);
+
+static struct scsi_device_handler *get_device_handler(const char *name)
+{
+ struct scsi_device_handler *tmp, *found = NULL;
+
+ spin_lock(&list_lock);
+ list_for_each_entry(tmp, &scsi_dh_list, list) {
+ if (!strcmp(tmp->name, name)) {
+ found = tmp;
+ break;
+ }
+ }
+ spin_unlock(&list_lock);
+ return found;
+}
+
+static int scsi_dh_notifier_add(struct device *dev, void *data)
+{
+ struct scsi_device_handler *scsi_dh = data;
+
+ scsi_dh->nb.notifier_call(&scsi_dh->nb, BUS_NOTIFY_ADD_DEVICE, dev);
+ return 0;
+}
+
+/*
+ * scsi_register_device_handler - register a device handler personality
+ * module.
+ * @scsi_dh - device handler to be registered.
+ *
+ * Returns 0 on success, -EBUSY if handler already registered.
+ */
+int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
+{
+ int ret = -EBUSY;
+ struct scsi_device_handler *tmp;
+
+ tmp = get_device_handler(scsi_dh->name);
+ if (tmp)
+ goto done;
+
+ ret = bus_register_notifier(&scsi_bus_type, &scsi_dh->nb);
+
+ bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
+ spin_lock(&list_lock);
+ list_add(&scsi_dh->list, &scsi_dh_list);
+ spin_unlock(&list_lock);
+
+done:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(scsi_register_device_handler);
+
+static int scsi_dh_notifier_remove(struct device *dev, void *data)
+{
+ struct scsi_device_handler *scsi_dh = data;
+
+ scsi_dh->nb.notifier_call(&scsi_dh->nb, BUS_NOTIFY_DEL_DEVICE, dev);
+ return 0;
+}
+
+/*
+ * scsi_unregister_device_handler - unregister a device handler personality
+ * module.
+ * @scsi_dh - device handler to be unregistered.
+ *
+ * Returns 0 on success, -ENODEV if handler not registered.
+ */
+int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
+{
+ int ret = -ENODEV;
+ struct scsi_device_handler *tmp;
+
+ tmp = get_device_handler(scsi_dh->name);
+ if (!tmp)
+ goto done;
+
+ ret = bus_unregister_notifier(&scsi_bus_type, &scsi_dh->nb);
+
+ bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
+ scsi_dh_notifier_remove);
+ spin_lock(&list_lock);
+ list_del(&scsi_dh->list);
+ spin_unlock(&list_lock);
+
+done:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(scsi_unregister_device_handler);
+
+/*
+ * scsi_dh_activate - activate the path associated with the scsi_device
+ * corresponding to the given request queue.
+ * @q - Request queue that is associated with the scsi_device to be
+ * activated.
+ */
+int scsi_dh_activate(struct request_queue *q)
+{
+ int err = 0;
+ unsigned long flags;
+ struct scsi_device *sdev;
+ struct scsi_device_handler *scsi_dh = NULL;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ sdev = q->queuedata;
+ if (sdev && sdev->scsi_dh_data)
+ scsi_dh = sdev->scsi_dh_data->scsi_dh;
+ if (!scsi_dh || !get_device(&sdev->sdev_gendev))
+ err = SCSI_DH_NOSYS;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ if (err)
+ return err;
+
+ if (scsi_dh->activate)
+ err = scsi_dh->activate(sdev);
+ put_device(&sdev->sdev_gendev);
+ return err;
+}
+EXPORT_SYMBOL_GPL(scsi_dh_activate);
+
+/*
+ * scsi_dh_handler_exist - Return TRUE(1) if a device handler exists for
+ * the given name. FALSE(0) otherwise.
+ * @name - name of the device handler.
+ */
+int scsi_dh_handler_exist(const char *name)
+{
+ return (get_device_handler(name) != NULL);
+}
+EXPORT_SYMBOL_GPL(scsi_dh_handler_exist);
+
+MODULE_DESCRIPTION("SCSI device handler");
+MODULE_AUTHOR("Chandra Seetharaman <sekharan@us.ibm.com>");
+MODULE_LICENSE("GPL");
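
The three handlers that follow all use this interface the same way; boiled down to a minimal sketch (the example_* names are hypothetical, not part of the patch), a handler module looks like:

	static int example_activate(struct scsi_device *sdev)
	{
		return SCSI_DH_OK;	/* device-specific failover I/O goes here */
	}

	static int example_bus_notify(struct notifier_block *nb,
				      unsigned long action, void *data)
	{
		/* match the device and attach/detach scsi_dh_data here */
		return 0;
	}

	static struct scsi_device_handler example_dh = {
		.name			= "example",
		.module			= THIS_MODULE,
		.nb.notifier_call	= example_bus_notify,
		.activate		= example_activate,
	};

	static int __init example_init(void)
	{
		return scsi_register_device_handler(&example_dh);
	}

	static void __exit example_exit(void)
	{
		scsi_unregister_device_handler(&example_dh);
	}

	module_init(example_init);
	module_exit(example_exit);
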
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
new file mode 100644
index 000000000000..f2467e936e55
--- /dev/null
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -0,0 +1,504 @@
+/*
+ * Target driver for EMC CLARiiON AX/CX-series hardware.
+ * Based on code from Lars Marowsky-Bree <lmb@suse.de>
+ * and Ed Goggin <egoggin@emc.com>.
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2006 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dh.h>
+#include <scsi/scsi_device.h>
+
+#define CLARIION_NAME "emc_clariion"
+
+#define CLARIION_TRESPASS_PAGE 0x22
+#define CLARIION_BUFFER_SIZE 0x80
+#define CLARIION_TIMEOUT (60 * HZ)
+#define CLARIION_RETRIES 3
+#define CLARIION_UNBOUND_LU -1
+
+static unsigned char long_trespass[] = {
+ 0, 0, 0, 0,
+ CLARIION_TRESPASS_PAGE, /* Page code */
+ 0x09, /* Page length - 2 */
+ 0x81, /* Trespass code + Honor reservation bit */
+ 0xff, 0xff, /* Trespass target */
+ 0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */
+};
+
+static unsigned char long_trespass_hr[] = {
+ 0, 0, 0, 0,
+ CLARIION_TRESPASS_PAGE, /* Page code */
+ 0x09, /* Page length - 2 */
+ 0x01, /* Trespass code + Honor reservation bit */
+ 0xff, 0xff, /* Trespass target */
+ 0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */
+};
+
+static unsigned char short_trespass[] = {
+ 0, 0, 0, 0,
+ CLARIION_TRESPASS_PAGE, /* Page code */
+ 0x02, /* Page length - 2 */
+ 0x81, /* Trespass code + Honor reservation bit */
+ 0xff, /* Trespass target */
+};
+
+static unsigned char short_trespass_hr[] = {
+ 0, 0, 0, 0,
+ CLARIION_TRESPASS_PAGE, /* Page code */
+ 0x02, /* Page length - 2 */
+ 0x01, /* Trespass code + Honor reservation bit */
+ 0xff, /* Trespass target */
+};
+
+struct clariion_dh_data {
+ /*
+ * Use short trespass command (FC-series) or the long version
+ * (default for AX/CX CLARiiON arrays).
+ */
+ unsigned short_trespass;
+ /*
+ * Whether or not (default) to honor SCSI reservations when
+ * initiating a switch-over.
+ */
+ unsigned hr;
+ /* I/O buffer for both MODE_SELECT and INQUIRY commands. */
+ char buffer[CLARIION_BUFFER_SIZE];
+ /*
+ * SCSI sense buffer for commands -- assumes serial issuance
+ * and completion sequence of all commands for same multipath.
+ */
+ unsigned char sense[SCSI_SENSE_BUFFERSIZE];
+ /* which SP (A=0,B=1,UNBOUND=-1) is dflt SP for path's mapped dev */
+ int default_sp;
+ /* which SP (A=0,B=1,UNBOUND=-1) is active for path's mapped dev */
+ int current_sp;
+};
+
+static inline struct clariion_dh_data
+ *get_clariion_data(struct scsi_device *sdev)
+{
+ struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
+ BUG_ON(scsi_dh_data == NULL);
+ return ((struct clariion_dh_data *) scsi_dh_data->buf);
+}
+
+/*
+ * Parse MODE_SELECT cmd reply.
+ */
+static int trespass_endio(struct scsi_device *sdev, int result)
+{
+ int err = SCSI_DH_OK;
+ struct scsi_sense_hdr sshdr;
+ struct clariion_dh_data *csdev = get_clariion_data(sdev);
+ char *sense = csdev->sense;
+
+ if (status_byte(result) == CHECK_CONDITION &&
+ scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)) {
+ sdev_printk(KERN_ERR, sdev, "Found valid sense data 0x%2x, "
+ "0x%2x, 0x%2x while sending CLARiiON trespass "
+ "command.\n", sshdr.sense_key, sshdr.asc,
+ sshdr.ascq);
+
+ if ((sshdr.sense_key == 0x05) && (sshdr.asc == 0x04) &&
+ (sshdr.ascq == 0x00)) {
+ /*
+ * Array based copy in progress -- do not send
+ * mode_select or copy will be aborted mid-stream.
+ */
+ sdev_printk(KERN_INFO, sdev, "Array Based Copy in "
+ "progress while sending CLARiiON trespass "
+ "command.\n");
+ err = SCSI_DH_DEV_TEMP_BUSY;
+ } else if ((sshdr.sense_key == 0x02) && (sshdr.asc == 0x04) &&
+ (sshdr.ascq == 0x03)) {
+ /*
+ * LUN Not Ready - Manual Intervention Required
+ * indicates in-progress ucode upgrade (NDU).
+ */
+ sdev_printk(KERN_INFO, sdev, "Detected in-progress "
+ "ucode upgrade NDU operation while sending "
+ "CLARiiON trespass command.\n");
+ err = SCSI_DH_DEV_TEMP_BUSY;
+ } else
+ err = SCSI_DH_DEV_FAILED;
+ } else if (result) {
+ sdev_printk(KERN_ERR, sdev, "Error 0x%x while sending "
+ "CLARiiON trespass command.\n", result);
+ err = SCSI_DH_IO;
+ }
+
+ return err;
+}
+
+static int parse_sp_info_reply(struct scsi_device *sdev, int result,
+ int *default_sp, int *current_sp, int *new_current_sp)
+{
+ int err = SCSI_DH_OK;
+ struct clariion_dh_data *csdev = get_clariion_data(sdev);
+
+ if (result == 0) {
+ /* check for in-progress ucode upgrade (NDU) */
+ if (csdev->buffer[48] != 0) {
+ sdev_printk(KERN_NOTICE, sdev, "Detected in-progress "
+ "ucode upgrade NDU operation while finding "
+ "current active SP.");
+ err = SCSI_DH_DEV_TEMP_BUSY;
+ } else {
+ *default_sp = csdev->buffer[5];
+
+ if (csdev->buffer[4] == 2)
+ /* SP for path is current */
+ *current_sp = csdev->buffer[8];
+ else {
+ if (csdev->buffer[4] == 1)
+ /* SP for this path is NOT current */
+ if (csdev->buffer[8] == 0)
+ *current_sp = 1;
+ else
+ *current_sp = 0;
+ else
+ /* unbound LU or LUNZ */
+ *current_sp = CLARIION_UNBOUND_LU;
+ }
+ *new_current_sp = csdev->buffer[8];
+ }
+ } else {
+ struct scsi_sense_hdr sshdr;
+
+ err = SCSI_DH_IO;
+
+ if (scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
+ &sshdr))
+ sdev_printk(KERN_ERR, sdev, "Found valid sense data "
+ "0x%2x, 0x%2x, 0x%2x while finding current "
+ "active SP.", sshdr.sense_key, sshdr.asc,
+ sshdr.ascq);
+ else
+ sdev_printk(KERN_ERR, sdev, "Error 0x%x finding "
+ "current active SP.", result);
+ }
+
+ return err;
+}
+
+static int sp_info_endio(struct scsi_device *sdev, int result,
+ int mode_select_sent, int *done)
+{
+ struct clariion_dh_data *csdev = get_clariion_data(sdev);
+ int err_flags, default_sp, current_sp, new_current_sp;
+
+ err_flags = parse_sp_info_reply(sdev, result, &default_sp,
+ &current_sp, &new_current_sp);
+
+ if (err_flags != SCSI_DH_OK)
+ goto done;
+
+ if (mode_select_sent) {
+ csdev->default_sp = default_sp;
+ csdev->current_sp = current_sp;
+ } else {
+ /*
+		 * Issue the actual mode_select request IFF either
+ * (1) we do not know the identity of the current SP OR
+ * (2) what we think we know is actually correct.
+ */
+ if ((current_sp != CLARIION_UNBOUND_LU) &&
+ (new_current_sp != current_sp)) {
+
+ csdev->default_sp = default_sp;
+ csdev->current_sp = current_sp;
+
+ sdev_printk(KERN_INFO, sdev, "Ignoring path group "
+ "switch-over command for CLARiiON SP%s since "
+ " mapped device is already initialized.",
+ current_sp ? "B" : "A");
+ if (done)
+ *done = 1; /* as good as doing it */
+ }
+ }
+done:
+ return err_flags;
+}
+
+/*
+ * Get block request for REQ_BLOCK_PC command issued to path. Currently
+ * limited to MODE_SELECT (trespass) and INQUIRY (VPD page 0xC0) commands.
+ *
+ * Uses data and sense buffers in hardware handler context structure and
+ * assumes serial servicing of commands, both issuance and completion.
+ */
+static struct request *get_req(struct scsi_device *sdev, int cmd)
+{
+ struct clariion_dh_data *csdev = get_clariion_data(sdev);
+ struct request *rq;
+ unsigned char *page22;
+ int len = 0;
+
+ rq = blk_get_request(sdev->request_queue,
+ (cmd == MODE_SELECT) ? WRITE : READ, GFP_ATOMIC);
+ if (!rq) {
+ sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed");
+ return NULL;
+ }
+
+ memset(&rq->cmd, 0, BLK_MAX_CDB);
+ rq->cmd[0] = cmd;
+ rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
+
+ switch (cmd) {
+ case MODE_SELECT:
+ if (csdev->short_trespass) {
+ page22 = csdev->hr ? short_trespass_hr : short_trespass;
+ len = sizeof(short_trespass);
+ } else {
+ page22 = csdev->hr ? long_trespass_hr : long_trespass;
+ len = sizeof(long_trespass);
+ }
+ /*
+ * Can't DMA from kernel BSS -- must copy selected trespass
+ * command mode page contents to context buffer which is
+ * allocated by kmalloc.
+ */
+ BUG_ON((len > CLARIION_BUFFER_SIZE));
+ memcpy(csdev->buffer, page22, len);
+ rq->cmd_flags |= REQ_RW;
+ rq->cmd[1] = 0x10;
+ break;
+ case INQUIRY:
+ rq->cmd[1] = 0x1;
+ rq->cmd[2] = 0xC0;
+ len = CLARIION_BUFFER_SIZE;
+ memset(csdev->buffer, 0, CLARIION_BUFFER_SIZE);
+ break;
+ default:
+		BUG();
+ break;
+ }
+
+ rq->cmd[4] = len;
+ rq->cmd_type = REQ_TYPE_BLOCK_PC;
+ rq->cmd_flags |= REQ_FAILFAST;
+ rq->timeout = CLARIION_TIMEOUT;
+ rq->retries = CLARIION_RETRIES;
+
+ rq->sense = csdev->sense;
+ memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ rq->sense_len = 0;
+
+ if (blk_rq_map_kern(sdev->request_queue, rq, csdev->buffer,
+ len, GFP_ATOMIC)) {
+ __blk_put_request(rq->q, rq);
+ return NULL;
+ }
+
+ return rq;
+}
+
+static int send_cmd(struct scsi_device *sdev, int cmd)
+{
+ struct request *rq = get_req(sdev, cmd);
+
+ if (!rq)
+ return SCSI_DH_RES_TEMP_UNAVAIL;
+
+ return blk_execute_rq(sdev->request_queue, NULL, rq, 1);
+}
+
+static int clariion_activate(struct scsi_device *sdev)
+{
+ int result, done = 0;
+
+ result = send_cmd(sdev, INQUIRY);
+ result = sp_info_endio(sdev, result, 0, &done);
+ if (result || done)
+ goto done;
+
+ result = send_cmd(sdev, MODE_SELECT);
+ result = trespass_endio(sdev, result);
+ if (result)
+ goto done;
+
+ result = send_cmd(sdev, INQUIRY);
+ result = sp_info_endio(sdev, result, 1, NULL);
+done:
+ return result;
+}
+
+static int clariion_check_sense(struct scsi_device *sdev,
+ struct scsi_sense_hdr *sense_hdr)
+{
+ switch (sense_hdr->sense_key) {
+ case NOT_READY:
+ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x03)
+ /*
+ * LUN Not Ready - Manual Intervention Required
+ * indicates this is a passive path.
+ *
+ * FIXME: However, if this is seen and EVPD C0
+ * indicates that this is due to a NDU in
+ * progress, we should set FAIL_PATH too.
+ * This indicates we might have to do a SCSI
+ * inquiry in the end_io path. Ugh.
+ *
+ * Can return FAILED only when we want the error
+ * recovery process to kick in.
+ */
+ return SUCCESS;
+ break;
+ case ILLEGAL_REQUEST:
+ if (sense_hdr->asc == 0x25 && sense_hdr->ascq == 0x01)
+ /*
+ * An array based copy is in progress. Do not
+ * fail the path, do not bypass to another PG,
+ * do not retry. Fail the IO immediately.
+ * (Actually this is the same conclusion as in
+ * the default handler, but lets make sure.)
+ *
+ * Can return FAILED only when we want the error
+ * recovery process to kick in.
+ */
+ return SUCCESS;
+ break;
+ case UNIT_ATTENTION:
+ if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
+ /*
+ * Unit Attention Code. This is the first IO
+ * to the new path, so just retry.
+ */
+ return NEEDS_RETRY;
+ break;
+ }
+
+ /* success just means we do not care what scsi-ml does */
+ return SUCCESS;
+}
+
+static const struct {
+ char *vendor;
+ char *model;
+} clariion_dev_list[] = {
+ {"DGC", "RAID"},
+ {"DGC", "DISK"},
+ {NULL, NULL},
+};
+
+static int clariion_bus_notify(struct notifier_block *, unsigned long, void *);
+
+static struct scsi_device_handler clariion_dh = {
+ .name = CLARIION_NAME,
+ .module = THIS_MODULE,
+ .nb.notifier_call = clariion_bus_notify,
+ .check_sense = clariion_check_sense,
+ .activate = clariion_activate,
+};
+
+/*
+ * TODO: need some interface so we can set trespass values
+ */
+static int clariion_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct scsi_device *sdev;
+ struct scsi_dh_data *scsi_dh_data;
+ struct clariion_dh_data *h;
+ int i, found = 0;
+ unsigned long flags;
+
+ if (!scsi_is_sdev_device(dev))
+ return 0;
+
+ sdev = to_scsi_device(dev);
+
+ if (action == BUS_NOTIFY_ADD_DEVICE) {
+ for (i = 0; clariion_dev_list[i].vendor; i++) {
+ if (!strncmp(sdev->vendor, clariion_dev_list[i].vendor,
+ strlen(clariion_dev_list[i].vendor)) &&
+ !strncmp(sdev->model, clariion_dev_list[i].model,
+ strlen(clariion_dev_list[i].model))) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ goto out;
+
+ scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+			       + sizeof(*h), GFP_KERNEL);
+ if (!scsi_dh_data) {
+ sdev_printk(KERN_ERR, sdev, "Attach failed %s.\n",
+ CLARIION_NAME);
+ goto out;
+ }
+
+ scsi_dh_data->scsi_dh = &clariion_dh;
+ h = (struct clariion_dh_data *) scsi_dh_data->buf;
+ h->default_sp = CLARIION_UNBOUND_LU;
+ h->current_sp = CLARIION_UNBOUND_LU;
+
+ spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+ sdev->scsi_dh_data = scsi_dh_data;
+ spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+
+ sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", CLARIION_NAME);
+ try_module_get(THIS_MODULE);
+
+ } else if (action == BUS_NOTIFY_DEL_DEVICE) {
+ if (sdev->scsi_dh_data == NULL ||
+ sdev->scsi_dh_data->scsi_dh != &clariion_dh)
+ goto out;
+
+ spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+ scsi_dh_data = sdev->scsi_dh_data;
+ sdev->scsi_dh_data = NULL;
+ spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+
+ sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n",
+ CLARIION_NAME);
+
+ kfree(scsi_dh_data);
+ module_put(THIS_MODULE);
+ }
+
+out:
+ return 0;
+}
+
+static int __init clariion_init(void)
+{
+ int r;
+
+ r = scsi_register_device_handler(&clariion_dh);
+ if (r != 0)
+ printk(KERN_ERR "Failed to register scsi device handler.");
+ return r;
+}
+
+static void __exit clariion_exit(void)
+{
+ scsi_unregister_device_handler(&clariion_dh);
+}
+
+module_init(clariion_init);
+module_exit(clariion_exit);
+
+MODULE_DESCRIPTION("EMC CX/AX/FC-family driver");
+MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, Chandra Seetharaman <sekharan@us.ibm.com>");
+MODULE_LICENSE("GPL");
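
From the consumer's side (dm-multipath is the intended caller), all of the above is driven through scsi_dh_activate() on the path's request queue. A minimal sketch, assuming the caller holds a reference to the scsi_device (clariion_switch_path() is hypothetical):

	static int clariion_switch_path(struct scsi_device *sdev)
	{
		int err = scsi_dh_activate(sdev->request_queue);

		if (err == SCSI_DH_DEV_TEMP_BUSY)
			return -EAGAIN;	/* NDU or array-based copy in progress */
		return (err == SCSI_DH_OK) ? 0 : -EIO;
	}
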
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
new file mode 100644
index 000000000000..ae6be87d6a83
--- /dev/null
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -0,0 +1,207 @@
+/*
+ * Basic HP/COMPAQ MSA 1000 support. This is only needed if the device
+ * firmware cannot be upgraded.
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2006 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dh.h>
+
+#define HP_SW_NAME "hp_sw"
+
+#define HP_SW_TIMEOUT (60 * HZ)
+#define HP_SW_RETRIES 3
+
+struct hp_sw_dh_data {
+ unsigned char sense[SCSI_SENSE_BUFFERSIZE];
+ int retries;
+};
+
+static inline struct hp_sw_dh_data *get_hp_sw_data(struct scsi_device *sdev)
+{
+ struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
+ BUG_ON(scsi_dh_data == NULL);
+ return ((struct hp_sw_dh_data *) scsi_dh_data->buf);
+}
+
+static int hp_sw_done(struct scsi_device *sdev)
+{
+ struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
+ struct scsi_sense_hdr sshdr;
+ int rc;
+
+ sdev_printk(KERN_INFO, sdev, "hp_sw_done\n");
+
+ rc = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
+ if (!rc)
+ goto done;
+ switch (sshdr.sense_key) {
+ case NOT_READY:
+ if ((sshdr.asc == 0x04) && (sshdr.ascq == 3)) {
+ rc = SCSI_DH_RETRY;
+ h->retries++;
+ break;
+ }
+ /* fall through */
+ default:
+ h->retries++;
+ rc = SCSI_DH_IMM_RETRY;
+ }
+
+done:
+ if (rc == SCSI_DH_OK || rc == SCSI_DH_IO)
+ h->retries = 0;
+ else if (h->retries > HP_SW_RETRIES) {
+ h->retries = 0;
+ rc = SCSI_DH_IO;
+ }
+ return rc;
+}
+
+static int hp_sw_activate(struct scsi_device *sdev)
+{
+ struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
+ struct request *req;
+ int ret = SCSI_DH_RES_TEMP_UNAVAIL;
+
+ req = blk_get_request(sdev->request_queue, WRITE, GFP_ATOMIC);
+ if (!req)
+ goto done;
+
+ sdev_printk(KERN_INFO, sdev, "sending START_STOP.");
+
+ req->cmd_type = REQ_TYPE_BLOCK_PC;
+ req->cmd_flags |= REQ_FAILFAST;
+ req->cmd_len = COMMAND_SIZE(START_STOP);
+ memset(req->cmd, 0, MAX_COMMAND_SIZE);
+ req->cmd[0] = START_STOP;
+ req->cmd[4] = 1; /* Start spin cycle */
+ req->timeout = HP_SW_TIMEOUT;
+ req->sense = h->sense;
+ memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ req->sense_len = 0;
+
+ ret = blk_execute_rq(req->q, NULL, req, 1);
+ if (!ret) /* SUCCESS */
+ ret = hp_sw_done(sdev);
+ else
+ ret = SCSI_DH_IO;
+done:
+ return ret;
+}
+
+static const struct {
+ char *vendor;
+ char *model;
+} hp_sw_dh_data_list[] = {
+ {"COMPAQ", "MSA"},
+ {"HP", "HSV"},
+ {"DEC", "HSG80"},
+ {NULL, NULL},
+};
+
+static int hp_sw_bus_notify(struct notifier_block *, unsigned long, void *);
+
+static struct scsi_device_handler hp_sw_dh = {
+ .name = HP_SW_NAME,
+ .module = THIS_MODULE,
+ .nb.notifier_call = hp_sw_bus_notify,
+ .activate = hp_sw_activate,
+};
+
+static int hp_sw_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct scsi_device *sdev;
+ struct scsi_dh_data *scsi_dh_data;
+ int i, found = 0;
+ unsigned long flags;
+
+ if (!scsi_is_sdev_device(dev))
+ return 0;
+
+ sdev = to_scsi_device(dev);
+
+ if (action == BUS_NOTIFY_ADD_DEVICE) {
+ for (i = 0; hp_sw_dh_data_list[i].vendor; i++) {
+ if (!strncmp(sdev->vendor, hp_sw_dh_data_list[i].vendor,
+ strlen(hp_sw_dh_data_list[i].vendor)) &&
+ !strncmp(sdev->model, hp_sw_dh_data_list[i].model,
+ strlen(hp_sw_dh_data_list[i].model))) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ goto out;
+
+ scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+			       + sizeof(struct hp_sw_dh_data), GFP_KERNEL);
+ if (!scsi_dh_data) {
+ sdev_printk(KERN_ERR, sdev, "Attach Failed %s.\n",
+ HP_SW_NAME);
+ goto out;
+ }
+
+ scsi_dh_data->scsi_dh = &hp_sw_dh;
+ spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+ sdev->scsi_dh_data = scsi_dh_data;
+ spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+ try_module_get(THIS_MODULE);
+
+ sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", HP_SW_NAME);
+ } else if (action == BUS_NOTIFY_DEL_DEVICE) {
+ if (sdev->scsi_dh_data == NULL ||
+ sdev->scsi_dh_data->scsi_dh != &hp_sw_dh)
+ goto out;
+
+ spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+ scsi_dh_data = sdev->scsi_dh_data;
+ sdev->scsi_dh_data = NULL;
+ spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+ module_put(THIS_MODULE);
+
+ sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", HP_SW_NAME);
+
+ kfree(scsi_dh_data);
+ }
+
+out:
+ return 0;
+}
+
+static int __init hp_sw_init(void)
+{
+ return scsi_register_device_handler(&hp_sw_dh);
+}
+
+static void __exit hp_sw_exit(void)
+{
+ scsi_unregister_device_handler(&hp_sw_dh);
+}
+
+module_init(hp_sw_init);
+module_exit(hp_sw_exit);
+
+MODULE_DESCRIPTION("HP MSA 1000");
+MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
new file mode 100644
index 000000000000..fdf34b0ec6e1
--- /dev/null
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -0,0 +1,696 @@
+/*
+ * Engenio/LSI RDAC SCSI Device Handler
+ *
+ * Copyright (C) 2005 Mike Christie. All rights reserved.
+ * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+#include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_dh.h>
+
+#define RDAC_NAME "rdac"
+
+/*
+ * LSI mode page stuff
+ *
+ * These struct definitions and the forming of the
+ * mode page were taken from the LSI RDAC 2.4 GPL'd
+ * driver, and then converted to Linux conventions.
+ */
+#define RDAC_QUIESCENCE_TIME 20
+/*
+ * Page Codes
+ */
+#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c
+
+/*
+ * Controller modes definitions
+ */
+#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS 0x02
+
+/*
+ * RDAC Options field
+ */
+#define RDAC_FORCED_QUIESCENCE 0x02
+
+#define RDAC_TIMEOUT (60 * HZ)
+#define RDAC_RETRIES 3
+
+struct rdac_mode_6_hdr {
+ u8 data_len;
+ u8 medium_type;
+ u8 device_params;
+ u8 block_desc_len;
+};
+
+struct rdac_mode_10_hdr {
+ u16 data_len;
+ u8 medium_type;
+ u8 device_params;
+ u16 reserved;
+ u16 block_desc_len;
+};
+
+struct rdac_mode_common {
+ u8 controller_serial[16];
+ u8 alt_controller_serial[16];
+ u8 rdac_mode[2];
+ u8 alt_rdac_mode[2];
+ u8 quiescence_timeout;
+ u8 rdac_options;
+};
+
+struct rdac_pg_legacy {
+ struct rdac_mode_6_hdr hdr;
+ u8 page_code;
+ u8 page_len;
+ struct rdac_mode_common common;
+#define MODE6_MAX_LUN 32
+ u8 lun_table[MODE6_MAX_LUN];
+ u8 reserved2[32];
+ u8 reserved3;
+ u8 reserved4;
+};
+
+struct rdac_pg_expanded {
+ struct rdac_mode_10_hdr hdr;
+ u8 page_code;
+ u8 subpage_code;
+ u8 page_len[2];
+ struct rdac_mode_common common;
+ u8 lun_table[256];
+ u8 reserved3;
+ u8 reserved4;
+};
+
+struct c9_inquiry {
+ u8 peripheral_info;
+ u8 page_code; /* 0xC9 */
+ u8 reserved1;
+ u8 page_len;
+ u8 page_id[4]; /* "vace" */
+ u8 avte_cvp;
+ u8 path_prio;
+ u8 reserved2[38];
+};
+
+#define SUBSYS_ID_LEN 16
+#define SLOT_ID_LEN 2
+
+struct c4_inquiry {
+ u8 peripheral_info;
+ u8 page_code; /* 0xC4 */
+ u8 reserved1;
+ u8 page_len;
+ u8 page_id[4]; /* "subs" */
+ u8 subsys_id[SUBSYS_ID_LEN];
+ u8 revision[4];
+ u8 slot_id[SLOT_ID_LEN];
+ u8 reserved[2];
+};
+
+struct rdac_controller {
+ u8 subsys_id[SUBSYS_ID_LEN];
+ u8 slot_id[SLOT_ID_LEN];
+ int use_ms10;
+ struct kref kref;
+ struct list_head node; /* list of all controllers */
+ union {
+ struct rdac_pg_legacy legacy;
+ struct rdac_pg_expanded expanded;
+ } mode_select;
+};
+struct c8_inquiry {
+ u8 peripheral_info;
+ u8 page_code; /* 0xC8 */
+ u8 reserved1;
+ u8 page_len;
+ u8 page_id[4]; /* "edid" */
+ u8 reserved2[3];
+ u8 vol_uniq_id_len;
+ u8 vol_uniq_id[16];
+ u8 vol_user_label_len;
+ u8 vol_user_label[60];
+ u8 array_uniq_id_len;
+ u8 array_unique_id[16];
+ u8 array_user_label_len;
+ u8 array_user_label[60];
+ u8 lun[8];
+};
+
+struct c2_inquiry {
+ u8 peripheral_info;
+ u8 page_code; /* 0xC2 */
+ u8 reserved1;
+ u8 page_len;
+ u8 page_id[4]; /* "swr4" */
+ u8 sw_version[3];
+ u8 sw_date[3];
+ u8 features_enabled;
+ u8 max_lun_supported;
+ u8 partitions[239]; /* Total allocation length should be 0xFF */
+};
+
+struct rdac_dh_data {
+ struct rdac_controller *ctlr;
+#define UNINITIALIZED_LUN (1 << 8)
+ unsigned lun;
+#define RDAC_STATE_ACTIVE 0
+#define RDAC_STATE_PASSIVE 1
+ unsigned char state;
+ unsigned char sense[SCSI_SENSE_BUFFERSIZE];
+ union {
+ struct c2_inquiry c2;
+ struct c4_inquiry c4;
+ struct c8_inquiry c8;
+ struct c9_inquiry c9;
+ } inq;
+};
+
+static LIST_HEAD(ctlr_list);
+static DEFINE_SPINLOCK(list_lock);
+
+static inline struct rdac_dh_data *get_rdac_data(struct scsi_device *sdev)
+{
+ struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
+ BUG_ON(scsi_dh_data == NULL);
+ return ((struct rdac_dh_data *) scsi_dh_data->buf);
+}
+
+static struct request *get_rdac_req(struct scsi_device *sdev,
+ void *buffer, unsigned buflen, int rw)
+{
+ struct request *rq;
+ struct request_queue *q = sdev->request_queue;
+ struct rdac_dh_data *h = get_rdac_data(sdev);
+
+ rq = blk_get_request(q, rw, GFP_KERNEL);
+
+ if (!rq) {
+ sdev_printk(KERN_INFO, sdev,
+ "get_rdac_req: blk_get_request failed.\n");
+ return NULL;
+ }
+
+ if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_KERNEL)) {
+ blk_put_request(rq);
+ sdev_printk(KERN_INFO, sdev,
+ "get_rdac_req: blk_rq_map_kern failed.\n");
+ return NULL;
+ }
+
+ memset(&rq->cmd, 0, BLK_MAX_CDB);
+ rq->sense = h->sense;
+ memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+ rq->sense_len = 0;
+
+ rq->cmd_type = REQ_TYPE_BLOCK_PC;
+ rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
+ rq->retries = RDAC_RETRIES;
+ rq->timeout = RDAC_TIMEOUT;
+
+ return rq;
+}
+
+static struct request *rdac_failover_get(struct scsi_device *sdev)
+{
+ struct request *rq;
+ struct rdac_mode_common *common;
+ unsigned data_size;
+ struct rdac_dh_data *h = get_rdac_data(sdev);
+
+ if (h->ctlr->use_ms10) {
+ struct rdac_pg_expanded *rdac_pg;
+
+ data_size = sizeof(struct rdac_pg_expanded);
+ rdac_pg = &h->ctlr->mode_select.expanded;
+ memset(rdac_pg, 0, data_size);
+ common = &rdac_pg->common;
+ rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
+ rdac_pg->subpage_code = 0x1;
+ rdac_pg->page_len[0] = 0x01;
+ rdac_pg->page_len[1] = 0x28;
+ rdac_pg->lun_table[h->lun] = 0x81;
+ } else {
+ struct rdac_pg_legacy *rdac_pg;
+
+ data_size = sizeof(struct rdac_pg_legacy);
+ rdac_pg = &h->ctlr->mode_select.legacy;
+ memset(rdac_pg, 0, data_size);
+ common = &rdac_pg->common;
+ rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
+ rdac_pg->page_len = 0x68;
+ rdac_pg->lun_table[h->lun] = 0x81;
+ }
+ common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
+ common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
+	common->rdac_options = RDAC_FORCED_QUIESCENCE;
+
+ /* get request for block layer packet command */
+ rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE);
+ if (!rq)
+ return NULL;
+
+ /* Prepare the command. */
+ if (h->ctlr->use_ms10) {
+ rq->cmd[0] = MODE_SELECT_10;
+ rq->cmd[7] = data_size >> 8;
+ rq->cmd[8] = data_size & 0xff;
+ } else {
+ rq->cmd[0] = MODE_SELECT;
+ rq->cmd[4] = data_size;
+ }
+ rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
+
+ return rq;
+}
+
+static void release_controller(struct kref *kref)
+{
+ struct rdac_controller *ctlr;
+ ctlr = container_of(kref, struct rdac_controller, kref);
+
+ spin_lock(&list_lock);
+ list_del(&ctlr->node);
+ spin_unlock(&list_lock);
+ kfree(ctlr);
+}
+
+static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id)
+{
+ struct rdac_controller *ctlr, *tmp;
+
+ spin_lock(&list_lock);
+
+ list_for_each_entry(tmp, &ctlr_list, node) {
+ if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) &&
+ (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) {
+ kref_get(&tmp->kref);
+ spin_unlock(&list_lock);
+ return tmp;
+ }
+ }
+ ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
+ if (!ctlr)
+ goto done;
+
+ /* initialize fields of controller */
+ memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
+ memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
+ kref_init(&ctlr->kref);
+ ctlr->use_ms10 = -1;
+ list_add(&ctlr->node, &ctlr_list);
+done:
+ spin_unlock(&list_lock);
+ return ctlr;
+}
+
+static int submit_inquiry(struct scsi_device *sdev, int page_code,
+ unsigned int len)
+{
+ struct request *rq;
+ struct request_queue *q = sdev->request_queue;
+ struct rdac_dh_data *h = get_rdac_data(sdev);
+ int err = SCSI_DH_RES_TEMP_UNAVAIL;
+
+ rq = get_rdac_req(sdev, &h->inq, len, READ);
+ if (!rq)
+ goto done;
+
+ /* Prepare the command. */
+ rq->cmd[0] = INQUIRY;
+ rq->cmd[1] = 1;
+ rq->cmd[2] = page_code;
+ rq->cmd[4] = len;
+ rq->cmd_len = COMMAND_SIZE(INQUIRY);
+ err = blk_execute_rq(q, NULL, rq, 1);
+ if (err == -EIO)
+ err = SCSI_DH_IO;
+done:
+ return err;
+}
+
+static int get_lun(struct scsi_device *sdev)
+{
+ int err;
+ struct c8_inquiry *inqp;
+ struct rdac_dh_data *h = get_rdac_data(sdev);
+
+ err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry));
+ if (err == SCSI_DH_OK) {
+ inqp = &h->inq.c8;
+ h->lun = inqp->lun[7]; /* currently it uses only one byte */
+ }
+ return err;
+}
+
+#define RDAC_OWNED 0
+#define RDAC_UNOWNED 1
+#define RDAC_FAILED 2
+static int check_ownership(struct scsi_device *sdev)
+{
+ int err;
+ struct c9_inquiry *inqp;
+ struct rdac_dh_data *h = get_rdac_data(sdev);
+
+ err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry));
+ if (err == SCSI_DH_OK) {
+ err = RDAC_UNOWNED;
+ inqp = &h->inq.c9;
+ /*
+ * If in AVT mode or if the path already owns the LUN,
+ * return RDAC_OWNED;
+ */
+ if (((inqp->avte_cvp >> 7) == 0x1) ||
+ ((inqp->avte_cvp & 0x1) != 0))
+ err = RDAC_OWNED;
+ } else
+ err = RDAC_FAILED;
+ return err;
+}
+
+static int initialize_controller(struct scsi_device *sdev)
+{
+ int err;
+ struct c4_inquiry *inqp;
+ struct rdac_dh_data *h = get_rdac_data(sdev);
+
+ err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry));
+ if (err == SCSI_DH_OK) {
+ inqp = &h->inq.c4;
+ h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id);
+ if (!h->ctlr)
+ err = SCSI_DH_RES_TEMP_UNAVAIL;
+ }
+ return err;
+}
+
+static int set_mode_select(struct scsi_device *sdev)
+{
+ int err;
+ struct c2_inquiry *inqp;
+ struct rdac_dh_data *h = get_rdac_data(sdev);
+
+ err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry));
+ if (err == SCSI_DH_OK) {
+ inqp = &h->inq.c2;
+ /*
+ * If more than MODE6_MAX_LUN luns are supported, use
+ * mode select 10
+ */
+ if (inqp->max_lun_supported >= MODE6_MAX_LUN)
+ h->ctlr->use_ms10 = 1;
+ else
+ h->ctlr->use_ms10 = 0;
+ }
+ return err;
+}
+
+static int mode_select_handle_sense(struct scsi_device *sdev)
+{
+ struct scsi_sense_hdr sense_hdr;
+ struct rdac_dh_data *h = get_rdac_data(sdev);
+ int sense, err = SCSI_DH_IO, ret;
+
+ ret = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
+ if (!ret)
+ goto done;
+
+ err = SCSI_DH_OK;
+ sense = (sense_hdr.sense_key << 16) | (sense_hdr.asc << 8) |
+ sense_hdr.ascq;
+ /* If it is retryable failure, submit the c9 inquiry again */
+ if (sense == 0x59136 || sense == 0x68b02 || sense == 0xb8b02 ||
+ sense == 0x62900) {
+ /* 0x59136 - Command lock contention
+		 * 0x[6b]8b02 - Quiescence in progress or achieved
+ * 0x62900 - Power On, Reset, or Bus Device Reset
+ */
+ err = SCSI_DH_RETRY;
+ }
+
+ if (sense)
+ sdev_printk(KERN_INFO, sdev,
+ "MODE_SELECT failed with sense 0x%x.\n", sense);
+done:
+ return err;
+}
+
+static int send_mode_select(struct scsi_device *sdev)
+{
+ struct request *rq;
+ struct request_queue *q = sdev->request_queue;
+ struct rdac_dh_data *h = get_rdac_data(sdev);
+ int err = SCSI_DH_RES_TEMP_UNAVAIL;
+
+ rq = rdac_failover_get(sdev);
+ if (!rq)
+ goto done;
+
+ sdev_printk(KERN_INFO, sdev, "queueing MODE_SELECT command.\n");
+
+ err = blk_execute_rq(q, NULL, rq, 1);
+ if (err != SCSI_DH_OK)
+ err = mode_select_handle_sense(sdev);
+ if (err == SCSI_DH_OK)
+ h->state = RDAC_STATE_ACTIVE;
+done:
+ return err;
+}
+
+static int rdac_activate(struct scsi_device *sdev)
+{
+ struct rdac_dh_data *h = get_rdac_data(sdev);
+ int err = SCSI_DH_OK;
+
+ if (h->lun == UNINITIALIZED_LUN) {
+ err = get_lun(sdev);
+ if (err != SCSI_DH_OK)
+ goto done;
+ }
+
+ err = check_ownership(sdev);
+ switch (err) {
+ case RDAC_UNOWNED:
+ break;
+ case RDAC_OWNED:
+ err = SCSI_DH_OK;
+ goto done;
+ case RDAC_FAILED:
+ default:
+ err = SCSI_DH_IO;
+ goto done;
+ }
+
+ if (!h->ctlr) {
+ err = initialize_controller(sdev);
+ if (err != SCSI_DH_OK)
+ goto done;
+ }
+
+ if (h->ctlr->use_ms10 == -1) {
+ err = set_mode_select(sdev);
+ if (err != SCSI_DH_OK)
+ goto done;
+ }
+
+ err = send_mode_select(sdev);
+done:
+ return err;
+}
+
+static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
+{
+ struct rdac_dh_data *h = get_rdac_data(sdev);
+ int ret = BLKPREP_OK;
+
+ if (h->state != RDAC_STATE_ACTIVE) {
+ ret = BLKPREP_KILL;
+ req->cmd_flags |= REQ_QUIET;
+ }
+ return ret;
+
+}
+
+static int rdac_check_sense(struct scsi_device *sdev,
+ struct scsi_sense_hdr *sense_hdr)
+{
+ struct rdac_dh_data *h = get_rdac_data(sdev);
+ switch (sense_hdr->sense_key) {
+ case NOT_READY:
+ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
+ /* LUN Not Ready - Storage firmware incompatible
+			 * Manual code synchronisation required.
+ *
+ * Nothing we can do here. Try to bypass the path.
+ */
+ return SUCCESS;
+ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0xA1)
+			/* LUN Not Ready - Quiescence in progress
+ *
+ * Just retry and wait.
+ */
+ return NEEDS_RETRY;
+ break;
+ case ILLEGAL_REQUEST:
+ if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) {
+ /* Invalid Request - Current Logical Unit Ownership.
+			 * Controller is not the current owner of the LUN.
+			 * Fail the path so that the other path can be used.
+ */
+ h->state = RDAC_STATE_PASSIVE;
+ return SUCCESS;
+ }
+ break;
+ case UNIT_ATTENTION:
+ if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
+ /*
+ * Power On, Reset, or Bus Device Reset, just retry.
+ */
+ return NEEDS_RETRY;
+ break;
+ }
+ /* success just means we do not care what scsi-ml does */
+ return SCSI_RETURN_NOT_HANDLED;
+}
+
+static const struct {
+ char *vendor;
+ char *model;
+} rdac_dev_list[] = {
+ {"IBM", "1722"},
+ {"IBM", "1724"},
+ {"IBM", "1726"},
+ {"IBM", "1742"},
+ {"IBM", "1814"},
+ {"IBM", "1815"},
+ {"IBM", "1818"},
+ {"IBM", "3526"},
+ {"SGI", "TP9400"},
+ {"SGI", "TP9500"},
+ {"SGI", "IS"},
+ {"STK", "OPENstorage D280"},
+ {"SUN", "CSM200_R"},
+ {"SUN", "LCSM100_F"},
+ {NULL, NULL},
+};
+
+static int rdac_bus_notify(struct notifier_block *, unsigned long, void *);
+
+static struct scsi_device_handler rdac_dh = {
+ .name = RDAC_NAME,
+ .module = THIS_MODULE,
+ .nb.notifier_call = rdac_bus_notify,
+ .prep_fn = rdac_prep_fn,
+ .check_sense = rdac_check_sense,
+ .activate = rdac_activate,
+};
+
+/*
+ * TODO: need some interface so we can set trespass values
+ */
+static int rdac_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct scsi_device *sdev;
+ struct scsi_dh_data *scsi_dh_data;
+ struct rdac_dh_data *h;
+ int i, found = 0;
+ unsigned long flags;
+
+ if (!scsi_is_sdev_device(dev))
+ return 0;
+
+ sdev = to_scsi_device(dev);
+
+ if (action == BUS_NOTIFY_ADD_DEVICE) {
+ for (i = 0; rdac_dev_list[i].vendor; i++) {
+ if (!strncmp(sdev->vendor, rdac_dev_list[i].vendor,
+ strlen(rdac_dev_list[i].vendor)) &&
+ !strncmp(sdev->model, rdac_dev_list[i].model,
+ strlen(rdac_dev_list[i].model))) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ goto out;
+
+ scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+			       + sizeof(*h), GFP_KERNEL);
+ if (!scsi_dh_data) {
+ sdev_printk(KERN_ERR, sdev, "Attach failed %s.\n",
+ RDAC_NAME);
+ goto out;
+ }
+
+ scsi_dh_data->scsi_dh = &rdac_dh;
+ h = (struct rdac_dh_data *) scsi_dh_data->buf;
+ h->lun = UNINITIALIZED_LUN;
+ h->state = RDAC_STATE_ACTIVE;
+ spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+ sdev->scsi_dh_data = scsi_dh_data;
+ spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+ try_module_get(THIS_MODULE);
+
+ sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", RDAC_NAME);
+
+ } else if (action == BUS_NOTIFY_DEL_DEVICE) {
+ if (sdev->scsi_dh_data == NULL ||
+ sdev->scsi_dh_data->scsi_dh != &rdac_dh)
+ goto out;
+
+ spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+ scsi_dh_data = sdev->scsi_dh_data;
+ sdev->scsi_dh_data = NULL;
+ spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+
+ h = (struct rdac_dh_data *) scsi_dh_data->buf;
+ if (h->ctlr)
+ kref_put(&h->ctlr->kref, release_controller);
+ kfree(scsi_dh_data);
+ module_put(THIS_MODULE);
+ sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", RDAC_NAME);
+ }
+
+out:
+ return 0;
+}
+
+static int __init rdac_init(void)
+{
+ int r;
+
+ r = scsi_register_device_handler(&rdac_dh);
+ if (r != 0)
+ printk(KERN_ERR "Failed to register scsi device handler.");
+ return r;
+}
+
+static void __exit rdac_exit(void)
+{
+ scsi_unregister_device_handler(&rdac_dh);
+}
+
+module_init(rdac_init);
+module_exit(rdac_exit);
+
+MODULE_DESCRIPTION("Multipath LSI/Engenio RDAC driver");
+MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
+MODULE_LICENSE("GPL");
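
The retry test in mode_select_handle_sense() above packs sense key, ASC and ASCQ into a single integer, so 0x62900 decodes as key 0x06 (UNIT ATTENTION), ASC 0x29, ASCQ 0x00. An illustrative helper in the same style (rdac_sense_is_reset() is hypothetical, not part of the patch):

	static int rdac_sense_is_reset(struct scsi_sense_hdr *sshdr)
	{
		int sense = (sshdr->sense_key << 16) | (sshdr->asc << 8) |
			    sshdr->ascq;

		return sense == 0x62900; /* Power On, Reset, or Bus Device Reset */
	}
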
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 8508816f303d..2bc30e32b67a 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -49,6 +49,7 @@ MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
#include <linux/kernel.h> /* for printk */
#include <linux/sched.h>
#include <linux/reboot.h>
+#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
@@ -1727,10 +1728,12 @@ static int adpt_open(struct inode *inode, struct file *file)
int minor;
adpt_hba* pHba;
+ lock_kernel();
//TODO check for root access
//
minor = iminor(inode);
if (minor >= hba_count) {
+ unlock_kernel();
return -ENXIO;
}
mutex_lock(&adpt_configuration_lock);
@@ -1741,6 +1744,7 @@ static int adpt_open(struct inode *inode, struct file *file)
}
if (pHba == NULL) {
mutex_unlock(&adpt_configuration_lock);
+ unlock_kernel();
return -ENXIO;
}
@@ -1751,6 +1755,7 @@ static int adpt_open(struct inode *inode, struct file *file)
pHba->in_use = 1;
mutex_unlock(&adpt_configuration_lock);
+ unlock_kernel();
return 0;
}
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 59fbef08d690..62a4618530d0 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -219,19 +219,10 @@ static void esp_reset_esp(struct esp *esp)
/* Now reset the ESP chip */
scsi_esp_cmd(esp, ESP_CMD_RC);
scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
+ if (esp->rev == FAST)
+ esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
- /* Reload the configuration registers */
- esp_write8(esp->cfact, ESP_CFACT);
-
- esp->prev_stp = 0;
- esp_write8(esp->prev_stp, ESP_STP);
-
- esp->prev_soff = 0;
- esp_write8(esp->prev_soff, ESP_SOFF);
-
- esp_write8(esp->neg_defp, ESP_TIMEO);
-
/* This is the only point at which it is reliable to read
* the ID-code for fast ESP chip variants.
*/
@@ -316,6 +307,17 @@ static void esp_reset_esp(struct esp *esp)
break;
}
+ /* Reload the configuration registers */
+ esp_write8(esp->cfact, ESP_CFACT);
+
+ esp->prev_stp = 0;
+ esp_write8(esp->prev_stp, ESP_STP);
+
+ esp->prev_soff = 0;
+ esp_write8(esp->prev_soff, ESP_SOFF);
+
+ esp_write8(esp->neg_defp, ESP_TIMEO);
+
/* Eat any bitrot in the chip */
esp_read8(ESP_INTRPT);
udelay(100);
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 46771d4c81bd..822d5214692b 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -120,6 +120,7 @@
#include <linux/timer.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
+#include <linux/smp_lock.h>
#ifdef GDTH_RTC
#include <linux/mc146818rtc.h>
@@ -4019,10 +4020,12 @@ static int gdth_open(struct inode *inode, struct file *filep)
{
gdth_ha_str *ha;
+ lock_kernel();
list_for_each_entry(ha, &gdth_instances, list) {
if (!ha->sdev)
ha->sdev = scsi_get_host_dev(ha->shost);
}
+ unlock_kernel();
TRACE(("gdth_open()\n"));
return 0;
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index c6457bfc8a49..35cd892dce04 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -290,7 +290,7 @@ static void scsi_host_dev_release(struct device *dev)
kfree(shost);
}
-struct device_type scsi_host_type = {
+static struct device_type scsi_host_type = {
.name = "scsi_host",
.release = scsi_host_dev_release,
};
diff --git a/drivers/scsi/ibmvscsi/Makefile b/drivers/scsi/ibmvscsi/Makefile
index 6ac0633d5452..a423d9633625 100644
--- a/drivers/scsi/ibmvscsi/Makefile
+++ b/drivers/scsi/ibmvscsi/Makefile
@@ -5,3 +5,4 @@ ibmvscsic-$(CONFIG_PPC_ISERIES) += iseries_vscsi.o
ibmvscsic-$(CONFIG_PPC_PSERIES) += rpa_vscsi.o
obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvstgt.o
+obj-$(CONFIG_SCSI_IBMVFC) += ibmvfc.o
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
new file mode 100644
index 000000000000..eb702b96d57c
--- /dev/null
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -0,0 +1,3910 @@
+/*
+ * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
+ *
+ * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) IBM Corporation, 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/of.h>
+#include <linux/stringify.h>
+#include <asm/firmware.h>
+#include <asm/irq.h>
+#include <asm/vio.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_fc.h>
+#include "ibmvfc.h"
+
+static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
+static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
+static unsigned int max_lun = IBMVFC_MAX_LUN;
+static unsigned int max_targets = IBMVFC_MAX_TARGETS;
+static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
+static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
+static unsigned int dev_loss_tmo = IBMVFC_DEV_LOSS_TMO;
+static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
+static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
+static LIST_HEAD(ibmvfc_head);
+static DEFINE_SPINLOCK(ibmvfc_driver_lock);
+static struct scsi_transport_template *ibmvfc_transport_template;
+
+MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
+MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(IBMVFC_DRIVER_VERSION);
+
+module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
+ "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
+module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(default_timeout,
+ "Default timeout in seconds for initialization and EH commands. "
+ "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
+module_param_named(max_requests, max_requests, uint, S_IRUGO);
+MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
+ "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
+module_param_named(max_lun, max_lun, uint, S_IRUGO);
+MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
+ "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
+module_param_named(max_targets, max_targets, uint, S_IRUGO);
+MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
+ "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
+module_param_named(disc_threads, disc_threads, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
+ "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
+module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Enable driver debug information. "
+ "[Default=" __stringify(IBMVFC_DEBUG) "]");
+module_param_named(dev_loss_tmo, dev_loss_tmo, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dev_loss_tmo, "Maximum number of seconds that the FC "
+ "transport should insulate the loss of a remote port. Once this "
+ "value is exceeded, the scsi target is removed. "
+ "[Default=" __stringify(IBMVFC_DEV_LOSS_TMO) "]");
+module_param_named(log_level, log_level, uint, 0);
+MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of the device driver. "
+ "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
+
+static const struct {
+ u16 status;
+ u16 error;
+ u8 result;
+ u8 retry;
+ int log;
+ char *name;
+} cmd_status [] = {
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_NO_CONNECT, 1, 1, "network down" },
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 0, 0, "link halted" },
+ { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },
+
+ { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
+ { IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
+ { IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ABORT, 0, 1, "invalid parameter" },
+ { IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ABORT, 0, 1, "missing parameter" },
+ { IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
+ { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ABORT, 0, 1, "transaction cancelled" },
+ { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ABORT, 0, 1, "transaction cancelled implicit" },
+ { IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
+ { IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },
+
+ { IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
+ { IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
+ { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
+ { IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
+ { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
+ { IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
+ { IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
+ { IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
+ { IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
+ { IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
+ { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
+
+ { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
+};
+
+static void ibmvfc_npiv_login(struct ibmvfc_host *);
+static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
+static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
+static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
+
+static const char *unknown_error = "unknown error";
+
+#ifdef CONFIG_SCSI_IBMVFC_TRACE
+/**
+ * ibmvfc_trc_start - Log a start trace entry
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_trc_start(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
+ struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
+ struct ibmvfc_trace_entry *entry;
+
+ entry = &vhost->trace[vhost->trace_index++];
+ entry->evt = evt;
+ entry->time = jiffies;
+ entry->fmt = evt->crq.format;
+ entry->type = IBMVFC_TRC_START;
+
+ switch (entry->fmt) {
+ case IBMVFC_CMD_FORMAT:
+ entry->op_code = vfc_cmd->iu.cdb[0];
+ entry->scsi_id = vfc_cmd->tgt_scsi_id;
+ entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
+ entry->tmf_flags = vfc_cmd->iu.tmf_flags;
+ entry->u.start.xfer_len = vfc_cmd->iu.xfer_len;
+ break;
+ case IBMVFC_MAD_FORMAT:
+ entry->op_code = mad->opcode;
+ break;
+ default:
+ break;
+	}
+}
+
+/**
+ * ibmvfc_trc_end - Log an end trace entry
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_trc_end(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
+ struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
+ struct ibmvfc_trace_entry *entry = &vhost->trace[vhost->trace_index++];
+
+ entry->evt = evt;
+ entry->time = jiffies;
+ entry->fmt = evt->crq.format;
+ entry->type = IBMVFC_TRC_END;
+
+ switch (entry->fmt) {
+ case IBMVFC_CMD_FORMAT:
+ entry->op_code = vfc_cmd->iu.cdb[0];
+ entry->scsi_id = vfc_cmd->tgt_scsi_id;
+ entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
+ entry->tmf_flags = vfc_cmd->iu.tmf_flags;
+ entry->u.end.status = vfc_cmd->status;
+ entry->u.end.error = vfc_cmd->error;
+ entry->u.end.fcp_rsp_flags = vfc_cmd->rsp.flags;
+ entry->u.end.rsp_code = vfc_cmd->rsp.data.info.rsp_code;
+ entry->u.end.scsi_status = vfc_cmd->rsp.scsi_status;
+ break;
+ case IBMVFC_MAD_FORMAT:
+ entry->op_code = mad->opcode;
+ entry->u.end.status = mad->status;
+ break;
+ default:
+ break;
+	}
+}
+
+#else
+#define ibmvfc_trc_start(evt) do { } while (0)
+#define ibmvfc_trc_end(evt) do { } while (0)
+#endif
+
+/**
+ * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
+ * @status: status / error class
+ * @error: error
+ *
+ * Return value:
+ * index into cmd_status / -EINVAL on failure
+ **/
+static int ibmvfc_get_err_index(u16 status, u16 error)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cmd_status); i++)
+ if ((cmd_status[i].status & status) == cmd_status[i].status &&
+ cmd_status[i].error == error)
+ return i;
+
+ return -EINVAL;
+}
+
+/**
+ * ibmvfc_get_cmd_error - Find the error description for the fcp response
+ * @status: status / error class
+ * @error: error
+ *
+ * Return value:
+ * error description string
+ **/
+static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
+{
+ int rc = ibmvfc_get_err_index(status, error);
+ if (rc >= 0)
+ return cmd_status[rc].name;
+ return unknown_error;
+}
+
+/**
+ * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
+ * @vfc_cmd: ibmvfc command struct
+ *
+ * Return value:
+ * SCSI result value to return for completed command
+ **/
+static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
+{
+ int err;
+ struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
+ int fc_rsp_len = rsp->fcp_rsp_len;
+
+ if ((rsp->flags & FCP_RSP_LEN_VALID) &&
+	    ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
+ rsp->data.info.rsp_code))
+ return DID_ERROR << 16;
+
+ if (!vfc_cmd->status) {
+ if (rsp->flags & FCP_RESID_OVER)
+ return rsp->scsi_status | (DID_ERROR << 16);
+ else
+ return rsp->scsi_status | (DID_OK << 16);
+ }
+
+ err = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
+ if (err >= 0)
+ return rsp->scsi_status | (cmd_status[err].result << 16);
+ return rsp->scsi_status | (DID_ERROR << 16);
+}
+
+/**
+ * ibmvfc_retry_cmd - Determine if error status is retryable
+ * @status: status / error class
+ * @error: error
+ *
+ * Return value:
+ * 1 if error should be retried / 0 if it should not
+ **/
+static int ibmvfc_retry_cmd(u16 status, u16 error)
+{
+ int rc = ibmvfc_get_err_index(status, error);
+
+ if (rc >= 0)
+ return cmd_status[rc].retry;
+ return 1;
+}
+
+static const char *unknown_fc_explain = "unknown fc explain";
+
+static const struct {
+ u16 fc_explain;
+ char *name;
+} ls_explain [] = {
+ { 0x00, "no additional explanation" },
+ { 0x01, "service parameter error - options" },
+ { 0x03, "service parameter error - initiator control" },
+ { 0x05, "service parameter error - recipient control" },
+ { 0x07, "service parameter error - received data field size" },
+ { 0x09, "service parameter error - concurrent seq" },
+ { 0x0B, "service parameter error - credit" },
+ { 0x0D, "invalid N_Port/F_Port_Name" },
+ { 0x0E, "invalid node/Fabric Name" },
+ { 0x0F, "invalid common service parameters" },
+ { 0x11, "invalid association header" },
+ { 0x13, "association header required" },
+ { 0x15, "invalid originator S_ID" },
+ { 0x17, "invalid OX_ID-RX-ID combination" },
+ { 0x19, "command (request) already in progress" },
+ { 0x1E, "N_Port Login requested" },
+ { 0x1F, "Invalid N_Port_ID" },
+};
+
+static const struct {
+ u16 fc_explain;
+ char *name;
+} gs_explain [] = {
+ { 0x00, "no additional explanation" },
+ { 0x01, "port identifier not registered" },
+ { 0x02, "port name not registered" },
+ { 0x03, "node name not registered" },
+ { 0x04, "class of service not registered" },
+ { 0x06, "initial process associator not registered" },
+ { 0x07, "FC-4 TYPEs not registered" },
+ { 0x08, "symbolic port name not registered" },
+ { 0x09, "symbolic node name not registered" },
+ { 0x0A, "port type not registered" },
+ { 0xF0, "authorization exception" },
+ { 0xF1, "authentication exception" },
+ { 0xF2, "data base full" },
+ { 0xF3, "data base empty" },
+ { 0xF4, "processing request" },
+ { 0xF5, "unable to verify connection" },
+ { 0xF6, "devices not in a common zone" },
+};
+
+/**
+ * ibmvfc_get_ls_explain - Return the FC Explain description text
+ * @status: FC Explain status
+ *
+ * Returns:
+ * error string
+ **/
+static const char *ibmvfc_get_ls_explain(u16 status)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ls_explain); i++)
+ if (ls_explain[i].fc_explain == status)
+ return ls_explain[i].name;
+
+ return unknown_fc_explain;
+}
+
+/**
+ * ibmvfc_get_gs_explain - Return the FC Explain description text
+ * @status: FC Explain status
+ *
+ * Returns:
+ * error string
+ **/
+static const char *ibmvfc_get_gs_explain(u16 status)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(gs_explain); i++)
+ if (gs_explain[i].fc_explain == status)
+ return gs_explain[i].name;
+
+ return unknown_fc_explain;
+}
+
+static const struct {
+ enum ibmvfc_fc_type fc_type;
+ char *name;
+} fc_type [] = {
+ { IBMVFC_FABRIC_REJECT, "fabric reject" },
+ { IBMVFC_PORT_REJECT, "port reject" },
+ { IBMVFC_LS_REJECT, "ELS reject" },
+ { IBMVFC_FABRIC_BUSY, "fabric busy" },
+ { IBMVFC_PORT_BUSY, "port busy" },
+ { IBMVFC_BASIC_REJECT, "basic reject" },
+};
+
+static const char *unknown_fc_type = "unknown fc type";
+
+/**
+ * ibmvfc_get_fc_type - Return the FC Type description text
+ * @status: FC Type error status
+ *
+ * Returns:
+ * error string
+ **/
+static const char *ibmvfc_get_fc_type(u16 status)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fc_type); i++)
+ if (fc_type[i].fc_type == status)
+ return fc_type[i].name;
+
+ return unknown_fc_type;
+}
+
+/**
+ * ibmvfc_set_tgt_action - Set the next init action for the target
+ * @tgt: ibmvfc target struct
+ * @action: action to perform
+ *
+ **/
+static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
+ enum ibmvfc_target_action action)
+{
+ switch (tgt->action) {
+ case IBMVFC_TGT_ACTION_DEL_RPORT:
+ break;
+ default:
+ tgt->action = action;
+ break;
+ }
+}
+
+/**
+ * ibmvfc_set_host_state - Set the state for the host
+ * @vhost: ibmvfc host struct
+ * @state: state to set host to
+ *
+ * Returns:
+ * 0 if state changed / non-zero if not changed
+ **/
+static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
+ enum ibmvfc_host_state state)
+{
+ int rc = 0;
+
+ switch (vhost->state) {
+ case IBMVFC_HOST_OFFLINE:
+ rc = -EINVAL;
+ break;
+ default:
+ vhost->state = state;
+ break;
+	}
+
+ return rc;
+}
+
+/**
+ * ibmvfc_set_host_action - Set the next init action for the host
+ * @vhost: ibmvfc host struct
+ * @action: action to perform
+ *
+ **/
+static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
+ enum ibmvfc_host_action action)
+{
+ switch (action) {
+ case IBMVFC_HOST_ACTION_ALLOC_TGTS:
+ if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
+ vhost->action = action;
+ break;
+ case IBMVFC_HOST_ACTION_INIT_WAIT:
+ if (vhost->action == IBMVFC_HOST_ACTION_INIT)
+ vhost->action = action;
+ break;
+ case IBMVFC_HOST_ACTION_QUERY:
+ switch (vhost->action) {
+ case IBMVFC_HOST_ACTION_INIT_WAIT:
+ case IBMVFC_HOST_ACTION_NONE:
+ case IBMVFC_HOST_ACTION_TGT_ADD:
+ vhost->action = action;
+ break;
+ default:
+ break;
+		}
+ break;
+ case IBMVFC_HOST_ACTION_TGT_INIT:
+ if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
+ vhost->action = action;
+ break;
+ case IBMVFC_HOST_ACTION_INIT:
+ case IBMVFC_HOST_ACTION_TGT_DEL:
+ case IBMVFC_HOST_ACTION_QUERY_TGTS:
+ case IBMVFC_HOST_ACTION_TGT_ADD:
+ case IBMVFC_HOST_ACTION_NONE:
+ default:
+ vhost->action = action;
+ break;
+	}
+}
+
+/**
+ * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
+ * @vhost: ibmvfc host struct
+ *
+ * Return value:
+ * nothing
+ **/
+static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
+{
+ if (vhost->action == IBMVFC_HOST_ACTION_NONE) {
+ scsi_block_requests(vhost->host);
+ ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING);
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
+ } else
+ vhost->reinit = 1;
+
+ wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_link_down - Handle a link down event from the adapter
+ * @vhost: ibmvfc host struct
+ * @state: ibmvfc host state to enter
+ *
+ **/
+static void ibmvfc_link_down(struct ibmvfc_host *vhost,
+ enum ibmvfc_host_state state)
+{
+ struct ibmvfc_target *tgt;
+
+ ENTER;
+ scsi_block_requests(vhost->host);
+ list_for_each_entry(tgt, &vhost->targets, queue)
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_set_host_state(vhost, state);
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
+ vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
+ wake_up(&vhost->work_wait_q);
+ LEAVE;
+}
+
+/**
+ * ibmvfc_init_host - Start host initialization
+ * @vhost: ibmvfc host struct
+ *
+ * Return value:
+ * nothing
+ **/
+static void ibmvfc_init_host(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_target *tgt;
+
+ if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
+ if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) {
+ dev_err(vhost->dev,
+ "Host initialization retries exceeded. Taking adapter offline\n");
+ ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
+ return;
+ }
+ }
+
+ if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
+ list_for_each_entry(tgt, &vhost->targets, queue)
+ tgt->need_login = 1;
+ scsi_block_requests(vhost->host);
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
+ vhost->job_step = ibmvfc_npiv_login;
+ wake_up(&vhost->work_wait_q);
+ }
+}
+
+/**
+ * ibmvfc_send_crq - Send a CRQ
+ * @vhost: ibmvfc host struct
+ * @word1: the first 64 bits of the data
+ * @word2: the second 64 bits of the data
+ *
+ * Return value:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
+{
+ struct vio_dev *vdev = to_vio_dev(vhost->dev);
+ return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
+}
+
+/**
+ * ibmvfc_send_crq_init - Send a CRQ init message
+ * @vhost: ibmvfc host struct
+ *
+ * Return value:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
+{
+ ibmvfc_dbg(vhost, "Sending CRQ init\n");
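+	/* The leading 0xC0 byte marks this as a CRQ initialization message;
+	 * the next byte selects the type: 0x01 for init (this message),
+	 * 0x02 for init complete (see ibmvfc_handle_crq()). */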
+ return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
+}
+
+/**
+ * ibmvfc_send_crq_init_complete - Send a CRQ init complete message
+ * @vhost: ibmvfc host struct
+ *
+ * Return value:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
+{
+ ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
+ return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
+}
+
+/**
+ * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
+ * @vhost: ibmvfc host struct
+ *
+ * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
+ * the crq with the hypervisor.
+ **/
+static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
+{
+ long rc;
+ struct vio_dev *vdev = to_vio_dev(vhost->dev);
+ struct ibmvfc_crq_queue *crq = &vhost->crq;
+
+ ibmvfc_dbg(vhost, "Releasing CRQ\n");
+ free_irq(vdev->irq, vhost);
+ do {
+ rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+ } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+ vhost->state = IBMVFC_NO_CRQ;
+ dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ free_page((unsigned long)crq->msgs);
+}
+
+/**
+ * ibmvfc_reenable_crq_queue - reenables the CRQ
+ * @vhost: ibmvfc host struct
+ *
+ * Return value:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
+{
+ int rc;
+ struct vio_dev *vdev = to_vio_dev(vhost->dev);
+
+ /* Re-enable the CRQ */
+ do {
+ rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
+ } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+ if (rc)
+ dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);
+
+ return rc;
+}
+
+/**
+ * ibmvfc_reset_crq - resets a crq after a failure
+ * @vhost: ibmvfc host struct
+ *
+ * Return value:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
+{
+ int rc;
+ struct vio_dev *vdev = to_vio_dev(vhost->dev);
+ struct ibmvfc_crq_queue *crq = &vhost->crq;
+
+ /* Close the CRQ */
+ do {
+ rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+ } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+ vhost->state = IBMVFC_NO_CRQ;
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
+
+ /* Clean out the queue */
+ memset(crq->msgs, 0, PAGE_SIZE);
+ crq->cur = 0;
+
+ /* And re-open it again */
+ rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
+ crq->msg_token, PAGE_SIZE);
+
+ if (rc == H_CLOSED)
+ /* Adapter is good, but other end is not ready */
+ dev_warn(vhost->dev, "Partner adapter not ready\n");
+ else if (rc != 0)
+ dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
+
+ return rc;
+}
+
+/**
+ * ibmvfc_valid_event - Determines if event is valid.
+ * @pool: event_pool that contains the event
+ * @evt: ibmvfc event to be checked for validity
+ *
+ * Return value:
+ * 1 if event is valid / 0 if event is not valid
+ **/
+static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
+ struct ibmvfc_event *evt)
+{
+ int index = evt - pool->events;
+ if (index < 0 || index >= pool->size) /* outside of bounds */
+ return 0;
+ if (evt != pool->events + index) /* unaligned */
+ return 0;
+ return 1;
+}
+
+/**
+ * ibmvfc_free_event - Free the specified event
+ * @evt: ibmvfc_event to be freed
+ *
+ **/
+static void ibmvfc_free_event(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_event_pool *pool = &vhost->pool;
+
+ BUG_ON(!ibmvfc_valid_event(pool, evt));
+ BUG_ON(atomic_inc_return(&evt->free) != 1);
+ list_add_tail(&evt->queue, &vhost->free);
+}
+
+/**
+ * ibmvfc_scsi_eh_done - EH done function for queuecommand commands
+ * @evt: ibmvfc event struct
+ *
+ * This function does not setup any error status, that must be done
+ * before this function gets called.
+ **/
+static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
+{
+ struct scsi_cmnd *cmnd = evt->cmnd;
+
+ if (cmnd) {
+ scsi_dma_unmap(cmnd);
+ cmnd->scsi_done(cmnd);
+ }
+
+ ibmvfc_free_event(evt);
+}
+
+/**
+ * ibmvfc_fail_request - Fail request with specified error code
+ * @evt: ibmvfc event struct
+ * @error_code: error code to fail request with
+ *
+ * Return value:
+ * none
+ **/
+static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
+{
+ if (evt->cmnd) {
+ evt->cmnd->result = (error_code << 16);
+ evt->done = ibmvfc_scsi_eh_done;
+ } else
+ evt->xfer_iu->mad_common.status = IBMVFC_MAD_DRIVER_FAILED;
+
+ list_del(&evt->queue);
+ del_timer(&evt->timer);
+ ibmvfc_trc_end(evt);
+ evt->done(evt);
+}
+
+/**
+ * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
+ * @vhost: ibmvfc host struct
+ * @error_code: error code to fail requests with
+ *
+ * Return value:
+ * none
+ **/
+static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
+{
+ struct ibmvfc_event *evt, *pos;
+
+ ibmvfc_dbg(vhost, "Purging all requests\n");
+ list_for_each_entry_safe(evt, pos, &vhost->sent, queue)
+ ibmvfc_fail_request(evt, error_code);
+}
+
+/**
+ * __ibmvfc_reset_host - Reset the connection to the server (no locking)
+ * @vhost: struct ibmvfc host to reset
+ **/
+static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
+{
+ int rc;
+
+ scsi_block_requests(vhost->host);
+ ibmvfc_purge_requests(vhost, DID_ERROR);
+ if ((rc = ibmvfc_reset_crq(vhost)) ||
+ (rc = ibmvfc_send_crq_init(vhost)) ||
+ (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
+ dev_err(vhost->dev, "Error after reset rc=%d\n", rc);
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ } else
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+}
+
+/**
+ * ibmvfc_reset_host - Reset the connection to the server
+ * @vhost: struct ibmvfc host to reset
+ **/
+static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ __ibmvfc_reset_host(vhost);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+}
+
+/**
+ * ibmvfc_retry_host_init - Retry host initialization if allowed
+ * @vhost: ibmvfc host struct
+ *
+ **/
+static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
+{
+ if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
+ if (++vhost->init_retries > IBMVFC_MAX_INIT_RETRIES) {
+ dev_err(vhost->dev,
+ "Host initialization retries exceeded. Taking adapter offline\n");
+ ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
+ } else if (vhost->init_retries == IBMVFC_MAX_INIT_RETRIES)
+ __ibmvfc_reset_host(vhost);
+ else
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
+ }
+
+ wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * __ibmvfc_find_target - Find the specified scsi_target (no locking)
+ * @starget: scsi target struct
+ *
+ * Return value:
+ * ibmvfc_target struct / NULL if not found
+ **/
+static struct ibmvfc_target *__ibmvfc_find_target(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ struct ibmvfc_target *tgt;
+
+ list_for_each_entry(tgt, &vhost->targets, queue)
+ if (tgt->target_id == starget->id)
+ return tgt;
+ return NULL;
+}
+
+/**
+ * ibmvfc_find_target - Find the specified scsi_target
+ * @starget: scsi target struct
+ *
+ * Return value:
+ * ibmvfc_target struct / NULL if not found
+ **/
+static struct ibmvfc_target *ibmvfc_find_target(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct ibmvfc_target *tgt;
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ tgt = __ibmvfc_find_target(starget);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return tgt;
+}
+
+/**
+ * ibmvfc_get_host_speed - Get host port speed
+ * @shost: scsi host struct
+ *
+ * Return value:
+ * none
+ **/
+static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
+{
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (vhost->state == IBMVFC_ACTIVE) {
+ switch (vhost->login_buf->resp.link_speed / 100) {
+ case 1:
+ fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
+ break;
+ case 2:
+ fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
+ break;
+ case 4:
+ fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
+ break;
+ case 8:
+ fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
+ break;
+ case 10:
+ fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+ break;
+ case 16:
+ fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
+ break;
+ default:
+ ibmvfc_log(vhost, 3, "Unknown port speed: %ld Gbit\n",
+ vhost->login_buf->resp.link_speed / 100);
+ fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+ break;
+ }
+ } else
+ fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/**
+ * ibmvfc_get_host_port_state - Get host port state
+ * @shost: scsi host struct
+ *
+ * Return value:
+ * none
+ **/
+static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
+{
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ switch (vhost->state) {
+ case IBMVFC_INITIALIZING:
+ case IBMVFC_ACTIVE:
+ fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ break;
+ case IBMVFC_LINK_DOWN:
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ break;
+ case IBMVFC_LINK_DEAD:
+ case IBMVFC_HOST_OFFLINE:
+ fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
+ break;
+ case IBMVFC_HALTED:
+ fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
+ break;
+ default:
+ ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
+ fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+ break;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/**
+ * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
+ * @rport: rport struct
+ * @timeout: timeout value
+ *
+ * Return value:
+ * none
+ **/
+static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
+{
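+	/* A zero timeout would presumably leave the rport device loss
+	 * timer unarmed, so clamp it to a one second minimum. */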
+ if (timeout)
+ rport->dev_loss_tmo = timeout;
+ else
+ rport->dev_loss_tmo = 1;
+}
+
+/**
+ * ibmvfc_get_starget_node_name - Get SCSI target's node name
+ * @starget: scsi target struct
+ *
+ * Return value:
+ * none
+ **/
+static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
+{
+ struct ibmvfc_target *tgt = ibmvfc_find_target(starget);
+	fc_starget_node_name(starget) = tgt ? tgt->ids.node_name : 0;
+}
+
+/**
+ * ibmvfc_get_starget_port_name - Get SCSI target's port name
+ * @starget: scsi target struct
+ *
+ * Return value:
+ * none
+ **/
+static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
+{
+ struct ibmvfc_target *tgt = ibmvfc_find_target(starget);
+ fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
+}
+
+/**
+ * ibmvfc_get_starget_port_id - Get SCSI target's port ID
+ * @starget: scsi target struct
+ *
+ * Return value:
+ * none
+ **/
+static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
+{
+ struct ibmvfc_target *tgt = ibmvfc_find_target(starget);
+ fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
+}
+
+/**
+ * ibmvfc_wait_while_resetting - Wait while the host resets
+ * @vhost: ibmvfc host struct
+ *
+ * Return value:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
+{
+ long timeout = wait_event_timeout(vhost->init_wait_q,
+ (vhost->state == IBMVFC_ACTIVE ||
+ vhost->state == IBMVFC_HOST_OFFLINE ||
+ vhost->state == IBMVFC_LINK_DEAD),
+ (init_timeout * HZ));
+
+ return timeout ? 0 : -EIO;
+}
+
+/**
+ * ibmvfc_issue_fc_host_lip - Re-initiate link initialization
+ * @shost: scsi host struct
+ *
+ * Return value:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
+{
+ struct ibmvfc_host *vhost = shost_priv(shost);
+
+ dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
+ ibmvfc_reset_host(vhost);
+ return ibmvfc_wait_while_resetting(vhost);
+}
+
+/**
+ * ibmvfc_gather_partition_info - Gather info about the LPAR
+ *
+ * Return value:
+ * none
+ **/
+static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
+{
+ struct device_node *rootdn;
+ const char *name;
+ const unsigned int *num;
+
+ rootdn = of_find_node_by_path("/");
+ if (!rootdn)
+ return;
+
+ name = of_get_property(rootdn, "ibm,partition-name", NULL);
+ if (name)
+ strncpy(vhost->partition_name, name, sizeof(vhost->partition_name));
+ num = of_get_property(rootdn, "ibm,partition-no", NULL);
+ if (num)
+ vhost->partition_number = *num;
+ of_node_put(rootdn);
+}
+
+/**
+ * ibmvfc_set_login_info - Setup info for NPIV login
+ * @vhost: ibmvfc host struct
+ *
+ * Return value:
+ * none
+ **/
+static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_npiv_login *login_info = &vhost->login_info;
+ struct device_node *of_node = vhost->dev->archdata.of_node;
+ const char *location;
+
+ memset(login_info, 0, sizeof(*login_info));
+
+ login_info->ostype = IBMVFC_OS_LINUX;
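+	/* << 9 converts 512 byte sectors to bytes */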
+ login_info->max_dma_len = IBMVFC_MAX_SECTORS << 9;
+ login_info->max_payload = sizeof(struct ibmvfc_fcp_cmd_iu);
+ login_info->max_response = sizeof(struct ibmvfc_fcp_rsp);
+ login_info->partition_num = vhost->partition_number;
+ login_info->vfc_frame_version = 1;
+ login_info->fcp_version = 3;
+ if (vhost->client_migrated)
+ login_info->flags = IBMVFC_CLIENT_MIGRATED;
+
+ login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ;
+ login_info->capabilities = IBMVFC_CAN_MIGRATE;
+ login_info->async.va = vhost->async_crq.msg_token;
+ login_info->async.len = vhost->async_crq.size;
+ strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
+ strncpy(login_info->device_name,
+ vhost->host->shost_gendev.bus_id, IBMVFC_MAX_NAME);
+
+ location = of_get_property(of_node, "ibm,loc-code", NULL);
+ location = location ? location : vhost->dev->bus_id;
+ strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
+}
+
+/**
+ * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
+ * @vhost: ibmvfc host who owns the event pool
+ *
+ * Returns zero on success.
+ **/
+static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost)
+{
+ int i;
+ struct ibmvfc_event_pool *pool = &vhost->pool;
+
+ ENTER;
+ pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
+ pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
+ if (!pool->events)
+ return -ENOMEM;
+
+ pool->iu_storage = dma_alloc_coherent(vhost->dev,
+ pool->size * sizeof(*pool->iu_storage),
+ &pool->iu_token, 0);
+
+ if (!pool->iu_storage) {
+ kfree(pool->events);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < pool->size; ++i) {
+ struct ibmvfc_event *evt = &pool->events[i];
+ atomic_set(&evt->free, 1);
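+		/* Pre-format each CRQ entry: 0x80 marks it valid, and ioba
+		 * points at this event's IU in the coherent DMA region. */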
+ evt->crq.valid = 0x80;
+ evt->crq.ioba = pool->iu_token + (sizeof(*evt->xfer_iu) * i);
+ evt->xfer_iu = pool->iu_storage + i;
+ evt->vhost = vhost;
+ evt->ext_list = NULL;
+ list_add_tail(&evt->queue, &vhost->free);
+ }
+
+ LEAVE;
+ return 0;
+}
+
+/**
+ * ibmvfc_free_event_pool - Frees memory of the event pool of a host
+ * @vhost: ibmvfc host who owns the event pool
+ *
+ **/
+static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost)
+{
+ int i;
+ struct ibmvfc_event_pool *pool = &vhost->pool;
+
+ ENTER;
+ for (i = 0; i < pool->size; ++i) {
+ list_del(&pool->events[i].queue);
+ BUG_ON(atomic_read(&pool->events[i].free) != 1);
+ if (pool->events[i].ext_list)
+ dma_pool_free(vhost->sg_pool,
+ pool->events[i].ext_list,
+ pool->events[i].ext_list_token);
+ }
+
+ kfree(pool->events);
+ dma_free_coherent(vhost->dev,
+ pool->size * sizeof(*pool->iu_storage),
+ pool->iu_storage, pool->iu_token);
+ LEAVE;
+}
+
+/**
+ * ibmvfc_get_event - Gets the next free event in pool
+ * @vhost: ibmvfc host struct
+ *
+ * Returns a free event from the pool.
+ **/
+static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_event *evt;
+
+ BUG_ON(list_empty(&vhost->free));
+ evt = list_entry(vhost->free.next, struct ibmvfc_event, queue);
+ atomic_set(&evt->free, 0);
+ list_del(&evt->queue);
+ return evt;
+}
+
+/**
+ * ibmvfc_init_event - Initialize fields in an event struct that are always
+ * required.
+ * @evt: The event
+ * @done: Routine to call when the event is responded to
+ * @format: SRP or MAD format
+ **/
+static void ibmvfc_init_event(struct ibmvfc_event *evt,
+ void (*done) (struct ibmvfc_event *), u8 format)
+{
+ evt->cmnd = NULL;
+ evt->sync_iu = NULL;
+ evt->crq.format = format;
+ evt->done = done;
+}
+
+/**
+ * ibmvfc_map_sg_list - Initialize scatterlist
+ * @scmd: scsi command struct
+ * @nseg: number of scatterlist segments
+ * @md: memory descriptor list to initialize
+ **/
+static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
+ struct srp_direct_buf *md)
+{
+ int i;
+ struct scatterlist *sg;
+
+ scsi_for_each_sg(scmd, sg, nseg, i) {
+ md[i].va = sg_dma_address(sg);
+ md[i].len = sg_dma_len(sg);
+ md[i].key = 0;
+ }
+}
+
+/**
+ * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes decriptor fields
+ * @scmd: Scsi_Cmnd with the scatterlist
+ * @evt: ibmvfc event struct
+ * @vfc_cmd: vfc_cmd that contains the memory descriptor
+ * @dev: device for which to map dma memory
+ *
+ * Returns:
+ * 0 on success / non-zero on failure
+ **/
+static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
+ struct ibmvfc_event *evt,
+ struct ibmvfc_cmd *vfc_cmd, struct device *dev)
+{
+ int sg_mapped;
+ struct srp_direct_buf *data = &vfc_cmd->ioba;
+ struct ibmvfc_host *vhost = dev_get_drvdata(dev);
+
+ sg_mapped = scsi_dma_map(scmd);
+ if (!sg_mapped) {
+ vfc_cmd->flags |= IBMVFC_NO_MEM_DESC;
+ return 0;
+ } else if (unlikely(sg_mapped < 0)) {
+ if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
+ scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
+ return sg_mapped;
+ }
+
+ if (scmd->sc_data_direction == DMA_TO_DEVICE) {
+ vfc_cmd->flags |= IBMVFC_WRITE;
+ vfc_cmd->iu.add_cdb_len |= IBMVFC_WRDATA;
+ } else {
+ vfc_cmd->flags |= IBMVFC_READ;
+ vfc_cmd->iu.add_cdb_len |= IBMVFC_RDDATA;
+ }
+
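+	/* A single segment fits in the command's embedded direct descriptor;
+	 * longer scatterlists use the external indirect list set up below. */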
+ if (sg_mapped == 1) {
+ ibmvfc_map_sg_list(scmd, sg_mapped, data);
+ return 0;
+ }
+
+ vfc_cmd->flags |= IBMVFC_SCATTERLIST;
+
+ if (!evt->ext_list) {
+ evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
+ &evt->ext_list_token);
+
+ if (!evt->ext_list) {
+ scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
+ return -ENOMEM;
+ }
+ }
+
+ ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);
+
+ data->va = evt->ext_list_token;
+ data->len = sg_mapped * sizeof(struct srp_direct_buf);
+ data->key = 0;
+ return 0;
+}
+
+/**
+ * ibmvfc_timeout - Internal command timeout handler
+ * @evt: struct ibmvfc_event that timed out
+ *
+ * Called when an internally generated command times out
+ **/
+static void ibmvfc_timeout(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_host *vhost = evt->vhost;
+ dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
+ ibmvfc_reset_host(vhost);
+}
+
+/**
+ * ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
+ * @evt: event to be sent
+ * @vhost: ibmvfc host struct
+ * @timeout: timeout in seconds - 0 means do not time command
+ *
+ * Returns the value returned from ibmvfc_send_crq(). (Zero for success)
+ **/
+static int ibmvfc_send_event(struct ibmvfc_event *evt,
+ struct ibmvfc_host *vhost, unsigned long timeout)
+{
+ u64 *crq_as_u64 = (u64 *) &evt->crq;
+ int rc;
+
+ /* Copy the IU into the transfer area */
+ *evt->xfer_iu = evt->iu;
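+	/* The tag carries the event pointer; the server echoes it back in
+	 * the response CRQ's ioba, from which ibmvfc_handle_crq() recovers
+	 * the originating event. */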
+ if (evt->crq.format == IBMVFC_CMD_FORMAT)
+ evt->xfer_iu->cmd.tag = (u64)evt;
+ else if (evt->crq.format == IBMVFC_MAD_FORMAT)
+ evt->xfer_iu->mad_common.tag = (u64)evt;
+ else
+ BUG();
+
+ list_add_tail(&evt->queue, &vhost->sent);
+ init_timer(&evt->timer);
+
+ if (timeout) {
+ evt->timer.data = (unsigned long) evt;
+ evt->timer.expires = jiffies + (timeout * HZ);
+ evt->timer.function = (void (*)(unsigned long))ibmvfc_timeout;
+ add_timer(&evt->timer);
+ }
+
+ if ((rc = ibmvfc_send_crq(vhost, crq_as_u64[0], crq_as_u64[1]))) {
+ list_del(&evt->queue);
+ del_timer(&evt->timer);
+
+ /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
+ * Firmware will send a CRQ with a transport event (0xFF) to
+ * tell this client what has happened to the transport. This
+ * will be handled in ibmvfc_handle_crq()
+ */
+ if (rc == H_CLOSED) {
+ if (printk_ratelimit())
+ dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
+ if (evt->cmnd)
+ scsi_dma_unmap(evt->cmnd);
+ ibmvfc_free_event(evt);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
+ if (evt->cmnd) {
+ evt->cmnd->result = DID_ERROR << 16;
+ evt->done = ibmvfc_scsi_eh_done;
+ } else
+ evt->xfer_iu->mad_common.status = IBMVFC_MAD_CRQ_ERROR;
+
+ evt->done(evt);
+ } else
+ ibmvfc_trc_start(evt);
+
+ return 0;
+}
+
+/**
+ * ibmvfc_log_error - Log an error for the failed command if appropriate
+ * @evt: ibmvfc event to log
+ *
+ **/
+static void ibmvfc_log_error(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
+ struct scsi_cmnd *cmnd = evt->cmnd;
+ const char *err = unknown_error;
+ int index = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
+ int logerr = 0;
+ int rsp_code = 0;
+
+ if (index >= 0) {
+ logerr = cmd_status[index].log;
+ err = cmd_status[index].name;
+ }
+
+ if (!logerr && (vhost->log_level <= IBMVFC_DEFAULT_LOG_LEVEL))
+ return;
+
+ if (rsp->flags & FCP_RSP_LEN_VALID)
+ rsp_code = rsp->data.info.rsp_code;
+
+ scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) "
+ "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
+ cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error,
+ rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
+}
+
+/**
+ * ibmvfc_scsi_done - Handle responses from commands
+ * @evt: ibmvfc event to be handled
+ *
+ * Used as a callback when sending scsi cmds.
+ **/
+static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
+ struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
+ struct scsi_cmnd *cmnd = evt->cmnd;
+ int rsp_len = 0;
+ int sense_len = rsp->fcp_sense_len;
+
+ if (cmnd) {
+ if (vfc_cmd->response_flags & IBMVFC_ADAPTER_RESID_VALID)
+ scsi_set_resid(cmnd, vfc_cmd->adapter_resid);
+ else if (rsp->flags & FCP_RESID_UNDER)
+ scsi_set_resid(cmnd, rsp->fcp_resid);
+ else
+ scsi_set_resid(cmnd, 0);
+
+ if (vfc_cmd->status) {
+ cmnd->result = ibmvfc_get_err_result(vfc_cmd);
+
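+			/* Per FCP, response info precedes the sense bytes in
+			 * the payload, so skip rsp_len bytes and clamp the
+			 * copy to the midlayer's sense buffer size. */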
+ if (rsp->flags & FCP_RSP_LEN_VALID)
+ rsp_len = rsp->fcp_rsp_len;
+ if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
+ sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
+ if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len)
+ memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
+
+ ibmvfc_log_error(evt);
+ }
+
+ if (!cmnd->result &&
+ (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
+ cmnd->result = (DID_ERROR << 16);
+
+ scsi_dma_unmap(cmnd);
+ cmnd->scsi_done(cmnd);
+ }
+
+ ibmvfc_free_event(evt);
+}
+
+/**
+ * ibmvfc_host_chkready - Check if the host can accept commands
+ * @vhost: struct ibmvfc host
+ *
+ * Returns:
+ *	0 if the host can accept commands / SCSI result (DID_* << 16) if not
+ **/
+static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
+{
+ int result = 0;
+
+ switch (vhost->state) {
+ case IBMVFC_LINK_DEAD:
+ case IBMVFC_HOST_OFFLINE:
+ result = DID_NO_CONNECT << 16;
+ break;
+ case IBMVFC_NO_CRQ:
+ case IBMVFC_INITIALIZING:
+ case IBMVFC_HALTED:
+ case IBMVFC_LINK_DOWN:
+ result = DID_REQUEUE << 16;
+ break;
+ case IBMVFC_ACTIVE:
+ result = 0;
+ break;
+	}
+
+ return result;
+}
+
+/**
+ * ibmvfc_queuecommand - The queuecommand function of the scsi template
+ * @cmnd: struct scsi_cmnd to be executed
+ * @done: Callback function to be called when cmnd is completed
+ *
+ * Returns:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_queuecommand(struct scsi_cmnd *cmnd,
+ void (*done) (struct scsi_cmnd *))
+{
+ struct ibmvfc_host *vhost = shost_priv(cmnd->device->host);
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+ struct ibmvfc_cmd *vfc_cmd;
+ struct ibmvfc_event *evt;
+ u8 tag[2];
+ int rc;
+
+ if (unlikely((rc = fc_remote_port_chkready(rport))) ||
+ unlikely((rc = ibmvfc_host_chkready(vhost)))) {
+ cmnd->result = rc;
+ done(cmnd);
+ return 0;
+ }
+
+ cmnd->result = (DID_OK << 16);
+ evt = ibmvfc_get_event(vhost);
+ ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
+ evt->cmnd = cmnd;
+ cmnd->scsi_done = done;
+ vfc_cmd = &evt->iu.cmd;
+ memset(vfc_cmd, 0, sizeof(*vfc_cmd));
+ vfc_cmd->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
+ vfc_cmd->resp.len = sizeof(vfc_cmd->rsp);
+ vfc_cmd->frame_type = IBMVFC_SCSI_FCP_TYPE;
+ vfc_cmd->payload_len = sizeof(vfc_cmd->iu);
+ vfc_cmd->resp_len = sizeof(vfc_cmd->rsp);
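+	/* The cancel key is the per-device task set value assigned in
+	 * ibmvfc_slave_alloc(); ibmvfc_cancel_all() sends the same key to
+	 * cancel everything outstanding to this device. */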
+ vfc_cmd->cancel_key = (unsigned long)cmnd->device->hostdata;
+ vfc_cmd->tgt_scsi_id = rport->port_id;
+ if ((rport->supported_classes & FC_COS_CLASS3) &&
+ (fc_host_supported_classes(vhost->host) & FC_COS_CLASS3))
+ vfc_cmd->flags = IBMVFC_CLASS_3_ERR;
+ vfc_cmd->iu.xfer_len = scsi_bufflen(cmnd);
+ int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun);
+ memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len);
+
+ if (scsi_populate_tag_msg(cmnd, tag)) {
+ vfc_cmd->task_tag = tag[1];
+ switch (tag[0]) {
+ case MSG_SIMPLE_TAG:
+ vfc_cmd->iu.pri_task_attr = IBMVFC_SIMPLE_TASK;
+ break;
+ case MSG_HEAD_TAG:
+ vfc_cmd->iu.pri_task_attr = IBMVFC_HEAD_OF_QUEUE;
+ break;
+ case MSG_ORDERED_TAG:
+ vfc_cmd->iu.pri_task_attr = IBMVFC_ORDERED_TASK;
+ break;
+		}
+ }
+
+ if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
+ return ibmvfc_send_event(evt, vhost, 0);
+
+ ibmvfc_free_event(evt);
+ if (rc == -ENOMEM)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
+ scmd_printk(KERN_ERR, cmnd,
+ "Failed to map DMA buffer for command. rc=%d\n", rc);
+
+ cmnd->result = DID_ERROR << 16;
+ done(cmnd);
+ return 0;
+}
+
+/**
+ * ibmvfc_sync_completion - Signal that a synchronous command has completed
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
+{
+ /* copy the response back */
+ if (evt->sync_iu)
+ *evt->sync_iu = *evt->xfer_iu;
+
+ complete(&evt->comp);
+}
+
+/**
+ * ibmvfc_reset_device - Reset the device with the specified reset type
+ * @sdev: scsi device to reset
+ * @type: reset type
+ * @desc: reset type description for log messages
+ *
+ * Returns:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
+{
+ struct ibmvfc_host *vhost = shost_priv(sdev->host);
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ struct ibmvfc_cmd *tmf;
+ struct ibmvfc_event *evt;
+ union ibmvfc_iu rsp_iu;
+ struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
+ int rsp_rc = -EBUSY;
+ unsigned long flags;
+ int rsp_code = 0;
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ if (vhost->state == IBMVFC_ACTIVE) {
+ evt = ibmvfc_get_event(vhost);
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
+
+ tmf = &evt->iu.cmd;
+ memset(tmf, 0, sizeof(*tmf));
+ tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
+ tmf->resp.len = sizeof(tmf->rsp);
+ tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
+ tmf->payload_len = sizeof(tmf->iu);
+ tmf->resp_len = sizeof(tmf->rsp);
+ tmf->cancel_key = (unsigned long)sdev->hostdata;
+ tmf->tgt_scsi_id = rport->port_id;
+ int_to_scsilun(sdev->lun, &tmf->iu.lun);
+ tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
+ tmf->iu.tmf_flags = type;
+ evt->sync_iu = &rsp_iu;
+
+ init_completion(&evt->comp);
+ rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
+ }
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ if (rsp_rc != 0) {
+ sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n",
+ desc, rsp_rc);
+ return -EIO;
+ }
+
+ sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
+ wait_for_completion(&evt->comp);
+
+ if (rsp_iu.cmd.status) {
+ if (fc_rsp->flags & FCP_RSP_LEN_VALID)
+ rsp_code = fc_rsp->data.info.rsp_code;
+
+ sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
+ "flags: %x fcp_rsp: %x, scsi_status: %x\n",
+ desc, ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
+ rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+ fc_rsp->scsi_status);
+ rsp_rc = -EIO;
+ } else
+ sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc);
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ ibmvfc_free_event(evt);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return rsp_rc;
+}
+
+/**
+ * ibmvfc_abort_task_set - Abort outstanding commands to the device
+ * @sdev: scsi device to abort commands
+ *
+ * This sends an Abort Task Set to the VIOS for the specified device. This does
+ * NOT send any cancel to the VIOS. That must be done separately.
+ *
+ * Returns:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_abort_task_set(struct scsi_device *sdev)
+{
+ struct ibmvfc_host *vhost = shost_priv(sdev->host);
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ struct ibmvfc_cmd *tmf;
+ struct ibmvfc_event *evt, *found_evt;
+ union ibmvfc_iu rsp_iu;
+ struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
+ int rsp_rc = -EBUSY;
+ unsigned long flags;
+ int rsp_code = 0;
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ found_evt = NULL;
+ list_for_each_entry(evt, &vhost->sent, queue) {
+ if (evt->cmnd && evt->cmnd->device == sdev) {
+ found_evt = evt;
+ break;
+ }
+ }
+
+ if (!found_evt) {
+ if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
+ sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return 0;
+ }
+
+ if (vhost->state == IBMVFC_ACTIVE) {
+ evt = ibmvfc_get_event(vhost);
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
+
+ tmf = &evt->iu.cmd;
+ memset(tmf, 0, sizeof(*tmf));
+ tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
+ tmf->resp.len = sizeof(tmf->rsp);
+ tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
+ tmf->payload_len = sizeof(tmf->iu);
+ tmf->resp_len = sizeof(tmf->rsp);
+ tmf->cancel_key = (unsigned long)sdev->hostdata;
+ tmf->tgt_scsi_id = rport->port_id;
+ int_to_scsilun(sdev->lun, &tmf->iu.lun);
+ tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
+ tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET;
+ evt->sync_iu = &rsp_iu;
+
+ init_completion(&evt->comp);
+ rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
+ }
+
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ if (rsp_rc != 0) {
+ sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
+ return -EIO;
+ }
+
+ sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
+ wait_for_completion(&evt->comp);
+
+ if (rsp_iu.cmd.status) {
+ if (fc_rsp->flags & FCP_RSP_LEN_VALID)
+ rsp_code = fc_rsp->data.info.rsp_code;
+
+ sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
+ "flags: %x fcp_rsp: %x, scsi_status: %x\n",
+ ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
+ rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+ fc_rsp->scsi_status);
+ rsp_rc = -EIO;
+ } else
+ sdev_printk(KERN_INFO, sdev, "Abort successful\n");
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ ibmvfc_free_event(evt);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return rsp_rc;
+}
+
+/**
+ * ibmvfc_cancel_all - Cancel all outstanding commands to the device
+ * @sdev: scsi device to cancel commands
+ * @type: type of error recovery being performed
+ *
+ * This sends a cancel to the VIOS for the specified device. This does
+ * NOT send any abort to the actual device. That must be done separately.
+ *
+ * Returns:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
+{
+ struct ibmvfc_host *vhost = shost_priv(sdev->host);
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ struct ibmvfc_tmf *tmf;
+ struct ibmvfc_event *evt, *found_evt;
+ union ibmvfc_iu rsp;
+ int rsp_rc = -EBUSY;
+ unsigned long flags;
+ u16 status;
+
+ ENTER;
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ found_evt = NULL;
+ list_for_each_entry(evt, &vhost->sent, queue) {
+ if (evt->cmnd && evt->cmnd->device == sdev) {
+ found_evt = evt;
+ break;
+ }
+ }
+
+ if (!found_evt) {
+ if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
+ sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return 0;
+ }
+
+ if (vhost->state == IBMVFC_ACTIVE) {
+ evt = ibmvfc_get_event(vhost);
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+
+ tmf = &evt->iu.tmf;
+ memset(tmf, 0, sizeof(*tmf));
+ tmf->common.version = 1;
+ tmf->common.opcode = IBMVFC_TMF_MAD;
+ tmf->common.length = sizeof(*tmf);
+ tmf->scsi_id = rport->port_id;
+ int_to_scsilun(sdev->lun, &tmf->lun);
+ tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
+ tmf->cancel_key = (unsigned long)sdev->hostdata;
+ tmf->my_cancel_key = (IBMVFC_TMF_CANCEL_KEY | (unsigned long)sdev->hostdata);
+
+ evt->sync_iu = &rsp;
+ init_completion(&evt->comp);
+ rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
+ }
+
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ if (rsp_rc != 0) {
+ sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
+ return -EIO;
+ }
+
+ sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
+
+ wait_for_completion(&evt->comp);
+ status = rsp.mad_common.status;
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ ibmvfc_free_event(evt);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ if (status != IBMVFC_MAD_SUCCESS) {
+ sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
+ return -EIO;
+ }
+
+ sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
+ return 0;
+}
+
+/**
+ * ibmvfc_eh_abort_handler - Abort a command
+ * @cmd: scsi command to abort
+ *
+ * Returns:
+ * SUCCESS / FAILED
+ **/
+static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
+{
+ struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
+ struct ibmvfc_event *evt, *pos;
+ int cancel_rc, abort_rc;
+ unsigned long flags;
+
+ ENTER;
+ ibmvfc_wait_while_resetting(vhost);
+ cancel_rc = ibmvfc_cancel_all(cmd->device, IBMVFC_TMF_ABORT_TASK_SET);
+ abort_rc = ibmvfc_abort_task_set(cmd->device);
+
+ if (!cancel_rc && !abort_rc) {
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
+ if (evt->cmnd && evt->cmnd->device == cmd->device)
+ ibmvfc_fail_request(evt, DID_ABORT);
+ }
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ LEAVE;
+ return SUCCESS;
+ }
+
+ LEAVE;
+ return FAILED;
+}
+
+/**
+ * ibmvfc_eh_device_reset_handler - Reset a single LUN
+ * @cmd: scsi command struct
+ *
+ * Returns:
+ * SUCCESS / FAILED
+ **/
+static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
+{
+ struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
+ struct ibmvfc_event *evt, *pos;
+ int cancel_rc, reset_rc;
+ unsigned long flags;
+
+ ENTER;
+ ibmvfc_wait_while_resetting(vhost);
+ cancel_rc = ibmvfc_cancel_all(cmd->device, IBMVFC_TMF_LUN_RESET);
+ reset_rc = ibmvfc_reset_device(cmd->device, IBMVFC_LUN_RESET, "LUN");
+
+ if (!cancel_rc && !reset_rc) {
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
+ if (evt->cmnd && evt->cmnd->device == cmd->device)
+ ibmvfc_fail_request(evt, DID_ABORT);
+ }
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ LEAVE;
+ return SUCCESS;
+ }
+
+ LEAVE;
+ return FAILED;
+}
+
+/**
+ * ibmvfc_dev_cancel_all - Device iterated cancel all function
+ * @sdev: scsi device struct
+ * @data: return code
+ *
+ **/
+static void ibmvfc_dev_cancel_all(struct scsi_device *sdev, void *data)
+{
+ unsigned long *rc = data;
+ *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
+}
+
+/**
+ * ibmvfc_dev_abort_all - Device iterated abort task set function
+ * @sdev: scsi device struct
+ * @data: return code
+ *
+ **/
+static void ibmvfc_dev_abort_all(struct scsi_device *sdev, void *data)
+{
+ unsigned long *rc = data;
+ *rc |= ibmvfc_abort_task_set(sdev);
+}
+
+/**
+ * ibmvfc_eh_target_reset_handler - Reset the target
+ * @cmd: scsi command struct
+ *
+ * Returns:
+ * SUCCESS / FAILED
+ **/
+static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
+{
+ struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
+ struct scsi_target *starget = scsi_target(cmd->device);
+ struct ibmvfc_event *evt, *pos;
+ int reset_rc;
+ unsigned long cancel_rc = 0;
+ unsigned long flags;
+
+ ENTER;
+ ibmvfc_wait_while_resetting(vhost);
+ starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
+ reset_rc = ibmvfc_reset_device(cmd->device, IBMVFC_TARGET_RESET, "target");
+
+ if (!cancel_rc && !reset_rc) {
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
+ if (evt->cmnd && scsi_target(evt->cmnd->device) == starget)
+ ibmvfc_fail_request(evt, DID_ABORT);
+ }
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ LEAVE;
+ return SUCCESS;
+ }
+
+ LEAVE;
+ return FAILED;
+}
+
+/**
+ * ibmvfc_eh_host_reset_handler - Reset the connection to the server
+ * @cmd: struct scsi_cmnd having problems
+ *
+ **/
+static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
+{
+ int rc;
+ struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
+
+ dev_err(vhost->dev, "Resetting connection due to error recovery\n");
+ rc = ibmvfc_issue_fc_host_lip(vhost->host);
+ return rc ? FAILED : SUCCESS;
+}
+
+/**
+ * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport.
+ * @rport: rport struct
+ *
+ * Return value:
+ * none
+ **/
+static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
+{
+ struct scsi_target *starget = to_scsi_target(&rport->dev);
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ struct ibmvfc_event *evt, *pos;
+ unsigned long cancel_rc = 0;
+ unsigned long abort_rc = 0;
+ unsigned long flags;
+
+ ENTER;
+ starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all);
+ starget_for_each_device(starget, &abort_rc, ibmvfc_dev_abort_all);
+
+ if (!cancel_rc && !abort_rc) {
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_for_each_entry_safe(evt, pos, &vhost->sent, queue) {
+ if (evt->cmnd && scsi_target(evt->cmnd->device) == starget)
+ ibmvfc_fail_request(evt, DID_ABORT);
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ } else
+ ibmvfc_issue_fc_host_lip(shost);
+
+ scsi_target_unblock(&rport->dev);
+ LEAVE;
+}
+
+static const struct {
+ enum ibmvfc_async_event ae;
+ const char *desc;
+} ae_desc [] = {
+ { IBMVFC_AE_ELS_PLOGI, "PLOGI" },
+ { IBMVFC_AE_ELS_LOGO, "LOGO" },
+ { IBMVFC_AE_ELS_PRLO, "PRLO" },
+ { IBMVFC_AE_SCN_NPORT, "N-Port SCN" },
+ { IBMVFC_AE_SCN_GROUP, "Group SCN" },
+ { IBMVFC_AE_SCN_DOMAIN, "Domain SCN" },
+ { IBMVFC_AE_SCN_FABRIC, "Fabric SCN" },
+ { IBMVFC_AE_LINK_UP, "Link Up" },
+ { IBMVFC_AE_LINK_DOWN, "Link Down" },
+ { IBMVFC_AE_LINK_DEAD, "Link Dead" },
+ { IBMVFC_AE_HALT, "Halt" },
+ { IBMVFC_AE_RESUME, "Resume" },
+ { IBMVFC_AE_ADAPTER_FAILED, "Adapter Failed" },
+};
+
+static const char *unknown_ae = "Unknown async";
+
+/**
+ * ibmvfc_get_ae_desc - Get text description for async event
+ * @ae: async event
+ *
+ **/
+static const char *ibmvfc_get_ae_desc(u64 ae)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
+ if (ae_desc[i].ae == ae)
+ return ae_desc[i].desc;
+
+ return unknown_ae;
+}
+
+/**
+ * ibmvfc_handle_async - Handle an async event from the adapter
+ * @crq: crq to process
+ * @vhost: ibmvfc host struct
+ *
+ **/
+static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
+ struct ibmvfc_host *vhost)
+{
+ const char *desc = ibmvfc_get_ae_desc(crq->event);
+
+ ibmvfc_log(vhost, 2, "%s event received\n", desc);
+
+ switch (crq->event) {
+ case IBMVFC_AE_LINK_UP:
+ case IBMVFC_AE_RESUME:
+ vhost->events_to_log |= IBMVFC_AE_LINKUP;
+ ibmvfc_init_host(vhost);
+ break;
+ case IBMVFC_AE_SCN_FABRIC:
+ vhost->events_to_log |= IBMVFC_AE_RSCN;
+ ibmvfc_init_host(vhost);
+ break;
+ case IBMVFC_AE_SCN_NPORT:
+ case IBMVFC_AE_SCN_GROUP:
+ case IBMVFC_AE_SCN_DOMAIN:
+ vhost->events_to_log |= IBMVFC_AE_RSCN;
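+		/* fall through - SCN events also force a host reinit */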
+ case IBMVFC_AE_ELS_LOGO:
+ case IBMVFC_AE_ELS_PRLO:
+ case IBMVFC_AE_ELS_PLOGI:
+ ibmvfc_reinit_host(vhost);
+ break;
+ case IBMVFC_AE_LINK_DOWN:
+ case IBMVFC_AE_ADAPTER_FAILED:
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+ break;
+ case IBMVFC_AE_LINK_DEAD:
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ break;
+ case IBMVFC_AE_HALT:
+ ibmvfc_link_down(vhost, IBMVFC_HALTED);
+ break;
+ default:
+ dev_err(vhost->dev, "Unknown async event received: %ld\n", crq->event);
+ break;
+	}
+}
+
+/**
+ * ibmvfc_handle_crq - Handles and frees received events in the CRQ
+ * @crq: Command/Response queue
+ * @vhost: ibmvfc host struct
+ *
+ **/
+static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
+{
+ long rc;
+ struct ibmvfc_event *evt = (struct ibmvfc_event *)crq->ioba;
+
+ switch (crq->valid) {
+ case IBMVFC_CRQ_INIT_RSP:
+ switch (crq->format) {
+ case IBMVFC_CRQ_INIT:
+ dev_info(vhost->dev, "Partner initialized\n");
+ /* Send back a response */
+ rc = ibmvfc_send_crq_init_complete(vhost);
+ if (rc == 0)
+ ibmvfc_init_host(vhost);
+ else
+ dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
+ break;
+ case IBMVFC_CRQ_INIT_COMPLETE:
+ dev_info(vhost->dev, "Partner initialization complete\n");
+ ibmvfc_init_host(vhost);
+ break;
+ default:
+ dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
+ }
+ return;
+ case IBMVFC_CRQ_XPORT_EVENT:
+ vhost->state = IBMVFC_NO_CRQ;
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
+ if (crq->format == IBMVFC_PARTITION_MIGRATED) {
+ /* We need to re-setup the interpartition connection */
+ dev_info(vhost->dev, "Re-enabling adapter\n");
+ vhost->client_migrated = 1;
+ ibmvfc_purge_requests(vhost, DID_REQUEUE);
+ if ((rc = ibmvfc_reenable_crq_queue(vhost)) ||
+ (rc = ibmvfc_send_crq_init(vhost))) {
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ dev_err(vhost->dev, "Error after enable (rc=%ld)\n", rc);
+ } else
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+ } else {
+ dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format);
+
+ ibmvfc_purge_requests(vhost, DID_ERROR);
+ if ((rc = ibmvfc_reset_crq(vhost)) ||
+ (rc = ibmvfc_send_crq_init(vhost))) {
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ dev_err(vhost->dev, "Error after reset (rc=%ld)\n", rc);
+ } else
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+ }
+ return;
+ case IBMVFC_CRQ_CMD_RSP:
+ break;
+ default:
+ dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
+ return;
+ }
+
+ if (crq->format == IBMVFC_ASYNC_EVENT)
+ return;
+
+ /* The only kind of payload CRQs we should get are responses to
+ * things we send. Make sure this response is to something we
+ * actually sent
+ */
+ if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) {
+ dev_err(vhost->dev, "Returned correlation_token 0x%08lx is invalid!\n",
+ crq->ioba);
+ return;
+ }
+
+ if (unlikely(atomic_read(&evt->free))) {
+ dev_err(vhost->dev, "Received duplicate correlation_token 0x%08lx!\n",
+ crq->ioba);
+ return;
+ }
+
+ del_timer(&evt->timer);
+ list_del(&evt->queue);
+ ibmvfc_trc_end(evt);
+ evt->done(evt);
+}
+
+/**
+ * ibmvfc_scan_finished - Check if the device scan is done.
+ * @shost: scsi host struct
+ * @time: current elapsed time
+ *
+ * Returns:
+ * 0 if scan is not done / 1 if scan is done
+ **/
+static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ unsigned long flags;
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ int done = 0;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (time >= (init_timeout * HZ)) {
+ dev_info(vhost->dev, "Scan taking longer than %d seconds, "
+ "continuing initialization\n", init_timeout);
+ done = 1;
+ }
+
+ if (vhost->state != IBMVFC_NO_CRQ && vhost->action == IBMVFC_HOST_ACTION_NONE)
+ done = 1;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return done;
+}
+
+/**
+ * ibmvfc_slave_alloc - Setup the device's task set value
+ * @sdev: struct scsi_device device to configure
+ *
+ * Set the device's task set value so that error handling works as
+ * expected.
+ *
+ * Returns:
+ * 0 on success / -ENXIO if device does not exist
+ **/
+static int ibmvfc_slave_alloc(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ unsigned long flags = 0;
+
+ if (!rport || fc_remote_port_chkready(rport))
+ return -ENXIO;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ sdev->hostdata = (void *)(unsigned long)vhost->task_set++;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return 0;
+}
+
+/**
+ * ibmvfc_slave_configure - Configure the device
+ * @sdev: struct scsi_device device to configure
+ *
+ * Enable allow_restart for a device if it is a disk. Adjust the
+ * queue_depth here also.
+ *
+ * Returns:
+ * 0
+ **/
+static int ibmvfc_slave_configure(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+ struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (sdev->type == TYPE_DISK)
+ sdev->allow_restart = 1;
+
+ if (sdev->tagged_supported) {
+ scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
+ scsi_activate_tcq(sdev, sdev->queue_depth);
+ } else
+ scsi_deactivate_tcq(sdev, sdev->queue_depth);
+
+ rport->dev_loss_tmo = dev_loss_tmo;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return 0;
+}
+
+/**
+ * ibmvfc_change_queue_depth - Change the device's queue depth
+ * @sdev: scsi device struct
+ * @qdepth: depth to set
+ *
+ * Return value:
+ * actual depth set
+ **/
+static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
+ qdepth = IBMVFC_MAX_CMDS_PER_LUN;
+
+ scsi_adjust_queue_depth(sdev, 0, qdepth);
+ return sdev->queue_depth;
+}
+
+/**
+ * ibmvfc_change_queue_type - Change the device's queue type
+ * @sdev: scsi device struct
+ * @tag_type: type of tags to use
+ *
+ * Return value:
+ * actual queue type set
+ **/
+static int ibmvfc_change_queue_type(struct scsi_device *sdev, int tag_type)
+{
+ if (sdev->tagged_supported) {
+ scsi_set_tag_type(sdev, tag_type);
+
+ if (tag_type)
+ scsi_activate_tcq(sdev, sdev->queue_depth);
+ else
+ scsi_deactivate_tcq(sdev, sdev->queue_depth);
+ } else
+ tag_type = 0;
+
+ return tag_type;
+}
+
+static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ vhost->login_buf->resp.partition_name);
+}
+
+static struct device_attribute ibmvfc_host_partition_name = {
+ .attr = {
+ .name = "partition_name",
+ .mode = S_IRUGO,
+ },
+ .show = ibmvfc_show_host_partition_name,
+};
+
+static ssize_t ibmvfc_show_host_device_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ vhost->login_buf->resp.device_name);
+}
+
+static struct device_attribute ibmvfc_host_device_name = {
+ .attr = {
+ .name = "device_name",
+ .mode = S_IRUGO,
+ },
+ .show = ibmvfc_show_host_device_name,
+};
+
+static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ vhost->login_buf->resp.port_loc_code);
+}
+
+static struct device_attribute ibmvfc_host_loc_code = {
+ .attr = {
+ .name = "port_loc_code",
+ .mode = S_IRUGO,
+ },
+ .show = ibmvfc_show_host_loc_code,
+};
+
+static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ vhost->login_buf->resp.drc_name);
+}
+
+static struct device_attribute ibmvfc_host_drc_name = {
+ .attr = {
+ .name = "drc_name",
+ .mode = S_IRUGO,
+ },
+ .show = ibmvfc_show_host_drc_name,
+};
+
+static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version);
+}
+
+static struct device_attribute ibmvfc_host_npiv_version = {
+ .attr = {
+ .name = "npiv_version",
+ .mode = S_IRUGO,
+ },
+ .show = ibmvfc_show_host_npiv_version,
+};
+
+/**
+ * ibmvfc_show_log_level - Show the adapter's error logging level
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ibmvfc_show_log_level(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ unsigned long flags = 0;
+ int len;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return len;
+}
+
+/**
+ * ibmvfc_store_log_level - Change the adapter's error logging level
+ * @dev: class device struct
+ * @buf: buffer
+ * @count: buffer size
+ *
+ * Return value:
+ *	number of bytes consumed from the buffer
+ **/
+static ssize_t ibmvfc_store_log_level(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ vhost->log_level = simple_strtoul(buf, NULL, 10);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return strlen(buf);
+}
+
+static struct device_attribute ibmvfc_log_level_attr = {
+ .attr = {
+ .name = "log_level",
+ .mode = S_IRUGO | S_IWUSR,
+ },
+ .show = ibmvfc_show_log_level,
+ .store = ibmvfc_store_log_level
+};
+
+#ifdef CONFIG_SCSI_IBMVFC_TRACE
+/**
+ * ibmvfc_read_trace - Dump the adapter trace
+ * @kobj: kobject struct
+ * @bin_attr: bin_attribute struct
+ * @buf: buffer
+ * @off: offset
+ * @count: buffer size
+ *
+ * Return value:
+ * number of bytes copied to buffer
+ **/
+static ssize_t ibmvfc_read_trace(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ unsigned long flags = 0;
+ int size = IBMVFC_TRACE_SIZE;
+ char *src = (char *)vhost->trace;
+
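+ /* Clamp the request so the copy never runs past the end of the trace buffer */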
+ if (off > size)
+ return 0;
+ if (off + count > size) {
+ size -= off;
+ count = size;
+ }
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ memcpy(buf, &src[off], count);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return count;
+}
+
+static struct bin_attribute ibmvfc_trace_attr = {
+ .attr = {
+ .name = "trace",
+ .mode = S_IRUGO,
+ },
+ .size = 0,
+ .read = ibmvfc_read_trace,
+};
+#endif
+
+static struct device_attribute *ibmvfc_attrs[] = {
+ &ibmvfc_host_partition_name,
+ &ibmvfc_host_device_name,
+ &ibmvfc_host_loc_code,
+ &ibmvfc_host_drc_name,
+ &ibmvfc_host_npiv_version,
+ &ibmvfc_log_level_attr,
+ NULL
+};
+
+static struct scsi_host_template driver_template = {
+ .module = THIS_MODULE,
+ .name = "IBM POWER Virtual FC Adapter",
+ .proc_name = IBMVFC_NAME,
+ .queuecommand = ibmvfc_queuecommand,
+ .eh_abort_handler = ibmvfc_eh_abort_handler,
+ .eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
+ .eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
+ .eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
+ .slave_alloc = ibmvfc_slave_alloc,
+ .slave_configure = ibmvfc_slave_configure,
+ .scan_finished = ibmvfc_scan_finished,
+ .change_queue_depth = ibmvfc_change_queue_depth,
+ .change_queue_type = ibmvfc_change_queue_type,
+ .cmd_per_lun = 16,
+ .can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
+ .this_id = -1,
+ .sg_tablesize = SG_ALL,
+ .max_sectors = IBMVFC_MAX_SECTORS,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = ibmvfc_attrs,
+};
+
+/**
+ * ibmvfc_next_async_crq - Returns the next entry in async queue
+ * @vhost: ibmvfc host struct
+ *
+ * Returns:
+ * Pointer to next entry in queue / NULL if empty
+ **/
+static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_async_crq_queue *async_crq = &vhost->async_crq;
+ struct ibmvfc_async_crq *crq;
+
+ crq = &async_crq->msgs[async_crq->cur];
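+ /* The top bit of "valid" marks a live entry; advance the cursor past it */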
+ if (crq->valid & 0x80) {
+ if (++async_crq->cur == async_crq->size)
+ async_crq->cur = 0;
+ } else
+ crq = NULL;
+
+ return crq;
+}
+
+/**
+ * ibmvfc_next_crq - Returns the next entry in message queue
+ * @vhost: ibmvfc host struct
+ *
+ * Returns:
+ * Pointer to next entry in queue / NULL if empty
+ **/
+static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_crq_queue *queue = &vhost->crq;
+ struct ibmvfc_crq *crq;
+
+ crq = &queue->msgs[queue->cur];
+ if (crq->valid & 0x80) {
+ if (++queue->cur == queue->size)
+ queue->cur = 0;
+ } else
+ crq = NULL;
+
+ return crq;
+}
+
+/**
+ * ibmvfc_interrupt - Interrupt handler
+ * @irq: number of irq to handle, not used
+ * @dev_instance: ibmvfc_host that received interrupt
+ *
+ * Returns:
+ * IRQ_HANDLED
+ **/
+static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
+{
+ struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
+ struct vio_dev *vdev = to_vio_dev(vhost->dev);
+ struct ibmvfc_crq *crq;
+ struct ibmvfc_async_crq *async;
+ unsigned long flags;
+ int done = 0;
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ vio_disable_interrupts(to_vio_dev(vhost->dev));
+ while (!done) {
+ /* Pull all the valid messages off the CRQ */
+ while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
+ ibmvfc_handle_crq(crq, vhost);
+ crq->valid = 0;
+ }
+
+ /* Pull all the valid messages off the async CRQ */
+ while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
+ ibmvfc_handle_async(async, vhost);
+ async->valid = 0;
+ }
+
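+ /*
+ * Re-enable interrupts, then re-check both queues: an entry that
+ * arrived between draining the queues and the enable would otherwise
+ * sit unprocessed with no further interrupt to announce it.
+ */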
+ vio_enable_interrupts(vdev);
+ if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
+ vio_disable_interrupts(vdev);
+ ibmvfc_handle_crq(crq, vhost);
+ crq->valid = 0;
+ } else if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
+ vio_disable_interrupts(vdev);
+ ibmvfc_handle_async(async, vhost);
+ async->valid = 0;
+ } else
+ done = 1;
+ }
+
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return IRQ_HANDLED;
+}
+
+/**
+ * ibmvfc_init_tgt - Set the next init job step for the target
+ * @tgt: ibmvfc target struct
+ * @job_step: job step to perform
+ *
+ **/
+static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
+ void (*job_step) (struct ibmvfc_target *))
+{
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT);
+ tgt->job_step = job_step;
+ wake_up(&tgt->vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization
+ * @tgt: ibmvfc target struct
+ * @job_step: initialization job step
+ *
+ **/
+static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
+ void (*job_step) (struct ibmvfc_target *))
+{
+ if (++tgt->init_retries > IBMVFC_MAX_INIT_RETRIES) {
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ wake_up(&tgt->vhost->work_wait_q);
+ } else
+ ibmvfc_init_tgt(tgt, job_step);
+}
+
+/**
+ * ibmvfc_release_tgt - Free memory allocated for a target
+ * @kref: kref struct
+ *
+ **/
+static void ibmvfc_release_tgt(struct kref *kref)
+{
+ struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
+ kfree(tgt);
+}
+
+/**
+ * ibmvfc_tgt_prli_done - Completion handler for Process Login
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_target *tgt = evt->tgt;
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
+ u32 status = rsp->common.status;
+
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ switch (status) {
+ case IBMVFC_MAD_SUCCESS:
+ tgt_dbg(tgt, "Process Login succeeded\n");
+ tgt->need_login = 0;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_ADD_RPORT);
+ break;
+ case IBMVFC_MAD_DRIVER_FAILED:
+ break;
+ case IBMVFC_MAD_CRQ_ERROR:
+ ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
+ break;
+ case IBMVFC_MAD_FAILED:
+ default:
+ tgt_err(tgt, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
+ ibmvfc_get_cmd_error(rsp->status, rsp->error),
+ rsp->status, rsp->error, status);
+ if (ibmvfc_retry_cmd(rsp->status, rsp->error))
+ ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
+ break;
+ }
+
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ ibmvfc_free_event(evt);
+ wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_tgt_send_prli - Send a process login
+ * @tgt: ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
+{
+ struct ibmvfc_process_login *prli;
+ struct ibmvfc_host *vhost = tgt->vhost;
+ struct ibmvfc_event *evt;
+
+ if (vhost->discovery_threads >= disc_threads)
+ return;
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(vhost);
+ vhost->discovery_threads++;
+ ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
+ evt->tgt = tgt;
+ prli = &evt->iu.prli;
+ memset(prli, 0, sizeof(*prli));
+ prli->common.version = 1;
+ prli->common.opcode = IBMVFC_PROCESS_LOGIN;
+ prli->common.length = sizeof(*prli);
+ prli->scsi_id = tgt->scsi_id;
+
+ prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
+ prli->parms.flags = IBMVFC_PRLI_EST_IMG_PAIR;
+ prli->parms.service_parms = IBMVFC_PRLI_INITIATOR_FUNC;
+
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ } else
+ tgt_dbg(tgt, "Sent process login\n");
+}
+
+/**
+ * ibmvfc_tgt_plogi_done - Completion handler for Port Login
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_target *tgt = evt->tgt;
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
+ u32 status = rsp->common.status;
+
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ switch (status) {
+ case IBMVFC_MAD_SUCCESS:
+ tgt_dbg(tgt, "Port Login succeeded\n");
+ if (tgt->ids.port_name &&
+ tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
+ vhost->reinit = 1;
+ tgt_dbg(tgt, "Port re-init required\n");
+ break;
+ }
+ tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
+ tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
+ tgt->ids.port_id = tgt->scsi_id;
+ tgt->ids.roles = FC_PORT_ROLE_FCP_TARGET;
+ memcpy(&tgt->service_parms, &rsp->service_parms,
+ sizeof(tgt->service_parms));
+ memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
+ sizeof(tgt->service_parms_change));
+ ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
+ break;
+ case IBMVFC_MAD_DRIVER_FAILED:
+ break;
+ case IBMVFC_MAD_CRQ_ERROR:
+ ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
+ break;
+ case IBMVFC_MAD_FAILED:
+ default:
+ tgt_err(tgt, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
+ ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
+ ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
+ ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);
+
+ if (ibmvfc_retry_cmd(rsp->status, rsp->error))
+ ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
+ break;
+ }
+
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ ibmvfc_free_event(evt);
+ wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target
+ * @tgt: ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
+{
+ struct ibmvfc_port_login *plogi;
+ struct ibmvfc_host *vhost = tgt->vhost;
+ struct ibmvfc_event *evt;
+
+ if (vhost->discovery_threads >= disc_threads)
+ return;
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(vhost);
+ vhost->discovery_threads++;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
+ evt->tgt = tgt;
+ plogi = &evt->iu.plogi;
+ memset(plogi, 0, sizeof(*plogi));
+ plogi->common.version = 1;
+ plogi->common.opcode = IBMVFC_PORT_LOGIN;
+ plogi->common.length = sizeof(*plogi);
+ plogi->scsi_id = tgt->scsi_id;
+
+ if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ } else
+ tgt_dbg(tgt, "Sent port login\n");
+}
+
+/**
+ * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_target *tgt = evt->tgt;
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
+ u32 status = rsp->common.status;
+
+ vhost->discovery_threads--;
+ ibmvfc_free_event(evt);
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+
+ switch (status) {
+ case IBMVFC_MAD_SUCCESS:
+ tgt_dbg(tgt, "Implicit Logout succeeded\n");
+ break;
+ case IBMVFC_MAD_DRIVER_FAILED:
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ wake_up(&vhost->work_wait_q);
+ return;
+ case IBMVFC_MAD_FAILED:
+ default:
+ tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
+ break;
+ }
+
+ if (vhost->action == IBMVFC_HOST_ACTION_TGT_INIT)
+ ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
+ else if (vhost->action == IBMVFC_HOST_ACTION_QUERY_TGTS &&
+ tgt->scsi_id != tgt->new_scsi_id)
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
+ * @tgt: ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
+{
+ struct ibmvfc_implicit_logout *mad;
+ struct ibmvfc_host *vhost = tgt->vhost;
+ struct ibmvfc_event *evt;
+
+ if (vhost->discovery_threads >= disc_threads)
+ return;
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(vhost);
+ vhost->discovery_threads++;
+ ibmvfc_init_event(evt, ibmvfc_tgt_implicit_logout_done, IBMVFC_MAD_FORMAT);
+ evt->tgt = tgt;
+ mad = &evt->iu.implicit_logout;
+ memset(mad, 0, sizeof(*mad));
+ mad->common.version = 1;
+ mad->common.opcode = IBMVFC_IMPLICIT_LOGOUT;
+ mad->common.length = sizeof(*mad);
+ mad->old_scsi_id = tgt->scsi_id;
+
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ } else
+ tgt_dbg(tgt, "Sent Implicit Logout\n");
+}
+
+/**
+ * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_target *tgt = evt->tgt;
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
+ u32 status = rsp->common.status;
+
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ switch (status) {
+ case IBMVFC_MAD_SUCCESS:
+ tgt_dbg(tgt, "Query Target succeeded\n");
+ tgt->new_scsi_id = rsp->scsi_id;
+ if (rsp->scsi_id != tgt->scsi_id)
+ ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
+ break;
+ case IBMVFC_MAD_DRIVER_FAILED:
+ break;
+ case IBMVFC_MAD_CRQ_ERROR:
+ ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
+ break;
+ case IBMVFC_MAD_FAILED:
+ default:
+ tgt_err(tgt, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
+ ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
+ ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
+ ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
+
+ if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
+ rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ &&
+ rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG)
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
+ ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
+ break;
+ }
+
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ ibmvfc_free_event(evt);
+ wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_tgt_query_target - Initiate a Query Target for specified target
+ * @tgt: ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
+{
+ struct ibmvfc_query_tgt *query_tgt;
+ struct ibmvfc_host *vhost = tgt->vhost;
+ struct ibmvfc_event *evt;
+
+ if (vhost->discovery_threads >= disc_threads)
+ return;
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(vhost);
+ vhost->discovery_threads++;
+ evt->tgt = tgt;
+ ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
+ query_tgt = &evt->iu.query_tgt;
+ memset(query_tgt, 0, sizeof(*query_tgt));
+ query_tgt->common.version = 1;
+ query_tgt->common.opcode = IBMVFC_QUERY_TARGET;
+ query_tgt->common.length = sizeof(*query_tgt);
+ query_tgt->wwpn = tgt->ids.port_name;
+
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ } else
+ tgt_dbg(tgt, "Sent Query Target\n");
+}
+
+/**
+ * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
+ * @vhost: ibmvfc host struct
+ * @scsi_id: SCSI ID to allocate target for
+ *
+ * Returns:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
+{
+ struct ibmvfc_target *tgt;
+ unsigned long flags;
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->scsi_id == scsi_id) {
+ if (tgt->need_login)
+ ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
+ goto unlock_out;
+ }
+ }
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
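+ /* Allocate with the host lock dropped: GFP_KERNEL allocations can sleep */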
+ tgt = mempool_alloc(vhost->tgt_pool, GFP_KERNEL);
+ if (!tgt) {
+ dev_err(vhost->dev, "Target allocation failure for scsi id %08lx\n",
+ scsi_id);
+ return -ENOMEM;
+ }
+
+ tgt->scsi_id = scsi_id;
+ tgt->new_scsi_id = scsi_id;
+ tgt->vhost = vhost;
+ tgt->need_login = 1;
+ kref_init(&tgt->kref);
+ ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ list_add_tail(&tgt->queue, &vhost->targets);
+
+unlock_out:
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return 0;
+}
+
+/**
+ * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets
+ * @vhost: ibmvfc host struct
+ *
+ * Returns:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
+{
+ int i, rc;
+
+ for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
+ rc = ibmvfc_alloc_target(vhost,
+ vhost->disc_buf->scsi_id[i] & IBMVFC_DISC_TGT_SCSI_ID_MASK);
+
+ return rc;
+}
+
+/**
+ * ibmvfc_discover_targets_done - Completion handler for discover targets MAD
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
+ u32 mad_status = rsp->common.status;
+
+ switch (mad_status) {
+ case IBMVFC_MAD_SUCCESS:
+ ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
+ vhost->num_targets = rsp->num_written;
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
+ break;
+ case IBMVFC_MAD_FAILED:
+ dev_err(vhost->dev, "Discover Targets failed: %s (%x:%x)\n",
+ ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
+ ibmvfc_retry_host_init(vhost);
+ break;
+ case IBMVFC_MAD_DRIVER_FAILED:
+ break;
+ default:
+ dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ break;
+ }
+
+ ibmvfc_free_event(evt);
+ wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_discover_targets - Send Discover Targets MAD
+ * @vhost: ibmvfc host struct
+ *
+ **/
+static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_discover_targets *mad;
+ struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
+
+ ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
+ mad = &evt->iu.discover_targets;
+ memset(mad, 0, sizeof(*mad));
+ mad->common.version = 1;
+ mad->common.opcode = IBMVFC_DISC_TARGETS;
+ mad->common.length = sizeof(*mad);
+ mad->bufflen = vhost->disc_buf_sz;
+ mad->buffer.va = vhost->disc_buf_dma;
+ mad->buffer.len = vhost->disc_buf_sz;
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
+
+ if (!ibmvfc_send_event(evt, vhost, default_timeout))
+ ibmvfc_dbg(vhost, "Sent discover targets\n");
+ else
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+}
+
+/**
+ * ibmvfc_npiv_login_done - Completion handler for NPIV Login
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_host *vhost = evt->vhost;
+ u32 mad_status = evt->xfer_iu->npiv_login.common.status;
+ struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
+ unsigned int npiv_max_sectors;
+
+ switch (mad_status) {
+ case IBMVFC_MAD_SUCCESS:
+ ibmvfc_free_event(evt);
+ break;
+ case IBMVFC_MAD_FAILED:
+ dev_err(vhost->dev, "NPIV Login failed: %s (%x:%x)\n",
+ ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
+ if (ibmvfc_retry_cmd(rsp->status, rsp->error))
+ ibmvfc_retry_host_init(vhost);
+ else
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ ibmvfc_free_event(evt);
+ return;
+ case IBMVFC_MAD_CRQ_ERROR:
+ ibmvfc_retry_host_init(vhost);
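+ /* fall through */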
+ case IBMVFC_MAD_DRIVER_FAILED:
+ ibmvfc_free_event(evt);
+ return;
+ default:
+ dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ ibmvfc_free_event(evt);
+ return;
+ }
+
+ vhost->client_migrated = 0;
+
+ if (!(rsp->flags & IBMVFC_NATIVE_FC)) {
+ dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
+ rsp->flags);
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ wake_up(&vhost->work_wait_q);
+ return;
+ }
+
+ if (rsp->max_cmds <= IBMVFC_NUM_INTERNAL_REQ) {
+ dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
+ rsp->max_cmds);
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ wake_up(&vhost->work_wait_q);
+ return;
+ }
+
+ npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS);
+ dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
+ rsp->partition_name, rsp->device_name, rsp->port_loc_code,
+ rsp->drc_name, npiv_max_sectors);
+
+ fc_host_fabric_name(vhost->host) = rsp->node_name;
+ fc_host_node_name(vhost->host) = rsp->node_name;
+ fc_host_port_name(vhost->host) = rsp->port_name;
+ fc_host_port_id(vhost->host) = rsp->scsi_id;
+ fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
+ fc_host_supported_classes(vhost->host) = 0;
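+ /* Bit 31 of word 0 of each class's service parameters is its validity bit */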
+ if (rsp->service_parms.class1_parms[0] & 0x80000000)
+ fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
+ if (rsp->service_parms.class2_parms[0] & 0x80000000)
+ fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
+ if (rsp->service_parms.class3_parms[0] & 0x80000000)
+ fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
+ fc_host_maxframe_size(vhost->host) =
+ rsp->service_parms.common.bb_rcv_sz & 0x0fff;
+
+ vhost->host->can_queue = rsp->max_cmds - IBMVFC_NUM_INTERNAL_REQ;
+ vhost->host->max_sectors = npiv_max_sectors;
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
+ wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_npiv_login - Sends NPIV login
+ * @vhost: ibmvfc host struct
+ *
+ **/
+static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_npiv_login_mad *mad;
+ struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
+
+ ibmvfc_gather_partition_info(vhost);
+ ibmvfc_set_login_info(vhost);
+ ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
+
+ memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
+ mad = &evt->iu.npiv_login;
+ memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad));
+ mad->common.version = 1;
+ mad->common.opcode = IBMVFC_NPIV_LOGIN;
+ mad->common.length = sizeof(struct ibmvfc_npiv_login_mad);
+ mad->buffer.va = vhost->login_buf_dma;
+ mad->buffer.len = sizeof(*vhost->login_buf);
+
+ memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
+ vhost->async_crq.cur = 0;
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
+
+ if (!ibmvfc_send_event(evt, vhost, default_timeout))
+ ibmvfc_dbg(vhost, "Sent NPIV login\n");
+ else
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+}
+
+/**
+ * ibmvfc_dev_init_to_do - Is there target initialization work to do?
+ * @vhost: ibmvfc host struct
+ *
+ * Returns:
+ * 1 if work to do / 0 if not
+ **/
+static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_target *tgt;
+
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
+ tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * __ibmvfc_work_to_do - Is there task level work to do? (no locking)
+ * @vhost: ibmvfc host struct
+ *
+ * Returns:
+ * 1 if work to do / 0 if not
+ **/
+static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_target *tgt;
+
+ if (kthread_should_stop())
+ return 1;
+ switch (vhost->action) {
+ case IBMVFC_HOST_ACTION_NONE:
+ case IBMVFC_HOST_ACTION_INIT_WAIT:
+ return 0;
+ case IBMVFC_HOST_ACTION_TGT_INIT:
+ case IBMVFC_HOST_ACTION_QUERY_TGTS:
+ if (vhost->discovery_threads == disc_threads)
+ return 0;
+ list_for_each_entry(tgt, &vhost->targets, queue)
+ if (tgt->action == IBMVFC_TGT_ACTION_INIT)
+ return 1;
+ list_for_each_entry(tgt, &vhost->targets, queue)
+ if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
+ return 0;
+ return 1;
+ case IBMVFC_HOST_ACTION_INIT:
+ case IBMVFC_HOST_ACTION_ALLOC_TGTS:
+ case IBMVFC_HOST_ACTION_TGT_ADD:
+ case IBMVFC_HOST_ACTION_TGT_DEL:
+ case IBMVFC_HOST_ACTION_QUERY:
+ default:
+ break;
+ }
+
+ return 1;
+}
+
+/**
+ * ibmvfc_work_to_do - Is there task level work to do?
+ * @vhost: ibmvfc host struct
+ *
+ * Returns:
+ * 1 if work to do / 0 if not
+ **/
+static int ibmvfc_work_to_do(struct ibmvfc_host *vhost)
+{
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ rc = __ibmvfc_work_to_do(vhost);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return rc;
+}
+
+/**
+ * ibmvfc_log_ae - Log async events if necessary
+ * @vhost: ibmvfc host struct
+ * @events: events to log
+ *
+ **/
+static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
+{
+ if (events & IBMVFC_AE_RSCN)
+ fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0);
+ if ((events & IBMVFC_AE_LINKDOWN) &&
+ vhost->state >= IBMVFC_HALTED)
+ fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
+ if ((events & IBMVFC_AE_LINKUP) &&
+ vhost->state == IBMVFC_INITIALIZING)
+ fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0);
+}
+
+/**
+ * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port
+ * @tgt: ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
+{
+ struct ibmvfc_host *vhost = tgt->vhost;
+ struct fc_rport *rport;
+ unsigned long flags;
+
+ tgt_dbg(tgt, "Adding rport\n");
+ rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ tgt->rport = rport;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ if (rport) {
+ tgt_dbg(tgt, "rport add succeeded\n");
+ rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff;
+ rport->supported_classes = 0;
+ if (tgt->service_parms.class1_parms[0] & 0x80000000)
+ rport->supported_classes |= FC_COS_CLASS1;
+ if (tgt->service_parms.class2_parms[0] & 0x80000000)
+ rport->supported_classes |= FC_COS_CLASS2;
+ if (tgt->service_parms.class3_parms[0] & 0x80000000)
+ rport->supported_classes |= FC_COS_CLASS3;
+ } else
+ tgt_dbg(tgt, "rport add failed\n");
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+}
+
+/**
+ * ibmvfc_do_work - Do task level work
+ * @vhost: ibmvfc host struct
+ *
+ **/
+static void ibmvfc_do_work(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_target *tgt;
+ unsigned long flags;
+ struct fc_rport *rport;
+
+ ibmvfc_log_ae(vhost, vhost->events_to_log);
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ vhost->events_to_log = 0;
+ switch (vhost->action) {
+ case IBMVFC_HOST_ACTION_NONE:
+ case IBMVFC_HOST_ACTION_INIT_WAIT:
+ break;
+ case IBMVFC_HOST_ACTION_INIT:
+ BUG_ON(vhost->state != IBMVFC_INITIALIZING);
+ vhost->job_step(vhost);
+ break;
+ case IBMVFC_HOST_ACTION_QUERY:
+ list_for_each_entry(tgt, &vhost->targets, queue)
+ ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS);
+ break;
+ case IBMVFC_HOST_ACTION_QUERY_TGTS:
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
+ tgt->job_step(tgt);
+ break;
+ }
+ }
+
+ if (!ibmvfc_dev_init_to_do(vhost))
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
+ break;
+ case IBMVFC_HOST_ACTION_TGT_DEL:
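+ /*
+ * Delete at most one rport per pass: fc_remote_port_delete() must be
+ * called with the host lock dropped, which invalidates this list
+ * iteration, so return and let the work thread re-enter.
+ */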
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
+ tgt_dbg(tgt, "Deleting rport\n");
+ rport = tgt->rport;
+ tgt->rport = NULL;
+ list_del(&tgt->queue);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ if (rport)
+ fc_remote_port_delete(rport);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ return;
+ }
+ }
+
+ if (vhost->state == IBMVFC_INITIALIZING) {
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
+ vhost->job_step = ibmvfc_discover_targets;
+ } else {
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ scsi_unblock_requests(vhost->host);
+ wake_up(&vhost->init_wait_q);
+ return;
+ }
+ break;
+ case IBMVFC_HOST_ACTION_ALLOC_TGTS:
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ ibmvfc_alloc_targets(vhost);
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ break;
+ case IBMVFC_HOST_ACTION_TGT_INIT:
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
+ tgt->job_step(tgt);
+ break;
+ }
+ }
+
+ if (!ibmvfc_dev_init_to_do(vhost)) {
+ ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD);
+ vhost->init_retries = 0;
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ scsi_unblock_requests(vhost->host);
+ return;
+ }
+ break;
+ case IBMVFC_HOST_ACTION_TGT_ADD:
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->action == IBMVFC_TGT_ACTION_ADD_RPORT) {
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ ibmvfc_tgt_add_rport(tgt);
+ return;
+ } else if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
+ tgt_dbg(tgt, "Deleting rport\n");
+ rport = tgt->rport;
+ tgt->rport = NULL;
+ list_del(&tgt->queue);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ if (rport)
+ fc_remote_port_delete(rport);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ return;
+ }
+ }
+
+ if (vhost->reinit) {
+ vhost->reinit = 0;
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
+ } else {
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
+ wake_up(&vhost->init_wait_q);
+ }
+ break;
+ default:
+ break;
+ }
+
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+}
+
+/**
+ * ibmvfc_work - Worker thread for processing task level work
+ * @data: ibmvfc host struct
+ *
+ * Returns:
+ * zero
+ **/
+static int ibmvfc_work(void *data)
+{
+ struct ibmvfc_host *vhost = data;
+ int rc;
+
+ set_user_nice(current, -20);
+
+ while (1) {
+ rc = wait_event_interruptible(vhost->work_wait_q,
+ ibmvfc_work_to_do(vhost));
+
+ BUG_ON(rc);
+
+ if (kthread_should_stop())
+ break;
+
+ ibmvfc_do_work(vhost);
+ }
+
+ ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
+ return 0;
+}
+
+/**
+ * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
+ * @vhost: ibmvfc host struct
+ *
+ * Allocates a page for messages, maps it for dma, and registers
+ * the crq with the hypervisor.
+ *
+ * Return value:
+ * zero on success / other on failure
+ **/
+static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
+{
+ int rc, retrc = -ENOMEM;
+ struct device *dev = vhost->dev;
+ struct vio_dev *vdev = to_vio_dev(dev);
+ struct ibmvfc_crq_queue *crq = &vhost->crq;
+
+ ENTER;
+ crq->msgs = (struct ibmvfc_crq *)get_zeroed_page(GFP_KERNEL);
+
+ if (!crq->msgs)
+ return -ENOMEM;
+
+ crq->size = PAGE_SIZE / sizeof(*crq->msgs);
+ crq->msg_token = dma_map_single(dev, crq->msgs,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(crq->msg_token))
+ goto map_failed;
+
+ retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
+ crq->msg_token, PAGE_SIZE);
+
+ if (rc == H_RESOURCE)
+ /* maybe kexecing and resource is busy. try a reset */
+ retrc = rc = ibmvfc_reset_crq(vhost);
+
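+ /* H_CLOSED is not fatal: the partner has simply not opened its end yet */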
+ if (rc == H_CLOSED)
+ dev_warn(dev, "Partner adapter not ready\n");
+ else if (rc) {
+ dev_warn(dev, "Error %d opening adapter\n", rc);
+ goto reg_crq_failed;
+ }
+
+ retrc = 0;
+
+ if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
+ dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
+ goto req_irq_failed;
+ }
+
+ if ((rc = vio_enable_interrupts(vdev))) {
+ dev_err(dev, "Error %d enabling interrupts\n", rc);
+ goto req_irq_failed;
+ }
+
+ crq->cur = 0;
+ LEAVE;
+ return retrc;
+
+req_irq_failed:
+ do {
+ rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
+ } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+reg_crq_failed:
+ dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
+map_failed:
+ free_page((unsigned long)crq->msgs);
+ return retrc;
+}
+
+/**
+ * ibmvfc_free_mem - Free memory for vhost
+ * @vhost: ibmvfc host struct
+ *
+ * Return value:
+ * none
+ **/
+static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
+
+ ENTER;
+ mempool_destroy(vhost->tgt_pool);
+ kfree(vhost->trace);
+ dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf,
+ vhost->disc_buf_dma);
+ dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
+ vhost->login_buf, vhost->login_buf_dma);
+ dma_pool_destroy(vhost->sg_pool);
+ dma_unmap_single(vhost->dev, async_q->msg_token,
+ async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
+ free_page((unsigned long)async_q->msgs);
+ LEAVE;
+}
+
+/**
+ * ibmvfc_alloc_mem - Allocate memory for vhost
+ * @vhost: ibmvfc host struct
+ *
+ * Return value:
+ * 0 on success / non-zero on failure
+ **/
+static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
+ struct device *dev = vhost->dev;
+
+ ENTER;
+ async_q->msgs = (struct ibmvfc_async_crq *)get_zeroed_page(GFP_KERNEL);
+ if (!async_q->msgs) {
+ dev_err(dev, "Couldn't allocate async queue.\n");
+ goto nomem;
+ }
+
+ async_q->size = PAGE_SIZE / sizeof(struct ibmvfc_async_crq);
+ async_q->msg_token = dma_map_single(dev, async_q->msgs,
+ async_q->size * sizeof(*async_q->msgs),
+ DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(async_q->msg_token)) {
+ dev_err(dev, "Failed to map async queue\n");
+ goto free_async_crq;
+ }
+
+ vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
+ SG_ALL * sizeof(struct srp_direct_buf),
+ sizeof(struct srp_direct_buf), 0);
+
+ if (!vhost->sg_pool) {
+ dev_err(dev, "Failed to allocate sg pool\n");
+ goto unmap_async_crq;
+ }
+
+ vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf),
+ &vhost->login_buf_dma, GFP_KERNEL);
+
+ if (!vhost->login_buf) {
+ dev_err(dev, "Couldn't allocate NPIV login buffer\n");
+ goto free_sg_pool;
+ }
+
+ vhost->disc_buf_sz = sizeof(vhost->disc_buf->scsi_id[0]) * max_targets;
+ vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
+ &vhost->disc_buf_dma, GFP_KERNEL);
+
+ if (!vhost->disc_buf) {
+ dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
+ goto free_login_buffer;
+ }
+
+ vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
+ sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
+
+ if (!vhost->trace)
+ goto free_disc_buffer;
+
+ vhost->tgt_pool = mempool_create_kzalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
+ sizeof(struct ibmvfc_target));
+
+ if (!vhost->tgt_pool) {
+ dev_err(dev, "Couldn't allocate target memory pool\n");
+ goto free_trace;
+ }
+
+ LEAVE;
+ return 0;
+
+free_trace:
+ kfree(vhost->trace);
+free_disc_buffer:
+ dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf,
+ vhost->disc_buf_dma);
+free_login_buffer:
+ dma_free_coherent(dev, sizeof(*vhost->login_buf),
+ vhost->login_buf, vhost->login_buf_dma);
+free_sg_pool:
+ dma_pool_destroy(vhost->sg_pool);
+unmap_async_crq:
+ dma_unmap_single(dev, async_q->msg_token,
+ async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
+free_async_crq:
+ free_page((unsigned long)async_q->msgs);
+nomem:
+ LEAVE;
+ return -ENOMEM;
+}
+
+/**
+ * ibmvfc_probe - Adapter hot plug add entry point
+ * @vdev: vio device struct
+ * @id: vio device id struct
+ *
+ * Return value:
+ * 0 on success / non-zero on failure
+ **/
+static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+{
+ struct ibmvfc_host *vhost;
+ struct Scsi_Host *shost;
+ struct device *dev = &vdev->dev;
+ int rc = -ENOMEM;
+
+ ENTER;
+ shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
+ if (!shost) {
+ dev_err(dev, "Couldn't allocate host data\n");
+ goto out;
+ }
+
+ shost->transportt = ibmvfc_transport_template;
+ shost->can_queue = max_requests;
+ shost->max_lun = max_lun;
+ shost->max_id = max_targets;
+ shost->max_sectors = IBMVFC_MAX_SECTORS;
+ shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
+ shost->unique_id = shost->host_no;
+
+ vhost = shost_priv(shost);
+ INIT_LIST_HEAD(&vhost->sent);
+ INIT_LIST_HEAD(&vhost->free);
+ INIT_LIST_HEAD(&vhost->targets);
+ sprintf(vhost->name, IBMVFC_NAME);
+ vhost->host = shost;
+ vhost->dev = dev;
+ vhost->partition_number = -1;
+ vhost->log_level = log_level;
+ strcpy(vhost->partition_name, "UNKNOWN");
+ init_waitqueue_head(&vhost->work_wait_q);
+ init_waitqueue_head(&vhost->init_wait_q);
+
+ if ((rc = ibmvfc_alloc_mem(vhost)))
+ goto free_scsi_host;
+
+ vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME,
+ shost->host_no);
+
+ if (IS_ERR(vhost->work_thread)) {
+ dev_err(dev, "Couldn't create kernel thread: %ld\n",
+ PTR_ERR(vhost->work_thread));
+ rc = PTR_ERR(vhost->work_thread);
+ goto free_host_mem;
+ }
+
+ if ((rc = ibmvfc_init_crq(vhost))) {
+ dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
+ goto kill_kthread;
+ }
+
+ if ((rc = ibmvfc_init_event_pool(vhost))) {
+ dev_err(dev, "Couldn't initialize event pool. rc=%d\n", rc);
+ goto release_crq;
+ }
+
+ if ((rc = scsi_add_host(shost, dev)))
+ goto release_event_pool;
+
+ if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
+ &ibmvfc_trace_attr))) {
+ dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
+ goto remove_shost;
+ }
+
+ dev_set_drvdata(dev, vhost);
+ spin_lock(&ibmvfc_driver_lock);
+ list_add_tail(&vhost->queue, &ibmvfc_head);
+ spin_unlock(&ibmvfc_driver_lock);
+
+ ibmvfc_send_crq_init(vhost);
+ scsi_scan_host(shost);
+ return 0;
+
+remove_shost:
+ scsi_remove_host(shost);
+release_event_pool:
+ ibmvfc_free_event_pool(vhost);
+release_crq:
+ ibmvfc_release_crq_queue(vhost);
+kill_kthread:
+ kthread_stop(vhost->work_thread);
+free_host_mem:
+ ibmvfc_free_mem(vhost);
+free_scsi_host:
+ scsi_host_put(shost);
+out:
+ LEAVE;
+ return rc;
+}
+
+/**
+ * ibmvfc_remove - Adapter hot plug remove entry point
+ * @vdev: vio device struct
+ *
+ * Return value:
+ * 0
+ **/
+static int ibmvfc_remove(struct vio_dev *vdev)
+{
+ struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
+ unsigned long flags;
+
+ ENTER;
+ ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
+ kthread_stop(vhost->work_thread);
+ fc_remove_host(vhost->host);
+ scsi_remove_host(vhost->host);
+ ibmvfc_release_crq_queue(vhost);
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ ibmvfc_purge_requests(vhost, DID_ERROR);
+ ibmvfc_free_event_pool(vhost);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ ibmvfc_free_mem(vhost);
+ spin_lock(&ibmvfc_driver_lock);
+ list_del(&vhost->queue);
+ spin_unlock(&ibmvfc_driver_lock);
+ scsi_host_put(vhost->host);
+ LEAVE;
+ return 0;
+}
+
+static struct vio_device_id ibmvfc_device_table[] __devinitdata = {
+ {"fcp", "IBM,vfc-client"},
+ { "", "" }
+};
+MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
+
+static struct vio_driver ibmvfc_driver = {
+ .id_table = ibmvfc_device_table,
+ .probe = ibmvfc_probe,
+ .remove = ibmvfc_remove,
+ .driver = {
+ .name = IBMVFC_NAME,
+ .owner = THIS_MODULE,
+ }
+};
+
+static struct fc_function_template ibmvfc_transport_functions = {
+ .show_host_fabric_name = 1,
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_port_type = 1,
+ .show_host_port_id = 1,
+
+ .get_host_port_state = ibmvfc_get_host_port_state,
+ .show_host_port_state = 1,
+
+ .get_host_speed = ibmvfc_get_host_speed,
+ .show_host_speed = 1,
+
+ .issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
+ .terminate_rport_io = ibmvfc_terminate_rport_io,
+
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+
+ .get_starget_node_name = ibmvfc_get_starget_node_name,
+ .show_starget_node_name = 1,
+
+ .get_starget_port_name = ibmvfc_get_starget_port_name,
+ .show_starget_port_name = 1,
+
+ .get_starget_port_id = ibmvfc_get_starget_port_id,
+ .show_starget_port_id = 1,
+};
+
+/**
+ * ibmvfc_module_init - Initialize the ibmvfc module
+ *
+ * Return value:
+ * 0 on success / other on failure
+ **/
+static int __init ibmvfc_module_init(void)
+{
+ int rc;
+
+ if (!firmware_has_feature(FW_FEATURE_VIO))
+ return -ENODEV;
+
+ printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
+ IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
+
+ ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
+ if (!ibmvfc_transport_template)
+ return -ENOMEM;
+
+ rc = vio_register_driver(&ibmvfc_driver);
+ if (rc)
+ fc_release_transport(ibmvfc_transport_template);
+ return rc;
+}
+
+/**
+ * ibmvfc_module_exit - Teardown the ibmvfc module
+ *
+ * Return value:
+ * nothing
+ **/
+static void __exit ibmvfc_module_exit(void)
+{
+ vio_unregister_driver(&ibmvfc_driver);
+ fc_release_transport(ibmvfc_transport_template);
+}
+
+module_init(ibmvfc_module_init);
+module_exit(ibmvfc_module_exit);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
new file mode 100644
index 000000000000..057f3c01ed61
--- /dev/null
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -0,0 +1,682 @@
+/*
+ * ibmvfc.h -- driver for IBM Power Virtual Fibre Channel Adapter
+ *
+ * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) IBM Corporation, 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef _IBMVFC_H
+#define _IBMVFC_H
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include "viosrp.h"
+
+#define IBMVFC_NAME "ibmvfc"
+#define IBMVFC_DRIVER_VERSION "1.0.0"
+#define IBMVFC_DRIVER_DATE "(July 1, 2008)"
+
+#define IBMVFC_DEFAULT_TIMEOUT 15
+#define IBMVFC_INIT_TIMEOUT 30
+#define IBMVFC_MAX_REQUESTS_DEFAULT 100
+
+#define IBMVFC_DEBUG 0
+#define IBMVFC_MAX_TARGETS 1024
+#define IBMVFC_MAX_LUN 0xffffffff
+#define IBMVFC_MAX_SECTORS 0xffffu
+#define IBMVFC_MAX_DISC_THREADS 4
+#define IBMVFC_TGT_MEMPOOL_SZ 64
+#define IBMVFC_MAX_CMDS_PER_LUN 64
+#define IBMVFC_MAX_INIT_RETRIES 3
+#define IBMVFC_DEV_LOSS_TMO (5 * 60)
+#define IBMVFC_DEFAULT_LOG_LEVEL 2
+#define IBMVFC_MAX_CDB_LEN 16
+
+/*
+ * Ensure we have resources for ERP and initialization:
+ * 1 for ERP
+ * 1 for initialization
+ * 1 for each discovery thread
+ */
+#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + disc_threads)
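+/* e.g. with disc_threads at its maximum of IBMVFC_MAX_DISC_THREADS (4) this is 6 */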
+
+#define IBMVFC_MAD_SUCCESS 0x00
+#define IBMVFC_MAD_NOT_SUPPORTED 0xF1
+#define IBMVFC_MAD_FAILED 0xF7
+#define IBMVFC_MAD_DRIVER_FAILED 0xEE
+#define IBMVFC_MAD_CRQ_ERROR 0xEF
+
+enum ibmvfc_crq_valid {
+ IBMVFC_CRQ_CMD_RSP = 0x80,
+ IBMVFC_CRQ_INIT_RSP = 0xC0,
+ IBMVFC_CRQ_XPORT_EVENT = 0xFF,
+};
+
+enum ibmvfc_crq_format {
+ IBMVFC_CRQ_INIT = 0x01,
+ IBMVFC_CRQ_INIT_COMPLETE = 0x02,
+ IBMVFC_PARTITION_MIGRATED = 0x06,
+};
+
+enum ibmvfc_cmd_status_flags {
+ IBMVFC_FABRIC_MAPPED = 0x0001,
+ IBMVFC_VIOS_FAILURE = 0x0002,
+ IBMVFC_FC_FAILURE = 0x0004,
+ IBMVFC_FC_SCSI_ERROR = 0x0008,
+ IBMVFC_HW_EVENT_LOGGED = 0x0010,
+ IBMVFC_VIOS_LOGGED = 0x0020,
+};
+
+enum ibmvfc_fabric_mapped_errors {
+ IBMVFC_UNABLE_TO_ESTABLISH = 0x0001,
+ IBMVFC_XPORT_FAULT = 0x0002,
+ IBMVFC_CMD_TIMEOUT = 0x0003,
+ IBMVFC_ENETDOWN = 0x0004,
+ IBMVFC_HW_FAILURE = 0x0005,
+ IBMVFC_LINK_DOWN_ERR = 0x0006,
+ IBMVFC_LINK_DEAD_ERR = 0x0007,
+ IBMVFC_UNABLE_TO_REGISTER = 0x0008,
+ IBMVFC_XPORT_BUSY = 0x000A,
+ IBMVFC_XPORT_DEAD = 0x000B,
+ IBMVFC_CONFIG_ERROR = 0x000C,
+ IBMVFC_NAME_SERVER_FAIL = 0x000D,
+ IBMVFC_LINK_HALTED = 0x000E,
+ IBMVFC_XPORT_GENERAL = 0x8000,
+};
+
+enum ibmvfc_vios_errors {
+ IBMVFC_CRQ_FAILURE = 0x0001,
+ IBMVFC_SW_FAILURE = 0x0002,
+ IBMVFC_INVALID_PARAMETER = 0x0003,
+ IBMVFC_MISSING_PARAMETER = 0x0004,
+ IBMVFC_HOST_IO_BUS = 0x0005,
+ IBMVFC_TRANS_CANCELLED = 0x0006,
+ IBMVFC_TRANS_CANCELLED_IMPLICIT = 0x0007,
+ IBMVFC_INSUFFICIENT_RESOURCE = 0x0008,
+ IBMVFC_COMMAND_FAILED = 0x8000,
+};
+
+enum ibmvfc_mad_types {
+ IBMVFC_NPIV_LOGIN = 0x0001,
+ IBMVFC_DISC_TARGETS = 0x0002,
+ IBMVFC_PORT_LOGIN = 0x0004,
+ IBMVFC_PROCESS_LOGIN = 0x0008,
+ IBMVFC_QUERY_TARGET = 0x0010,
+ IBMVFC_IMPLICIT_LOGOUT = 0x0040,
+ IBMVFC_TMF_MAD = 0x0100,
+};
+
+struct ibmvfc_mad_common {
+ u32 version;
+ u32 reserved;
+ u32 opcode;
+ u16 status;
+ u16 length;
+ u64 tag;
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_npiv_login_mad {
+ struct ibmvfc_mad_common common;
+ struct srp_direct_buf buffer;
+}__attribute__((packed, aligned (8)));
+
+#define IBMVFC_MAX_NAME 256
+
+struct ibmvfc_npiv_login {
+ u32 ostype;
+#define IBMVFC_OS_LINUX 0x02
+ u32 pad;
+ u64 max_dma_len;
+ u32 max_payload;
+ u32 max_response;
+ u32 partition_num;
+ u32 vfc_frame_version;
+ u16 fcp_version;
+ u16 flags;
+#define IBMVFC_CLIENT_MIGRATED 0x01
+#define IBMVFC_FLUSH_ON_HALT 0x02
+ u32 max_cmds;
+ u64 capabilities;
+#define IBMVFC_CAN_MIGRATE 0x01
+ u64 node_name;
+ struct srp_direct_buf async;
+ u8 partition_name[IBMVFC_MAX_NAME];
+ u8 device_name[IBMVFC_MAX_NAME];
+ u8 drc_name[IBMVFC_MAX_NAME];
+ u64 reserved2[2];
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_common_svc_parms {
+ u16 fcph_version;
+ u16 b2b_credit;
+ u16 features;
+ u16 bb_rcv_sz; /* upper nibble is BB_SC_N */
+ u32 ratov;
+ u32 edtov;
+}__attribute__((packed, aligned (4)));
+
+struct ibmvfc_service_parms {
+ struct ibmvfc_common_svc_parms common;
+ u8 port_name[8];
+ u8 node_name[8];
+ u32 class1_parms[4];
+ u32 class2_parms[4];
+ u32 class3_parms[4];
+ u32 obsolete[4];
+ u32 vendor_version[4];
+ u32 services_avail[2];
+ u32 ext_len;
+ u32 reserved[30];
+ u32 clk_sync_qos[2];
+}__attribute__((packed, aligned (4)));
+
+struct ibmvfc_npiv_login_resp {
+ u32 version;
+ u16 status;
+ u16 error;
+ u32 flags;
+#define IBMVFC_NATIVE_FC 0x01
+#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
+ u32 reserved;
+ u64 capabilities;
+ u32 max_cmds;
+ u32 scsi_id_sz;
+ u64 max_dma_len;
+ u64 scsi_id;
+ u64 port_name;
+ u64 node_name;
+ u64 link_speed;
+ u8 partition_name[IBMVFC_MAX_NAME];
+ u8 device_name[IBMVFC_MAX_NAME];
+ u8 port_loc_code[IBMVFC_MAX_NAME];
+ u8 drc_name[IBMVFC_MAX_NAME];
+ struct ibmvfc_service_parms service_parms;
+ u64 reserved2;
+}__attribute__((packed, aligned (8)));
+
+union ibmvfc_npiv_login_data {
+ struct ibmvfc_npiv_login login;
+ struct ibmvfc_npiv_login_resp resp;
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_discover_targets_buf {
+ u32 scsi_id[1];
+#define IBMVFC_DISC_TGT_SCSI_ID_MASK 0x00ffffff
+};
+
+struct ibmvfc_discover_targets {
+ struct ibmvfc_mad_common common;
+ struct srp_direct_buf buffer;
+ u32 flags;
+ u16 status;
+ u16 error;
+ u32 bufflen;
+ u32 num_avail;
+ u32 num_written;
+ u64 reserved[2];
+}__attribute__((packed, aligned (8)));
+
+enum ibmvfc_fc_reason {
+ IBMVFC_INVALID_ELS_CMD_CODE = 0x01,
+ IBMVFC_INVALID_VERSION = 0x02,
+ IBMVFC_LOGICAL_ERROR = 0x03,
+ IBMVFC_INVALID_CT_IU_SIZE = 0x04,
+ IBMVFC_LOGICAL_BUSY = 0x05,
+ IBMVFC_PROTOCOL_ERROR = 0x07,
+ IBMVFC_UNABLE_TO_PERFORM_REQ = 0x09,
+ IBMVFC_CMD_NOT_SUPPORTED = 0x0B,
+ IBMVFC_SERVER_NOT_AVAIL = 0x0D,
+ IBMVFC_CMD_IN_PROGRESS = 0x0E,
+ IBMVFC_VENDOR_SPECIFIC = 0xFF,
+};
+
+enum ibmvfc_fc_type {
+ IBMVFC_FABRIC_REJECT = 0x01,
+ IBMVFC_PORT_REJECT = 0x02,
+ IBMVFC_LS_REJECT = 0x03,
+ IBMVFC_FABRIC_BUSY = 0x04,
+ IBMVFC_PORT_BUSY = 0x05,
+ IBMVFC_BASIC_REJECT = 0x06,
+};
+
+enum ibmvfc_gs_explain {
+ IBMVFC_PORT_NAME_NOT_REG = 0x02,
+};
+
+struct ibmvfc_port_login {
+ struct ibmvfc_mad_common common;
+ u64 scsi_id;
+ u16 reserved;
+ u16 fc_service_class;
+ u32 blksz;
+ u32 hdr_per_blk;
+ u16 status;
+ u16 error; /* also fc_reason */
+ u16 fc_explain;
+ u16 fc_type;
+ u32 reserved2;
+ struct ibmvfc_service_parms service_parms;
+ struct ibmvfc_service_parms service_parms_change;
+ u64 reserved3[2];
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_prli_svc_parms {
+ u8 type;
+#define IBMVFC_SCSI_FCP_TYPE 0x08
+ u8 type_ext;
+ u16 flags;
+#define IBMVFC_PRLI_ORIG_PA_VALID 0x8000
+#define IBMVFC_PRLI_RESP_PA_VALID 0x4000
+#define IBMVFC_PRLI_EST_IMG_PAIR 0x2000
+ u32 orig_pa;
+ u32 resp_pa;
+ u32 service_parms;
+#define IBMVFC_PRLI_TASK_RETRY 0x00000200
+#define IBMVFC_PRLI_RETRY 0x00000100
+#define IBMVFC_PRLI_DATA_OVERLAY 0x00000040
+#define IBMVFC_PRLI_INITIATOR_FUNC 0x00000020
+#define IBMVFC_PRLI_TARGET_FUNC 0x00000010
+#define IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED 0x00000002
+#define IBMVFC_PRLI_WR_FCP_XFER_RDY_DISABLED 0x00000001
+}__attribute__((packed, aligned (4)));
+
+struct ibmvfc_process_login {
+ struct ibmvfc_mad_common common;
+ u64 scsi_id;
+ struct ibmvfc_prli_svc_parms parms;
+ u8 reserved[48];
+ u16 status;
+ u16 error; /* also fc_reason */
+ u32 reserved2;
+ u64 reserved3[2];
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_query_tgt {
+ struct ibmvfc_mad_common common;
+ u64 wwpn;
+ u64 scsi_id;
+ u16 status;
+ u16 error;
+ u16 fc_explain;
+ u16 fc_type;
+ u64 reserved[2];
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_implicit_logout {
+ struct ibmvfc_mad_common common;
+ u64 old_scsi_id;
+ u64 reserved[2];
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_tmf {
+ struct ibmvfc_mad_common common;
+ u64 scsi_id;
+ struct scsi_lun lun;
+ u32 flags;
+#define IBMVFC_TMF_ABORT_TASK 0x02
+#define IBMVFC_TMF_ABORT_TASK_SET 0x04
+#define IBMVFC_TMF_LUN_RESET 0x10
+#define IBMVFC_TMF_TGT_RESET 0x20
+#define IBMVFC_TMF_LUA_VALID 0x40
+ u32 cancel_key;
+ u32 my_cancel_key;
+#define IBMVFC_TMF_CANCEL_KEY 0x80000000
+ u32 pad;
+ u64 reserved[2];
+}__attribute__((packed, aligned (8)));
+
+enum ibmvfc_fcp_rsp_info_codes {
+ RSP_NO_FAILURE = 0x00,
+ RSP_TMF_REJECTED = 0x04,
+ RSP_TMF_FAILED = 0x05,
+ RSP_TMF_INVALID_LUN = 0x09,
+};
+
+struct ibmvfc_fcp_rsp_info {
+ u16 reserved;
+ u8 rsp_code;
+ u8 reserved2[4];
+}__attribute__((packed, aligned (2)));
+
+enum ibmvfc_fcp_rsp_flags {
+ FCP_BIDI_RSP = 0x80,
+ FCP_BIDI_READ_RESID_UNDER = 0x40,
+ FCP_BIDI_READ_RESID_OVER = 0x20,
+ FCP_CONF_REQ = 0x10,
+ FCP_RESID_UNDER = 0x08,
+ FCP_RESID_OVER = 0x04,
+ FCP_SNS_LEN_VALID = 0x02,
+ FCP_RSP_LEN_VALID = 0x01,
+};
+
+union ibmvfc_fcp_rsp_data {
+ struct ibmvfc_fcp_rsp_info info;
+ u8 sense[SCSI_SENSE_BUFFERSIZE + sizeof(struct ibmvfc_fcp_rsp_info)];
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_fcp_rsp {
+ u64 reserved;
+ u16 retry_delay_timer;
+ u8 flags;
+ u8 scsi_status;
+ u32 fcp_resid;
+ u32 fcp_sense_len;
+ u32 fcp_rsp_len;
+ union ibmvfc_fcp_rsp_data data;
+}__attribute__((packed, aligned (8)));
+
+enum ibmvfc_cmd_flags {
+ IBMVFC_SCATTERLIST = 0x0001,
+ IBMVFC_NO_MEM_DESC = 0x0002,
+ IBMVFC_READ = 0x0004,
+ IBMVFC_WRITE = 0x0008,
+ IBMVFC_TMF = 0x0080,
+ IBMVFC_CLASS_3_ERR = 0x0100,
+};
+
+enum ibmvfc_fc_task_attr {
+ IBMVFC_SIMPLE_TASK = 0x00,
+ IBMVFC_HEAD_OF_QUEUE = 0x01,
+ IBMVFC_ORDERED_TASK = 0x02,
+ IBMVFC_ACA_TASK = 0x04,
+};
+
+enum ibmvfc_fc_tmf_flags {
+ IBMVFC_ABORT_TASK_SET = 0x02,
+ IBMVFC_LUN_RESET = 0x10,
+ IBMVFC_TARGET_RESET = 0x20,
+};
+
+struct ibmvfc_fcp_cmd_iu {
+ struct scsi_lun lun;
+ u8 crn;
+ u8 pri_task_attr;
+ u8 tmf_flags;
+ u8 add_cdb_len;
+#define IBMVFC_RDDATA 0x02
+#define IBMVFC_WRDATA 0x01
+ u8 cdb[IBMVFC_MAX_CDB_LEN];
+ u32 xfer_len;
+}__attribute__((packed, aligned (4)));
+
+struct ibmvfc_cmd {
+ u64 task_tag;
+ u32 frame_type;
+ u32 payload_len;
+ u32 resp_len;
+ u32 adapter_resid;
+ u16 status;
+ u16 error;
+ u16 flags;
+ u16 response_flags;
+#define IBMVFC_ADAPTER_RESID_VALID 0x01
+ u32 cancel_key;
+ u32 exchange_id;
+ struct srp_direct_buf ext_func;
+ struct srp_direct_buf ioba;
+ struct srp_direct_buf resp;
+ u64 correlation;
+ u64 tgt_scsi_id;
+ u64 tag;
+ u64 reserved3[2];
+ struct ibmvfc_fcp_cmd_iu iu;
+ struct ibmvfc_fcp_rsp rsp;
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_trace_start_entry {
+ u32 xfer_len;
+}__attribute__((packed));
+
+struct ibmvfc_trace_end_entry {
+ u16 status;
+ u16 error;
+ u8 fcp_rsp_flags;
+ u8 rsp_code;
+ u8 scsi_status;
+ u8 reserved;
+}__attribute__((packed));
+
+struct ibmvfc_trace_entry {
+ struct ibmvfc_event *evt;
+ u32 time;
+ u32 scsi_id;
+ u32 lun;
+ u8 fmt;
+ u8 op_code;
+ u8 tmf_flags;
+ u8 type;
+#define IBMVFC_TRC_START 0x00
+#define IBMVFC_TRC_END 0xff
+ union {
+ struct ibmvfc_trace_start_entry start;
+ struct ibmvfc_trace_end_entry end;
+ } u;
+}__attribute__((packed, aligned (8)));
+
+enum ibmvfc_crq_formats {
+ IBMVFC_CMD_FORMAT = 0x01,
+ IBMVFC_ASYNC_EVENT = 0x02,
+ IBMVFC_MAD_FORMAT = 0x04,
+};
+
+enum ibmvfc_async_event {
+ IBMVFC_AE_ELS_PLOGI = 0x0001,
+ IBMVFC_AE_ELS_LOGO = 0x0002,
+ IBMVFC_AE_ELS_PRLO = 0x0004,
+ IBMVFC_AE_SCN_NPORT = 0x0008,
+ IBMVFC_AE_SCN_GROUP = 0x0010,
+ IBMVFC_AE_SCN_DOMAIN = 0x0020,
+ IBMVFC_AE_SCN_FABRIC = 0x0040,
+ IBMVFC_AE_LINK_UP = 0x0080,
+ IBMVFC_AE_LINK_DOWN = 0x0100,
+ IBMVFC_AE_LINK_DEAD = 0x0200,
+ IBMVFC_AE_HALT = 0x0400,
+ IBMVFC_AE_RESUME = 0x0800,
+ IBMVFC_AE_ADAPTER_FAILED = 0x1000,
+};
+
+struct ibmvfc_crq {
+ u8 valid;
+ u8 format;
+ u8 reserved[6];
+ u64 ioba;
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_crq_queue {
+ struct ibmvfc_crq *msgs;
+ int size, cur;
+ dma_addr_t msg_token;
+};
+
+struct ibmvfc_async_crq {
+ u8 valid;
+ u8 pad[3];
+ u32 pad2;
+ u64 event;
+ u64 scsi_id;
+ u64 wwpn;
+ u64 node_name;
+ u64 reserved;
+}__attribute__((packed, aligned (8)));
+
+struct ibmvfc_async_crq_queue {
+ struct ibmvfc_async_crq *msgs;
+ int size, cur;
+ dma_addr_t msg_token;
+};
+
+union ibmvfc_iu {
+ struct ibmvfc_mad_common mad_common;
+ struct ibmvfc_npiv_login_mad npiv_login;
+ struct ibmvfc_discover_targets discover_targets;
+ struct ibmvfc_port_login plogi;
+ struct ibmvfc_process_login prli;
+ struct ibmvfc_query_tgt query_tgt;
+ struct ibmvfc_implicit_logout implicit_logout;
+ struct ibmvfc_tmf tmf;
+ struct ibmvfc_cmd cmd;
+}__attribute__((packed, aligned (8)));
+
+enum ibmvfc_target_action {
+ IBMVFC_TGT_ACTION_NONE = 0,
+ IBMVFC_TGT_ACTION_INIT,
+ IBMVFC_TGT_ACTION_INIT_WAIT,
+ IBMVFC_TGT_ACTION_ADD_RPORT,
+ IBMVFC_TGT_ACTION_DEL_RPORT,
+};
+
+struct ibmvfc_target {
+ struct list_head queue;
+ struct ibmvfc_host *vhost;
+ u64 scsi_id;
+ u64 new_scsi_id;
+ struct fc_rport *rport;
+ int target_id;
+ enum ibmvfc_target_action action;
+ int need_login;
+ int init_retries;
+ struct ibmvfc_service_parms service_parms;
+ struct ibmvfc_service_parms service_parms_change;
+ struct fc_rport_identifiers ids;
+ void (*job_step) (struct ibmvfc_target *);
+ struct kref kref;
+};
+
+/* a unit of work for the hosting partition */
+struct ibmvfc_event {
+ struct list_head queue;
+ struct ibmvfc_host *vhost;
+ struct ibmvfc_target *tgt;
+ struct scsi_cmnd *cmnd;
+ atomic_t free;
+ union ibmvfc_iu *xfer_iu;
+ void (*done) (struct ibmvfc_event *);
+ struct ibmvfc_crq crq;
+ union ibmvfc_iu iu;
+ union ibmvfc_iu *sync_iu;
+ struct srp_direct_buf *ext_list;
+ dma_addr_t ext_list_token;
+ struct completion comp;
+ struct timer_list timer;
+};
+
+/* a pool of event structs for use */
+struct ibmvfc_event_pool {
+ struct ibmvfc_event *events;
+ u32 size;
+ union ibmvfc_iu *iu_storage;
+ dma_addr_t iu_token;
+};
+
+enum ibmvfc_host_action {
+ IBMVFC_HOST_ACTION_NONE = 0,
+ IBMVFC_HOST_ACTION_INIT,
+ IBMVFC_HOST_ACTION_INIT_WAIT,
+ IBMVFC_HOST_ACTION_QUERY,
+ IBMVFC_HOST_ACTION_QUERY_TGTS,
+ IBMVFC_HOST_ACTION_TGT_DEL,
+ IBMVFC_HOST_ACTION_ALLOC_TGTS,
+ IBMVFC_HOST_ACTION_TGT_INIT,
+ IBMVFC_HOST_ACTION_TGT_ADD,
+};
+
+enum ibmvfc_host_state {
+ IBMVFC_NO_CRQ = 0,
+ IBMVFC_INITIALIZING,
+ IBMVFC_ACTIVE,
+ IBMVFC_HALTED,
+ IBMVFC_LINK_DOWN,
+ IBMVFC_LINK_DEAD,
+ IBMVFC_HOST_OFFLINE,
+};
+
+struct ibmvfc_host {
+ char name[8];
+ struct list_head queue;
+ struct Scsi_Host *host;
+ enum ibmvfc_host_state state;
+ enum ibmvfc_host_action action;
+#define IBMVFC_NUM_TRACE_INDEX_BITS 8
+#define IBMVFC_NUM_TRACE_ENTRIES (1 << IBMVFC_NUM_TRACE_INDEX_BITS)
+#define IBMVFC_TRACE_SIZE (sizeof(struct ibmvfc_trace_entry) * IBMVFC_NUM_TRACE_ENTRIES)
+ struct ibmvfc_trace_entry *trace;
+ u32 trace_index:IBMVFC_NUM_TRACE_INDEX_BITS;
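+ /* the bitfield width makes the index wrap at IBMVFC_NUM_TRACE_ENTRIES */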
+ int num_targets;
+ struct list_head targets;
+ struct list_head sent;
+ struct list_head free;
+ struct device *dev;
+ struct ibmvfc_event_pool pool;
+ struct dma_pool *sg_pool;
+ mempool_t *tgt_pool;
+ struct ibmvfc_crq_queue crq;
+ struct ibmvfc_async_crq_queue async_crq;
+ struct ibmvfc_npiv_login login_info;
+ union ibmvfc_npiv_login_data *login_buf;
+ dma_addr_t login_buf_dma;
+ int disc_buf_sz;
+ int log_level;
+ struct ibmvfc_discover_targets_buf *disc_buf;
+ int task_set;
+ int init_retries;
+ int discovery_threads;
+ int client_migrated;
+ int reinit;
+ int events_to_log;
+#define IBMVFC_AE_LINKUP 0x0001
+#define IBMVFC_AE_LINKDOWN 0x0002
+#define IBMVFC_AE_RSCN 0x0004
+ dma_addr_t disc_buf_dma;
+ unsigned int partition_number;
+ char partition_name[97];
+ void (*job_step) (struct ibmvfc_host *);
+ struct task_struct *work_thread;
+ wait_queue_head_t init_wait_q;
+ wait_queue_head_t work_wait_q;
+};
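
Because trace_index is declared as an IBMVFC_NUM_TRACE_INDEX_BITS-wide bitfield, incrementing it wraps at IBMVFC_NUM_TRACE_ENTRIES with no explicit masking. The wrap behavior in isolation (standalone C, illustrative):

#include <stdio.h>

struct trace_idx { unsigned idx:8; };   /* mirrors the 8-bit field above */

int main(void)
{
        struct trace_idx t = { .idx = 255 };

        t.idx++;                        /* wraps to 0; no (idx & mask) needed */
        printf("%u\n", t.idx);          /* prints 0 */
        return 0;
}
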
+
+#define DBG_CMD(CMD) do { if (ibmvfc_debug) CMD; } while (0)
+
+#define tgt_dbg(t, fmt, ...) \
+ DBG_CMD(dev_info((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__))
+
+#define tgt_err(t, fmt, ...) \
+ dev_err((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
+
+#define ibmvfc_dbg(vhost, ...) \
+ DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__))
+
+#define ibmvfc_log(vhost, level, ...) \
+ do { \
+ if (level >= (vhost)->log_level) \
+ dev_err((vhost)->dev, ##__VA_ARGS__); \
+ } while (0)
+
+#define ENTER DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Entering %s\n", __FUNCTION__))
+#define LEAVE DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Leaving %s\n", __FUNCTION__))
+
+#ifdef CONFIG_SCSI_IBMVFC_TRACE
+#define ibmvfc_create_trace_file(kobj, attr) sysfs_create_bin_file(kobj, attr)
+#define ibmvfc_remove_trace_file(kobj, attr) sysfs_remove_bin_file(kobj, attr)
+#else
+#define ibmvfc_create_trace_file(kobj, attr) 0
+#define ibmvfc_remove_trace_file(kobj, attr) do { } while (0)
+#endif
+
+#endif
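
Taken together, the macros give two gates: DBG_CMD() only emits when the ibmvfc_debug module flag is set, while ibmvfc_log() compares the message level against the per-host log_level at run time. A hypothetical call site, just to show how the pieces compose (the function itself is not part of this patch):

static void ibmvfc_link_event_sketch(struct ibmvfc_host *vhost,
                                     struct ibmvfc_target *tgt)
{
        ENTER;
        tgt_dbg(tgt, "sending PLOGI\n");        /* only with ibmvfc_debug */
        ibmvfc_log(vhost, 3, "link down\n");    /* honors vhost->log_level */
        LEAVE;
}
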
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 44d8d5163a1a..f843c1383a4b 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -60,6 +60,13 @@
#define IDESCSI_DEBUG_LOG 0
+#if IDESCSI_DEBUG_LOG
+#define debug_log(fmt, args...) \
+ printk(KERN_INFO "ide-scsi: " fmt, ## args)
+#else
+#define debug_log(fmt, args...) do {} while (0)
+#endif
+
/*
* SCSI command transformation layer
*/
@@ -129,14 +136,15 @@ static inline idescsi_scsi_t *drive_to_idescsi(ide_drive_t *ide_drive)
#define IDESCSI_PC_RQ 90
/*
- * PIO data transfer routines using the scatter gather table.
+ * PIO data transfer routine using the scatter gather table.
*/
-static void idescsi_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
- unsigned int bcount)
+static void ide_scsi_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
+ unsigned int bcount, int write)
{
ide_hwif_t *hwif = drive->hwif;
- int count;
+ xfer_func_t *xf = write ? hwif->output_data : hwif->input_data;
char *buf;
+ int count;
while (bcount) {
count = min(pc->sg->length - pc->b_count, bcount);
@@ -145,13 +153,13 @@ static void idescsi_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
local_irq_save(flags);
buf = kmap_atomic(sg_page(pc->sg), KM_IRQ0) +
- pc->sg->offset;
- hwif->input_data(drive, NULL, buf + pc->b_count, count);
+ pc->sg->offset;
+ xf(drive, NULL, buf + pc->b_count, count);
kunmap_atomic(buf - pc->sg->offset, KM_IRQ0);
local_irq_restore(flags);
} else {
buf = sg_virt(pc->sg);
- hwif->input_data(drive, NULL, buf + pc->b_count, count);
+ xf(drive, NULL, buf + pc->b_count, count);
}
bcount -= count; pc->b_count += count;
if (pc->b_count == pc->sg->length) {
@@ -163,51 +171,34 @@ static void idescsi_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
}
if (bcount) {
- printk (KERN_ERR "ide-scsi: scatter gather table too small, discarding data\n");
- ide_pad_transfer(drive, 0, bcount);
+ printk(KERN_ERR "%s: scatter gather table too small, %s\n",
+ drive->name, write ? "padding with zeros"
+ : "discarding data");
+ ide_pad_transfer(drive, write, bcount);
}
}
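
The rewrite collapses the old input/output twins into one loop by binding the direction-specific transfer hook to xf up front. The same shape in portable C (names and the 512-byte chunk size are illustrative):

#include <stddef.h>

typedef void (*xfer_fn)(char *buf, size_t count);

static void pio_sketch(xfer_fn input, xfer_fn output,
                       char *buf, size_t bcount, int write)
{
        xfer_fn xf = write ? output : input;    /* one dispatch point */

        while (bcount) {
                size_t count = bcount < 512 ? bcount : 512;

                xf(buf, count);         /* same loop, either direction */
                buf += count;
                bcount -= count;
        }
}
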
-static void idescsi_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
- unsigned int bcount)
+static void ide_scsi_hex_dump(u8 *data, int len)
{
- ide_hwif_t *hwif = drive->hwif;
- int count;
- char *buf;
+ print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1, data, len, 0);
+}
- while (bcount) {
- count = min(pc->sg->length - pc->b_count, bcount);
- if (PageHighMem(sg_page(pc->sg))) {
- unsigned long flags;
+static int idescsi_end_request(ide_drive_t *, int, int);
- local_irq_save(flags);
- buf = kmap_atomic(sg_page(pc->sg), KM_IRQ0) +
- pc->sg->offset;
- hwif->output_data(drive, NULL, buf + pc->b_count, count);
- kunmap_atomic(buf - pc->sg->offset, KM_IRQ0);
- local_irq_restore(flags);
- } else {
- buf = sg_virt(pc->sg);
- hwif->output_data(drive, NULL, buf + pc->b_count, count);
- }
- bcount -= count; pc->b_count += count;
- if (pc->b_count == pc->sg->length) {
- if (!--pc->sg_cnt)
- break;
- pc->sg = sg_next(pc->sg);
- pc->b_count = 0;
- }
- }
+static void ide_scsi_callback(ide_drive_t *drive)
+{
+ idescsi_scsi_t *scsi = drive_to_idescsi(drive);
+ struct ide_atapi_pc *pc = scsi->pc;
- if (bcount) {
- printk (KERN_ERR "ide-scsi: scatter gather table too small, padding with zeros\n");
- ide_pad_transfer(drive, 1, bcount);
- }
-}
+ if (pc->flags & PC_FLAG_TIMEDOUT)
+ debug_log("%s: got timed out packet %lu at %lu\n", __func__,
+ pc->scsi_cmd->serial_number, jiffies);
+ /* end this request now - scsi should retry it */
+ else if (test_bit(IDESCSI_LOG_CMD, &scsi->log))
+ printk(KERN_INFO "Packet command completed, %d bytes"
+ " transferred\n", pc->xferred);
-static void ide_scsi_hex_dump(u8 *data, int len)
-{
- print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1, data, len, 0);
+ idescsi_end_request(drive, 1, 0);
}
static int idescsi_check_condition(ide_drive_t *drive,
@@ -228,14 +219,16 @@ static int idescsi_check_condition(ide_drive_t *drive,
kfree(pc);
return -ENOMEM;
}
- ide_init_drive_cmd(rq);
+ blk_rq_init(NULL, rq);
rq->special = (char *) pc;
pc->rq = rq;
pc->buf = buf;
pc->c[0] = REQUEST_SENSE;
pc->c[4] = pc->req_xfer = pc->buf_size = SCSI_SENSE_BUFFERSIZE;
rq->cmd_type = REQ_TYPE_SENSE;
+ rq->cmd_flags |= REQ_PREEMPT;
pc->timeout = jiffies + WAIT_READY;
+ pc->callback = ide_scsi_callback;
/* NOTE! Save the failed packet command in "rq->buffer" */
rq->buffer = (void *) failed_cmd->special;
pc->scsi_cmd = ((struct ide_atapi_pc *) failed_cmd->special)->scsi_cmd;
@@ -244,11 +237,10 @@ static int idescsi_check_condition(ide_drive_t *drive,
ide_scsi_hex_dump(pc->c, 6);
}
rq->rq_disk = scsi->disk;
- return ide_do_drive_cmd(drive, rq, ide_preempt);
+ ide_do_drive_cmd(drive, rq);
+ return 0;
}
-static int idescsi_end_request(ide_drive_t *, int, int);
-
static ide_startstop_t
idescsi_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
@@ -256,7 +248,7 @@ idescsi_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
/* force an abort */
- hwif->OUTBSYNC(drive, WIN_IDLEIMMEDIATE,
+ hwif->OUTBSYNC(hwif, WIN_IDLEIMMEDIATE,
hwif->io_ports.command_addr);
rq->errors++;
@@ -266,20 +258,6 @@ idescsi_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
return ide_stopped;
}
-static ide_startstop_t
-idescsi_atapi_abort(ide_drive_t *drive, struct request *rq)
-{
-#if IDESCSI_DEBUG_LOG
- printk(KERN_WARNING "idescsi_atapi_abort called for %lu\n",
- ((struct ide_atapi_pc *) rq->special)->scsi_cmd->serial_number);
-#endif
- rq->errors |= ERROR_MAX;
-
- idescsi_end_request(drive, 0, 0);
-
- return ide_stopped;
-}
-
static int idescsi_end_request (ide_drive_t *drive, int uptodate, int nrsecs)
{
idescsi_scsi_t *scsi = drive_to_idescsi(drive);
@@ -351,9 +329,9 @@ static int idescsi_expiry(ide_drive_t *drive)
idescsi_scsi_t *scsi = drive_to_idescsi(drive);
struct ide_atapi_pc *pc = scsi->pc;
-#if IDESCSI_DEBUG_LOG
- printk(KERN_WARNING "idescsi_expiry called for %lu at %lu\n", pc->scsi_cmd->serial_number, jiffies);
-#endif
+ debug_log("%s called for %lu at %lu\n", __func__,
+ pc->scsi_cmd->serial_number, jiffies);
+
pc->flags |= PC_FLAG_TIMEDOUT;
return 0; /* we do not want the ide subsystem to retry */
@@ -365,141 +343,19 @@ static int idescsi_expiry(ide_drive_t *drive)
static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive)
{
idescsi_scsi_t *scsi = drive_to_idescsi(drive);
- ide_hwif_t *hwif = drive->hwif;
struct ide_atapi_pc *pc = scsi->pc;
- struct request *rq = pc->rq;
- unsigned int temp;
- u16 bcount;
- u8 stat, ireason;
-
-#if IDESCSI_DEBUG_LOG
- printk (KERN_INFO "ide-scsi: Reached idescsi_pc_intr interrupt handler\n");
-#endif /* IDESCSI_DEBUG_LOG */
-
- if (pc->flags & PC_FLAG_TIMEDOUT) {
-#if IDESCSI_DEBUG_LOG
- printk(KERN_WARNING "idescsi_pc_intr: got timed out packet %lu at %lu\n",
- pc->scsi_cmd->serial_number, jiffies);
-#endif
- /* end this request now - scsi should retry it*/
- idescsi_end_request (drive, 1, 0);
- return ide_stopped;
- }
- if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
- pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
-#if IDESCSI_DEBUG_LOG
- printk ("ide-scsi: %s: DMA complete\n", drive->name);
-#endif /* IDESCSI_DEBUG_LOG */
- pc->xferred = pc->req_xfer;
- (void)hwif->dma_ops->dma_end(drive);
- }
-
- /* Clear the interrupt */
- stat = ide_read_status(drive);
-
- if ((stat & DRQ_STAT) == 0) {
- /* No more interrupts */
- if (test_bit(IDESCSI_LOG_CMD, &scsi->log))
- printk(KERN_INFO "Packet command completed, %d bytes"
- " transferred\n", pc->xferred);
- local_irq_enable_in_hardirq();
- if (stat & ERR_STAT)
- rq->errors++;
- idescsi_end_request (drive, 1, 0);
- return ide_stopped;
- }
- bcount = (hwif->INB(hwif->io_ports.lbah_addr) << 8) |
- hwif->INB(hwif->io_ports.lbam_addr);
- ireason = hwif->INB(hwif->io_ports.nsect_addr);
-
- if (ireason & CD) {
- printk(KERN_ERR "ide-scsi: CoD != 0 in idescsi_pc_intr\n");
- return ide_do_reset (drive);
- }
- if (ireason & IO) {
- temp = pc->xferred + bcount;
- if (temp > pc->req_xfer) {
- if (temp > pc->buf_size) {
- printk(KERN_ERR "ide-scsi: The scsi wants to "
- "send us more data than expected "
- "- discarding data\n");
- temp = pc->buf_size - pc->xferred;
- if (temp) {
- pc->flags &= ~PC_FLAG_WRITING;
- if (pc->sg)
- idescsi_input_buffers(drive, pc,
- temp);
- else
- hwif->input_data(drive, NULL,
- pc->cur_pos, temp);
- printk(KERN_ERR "ide-scsi: transferred"
- " %d of %d bytes\n",
- temp, bcount);
- }
- pc->xferred += temp;
- pc->cur_pos += temp;
- ide_pad_transfer(drive, 0, bcount - temp);
- ide_set_handler(drive, &idescsi_pc_intr, get_timeout(pc), idescsi_expiry);
- return ide_started;
- }
-#if IDESCSI_DEBUG_LOG
- printk (KERN_NOTICE "ide-scsi: The scsi wants to send us more data than expected - allowing transfer\n");
-#endif /* IDESCSI_DEBUG_LOG */
- }
- }
- if (ireason & IO) {
- pc->flags &= ~PC_FLAG_WRITING;
- if (pc->sg)
- idescsi_input_buffers(drive, pc, bcount);
- else
- hwif->input_data(drive, NULL, pc->cur_pos, bcount);
- } else {
- pc->flags |= PC_FLAG_WRITING;
- if (pc->sg)
- idescsi_output_buffers(drive, pc, bcount);
- else
- hwif->output_data(drive, NULL, pc->cur_pos, bcount);
- }
- /* Update the current position */
- pc->xferred += bcount;
- pc->cur_pos += bcount;
- /* And set the interrupt handler again */
- ide_set_handler(drive, &idescsi_pc_intr, get_timeout(pc), idescsi_expiry);
- return ide_started;
+ return ide_pc_intr(drive, pc, idescsi_pc_intr, get_timeout(pc),
+ idescsi_expiry, NULL, NULL, NULL,
+ ide_scsi_io_buffers);
}
static ide_startstop_t idescsi_transfer_pc(ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
idescsi_scsi_t *scsi = drive_to_idescsi(drive);
- struct ide_atapi_pc *pc = scsi->pc;
- ide_startstop_t startstop;
- u8 ireason;
-
- if (ide_wait_stat(&startstop,drive,DRQ_STAT,BUSY_STAT,WAIT_READY)) {
- printk(KERN_ERR "ide-scsi: Strange, packet command "
- "initiated yet DRQ isn't asserted\n");
- return startstop;
- }
- ireason = hwif->INB(hwif->io_ports.nsect_addr);
- if ((ireason & CD) == 0 || (ireason & IO)) {
- printk(KERN_ERR "ide-scsi: (IO,CoD) != (0,1) while "
- "issuing a packet command\n");
- return ide_do_reset (drive);
- }
- BUG_ON(HWGROUP(drive)->handler != NULL);
- /* Set the interrupt routine */
- ide_set_handler(drive, &idescsi_pc_intr, get_timeout(pc), idescsi_expiry);
- /* Send the actual packet */
- hwif->output_data(drive, NULL, scsi->pc->c, 12);
-
- if (pc->flags & PC_FLAG_DMA_OK) {
- pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
- hwif->dma_ops->dma_start(drive);
- }
- return ide_started;
+ return ide_transfer_pc(drive, scsi->pc, idescsi_pc_intr,
+ get_timeout(scsi->pc), idescsi_expiry);
}
static inline int idescsi_set_direction(struct ide_atapi_pc *pc)
@@ -545,38 +401,12 @@ static ide_startstop_t idescsi_issue_pc(ide_drive_t *drive,
struct ide_atapi_pc *pc)
{
idescsi_scsi_t *scsi = drive_to_idescsi(drive);
- ide_hwif_t *hwif = drive->hwif;
- u16 bcount;
- u8 dma = 0;
/* Set the current packet command */
scsi->pc = pc;
- /* We haven't transferred any data yet */
- pc->xferred = 0;
- pc->cur_pos = pc->buf;
- /* Request to transfer the entire buffer at once */
- bcount = min(pc->req_xfer, 63 * 1024);
-
- if (drive->using_dma && !idescsi_map_sg(drive, pc)) {
- hwif->sg_mapped = 1;
- dma = !hwif->dma_ops->dma_setup(drive);
- hwif->sg_mapped = 0;
- }
- ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK, bcount, dma);
-
- if (dma)
- pc->flags |= PC_FLAG_DMA_OK;
-
- if (test_bit(IDESCSI_DRQ_INTERRUPT, &scsi->flags)) {
- ide_execute_command(drive, WIN_PACKETCMD, &idescsi_transfer_pc,
- get_timeout(pc), idescsi_expiry);
- return ide_started;
- } else {
- /* Issue the packet command */
- ide_execute_pkt_cmd(drive);
- return idescsi_transfer_pc(drive);
- }
+ return ide_issue_pc(drive, pc, idescsi_transfer_pc,
+ get_timeout(pc), idescsi_expiry);
}
/*
@@ -584,14 +414,22 @@ static ide_startstop_t idescsi_issue_pc(ide_drive_t *drive,
*/
static ide_startstop_t idescsi_do_request (ide_drive_t *drive, struct request *rq, sector_t block)
{
-#if IDESCSI_DEBUG_LOG
- printk (KERN_INFO "dev: %s, cmd: %x, errors: %d\n", rq->rq_disk->disk_name,rq->cmd[0],rq->errors);
- printk (KERN_INFO "sector: %ld, nr_sectors: %ld, current_nr_sectors: %d\n",rq->sector,rq->nr_sectors,rq->current_nr_sectors);
-#endif /* IDESCSI_DEBUG_LOG */
+ debug_log("dev: %s, cmd: %x, errors: %d\n", rq->rq_disk->disk_name,
+ rq->cmd[0], rq->errors);
+ debug_log("sector: %ld, nr_sectors: %ld, current_nr_sectors: %d\n",
+ rq->sector, rq->nr_sectors, rq->current_nr_sectors);
if (blk_sense_request(rq) || blk_special_request(rq)) {
- return idescsi_issue_pc(drive,
- (struct ide_atapi_pc *) rq->special);
+ struct ide_atapi_pc *pc = (struct ide_atapi_pc *)rq->special;
+ idescsi_scsi_t *scsi = drive_to_idescsi(drive);
+
+ if (test_bit(IDESCSI_DRQ_INTERRUPT, &scsi->flags))
+ pc->flags |= PC_FLAG_DRQ_INTERRUPT;
+
+ if (drive->using_dma && !idescsi_map_sg(drive, pc))
+ pc->flags |= PC_FLAG_DMA_OK;
+
+ return idescsi_issue_pc(drive, pc);
}
blk_dump_rq_flags(rq, "ide-scsi: unsup command");
idescsi_end_request (drive, 0, 0);
@@ -646,6 +484,8 @@ static void ide_scsi_remove(ide_drive_t *drive)
put_disk(g);
ide_scsi_put(scsi);
+
+ drive->scsi = 0;
}
static int ide_scsi_probe(ide_drive_t *);
@@ -671,7 +511,6 @@ static ide_driver_t idescsi_driver = {
.do_request = idescsi_do_request,
.end_request = idescsi_end_request,
.error = idescsi_atapi_error,
- .abort = idescsi_atapi_abort,
#ifdef CONFIG_IDE_PROC_FS
.proc = idescsi_proc,
#endif
@@ -765,6 +604,8 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
memset (pc->c, 0, 12);
pc->flags = 0;
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ pc->flags |= PC_FLAG_WRITING;
pc->rq = rq;
memcpy (pc->c, cmd->cmnd, cmd->cmd_len);
pc->buf = NULL;
@@ -775,6 +616,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
pc->scsi_cmd = cmd;
pc->done = done;
pc->timeout = jiffies + cmd->timeout_per_command;
+ pc->callback = ide_scsi_callback;
if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) {
printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number);
@@ -785,12 +627,11 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
}
}
- ide_init_drive_cmd (rq);
+ blk_rq_init(NULL, rq);
rq->special = (char *) pc;
rq->cmd_type = REQ_TYPE_SPECIAL;
spin_unlock_irq(host->host_lock);
- rq->rq_disk = scsi->disk;
- (void) ide_do_drive_cmd (drive, rq, ide_end);
+ blk_execute_rq_nowait(drive->queue, scsi->disk, rq, 0, NULL);
spin_lock_irq(host->host_lock);
return 0;
abort:
@@ -985,6 +826,8 @@ static int ide_scsi_probe(ide_drive_t *drive)
!(host = scsi_host_alloc(&idescsi_template,sizeof(idescsi_scsi_t))))
return -ENODEV;
+ drive->scsi = 1;
+
g = alloc_disk(1 << PARTN_BITS);
if (!g)
goto out_host_put;
@@ -993,10 +836,10 @@ static int ide_scsi_probe(ide_drive_t *drive)
host->max_id = 1;
-#if IDESCSI_DEBUG_LOG
if (drive->id->last_lun)
- printk(KERN_NOTICE "%s: id->last_lun=%u\n", drive->name, drive->id->last_lun);
-#endif
+ debug_log("%s: id->last_lun=%u\n", drive->name,
+ drive->id->last_lun);
+
if ((drive->id->last_lun & 0x7) != 7)
host->max_lun = (drive->id->last_lun & 0x7) + 1;
else
@@ -1025,6 +868,7 @@ static int ide_scsi_probe(ide_drive_t *drive)
put_disk(g);
out_host_put:
+ drive->scsi = 0;
scsi_host_put(host);
return err;
}
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 999e91ea7451..e7a3a6554425 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -71,6 +71,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
+#include <linux/hdreg.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
@@ -4913,8 +4914,11 @@ static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
struct ipr_resource_entry *res;
res = (struct ipr_resource_entry *)sdev->hostdata;
- if (res && ipr_is_gata(res))
+ if (res && ipr_is_gata(res)) {
+ if (cmd == HDIO_GET_IDENTITY)
+ return -ENOTTY;
return ata_scsi_ioctl(sdev, cmd, arg);
+ }
return -EINVAL;
}
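
Rejecting HDIO_GET_IDENTITY with -ENOTTY before calling into ata_scsi_ioctl() keeps ipr's SATA devices from returning identify data the adapter cannot supply, and gives callers a clean "unsupported" error. From userspace the effect looks like this sketch (device path illustrative):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
        unsigned char id[512];                  /* struct hd_driveid sized */
        int fd = open("/dev/sdb", O_RDONLY);

        if (fd < 0)
                return 1;
        if (ioctl(fd, HDIO_GET_IDENTITY, id) < 0 && errno == ENOTTY)
                printf("HDIO_GET_IDENTITY not supported here\n");
        return 0;
}
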
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 72b9b2a0eba3..2a2f0094570f 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -64,6 +64,10 @@ MODULE_LICENSE("GPL");
#define BUG_ON(expr)
#endif
+static struct scsi_transport_template *iscsi_tcp_scsi_transport;
+static struct scsi_host_template iscsi_sht;
+static struct iscsi_transport iscsi_tcp_transport;
+
static unsigned int iscsi_max_lun = 512;
module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
@@ -494,39 +498,43 @@ iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
* must be called with session lock
*/
static void
-iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
{
- struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
struct iscsi_r2t_info *r2t;
- /* flush ctask's r2t queues */
- while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
- __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ /* nothing to do for mgmt tasks */
+ if (!task->sc)
+ return;
+
+ /* flush task's r2t queues */
+ while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
+ __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
sizeof(void*));
- debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
+ debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n");
}
- r2t = tcp_ctask->r2t;
+ r2t = tcp_task->r2t;
if (r2t != NULL) {
- __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
sizeof(void*));
- tcp_ctask->r2t = NULL;
+ tcp_task->r2t = NULL;
}
}
/**
* iscsi_data_rsp - SCSI Data-In Response processing
* @conn: iscsi connection
- * @ctask: scsi command task
+ * @task: scsi command task
**/
static int
-iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
struct iscsi_session *session = conn->session;
- struct scsi_cmnd *sc = ctask->sc;
+ struct scsi_cmnd *sc = task->sc;
int datasn = be32_to_cpu(rhdr->datasn);
unsigned total_in_length = scsi_in(sc)->length;
@@ -534,18 +542,18 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
if (tcp_conn->in.datalen == 0)
return 0;
- if (tcp_ctask->exp_datasn != datasn) {
- debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->datasn(%d)\n",
- __FUNCTION__, tcp_ctask->exp_datasn, datasn);
+ if (tcp_task->exp_datasn != datasn) {
+ debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n",
+ __func__, tcp_task->exp_datasn, datasn);
return ISCSI_ERR_DATASN;
}
- tcp_ctask->exp_datasn++;
+ tcp_task->exp_datasn++;
- tcp_ctask->data_offset = be32_to_cpu(rhdr->offset);
- if (tcp_ctask->data_offset + tcp_conn->in.datalen > total_in_length) {
+ tcp_task->data_offset = be32_to_cpu(rhdr->offset);
+ if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
- __FUNCTION__, tcp_ctask->data_offset,
+ __func__, tcp_task->data_offset,
tcp_conn->in.datalen, total_in_length);
return ISCSI_ERR_DATA_OFFSET;
}
@@ -574,7 +582,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
/**
* iscsi_solicit_data_init - initialize first Data-Out
* @conn: iscsi connection
- * @ctask: scsi command task
+ * @task: scsi command task
* @r2t: R2T info
*
* Notes:
@@ -584,7 +592,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
* This function is called with connection lock taken.
**/
static void
-iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
struct iscsi_r2t_info *r2t)
{
struct iscsi_data *hdr;
@@ -595,8 +603,8 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
r2t->solicit_datasn++;
hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
- memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
- hdr->itt = ctask->hdr->itt;
+ memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+ hdr->itt = task->hdr->itt;
hdr->exp_statsn = r2t->exp_statsn;
hdr->offset = cpu_to_be32(r2t->data_offset);
if (r2t->data_length > conn->max_xmit_dlength) {
@@ -616,14 +624,14 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
/**
* iscsi_r2t_rsp - iSCSI R2T Response processing
* @conn: iscsi connection
- * @ctask: scsi command task
+ * @task: scsi command task
**/
static int
-iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
{
struct iscsi_r2t_info *r2t;
struct iscsi_session *session = conn->session;
- struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
int r2tsn = be32_to_cpu(rhdr->r2tsn);
@@ -636,23 +644,23 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
return ISCSI_ERR_DATALEN;
}
- if (tcp_ctask->exp_datasn != r2tsn){
- debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
- __FUNCTION__, tcp_ctask->exp_datasn, r2tsn);
+ if (tcp_task->exp_datasn != r2tsn){
+ debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
+ __func__, tcp_task->exp_datasn, r2tsn);
return ISCSI_ERR_R2TSN;
}
/* fill-in new R2T associated with the task */
iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
- if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) {
+ if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
iscsi_conn_printk(KERN_INFO, conn,
"dropping R2T itt %d in recovery.\n",
- ctask->itt);
+ task->itt);
return 0;
}
- rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
+ rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
BUG_ON(!rc);
r2t->exp_statsn = rhdr->statsn;
@@ -660,7 +668,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
if (r2t->data_length == 0) {
iscsi_conn_printk(KERN_ERR, conn,
"invalid R2T with zero data len\n");
- __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
sizeof(void*));
return ISCSI_ERR_DATALEN;
}
@@ -671,12 +679,12 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
r2t->data_length, session->max_burst);
r2t->data_offset = be32_to_cpu(rhdr->data_offset);
- if (r2t->data_offset + r2t->data_length > scsi_out(ctask->sc)->length) {
+ if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
iscsi_conn_printk(KERN_ERR, conn,
"invalid R2T with data len %u at offset %u "
"and total length %d\n", r2t->data_length,
- r2t->data_offset, scsi_out(ctask->sc)->length);
- __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ r2t->data_offset, scsi_out(task->sc)->length);
+ __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
sizeof(void*));
return ISCSI_ERR_DATALEN;
}
@@ -684,13 +692,13 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
r2t->ttt = rhdr->ttt; /* no flip */
r2t->solicit_datasn = 0;
- iscsi_solicit_data_init(conn, ctask, r2t);
+ iscsi_solicit_data_init(conn, task, r2t);
- tcp_ctask->exp_datasn = r2tsn + 1;
- __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
+ tcp_task->exp_datasn = r2tsn + 1;
+ __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
conn->r2t_pdus_cnt++;
- iscsi_requeue_ctask(ctask);
+ iscsi_requeue_task(task);
return 0;
}
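
r2tpool.queue and r2tqueue are both old-style kfifos of pointers, so every handoff in this function moves a struct iscsi_r2t_info * between the preallocated free pool and the pending-transmit fifo. The idiom in isolation (a fragment-level sketch; error paths elided):

static void r2t_handoff_sketch(struct iscsi_tcp_task *tcp_task)
{
        struct iscsi_r2t_info *r2t;

        /* borrow a descriptor from the free pool */
        if (!__kfifo_get(tcp_task->r2tpool.queue, (void *)&r2t,
                         sizeof(void *)))
                return;
        /* ...fill *r2t from the incoming R2T PDU... */
        /* hand it to the xmit side */
        __kfifo_put(tcp_task->r2tqueue, (void *)&r2t, sizeof(void *));
}
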
@@ -733,10 +741,8 @@ static int
iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
{
int rc = 0, opcode, ahslen;
- struct iscsi_session *session = conn->session;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- struct iscsi_cmd_task *ctask;
- uint32_t itt;
+ struct iscsi_task *task;
/* verify PDU length */
tcp_conn->in.datalen = ntoh24(hdr->dlength);
@@ -754,7 +760,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
opcode = hdr->opcode & ISCSI_OPCODE_MASK;
/* verify itt (itt encoding: age+cid+itt) */
- rc = iscsi_verify_itt(conn, hdr, &itt);
+ rc = iscsi_verify_itt(conn, hdr->itt);
if (rc)
return rc;
@@ -763,16 +769,21 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
switch(opcode) {
case ISCSI_OP_SCSI_DATA_IN:
- ctask = session->cmds[itt];
spin_lock(&conn->session->lock);
- rc = iscsi_data_rsp(conn, ctask);
- spin_unlock(&conn->session->lock);
- if (rc)
- return rc;
+ task = iscsi_itt_to_ctask(conn, hdr->itt);
+ if (!task)
+ rc = ISCSI_ERR_BAD_ITT;
+ else
+ rc = iscsi_data_rsp(conn, task);
+ if (rc) {
+ spin_unlock(&conn->session->lock);
+ break;
+ }
+
if (tcp_conn->in.datalen) {
- struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
struct hash_desc *rx_hash = NULL;
- struct scsi_data_buffer *sdb = scsi_in(ctask->sc);
+ struct scsi_data_buffer *sdb = scsi_in(task->sc);
/*
* Setup copy of Data-In into the Scsi_Cmnd
@@ -787,17 +798,21 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
"datalen=%d)\n", tcp_conn,
- tcp_ctask->data_offset,
+ tcp_task->data_offset,
tcp_conn->in.datalen);
- return iscsi_segment_seek_sg(&tcp_conn->in.segment,
- sdb->table.sgl,
- sdb->table.nents,
- tcp_ctask->data_offset,
- tcp_conn->in.datalen,
- iscsi_tcp_process_data_in,
- rx_hash);
+ rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
+ sdb->table.sgl,
+ sdb->table.nents,
+ tcp_task->data_offset,
+ tcp_conn->in.datalen,
+ iscsi_tcp_process_data_in,
+ rx_hash);
+ spin_unlock(&conn->session->lock);
+ return rc;
}
- /* fall through */
+ rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
+ spin_unlock(&conn->session->lock);
+ break;
case ISCSI_OP_SCSI_CMD_RSP:
if (tcp_conn->in.datalen) {
iscsi_tcp_data_recv_prep(tcp_conn);
@@ -806,15 +821,17 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
break;
case ISCSI_OP_R2T:
- ctask = session->cmds[itt];
- if (ahslen)
+ spin_lock(&conn->session->lock);
+ task = iscsi_itt_to_ctask(conn, hdr->itt);
+ if (!task)
+ rc = ISCSI_ERR_BAD_ITT;
+ else if (ahslen)
rc = ISCSI_ERR_AHSLEN;
- else if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
- spin_lock(&session->lock);
- rc = iscsi_r2t_rsp(conn, ctask);
- spin_unlock(&session->lock);
- } else
+ else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
+ rc = iscsi_r2t_rsp(conn, task);
+ else
rc = ISCSI_ERR_PROTO;
+ spin_unlock(&conn->session->lock);
break;
case ISCSI_OP_LOGIN_RSP:
case ISCSI_OP_TEXT_RSP:
@@ -1176,7 +1193,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- debug_tcp("%s(%p%s)\n", __FUNCTION__, tcp_conn,
+ debug_tcp("%s(%p%s)\n", __func__, tcp_conn,
conn->hdrdgst_en? ", digest enabled" : "");
/* Clear the data segment - needs to be filled in by the
@@ -1185,7 +1202,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
/* If header digest is enabled, compute the CRC and
* place the digest into the same buffer. We make
- * sure that both iscsi_tcp_ctask and mtask have
+ * sure that both iscsi_tcp_task and mtask have
* sufficient room.
*/
if (conn->hdrdgst_en) {
@@ -1217,7 +1234,7 @@ iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
struct hash_desc *tx_hash = NULL;
unsigned int hdr_spec_len;
- debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __FUNCTION__,
+ debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __func__,
tcp_conn, offset, len,
conn->datadgst_en? ", digest enabled" : "");
@@ -1242,7 +1259,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
struct hash_desc *tx_hash = NULL;
unsigned int hdr_spec_len;
- debug_tcp("%s(%p, datalen=%d%s)\n", __FUNCTION__, tcp_conn, len,
+ debug_tcp("%s(%p, datalen=%d%s)\n", __func__, tcp_conn, len,
conn->datadgst_en? ", digest enabled" : "");
/* Make sure the datalen matches what the caller
@@ -1260,7 +1277,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
/**
* iscsi_solicit_data_cont - initialize next Data-Out
* @conn: iscsi connection
- * @ctask: scsi command task
+ * @task: scsi command task
* @r2t: R2T info
* @left: bytes left to transfer
*
@@ -1271,7 +1288,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
* Called under connection lock.
**/
static int
-iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
struct iscsi_r2t_info *r2t)
{
struct iscsi_data *hdr;
@@ -1288,8 +1305,8 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
r2t->solicit_datasn++;
hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
- memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
- hdr->itt = ctask->hdr->itt;
+ memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+ hdr->itt = task->hdr->itt;
hdr->exp_statsn = r2t->exp_statsn;
new_offset = r2t->data_offset + r2t->sent;
hdr->offset = cpu_to_be32(new_offset);
@@ -1307,89 +1324,76 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
}
/**
- * iscsi_tcp_ctask - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ * iscsi_tcp_task - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
* @conn: iscsi connection
- * @ctask: scsi command task
+ * @task: scsi command task
* @sc: scsi command
**/
static int
-iscsi_tcp_ctask_init(struct iscsi_cmd_task *ctask)
+iscsi_tcp_task_init(struct iscsi_task *task)
{
- struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
- struct iscsi_conn *conn = ctask->conn;
- struct scsi_cmnd *sc = ctask->sc;
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+ struct iscsi_conn *conn = task->conn;
+ struct scsi_cmnd *sc = task->sc;
int err;
- BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
- tcp_ctask->sent = 0;
- tcp_ctask->exp_datasn = 0;
+ if (!sc) {
+ /*
+ * mgmt tasks do not have a scatterlist since they come
+ * in from the iscsi interface.
+ */
+ debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id,
+ task->itt);
+
+ /* Prepare PDU, optionally w/ immediate data */
+ iscsi_tcp_send_hdr_prep(conn, task->hdr, sizeof(*task->hdr));
+
+ /* If we have immediate data, attach a payload */
+ if (task->data_count)
+ iscsi_tcp_send_linear_data_prepare(conn, task->data,
+ task->data_count);
+ return 0;
+ }
+
+ BUG_ON(__kfifo_len(tcp_task->r2tqueue));
+ tcp_task->sent = 0;
+ tcp_task->exp_datasn = 0;
/* Prepare PDU, optionally w/ immediate data */
- debug_scsi("ctask deq [cid %d itt 0x%x imm %d unsol %d]\n",
- conn->id, ctask->itt, ctask->imm_count,
- ctask->unsol_count);
- iscsi_tcp_send_hdr_prep(conn, ctask->hdr, ctask->hdr_len);
+ debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n",
+ conn->id, task->itt, task->imm_count,
+ task->unsol_count);
+ iscsi_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
- if (!ctask->imm_count)
+ if (!task->imm_count)
return 0;
/* If we have immediate data, attach a payload */
err = iscsi_tcp_send_data_prep(conn, scsi_out(sc)->table.sgl,
scsi_out(sc)->table.nents,
- 0, ctask->imm_count);
+ 0, task->imm_count);
if (err)
return err;
- tcp_ctask->sent += ctask->imm_count;
- ctask->imm_count = 0;
- return 0;
-}
-
-/**
- * iscsi_tcp_mtask_xmit - xmit management(immediate) task
- * @conn: iscsi connection
- * @mtask: task management task
- *
- * Notes:
- * The function can return -EAGAIN in which case caller must
- * call it again later, or recover. '0' return code means successful
- * xmit.
- **/
-static int
-iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
-{
- int rc;
-
- /* Flush any pending data first. */
- rc = iscsi_tcp_flush(conn);
- if (rc < 0)
- return rc;
-
- if (mtask->hdr->itt == RESERVED_ITT) {
- struct iscsi_session *session = conn->session;
-
- spin_lock_bh(&session->lock);
- iscsi_free_mgmt_task(conn, mtask);
- spin_unlock_bh(&session->lock);
- }
-
+ tcp_task->sent += task->imm_count;
+ task->imm_count = 0;
return 0;
}
/*
- * iscsi_tcp_ctask_xmit - xmit normal PDU task
- * @conn: iscsi connection
- * @ctask: iscsi command task
+ * iscsi_tcp_task_xmit - xmit normal PDU task
+ * @task: iscsi command task
*
 * We're expected to return 0 when everything was transmitted successfully,
* -EAGAIN if there's still data in the queue, or != 0 for any other kind
* of error.
*/
static int
-iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+iscsi_tcp_task_xmit(struct iscsi_task *task)
{
- struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
- struct scsi_cmnd *sc = ctask->sc;
- struct scsi_data_buffer *sdb = scsi_out(sc);
+ struct iscsi_conn *conn = task->conn;
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
+ struct scsi_cmnd *sc = task->sc;
+ struct scsi_data_buffer *sdb;
int rc = 0;
flush:
@@ -1398,31 +1402,39 @@ flush:
if (rc < 0)
return rc;
+ /* mgmt command */
+ if (!sc) {
+ if (task->hdr->itt == RESERVED_ITT)
+ iscsi_put_task(task);
+ return 0;
+ }
+
/* Are we done already? */
if (sc->sc_data_direction != DMA_TO_DEVICE)
return 0;
- if (ctask->unsol_count != 0) {
- struct iscsi_data *hdr = &tcp_ctask->unsol_dtask.hdr;
+ sdb = scsi_out(sc);
+ if (task->unsol_count != 0) {
+ struct iscsi_data *hdr = &tcp_task->unsol_dtask.hdr;
/* Prepare a header for the unsolicited PDU.
* The amount of data we want to send will be
- * in ctask->data_count.
+ * in task->data_count.
* FIXME: return the data count instead.
*/
- iscsi_prep_unsolicit_data_pdu(ctask, hdr);
+ iscsi_prep_unsolicit_data_pdu(task, hdr);
debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n",
- ctask->itt, tcp_ctask->sent, ctask->data_count);
+ task->itt, tcp_task->sent, task->data_count);
iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr));
rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
- sdb->table.nents, tcp_ctask->sent,
- ctask->data_count);
+ sdb->table.nents, tcp_task->sent,
+ task->data_count);
if (rc)
goto fail;
- tcp_ctask->sent += ctask->data_count;
- ctask->unsol_count -= ctask->data_count;
+ tcp_task->sent += task->data_count;
+ task->unsol_count -= task->data_count;
goto flush;
} else {
struct iscsi_session *session = conn->session;
@@ -1431,22 +1443,22 @@ flush:
/* All unsolicited PDUs sent. Check for solicited PDUs.
*/
spin_lock_bh(&session->lock);
- r2t = tcp_ctask->r2t;
+ r2t = tcp_task->r2t;
if (r2t != NULL) {
/* Continue with this R2T? */
- if (!iscsi_solicit_data_cont(conn, ctask, r2t)) {
+ if (!iscsi_solicit_data_cont(conn, task, r2t)) {
debug_scsi(" done with r2t %p\n", r2t);
- __kfifo_put(tcp_ctask->r2tpool.queue,
+ __kfifo_put(tcp_task->r2tpool.queue,
(void*)&r2t, sizeof(void*));
- tcp_ctask->r2t = r2t = NULL;
+ tcp_task->r2t = r2t = NULL;
}
}
if (r2t == NULL) {
- __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
+ __kfifo_get(tcp_task->r2tqueue, (void*)&tcp_task->r2t,
sizeof(void*));
- r2t = tcp_ctask->r2t;
+ r2t = tcp_task->r2t;
}
spin_unlock_bh(&session->lock);
@@ -1457,7 +1469,7 @@ flush:
}
debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
- r2t, r2t->solicit_datasn - 1, ctask->itt,
+ r2t, r2t->solicit_datasn - 1, task->itt,
r2t->data_offset + r2t->sent, r2t->data_count);
iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr,
@@ -1469,7 +1481,7 @@ flush:
r2t->data_count);
if (rc)
goto fail;
- tcp_ctask->sent += r2t->data_count;
+ tcp_task->sent += r2t->data_count;
r2t->sent += r2t->data_count;
goto flush;
}
@@ -1486,7 +1498,7 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
struct iscsi_cls_conn *cls_conn;
struct iscsi_tcp_conn *tcp_conn;
- cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+ cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
if (!cls_conn)
return NULL;
conn = cls_conn->dd_data;
@@ -1496,18 +1508,14 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
*/
conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
- tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL);
- if (!tcp_conn)
- goto tcp_conn_alloc_fail;
-
- conn->dd_data = tcp_conn;
+ tcp_conn = conn->dd_data;
tcp_conn->iscsi_conn = conn;
tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
CRYPTO_ALG_ASYNC);
tcp_conn->tx_hash.flags = 0;
if (IS_ERR(tcp_conn->tx_hash.tfm))
- goto free_tcp_conn;
+ goto free_conn;
tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
CRYPTO_ALG_ASYNC);
@@ -1519,14 +1527,12 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
free_tx_tfm:
crypto_free_hash(tcp_conn->tx_hash.tfm);
-free_tcp_conn:
+free_conn:
iscsi_conn_printk(KERN_ERR, conn,
"Could not create connection due to crc32c "
"loading error. Make sure the crc32c "
"module is built as a module or into the "
"kernel\n");
- kfree(tcp_conn);
-tcp_conn_alloc_fail:
iscsi_conn_teardown(cls_conn);
return NULL;
}
@@ -1547,7 +1553,6 @@ iscsi_tcp_release_conn(struct iscsi_conn *conn)
spin_lock_bh(&session->lock);
tcp_conn->sock = NULL;
- conn->recv_lock = NULL;
spin_unlock_bh(&session->lock);
sockfd_put(sock);
}
@@ -1559,20 +1564,32 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
iscsi_tcp_release_conn(conn);
- iscsi_conn_teardown(cls_conn);
if (tcp_conn->tx_hash.tfm)
crypto_free_hash(tcp_conn->tx_hash.tfm);
if (tcp_conn->rx_hash.tfm)
crypto_free_hash(tcp_conn->rx_hash.tfm);
- kfree(tcp_conn);
+ iscsi_conn_teardown(cls_conn);
}
static void
iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+ /* userspace may have goofed up and not bound us */
+ if (!tcp_conn->sock)
+ return;
+ /*
+ * Make sure our recv side is stopped.
+ * Older tools called conn stop before ep_disconnect
+ * so IO could still be coming in.
+ */
+ write_lock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+ write_unlock_bh(&tcp_conn->sock->sk->sk_callback_lock);
iscsi_conn_stop(cls_conn, flag);
iscsi_tcp_release_conn(conn);
@@ -1623,6 +1640,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
int is_leading)
{
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct iscsi_host *ihost = shost_priv(shost);
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct sock *sk;
@@ -1646,8 +1665,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
if (err)
goto free_socket;
- err = iscsi_tcp_get_addr(conn, sock, conn->local_address,
- &conn->local_port, kernel_getsockname);
+ err = iscsi_tcp_get_addr(conn, sock, ihost->local_address,
+ &ihost->local_port, kernel_getsockname);
if (err)
goto free_socket;
@@ -1664,13 +1683,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
sk->sk_allocation = GFP_ATOMIC;
- /* FIXME: disable Nagle's algorithm */
-
- /*
- * Intercept TCP callbacks for sendfile like receive
- * processing.
- */
- conn->recv_lock = &sk->sk_callback_lock;
iscsi_conn_set_callbacks(conn);
tcp_conn->sendpage = tcp_conn->sock->ops->sendpage;
/*
@@ -1684,21 +1696,6 @@ free_socket:
return err;
}
-/* called with host lock */
-static void
-iscsi_tcp_mtask_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
-{
- debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
-
- /* Prepare PDU, optionally w/ immediate data */
- iscsi_tcp_send_hdr_prep(conn, mtask->hdr, sizeof(*mtask->hdr));
-
- /* If we have immediate data, attach a payload */
- if (mtask->data_count)
- iscsi_tcp_send_linear_data_prepare(conn, mtask->data,
- mtask->data_count);
-}
-
static int
iscsi_r2tpool_alloc(struct iscsi_session *session)
{
@@ -1709,8 +1706,8 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
* initialize per-task: R2T pool and xmit queue
*/
for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
- struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
- struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_task *task = session->cmds[cmd_i];
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
/*
* pre-allocated x4 as much r2ts to handle race when
@@ -1719,16 +1716,16 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
*/
/* R2T pool */
- if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4, NULL,
+ if (iscsi_pool_init(&tcp_task->r2tpool, session->max_r2t * 4, NULL,
sizeof(struct iscsi_r2t_info))) {
goto r2t_alloc_fail;
}
/* R2T xmit queue */
- tcp_ctask->r2tqueue = kfifo_alloc(
+ tcp_task->r2tqueue = kfifo_alloc(
session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
- if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
- iscsi_pool_free(&tcp_ctask->r2tpool);
+ if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
+ iscsi_pool_free(&tcp_task->r2tpool);
goto r2t_alloc_fail;
}
}
@@ -1737,11 +1734,11 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
r2t_alloc_fail:
for (i = 0; i < cmd_i; i++) {
- struct iscsi_cmd_task *ctask = session->cmds[i];
- struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_task *task = session->cmds[i];
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
- kfifo_free(tcp_ctask->r2tqueue);
- iscsi_pool_free(&tcp_ctask->r2tpool);
+ kfifo_free(tcp_task->r2tqueue);
+ iscsi_pool_free(&tcp_task->r2tpool);
}
return -ENOMEM;
}
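
The error path above is the standard partial-unwind idiom: on failure at index cmd_i, free exactly the 0..cmd_i-1 entries that were already set up. The skeleton in standalone C (illustrative):

#include <stdlib.h>

static int alloc_all_sketch(void **slots, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                slots[i] = malloc(64);
                if (!slots[i])
                        goto unwind;
        }
        return 0;
unwind:
        while (--i >= 0)        /* free only what was allocated */
                free(slots[i]);
        return -1;
}
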
@@ -1752,11 +1749,11 @@ iscsi_r2tpool_free(struct iscsi_session *session)
int i;
for (i = 0; i < session->cmds_max; i++) {
- struct iscsi_cmd_task *ctask = session->cmds[i];
- struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_task *task = session->cmds[i];
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
- kfifo_free(tcp_ctask->r2tqueue);
- iscsi_pool_free(&tcp_ctask->r2tpool);
+ kfifo_free(tcp_task->r2tqueue);
+ iscsi_pool_free(&tcp_task->r2tpool);
}
}
@@ -1821,29 +1818,6 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
return len;
}
-static int
-iscsi_tcp_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
- char *buf)
-{
- struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
- int len;
-
- switch (param) {
- case ISCSI_HOST_PARAM_IPADDRESS:
- spin_lock_bh(&session->lock);
- if (!session->leadconn)
- len = -ENODEV;
- else
- len = sprintf(buf, "%s\n",
- session->leadconn->local_address);
- spin_unlock_bh(&session->lock);
- break;
- default:
- return iscsi_host_get_param(shost, param, buf);
- }
- return len;
-}
-
static void
iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
{
@@ -1869,54 +1843,70 @@ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
}
static struct iscsi_cls_session *
-iscsi_tcp_session_create(struct iscsi_transport *iscsit,
- struct scsi_transport_template *scsit,
- uint16_t cmds_max, uint16_t qdepth,
- uint32_t initial_cmdsn, uint32_t *hostno)
+iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
+ uint16_t qdepth, uint32_t initial_cmdsn,
+ uint32_t *hostno)
{
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
- uint32_t hn;
+ struct Scsi_Host *shost;
int cmd_i;
- cls_session = iscsi_session_setup(iscsit, scsit, cmds_max, qdepth,
- sizeof(struct iscsi_tcp_cmd_task),
- sizeof(struct iscsi_tcp_mgmt_task),
- initial_cmdsn, &hn);
- if (!cls_session)
+ if (ep) {
+ printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
return NULL;
- *hostno = hn;
-
- session = class_to_transport_session(cls_session);
- for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
- struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
- struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
-
- ctask->hdr = &tcp_ctask->hdr.cmd_hdr;
- ctask->hdr_max = sizeof(tcp_ctask->hdr) - ISCSI_DIGEST_SIZE;
}
- for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
- struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
- struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
+ shost = iscsi_host_alloc(&iscsi_sht, 0, qdepth);
+ if (!shost)
+ return NULL;
+ shost->transportt = iscsi_tcp_scsi_transport;
+ shost->max_lun = iscsi_max_lun;
+ shost->max_id = 0;
+ shost->max_channel = 0;
+ shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
+
+ if (iscsi_host_add(shost, NULL))
+ goto free_host;
+ *hostno = shost->host_no;
+
+ cls_session = iscsi_session_setup(&iscsi_tcp_transport, shost, cmds_max,
+ sizeof(struct iscsi_tcp_task),
+ initial_cmdsn, 0);
+ if (!cls_session)
+ goto remove_host;
+ session = cls_session->dd_data;
+
+ shost->can_queue = session->scsi_cmds_max;
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+ struct iscsi_task *task = session->cmds[cmd_i];
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
- mtask->hdr = (struct iscsi_hdr *) &tcp_mtask->hdr;
+ task->hdr = &tcp_task->hdr.cmd_hdr;
+ task->hdr_max = sizeof(tcp_task->hdr) - ISCSI_DIGEST_SIZE;
}
- if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session)))
- goto r2tpool_alloc_fail;
-
+ if (iscsi_r2tpool_alloc(session))
+ goto remove_session;
return cls_session;
-r2tpool_alloc_fail:
+remove_session:
iscsi_session_teardown(cls_session);
+remove_host:
+ iscsi_host_remove(shost);
+free_host:
+ iscsi_host_free(shost);
return NULL;
}
static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
{
- iscsi_r2tpool_free(class_to_transport_session(cls_session));
- iscsi_session_teardown(cls_session);
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+
+ iscsi_r2tpool_free(cls_session->dd_data);
+
+ iscsi_host_remove(shost);
+ iscsi_host_free(shost);
}
static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
@@ -1971,14 +1961,11 @@ static struct iscsi_transport iscsi_tcp_transport = {
ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
ISCSI_LU_RESET_TMO |
- ISCSI_PING_TMO | ISCSI_RECV_TMO,
+ ISCSI_PING_TMO | ISCSI_RECV_TMO |
+ ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
.host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
ISCSI_HOST_INITIATOR_NAME |
ISCSI_HOST_NETDEV_NAME,
- .host_template = &iscsi_sht,
- .conndata_size = sizeof(struct iscsi_conn),
- .max_conn = 1,
- .max_cmd_len = 16,
/* session management */
.create_session = iscsi_tcp_session_create,
.destroy_session = iscsi_tcp_session_destroy,
@@ -1992,16 +1979,14 @@ static struct iscsi_transport iscsi_tcp_transport = {
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_tcp_conn_stop,
/* iscsi host params */
- .get_host_param = iscsi_tcp_host_get_param,
+ .get_host_param = iscsi_host_get_param,
.set_host_param = iscsi_host_set_param,
/* IO */
.send_pdu = iscsi_conn_send_pdu,
.get_stats = iscsi_conn_get_stats,
- .init_cmd_task = iscsi_tcp_ctask_init,
- .init_mgmt_task = iscsi_tcp_mtask_init,
- .xmit_cmd_task = iscsi_tcp_ctask_xmit,
- .xmit_mgmt_task = iscsi_tcp_mtask_xmit,
- .cleanup_cmd_task = iscsi_tcp_cleanup_ctask,
+ .init_task = iscsi_tcp_task_init,
+ .xmit_task = iscsi_tcp_task_xmit,
+ .cleanup_task = iscsi_tcp_cleanup_task,
/* recovery */
.session_recovery_timedout = iscsi_session_recovery_timedout,
};
@@ -2014,9 +1999,10 @@ iscsi_tcp_init(void)
iscsi_max_lun);
return -EINVAL;
}
- iscsi_tcp_transport.max_lun = iscsi_max_lun;
- if (!iscsi_register_transport(&iscsi_tcp_transport))
+ iscsi_tcp_scsi_transport = iscsi_register_transport(
+ &iscsi_tcp_transport);
+ if (!iscsi_tcp_scsi_transport)
return -ENODEV;
return 0;
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index ed0b991d1e72..498d8ca39848 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -103,11 +103,6 @@ struct iscsi_data_task {
char hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */
};
-struct iscsi_tcp_mgmt_task {
- struct iscsi_hdr hdr;
- char hdrext[ISCSI_DIGEST_SIZE]; /* Header-Digest */
-};
-
struct iscsi_r2t_info {
__be32 ttt; /* copied from R2T */
__be32 exp_statsn; /* copied from R2T */
@@ -119,7 +114,7 @@ struct iscsi_r2t_info {
struct iscsi_data_task dtask; /* Data-Out header buf */
};
-struct iscsi_tcp_cmd_task {
+struct iscsi_tcp_task {
struct iscsi_hdr_buff {
struct iscsi_cmd cmd_hdr;
char hdrextbuf[ISCSI_MAX_AHS_SIZE +
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index b43bf1d60dac..299e075a7b34 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -38,14 +38,6 @@
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/libiscsi.h>
-struct iscsi_session *
-class_to_transport_session(struct iscsi_cls_session *cls_session)
-{
- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
- return iscsi_hostdata(shost->hostdata);
-}
-EXPORT_SYMBOL_GPL(class_to_transport_session);
-
/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
#define SNA32_CHECK 2147483648UL
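
SNA32_CHECK is 2^31: under RFC 1982 serial-number arithmetic two 32-bit values compare by their forward distance, so ordering stays correct after the counter wraps. A standalone model of the comparison this constant supports (function name illustrative, folded into one unsigned subtraction):

#include <assert.h>
#include <stdint.h>

#define SNA32_CHECK 2147483648UL        /* 2^31 */

/* RFC 1982 "less than": n1 < n2 iff they differ and the forward
 * distance n2 - n1 (mod 2^32) is below half the space. */
static int sna32_lt(uint32_t n1, uint32_t n2)
{
        return n1 != n2 && (uint32_t)(n2 - n1) < SNA32_CHECK;
}

int main(void)
{
        assert(sna32_lt(5, 6));
        assert(sna32_lt(0xfffffff0u, 0x10));    /* across the wrap */
        assert(!sna32_lt(6, 5));
        return 0;
}
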
@@ -87,68 +79,70 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
* xmit thread
*/
if (!list_empty(&session->leadconn->xmitqueue) ||
- !list_empty(&session->leadconn->mgmtqueue))
- scsi_queue_work(session->host,
- &session->leadconn->xmitwork);
+ !list_empty(&session->leadconn->mgmtqueue)) {
+ if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+ scsi_queue_work(session->host,
+ &session->leadconn->xmitwork);
+ }
}
}
EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
-void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
+void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *task,
struct iscsi_data *hdr)
{
- struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_conn *conn = task->conn;
memset(hdr, 0, sizeof(struct iscsi_data));
hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
- hdr->datasn = cpu_to_be32(ctask->unsol_datasn);
- ctask->unsol_datasn++;
+ hdr->datasn = cpu_to_be32(task->unsol_datasn);
+ task->unsol_datasn++;
hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
- memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
+ memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
- hdr->itt = ctask->hdr->itt;
+ hdr->itt = task->hdr->itt;
hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
- hdr->offset = cpu_to_be32(ctask->unsol_offset);
+ hdr->offset = cpu_to_be32(task->unsol_offset);
- if (ctask->unsol_count > conn->max_xmit_dlength) {
+ if (task->unsol_count > conn->max_xmit_dlength) {
hton24(hdr->dlength, conn->max_xmit_dlength);
- ctask->data_count = conn->max_xmit_dlength;
- ctask->unsol_offset += ctask->data_count;
+ task->data_count = conn->max_xmit_dlength;
+ task->unsol_offset += task->data_count;
hdr->flags = 0;
} else {
- hton24(hdr->dlength, ctask->unsol_count);
- ctask->data_count = ctask->unsol_count;
+ hton24(hdr->dlength, task->unsol_count);
+ task->data_count = task->unsol_count;
hdr->flags = ISCSI_FLAG_CMD_FINAL;
}
}
EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu);
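
The header prep above emits one unsolicited Data-Out per call: full PDUs of max_xmit_dlength until the remainder fits, with ISCSI_FLAG_CMD_FINAL only on the last. A standalone model of that accounting, e.g. 20 KB of unsolicited data with an 8 KB transfer limit (illustrative):

#include <stdio.h>

int main(void)
{
        unsigned unsol = 20480, max_dlen = 8192, off = 0;

        while (unsol) {
                unsigned dlen = unsol > max_dlen ? max_dlen : unsol;
                int final = (dlen == unsol);    /* ISCSI_FLAG_CMD_FINAL */

                printf("Data-Out: offset=%u dlength=%u final=%d\n",
                       off, dlen, final);
                off += dlen;
                unsol -= dlen;
        }
        return 0;
}
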
-static int iscsi_add_hdr(struct iscsi_cmd_task *ctask, unsigned len)
+static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
{
- unsigned exp_len = ctask->hdr_len + len;
+ unsigned exp_len = task->hdr_len + len;
- if (exp_len > ctask->hdr_max) {
+ if (exp_len > task->hdr_max) {
WARN_ON(1);
return -EINVAL;
}
WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
- ctask->hdr_len = exp_len;
+ task->hdr_len = exp_len;
return 0;
}
/*
* make an extended cdb AHS
*/
-static int iscsi_prep_ecdb_ahs(struct iscsi_cmd_task *ctask)
+static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
{
- struct scsi_cmnd *cmd = ctask->sc;
+ struct scsi_cmnd *cmd = task->sc;
unsigned rlen, pad_len;
unsigned short ahslength;
struct iscsi_ecdb_ahdr *ecdb_ahdr;
int rc;
- ecdb_ahdr = iscsi_next_hdr(ctask);
+ ecdb_ahdr = iscsi_next_hdr(task);
rlen = cmd->cmd_len - ISCSI_CDB_SIZE;
BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
@@ -156,7 +150,7 @@ static int iscsi_prep_ecdb_ahs(struct iscsi_cmd_task *ctask)
pad_len = iscsi_padding(rlen);
- rc = iscsi_add_hdr(ctask, sizeof(ecdb_ahdr->ahslength) +
+ rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) +
sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
if (rc)
return rc;
@@ -171,19 +165,19 @@ static int iscsi_prep_ecdb_ahs(struct iscsi_cmd_task *ctask)
debug_scsi("iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
"rlen %d pad_len %d ahs_length %d iscsi_headers_size %u\n",
- cmd->cmd_len, rlen, pad_len, ahslength, ctask->hdr_len);
+ cmd->cmd_len, rlen, pad_len, ahslength, task->hdr_len);
return 0;
}
-static int iscsi_prep_bidi_ahs(struct iscsi_cmd_task *ctask)
+static int iscsi_prep_bidi_ahs(struct iscsi_task *task)
{
- struct scsi_cmnd *sc = ctask->sc;
+ struct scsi_cmnd *sc = task->sc;
struct iscsi_rlength_ahdr *rlen_ahdr;
int rc;
- rlen_ahdr = iscsi_next_hdr(ctask);
- rc = iscsi_add_hdr(ctask, sizeof(*rlen_ahdr));
+ rlen_ahdr = iscsi_next_hdr(task);
+ rc = iscsi_add_hdr(task, sizeof(*rlen_ahdr));
if (rc)
return rc;
@@ -203,28 +197,28 @@ static int iscsi_prep_bidi_ahs(struct iscsi_cmd_task *ctask)
/**
* iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
- * @ctask: iscsi cmd task
+ * @task: iscsi task
*
* Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
* fields like dlength or final based on how much data it sends
*/
-static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
+static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
{
- struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_conn *conn = task->conn;
struct iscsi_session *session = conn->session;
- struct iscsi_cmd *hdr = ctask->hdr;
- struct scsi_cmnd *sc = ctask->sc;
+ struct iscsi_cmd *hdr = task->hdr;
+ struct scsi_cmnd *sc = task->sc;
unsigned hdrlength, cmd_len;
int rc;
- ctask->hdr_len = 0;
- rc = iscsi_add_hdr(ctask, sizeof(*hdr));
+ task->hdr_len = 0;
+ rc = iscsi_add_hdr(task, sizeof(*hdr));
if (rc)
return rc;
hdr->opcode = ISCSI_OP_SCSI_CMD;
hdr->flags = ISCSI_ATTR_SIMPLE;
int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
- hdr->itt = build_itt(ctask->itt, session->age);
+ hdr->itt = build_itt(task->itt, session->age);
hdr->cmdsn = cpu_to_be32(session->cmdsn);
session->cmdsn++;
hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
@@ -232,17 +226,17 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
if (cmd_len < ISCSI_CDB_SIZE)
memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
else if (cmd_len > ISCSI_CDB_SIZE) {
- rc = iscsi_prep_ecdb_ahs(ctask);
+ rc = iscsi_prep_ecdb_ahs(task);
if (rc)
return rc;
cmd_len = ISCSI_CDB_SIZE;
}
memcpy(hdr->cdb, sc->cmnd, cmd_len);
- ctask->imm_count = 0;
+ task->imm_count = 0;
if (scsi_bidi_cmnd(sc)) {
hdr->flags |= ISCSI_FLAG_CMD_READ;
- rc = iscsi_prep_bidi_ahs(ctask);
+ rc = iscsi_prep_bidi_ahs(task);
if (rc)
return rc;
}
@@ -264,28 +258,28 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
*
* pad_count bytes to be sent as zero-padding
*/
- ctask->unsol_count = 0;
- ctask->unsol_offset = 0;
- ctask->unsol_datasn = 0;
+ task->unsol_count = 0;
+ task->unsol_offset = 0;
+ task->unsol_datasn = 0;
if (session->imm_data_en) {
if (out_len >= session->first_burst)
- ctask->imm_count = min(session->first_burst,
+ task->imm_count = min(session->first_burst,
conn->max_xmit_dlength);
else
- ctask->imm_count = min(out_len,
+ task->imm_count = min(out_len,
conn->max_xmit_dlength);
- hton24(hdr->dlength, ctask->imm_count);
+ hton24(hdr->dlength, task->imm_count);
} else
zero_data(hdr->dlength);
if (!session->initial_r2t_en) {
- ctask->unsol_count = min(session->first_burst, out_len)
- - ctask->imm_count;
- ctask->unsol_offset = ctask->imm_count;
+ task->unsol_count = min(session->first_burst, out_len)
+ - task->imm_count;
+ task->unsol_offset = task->imm_count;
}
- if (!ctask->unsol_count)
+ if (!task->unsol_count)
/* No unsolicited Data-Out's */
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
} else {
@@ -298,7 +292,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
}
/* calculate size of additional header segments (AHSs) */
- hdrlength = ctask->hdr_len - sizeof(*hdr);
+ hdrlength = task->hdr_len - sizeof(*hdr);
WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
hdrlength /= ISCSI_PAD_LEN;
@@ -306,76 +300,115 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
WARN_ON(hdrlength >= 256);
hdr->hlength = hdrlength & 0xFF;
- if (conn->session->tt->init_cmd_task(conn->ctask))
- return EIO;
+ if (conn->session->tt->init_task &&
+ conn->session->tt->init_task(task))
+ return -EIO;
+
+ task->state = ISCSI_TASK_RUNNING;
+ list_move_tail(&task->running, &conn->run_list);
conn->scsicmd_pdus_cnt++;
- debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x "
- "len %d bidi_len %d cmdsn %d win %d]\n",
- scsi_bidi_cmnd(sc) ? "bidirectional" :
- sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
- conn->id, sc, sc->cmnd[0], ctask->itt,
- scsi_bufflen(sc), scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
- session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
+ debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d "
+ "bidi_len %d cmdsn %d win %d]\n", scsi_bidi_cmnd(sc) ?
+ "bidirectional" : sc->sc_data_direction == DMA_TO_DEVICE ?
+ "write" : "read", conn->id, sc, sc->cmnd[0], task->itt,
+ scsi_bufflen(sc),
+ scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
+ session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
return 0;
}
/**
- * iscsi_complete_command - return command back to scsi-ml
- * @ctask: iscsi cmd task
+ * iscsi_complete_command - finish a task
+ * @task: iscsi cmd task
*
* Must be called with session lock.
- * This function returns the scsi command to scsi-ml and returns
- * the cmd task to the pool of available cmd tasks.
+ * This function returns the scsi command to scsi-ml or cleans
+ * up mgmt tasks then returns the task to the pool.
*/
-static void iscsi_complete_command(struct iscsi_cmd_task *ctask)
+static void iscsi_complete_command(struct iscsi_task *task)
{
- struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_conn *conn = task->conn;
struct iscsi_session *session = conn->session;
- struct scsi_cmnd *sc = ctask->sc;
+ struct scsi_cmnd *sc = task->sc;
- ctask->state = ISCSI_TASK_COMPLETED;
- ctask->sc = NULL;
- /* SCSI eh reuses commands to verify us */
- sc->SCp.ptr = NULL;
- if (conn->ctask == ctask)
- conn->ctask = NULL;
- list_del_init(&ctask->running);
- __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
- sc->scsi_done(sc);
+ list_del_init(&task->running);
+ task->state = ISCSI_TASK_COMPLETED;
+ task->sc = NULL;
+
+ if (conn->task == task)
+ conn->task = NULL;
+ /*
+ * login task is preallocated so do not free
+ */
+ if (conn->login_task == task)
+ return;
+
+ __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
+
+ if (conn->ping_task == task)
+ conn->ping_task = NULL;
+
+ if (sc) {
+ task->sc = NULL;
+ /* SCSI eh reuses commands to verify us */
+ sc->SCp.ptr = NULL;
+ /*
+ * queuecommand may call this to free the task, but may
+ * not have set up the sc callback yet
+ */
+ if (sc->scsi_done)
+ sc->scsi_done(sc);
+ }
+}
+
+void __iscsi_get_task(struct iscsi_task *task)
+{
+ atomic_inc(&task->refcount);
}
+EXPORT_SYMBOL_GPL(__iscsi_get_task);
-static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask)
+static void __iscsi_put_task(struct iscsi_task *task)
{
- atomic_inc(&ctask->refcount);
+ if (atomic_dec_and_test(&task->refcount))
+ iscsi_complete_command(task);
}
-static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask)
+void iscsi_put_task(struct iscsi_task *task)
{
- if (atomic_dec_and_test(&ctask->refcount))
- iscsi_complete_command(ctask);
+ struct iscsi_session *session = task->conn->session;
+
+ spin_lock_bh(&session->lock);
+ __iscsi_put_task(task);
+ spin_unlock_bh(&session->lock);
}
+EXPORT_SYMBOL_GPL(iscsi_put_task);
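+
+/*
+ * Refcounting sketch: queuecommand and __iscsi_conn_send_pdu take the
+ * initial reference with atomic_set(&task->refcount, 1). Code that
+ * touches a task outside the session lock brackets the window with
+ * __iscsi_get_task()/iscsi_put_task(); the final put completes the
+ * task via iscsi_complete_command().
+ */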
/*
* session lock must be held
*/
-static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
int err)
{
struct scsi_cmnd *sc;
- sc = ctask->sc;
+ sc = task->sc;
if (!sc)
return;
- if (ctask->state == ISCSI_TASK_PENDING)
+ if (task->state == ISCSI_TASK_PENDING)
/*
* cmd never made it to the xmit thread, so we should not count
* the cmd in the sequencing
*/
conn->session->queued_cmdsn--;
else
- conn->session->tt->cleanup_cmd_task(conn, ctask);
+ conn->session->tt->cleanup_task(conn, task);
+ /*
+ * Check if cleanup_task dropped the lock and the command completed.
+ */
+ if (!task->sc)
+ return;
sc->result = err;
if (!scsi_bidi_cmnd(sc))
@@ -384,39 +417,63 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
scsi_out(sc)->resid = scsi_out(sc)->length;
scsi_in(sc)->resid = scsi_in(sc)->length;
}
- if (conn->ctask == ctask)
- conn->ctask = NULL;
+
+ if (conn->task == task)
+ conn->task = NULL;
/* release ref from queuecommand */
- __iscsi_put_ctask(ctask);
+ __iscsi_put_task(task);
}
-/**
- * iscsi_free_mgmt_task - return mgmt task back to pool
- * @conn: iscsi connection
- * @mtask: mtask
- *
- * Must be called with session lock.
- */
-void iscsi_free_mgmt_task(struct iscsi_conn *conn,
- struct iscsi_mgmt_task *mtask)
+static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
+ struct iscsi_task *task)
{
- list_del_init(&mtask->running);
- if (conn->login_mtask == mtask)
- return;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_hdr *hdr = (struct iscsi_hdr *)task->hdr;
+ struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
+
+ if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+ return -ENOTCONN;
+
+ if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
+ hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
+ nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
+ /*
+ * pre-format CmdSN for outgoing PDU.
+ */
+ nop->cmdsn = cpu_to_be32(session->cmdsn);
+ if (hdr->itt != RESERVED_ITT) {
+ hdr->itt = build_itt(task->itt, session->age);
+ /*
+ * TODO: We always use immediate, so we never hit this.
+ * If we start to send tmfs or nops as non-immediate then
+ * we should start checking the cmdsn numbers for mgmt tasks.
+ */
+ if (conn->c_stage == ISCSI_CONN_STARTED &&
+ !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+ session->queued_cmdsn++;
+ session->cmdsn++;
+ }
+ }
- if (conn->ping_mtask == mtask)
- conn->ping_mtask = NULL;
- __kfifo_put(conn->session->mgmtpool.queue,
- (void*)&mtask, sizeof(void*));
+ if (session->tt->init_task)
+ session->tt->init_task(task);
+
+ if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
+ session->state = ISCSI_STATE_LOGGING_OUT;
+
+ list_move_tail(&task->running, &conn->mgmt_run_list);
+ debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
+ hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
+ task->data_count);
+ return 0;
}
-EXPORT_SYMBOL_GPL(iscsi_free_mgmt_task);
-static struct iscsi_mgmt_task *
+static struct iscsi_task *
__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
char *data, uint32_t data_size)
{
struct iscsi_session *session = conn->session;
- struct iscsi_mgmt_task *mtask;
+ struct iscsi_task *task;
if (session->state == ISCSI_STATE_TERMINATE)
return NULL;
@@ -426,29 +483,56 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
/*
* Login and Text are sent serially, in
* request-followed-by-response sequence.
- * Same mtask can be used. Same ITT must be used.
- * Note that login_mtask is preallocated at conn_create().
+ * Same task can be used. Same ITT must be used.
+ * Note that login_task is preallocated at conn_create().
*/
- mtask = conn->login_mtask;
+ task = conn->login_task;
else {
BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
- if (!__kfifo_get(session->mgmtpool.queue,
- (void*)&mtask, sizeof(void*)))
+ if (!__kfifo_get(session->cmdpool.queue,
+ (void*)&task, sizeof(void*)))
return NULL;
+
+ if ((hdr->opcode == (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE)) &&
+ hdr->ttt == RESERVED_ITT) {
+ conn->ping_task = task;
+ conn->last_ping = jiffies;
+ }
}
+ /*
+ * released in __iscsi_complete_pdu for tasks we expect a response
+ * for, and released by the LLD once it has transmitted the task,
+ * for pdus we do not expect a response for.
+ */
+ atomic_set(&task->refcount, 1);
+ task->conn = conn;
+ task->sc = NULL;
if (data_size) {
- memcpy(mtask->data, data, data_size);
- mtask->data_count = data_size;
+ memcpy(task->data, data, data_size);
+ task->data_count = data_size;
+ } else
+ task->data_count = 0;
+
+ memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
+ INIT_LIST_HEAD(&task->running);
+ list_add_tail(&task->running, &conn->mgmtqueue);
+
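+ /*
+ * Offload drivers (CAP_DATA_PATH_OFFLOAD) prep and transmit the
+ * pdu from this context; software drivers defer to the per-host
+ * xmit workqueue below.
+ */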
+ if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+ if (iscsi_prep_mgmt_task(conn, task)) {
+ __iscsi_put_task(task);
+ return NULL;
+ }
+
+ if (session->tt->xmit_task(task))
+ task = NULL;
+
} else
- mtask->data_count = 0;
+ scsi_queue_work(conn->session->host, &conn->xmitwork);
- memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr));
- INIT_LIST_HEAD(&mtask->running);
- list_add_tail(&mtask->running, &conn->mgmtqueue);
- return mtask;
+ return task;
}
int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
@@ -462,7 +546,6 @@ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
err = -EPERM;
spin_unlock_bh(&session->lock);
- scsi_queue_work(session->host, &conn->xmitwork);
return err;
}
EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
@@ -471,7 +554,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
* iscsi_cmd_rsp - SCSI Command Response processing
* @conn: iscsi connection
* @hdr: iscsi header
- * @ctask: scsi command task
+ * @task: scsi command task
* @data: cmd data buffer
* @datalen: len of buffer
*
@@ -479,12 +562,12 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
* then completes the command and task.
**/
static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
- struct iscsi_cmd_task *ctask, char *data,
+ struct iscsi_task *task, char *data,
int datalen)
{
struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
struct iscsi_session *session = conn->session;
- struct scsi_cmnd *sc = ctask->sc;
+ struct scsi_cmnd *sc = task->sc;
iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
@@ -508,7 +591,7 @@ invalid_datalen:
goto out;
}
- senselen = be16_to_cpu(get_unaligned((__be16 *) data));
+ senselen = get_unaligned_be16(data);
if (datalen < senselen)
goto invalid_datalen;
@@ -544,10 +627,10 @@ invalid_datalen:
}
out:
debug_scsi("done [sc %lx res %d itt 0x%x]\n",
- (long)sc, sc->result, ctask->itt);
+ (long)sc, sc->result, task->itt);
conn->scsirsp_pdus_cnt++;
- __iscsi_put_ctask(ctask);
+ __iscsi_put_task(task);
}
static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
@@ -572,9 +655,9 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
{
struct iscsi_nopout hdr;
- struct iscsi_mgmt_task *mtask;
+ struct iscsi_task *task;
- if (!rhdr && conn->ping_mtask)
+ if (!rhdr && conn->ping_task)
return;
memset(&hdr, 0, sizeof(struct iscsi_nopout));
@@ -588,18 +671,9 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
} else
hdr.ttt = RESERVED_ITT;
- mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
- if (!mtask) {
+ task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
+ if (!task)
iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
- return;
- }
-
- /* only track our nops */
- if (!rhdr) {
- conn->ping_mtask = mtask;
- conn->last_ping = jiffies;
- }
- scsi_queue_work(conn->session->host, &conn->xmitwork);
}
static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
@@ -628,6 +702,31 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
}
/**
+ * iscsi_itt_to_task - look up task by itt
+ * @conn: iscsi connection
+ * @itt: itt
+ *
+ * This should be used for mgmt tasks like login and nops, or if
+ * the LLD's itt space does not include the session age.
+ *
+ * The session lock must be held.
+ */
+static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
+{
+ struct iscsi_session *session = conn->session;
+ uint32_t i;
+
+ if (itt == RESERVED_ITT)
+ return NULL;
+
+ i = get_itt(itt);
+ if (i >= session->cmds_max)
+ return NULL;
+
+ return session->cmds[i];
+}
+
+/**
* __iscsi_complete_pdu - complete pdu
* @conn: iscsi conn
* @hdr: iscsi header
@@ -638,108 +737,28 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
* queuecommand or send generic. session lock must be held and verify
* itt must have been called.
*/
-static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
- char *data, int datalen)
+int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, int datalen)
{
struct iscsi_session *session = conn->session;
int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
- struct iscsi_cmd_task *ctask;
- struct iscsi_mgmt_task *mtask;
+ struct iscsi_task *task;
uint32_t itt;
conn->last_recv = jiffies;
+ rc = iscsi_verify_itt(conn, hdr->itt);
+ if (rc)
+ return rc;
+
if (hdr->itt != RESERVED_ITT)
itt = get_itt(hdr->itt);
else
itt = ~0U;
- if (itt < session->cmds_max) {
- ctask = session->cmds[itt];
-
- debug_scsi("cmdrsp [op 0x%x cid %d itt 0x%x len %d]\n",
- opcode, conn->id, ctask->itt, datalen);
-
- switch(opcode) {
- case ISCSI_OP_SCSI_CMD_RSP:
- BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
- iscsi_scsi_cmd_rsp(conn, hdr, ctask, data,
- datalen);
- break;
- case ISCSI_OP_SCSI_DATA_IN:
- BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
- if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
- conn->scsirsp_pdus_cnt++;
- __iscsi_put_ctask(ctask);
- }
- break;
- case ISCSI_OP_R2T:
- /* LLD handles this for now */
- break;
- default:
- rc = ISCSI_ERR_BAD_OPCODE;
- break;
- }
- } else if (itt >= ISCSI_MGMT_ITT_OFFSET &&
- itt < ISCSI_MGMT_ITT_OFFSET + session->mgmtpool_max) {
- mtask = session->mgmt_cmds[itt - ISCSI_MGMT_ITT_OFFSET];
-
- debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
- opcode, conn->id, mtask->itt, datalen);
+ debug_scsi("[op 0x%x cid %d itt 0x%x len %d]\n",
+ opcode, conn->id, itt, datalen);
- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
- switch(opcode) {
- case ISCSI_OP_LOGOUT_RSP:
- if (datalen) {
- rc = ISCSI_ERR_PROTO;
- break;
- }
- conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
- /* fall through */
- case ISCSI_OP_LOGIN_RSP:
- case ISCSI_OP_TEXT_RSP:
- /*
- * login related PDU's exp_statsn is handled in
- * userspace
- */
- if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
- rc = ISCSI_ERR_CONN_FAILED;
- iscsi_free_mgmt_task(conn, mtask);
- break;
- case ISCSI_OP_SCSI_TMFUNC_RSP:
- if (datalen) {
- rc = ISCSI_ERR_PROTO;
- break;
- }
-
- iscsi_tmf_rsp(conn, hdr);
- iscsi_free_mgmt_task(conn, mtask);
- break;
- case ISCSI_OP_NOOP_IN:
- if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) ||
- datalen) {
- rc = ISCSI_ERR_PROTO;
- break;
- }
- conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
-
- if (conn->ping_mtask != mtask) {
- /*
- * If this is not in response to one of our
- * nops then it must be from userspace.
- */
- if (iscsi_recv_pdu(conn->cls_conn, hdr, data,
- datalen))
- rc = ISCSI_ERR_CONN_FAILED;
- } else
- mod_timer(&conn->transport_timer,
- jiffies + conn->recv_timeout);
- iscsi_free_mgmt_task(conn, mtask);
- break;
- default:
- rc = ISCSI_ERR_BAD_OPCODE;
- break;
- }
- } else if (itt == ~0U) {
+ if (itt == ~0U) {
iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
switch(opcode) {
@@ -766,11 +785,104 @@ static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
rc = ISCSI_ERR_BAD_OPCODE;
break;
}
- } else
- rc = ISCSI_ERR_BAD_ITT;
+ goto out;
+ }
+ switch(opcode) {
+ case ISCSI_OP_SCSI_CMD_RSP:
+ case ISCSI_OP_SCSI_DATA_IN:
+ task = iscsi_itt_to_ctask(conn, hdr->itt);
+ if (!task)
+ return ISCSI_ERR_BAD_ITT;
+ break;
+ case ISCSI_OP_R2T:
+ /*
+ * LLD handles R2Ts if they need to.
+ */
+ return 0;
+ case ISCSI_OP_LOGOUT_RSP:
+ case ISCSI_OP_LOGIN_RSP:
+ case ISCSI_OP_TEXT_RSP:
+ case ISCSI_OP_SCSI_TMFUNC_RSP:
+ case ISCSI_OP_NOOP_IN:
+ task = iscsi_itt_to_task(conn, hdr->itt);
+ if (!task)
+ return ISCSI_ERR_BAD_ITT;
+ break;
+ default:
+ return ISCSI_ERR_BAD_OPCODE;
+ }
+
+ switch(opcode) {
+ case ISCSI_OP_SCSI_CMD_RSP:
+ iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
+ break;
+ case ISCSI_OP_SCSI_DATA_IN:
+ if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+ conn->scsirsp_pdus_cnt++;
+ iscsi_update_cmdsn(session,
+ (struct iscsi_nopin*) hdr);
+ __iscsi_put_task(task);
+ }
+ break;
+ case ISCSI_OP_LOGOUT_RSP:
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+ if (datalen) {
+ rc = ISCSI_ERR_PROTO;
+ break;
+ }
+ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+ goto recv_pdu;
+ case ISCSI_OP_LOGIN_RSP:
+ case ISCSI_OP_TEXT_RSP:
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+ /*
+ * login related PDU's exp_statsn is handled in
+ * userspace
+ */
+ goto recv_pdu;
+ case ISCSI_OP_SCSI_TMFUNC_RSP:
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+ if (datalen) {
+ rc = ISCSI_ERR_PROTO;
+ break;
+ }
+
+ iscsi_tmf_rsp(conn, hdr);
+ __iscsi_put_task(task);
+ break;
+ case ISCSI_OP_NOOP_IN:
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+ if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
+ rc = ISCSI_ERR_PROTO;
+ break;
+ }
+ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+
+ if (conn->ping_task != task)
+ /*
+ * If this is not in response to one of our
+ * nops then it must be from userspace.
+ */
+ goto recv_pdu;
+
+ mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
+ __iscsi_put_task(task);
+ break;
+ default:
+ rc = ISCSI_ERR_BAD_OPCODE;
+ break;
+ }
+
+out:
+ return rc;
+recv_pdu:
+ if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
+ rc = ISCSI_ERR_CONN_FAILED;
+ __iscsi_put_task(task);
return rc;
}
+EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
char *data, int datalen)
@@ -784,51 +896,63 @@ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
}
EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
-/* verify itt (itt encoding: age+cid+itt) */
-int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
- uint32_t *ret_itt)
+int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
{
struct iscsi_session *session = conn->session;
- struct iscsi_cmd_task *ctask;
- uint32_t itt;
+ uint32_t i;
- if (hdr->itt != RESERVED_ITT) {
- if (((__force u32)hdr->itt & ISCSI_AGE_MASK) !=
- (session->age << ISCSI_AGE_SHIFT)) {
- iscsi_conn_printk(KERN_ERR, conn,
- "received itt %x expected session "
- "age (%x)\n", (__force u32)hdr->itt,
- session->age & ISCSI_AGE_MASK);
- return ISCSI_ERR_BAD_ITT;
- }
+ if (itt == RESERVED_ITT)
+ return 0;
- itt = get_itt(hdr->itt);
- } else
- itt = ~0U;
+ if (((__force u32)itt & ISCSI_AGE_MASK) !=
+ (session->age << ISCSI_AGE_SHIFT)) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "received itt %x expected session age (%x)\n",
+ (__force u32)itt, session->age);
+ return ISCSI_ERR_BAD_ITT;
+ }
- if (itt < session->cmds_max) {
- ctask = session->cmds[itt];
+ i = get_itt(itt);
+ if (i >= session->cmds_max) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "received invalid itt index %u (max cmds "
+ "%u.\n", i, session->cmds_max);
+ return ISCSI_ERR_BAD_ITT;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iscsi_verify_itt);
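+
+/*
+ * Sketch of the itt encoding checked above (assuming the build_itt()
+ * and get_itt() helpers used elsewhere in this file):
+ *
+ *	itt = build_itt(task->itt, session->age);
+ *		task index in the low bits, session age shifted up
+ *		by ISCSI_AGE_SHIFT
+ *	get_itt(itt) masks the age back off to recover the task index.
+ */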
- if (!ctask->sc) {
- iscsi_conn_printk(KERN_INFO, conn, "dropping ctask "
- "with itt 0x%x\n", ctask->itt);
- /* force drop */
- return ISCSI_ERR_NO_SCSI_CMD;
- }
+/**
+ * iscsi_itt_to_ctask - look up ctask by itt
+ * @conn: iscsi connection
+ * @itt: itt
+ *
+ * This should be used for cmd tasks.
+ *
+ * The session lock must be held.
+ */
+struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
+{
+ struct iscsi_task *task;
- if (ctask->sc->SCp.phase != session->age) {
- iscsi_conn_printk(KERN_ERR, conn,
- "iscsi: ctask's session age %d, "
- "expected %d\n", ctask->sc->SCp.phase,
- session->age);
- return ISCSI_ERR_SESSION_FAILED;
- }
+ if (iscsi_verify_itt(conn, itt))
+ return NULL;
+
+ task = iscsi_itt_to_task(conn, itt);
+ if (!task || !task->sc)
+ return NULL;
+
+ if (task->sc->SCp.phase != conn->session->age) {
+ iscsi_session_printk(KERN_ERR, conn->session,
+ "task's session age %d, expected %d\n",
+ task->sc->SCp.phase, conn->session->age);
+ return NULL;
}
- *ret_itt = itt;
- return 0;
+ return task;
}
-EXPORT_SYMBOL_GPL(iscsi_verify_itt);
+EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
{
@@ -850,61 +974,6 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
}
EXPORT_SYMBOL_GPL(iscsi_conn_failure);
-static void iscsi_prep_mtask(struct iscsi_conn *conn,
- struct iscsi_mgmt_task *mtask)
-{
- struct iscsi_session *session = conn->session;
- struct iscsi_hdr *hdr = mtask->hdr;
- struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
-
- if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
- hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
- nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
- /*
- * pre-format CmdSN for outgoing PDU.
- */
- nop->cmdsn = cpu_to_be32(session->cmdsn);
- if (hdr->itt != RESERVED_ITT) {
- hdr->itt = build_itt(mtask->itt, session->age);
- /*
- * TODO: We always use immediate, so we never hit this.
- * If we start to send tmfs or nops as non-immediate then
- * we should start checking the cmdsn numbers for mgmt tasks.
- */
- if (conn->c_stage == ISCSI_CONN_STARTED &&
- !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
- session->queued_cmdsn++;
- session->cmdsn++;
- }
- }
-
- if (session->tt->init_mgmt_task)
- session->tt->init_mgmt_task(conn, mtask);
-
- debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
- hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
- mtask->data_count);
-}
-
-static int iscsi_xmit_mtask(struct iscsi_conn *conn)
-{
- struct iscsi_hdr *hdr = conn->mtask->hdr;
- int rc;
-
- if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
- conn->session->state = ISCSI_STATE_LOGGING_OUT;
- spin_unlock_bh(&conn->session->lock);
-
- rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask);
- spin_lock_bh(&conn->session->lock);
- if (rc)
- return rc;
-
- /* done with this in-progress mtask */
- conn->mtask = NULL;
- return 0;
-}
-
static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
{
struct iscsi_session *session = conn->session;
@@ -922,37 +991,38 @@ static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
return 0;
}
-static int iscsi_xmit_ctask(struct iscsi_conn *conn)
+static int iscsi_xmit_task(struct iscsi_conn *conn)
{
- struct iscsi_cmd_task *ctask = conn->ctask;
+ struct iscsi_task *task = conn->task;
int rc;
- __iscsi_get_ctask(ctask);
+ __iscsi_get_task(task);
spin_unlock_bh(&conn->session->lock);
- rc = conn->session->tt->xmit_cmd_task(conn, ctask);
+ rc = conn->session->tt->xmit_task(task);
spin_lock_bh(&conn->session->lock);
- __iscsi_put_ctask(ctask);
+ __iscsi_put_task(task);
if (!rc)
- /* done with this ctask */
- conn->ctask = NULL;
+ /* done with this task */
+ conn->task = NULL;
return rc;
}
/**
- * iscsi_requeue_ctask - requeue ctask to run from session workqueue
- * @ctask: ctask to requeue
+ * iscsi_requeue_task - requeue task to run from session workqueue
+ * @task: task to requeue
*
- * LLDs that need to run a ctask from the session workqueue should call
- * this. The session lock must be held.
+ * LLDs that need to run a task from the session workqueue should call
+ * this. The session lock must be held. This should only be called
+ * by software drivers.
*/
-void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask)
+void iscsi_requeue_task(struct iscsi_task *task)
{
- struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_conn *conn = task->conn;
- list_move_tail(&ctask->running, &conn->requeue);
+ list_move_tail(&task->running, &conn->requeue);
scsi_queue_work(conn->session->host, &conn->xmitwork);
}
-EXPORT_SYMBOL_GPL(iscsi_requeue_ctask);
+EXPORT_SYMBOL_GPL(iscsi_requeue_task);
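+
+/*
+ * Sketch (illustrative; the exact locking context depends on the LLD):
+ * a software driver's R2T handling would requeue the data-out phase as
+ *
+ *	iscsi_requeue_task(task);
+ *
+ * with the session lock held, and the xmit workqueue picks it up.
+ */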
/**
* iscsi_data_xmit - xmit any command into the scheduled connection
@@ -974,14 +1044,8 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
return -ENODATA;
}
- if (conn->ctask) {
- rc = iscsi_xmit_ctask(conn);
- if (rc)
- goto again;
- }
-
- if (conn->mtask) {
- rc = iscsi_xmit_mtask(conn);
+ if (conn->task) {
+ rc = iscsi_xmit_task(conn);
if (rc)
goto again;
}
@@ -993,17 +1057,14 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
*/
check_mgmt:
while (!list_empty(&conn->mgmtqueue)) {
- conn->mtask = list_entry(conn->mgmtqueue.next,
- struct iscsi_mgmt_task, running);
- if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
- iscsi_free_mgmt_task(conn, conn->mtask);
- conn->mtask = NULL;
+ conn->task = list_entry(conn->mgmtqueue.next,
+ struct iscsi_task, running);
+ if (iscsi_prep_mgmt_task(conn, conn->task)) {
+ __iscsi_put_task(conn->task);
+ conn->task = NULL;
continue;
}
-
- iscsi_prep_mtask(conn, conn->mtask);
- list_move_tail(conn->mgmtqueue.next, &conn->mgmt_run_list);
- rc = iscsi_xmit_mtask(conn);
+ rc = iscsi_xmit_task(conn);
if (rc)
goto again;
}
@@ -1013,24 +1074,21 @@ check_mgmt:
if (conn->tmf_state == TMF_QUEUED)
break;
- conn->ctask = list_entry(conn->xmitqueue.next,
- struct iscsi_cmd_task, running);
+ conn->task = list_entry(conn->xmitqueue.next,
+ struct iscsi_task, running);
if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
- fail_command(conn, conn->ctask, DID_IMM_RETRY << 16);
+ fail_command(conn, conn->task, DID_IMM_RETRY << 16);
continue;
}
- if (iscsi_prep_scsi_cmd_pdu(conn->ctask)) {
- fail_command(conn, conn->ctask, DID_ABORT << 16);
+ if (iscsi_prep_scsi_cmd_pdu(conn->task)) {
+ fail_command(conn, conn->task, DID_ABORT << 16);
continue;
}
-
- conn->ctask->state = ISCSI_TASK_RUNNING;
- list_move_tail(conn->xmitqueue.next, &conn->run_list);
- rc = iscsi_xmit_ctask(conn);
+ rc = iscsi_xmit_task(conn);
if (rc)
goto again;
/*
- * we could continuously get new ctask requests so
+ * we could continuously get new task requests so
* we need to check the mgmt queue for nops that need to
 * be sent to avoid starvation
*/
@@ -1048,11 +1106,11 @@ check_mgmt:
if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
break;
- conn->ctask = list_entry(conn->requeue.next,
- struct iscsi_cmd_task, running);
- conn->ctask->state = ISCSI_TASK_RUNNING;
+ conn->task = list_entry(conn->requeue.next,
+ struct iscsi_task, running);
+ conn->task->state = ISCSI_TASK_RUNNING;
list_move_tail(conn->requeue.next, &conn->run_list);
- rc = iscsi_xmit_ctask(conn);
+ rc = iscsi_xmit_task(conn);
if (rc)
goto again;
if (!list_empty(&conn->mgmtqueue))
@@ -1096,11 +1154,12 @@ enum {
int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
+ struct iscsi_cls_session *cls_session;
struct Scsi_Host *host;
int reason = 0;
struct iscsi_session *session;
struct iscsi_conn *conn;
- struct iscsi_cmd_task *ctask = NULL;
+ struct iscsi_task *task = NULL;
sc->scsi_done = done;
sc->result = 0;
@@ -1109,10 +1168,11 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
host = sc->device->host;
spin_unlock(host->host_lock);
- session = iscsi_hostdata(host->hostdata);
+ cls_session = starget_to_session(scsi_target(sc->device));
+ session = cls_session->dd_data;
spin_lock(&session->lock);
- reason = iscsi_session_chkready(session_to_cls(session));
+ reason = iscsi_session_chkready(cls_session);
if (reason) {
sc->result = reason;
goto fault;
@@ -1167,26 +1227,39 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
goto reject;
}
- if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask,
+ if (!__kfifo_get(session->cmdpool.queue, (void*)&task,
sizeof(void*))) {
reason = FAILURE_OOM;
goto reject;
}
- session->queued_cmdsn++;
-
sc->SCp.phase = session->age;
- sc->SCp.ptr = (char *)ctask;
-
- atomic_set(&ctask->refcount, 1);
- ctask->state = ISCSI_TASK_PENDING;
- ctask->conn = conn;
- ctask->sc = sc;
- INIT_LIST_HEAD(&ctask->running);
+ sc->SCp.ptr = (char *)task;
+
+ atomic_set(&task->refcount, 1);
+ task->state = ISCSI_TASK_PENDING;
+ task->conn = conn;
+ task->sc = sc;
+ INIT_LIST_HEAD(&task->running);
+ list_add_tail(&task->running, &conn->xmitqueue);
+
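+ /*
+ * As in __iscsi_conn_send_pdu(): offload drivers prep and transmit
+ * here, software drivers go through xmitwork.
+ */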
+ if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+ if (iscsi_prep_scsi_cmd_pdu(task)) {
+ sc->result = DID_ABORT << 16;
+ sc->scsi_done = NULL;
+ iscsi_complete_command(task);
+ goto fault;
+ }
+ if (session->tt->xmit_task(task)) {
+ sc->scsi_done = NULL;
+ iscsi_complete_command(task);
+ reason = FAILURE_SESSION_NOT_READY;
+ goto reject;
+ }
+ } else
+ scsi_queue_work(session->host, &conn->xmitwork);
- list_add_tail(&ctask->running, &conn->xmitqueue);
+ session->queued_cmdsn++;
spin_unlock(&session->lock);
-
- scsi_queue_work(host, &conn->xmitwork);
spin_lock(host->host_lock);
return 0;
@@ -1205,7 +1278,7 @@ fault:
scsi_out(sc)->resid = scsi_out(sc)->length;
scsi_in(sc)->resid = scsi_in(sc)->length;
}
- sc->scsi_done(sc);
+ done(sc);
spin_lock(host->host_lock);
return 0;
}
@@ -1222,7 +1295,7 @@ EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
{
- struct iscsi_session *session = class_to_transport_session(cls_session);
+ struct iscsi_session *session = cls_session->dd_data;
spin_lock_bh(&session->lock);
if (session->state != ISCSI_STATE_LOGGED_IN) {
@@ -1236,9 +1309,13 @@ EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
int iscsi_eh_host_reset(struct scsi_cmnd *sc)
{
- struct Scsi_Host *host = sc->device->host;
- struct iscsi_session *session = iscsi_hostdata(host->hostdata);
- struct iscsi_conn *conn = session->leadconn;
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+ struct iscsi_conn *conn;
+
+ cls_session = starget_to_session(scsi_target(sc->device));
+ session = cls_session->dd_data;
+ conn = session->leadconn;
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->lock);
@@ -1300,11 +1377,11 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
int timeout)
{
struct iscsi_session *session = conn->session;
- struct iscsi_mgmt_task *mtask;
+ struct iscsi_task *task;
- mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
+ task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
NULL, 0);
- if (!mtask) {
+ if (!task) {
spin_unlock_bh(&session->lock);
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
spin_lock_bh(&session->lock);
@@ -1320,7 +1397,6 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);
- scsi_queue_work(session->host, &conn->xmitwork);
/*
* block eh thread until:
@@ -1339,7 +1415,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->lock);
- /* if the session drops it will clean up the mtask */
+ /* if the session drops it will clean up the task */
if (age != session->age ||
session->state != ISCSI_STATE_LOGGED_IN)
return -ENOTCONN;
@@ -1353,48 +1429,51 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
int error)
{
- struct iscsi_cmd_task *ctask, *tmp;
+ struct iscsi_task *task, *tmp;
- if (conn->ctask && (conn->ctask->sc->device->lun == lun || lun == -1))
- conn->ctask = NULL;
+ if (conn->task && (conn->task->sc->device->lun == lun || lun == -1))
+ conn->task = NULL;
/* flush pending */
- list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) {
- if (lun == ctask->sc->device->lun || lun == -1) {
+ list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) {
+ if (lun == task->sc->device->lun || lun == -1) {
debug_scsi("failing pending sc %p itt 0x%x\n",
- ctask->sc, ctask->itt);
- fail_command(conn, ctask, error << 16);
+ task->sc, task->itt);
+ fail_command(conn, task, error << 16);
}
}
- list_for_each_entry_safe(ctask, tmp, &conn->requeue, running) {
- if (lun == ctask->sc->device->lun || lun == -1) {
+ list_for_each_entry_safe(task, tmp, &conn->requeue, running) {
+ if (lun == task->sc->device->lun || lun == -1) {
debug_scsi("failing requeued sc %p itt 0x%x\n",
- ctask->sc, ctask->itt);
- fail_command(conn, ctask, error << 16);
+ task->sc, task->itt);
+ fail_command(conn, task, error << 16);
}
}
/* fail all other running */
- list_for_each_entry_safe(ctask, tmp, &conn->run_list, running) {
- if (lun == ctask->sc->device->lun || lun == -1) {
+ list_for_each_entry_safe(task, tmp, &conn->run_list, running) {
+ if (lun == task->sc->device->lun || lun == -1) {
debug_scsi("failing in progress sc %p itt 0x%x\n",
- ctask->sc, ctask->itt);
- fail_command(conn, ctask, DID_BUS_BUSY << 16);
+ task->sc, task->itt);
+ fail_command(conn, task, DID_BUS_BUSY << 16);
}
}
}
-static void iscsi_suspend_tx(struct iscsi_conn *conn)
+void iscsi_suspend_tx(struct iscsi_conn *conn)
{
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
- scsi_flush_work(conn->session->host);
+ if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+ scsi_flush_work(conn->session->host);
}
+EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
static void iscsi_start_tx(struct iscsi_conn *conn)
{
clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
- scsi_queue_work(conn->session->host, &conn->xmitwork);
+ if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+ scsi_queue_work(conn->session->host, &conn->xmitwork);
}
static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
@@ -1405,7 +1484,7 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
cls_session = starget_to_session(scsi_target(scmd->device));
- session = class_to_transport_session(cls_session);
+ session = cls_session->dd_data;
debug_scsi("scsi cmd %p timedout\n", scmd);
@@ -1443,7 +1522,7 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
jiffies))
rc = EH_RESET_TIMER;
/* if in the middle of checking the transport then give us more time */
- if (conn->ping_mtask)
+ if (conn->ping_task)
rc = EH_RESET_TIMER;
done:
spin_unlock(&session->lock);
@@ -1467,7 +1546,7 @@ static void iscsi_check_transport_timeouts(unsigned long data)
recv_timeout *= HZ;
last_recv = conn->last_recv;
- if (conn->ping_mtask &&
+ if (conn->ping_task &&
time_before_eq(conn->last_ping + (conn->ping_timeout * HZ),
jiffies)) {
iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
@@ -1493,27 +1572,30 @@ done:
spin_unlock(&session->lock);
}
-static void iscsi_prep_abort_task_pdu(struct iscsi_cmd_task *ctask,
+static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
struct iscsi_tm *hdr)
{
memset(hdr, 0, sizeof(*hdr));
hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
- memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
- hdr->rtt = ctask->hdr->itt;
- hdr->refcmdsn = ctask->hdr->cmdsn;
+ memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+ hdr->rtt = task->hdr->itt;
+ hdr->refcmdsn = task->hdr->cmdsn;
}
int iscsi_eh_abort(struct scsi_cmnd *sc)
{
- struct Scsi_Host *host = sc->device->host;
- struct iscsi_session *session = iscsi_hostdata(host->hostdata);
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
struct iscsi_conn *conn;
- struct iscsi_cmd_task *ctask;
+ struct iscsi_task *task;
struct iscsi_tm *hdr;
int rc, age;
+ cls_session = starget_to_session(scsi_target(sc->device));
+ session = cls_session->dd_data;
+
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->lock);
/*
@@ -1542,17 +1624,17 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
conn->eh_abort_cnt++;
age = session->age;
- ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
- debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
+ task = (struct iscsi_task *)sc->SCp.ptr;
+ debug_scsi("aborting [sc %p itt 0x%x]\n", sc, task->itt);
- /* ctask completed before time out */
- if (!ctask->sc) {
+ /* task completed before time out */
+ if (!task->sc) {
debug_scsi("sc completed while abort in progress\n");
goto success;
}
- if (ctask->state == ISCSI_TASK_PENDING) {
- fail_command(conn, ctask, DID_ABORT << 16);
+ if (task->state == ISCSI_TASK_PENDING) {
+ fail_command(conn, task, DID_ABORT << 16);
goto success;
}
@@ -1562,7 +1644,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
conn->tmf_state = TMF_QUEUED;
hdr = &conn->tmhdr;
- iscsi_prep_abort_task_pdu(ctask, hdr);
+ iscsi_prep_abort_task_pdu(task, hdr);
if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
rc = FAILED;
@@ -1572,16 +1654,20 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
switch (conn->tmf_state) {
case TMF_SUCCESS:
spin_unlock_bh(&session->lock);
+ /*
+ * stop the tx side in case the target sent an abort rsp but
+ * the initiator was still writing out data.
+ */
iscsi_suspend_tx(conn);
/*
- * clean up task if aborted. grab the recv lock as a writer
+ * we do not stop the recv side because targets have been
+ * well behaved: none has sent a successful tmf response and
+ * then sent more data for the cmd.
*/
- write_lock_bh(conn->recv_lock);
spin_lock(&session->lock);
- fail_command(conn, ctask, DID_ABORT << 16);
+ fail_command(conn, task, DID_ABORT << 16);
conn->tmf_state = TMF_INITIAL;
spin_unlock(&session->lock);
- write_unlock_bh(conn->recv_lock);
iscsi_start_tx(conn);
goto success_unlocked;
case TMF_TIMEDOUT:
@@ -1591,7 +1677,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
case TMF_NOT_FOUND:
if (!sc->SCp.ptr) {
conn->tmf_state = TMF_INITIAL;
- /* ctask completed before tmf abort response */
+ /* task completed before tmf abort response */
debug_scsi("sc completed while abort in progress\n");
goto success;
}
@@ -1604,7 +1690,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
success:
spin_unlock_bh(&session->lock);
success_unlocked:
- debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
+ debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, task->itt);
mutex_unlock(&session->eh_mutex);
return SUCCESS;
@@ -1612,7 +1698,7 @@ failed:
spin_unlock_bh(&session->lock);
failed_unlocked:
debug_scsi("abort failed [sc %p itt 0x%x]\n", sc,
- ctask ? ctask->itt : 0);
+ task ? task->itt : 0);
mutex_unlock(&session->eh_mutex);
return FAILED;
}
@@ -1630,12 +1716,15 @@ static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
int iscsi_eh_device_reset(struct scsi_cmnd *sc)
{
- struct Scsi_Host *host = sc->device->host;
- struct iscsi_session *session = iscsi_hostdata(host->hostdata);
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
struct iscsi_conn *conn;
struct iscsi_tm *hdr;
int rc = FAILED;
+ cls_session = starget_to_session(scsi_target(sc->device));
+ session = cls_session->dd_data;
+
debug_scsi("LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
mutex_lock(&session->eh_mutex);
@@ -1678,13 +1767,11 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
spin_unlock_bh(&session->lock);
iscsi_suspend_tx(conn);
- /* need to grab the recv lock then session lock */
- write_lock_bh(conn->recv_lock);
+
spin_lock(&session->lock);
fail_all_commands(conn, sc->device->lun, DID_ERROR);
conn->tmf_state = TMF_INITIAL;
spin_unlock(&session->lock);
- write_unlock_bh(conn->recv_lock);
iscsi_start_tx(conn);
goto done;
@@ -1760,177 +1847,203 @@ void iscsi_pool_free(struct iscsi_pool *q)
}
EXPORT_SYMBOL_GPL(iscsi_pool_free);
-/*
- * iSCSI Session's hostdata organization:
+/**
+ * iscsi_host_add - add host to system
+ * @shost: scsi host
+ * @pdev: parent device
+ *
+ * This should be called by partial offload and software iscsi drivers
+ * to add a host to the system.
+ */
+int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
+{
+ if (!shost->can_queue)
+ shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX;
+
+ return scsi_add_host(shost, pdev);
+}
+EXPORT_SYMBOL_GPL(iscsi_host_add);
+
+/**
+ * iscsi_host_alloc - allocate a host and driver data
+ * @sht: scsi host template
+ * @dd_data_size: driver host data size
+ * @qdepth: default device queue depth
+ *
+ * This should be called by partial offload and software iscsi drivers.
+ * To access the driver specific memory use the iscsi_host_priv() macro.
+ */
+struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+ int dd_data_size, uint16_t qdepth)
+{
+ struct Scsi_Host *shost;
+
+ shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
+ if (!shost)
+ return NULL;
+ shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
+
+ if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
+ if (qdepth != 0)
+ printk(KERN_ERR "iscsi: invalid queue depth of %d. "
+ "Queue depth must be between 1 and %d.\n",
+ qdepth, ISCSI_MAX_CMD_PER_LUN);
+ qdepth = ISCSI_DEF_CMD_PER_LUN;
+ }
+ shost->cmd_per_lun = qdepth;
+ return shost;
+}
+EXPORT_SYMBOL_GPL(iscsi_host_alloc);
+
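+/*
+ * Usage sketch (hypothetical driver; my_sht, my_hba and pdev are
+ * illustrative names only):
+ *
+ *	shost = iscsi_host_alloc(&my_sht, sizeof(struct my_hba), 0);
+ *	if (!shost)
+ *		return -ENOMEM;
+ *	my_hba = iscsi_host_priv(shost);
+ *	if (iscsi_host_add(shost, &pdev->dev))
+ *		goto free_host;
+ */
+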
+/**
+ * iscsi_host_remove - remove host and sessions
+ * @shost: scsi host
*
- * *------------------* <== hostdata_session(host->hostdata)
- * | ptr to class sess|
- * |------------------| <== iscsi_hostdata(host->hostdata)
- * | iscsi_session |
- * *------------------*
+ * This will also remove any sessions attached to the host, but if userspace
+ * is managing the session at the same time this will break. TODO: add
+ * refcounting to the netlink iscsi interface so a rmmod or host hot unplug
+ * does not remove the memory from under us.
*/
+void iscsi_host_remove(struct Scsi_Host *shost)
+{
+ iscsi_host_for_each_session(shost, iscsi_session_teardown);
+ scsi_remove_host(shost);
+}
+EXPORT_SYMBOL_GPL(iscsi_host_remove);
-#define hostdata_privsize(_sz) (sizeof(unsigned long) + _sz + \
- _sz % sizeof(unsigned long))
+void iscsi_host_free(struct Scsi_Host *shost)
+{
+ struct iscsi_host *ihost = shost_priv(shost);
-#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
+ kfree(ihost->netdev);
+ kfree(ihost->hwaddress);
+ kfree(ihost->initiatorname);
+ scsi_host_put(shost);
+}
+EXPORT_SYMBOL_GPL(iscsi_host_free);
/**
* iscsi_session_setup - create iscsi cls session and host and session
- * @scsit: scsi transport template
* @iscsit: iscsi transport template
- * @cmds_max: scsi host can queue
- * @qdepth: scsi host cmds per lun
- * @cmd_task_size: LLD ctask private data size
- * @mgmt_task_size: LLD mtask private data size
+ * @shost: scsi host
+ * @cmds_max: session can queue
+ * @cmd_task_size: LLD task private data size
* @initial_cmdsn: initial CmdSN
- * @hostno: host no allocated
*
* This can be used by software iscsi_transports that allocate
* a session per scsi host.
- **/
+ *
+ * Callers should set cmds_max to the largest total number (mgmt + scsi) of
+ * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks
+ * for nop handling and login/logout requests.
+ */
struct iscsi_cls_session *
-iscsi_session_setup(struct iscsi_transport *iscsit,
- struct scsi_transport_template *scsit,
- uint16_t cmds_max, uint16_t qdepth,
- int cmd_task_size, int mgmt_task_size,
- uint32_t initial_cmdsn, uint32_t *hostno)
+iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
+ uint16_t cmds_max, int cmd_task_size,
+ uint32_t initial_cmdsn, unsigned int id)
{
- struct Scsi_Host *shost;
struct iscsi_session *session;
struct iscsi_cls_session *cls_session;
- int cmd_i;
+ int cmd_i, scsi_cmds, total_cmds = cmds_max;
- if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
- if (qdepth != 0)
- printk(KERN_ERR "iscsi: invalid queue depth of %d. "
- "Queue depth must be between 1 and %d.\n",
- qdepth, ISCSI_MAX_CMD_PER_LUN);
- qdepth = ISCSI_DEF_CMD_PER_LUN;
+ if (!total_cmds)
+ total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
+ /*
+ * The iscsi layer needs some tasks for nop handling and tmfs,
+ * so cmds_max must be at least ISCSI_MGMT_CMDS_MAX plus one
+ * command for scsi IO.
+ */
+ if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
+ printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+ "must be a power of two that is at least %d.\n",
+ total_cmds, ISCSI_TOTAL_CMDS_MIN);
+ return NULL;
}
- if (!is_power_of_2(cmds_max) || cmds_max >= ISCSI_MGMT_ITT_OFFSET ||
- cmds_max < 2) {
- if (cmds_max != 0)
- printk(KERN_ERR "iscsi: invalid can_queue of %d. "
- "can_queue must be a power of 2 and between "
- "2 and %d - setting to %d.\n", cmds_max,
- ISCSI_MGMT_ITT_OFFSET, ISCSI_DEF_XMIT_CMDS_MAX);
- cmds_max = ISCSI_DEF_XMIT_CMDS_MAX;
+ if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
+ printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+ "must be a power of 2 less than or equal to %d.\n",
+ cmds_max, ISCSI_TOTAL_CMDS_MAX);
+ total_cmds = ISCSI_TOTAL_CMDS_MAX;
}
- shost = scsi_host_alloc(iscsit->host_template,
- hostdata_privsize(sizeof(*session)));
- if (!shost)
- return NULL;
-
- /* the iscsi layer takes one task for reserve */
- shost->can_queue = cmds_max - 1;
- shost->cmd_per_lun = qdepth;
- shost->max_id = 1;
- shost->max_channel = 0;
- shost->max_lun = iscsit->max_lun;
- shost->max_cmd_len = iscsit->max_cmd_len;
- shost->transportt = scsit;
- shost->transportt->create_work_queue = 1;
- shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
- *hostno = shost->host_no;
+ if (!is_power_of_2(total_cmds)) {
+ printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+ "must be a power of 2.\n", total_cmds);
+ total_cmds = rounddown_pow_of_two(total_cmds);
+ if (total_cmds < ISCSI_TOTAL_CMDS_MIN)
+ return NULL;
+ printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n",
+ total_cmds);
+ }
+ scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;
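+ /*
+ * Worked example: a requested cmds_max of 100 is rounded down
+ * to 64 above, leaving 64 - ISCSI_MGMT_CMDS_MAX tasks for
+ * scsi IO.
+ */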
- session = iscsi_hostdata(shost->hostdata);
- memset(session, 0, sizeof(struct iscsi_session));
+ cls_session = iscsi_alloc_session(shost, iscsit,
+ sizeof(struct iscsi_session));
+ if (!cls_session)
+ return NULL;
+ session = cls_session->dd_data;
+ session->cls_session = cls_session;
session->host = shost;
session->state = ISCSI_STATE_FREE;
session->fast_abort = 1;
session->lu_reset_timeout = 15;
session->abort_timeout = 10;
- session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
- session->cmds_max = cmds_max;
+ session->scsi_cmds_max = scsi_cmds;
+ session->cmds_max = total_cmds;
session->queued_cmdsn = session->cmdsn = initial_cmdsn;
session->exp_cmdsn = initial_cmdsn + 1;
session->max_cmdsn = initial_cmdsn + 1;
session->max_r2t = 1;
session->tt = iscsit;
mutex_init(&session->eh_mutex);
+ spin_lock_init(&session->lock);
/* initialize SCSI PDU commands pool */
if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
(void***)&session->cmds,
- cmd_task_size + sizeof(struct iscsi_cmd_task)))
+ cmd_task_size + sizeof(struct iscsi_task)))
goto cmdpool_alloc_fail;
/* pre-format cmds pool with ITT */
for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
- struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
+ struct iscsi_task *task = session->cmds[cmd_i];
if (cmd_task_size)
- ctask->dd_data = &ctask[1];
- ctask->itt = cmd_i;
- INIT_LIST_HEAD(&ctask->running);
- }
-
- spin_lock_init(&session->lock);
-
- /* initialize immediate command pool */
- if (iscsi_pool_init(&session->mgmtpool, session->mgmtpool_max,
- (void***)&session->mgmt_cmds,
- mgmt_task_size + sizeof(struct iscsi_mgmt_task)))
- goto mgmtpool_alloc_fail;
-
-
- /* pre-format immediate cmds pool with ITT */
- for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
- struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
-
- if (mgmt_task_size)
- mtask->dd_data = &mtask[1];
- mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
- INIT_LIST_HEAD(&mtask->running);
+ task->dd_data = &task[1];
+ task->itt = cmd_i;
+ INIT_LIST_HEAD(&task->running);
}
- if (scsi_add_host(shost, NULL))
- goto add_host_fail;
-
if (!try_module_get(iscsit->owner))
- goto cls_session_fail;
-
- cls_session = iscsi_create_session(shost, iscsit, 0);
- if (!cls_session)
- goto module_put;
- *(unsigned long*)shost->hostdata = (unsigned long)cls_session;
+ goto module_get_fail;
+ if (iscsi_add_session(cls_session, id))
+ goto cls_session_fail;
return cls_session;
-module_put:
- module_put(iscsit->owner);
cls_session_fail:
- scsi_remove_host(shost);
-add_host_fail:
- iscsi_pool_free(&session->mgmtpool);
-mgmtpool_alloc_fail:
+ module_put(iscsit->owner);
+module_get_fail:
iscsi_pool_free(&session->cmdpool);
cmdpool_alloc_fail:
- scsi_host_put(shost);
+ iscsi_free_session(cls_session);
return NULL;
}
EXPORT_SYMBOL_GPL(iscsi_session_setup);
/**
* iscsi_session_teardown - destroy session, host, and cls_session
- * shost: scsi host
+ * @cls_session: iscsi session
*
- * This can be used by software iscsi_transports that allocate
- * a session per scsi host.
- **/
+ * The driver must have called iscsi_remove_session before
+ * calling this.
+ */
void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
{
- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
- struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ struct iscsi_session *session = cls_session->dd_data;
struct module *owner = cls_session->transport->owner;
- iscsi_remove_session(cls_session);
- scsi_remove_host(shost);
-
- iscsi_pool_free(&session->mgmtpool);
iscsi_pool_free(&session->cmdpool);
kfree(session->password);
@@ -1938,12 +2051,10 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
kfree(session->username);
kfree(session->username_in);
kfree(session->targetname);
- kfree(session->netdev);
- kfree(session->hwaddress);
kfree(session->initiatorname);
+ kfree(session->ifacename);
- iscsi_free_session(cls_session);
- scsi_host_put(shost);
+ iscsi_destroy_session(cls_session);
module_put(owner);
}
EXPORT_SYMBOL_GPL(iscsi_session_teardown);
@@ -1951,22 +2062,26 @@ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
/**
* iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn
* @cls_session: iscsi_cls_session
+ * @dd_size: private driver data size
* @conn_idx: cid
- **/
+ */
struct iscsi_cls_conn *
-iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+ uint32_t conn_idx)
{
- struct iscsi_session *session = class_to_transport_session(cls_session);
+ struct iscsi_session *session = cls_session->dd_data;
struct iscsi_conn *conn;
struct iscsi_cls_conn *cls_conn;
char *data;
- cls_conn = iscsi_create_conn(cls_session, conn_idx);
+ cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size,
+ conn_idx);
if (!cls_conn)
return NULL;
conn = cls_conn->dd_data;
- memset(conn, 0, sizeof(*conn));
+ memset(conn, 0, sizeof(*conn) + dd_size);
+ conn->dd_data = cls_conn->dd_data + sizeof(*conn);
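+ /*
+ * Layout: cls_conn->dd_data holds struct iscsi_conn immediately
+ * followed by dd_size bytes of LLD private data, which
+ * conn->dd_data points at.
+ */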
conn->session = session;
conn->cls_conn = cls_conn;
conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
@@ -1985,30 +2100,30 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
INIT_LIST_HEAD(&conn->requeue);
INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
- /* allocate login_mtask used for the login/text sequences */
+ /* allocate login_task used for the login/text sequences */
spin_lock_bh(&session->lock);
- if (!__kfifo_get(session->mgmtpool.queue,
- (void*)&conn->login_mtask,
+ if (!__kfifo_get(session->cmdpool.queue,
+ (void*)&conn->login_task,
sizeof(void*))) {
spin_unlock_bh(&session->lock);
- goto login_mtask_alloc_fail;
+ goto login_task_alloc_fail;
}
spin_unlock_bh(&session->lock);
data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
if (!data)
- goto login_mtask_data_alloc_fail;
- conn->login_mtask->data = conn->data = data;
+ goto login_task_data_alloc_fail;
+ conn->login_task->data = conn->data = data;
init_timer(&conn->tmf_timer);
init_waitqueue_head(&conn->ehwait);
return cls_conn;
-login_mtask_data_alloc_fail:
- __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+login_task_data_alloc_fail:
+ __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
sizeof(void*));
-login_mtask_alloc_fail:
+login_task_alloc_fail:
iscsi_destroy_conn(cls_conn);
return NULL;
}
@@ -2068,7 +2183,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
spin_lock_bh(&session->lock);
kfree(conn->data);
kfree(conn->persistent_address);
- __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+ __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
sizeof(void*));
if (session->leadconn == conn)
session->leadconn = NULL;
@@ -2140,7 +2255,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
}
spin_unlock_bh(&session->lock);
- iscsi_unblock_session(session_to_cls(session));
+ iscsi_unblock_session(session->cls_session);
wake_up(&conn->ehwait);
return 0;
}
@@ -2149,21 +2264,23 @@ EXPORT_SYMBOL_GPL(iscsi_conn_start);
static void
flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
{
- struct iscsi_mgmt_task *mtask, *tmp;
+ struct iscsi_task *task, *tmp;
/* handle pending */
- list_for_each_entry_safe(mtask, tmp, &conn->mgmtqueue, running) {
- debug_scsi("flushing pending mgmt task itt 0x%x\n", mtask->itt);
- iscsi_free_mgmt_task(conn, mtask);
+ list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) {
+ debug_scsi("flushing pending mgmt task itt 0x%x\n", task->itt);
+ /* release ref from prep task */
+ __iscsi_put_task(task);
}
/* handle running */
- list_for_each_entry_safe(mtask, tmp, &conn->mgmt_run_list, running) {
- debug_scsi("flushing running mgmt task itt 0x%x\n", mtask->itt);
- iscsi_free_mgmt_task(conn, mtask);
+ list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) {
+ debug_scsi("flushing running mgmt task itt 0x%x\n", task->itt);
+ /* release ref from prep task */
+ __iscsi_put_task(task);
}
- conn->mtask = NULL;
+ conn->task = NULL;
}
static void iscsi_start_session_recovery(struct iscsi_session *session,
@@ -2182,17 +2299,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
}
/*
- * The LLD either freed/unset the lock on us, or userspace called
- * stop but did not create a proper connection (connection was never
- * bound or it was unbound then stop was called).
- */
- if (!conn->recv_lock) {
- spin_unlock_bh(&session->lock);
- mutex_unlock(&session->eh_mutex);
- return;
- }
-
- /*
* When this is called for the in_login state, we only want to clean
* up the login task and connection. We do not need to block and set
* the recovery state again
@@ -2208,11 +2314,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
spin_unlock_bh(&session->lock);
iscsi_suspend_tx(conn);
-
- write_lock_bh(conn->recv_lock);
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
- write_unlock_bh(conn->recv_lock);
-
/*
* for connection level recovery we should not calculate
* header digest. conn->hdr_size used for optimization
@@ -2225,7 +2326,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
if (session->state == ISCSI_STATE_IN_RECOVERY &&
old_stop_stage != STOP_CONN_RECOVER) {
debug_scsi("blocking session\n");
- iscsi_block_session(session_to_cls(session));
+ iscsi_block_session(session->cls_session);
}
}
@@ -2260,7 +2361,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_stop);
int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn, int is_leading)
{
- struct iscsi_session *session = class_to_transport_session(cls_session);
+ struct iscsi_session *session = cls_session->dd_data;
struct iscsi_conn *conn = cls_conn->dd_data;
spin_lock_bh(&session->lock);
@@ -2399,6 +2500,14 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
if (!conn->persistent_address)
return -ENOMEM;
break;
+ case ISCSI_PARAM_IFACE_NAME:
+ if (!session->ifacename)
+ session->ifacename = kstrdup(buf, GFP_KERNEL);
+ break;
+ case ISCSI_PARAM_INITIATOR_NAME:
+ if (!session->initiatorname)
+ session->initiatorname = kstrdup(buf, GFP_KERNEL);
+ break;
default:
return -ENOSYS;
}
@@ -2410,8 +2519,7 @@ EXPORT_SYMBOL_GPL(iscsi_set_param);
int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
enum iscsi_param param, char *buf)
{
- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
- struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ struct iscsi_session *session = cls_session->dd_data;
int len;
switch(param) {
@@ -2466,6 +2574,15 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
case ISCSI_PARAM_PASSWORD_IN:
len = sprintf(buf, "%s\n", session->password_in);
break;
+ case ISCSI_PARAM_IFACE_NAME:
+ len = sprintf(buf, "%s\n", session->ifacename);
+ break;
+ case ISCSI_PARAM_INITIATOR_NAME:
+ if (!session->initiatorname)
+ len = sprintf(buf, "%s\n", "unknown");
+ else
+ len = sprintf(buf, "%s\n", session->initiatorname);
+ break;
default:
return -ENOSYS;
}
@@ -2525,29 +2642,35 @@ EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
char *buf)
{
- struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ struct iscsi_host *ihost = shost_priv(shost);
int len;
switch (param) {
case ISCSI_HOST_PARAM_NETDEV_NAME:
- if (!session->netdev)
+ if (!ihost->netdev)
len = sprintf(buf, "%s\n", "default");
else
- len = sprintf(buf, "%s\n", session->netdev);
+ len = sprintf(buf, "%s\n", ihost->netdev);
break;
case ISCSI_HOST_PARAM_HWADDRESS:
- if (!session->hwaddress)
+ if (!ihost->hwaddress)
len = sprintf(buf, "%s\n", "default");
else
- len = sprintf(buf, "%s\n", session->hwaddress);
+ len = sprintf(buf, "%s\n", ihost->hwaddress);
break;
case ISCSI_HOST_PARAM_INITIATOR_NAME:
- if (!session->initiatorname)
+ if (!ihost->initiatorname)
len = sprintf(buf, "%s\n", "unknown");
else
- len = sprintf(buf, "%s\n", session->initiatorname);
+ len = sprintf(buf, "%s\n", ihost->initiatorname);
+ break;
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ if (!strlen(ihost->local_address))
+ len = sprintf(buf, "%s\n", "unknown");
+ else
+ len = sprintf(buf, "%s\n",
+ ihost->local_address);
break;
-
default:
return -ENOSYS;
}
@@ -2559,20 +2682,20 @@ EXPORT_SYMBOL_GPL(iscsi_host_get_param);
int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
char *buf, int buflen)
{
- struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ struct iscsi_host *ihost = shost_priv(shost);
switch (param) {
case ISCSI_HOST_PARAM_NETDEV_NAME:
- if (!session->netdev)
- session->netdev = kstrdup(buf, GFP_KERNEL);
+ if (!ihost->netdev)
+ ihost->netdev = kstrdup(buf, GFP_KERNEL);
break;
case ISCSI_HOST_PARAM_HWADDRESS:
- if (!session->hwaddress)
- session->hwaddress = kstrdup(buf, GFP_KERNEL);
+ if (!ihost->hwaddress)
+ ihost->hwaddress = kstrdup(buf, GFP_KERNEL);
break;
case ISCSI_HOST_PARAM_INITIATOR_NAME:
- if (!session->initiatorname)
- session->initiatorname = kstrdup(buf, GFP_KERNEL);
+ if (!ihost->initiatorname)
+ ihost->initiatorname = kstrdup(buf, GFP_KERNEL);
break;
default:
return -ENOSYS;
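
Both accessors above now keep the per-host attributes in the Scsi_Host private area and reach them with shost_priv() rather than iscsi_hostdata(). A minimal sketch of the pattern, assuming a private structure shaped like the fields used above (the names here are illustrative, not the exact struct iscsi_host layout):

        struct example_ihost {
                char *netdev;
                char *hwaddress;
                char *initiatorname;
        };

        static void example_set_netdev(struct Scsi_Host *shost, const char *buf)
        {
                struct example_ihost *ihost = shost_priv(shost);

                /* first writer wins, matching the kstrdup-once style above */
                if (!ihost->netdev)
                        ihost->netdev = kstrdup(buf, GFP_KERNEL);
        }
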
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index ec0b0f6e5e1a..e0e018d12653 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -33,6 +33,7 @@ struct lpfc_sli2_slim;
#define LPFC_MAX_SG_SEG_CNT 256 /* sg element count per scsi cmnd */
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
+#define LPFC_VNAME_LEN 100 /* vport symbolic name length */
/*
* Following time intervals are used for adjusting SCSI device
@@ -59,6 +60,9 @@ struct lpfc_sli2_slim;
#define MAX_HBAEVT 32
+/* lpfc wait event data ready flag */
+#define LPFC_DATA_READY (1<<0)
+
enum lpfc_polling_flags {
ENABLE_FCP_RING_POLLING = 0x1,
DISABLE_FCP_RING_INT = 0x2
@@ -425,9 +429,6 @@ struct lpfc_hba {
uint16_t pci_cfg_value;
- uint8_t work_found;
-#define LPFC_MAX_WORKER_ITERATION 4
-
uint8_t fc_linkspeed; /* Link speed after last READ_LA */
uint32_t fc_eventTag; /* event tag for link attention */
@@ -489,8 +490,9 @@ struct lpfc_hba {
uint32_t work_hs; /* HS stored in case of ERRAT */
uint32_t work_status[2]; /* Extra status from SLIM */
- wait_queue_head_t *work_wait;
+ wait_queue_head_t work_waitq;
struct task_struct *worker_thread;
+ long data_flags;
uint32_t hbq_in_use; /* HBQs in use flag */
struct list_head hbqbuf_in_list; /* in-fly hbq buffer list */
@@ -637,6 +639,17 @@ lpfc_is_link_up(struct lpfc_hba *phba)
phba->link_state == LPFC_HBA_READY;
}
+static inline void
+lpfc_worker_wake_up(struct lpfc_hba *phba)
+{
+ /* Set the lpfc data pending flag */
+ set_bit(LPFC_DATA_READY, &phba->data_flags);
+
+ /* Wake up worker thread */
+ wake_up(&phba->work_waitq);
+ return;
+}
+
#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */
#define FC_REG_TEMPERATURE_EVENT 0x20 /* Register for temperature
event */
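
Producers pair with this helper by queueing work first and waking the worker last, outside any lock, as the lpfc_workq_post_event() hunk in lpfc_hbadisc.c below shows. A sketch of the producer side under that assumption:

        spin_lock_irqsave(&phba->hbalock, flags);
        list_add_tail(&evtp->evt_listp, &phba->work_list);
        spin_unlock_irqrestore(&phba->hbalock, flags);

        /* safe without hbalock: the wake-up is just set_bit() + wake_up() */
        lpfc_worker_wake_up(phba);
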
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 960baaf11fb1..37bfa0bd1dae 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1995,8 +1995,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
/* Don't allow mailbox commands to be sent when blocked
* or when in the middle of discovery
*/
- if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO ||
- vport->fc_flag & FC_NDISC_ACTIVE) {
+ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
sysfs_mbox_idle(phba);
spin_unlock_irq(&phba->hbalock);
return -EAGAIN;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 7c9f8317d972..1b8245213b83 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -142,7 +142,7 @@ int lpfc_config_port_post(struct lpfc_hba *);
int lpfc_hba_down_prep(struct lpfc_hba *);
int lpfc_hba_down_post(struct lpfc_hba *);
void lpfc_hba_init(struct lpfc_hba *, uint32_t *);
-int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int, int);
+int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int);
void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int);
int lpfc_online(struct lpfc_hba *);
void lpfc_unblock_mgmt_io(struct lpfc_hba *);
@@ -263,6 +263,7 @@ extern int lpfc_sli_mode;
extern int lpfc_enable_npiv;
int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t);
+int lpfc_vport_symbolic_port_name(struct lpfc_vport *, char *, size_t);
void lpfc_terminate_rport_io(struct fc_rport *);
void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 153afae567b5..7fc74cf5823b 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -101,7 +101,7 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* Not enough posted buffers; Try posting more buffers */
phba->fc_stat.NoRcvBuf++;
if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
- lpfc_post_buffer(phba, pring, 2, 1);
+ lpfc_post_buffer(phba, pring, 2);
return;
}
@@ -151,7 +151,7 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
list_del(&iocbq->list);
lpfc_sli_release_iocbq(phba, iocbq);
- lpfc_post_buffer(phba, pring, i, 1);
+ lpfc_post_buffer(phba, pring, i);
}
}
}
@@ -990,7 +990,7 @@ lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return;
}
-static int
+int
lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
size_t size)
{
@@ -1679,20 +1679,18 @@ lpfc_fdmi_tmo(unsigned long ptr)
{
struct lpfc_vport *vport = (struct lpfc_vport *)ptr;
struct lpfc_hba *phba = vport->phba;
+ uint32_t tmo_posted;
unsigned long iflag;
spin_lock_irqsave(&vport->work_port_lock, iflag);
- if (!(vport->work_port_events & WORKER_FDMI_TMO)) {
+ tmo_posted = vport->work_port_events & WORKER_FDMI_TMO;
+ if (!tmo_posted)
vport->work_port_events |= WORKER_FDMI_TMO;
- spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+ spin_unlock_irqrestore(&vport->work_port_lock, iflag);
- spin_lock_irqsave(&phba->hbalock, iflag);
- if (phba->work_wait)
- lpfc_worker_wake_up(phba);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- }
- else
- spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+ if (!tmo_posted)
+ lpfc_worker_wake_up(phba);
+ return;
}
void
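
The same shape recurs in lpfc_els_timeout, lpfc_fabric_block_timeout, lpfc_disc_timeout, lpfc_hb_timeout and lpfc_mbox_timeout below: test-and-set a "posted" bit under the work-port lock, then wake the worker outside the lock, and only for the first poster. A generic sketch (WORKER_FOO_TMO is a placeholder event bit):

        spin_lock_irqsave(&vport->work_port_lock, iflag);
        tmo_posted = vport->work_port_events & WORKER_FOO_TMO;
        if (!tmo_posted)
                vport->work_port_events |= WORKER_FOO_TMO;
        spin_unlock_irqrestore(&vport->work_port_lock, iflag);

        /* wake outside the lock; duplicate posters skip the wake-up */
        if (!tmo_posted)
                lpfc_worker_wake_up(phba);
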
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 886c5f1b11d2..f54e0f7eaee3 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1754,29 +1754,34 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_work_evt *evtp;
+ if (!(nlp->nlp_flag & NLP_DELAY_TMO))
+ return;
spin_lock_irq(shost->host_lock);
nlp->nlp_flag &= ~NLP_DELAY_TMO;
spin_unlock_irq(shost->host_lock);
del_timer_sync(&nlp->nlp_delayfunc);
nlp->nlp_last_elscmd = 0;
-
if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
list_del_init(&nlp->els_retry_evt.evt_listp);
/* Decrement nlp reference count held for the delayed retry */
evtp = &nlp->els_retry_evt;
lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
}
-
if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
spin_lock_irq(shost->host_lock);
nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
if (vport->num_disc_nodes) {
- /* Check to see if there are more
- * PLOGIs to be sent
- */
- lpfc_more_plogi(vport);
-
+ if (vport->port_state < LPFC_VPORT_READY) {
+ /* Check if there are more ADISCs to be sent */
+ lpfc_more_adisc(vport);
+ if ((vport->num_disc_nodes == 0) &&
+ (vport->fc_npr_cnt))
+ lpfc_els_disc_plogi(vport);
+ } else {
+ /* Check if there are more PLOGIs to be sent */
+ lpfc_more_plogi(vport);
+ }
if (vport->num_disc_nodes == 0) {
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_NDISC_ACTIVE;
@@ -1798,10 +1803,6 @@ lpfc_els_retry_delay(unsigned long ptr)
unsigned long flags;
struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
- ndlp = (struct lpfc_nodelist *) ptr;
- phba = ndlp->vport->phba;
- evtp = &ndlp->els_retry_evt;
-
spin_lock_irqsave(&phba->hbalock, flags);
if (!list_empty(&evtp->evt_listp)) {
spin_unlock_irqrestore(&phba->hbalock, flags);
@@ -1812,11 +1813,11 @@ lpfc_els_retry_delay(unsigned long ptr)
* count until the queued work is done
*/
evtp->evt_arg1 = lpfc_nlp_get(ndlp);
- evtp->evt = LPFC_EVT_ELS_RETRY;
- list_add_tail(&evtp->evt_listp, &phba->work_list);
- if (phba->work_wait)
+ if (evtp->evt_arg1) {
+ evtp->evt = LPFC_EVT_ELS_RETRY;
+ list_add_tail(&evtp->evt_listp, &phba->work_list);
lpfc_worker_wake_up(phba);
-
+ }
spin_unlock_irqrestore(&phba->hbalock, flags);
return;
}
@@ -2761,10 +2762,11 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
npr = (PRLI *) pcmd;
vpd = &phba->vpd;
/*
- * If our firmware version is 3.20 or later,
- * set the following bits for FC-TAPE support.
+ * If the remote port is a target and our firmware version is 3.20 or
+ * later, set the following bits for FC-TAPE support.
*/
- if (vpd->rev.feaLevelHigh >= 0x02) {
+ if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
+ (vpd->rev.feaLevelHigh >= 0x02)) {
npr->ConfmComplAllowed = 1;
npr->Retry = 1;
npr->TaskRetryIdReq = 1;
@@ -3056,27 +3058,16 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp = NULL;
- /* Look at all nodes effected by pending RSCNs and move
- * them to NPR state.
- */
-
+ /* Move all nodes affected by pending RSCNs to NPR state. */
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (!NLP_CHK_NODE_ACT(ndlp) ||
- ndlp->nlp_state == NLP_STE_UNUSED_NODE ||
- lpfc_rscn_payload_check(vport, ndlp->nlp_DID) == 0)
+ (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
+ !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
continue;
-
lpfc_disc_state_machine(vport, ndlp, NULL,
- NLP_EVT_DEVICE_RECOVERY);
-
- /*
- * Make sure NLP_DELAY_TMO is NOT running after a device
- * recovery event.
- */
- if (ndlp->nlp_flag & NLP_DELAY_TMO)
- lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ NLP_EVT_DEVICE_RECOVERY);
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
}
-
return 0;
}
@@ -3781,91 +3772,27 @@ static int
lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *fan_ndlp)
{
- struct lpfc_dmabuf *pcmd;
+ struct lpfc_hba *phba = vport->phba;
uint32_t *lp;
- IOCB_t *icmd;
- uint32_t cmd, did;
FAN *fp;
- struct lpfc_nodelist *ndlp, *next_ndlp;
- struct lpfc_hba *phba = vport->phba;
-
- /* FAN received */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "0265 FAN received\n");
- icmd = &cmdiocb->iocb;
- did = icmd->un.elsreq64.remoteID;
- pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
- lp = (uint32_t *)pcmd->virt;
-
- cmd = *lp++;
- fp = (FAN *) lp;
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
+ lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
+ fp = (FAN *) ++lp;
/* FAN received; FAN does not have a reply sequence */
-
- if (phba->pport->port_state == LPFC_LOCAL_CFG_LINK) {
+ if ((vport == phba->pport) &&
+ (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
- sizeof(struct lpfc_name)) != 0) ||
+ sizeof(struct lpfc_name))) ||
(memcmp(&phba->fc_fabparam.portName, &fp->FportName,
- sizeof(struct lpfc_name)) != 0)) {
- /*
- * This node has switched fabrics. FLOGI is required
- * Clean up the old rpi's
- */
-
- list_for_each_entry_safe(ndlp, next_ndlp,
- &vport->fc_nodes, nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
- if (ndlp->nlp_state != NLP_STE_NPR_NODE)
- continue;
- if (ndlp->nlp_type & NLP_FABRIC) {
- /*
- * Clean up old Fabric, Nameserver and
- * other NLP_FABRIC logins
- */
- lpfc_drop_node(vport, ndlp);
-
- } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
- /* Fail outstanding I/O now since this
- * device is marked for PLOGI
- */
- lpfc_unreg_rpi(vport, ndlp);
- }
- }
-
+ sizeof(struct lpfc_name)))) {
+ /* This port has switched fabrics. FLOGI is required */
lpfc_initial_flogi(vport);
- return 0;
- }
- /* Discovery not needed,
- * move the nodes to their original state.
- */
- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
- nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
- if (ndlp->nlp_state != NLP_STE_NPR_NODE)
- continue;
-
- switch (ndlp->nlp_prev_state) {
- case NLP_STE_UNMAPPED_NODE:
- ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
- lpfc_nlp_set_state(vport, ndlp,
- NLP_STE_UNMAPPED_NODE);
- break;
-
- case NLP_STE_MAPPED_NODE:
- ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
- lpfc_nlp_set_state(vport, ndlp,
- NLP_STE_MAPPED_NODE);
- break;
-
- default:
- break;
- }
+ } else {
+ /* FAN verified - skip FLOGI */
+ vport->fc_myDID = vport->fc_prevDID;
+ lpfc_issue_fabric_reglogin(vport);
}
-
- /* Start discovery - this should just do CLEAR_LA */
- lpfc_disc_start(vport);
}
return 0;
}
@@ -3875,20 +3802,17 @@ lpfc_els_timeout(unsigned long ptr)
{
struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
struct lpfc_hba *phba = vport->phba;
+ uint32_t tmo_posted;
unsigned long iflag;
spin_lock_irqsave(&vport->work_port_lock, iflag);
- if ((vport->work_port_events & WORKER_ELS_TMO) == 0) {
+ tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
+ if (!tmo_posted)
vport->work_port_events |= WORKER_ELS_TMO;
- spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+ spin_unlock_irqrestore(&vport->work_port_lock, iflag);
- spin_lock_irqsave(&phba->hbalock, iflag);
- if (phba->work_wait)
- lpfc_worker_wake_up(phba);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- }
- else
- spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+ if (!tmo_posted)
+ lpfc_worker_wake_up(phba);
return;
}
@@ -3933,9 +3857,6 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
els_command == ELS_CMD_FDISC)
continue;
- if (vport != piocb->vport)
- continue;
-
if (piocb->drvrTimeout > 0) {
if (piocb->drvrTimeout >= timeout)
piocb->drvrTimeout -= timeout;
@@ -4089,7 +4010,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
cmd = *payload;
if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
- lpfc_post_buffer(phba, pring, 1, 1);
+ lpfc_post_buffer(phba, pring, 1);
did = icmd->un.rcvels.remoteID;
if (icmd->ulpStatus) {
@@ -4398,7 +4319,7 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.NoRcvBuf++;
/* Not enough posted buffers; Try posting more buffers */
if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
- lpfc_post_buffer(phba, pring, 0, 1);
+ lpfc_post_buffer(phba, pring, 0);
return;
}
@@ -4842,18 +4763,16 @@ lpfc_fabric_block_timeout(unsigned long ptr)
struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
unsigned long iflags;
uint32_t tmo_posted;
+
spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
if (!tmo_posted)
phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
- if (!tmo_posted) {
- spin_lock_irqsave(&phba->hbalock, iflags);
- if (phba->work_wait)
- lpfc_worker_wake_up(phba);
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- }
+ if (!tmo_posted)
+ lpfc_worker_wake_up(phba);
+ return;
}
static void
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 7cb68feb04fd..a98d11bf3576 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -153,11 +153,11 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
* count until this queued work is done
*/
evtp->evt_arg1 = lpfc_nlp_get(ndlp);
- evtp->evt = LPFC_EVT_DEV_LOSS;
- list_add_tail(&evtp->evt_listp, &phba->work_list);
- if (phba->work_wait)
- wake_up(phba->work_wait);
-
+ if (evtp->evt_arg1) {
+ evtp->evt = LPFC_EVT_DEV_LOSS;
+ list_add_tail(&evtp->evt_listp, &phba->work_list);
+ lpfc_worker_wake_up(phba);
+ }
spin_unlock_irq(&phba->hbalock);
return;
@@ -276,14 +276,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
}
-
-void
-lpfc_worker_wake_up(struct lpfc_hba *phba)
-{
- wake_up(phba->work_wait);
- return;
-}
-
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
@@ -429,6 +421,8 @@ lpfc_work_done(struct lpfc_hba *phba)
|| (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
if (pring->flag & LPFC_STOP_IOCB_EVENT) {
pring->flag |= LPFC_DEFERRED_RING_EVENT;
+ /* Set the lpfc data pending flag */
+ set_bit(LPFC_DATA_READY, &phba->data_flags);
} else {
pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
lpfc_sli_handle_slow_ring_event(phba, pring,
@@ -459,69 +453,29 @@ lpfc_work_done(struct lpfc_hba *phba)
lpfc_work_list_done(phba);
}
-static int
-check_work_wait_done(struct lpfc_hba *phba)
-{
- struct lpfc_vport *vport;
- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
- int rc = 0;
-
- spin_lock_irq(&phba->hbalock);
- list_for_each_entry(vport, &phba->port_list, listentry) {
- if (vport->work_port_events) {
- rc = 1;
- break;
- }
- }
- if (rc || phba->work_ha || (!list_empty(&phba->work_list)) ||
- kthread_should_stop() || pring->flag & LPFC_DEFERRED_RING_EVENT) {
- rc = 1;
- phba->work_found++;
- } else
- phba->work_found = 0;
- spin_unlock_irq(&phba->hbalock);
- return rc;
-}
-
-
int
lpfc_do_work(void *p)
{
struct lpfc_hba *phba = p;
int rc;
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);
set_user_nice(current, -20);
- phba->work_wait = &work_waitq;
- phba->work_found = 0;
+ phba->data_flags = 0;
while (1) {
-
- rc = wait_event_interruptible(work_waitq,
- check_work_wait_done(phba));
-
+ /* sleep until there is worker queue activity to handle */
+ rc = wait_event_interruptible(phba->work_waitq,
+ (test_and_clear_bit(LPFC_DATA_READY,
+ &phba->data_flags)
+ || kthread_should_stop()));
BUG_ON(rc);
if (kthread_should_stop())
break;
+ /* Attend to pending lpfc data processing */
lpfc_work_done(phba);
-
- /* If there is alot of slow ring work, like during link up
- * check_work_wait_done() may cause this thread to not give
- * up the CPU for very long periods of time. This may cause
- * soft lockups or other problems. To avoid these situations
- * give up the CPU here after LPFC_MAX_WORKER_ITERATION
- * consecutive iterations.
- */
- if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
- phba->work_found = 0;
- schedule();
- }
}
- spin_lock_irq(&phba->hbalock);
- phba->work_wait = NULL;
- spin_unlock_irq(&phba->hbalock);
return 0;
}
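
Stripped of the lpfc specifics, the reworked loop is the standard kthread idiom: sleep on a waitqueue until a data-ready bit is set or the thread is asked to stop. A self-contained sketch of that shape (error handling elided):

        static int example_do_work(void *p)
        {
                struct lpfc_hba *phba = p;

                set_user_nice(current, -20);
                while (!kthread_should_stop()) {
                        wait_event_interruptible(phba->work_waitq,
                                test_and_clear_bit(LPFC_DATA_READY,
                                                   &phba->data_flags) ||
                                kthread_should_stop());
                        /* drain whatever was posted before we woke */
                        lpfc_work_done(phba);
                }
                return 0;
        }
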
@@ -551,10 +505,10 @@ lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
spin_lock_irqsave(&phba->hbalock, flags);
list_add_tail(&evtp->evt_listp, &phba->work_list);
- if (phba->work_wait)
- lpfc_worker_wake_up(phba);
spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_worker_wake_up(phba);
+
return 1;
}
@@ -963,6 +917,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
if (phba->fc_topology == TOPOLOGY_LOOP) {
phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
+ if (phba->cfg_enable_npiv)
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+ "1309 Link Up Event npiv not supported in loop "
+ "topology\n");
/* Get Loop Map information */
if (la->il)
vport->fc_flag |= FC_LBIT;
@@ -1087,6 +1045,8 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
MAILBOX_t *mb = &pmb->mb;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+ /* Unblock ELS traffic */
+ phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
/* Check for error */
if (mb->mbxStatus) {
lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
@@ -1650,7 +1610,6 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_DID, old_state, state);
if (old_state == NLP_STE_NPR_NODE &&
- (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
state != NLP_STE_NPR_NODE)
lpfc_cancel_retry_delay_tmo(vport, ndlp);
if (old_state == NLP_STE_UNMAPPED_NODE) {
@@ -1687,8 +1646,7 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
- lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
spin_lock_irq(shost->host_lock);
@@ -1701,8 +1659,7 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
static void
lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
- if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
- lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
@@ -2121,10 +2078,8 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_last_elscmd = 0;
del_timer_sync(&ndlp->nlp_delayfunc);
- if (!list_empty(&ndlp->els_retry_evt.evt_listp))
- list_del_init(&ndlp->els_retry_evt.evt_listp);
- if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
- list_del_init(&ndlp->dev_loss_evt.evt_listp);
+ list_del_init(&ndlp->els_retry_evt.evt_listp);
+ list_del_init(&ndlp->dev_loss_evt.evt_listp);
lpfc_unreg_rpi(vport, ndlp);
@@ -2144,10 +2099,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
LPFC_MBOXQ_t *mbox;
int rc;
- if (ndlp->nlp_flag & NLP_DELAY_TMO) {
- lpfc_cancel_retry_delay_tmo(vport, ndlp);
- }
-
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) {
/* For this case we need to cleanup the default rpi
* allocated by the firmware.
@@ -2317,8 +2269,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
/* Since this node is marked for discovery,
* delay timeout is not needed.
*/
- if (ndlp->nlp_flag & NLP_DELAY_TMO)
- lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
} else
ndlp = NULL;
} else {
@@ -2643,21 +2594,20 @@ lpfc_disc_timeout(unsigned long ptr)
{
struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
struct lpfc_hba *phba = vport->phba;
+ uint32_t tmo_posted;
unsigned long flags = 0;
if (unlikely(!phba))
return;
- if ((vport->work_port_events & WORKER_DISC_TMO) == 0) {
- spin_lock_irqsave(&vport->work_port_lock, flags);
+ spin_lock_irqsave(&vport->work_port_lock, flags);
+ tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
+ if (!tmo_posted)
vport->work_port_events |= WORKER_DISC_TMO;
- spin_unlock_irqrestore(&vport->work_port_lock, flags);
+ spin_unlock_irqrestore(&vport->work_port_lock, flags);
- spin_lock_irqsave(&phba->hbalock, flags);
- if (phba->work_wait)
- lpfc_worker_wake_up(phba);
- spin_unlock_irqrestore(&phba->hbalock, flags);
- }
+ if (!tmo_posted)
+ lpfc_worker_wake_up(phba);
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index fa757b251f82..5b6e5395c8eb 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -145,8 +145,10 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
return -ERESTART;
}
- if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp)
+ if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
+ mempool_free(pmb, phba->mbox_mem_pool);
return -EINVAL;
+ }
/* Save information as VPD data */
vp->rev.rBit = 1;
@@ -551,18 +553,18 @@ static void
lpfc_hb_timeout(unsigned long ptr)
{
struct lpfc_hba *phba;
+ uint32_t tmo_posted;
unsigned long iflag;
phba = (struct lpfc_hba *)ptr;
spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
- if (!(phba->pport->work_port_events & WORKER_HB_TMO))
+ tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
+ if (!tmo_posted)
phba->pport->work_port_events |= WORKER_HB_TMO;
spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
- spin_lock_irqsave(&phba->hbalock, iflag);
- if (phba->work_wait)
- wake_up(phba->work_wait);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
+ if (!tmo_posted)
+ lpfc_worker_wake_up(phba);
return;
}
@@ -851,6 +853,8 @@ lpfc_handle_latt(struct lpfc_hba *phba)
lpfc_read_la(phba, pmb, mp);
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
pmb->vport = vport;
+ /* Block ELS IOCBs until we have processed this mbox command */
+ phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
rc = 4;
@@ -866,6 +870,7 @@ lpfc_handle_latt(struct lpfc_hba *phba)
return;
lpfc_handle_latt_free_mbuf:
+ phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
kfree(mp);
@@ -1194,8 +1199,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
/* Returns the number of buffers NOT posted. */
/**************************************************/
int
-lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt,
- int type)
+lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
IOCB_t *icmd;
struct lpfc_iocbq *iocb;
@@ -1295,7 +1299,7 @@ lpfc_post_rcv_buf(struct lpfc_hba *phba)
struct lpfc_sli *psli = &phba->sli;
/* Ring 0, ELS / CT buffers */
- lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1);
+ lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
/* Ring 2 - FCP no buffers needed */
return 0;
@@ -1454,6 +1458,15 @@ lpfc_cleanup(struct lpfc_vport *vport)
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RM);
+
+ /* nlp_type zero and nlp_flag zero are both undefined, and
+ * nlp_state is unused; this happens when an initiator has
+ * logged into us, so clean up this ndlp.
+ */
+ if ((ndlp->nlp_type == 0) && (ndlp->nlp_flag == 0) &&
+ (ndlp->nlp_state == 0))
+ lpfc_nlp_put(ndlp);
}
/* At this point, ALL ndlp's should be gone
@@ -2101,6 +2114,9 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT);
phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
+ /* Initialize the wait queue head for the kernel thread */
+ init_waitqueue_head(&phba->work_waitq);
+
/* Startup the kernel thread for this host adapter. */
phba->worker_thread = kthread_run(lpfc_do_work, phba,
"lpfc_worker_%d", phba->brd_no);
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index d08c4c890744..6688a8689b56 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -235,10 +235,7 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
(iocb->iocb_cmpl) (phba, iocb, iocb);
}
}
-
- /* If we are delaying issuing an ELS command, cancel it */
- if (ndlp->nlp_flag & NLP_DELAY_TMO)
- lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
+ lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
return 0;
}
@@ -249,7 +246,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *pcmd;
- struct lpfc_work_evt *evtp;
uint32_t *lp;
IOCB_t *icmd;
struct serv_parm *sp;
@@ -425,73 +421,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp, mbox);
return 1;
}
-
- /* If the remote NPort logs into us, before we can initiate
- * discovery to them, cleanup the NPort from discovery accordingly.
- */
- if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag &= ~NLP_DELAY_TMO;
- spin_unlock_irq(shost->host_lock);
- del_timer_sync(&ndlp->nlp_delayfunc);
- ndlp->nlp_last_elscmd = 0;
-
- if (!list_empty(&ndlp->els_retry_evt.evt_listp)) {
- list_del_init(&ndlp->els_retry_evt.evt_listp);
- /* Decrement ndlp reference count held for the
- * delayed retry
- */
- evtp = &ndlp->els_retry_evt;
- lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
- }
-
- if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
-
- if ((ndlp->nlp_flag & NLP_ADISC_SND) &&
- (vport->num_disc_nodes)) {
- /* Check to see if there are more
- * ADISCs to be sent
- */
- lpfc_more_adisc(vport);
-
- if ((vport->num_disc_nodes == 0) &&
- (vport->fc_npr_cnt))
- lpfc_els_disc_plogi(vport);
-
- if (vport->num_disc_nodes == 0) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_NDISC_ACTIVE;
- spin_unlock_irq(shost->host_lock);
- lpfc_can_disctmo(vport);
- lpfc_end_rscn(vport);
- }
- }
- }
- } else if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
- (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
- (vport->num_disc_nodes)) {
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
- spin_unlock_irq(shost->host_lock);
- /* Check to see if there are more
- * PLOGIs to be sent
- */
- lpfc_more_plogi(vport);
- if (vport->num_disc_nodes == 0) {
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_NDISC_ACTIVE;
- spin_unlock_irq(shost->host_lock);
- lpfc_can_disctmo(vport);
- lpfc_end_rscn(vport);
- }
- }
-
lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
return 1;
-
out:
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
@@ -574,7 +505,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
else
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
- if (!(ndlp->nlp_type & NLP_FABRIC) ||
+ if ((!(ndlp->nlp_type & NLP_FABRIC) &&
+ ((ndlp->nlp_type & NLP_FCP_TARGET) ||
+ !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
/* Only try to re-login if this is NOT a Fabric Node */
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
@@ -751,6 +684,7 @@ static uint32_t
lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb = arg;
struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -776,7 +710,22 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
NULL);
} else {
- lpfc_rcv_plogi(vport, ndlp, cmdiocb);
+ if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
+ (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
+ (vport->num_disc_nodes)) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(shost->host_lock);
+ /* Check if there are more PLOGIs to be sent */
+ lpfc_more_plogi(vport);
+ if (vport->num_disc_nodes == 0) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
+ lpfc_end_rscn(vport);
+ }
+ }
} /* If our portname was less */
return ndlp->nlp_state;
@@ -1040,6 +989,7 @@ static uint32_t
lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb;
@@ -1048,9 +998,28 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
cmdiocb = (struct lpfc_iocbq *) arg;
- if (lpfc_rcv_plogi(vport, ndlp, cmdiocb))
- return ndlp->nlp_state;
+ if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
+ if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(shost->host_lock);
+ if (vport->num_disc_nodes) {
+ lpfc_more_adisc(vport);
+ if ((vport->num_disc_nodes == 0) &&
+ (vport->fc_npr_cnt))
+ lpfc_els_disc_plogi(vport);
+ if (vport->num_disc_nodes == 0) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_NDISC_ACTIVE;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_can_disctmo(vport);
+ lpfc_end_rscn(vport);
+ }
+ }
+ }
+ return ndlp->nlp_state;
+ }
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
@@ -1742,24 +1711,21 @@ lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
/* Ignore PLOGI if we have an outstanding LOGO */
- if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) {
+ if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
return ndlp->nlp_state;
- }
-
if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
spin_unlock_irq(shost->host_lock);
- return ndlp->nlp_state;
- }
-
- /* send PLOGI immediately, move to PLOGI issue state */
- if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
- ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
- lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+ } else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
+ /* send PLOGI immediately, move to PLOGI issue state */
+ if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+ ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+ lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+ }
}
-
return ndlp->nlp_state;
}
@@ -1810,7 +1776,6 @@ lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
lpfc_rcv_padisc(vport, ndlp, cmdiocb);
-
/*
* Do not start discovery if discovery is about to start
* or discovery in progress for this node. Starting discovery
@@ -1973,9 +1938,7 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
spin_unlock_irq(shost->host_lock);
- if (ndlp->nlp_flag & NLP_DELAY_TMO) {
- lpfc_cancel_retry_delay_tmo(vport, ndlp);
- }
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
return ndlp->nlp_state;
}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 0910a9ab76a5..c94da4f2b8a6 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -50,6 +50,7 @@ void
lpfc_adjust_queue_depth(struct lpfc_hba *phba)
{
unsigned long flags;
+ uint32_t evt_posted;
spin_lock_irqsave(&phba->hbalock, flags);
atomic_inc(&phba->num_rsrc_err);
@@ -65,17 +66,13 @@ lpfc_adjust_queue_depth(struct lpfc_hba *phba)
spin_unlock_irqrestore(&phba->hbalock, flags);
spin_lock_irqsave(&phba->pport->work_port_lock, flags);
- if ((phba->pport->work_port_events &
- WORKER_RAMP_DOWN_QUEUE) == 0) {
+ evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
+ if (!evt_posted)
phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
- }
spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
- spin_lock_irqsave(&phba->hbalock, flags);
- if (phba->work_wait)
- wake_up(phba->work_wait);
- spin_unlock_irqrestore(&phba->hbalock, flags);
-
+ if (!evt_posted)
+ lpfc_worker_wake_up(phba);
return;
}
@@ -89,6 +86,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
{
unsigned long flags;
struct lpfc_hba *phba = vport->phba;
+ uint32_t evt_posted;
atomic_inc(&phba->num_cmd_success);
if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
@@ -103,16 +101,14 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
spin_unlock_irqrestore(&phba->hbalock, flags);
spin_lock_irqsave(&phba->pport->work_port_lock, flags);
- if ((phba->pport->work_port_events &
- WORKER_RAMP_UP_QUEUE) == 0) {
+ evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
+ if (!evt_posted)
phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
- }
spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
- spin_lock_irqsave(&phba->hbalock, flags);
- if (phba->work_wait)
- wake_up(phba->work_wait);
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ if (!evt_posted)
+ lpfc_worker_wake_up(phba);
+ return;
}
void
@@ -609,9 +605,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
result = cmd->result;
sdev = cmd->device;
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
- spin_lock_irqsave(sdev->host->host_lock, flags);
- lpfc_cmd->pCmd = NULL; /* This must be done before scsi_done */
- spin_unlock_irqrestore(sdev->host->host_lock, flags);
cmd->scsi_done(cmd);
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
@@ -620,6 +613,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
* wake up the thread.
*/
spin_lock_irqsave(sdev->host->host_lock, flags);
+ lpfc_cmd->pCmd = NULL;
if (lpfc_cmd->waitq)
wake_up(lpfc_cmd->waitq);
spin_unlock_irqrestore(sdev->host->host_lock, flags);
@@ -690,6 +684,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
* wake up the thread.
*/
spin_lock_irqsave(sdev->host->host_lock, flags);
+ lpfc_cmd->pCmd = NULL;
if (lpfc_cmd->waitq)
wake_up(lpfc_cmd->waitq);
spin_unlock_irqrestore(sdev->host->host_lock, flags);
@@ -849,14 +844,15 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
struct lpfc_iocbq *iocbq;
struct lpfc_iocbq *iocbqrsp;
int ret;
+ int status;
if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
return FAILED;
lpfc_cmd->rdata = rdata;
- ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
+ status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
FCP_TARGET_RESET);
- if (!ret)
+ if (!status)
return FAILED;
iocbq = &lpfc_cmd->cur_iocbq;
@@ -869,12 +865,15 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
- ret = lpfc_sli_issue_iocb_wait(phba,
+ status = lpfc_sli_issue_iocb_wait(phba,
&phba->sli.ring[phba->sli.fcp_ring],
iocbq, iocbqrsp, lpfc_cmd->timeout);
- if (ret != IOCB_SUCCESS) {
- if (ret == IOCB_TIMEDOUT)
+ if (status != IOCB_SUCCESS) {
+ if (status == IOCB_TIMEDOUT) {
iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
+ ret = TIMEOUT_ERROR;
+ } else
+ ret = FAILED;
lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
} else {
ret = SUCCESS;
@@ -1142,121 +1141,96 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
struct lpfc_iocbq *iocbq, *iocbqrsp;
struct lpfc_rport_data *rdata = cmnd->device->hostdata;
struct lpfc_nodelist *pnode = rdata->pnode;
- uint32_t cmd_result = 0, cmd_status = 0;
- int ret = FAILED;
- int iocb_status = IOCB_SUCCESS;
- int cnt, loopcnt;
+ unsigned long later;
+ int ret = SUCCESS;
+ int status;
+ int cnt;
lpfc_block_error_handler(cmnd);
- loopcnt = 0;
/*
* If target is not in a MAPPED state, delay the reset until
* target is rediscovered or devloss timeout expires.
*/
- while (1) {
+ later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+ while (time_after(later, jiffies)) {
if (!pnode || !NLP_CHK_NODE_ACT(pnode))
- goto out;
-
- if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
- schedule_timeout_uninterruptible(msecs_to_jiffies(500));
- loopcnt++;
- rdata = cmnd->device->hostdata;
- if (!rdata ||
- (loopcnt > ((vport->cfg_devloss_tmo * 2) + 1))){
- lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0721 LUN Reset rport "
- "failure: cnt x%x rdata x%p\n",
- loopcnt, rdata);
- goto out;
- }
- pnode = rdata->pnode;
- if (!pnode || !NLP_CHK_NODE_ACT(pnode))
- goto out;
- }
+ return FAILED;
if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
break;
+ schedule_timeout_uninterruptible(msecs_to_jiffies(500));
+ rdata = cmnd->device->hostdata;
+ if (!rdata)
+ break;
+ pnode = rdata->pnode;
+ }
+ if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "0721 LUN Reset rport "
+ "failure: msec x%x rdata x%p\n",
+ jiffies_to_msecs(jiffies - later), rdata);
+ return FAILED;
}
-
lpfc_cmd = lpfc_get_scsi_buf(phba);
if (lpfc_cmd == NULL)
- goto out;
-
+ return FAILED;
lpfc_cmd->timeout = 60;
lpfc_cmd->rdata = rdata;
- ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, cmnd->device->lun,
- FCP_TARGET_RESET);
- if (!ret)
- goto out_free_scsi_buf;
-
+ status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd,
+ cmnd->device->lun,
+ FCP_TARGET_RESET);
+ if (!status) {
+ lpfc_release_scsi_buf(phba, lpfc_cmd);
+ return FAILED;
+ }
iocbq = &lpfc_cmd->cur_iocbq;
/* get a buffer for this IOCB command response */
iocbqrsp = lpfc_sli_get_iocbq(phba);
- if (iocbqrsp == NULL)
- goto out_free_scsi_buf;
-
+ if (iocbqrsp == NULL) {
+ lpfc_release_scsi_buf(phba, lpfc_cmd);
+ return FAILED;
+ }
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0703 Issue target reset to TGT %d LUN %d "
"rpi x%x nlp_flag x%x\n", cmnd->device->id,
cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
- iocb_status = lpfc_sli_issue_iocb_wait(phba,
- &phba->sli.ring[phba->sli.fcp_ring],
- iocbq, iocbqrsp, lpfc_cmd->timeout);
-
- if (iocb_status == IOCB_TIMEDOUT)
+ status = lpfc_sli_issue_iocb_wait(phba,
+ &phba->sli.ring[phba->sli.fcp_ring],
+ iocbq, iocbqrsp, lpfc_cmd->timeout);
+ if (status == IOCB_TIMEDOUT) {
iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
-
- if (iocb_status == IOCB_SUCCESS)
- ret = SUCCESS;
- else
- ret = iocb_status;
-
- cmd_result = iocbqrsp->iocb.un.ulpWord[4];
- cmd_status = iocbqrsp->iocb.ulpStatus;
-
+ ret = TIMEOUT_ERROR;
+ } else {
+ if (status != IOCB_SUCCESS)
+ ret = FAILED;
+ lpfc_release_scsi_buf(phba, lpfc_cmd);
+ }
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "0713 SCSI layer issued device reset (%d, %d) "
+ "return x%x status x%x result x%x\n",
+ cmnd->device->id, cmnd->device->lun, ret,
+ iocbqrsp->iocb.ulpStatus,
+ iocbqrsp->iocb.un.ulpWord[4]);
lpfc_sli_release_iocbq(phba, iocbqrsp);
-
- /*
- * All outstanding txcmplq I/Os should have been aborted by the device.
- * Unfortunately, some targets do not abide by this forcing the driver
- * to double check.
- */
cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
- LPFC_CTX_LUN);
+ LPFC_CTX_TGT);
if (cnt)
lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
cmnd->device->id, cmnd->device->lun,
- LPFC_CTX_LUN);
- loopcnt = 0;
- while(cnt) {
- schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
-
- if (++loopcnt
- > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
- break;
-
+ LPFC_CTX_TGT);
+ later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+ while (time_after(later, jiffies) && cnt) {
+ schedule_timeout_uninterruptible(msecs_to_jiffies(20));
cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
- cmnd->device->lun, LPFC_CTX_LUN);
+ cmnd->device->lun, LPFC_CTX_TGT);
}
-
if (cnt) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0719 device reset I/O flush failure: "
"cnt x%x\n", cnt);
ret = FAILED;
}
-
-out_free_scsi_buf:
- if (iocb_status != IOCB_TIMEDOUT) {
- lpfc_release_scsi_buf(phba, lpfc_cmd);
- }
- lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0713 SCSI layer issued device reset (%d, %d) "
- "return x%x status x%x result x%x\n",
- cmnd->device->id, cmnd->device->lun, ret,
- cmd_status, cmd_result);
-out:
return ret;
}
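
The retry counters of the old loop are replaced by an absolute jiffies deadline, which keeps the bound explicit and immune to scheduler jitter. The idiom in isolation (recount_outstanding() stands in for lpfc_sli_sum_iocb()):

        unsigned long later;

        later = jiffies + msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000);
        while (time_after(later, jiffies) && cnt) {
                schedule_timeout_uninterruptible(msecs_to_jiffies(20));
                cnt = recount_outstanding();
        }
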
@@ -1268,19 +1242,12 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL;
int match;
- int ret = FAILED, i, err_count = 0;
- int cnt, loopcnt;
+ int ret = SUCCESS, status, i;
+ int cnt;
struct lpfc_scsi_buf * lpfc_cmd;
+ unsigned long later;
lpfc_block_error_handler(cmnd);
-
- lpfc_cmd = lpfc_get_scsi_buf(phba);
- if (lpfc_cmd == NULL)
- goto out;
-
- /* The lpfc_cmd storage is reused. Set all loop invariants. */
- lpfc_cmd->timeout = 60;
-
/*
* Since the driver manages a single bus device, reset all
* targets known to the driver. Should any target reset
@@ -1294,7 +1261,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
if (!NLP_CHK_NODE_ACT(ndlp))
continue;
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
- i == ndlp->nlp_sid &&
+ ndlp->nlp_sid == i &&
ndlp->rport) {
match = 1;
break;
@@ -1303,27 +1270,22 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
spin_unlock_irq(shost->host_lock);
if (!match)
continue;
-
- ret = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
- cmnd->device->lun,
- ndlp->rport->dd_data);
- if (ret != SUCCESS) {
+ lpfc_cmd = lpfc_get_scsi_buf(phba);
+ if (lpfc_cmd) {
+ lpfc_cmd->timeout = 60;
+ status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
+ cmnd->device->lun,
+ ndlp->rport->dd_data);
+ if (status != TIMEOUT_ERROR)
+ lpfc_release_scsi_buf(phba, lpfc_cmd);
+ }
+ if (!lpfc_cmd || status != SUCCESS) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0700 Bus Reset on target %d failed\n",
i);
- err_count++;
- break;
+ ret = FAILED;
}
}
-
- if (ret != IOCB_TIMEDOUT)
- lpfc_release_scsi_buf(phba, lpfc_cmd);
-
- if (err_count == 0)
- ret = SUCCESS;
- else
- ret = FAILED;
-
/*
* All outstanding txcmplq I/Os should have been aborted by
* the targets. Unfortunately, some targets do not abide by
@@ -1333,27 +1295,19 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
if (cnt)
lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
0, 0, LPFC_CTX_HOST);
- loopcnt = 0;
- while(cnt) {
- schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
-
- if (++loopcnt
- > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
- break;
-
+ later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+ while (time_after(later, jiffies) && cnt) {
+ schedule_timeout_uninterruptible(msecs_to_jiffies(20));
cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
}
-
if (cnt) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0715 Bus Reset I/O flush failure: "
"cnt x%x left x%x\n", cnt, i);
ret = FAILED;
}
-
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
-out:
return ret;
}
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 70a0a9eab211..f40aa7b905f7 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -324,9 +324,7 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
phba->work_ha |= HA_ERATT;
phba->work_hs = HS_FFER3;
- /* hbalock should already be held */
- if (phba->work_wait)
- lpfc_worker_wake_up(phba);
+ lpfc_worker_wake_up(phba);
return NULL;
}
@@ -1309,9 +1307,7 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
phba->work_ha |= HA_ERATT;
phba->work_hs = HS_FFER3;
- /* hbalock should already be held */
- if (phba->work_wait)
- lpfc_worker_wake_up(phba);
+ lpfc_worker_wake_up(phba);
return;
}
@@ -2611,12 +2607,9 @@ lpfc_mbox_timeout(unsigned long ptr)
phba->pport->work_port_events |= WORKER_MBOX_TMO;
spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
- if (!tmo_posted) {
- spin_lock_irqsave(&phba->hbalock, iflag);
- if (phba->work_wait)
- lpfc_worker_wake_up(phba);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- }
+ if (!tmo_posted)
+ lpfc_worker_wake_up(phba);
+ return;
}
void
@@ -3374,8 +3367,12 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
prev_pring_flag = pring->flag;
- if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
+ /* Only slow rings */
+ if (pring->ringno == LPFC_ELS_RING) {
pring->flag |= LPFC_DEFERRED_RING_EVENT;
+ /* Set the lpfc data pending flag */
+ set_bit(LPFC_DATA_READY, &phba->data_flags);
+ }
/*
* Error everything on the txq since these iocbs have not been
* given to the FW yet.
@@ -3434,8 +3431,12 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
spin_lock_irqsave(&phba->hbalock, flags);
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
- if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
+ /* Only slow rings */
+ if (pring->ringno == LPFC_ELS_RING) {
pring->flag |= LPFC_DEFERRED_RING_EVENT;
+ /* Set the lpfc data pending flag */
+ set_bit(LPFC_DATA_READY, &phba->data_flags);
+ }
/*
* Error everything on the txq since these iocbs have not been
@@ -3762,7 +3763,6 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
lpfc_ctx_cmd ctx_cmd)
{
struct lpfc_scsi_buf *lpfc_cmd;
- struct scsi_cmnd *cmnd;
int rc = 1;
if (!(iocbq->iocb_flag & LPFC_IO_FCP))
@@ -3772,19 +3772,20 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
return rc;
lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
- cmnd = lpfc_cmd->pCmd;
- if (cmnd == NULL)
+ if (lpfc_cmd->pCmd == NULL)
return rc;
switch (ctx_cmd) {
case LPFC_CTX_LUN:
- if ((cmnd->device->id == tgt_id) &&
- (cmnd->device->lun == lun_id))
+ if ((lpfc_cmd->rdata->pnode) &&
+ (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
+ (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
rc = 0;
break;
case LPFC_CTX_TGT:
- if (cmnd->device->id == tgt_id)
+ if ((lpfc_cmd->rdata->pnode) &&
+ (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
rc = 0;
break;
case LPFC_CTX_HOST:
@@ -3994,6 +3995,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
if (pmboxq->context1)
return MBX_NOT_FINISHED;
+ pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
/* setup wake call as IOCB callback */
pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
/* setup context field to pass wait_queue pointer to wake function */
@@ -4159,7 +4161,7 @@ lpfc_intr_handler(int irq, void *dev_id)
"pwork:x%x hawork:x%x wait:x%x",
phba->work_ha, work_ha_copy,
(uint32_t)((unsigned long)
- phba->work_wait));
+ &phba->work_waitq));
control &=
~(HC_R0INT_ENA << LPFC_ELS_RING);
@@ -4172,7 +4174,7 @@ lpfc_intr_handler(int irq, void *dev_id)
"x%x hawork:x%x wait:x%x",
phba->work_ha, work_ha_copy,
(uint32_t)((unsigned long)
- phba->work_wait));
+ &phba->work_waitq));
}
spin_unlock(&phba->hbalock);
}
@@ -4297,9 +4299,8 @@ send_current_mbox:
spin_lock(&phba->hbalock);
phba->work_ha |= work_ha_copy;
- if (phba->work_wait)
- lpfc_worker_wake_up(phba);
spin_unlock(&phba->hbalock);
+ lpfc_worker_wake_up(phba);
}
ha_copy &= ~(phba->work_ha_mask);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index b22b893019f4..ad24cacfbe10 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.2.6"
+#define LPFC_DRIVER_VERSION "8.2.7"
#define LPFC_DRIVER_NAME "lpfc"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 6feaf59b0b1b..109f89d98830 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -216,6 +216,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
int vpi;
int rc = VPORT_ERROR;
int status;
+ int size;
if ((phba->sli_rev < 3) ||
!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
@@ -278,7 +279,20 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8);
memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8);
-
+ size = strnlen(fc_vport->symbolic_name, LPFC_VNAME_LEN);
+ if (size) {
+ vport->vname = kzalloc(size+1, GFP_KERNEL);
+ if (!vport->vname) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+ "1814 Create VPORT failed. "
+ "vname allocation failed.\n");
+ rc = VPORT_ERROR;
+ lpfc_free_vpi(phba, vpi);
+ destroy_port(vport);
+ goto error_out;
+ }
+ memcpy(vport->vname, fc_vport->symbolic_name, size+1);
+ }
if (fc_vport->node_name != 0)
u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
if (fc_vport->port_name != 0)
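
The vname handling above bounds the scan with strnlen() and allocates one extra byte for the terminator. A slightly more conservative sketch of the same idea, copying only the measured length and letting kzalloc() supply the NUL:

        size = strnlen(src, LPFC_VNAME_LEN);
        dst = kzalloc(size + 1, GFP_KERNEL);
        if (dst)
                memcpy(dst, src, size); /* trailing NUL comes from kzalloc */
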
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 18551aaf5e09..28c9da7d4a5c 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -46,6 +46,7 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
+#include <linux/smp_lock.h>
#include <scsi/scsicam.h>
#include "scsi.h"
@@ -3272,12 +3273,12 @@ mega_init_scb(adapter_t *adapter)
* @filep - unused
*
* Routines for the character/ioctl interface to the driver. Find out if this
- * is a valid open. If yes, increment the module use count so that it cannot
- * be unloaded.
+ * is a valid open.
*/
static int
megadev_open (struct inode *inode, struct file *filep)
{
+ cycle_kernel_lock();
/*
* Only allow superuser to access private ioctl interface
*/
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index 0ad215e27b83..ac3b280c2a72 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -15,6 +15,7 @@
* Common management module
*/
#include <linux/sched.h>
+#include <linux/smp_lock.h>
#include "megaraid_mm.h"
@@ -96,6 +97,7 @@ mraid_mm_open(struct inode *inode, struct file *filep)
*/
if (!capable(CAP_SYS_ADMIN)) return (-EACCES);
+ cycle_kernel_lock();
return 0;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 7d84c8bbcf3f..fc7ac158476c 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -33,6 +33,7 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/smp_lock.h>
#include <linux/uio.h>
#include <asm/uaccess.h>
#include <linux/fs.h>
@@ -2863,6 +2864,7 @@ static void megasas_shutdown(struct pci_dev *pdev)
*/
static int megasas_mgmt_open(struct inode *inode, struct file *filep)
{
+ cycle_kernel_lock();
/*
* Allow only those users with admin rights
*/
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index fd63b06d9ef1..11aa917629ac 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1765,7 +1765,7 @@ static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg)
default:
return 0;
}
- if (mesg.event == mdev->ofdev.dev.power.power_state.event)
+ if (ms->phase == sleeping)
return 0;
scsi_block_requests(ms->host);
@@ -1780,8 +1780,6 @@ static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg)
disable_irq(ms->meshintr);
set_mesh_power(ms, 0);
- mdev->ofdev.dev.power.power_state = mesg;
-
return 0;
}
@@ -1790,7 +1788,7 @@ static int mesh_resume(struct macio_dev *mdev)
struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
unsigned long flags;
- if (mdev->ofdev.dev.power.power_state.event == PM_EVENT_ON)
+ if (ms->phase != sleeping)
return 0;
set_mesh_power(ms, 1);
@@ -1801,8 +1799,6 @@ static int mesh_resume(struct macio_dev *mdev)
enable_irq(ms->meshintr);
scsi_unblock_requests(ms->host);
- mdev->ofdev.dev.power.power_state.event = PM_EVENT_ON;
-
return 0;
}
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 243d8becd30f..1c79f9794f4e 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -50,6 +50,7 @@ static const char * osst_version = "0.99.4";
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
+#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <asm/dma.h>
#include <asm/system.h>
@@ -4359,7 +4360,7 @@ os_bypass:
/* Open the device */
-static int os_scsi_tape_open(struct inode * inode, struct file * filp)
+static int __os_scsi_tape_open(struct inode * inode, struct file * filp)
{
unsigned short flags;
int i, b_size, new_session = 0, retval = 0;
@@ -4725,6 +4726,18 @@ err_out:
return retval;
}
+/* BKL pushdown: spaghetti avoidance wrapper */
+static int os_scsi_tape_open(struct inode * inode, struct file * filp)
+{
+ int ret;
+
+ lock_kernel();
+ ret = __os_scsi_tape_open(inode, filp);
+ unlock_kernel();
+ return ret;
+}
+
+
/* Flush the tape buffer before close */
static int os_scsi_tape_flush(struct file * filp, fl_owner_t id)
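
This is the stock BKL-pushdown shape: leave the original open() body untouched and take the big kernel lock only around the call, preserving the serialization the VFS used to provide. A generic sketch (names hypothetical):

        static int example_open(struct inode *inode, struct file *filp)
        {
                int ret;

                lock_kernel();
                ret = __example_open(inode, filp);
                unlock_kernel();
                return ret;
        }

The megaraid hunks above use the lighter cycle_kernel_lock() form, which just takes and drops the lock to serialize against any open() still running under the BKL.
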
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 0c786944d2c2..5822dd595826 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -113,9 +113,6 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
.host_param_mask = ISCSI_HOST_HWADDRESS |
ISCSI_HOST_IPADDRESS |
ISCSI_HOST_INITIATOR_NAME,
- .sessiondata_size = sizeof(struct ddb_entry),
- .host_template = &qla4xxx_driver_template,
-
.tgt_dscvr = qla4xxx_tgt_dscvr,
.get_conn_param = qla4xxx_conn_get_param,
.get_session_param = qla4xxx_sess_get_param,
@@ -275,7 +272,7 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry)
return err;
}
- ddb_entry->conn = iscsi_create_conn(ddb_entry->sess, 0);
+ ddb_entry->conn = iscsi_create_conn(ddb_entry->sess, 0, 0);
if (!ddb_entry->conn) {
iscsi_remove_session(ddb_entry->sess);
DEBUG2(printk(KERN_ERR "Could not add connection.\n"));
@@ -292,7 +289,8 @@ struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha)
struct ddb_entry *ddb_entry;
struct iscsi_cls_session *sess;
- sess = iscsi_alloc_session(ha->host, &qla4xxx_iscsi_transport);
+ sess = iscsi_alloc_session(ha->host, &qla4xxx_iscsi_transport,
+ sizeof(struct ddb_entry));
if (!sess)
return NULL;
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 110e776d1a07..36c92f961e15 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -855,9 +855,18 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
good_bytes = scsi_bufflen(cmd);
if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
+ int old_good_bytes = good_bytes;
drv = scsi_cmd_to_driver(cmd);
if (drv->done)
good_bytes = drv->done(cmd);
+ /*
+ * USB may not give sense identifying bad sector and
+ * simply return a residue instead, so subtract off the
+ * residue if drv->done() error processing indicates no
+ * change to the completion length.
+ */
+ if (good_bytes == old_good_bytes)
+ good_bytes -= scsi_get_resid(cmd);
}
scsi_io_completion(cmd, good_bytes);
}
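
The scsi.c hunk handles devices whose upper-level done() routine finds nothing in the sense data: if the driver leaves the completion length untouched, the midlayer now trusts the transport residue and shortens the completion accordingly, so bytes the device never transferred are not reported as good. The arithmetic as a small worked sketch, with hypothetical numbers:

    /*
     * A 4096-byte READ where the (e.g. USB) transport reported a 512-byte
     * residue and drv->done() had no sense data to act on, so it returned
     * the length unchanged.
     */
    static int effective_good_bytes(int bufflen, int resid, int drv_done_ret)
    {
            int good_bytes = bufflen;       /* scsi_bufflen(cmd) */
            int old_good_bytes = good_bytes;

            good_bytes = drv_done_ret;      /* drv->done(cmd) */
            if (good_bytes == old_good_bytes)
                    good_bytes -= resid;    /* scsi_get_resid(cmd) */
            return good_bytes;              /* (4096, 512, 4096) -> 3584 */
    }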
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index f6600bfb5bde..01d11a01ffbf 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -94,6 +94,7 @@ static const char * scsi_debug_version_date = "20070104";
#define DEF_VIRTUAL_GB 0
#define DEF_FAKE_RW 0
#define DEF_VPD_USE_HOSTNO 1
+#define DEF_SECTOR_SIZE 512
/* bit mask values for scsi_debug_opts */
#define SCSI_DEBUG_OPT_NOISE 1
@@ -142,6 +143,7 @@ static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
static int scsi_debug_fake_rw = DEF_FAKE_RW;
static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
+static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
static int scsi_debug_cmnd_count = 0;
@@ -157,11 +159,6 @@ static int sdebug_heads; /* heads per disk */
static int sdebug_cylinders_per; /* cylinders per surface */
static int sdebug_sectors_per; /* sectors per cylinder */
-/* default sector size is 512 bytes, 2**9 bytes */
-#define POW2_SECT_SIZE 9
-#define SECT_SIZE (1 << POW2_SECT_SIZE)
-#define SECT_SIZE_PER(TGT) SECT_SIZE
-
#define SDEBUG_MAX_PARTS 4
#define SDEBUG_SENSE_LEN 32
@@ -646,6 +643,14 @@ static int inquiry_evpd_b0(unsigned char * arr)
return sizeof(vpdb0_data);
}
+static int inquiry_evpd_b1(unsigned char *arr)
+{
+ memset(arr, 0, 0x3c);
+ arr[0] = 0;
+ arr[1] = 1;
+
+ return 0x3c;
+}
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584
@@ -701,6 +706,7 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
arr[n++] = 0x88; /* SCSI ports */
arr[n++] = 0x89; /* ATA information */
arr[n++] = 0xb0; /* Block limits (SBC) */
+ arr[n++] = 0xb1; /* Block characteristics (SBC) */
arr[3] = n - 4; /* number of supported VPD pages */
} else if (0x80 == cmd[2]) { /* unit serial number */
arr[1] = cmd[2]; /*sanity */
@@ -740,6 +746,9 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
} else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_evpd_b0(&arr[4]);
+ } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
+ arr[1] = cmd[2]; /*sanity */
+ arr[3] = inquiry_evpd_b1(&arr[4]);
} else {
/* Illegal request, invalid field in cdb */
mk_sense_buffer(devip, ILLEGAL_REQUEST,
@@ -878,8 +887,8 @@ static int resp_readcap(struct scsi_cmnd * scp,
arr[2] = 0xff;
arr[3] = 0xff;
}
- arr[6] = (SECT_SIZE_PER(target) >> 8) & 0xff;
- arr[7] = SECT_SIZE_PER(target) & 0xff;
+ arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
+ arr[7] = scsi_debug_sector_size & 0xff;
return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
}
@@ -902,10 +911,10 @@ static int resp_readcap16(struct scsi_cmnd * scp,
capac = sdebug_capacity - 1;
for (k = 0; k < 8; ++k, capac >>= 8)
arr[7 - k] = capac & 0xff;
- arr[8] = (SECT_SIZE_PER(target) >> 24) & 0xff;
- arr[9] = (SECT_SIZE_PER(target) >> 16) & 0xff;
- arr[10] = (SECT_SIZE_PER(target) >> 8) & 0xff;
- arr[11] = SECT_SIZE_PER(target) & 0xff;
+ arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
+ arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
+ arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
+ arr[11] = scsi_debug_sector_size & 0xff;
return fill_from_dev_buffer(scp, arr,
min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
}
@@ -1019,20 +1028,20 @@ static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
static int resp_format_pg(unsigned char * p, int pcontrol, int target)
{ /* Format device page for mode_sense */
- unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0x40, 0, 0, 0};
-
- memcpy(p, format_pg, sizeof(format_pg));
- p[10] = (sdebug_sectors_per >> 8) & 0xff;
- p[11] = sdebug_sectors_per & 0xff;
- p[12] = (SECT_SIZE >> 8) & 0xff;
- p[13] = SECT_SIZE & 0xff;
- if (DEV_REMOVEABLE(target))
- p[20] |= 0x20; /* should agree with INQUIRY */
- if (1 == pcontrol)
- memset(p + 2, 0, sizeof(format_pg) - 2);
- return sizeof(format_pg);
+ unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0x40, 0, 0, 0};
+
+ memcpy(p, format_pg, sizeof(format_pg));
+ p[10] = (sdebug_sectors_per >> 8) & 0xff;
+ p[11] = sdebug_sectors_per & 0xff;
+ p[12] = (scsi_debug_sector_size >> 8) & 0xff;
+ p[13] = scsi_debug_sector_size & 0xff;
+ if (DEV_REMOVEABLE(target))
+ p[20] |= 0x20; /* should agree with INQUIRY */
+ if (1 == pcontrol)
+ memset(p + 2, 0, sizeof(format_pg) - 2);
+ return sizeof(format_pg);
}
static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
@@ -1206,8 +1215,8 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
ap[2] = (sdebug_capacity >> 8) & 0xff;
ap[3] = sdebug_capacity & 0xff;
}
- ap[6] = (SECT_SIZE_PER(target) >> 8) & 0xff;
- ap[7] = SECT_SIZE_PER(target) & 0xff;
+ ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
+ ap[7] = scsi_debug_sector_size & 0xff;
offset += bd_len;
ap = arr + offset;
} else if (16 == bd_len) {
@@ -1215,10 +1224,10 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
for (k = 0; k < 8; ++k, capac >>= 8)
ap[7 - k] = capac & 0xff;
- ap[12] = (SECT_SIZE_PER(target) >> 24) & 0xff;
- ap[13] = (SECT_SIZE_PER(target) >> 16) & 0xff;
- ap[14] = (SECT_SIZE_PER(target) >> 8) & 0xff;
- ap[15] = SECT_SIZE_PER(target) & 0xff;
+ ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
+ ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
+ ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
+ ap[15] = scsi_debug_sector_size & 0xff;
offset += bd_len;
ap = arr + offset;
}
@@ -1519,10 +1528,10 @@ static int do_device_access(struct scsi_cmnd *scmd,
if (block + num > sdebug_store_sectors)
rest = block + num - sdebug_store_sectors;
- ret = func(scmd, fake_storep + (block * SECT_SIZE),
- (num - rest) * SECT_SIZE);
+ ret = func(scmd, fake_storep + (block * scsi_debug_sector_size),
+ (num - rest) * scsi_debug_sector_size);
if (!ret && rest)
- ret = func(scmd, fake_storep, rest * SECT_SIZE);
+ ret = func(scmd, fake_storep, rest * scsi_debug_sector_size);
return ret;
}
@@ -1575,10 +1584,10 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
write_unlock_irqrestore(&atomic_rw, iflags);
if (-1 == ret)
return (DID_ERROR << 16);
- else if ((ret < (num * SECT_SIZE)) &&
+ else if ((ret < (num * scsi_debug_sector_size)) &&
(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
- " IO sent=%d bytes\n", num * SECT_SIZE, ret);
+ " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
return 0;
}
@@ -2085,6 +2094,7 @@ module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
S_IRUGO | S_IWUSR);
+module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
@@ -2106,6 +2116,7 @@ MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
+MODULE_PARM_DESC(sector_size, "hardware sector size in bytes (def=512)");
static char sdebug_info[256];
@@ -2158,8 +2169,9 @@ static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **sta
scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
scsi_debug_cmnd_count, scsi_debug_delay,
scsi_debug_max_luns, scsi_debug_scsi_level,
- SECT_SIZE, sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
- num_aborts, num_dev_resets, num_bus_resets, num_host_resets);
+ scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
+ sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
+ num_host_resets);
if (pos < offset) {
len = 0;
begin = pos;
@@ -2434,6 +2446,12 @@ static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show,
sdebug_vpd_use_hostno_store);
+static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
+}
+DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL);
+
/* Note: The following function creates attribute files in the
/sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
files (over those found in the /sys/module/scsi_debug/parameters
@@ -2459,11 +2477,13 @@ static int do_create_driverfs_files(void)
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
return ret;
}
static void do_remove_driverfs_files(void)
{
+ driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
@@ -2499,10 +2519,22 @@ static int __init scsi_debug_init(void)
int k;
int ret;
+ switch (scsi_debug_sector_size) {
+ case 512:
+ case 1024:
+ case 2048:
+ case 4096:
+ break;
+ default:
+ printk(KERN_ERR "scsi_debug_init: invalid sector_size %u\n",
+ scsi_debug_sector_size);
+ return -EINVAL;
+ }
+
if (scsi_debug_dev_size_mb < 1)
scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
- sdebug_store_sectors = sz / SECT_SIZE;
+ sdebug_store_sectors = sz / scsi_debug_sector_size;
sdebug_capacity = get_sdebug_capacity();
/* play around with geometry, don't waste too much on track 0 */
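
With the compile-time SECT_SIZE gone, scsi_debug sizes its ramdisk from the run-time parameter: loading the module with sector_size=4096 (one of the four accepted values) makes READ CAPACITY, the MODE SENSE block descriptors and all data accesses use 4 KiB blocks, and the store holds a quarter as many sectors for the same dev_size_mb. The sizing arithmetic as a small sketch:

    /* Worked example of the new sizing, assuming sector_size=4096. */
    static unsigned long example_store_sectors(unsigned long dev_size_mb,
                                               unsigned int sector_size)
    {
            unsigned long sz = dev_size_mb * 1048576UL;     /* ramdisk bytes */

            return sz / sector_size;        /* 8 MB / 4096 = 2048 sectors */
    }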
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index eaf5a8add1ba..006a95916f72 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -298,6 +298,7 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
*/
static int scsi_check_sense(struct scsi_cmnd *scmd)
{
+ struct scsi_device *sdev = scmd->device;
struct scsi_sense_hdr sshdr;
if (! scsi_command_normalize_sense(scmd, &sshdr))
@@ -306,6 +307,16 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
if (scsi_sense_is_deferred(&sshdr))
return NEEDS_RETRY;
+ if (sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh &&
+ sdev->scsi_dh_data->scsi_dh->check_sense) {
+ int rc;
+
+ rc = sdev->scsi_dh_data->scsi_dh->check_sense(sdev, &sshdr);
+ if (rc != SCSI_RETURN_NOT_HANDLED)
+ return rc;
+ /* handler does not care. Drop down to default handling */
+ }
+
/*
* Previous logic looked for FILEMARK, EOM or ILI which are
* mainly associated with tapes and returned SUCCESS.
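
This scsi_error.c hook gives a hardware (device) handler first crack at the normalized sense data before the midlayer's generic dispositions apply. A hedged sketch of a handler's check_sense implementation; the contract — return a disposition, or SCSI_RETURN_NOT_HANDLED to fall through to the default handling — comes from the call site above, while the specific sense values handled here are purely illustrative:

    static int example_dh_check_sense(struct scsi_device *sdev,
                                      struct scsi_sense_hdr *sshdr)
    {
            if (sshdr->sense_key == NOT_READY && sshdr->asc == 0x04)
                    /* e.g. LUN transitioning between array controllers */
                    return NEEDS_RETRY;

            return SCSI_RETURN_NOT_HANDLED; /* let the midlayer decide */
    }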
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index a82d2fe80fb5..88d1b5f44e59 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -65,7 +65,7 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {
};
#undef SP
-static struct kmem_cache *scsi_bidi_sdb_cache;
+static struct kmem_cache *scsi_sdb_cache;
static void scsi_run_queue(struct request_queue *q);
@@ -207,6 +207,15 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
*/
blk_execute_rq(req->q, NULL, req, 1);
+ /*
+ * Some devices (USB mass-storage in particular) may transfer
+ * garbage data together with a residue indicating that the data
+ * is invalid. Prevent the garbage from being misinterpreted
+ * and prevent security leaks by zeroing out the excess data.
+ */
+ if (unlikely(req->data_len > 0 && req->data_len <= bufflen))
+ memset(buffer + (bufflen - req->data_len), 0, req->data_len);
+
ret = req->errors;
out:
blk_put_request(req);
@@ -775,7 +784,7 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
struct scsi_data_buffer *bidi_sdb =
cmd->request->next_rq->special;
scsi_free_sgtable(bidi_sdb);
- kmem_cache_free(scsi_bidi_sdb_cache, bidi_sdb);
+ kmem_cache_free(scsi_sdb_cache, bidi_sdb);
cmd->request->next_rq->special = NULL;
}
}
@@ -1050,7 +1059,7 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
if (blk_bidi_rq(cmd->request)) {
struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
- scsi_bidi_sdb_cache, GFP_ATOMIC);
+ scsi_sdb_cache, GFP_ATOMIC);
if (!bidi_sdb) {
error = BLKPREP_DEFER;
goto err_exit;
@@ -1160,6 +1169,14 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
if (ret != BLKPREP_OK)
return ret;
+
+ if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
+ && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
+ ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
+ if (ret != BLKPREP_OK)
+ return ret;
+ }
+
/*
* Filesystem requests must transfer data.
*/
@@ -1320,7 +1337,6 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
printk("scsi%d unblocking host at zero depth\n",
shost->host_no));
} else {
- blk_plug_device(q);
return 0;
}
}
@@ -1684,11 +1700,11 @@ int __init scsi_init_queue(void)
return -ENOMEM;
}
- scsi_bidi_sdb_cache = kmem_cache_create("scsi_bidi_sdb",
- sizeof(struct scsi_data_buffer),
- 0, 0, NULL);
- if (!scsi_bidi_sdb_cache) {
- printk(KERN_ERR "SCSI: can't init scsi bidi sdb cache\n");
+ scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
+ sizeof(struct scsi_data_buffer),
+ 0, 0, NULL);
+ if (!scsi_sdb_cache) {
+ printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
goto cleanup_io_context;
}
@@ -1701,7 +1717,7 @@ int __init scsi_init_queue(void)
if (!sgp->slab) {
printk(KERN_ERR "SCSI: can't init sg slab %s\n",
sgp->name);
- goto cleanup_bidi_sdb;
+ goto cleanup_sdb;
}
sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
@@ -1709,13 +1725,13 @@ int __init scsi_init_queue(void)
if (!sgp->pool) {
printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
sgp->name);
- goto cleanup_bidi_sdb;
+ goto cleanup_sdb;
}
}
return 0;
-cleanup_bidi_sdb:
+cleanup_sdb:
for (i = 0; i < SG_MEMPOOL_NR; i++) {
struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
if (sgp->pool)
@@ -1723,7 +1739,7 @@ cleanup_bidi_sdb:
if (sgp->slab)
kmem_cache_destroy(sgp->slab);
}
- kmem_cache_destroy(scsi_bidi_sdb_cache);
+ kmem_cache_destroy(scsi_sdb_cache);
cleanup_io_context:
kmem_cache_destroy(scsi_io_context_cache);
@@ -1735,7 +1751,7 @@ void scsi_exit_queue(void)
int i;
kmem_cache_destroy(scsi_io_context_cache);
- kmem_cache_destroy(scsi_bidi_sdb_cache);
+ kmem_cache_destroy(scsi_sdb_cache);
for (i = 0; i < SG_MEMPOOL_NR; i++) {
struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
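
The scsi_lib.c counterpart of the device-handler work is the optional prep_fn called from scsi_setup_fs_cmnd above, which lets a handler defer or kill a filesystem request before a command is built for it. A hedged sketch of the typical use: fail requests fast while a multipath path is passive rather than letting them time out. example_path_is_active() is a hypothetical helper; the BLKPREP_* return contract is the block layer's:

    static int example_dh_prep_fn(struct scsi_device *sdev, struct request *req)
    {
            if (!example_path_is_active(sdev)) {
                    req->cmd_flags |= REQ_QUIET;    /* no error spew for these */
                    return BLKPREP_KILL;            /* multipath picks another path */
            }
            return BLKPREP_OK;
    }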
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index a00eee6f7be9..196fe3af0d5e 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -346,7 +346,7 @@ static void scsi_target_dev_release(struct device *dev)
put_device(parent);
}
-struct device_type scsi_target_type = {
+static struct device_type scsi_target_type = {
.name = "scsi_target",
.release = scsi_target_dev_release,
};
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 93d2b6714453..b6e561059779 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -439,6 +439,7 @@ struct bus_type scsi_bus_type = {
.resume = scsi_bus_resume,
.remove = scsi_bus_remove,
};
+EXPORT_SYMBOL_GPL(scsi_bus_type);
int scsi_sysfs_register(void)
{
diff --git a/drivers/scsi/scsi_tgt_if.c b/drivers/scsi/scsi_tgt_if.c
index d2557dbc2dc1..0e9533f7aabc 100644
--- a/drivers/scsi/scsi_tgt_if.c
+++ b/drivers/scsi/scsi_tgt_if.c
@@ -21,6 +21,7 @@
*/
#include <linux/miscdevice.h>
#include <linux/file.h>
+#include <linux/smp_lock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -321,6 +322,7 @@ static int tgt_open(struct inode *inode, struct file *file)
{
tx_ring.tr_idx = rx_ring.tr_idx = 0;
+ cycle_kernel_lock();
return 0;
}
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 65d1737eb664..3af7cbcc5c5d 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -30,10 +30,11 @@
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/iscsi_if.h>
-#define ISCSI_SESSION_ATTRS 19
+#define ISCSI_SESSION_ATTRS 21
#define ISCSI_CONN_ATTRS 13
#define ISCSI_HOST_ATTRS 4
-#define ISCSI_TRANSPORT_VERSION "2.0-869"
+
+#define ISCSI_TRANSPORT_VERSION "2.0-870"
struct iscsi_internal {
int daemon_pid;
@@ -101,16 +102,10 @@ show_transport_##name(struct device *dev, \
static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
show_transport_attr(caps, "0x%x");
-show_transport_attr(max_lun, "%d");
-show_transport_attr(max_conn, "%d");
-show_transport_attr(max_cmd_len, "%d");
static struct attribute *iscsi_transport_attrs[] = {
&dev_attr_handle.attr,
&dev_attr_caps.attr,
- &dev_attr_max_lun.attr,
- &dev_attr_max_conn.attr,
- &dev_attr_max_cmd_len.attr,
NULL,
};
@@ -118,18 +113,139 @@ static struct attribute_group iscsi_transport_group = {
.attrs = iscsi_transport_attrs,
};
+/*
+ * iSCSI endpoint attrs
+ */
+#define iscsi_dev_to_endpoint(_dev) \
+ container_of(_dev, struct iscsi_endpoint, dev)
+
+#define ISCSI_ATTR(_prefix,_name,_mode,_show,_store) \
+struct device_attribute dev_attr_##_prefix##_##_name = \
+ __ATTR(_name,_mode,_show,_store)
+
+static void iscsi_endpoint_release(struct device *dev)
+{
+ struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+ kfree(ep);
+}
+
+static struct class iscsi_endpoint_class = {
+ .name = "iscsi_endpoint",
+ .dev_release = iscsi_endpoint_release,
+};
+
+static ssize_t
+show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+ return sprintf(buf, "%u\n", ep->id);
+}
+static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
+
+static struct attribute *iscsi_endpoint_attrs[] = {
+ &dev_attr_ep_handle.attr,
+ NULL,
+};
+
+static struct attribute_group iscsi_endpoint_group = {
+ .attrs = iscsi_endpoint_attrs,
+};
+#define ISCSI_MAX_EPID -1
+
+static int iscsi_match_epid(struct device *dev, void *data)
+{
+ struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+ unsigned int *epid = (unsigned int *) data;
+
+ return *epid == ep->id;
+}
+
+struct iscsi_endpoint *
+iscsi_create_endpoint(int dd_size)
+{
+ struct device *dev;
+ struct iscsi_endpoint *ep;
+ unsigned int id;
+ int err;
+
+ for (id = 1; id < ISCSI_MAX_EPID; id++) {
+ dev = class_find_device(&iscsi_endpoint_class, &id,
+ iscsi_match_epid);
+ if (!dev)
+ break;
+ }
+ if (id == ISCSI_MAX_EPID) {
+ printk(KERN_ERR "Too many connections. Max supported %u\n",
+ ISCSI_MAX_EPID - 1);
+ return NULL;
+ }
+
+ ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
+ if (!ep)
+ return NULL;
+
+ ep->id = id;
+ ep->dev.class = &iscsi_endpoint_class;
+ snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%u", id);
+ err = device_register(&ep->dev);
+ if (err)
+ goto free_ep;
+
+ err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
+ if (err)
+ goto unregister_dev;
+
+ if (dd_size)
+ ep->dd_data = &ep[1];
+ return ep;
+
+unregister_dev:
+ device_unregister(&ep->dev);
+ return NULL;
+
+free_ep:
+ kfree(ep);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(iscsi_create_endpoint);
+
+void iscsi_destroy_endpoint(struct iscsi_endpoint *ep)
+{
+ sysfs_remove_group(&ep->dev.kobj, &iscsi_endpoint_group);
+ device_unregister(&ep->dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);
+
+struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
+{
+ struct iscsi_endpoint *ep;
+ struct device *dev;
+
+ dev = class_find_device(&iscsi_endpoint_class, &handle,
+ iscsi_match_epid);
+ if (!dev)
+ return NULL;
+
+ ep = iscsi_dev_to_endpoint(dev);
+ /*
+ * we can drop this now because the interface will prevent
+ * removals and lookups from racing.
+ */
+ put_device(dev);
+ return ep;
+}
+EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
struct device *cdev)
{
struct Scsi_Host *shost = dev_to_shost(dev);
- struct iscsi_host *ihost = shost->shost_data;
+ struct iscsi_cls_host *ihost = shost->shost_data;
memset(ihost, 0, sizeof(*ihost));
- INIT_LIST_HEAD(&ihost->sessions);
- mutex_init(&ihost->mutex);
atomic_set(&ihost->nr_scans, 0);
+ mutex_init(&ihost->mutex);
snprintf(ihost->scan_workq_name, KOBJ_NAME_LEN, "iscsi_scan_%d",
shost->host_no);
@@ -144,7 +260,7 @@ static int iscsi_remove_host(struct transport_container *tc, struct device *dev,
struct device *cdev)
{
struct Scsi_Host *shost = dev_to_shost(dev);
- struct iscsi_host *ihost = shost->shost_data;
+ struct iscsi_cls_host *ihost = shost->shost_data;
destroy_workqueue(ihost->scan_workq);
return 0;
@@ -287,6 +403,24 @@ static int iscsi_is_session_dev(const struct device *dev)
return dev->release == iscsi_session_release;
}
+static int iscsi_iter_session_fn(struct device *dev, void *data)
+{
+ void (* fn) (struct iscsi_cls_session *) = data;
+
+ if (!iscsi_is_session_dev(dev))
+ return 0;
+ fn(iscsi_dev_to_session(dev));
+ return 0;
+}
+
+void iscsi_host_for_each_session(struct Scsi_Host *shost,
+ void (*fn)(struct iscsi_cls_session *))
+{
+ device_for_each_child(&shost->shost_gendev, fn,
+ iscsi_iter_session_fn);
+}
+EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
+
/**
* iscsi_scan_finished - helper to report when running scans are done
* @shost: scsi host
@@ -297,7 +431,7 @@ static int iscsi_is_session_dev(const struct device *dev)
*/
int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
- struct iscsi_host *ihost = shost->shost_data;
+ struct iscsi_cls_host *ihost = shost->shost_data;
/*
* qla4xxx will have kicked off some session unblocks before calling
* scsi_scan_host, so just wait for them to complete.
@@ -306,42 +440,76 @@ int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
}
EXPORT_SYMBOL_GPL(iscsi_scan_finished);
-static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
- uint id, uint lun)
+struct iscsi_scan_data {
+ unsigned int channel;
+ unsigned int id;
+ unsigned int lun;
+};
+
+static int iscsi_user_scan_session(struct device *dev, void *data)
{
- struct iscsi_host *ihost = shost->shost_data;
+ struct iscsi_scan_data *scan_data = data;
struct iscsi_cls_session *session;
+ struct Scsi_Host *shost;
+ struct iscsi_cls_host *ihost;
+ unsigned long flags;
+ unsigned int id;
+
+ if (!iscsi_is_session_dev(dev))
+ return 0;
+
+ session = iscsi_dev_to_session(dev);
+ shost = iscsi_session_to_shost(session);
+ ihost = shost->shost_data;
mutex_lock(&ihost->mutex);
- list_for_each_entry(session, &ihost->sessions, host_list) {
- if ((channel == SCAN_WILD_CARD || channel == 0) &&
- (id == SCAN_WILD_CARD || id == session->target_id))
- scsi_scan_target(&session->dev, 0,
- session->target_id, lun, 1);
+ spin_lock_irqsave(&session->lock, flags);
+ if (session->state != ISCSI_SESSION_LOGGED_IN) {
+ spin_unlock_irqrestore(&session->lock, flags);
+ mutex_unlock(&ihost->mutex);
+ return 0;
}
- mutex_unlock(&ihost->mutex);
+ id = session->target_id;
+ spin_unlock_irqrestore(&session->lock, flags);
+ if (id != ISCSI_MAX_TARGET) {
+ if ((scan_data->channel == SCAN_WILD_CARD ||
+ scan_data->channel == 0) &&
+ (scan_data->id == SCAN_WILD_CARD ||
+ scan_data->id == id))
+ scsi_scan_target(&session->dev, 0, id,
+ scan_data->lun, 1);
+ }
+ mutex_unlock(&ihost->mutex);
return 0;
}
+static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
+ uint id, uint lun)
+{
+ struct iscsi_scan_data scan_data;
+
+ scan_data.channel = channel;
+ scan_data.id = id;
+ scan_data.lun = lun;
+
+ return device_for_each_child(&shost->shost_gendev, &scan_data,
+ iscsi_user_scan_session);
+}
+
static void iscsi_scan_session(struct work_struct *work)
{
struct iscsi_cls_session *session =
container_of(work, struct iscsi_cls_session, scan_work);
struct Scsi_Host *shost = iscsi_session_to_shost(session);
- struct iscsi_host *ihost = shost->shost_data;
- unsigned long flags;
+ struct iscsi_cls_host *ihost = shost->shost_data;
+ struct iscsi_scan_data scan_data;
- spin_lock_irqsave(&session->lock, flags);
- if (session->state != ISCSI_SESSION_LOGGED_IN) {
- spin_unlock_irqrestore(&session->lock, flags);
- goto done;
- }
- spin_unlock_irqrestore(&session->lock, flags);
+ scan_data.channel = 0;
+ scan_data.id = SCAN_WILD_CARD;
+ scan_data.lun = SCAN_WILD_CARD;
- scsi_scan_target(&session->dev, 0, session->target_id,
- SCAN_WILD_CARD, 1);
-done:
+ iscsi_user_scan_session(&session->dev, &scan_data);
atomic_dec(&ihost->nr_scans);
}
@@ -381,7 +549,7 @@ static void __iscsi_unblock_session(struct work_struct *work)
container_of(work, struct iscsi_cls_session,
unblock_work);
struct Scsi_Host *shost = iscsi_session_to_shost(session);
- struct iscsi_host *ihost = shost->shost_data;
+ struct iscsi_cls_host *ihost = shost->shost_data;
unsigned long flags;
/*
@@ -449,15 +617,19 @@ static void __iscsi_unbind_session(struct work_struct *work)
container_of(work, struct iscsi_cls_session,
unbind_work);
struct Scsi_Host *shost = iscsi_session_to_shost(session);
- struct iscsi_host *ihost = shost->shost_data;
+ struct iscsi_cls_host *ihost = shost->shost_data;
+ unsigned long flags;
/* Prevent new scans and make sure scanning is not in progress */
mutex_lock(&ihost->mutex);
- if (list_empty(&session->host_list)) {
+ spin_lock_irqsave(&session->lock, flags);
+ if (session->target_id == ISCSI_MAX_TARGET) {
+ spin_unlock_irqrestore(&session->lock, flags);
mutex_unlock(&ihost->mutex);
return;
}
- list_del_init(&session->host_list);
+ session->target_id = ISCSI_MAX_TARGET;
+ spin_unlock_irqrestore(&session->lock, flags);
mutex_unlock(&ihost->mutex);
scsi_remove_target(&session->dev);
@@ -467,18 +639,18 @@ static void __iscsi_unbind_session(struct work_struct *work)
static int iscsi_unbind_session(struct iscsi_cls_session *session)
{
struct Scsi_Host *shost = iscsi_session_to_shost(session);
- struct iscsi_host *ihost = shost->shost_data;
+ struct iscsi_cls_host *ihost = shost->shost_data;
return queue_work(ihost->scan_workq, &session->unbind_work);
}
struct iscsi_cls_session *
-iscsi_alloc_session(struct Scsi_Host *shost,
- struct iscsi_transport *transport)
+iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+ int dd_size)
{
struct iscsi_cls_session *session;
- session = kzalloc(sizeof(*session) + transport->sessiondata_size,
+ session = kzalloc(sizeof(*session) + dd_size,
GFP_KERNEL);
if (!session)
return NULL;
@@ -487,7 +659,6 @@ iscsi_alloc_session(struct Scsi_Host *shost,
session->recovery_tmo = 120;
session->state = ISCSI_SESSION_FREE;
INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
- INIT_LIST_HEAD(&session->host_list);
INIT_LIST_HEAD(&session->sess_list);
INIT_WORK(&session->unblock_work, __iscsi_unblock_session);
INIT_WORK(&session->block_work, __iscsi_block_session);
@@ -500,22 +671,57 @@ iscsi_alloc_session(struct Scsi_Host *shost,
session->dev.parent = &shost->shost_gendev;
session->dev.release = iscsi_session_release;
device_initialize(&session->dev);
- if (transport->sessiondata_size)
+ if (dd_size)
session->dd_data = &session[1];
return session;
}
EXPORT_SYMBOL_GPL(iscsi_alloc_session);
+static int iscsi_get_next_target_id(struct device *dev, void *data)
+{
+ struct iscsi_cls_session *session;
+ unsigned long flags;
+ int err = 0;
+
+ if (!iscsi_is_session_dev(dev))
+ return 0;
+
+ session = iscsi_dev_to_session(dev);
+ spin_lock_irqsave(&session->lock, flags);
+ if (*((unsigned int *) data) == session->target_id)
+ err = -EEXIST;
+ spin_unlock_irqrestore(&session->lock, flags);
+ return err;
+}
+
int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
{
struct Scsi_Host *shost = iscsi_session_to_shost(session);
- struct iscsi_host *ihost;
+ struct iscsi_cls_host *ihost;
unsigned long flags;
+ unsigned int id = target_id;
int err;
ihost = shost->shost_data;
session->sid = atomic_add_return(1, &iscsi_session_nr);
- session->target_id = target_id;
+
+ if (id == ISCSI_MAX_TARGET) {
+ for (id = 0; id < ISCSI_MAX_TARGET; id++) {
+ err = device_for_each_child(&shost->shost_gendev, &id,
+ iscsi_get_next_target_id);
+ if (!err)
+ break;
+ }
+
+ if (id == ISCSI_MAX_TARGET) {
+ iscsi_cls_session_printk(KERN_ERR, session,
+ "Too many iscsi targets. Max "
+ "number of targets is %d.\n",
+ ISCSI_MAX_TARGET - 1);
+ goto release_host;
+ }
+ }
+ session->target_id = id;
snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u",
session->sid);
@@ -531,10 +737,6 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
list_add(&session->sess_list, &sesslist);
spin_unlock_irqrestore(&sesslock, flags);
- mutex_lock(&ihost->mutex);
- list_add(&session->host_list, &ihost->sessions);
- mutex_unlock(&ihost->mutex);
-
iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION);
return 0;
@@ -548,18 +750,18 @@ EXPORT_SYMBOL_GPL(iscsi_add_session);
* iscsi_create_session - create iscsi class session
* @shost: scsi host
* @transport: iscsi transport
+ * @dd_size: private driver data size
* @target_id: which target
*
* This can be called from a LLD or iscsi_transport.
*/
struct iscsi_cls_session *
-iscsi_create_session(struct Scsi_Host *shost,
- struct iscsi_transport *transport,
- unsigned int target_id)
+iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+ int dd_size, unsigned int target_id)
{
struct iscsi_cls_session *session;
- session = iscsi_alloc_session(shost, transport);
+ session = iscsi_alloc_session(shost, transport, dd_size);
if (!session)
return NULL;
@@ -595,7 +797,7 @@ static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
void iscsi_remove_session(struct iscsi_cls_session *session)
{
struct Scsi_Host *shost = iscsi_session_to_shost(session);
- struct iscsi_host *ihost = shost->shost_data;
+ struct iscsi_cls_host *ihost = shost->shost_data;
unsigned long flags;
int err;
@@ -661,6 +863,7 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
/**
* iscsi_create_conn - create iscsi class connection
* @session: iscsi cls session
+ * @dd_size: private driver data size
* @cid: connection id
*
* This can be called from a LLD or iscsi_transport. The connection
@@ -673,18 +876,17 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
* non-zero.
*/
struct iscsi_cls_conn *
-iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
+iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
{
struct iscsi_transport *transport = session->transport;
struct iscsi_cls_conn *conn;
unsigned long flags;
int err;
- conn = kzalloc(sizeof(*conn) + transport->conndata_size, GFP_KERNEL);
+ conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL);
if (!conn)
return NULL;
-
- if (transport->conndata_size)
+ if (dd_size)
conn->dd_data = &conn[1];
INIT_LIST_HEAD(&conn->conn_list);
@@ -1017,21 +1219,20 @@ int iscsi_session_event(struct iscsi_cls_session *session,
EXPORT_SYMBOL_GPL(iscsi_session_event);
static int
-iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev)
+iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
+ struct iscsi_uevent *ev, uint32_t initial_cmdsn,
+ uint16_t cmds_max, uint16_t queue_depth)
{
struct iscsi_transport *transport = priv->iscsi_transport;
struct iscsi_cls_session *session;
- uint32_t hostno;
+ uint32_t host_no;
- session = transport->create_session(transport, &priv->t,
- ev->u.c_session.cmds_max,
- ev->u.c_session.queue_depth,
- ev->u.c_session.initial_cmdsn,
- &hostno);
+ session = transport->create_session(ep, cmds_max, queue_depth,
+ initial_cmdsn, &host_no);
if (!session)
return -ENOMEM;
- ev->r.c_session_ret.host_no = hostno;
+ ev->r.c_session_ret.host_no = host_no;
ev->r.c_session_ret.sid = session->sid;
return 0;
}
@@ -1106,6 +1307,7 @@ static int
iscsi_if_transport_ep(struct iscsi_transport *transport,
struct iscsi_uevent *ev, int msg_type)
{
+ struct iscsi_endpoint *ep;
struct sockaddr *dst_addr;
int rc = 0;
@@ -1115,22 +1317,33 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
return -EINVAL;
dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
- rc = transport->ep_connect(dst_addr,
- ev->u.ep_connect.non_blocking,
- &ev->r.ep_connect_ret.handle);
+ ep = transport->ep_connect(dst_addr,
+ ev->u.ep_connect.non_blocking);
+ if (IS_ERR(ep))
+ return PTR_ERR(ep);
+
+ ev->r.ep_connect_ret.handle = ep->id;
break;
case ISCSI_UEVENT_TRANSPORT_EP_POLL:
if (!transport->ep_poll)
return -EINVAL;
- ev->r.retcode = transport->ep_poll(ev->u.ep_poll.ep_handle,
+ ep = iscsi_lookup_endpoint(ev->u.ep_poll.ep_handle);
+ if (!ep)
+ return -EINVAL;
+
+ ev->r.retcode = transport->ep_poll(ep,
ev->u.ep_poll.timeout_ms);
break;
case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
if (!transport->ep_disconnect)
return -EINVAL;
- transport->ep_disconnect(ev->u.ep_disconnect.ep_handle);
+ ep = iscsi_lookup_endpoint(ev->u.ep_disconnect.ep_handle);
+ if (!ep)
+ return -EINVAL;
+
+ transport->ep_disconnect(ep);
break;
}
return rc;
@@ -1195,6 +1408,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
struct iscsi_internal *priv;
struct iscsi_cls_session *session;
struct iscsi_cls_conn *conn;
+ struct iscsi_endpoint *ep = NULL;
priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
if (!priv)
@@ -1208,7 +1422,22 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
switch (nlh->nlmsg_type) {
case ISCSI_UEVENT_CREATE_SESSION:
- err = iscsi_if_create_session(priv, ev);
+ err = iscsi_if_create_session(priv, ep, ev,
+ ev->u.c_session.initial_cmdsn,
+ ev->u.c_session.cmds_max,
+ ev->u.c_session.queue_depth);
+ break;
+ case ISCSI_UEVENT_CREATE_BOUND_SESSION:
+ ep = iscsi_lookup_endpoint(ev->u.c_bound_session.ep_handle);
+ if (!ep) {
+ err = -EINVAL;
+ break;
+ }
+
+ err = iscsi_if_create_session(priv, ep, ev,
+ ev->u.c_bound_session.initial_cmdsn,
+ ev->u.c_bound_session.cmds_max,
+ ev->u.c_bound_session.queue_depth);
break;
case ISCSI_UEVENT_DESTROY_SESSION:
session = iscsi_session_lookup(ev->u.d_session.sid);
@@ -1414,6 +1643,8 @@ iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
+iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0);
+iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0)
static ssize_t
show_priv_session_state(struct device *dev, struct device_attribute *attr,
@@ -1580,6 +1811,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
priv->daemon_pid = -1;
priv->iscsi_transport = tt;
priv->t.user_scan = iscsi_user_scan;
+ if (!(tt->caps & CAP_DATA_PATH_OFFLOAD))
+ priv->t.create_work_queue = 1;
priv->dev.class = &iscsi_transport_class;
snprintf(priv->dev.bus_id, BUS_ID_SIZE, "%s", tt->name);
@@ -1595,7 +1828,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
priv->t.host_attrs.ac.attrs = &priv->host_attrs[0];
priv->t.host_attrs.ac.class = &iscsi_host_class.class;
priv->t.host_attrs.ac.match = iscsi_host_match;
- priv->t.host_size = sizeof(struct iscsi_host);
+ priv->t.host_size = sizeof(struct iscsi_cls_host);
transport_container_register(&priv->t.host_attrs);
SETUP_HOST_RD_ATTR(netdev, ISCSI_HOST_NETDEV_NAME);
@@ -1653,6 +1886,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT);
SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO);
SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO);
+ SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME);
+ SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME);
SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
SETUP_PRIV_SESSION_RD_ATTR(state);
@@ -1668,6 +1903,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
unregister_dev:
device_unregister(&priv->dev);
+ return NULL;
free_priv:
kfree(priv);
return NULL;
@@ -1715,10 +1951,14 @@ static __init int iscsi_transport_init(void)
if (err)
return err;
- err = transport_class_register(&iscsi_host_class);
+ err = class_register(&iscsi_endpoint_class);
if (err)
goto unregister_transport_class;
+ err = transport_class_register(&iscsi_host_class);
+ if (err)
+ goto unregister_endpoint_class;
+
err = transport_class_register(&iscsi_connection_class);
if (err)
goto unregister_host_class;
@@ -1727,8 +1967,8 @@ static __init int iscsi_transport_init(void)
if (err)
goto unregister_conn_class;
- nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx, NULL,
- THIS_MODULE);
+ nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx,
+ NULL, THIS_MODULE);
if (!nls) {
err = -ENOBUFS;
goto unregister_session_class;
@@ -1748,6 +1988,8 @@ unregister_conn_class:
transport_class_unregister(&iscsi_connection_class);
unregister_host_class:
transport_class_unregister(&iscsi_host_class);
+unregister_endpoint_class:
+ class_unregister(&iscsi_endpoint_class);
unregister_transport_class:
class_unregister(&iscsi_transport_class);
return err;
@@ -1760,6 +2002,7 @@ static void __exit iscsi_transport_exit(void)
transport_class_unregister(&iscsi_connection_class);
transport_class_unregister(&iscsi_session_class);
transport_class_unregister(&iscsi_host_class);
+ class_unregister(&iscsi_endpoint_class);
class_unregister(&iscsi_transport_class);
}
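
The endpoint rework above turns the old opaque ep_handle into a refcounted class device: iscsi_create_endpoint() allocates the endpoint plus dd_size bytes of transport-private data, userspace sees it as /sys/class/iscsi_endpoint/ep-N, and the netlink handlers resolve handles back with iscsi_lookup_endpoint() before calling ep_poll/ep_disconnect. A hedged sketch of how an offload transport might use the API; the connection context and names are illustrative:

    struct example_ep_ctx {
            struct sockaddr dst;
            /* ... hardware/TCP connection state ... */
    };

    static struct iscsi_endpoint *
    example_ep_connect(struct sockaddr *dst_addr, int non_blocking)
    {
            struct iscsi_endpoint *ep;
            struct example_ep_ctx *ctx;

            ep = iscsi_create_endpoint(sizeof(*ctx));
            if (!ep)
                    return ERR_PTR(-ENOMEM);

            ctx = ep->dd_data;      /* private area allocated after the endpoint */
            ctx->dst = *dst_addr;
            /* ... start the actual connect, honouring non_blocking ... */
            return ep;
    }

    static void example_ep_disconnect(struct iscsi_endpoint *ep)
    {
            /* ... tear down the connection state in ep->dd_data ... */
            iscsi_destroy_endpoint(ep);
    }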
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 01cefbb2d539..0c63947d8a9d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -58,8 +58,8 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsicam.h>
-#include <scsi/sd.h>
+#include "sd.h"
#include "scsi_logging.h"
MODULE_AUTHOR("Eric Youngdale");
@@ -295,11 +295,6 @@ static int sd_major(int major_idx)
}
}
-static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
-{
- return container_of(disk->private_data, struct scsi_disk, driver);
-}
-
static struct scsi_disk *__scsi_disk_get(struct gendisk *disk)
{
struct scsi_disk *sdkp = NULL;
@@ -1124,6 +1119,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
cmd[1] = 1; /* Return immediately */
memset((void *) &cmd[2], 0, 8);
cmd[4] = 1; /* Start spin cycle */
+ if (sdkp->device->start_stop_pwr_cond)
+ cmd[4] |= 1 << 4;
scsi_execute_req(sdkp->device, cmd, DMA_NONE,
NULL, 0, &sshdr,
SD_TIMEOUT, SD_MAX_RETRIES);
@@ -1790,6 +1787,9 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
if (start)
cmd[4] |= 1; /* START */
+ if (sdp->start_stop_pwr_cond)
+ cmd[4] |= start ? 1 << 4 : 3 << 4; /* Active or Standby */
+
if (!scsi_device_online(sdp))
return -ENODEV;
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
new file mode 100644
index 000000000000..03a3d45cfa42
--- /dev/null
+++ b/drivers/scsi/sd.h
@@ -0,0 +1,62 @@
+#ifndef _SCSI_DISK_H
+#define _SCSI_DISK_H
+
+/*
+ * More than enough for everybody ;) The huge number of majors
+ * is a leftover from 16bit dev_t days, we don't really need that
+ * much numberspace.
+ */
+#define SD_MAJORS 16
+
+/*
+ * This is limited by the naming scheme enforced in sd_probe,
+ * add another character to it if you really need more disks.
+ */
+#define SD_MAX_DISKS (((26 * 26) + 26 + 1) * 26)
+
+/*
+ * Time out in seconds for disks and Magneto-opticals (which are slower).
+ */
+#define SD_TIMEOUT (30 * HZ)
+#define SD_MOD_TIMEOUT (75 * HZ)
+
+/*
+ * Number of allowed retries
+ */
+#define SD_MAX_RETRIES 5
+#define SD_PASSTHROUGH_RETRIES 1
+
+/*
+ * Size of the initial data buffer for mode and read capacity data
+ */
+#define SD_BUF_SIZE 512
+
+struct scsi_disk {
+ struct scsi_driver *driver; /* always &sd_template */
+ struct scsi_device *device;
+ struct device dev;
+ struct gendisk *disk;
+ unsigned int openers; /* protected by BKL for now, yuck */
+ sector_t capacity; /* size in 512-byte sectors */
+ u32 index;
+ u8 media_present;
+ u8 write_prot;
+ unsigned previous_state : 1;
+ unsigned WCE : 1; /* state of disk WCE bit */
+ unsigned RCD : 1; /* state of disk RCD bit, unused */
+ unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
+};
+#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
+
+static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
+{
+ return container_of(disk->private_data, struct scsi_disk, driver);
+}
+
+#define sd_printk(prefix, sdsk, fmt, a...) \
+ (sdsk)->disk ? \
+ sdev_printk(prefix, (sdsk)->device, "[%s] " fmt, \
+ (sdsk)->disk->disk_name, ##a) : \
+ sdev_printk(prefix, (sdsk)->device, fmt, ##a)
+
+#endif /* _SCSI_DISK_H */
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index ea0edd1b2e76..d3b8ebb83776 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -49,6 +49,7 @@ static int sg_version_num = 30534; /* 2 digits for each component */
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/blktrace_api.h>
+#include <linux/smp_lock.h>
#include "scsi.h"
#include <scsi/scsi_dbg.h>
@@ -182,8 +183,9 @@ static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
int tablesize);
static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
Sg_request * srp);
-static ssize_t sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
- int blocking, int read_only, Sg_request ** o_srp);
+static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
+ const char __user *buf, size_t count, int blocking,
+ int read_only, Sg_request **o_srp);
static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
unsigned char *cmnd, int timeout, int blocking);
static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
@@ -204,7 +206,6 @@ static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static int sg_res_in_use(Sg_fd * sfp);
-static int sg_allow_access(unsigned char opcode, char dev_type);
static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len);
static Sg_device *sg_get_dev(int dev);
#ifdef CONFIG_SCSI_PROC_FS
@@ -227,19 +228,26 @@ sg_open(struct inode *inode, struct file *filp)
int res;
int retval;
+ lock_kernel();
nonseekable_open(inode, filp);
SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
sdp = sg_get_dev(dev);
- if ((!sdp) || (!sdp->device))
+ if ((!sdp) || (!sdp->device)) {
+ unlock_kernel();
return -ENXIO;
- if (sdp->detached)
+ }
+ if (sdp->detached) {
+ unlock_kernel();
return -ENODEV;
+ }
/* This driver's module count bumped by fops_get in <linux/fs.h> */
/* Prevent the device driver from vanishing while we sleep */
retval = scsi_device_get(sdp->device);
- if (retval)
+ if (retval) {
+ unlock_kernel();
return retval;
+ }
if (!((flags & O_NONBLOCK) ||
scsi_block_when_processing_errors(sdp->device))) {
@@ -295,10 +303,12 @@ sg_open(struct inode *inode, struct file *filp)
retval = -ENOMEM;
goto error_out;
}
+ unlock_kernel();
return 0;
error_out:
scsi_device_put(sdp->device);
+ unlock_kernel();
return retval;
}
@@ -544,7 +554,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
return -EFAULT;
blocking = !(filp->f_flags & O_NONBLOCK);
if (old_hdr.reply_len < 0)
- return sg_new_write(sfp, buf, count, blocking, 0, NULL);
+ return sg_new_write(sfp, filp, buf, count, blocking, 0, NULL);
if (count < (SZ_SG_HEADER + 6))
return -EIO; /* The minimum scsi command length is 6 bytes. */
@@ -621,8 +631,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
}
static ssize_t
-sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
- int blocking, int read_only, Sg_request ** o_srp)
+sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
+ size_t count, int blocking, int read_only,
+ Sg_request **o_srp)
{
int k;
Sg_request *srp;
@@ -678,8 +689,7 @@ sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
sg_remove_request(sfp, srp);
return -EFAULT;
}
- if (read_only &&
- (!sg_allow_access(cmnd[0], sfp->parentdp->device->type))) {
+ if (read_only && !blk_verify_command(file, cmnd)) {
sg_remove_request(sfp, srp);
return -EPERM;
}
@@ -799,7 +809,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
return -EFAULT;
result =
- sg_new_write(sfp, p, SZ_SG_IO_HDR,
+ sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
blocking, read_only, &srp);
if (result < 0)
return result;
@@ -1026,6 +1036,9 @@ sg_ioctl(struct inode *inode, struct file *filp,
case SG_SCSI_RESET_DEVICE:
val = SCSI_TRY_RESET_DEVICE;
break;
+ case SG_SCSI_RESET_TARGET:
+ val = SCSI_TRY_RESET_TARGET;
+ break;
case SG_SCSI_RESET_BUS:
val = SCSI_TRY_RESET_BUS;
break;
@@ -1048,7 +1061,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
if (copy_from_user(&opcode, siocp->data, 1))
return -EFAULT;
- if (!sg_allow_access(opcode, sdp->device->type))
+ if (!blk_verify_command(filp, &opcode))
return -EPERM;
}
return sg_scsi_ioctl(filp, sdp->device->request_queue, NULL, p);
@@ -2502,30 +2515,6 @@ sg_page_free(struct page *page, int size)
__free_pages(page, order);
}
-#ifndef MAINTENANCE_IN_CMD
-#define MAINTENANCE_IN_CMD 0xa3
-#endif
-
-static unsigned char allow_ops[] = { TEST_UNIT_READY, REQUEST_SENSE,
- INQUIRY, READ_CAPACITY, READ_BUFFER, READ_6, READ_10, READ_12,
- READ_16, MODE_SENSE, MODE_SENSE_10, LOG_SENSE, REPORT_LUNS,
- SERVICE_ACTION_IN, RECEIVE_DIAGNOSTIC, READ_LONG, MAINTENANCE_IN_CMD
-};
-
-static int
-sg_allow_access(unsigned char opcode, char dev_type)
-{
- int k;
-
- if (TYPE_SCANNER == dev_type) /* TYPE_ROM maybe burner */
- return 1;
- for (k = 0; k < sizeof (allow_ops); ++k) {
- if (opcode == allow_ops[k])
- return 1;
- }
- return 0;
-}
-
#ifdef CONFIG_SCSI_PROC_FS
static int
sg_idr_max_id(int id, void *p, void *data)
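
The deleted sg_allow_access() table above was sg's private, device-type-keyed opcode whitelist; the replacement defers to the block layer's blk_verify_command(), which filters against the opening struct file, so sg and the generic SG_IO path enforce a single policy. A minimal sketch of the gate, mirroring the call sites above (where, per those call sites, a zero return from blk_verify_command() means the command is not permitted):

    static int example_check_cmd(struct file *file, unsigned char *cmnd,
                                 int read_only)
    {
            if (read_only && !blk_verify_command(file, cmnd))
                    return -EPERM;  /* filtered opcode on a read-only opener */
            return 0;
    }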
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index c82df8bd4d89..27f5bfd1def3 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -673,24 +673,20 @@ fail:
static void get_sectorsize(struct scsi_cd *cd)
{
unsigned char cmd[10];
- unsigned char *buffer;
+ unsigned char buffer[8];
int the_result, retries = 3;
int sector_size;
struct request_queue *queue;
- buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
- if (!buffer)
- goto Enomem;
-
do {
cmd[0] = READ_CAPACITY;
memset((void *) &cmd[1], 0, 9);
- memset(buffer, 0, 8);
+ memset(buffer, 0, sizeof(buffer));
/* Do the command and wait.. */
the_result = scsi_execute_req(cd->device, cmd, DMA_FROM_DEVICE,
- buffer, 8, NULL, SR_TIMEOUT,
- MAX_RETRIES);
+ buffer, sizeof(buffer), NULL,
+ SR_TIMEOUT, MAX_RETRIES);
retries--;
@@ -745,14 +741,8 @@ static void get_sectorsize(struct scsi_cd *cd)
queue = cd->device->request_queue;
blk_queue_hardsect_size(queue, sector_size);
-out:
- kfree(buffer);
- return;
-Enomem:
- cd->capacity = 0x1fffff;
- cd->device->sector_size = 2048; /* A guess, just in case */
- goto out;
+ return;
}
static void get_capabilities(struct scsi_cd *cd)
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 6e5a5bb31311..4684cc716aa4 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -38,6 +38,7 @@ static const char *verstr = "20080224";
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/mutex.h>
+#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <asm/dma.h>
@@ -1113,7 +1114,7 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
}
- /* Open the device. Needs to be called with BKL only because of incrementing the SCSI host
+ /* Open the device. Needs to take the BKL only because of incrementing the SCSI host
module count. */
static int st_open(struct inode *inode, struct file *filp)
{
@@ -1123,6 +1124,7 @@ static int st_open(struct inode *inode, struct file *filp)
int dev = TAPE_NR(inode);
char *name;
+ lock_kernel();
/*
* We really want to do nonseekable_open(inode, filp); here, but some
* versions of tar incorrectly call lseek on tapes and bail out if that
@@ -1130,8 +1132,10 @@ static int st_open(struct inode *inode, struct file *filp)
*/
filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
- if (!(STp = scsi_tape_get(dev)))
+ if (!(STp = scsi_tape_get(dev))) {
+ unlock_kernel();
return -ENXIO;
+ }
write_lock(&st_dev_arr_lock);
filp->private_data = STp;
@@ -1140,6 +1144,7 @@ static int st_open(struct inode *inode, struct file *filp)
if (STp->in_use) {
write_unlock(&st_dev_arr_lock);
scsi_tape_put(STp);
+ unlock_kernel();
DEB( printk(ST_DEB_MSG "%s: Device already in use.\n", name); )
return (-EBUSY);
}
@@ -1188,12 +1193,14 @@ static int st_open(struct inode *inode, struct file *filp)
retval = (-EIO);
goto err_out;
}
+ unlock_kernel();
return 0;
err_out:
normalize_buffer(STp->buffer);
STp->in_use = 0;
scsi_tape_put(STp);
+ unlock_kernel();
return retval;
}
diff --git a/drivers/scsi/sym53c8xx_2/sym_misc.h b/drivers/scsi/sym53c8xx_2/sym_misc.h
index 0433d5d0caf3..430537183c18 100644
--- a/drivers/scsi/sym53c8xx_2/sym_misc.h
+++ b/drivers/scsi/sym53c8xx_2/sym_misc.h
@@ -121,9 +121,7 @@ static __inline void sym_que_move(struct sym_quehead *orig,
}
}
-#define sym_que_entry(ptr, type, member) \
- ((type *)((char *)(ptr)-(unsigned int)(&((type *)0)->member)))
-
+#define sym_que_entry(ptr, type, member) container_of(ptr, type, member)
#define sym_insque(new, pos) __sym_que_add(new, pos, (pos)->flink)
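
Finally, the sym53c8xx queue macro drops its hand-rolled null-pointer offset arithmetic in favor of the kernel's standard container_of(), which computes the same enclosing-structure pointer using offsetof() and proper pointer types. A small usage sketch; struct foo and its field names are illustrative:

    #include <linux/kernel.h>       /* container_of() */

    struct foo {
            int payload;
            struct sym_quehead entry;       /* linked into a sym queue */
    };

    static struct foo *foo_from_queue(struct sym_quehead *qp)
    {
            /* sym_que_entry(qp, struct foo, entry) now expands to this: */
            return container_of(qp, struct foo, entry);
    }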