Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/53c700.c | 8
-rw-r--r--  drivers/scsi/Kconfig | 11
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 17
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 22
-rw-r--r--  drivers/scsi/aacraid/commctrl.c | 19
-rw-r--r--  drivers/scsi/aacraid/comminit.c | 18
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 78
-rw-r--r--  drivers/scsi/aacraid/linit.c | 232
-rw-r--r--  drivers/scsi/aacraid/src.c | 136
-rw-r--r--  drivers/scsi/atari_scsi.c | 2
-rw-r--r--  drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h | 3
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc.h | 5
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_constants.h | 3
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_debug.c | 3
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_debug.h | 3
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_els.c | 16
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 64
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_hwi.c | 3
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c | 14
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_tgt.c | 3
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_iscsi.c | 3
-rw-r--r--  drivers/scsi/csiostor/csio_hw.c | 98
-rw-r--r--  drivers/scsi/csiostor/csio_hw.h | 1
-rw-r--r--  drivers/scsi/csiostor/csio_hw_chip.h | 14
-rw-r--r--  drivers/scsi/csiostor/csio_hw_t5.c | 29
-rw-r--r--  drivers/scsi/csiostor/csio_init.c | 6
-rw-r--r--  drivers/scsi/csiostor/csio_init.h | 2
-rw-r--r--  drivers/scsi/csiostor/csio_lnode.c | 43
-rw-r--r--  drivers/scsi/csiostor/csio_wr.c | 4
-rw-r--r--  drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 2
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 15
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.h | 1
-rw-r--r--  drivers/scsi/cxlflash/common.h | 48
-rw-r--r--  drivers/scsi/cxlflash/main.c | 1048
-rw-r--r--  drivers/scsi/cxlflash/main.h | 7
-rw-r--r--  drivers/scsi/cxlflash/sislite.h | 27
-rw-r--r--  drivers/scsi/cxlflash/superpipe.c | 34
-rw-r--r--  drivers/scsi/cxlflash/vlun.c | 89
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 27
-rw-r--r--  drivers/scsi/dpt/dpti_i2o.h | 2
-rw-r--r--  drivers/scsi/esas2r/esas2r.h | 4
-rw-r--r--  drivers/scsi/esas2r/esas2r_init.c | 4
-rw-r--r--  drivers/scsi/esas2r/esas2r_ioctl.c | 10
-rw-r--r--  drivers/scsi/fcoe/fcoe.c | 14
-rw-r--r--  drivers/scsi/fcoe/fcoe_ctlr.c | 5
-rw-r--r--  drivers/scsi/fnic/fnic_debugfs.c | 1
-rw-r--r--  drivers/scsi/fnic/fnic_fcs.c | 33
-rw-r--r--  drivers/scsi/fnic/fnic_io.h | 9
-rw-r--r--  drivers/scsi/fnic/fnic_main.c | 14
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c | 17
-rw-r--r--  drivers/scsi/fnic/fnic_stats.h | 7
-rw-r--r--  drivers/scsi/fnic/fnic_trace.c | 24
-rw-r--r--  drivers/scsi/hisi_sas/Kconfig | 10
-rw-r--r--  drivers/scsi/hisi_sas/Makefile | 1
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas.h | 91
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 436
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 82
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 216
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 1846
-rw-r--r--  drivers/scsi/hpsa.c | 851
-rw-r--r--  drivers/scsi/hpsa.h | 4
-rw-r--r--  drivers/scsi/hpsa_cmd.h | 20
-rw-r--r--  drivers/scsi/hptiop.c | 2
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 2
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 2
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 5
-rw-r--r--  drivers/scsi/ips.c | 12
-rw-r--r--  drivers/scsi/ips.h | 4
-rw-r--r--  drivers/scsi/libfc/fc_libfc.c | 2
-rw-r--r--  drivers/scsi/libiscsi.c | 4
-rw-r--r--  drivers/scsi/libsas/sas_event.c | 36
-rw-r--r--  drivers/scsi/libsas/sas_internal.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 23
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 101
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 31
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 103
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 55
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 17
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 14
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 163
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 211
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c | 308
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.h | 14
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 19
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 106
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 21
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.c | 2
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 8
-rw-r--r--  drivers/scsi/osd/osd_initiator.c | 29
-rw-r--r--  drivers/scsi/osst.c | 3
-rw-r--r--  drivers/scsi/qedf/drv_fcoe_fw_funcs.c | 2
-rw-r--r--  drivers/scsi/qedf/drv_fcoe_fw_funcs.h | 2
-rw-r--r--  drivers/scsi/qedf/drv_scsi_fw_funcs.c | 2
-rw-r--r--  drivers/scsi/qedf/drv_scsi_fw_funcs.h | 2
-rw-r--r--  drivers/scsi/qedf/qedf.h | 2
-rw-r--r--  drivers/scsi/qedf/qedf_attr.c | 59
-rw-r--r--  drivers/scsi/qedf/qedf_dbg.h | 2
-rw-r--r--  drivers/scsi/qedf/qedf_debugfs.c | 2
-rw-r--r--  drivers/scsi/qedf/qedf_els.c | 8
-rw-r--r--  drivers/scsi/qedf/qedf_fip.c | 25
-rw-r--r--  drivers/scsi/qedf/qedf_hsi.h | 2
-rw-r--r--  drivers/scsi/qedf/qedf_io.c | 37
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 209
-rw-r--r--  drivers/scsi/qedf/qedf_version.h | 8
-rw-r--r--  drivers/scsi/qedi/qedi_fw.c | 24
-rw-r--r--  drivers/scsi/qedi/qedi_fw_api.c | 3
-rw-r--r--  drivers/scsi/qedi/qedi_iscsi.c | 3
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 2
-rw-r--r--  drivers/scsi/qla1280.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/Kconfig | 1
-rw-r--r--  drivers/scsi/qla2xxx/Makefile | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 8
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 161
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h | 17
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 182
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c | 145
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 35
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 40
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 275
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 606
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 60
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 116
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 229
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 125
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 44
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.c | 761
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.h | 132
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.h | 17
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx2.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 368
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 1438
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h | 58
-rw-r--r--  drivers/scsi/qla2xxx/qla_tmpl.c | 16
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 4
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 36
-rw-r--r--  drivers/scsi/scsi.c | 13
-rw-r--r--  drivers/scsi/scsi_debug.c | 10
-rw-r--r--  drivers/scsi/scsi_error.c | 11
-rw-r--r--  drivers/scsi/scsi_lib.c | 406
-rw-r--r--  drivers/scsi/scsi_priv.h | 3
-rw-r--r--  drivers/scsi/scsi_scan.c | 53
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 42
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 28
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 10
-rw-r--r--  drivers/scsi/scsi_transport_srp.c | 7
-rw-r--r--  drivers/scsi/scsicam.c | 4
-rw-r--r--  drivers/scsi/sd.c | 131
-rw-r--r--  drivers/scsi/sd.h | 2
-rw-r--r--  drivers/scsi/sg.c | 8
-rw-r--r--  drivers/scsi/sgiwd93.c | 10
-rw-r--r--  drivers/scsi/smartpqi/smartpqi.h | 194
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c | 2325
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sas_transport.c | 2
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sis.c | 100
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sis.h | 7
-rw-r--r--  drivers/scsi/snic/snic_isr.c | 4
-rw-r--r--  drivers/scsi/snic/snic_scsi.c | 4
-rw-r--r--  drivers/scsi/st.c | 3
-rw-r--r--  drivers/scsi/storvsc_drv.c | 56
-rw-r--r--  drivers/scsi/sun_esp.c | 9
-rw-r--r--  drivers/scsi/ufs/tc-dwc-g210-pci.c | 2
-rw-r--r--  drivers/scsi/ufs/ufshcd-pci.c | 60
-rw-r--r--  drivers/scsi/ufs/ufshcd-pltfrm.c | 2
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 15
-rw-r--r--  drivers/scsi/virtio_scsi.c | 13
-rw-r--r--  drivers/scsi/xen-scsifront.c | 1
171 files changed, 11466 insertions, 4516 deletions
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 95e32a47face..4b3b08025ef6 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -296,8 +296,8 @@ NCR_700_detect(struct scsi_host_template *tpnt,
if(tpnt->sdev_attrs == NULL)
tpnt->sdev_attrs = NCR_700_dev_attrs;
- memory = dma_alloc_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
- &pScript, GFP_KERNEL);
+ memory = dma_alloc_attrs(hostdata->dev, TOTAL_MEM_SIZE, &pScript,
+ GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
if(memory == NULL) {
printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
return NULL;
@@ -410,8 +410,8 @@ NCR_700_release(struct Scsi_Host *host)
struct NCR_700_Host_Parameters *hostdata =
(struct NCR_700_Host_Parameters *)host->hostdata[0];
- dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
- hostdata->script, hostdata->pScript);
+ dma_free_attrs(hostdata->dev, TOTAL_MEM_SIZE, hostdata->script,
+ hostdata->pScript, DMA_ATTR_NON_CONSISTENT);
return 1;
}
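
The two hunks above move the driver from the removed dma_{alloc,free}_noncoherent() helpers to the attrs-based API. A minimal sketch of the replacement pattern, assuming hypothetical dev/size names rather than this driver's TOTAL_MEM_SIZE plumbing:

	void *buf;
	dma_addr_t handle;

	buf = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL,
			      DMA_ATTR_NON_CONSISTENT);
	if (!buf)
		return -ENOMEM;
	/* memory is non-coherent: bracket device access with dma_sync_*() */
	dma_free_attrs(dev, size, buf, handle, DMA_ATTR_NON_CONSISTENT);
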
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 3c52867dfe28..d384f4f86c26 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -47,17 +47,6 @@ config SCSI_NETLINK
default n
depends on NET
-config SCSI_MQ_DEFAULT
- bool "SCSI: use blk-mq I/O path by default"
- depends on SCSI
- ---help---
- This option enables the new blk-mq based I/O path for SCSI
- devices by default. With the option the scsi_mod.use_blk_mq
- module/boot option defaults to Y, without it to N, but it can
- still be overridden either way.
-
- If unsure say N.
-
config SCSI_PROC_FS
bool "legacy /proc/scsi/ support"
depends on SCSI && PROC_FS
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 43d88389e899..707ee2f5954d 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -2071,20 +2071,15 @@ int aac_get_adapter_info(struct aac_dev* dev)
expose_physicals = 0;
}
- if(dev->dac_support != 0) {
- if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(64)) &&
- !pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(64))) {
+ if (dev->dac_support) {
+ if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(64))) {
if (!dev->in_reset)
- printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n",
- dev->name, dev->id);
- } else if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(32)) &&
- !pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32))) {
- printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n",
- dev->name, dev->id);
+ dev_info(&dev->pdev->dev, "64 Bit DAC enabled\n");
+ } else if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(32))) {
+ dev_info(&dev->pdev->dev, "DMA mask set failed, 64 Bit DAC disabled\n");
dev->dac_support = 0;
} else {
- printk(KERN_WARNING"%s%d: No suitable DMA available.\n",
- dev->name, dev->id);
+ dev_info(&dev->pdev->dev, "No suitable DMA available\n");
rcode = -ENOMEM;
}
}
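
The DMA-mask handling this hunk cleans up follows the usual 64-then-32-bit fallback of the pre-dma_set_mask_and_coherent() era; a minimal sketch under that assumption (hypothetical pdev, simplified error handling):

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
		dev_info(&pdev->dev, "using 64-bit DMA\n");
	else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
		dev_info(&pdev->dev, "falling back to 32-bit DMA\n");
	else
		return -ENOMEM;	/* no usable DMA configuration */
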
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index d281492009fb..d31a9bc2ba69 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -97,7 +97,7 @@ enum {
#define PMC_GLOBAL_INT_BIT0 0x00000001
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 50792
+# define AAC_DRIVER_BUILD 50834
# define AAC_DRIVER_BRANCH "-custom"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -415,6 +415,7 @@ struct aac_ciss_identify_pd {
* These macros convert from physical channels to virtual channels
*/
#define CONTAINER_CHANNEL (0)
+#define NATIVE_CHANNEL (1)
#define CONTAINER_TO_CHANNEL(cont) (CONTAINER_CHANNEL)
#define CONTAINER_TO_ID(cont) (cont)
#define CONTAINER_TO_LUN(cont) (0)
@@ -423,7 +424,6 @@ struct aac_ciss_identify_pd {
#define PMC_DEVICE_S6 0x28b
#define PMC_DEVICE_S7 0x28c
#define PMC_DEVICE_S8 0x28d
-#define PMC_DEVICE_S9 0x28f
#define aac_phys_to_logical(x) ((x)+1)
#define aac_logical_to_phys(x) ((x)?(x)-1:0)
@@ -2377,6 +2377,7 @@ struct revision
#define SOFT_RESET_TIME 60
+
struct aac_common
{
/*
@@ -2487,7 +2488,9 @@ struct aac_hba_info {
#define IOP_RESET_FW_FIB_DUMP 0x00000034
#define IOP_RESET 0x00001000
#define IOP_RESET_ALWAYS 0x00001001
-#define RE_INIT_ADAPTER 0x000000ee
+#define RE_INIT_ADAPTER 0x000000ee
+
+#define IOP_SRC_RESET_MASK 0x00000100
/*
* Adapter Status Register
@@ -2512,6 +2515,7 @@ struct aac_hba_info {
#define SELF_TEST_FAILED 0x00000004
#define MONITOR_PANIC 0x00000020
+#define KERNEL_BOOTING 0x00000040
#define KERNEL_UP_AND_RUNNING 0x00000080
#define KERNEL_PANIC 0x00000100
#define FLASH_UPD_PENDING 0x00002000
@@ -2684,6 +2688,18 @@ int aac_probe_container(struct aac_dev *dev, int cid);
int _aac_rx_init(struct aac_dev *dev);
int aac_rx_select_comm(struct aac_dev *dev, int comm);
int aac_rx_deliver_producer(struct fib * fib);
+
+static inline int aac_is_src(struct aac_dev *dev)
+{
+ u16 device = dev->pdev->device;
+
+ if (device == PMC_DEVICE_S6 ||
+ device == PMC_DEVICE_S7 ||
+ device == PMC_DEVICE_S8)
+ return 1;
+ return 0;
+}
+
char * get_container_type(unsigned type);
extern int numacb;
extern char aac_driver_version[];
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index d2f8d5954840..9ab0fa959d83 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -668,7 +668,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
- p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
+ p = kmalloc(sg_count[i], GFP_KERNEL);
if (!p) {
rcode = -ENOMEM;
goto cleanup;
@@ -732,8 +732,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
rcode = -EINVAL;
goto cleanup;
}
- /* Does this really need to be GFP_DMA? */
- p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
+
+ p = kmalloc(sg_count[i], GFP_KERNEL);
if(!p) {
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
sg_count[i], i, upsg->count));
@@ -788,8 +788,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
rcode = -EINVAL;
goto cleanup;
}
- /* Does this really need to be GFP_DMA? */
- p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
+
+ p = kmalloc(sg_count[i], GFP_KERNEL);
if(!p) {
dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
sg_count[i], i, usg->count));
@@ -845,8 +845,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
rcode = -EINVAL;
goto cleanup;
}
- /* Does this really need to be GFP_DMA? */
- p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
+ p = kmalloc(sg_count[i], GFP_KERNEL|GFP_DMA32);
if (!p) {
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
sg_count[i], i, usg->count));
@@ -887,7 +886,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
rcode = -EINVAL;
goto cleanup;
}
- p = kmalloc(sg_count[i], GFP_KERNEL);
+ p = kmalloc(sg_count[i], GFP_KERNEL|GFP_DMA32);
if (!p) {
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
sg_count[i], i, upsg->count));
@@ -950,12 +949,15 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
&((struct aac_native_hba *)srbfib->hw_fib_va)->resp.err;
struct aac_srb_reply reply;
+ memset(&reply, 0, sizeof(reply));
reply.status = ST_OK;
if (srbfib->flags & FIB_CONTEXT_FLAG_FASTRESP) {
/* fast response */
reply.srb_status = SRB_STATUS_SUCCESS;
reply.scsi_status = 0;
reply.data_xfer_length = byte_count;
+ reply.sense_data_size = 0;
+ memset(reply.sense_data, 0, AAC_SENSE_BUFFERSIZE);
} else {
reply.srb_status = err->service_response;
reply.scsi_status = err->status;
@@ -1019,6 +1021,7 @@ static int aac_get_hba_info(struct aac_dev *dev, void __user *arg)
{
struct aac_hba_info hbainfo;
+ memset(&hbainfo, 0, sizeof(hbainfo));
hbainfo.adapter_number = (u8) dev->id;
hbainfo.system_io_bus_number = dev->pdev->bus->number;
hbainfo.device_number = (dev->pdev->devfn >> 3);
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 1151505853cf..9ee025b1d0e0 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -53,11 +53,8 @@ static inline int aac_is_msix_mode(struct aac_dev *dev)
{
u32 status = 0;
- if (dev->pdev->device == PMC_DEVICE_S6 ||
- dev->pdev->device == PMC_DEVICE_S7 ||
- dev->pdev->device == PMC_DEVICE_S8) {
+ if (aac_is_src(dev))
status = src_readl(dev, MUnit.OMR);
- }
return (status & AAC_INT_MODE_MSIX);
}
@@ -325,9 +322,7 @@ int aac_send_shutdown(struct aac_dev * dev)
/* FIB should be freed only after getting the response from the F/W */
if (status != -ERESTARTSYS)
aac_fib_free(fibctx);
- if ((dev->pdev->device == PMC_DEVICE_S7 ||
- dev->pdev->device == PMC_DEVICE_S8 ||
- dev->pdev->device == PMC_DEVICE_S9) &&
+ if (aac_is_src(dev) &&
dev->msi_enabled)
aac_set_intx_mode(dev);
return status;
@@ -583,9 +578,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
dev->max_fib_size = status[1] & 0xFFE0;
host->sg_tablesize = status[2] >> 16;
dev->sg_tablesize = status[2] & 0xFFFF;
- if (dev->pdev->device == PMC_DEVICE_S7 ||
- dev->pdev->device == PMC_DEVICE_S8 ||
- dev->pdev->device == PMC_DEVICE_S9) {
+ if (aac_is_src(dev)) {
if (host->can_queue > (status[3] >> 16) -
AAC_NUM_MGT_FIB)
host->can_queue = (status[3] >> 16) -
@@ -604,10 +597,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
pr_warn("numacb=%d ignored\n", numacb);
}
- if (dev->pdev->device == PMC_DEVICE_S6 ||
- dev->pdev->device == PMC_DEVICE_S7 ||
- dev->pdev->device == PMC_DEVICE_S8 ||
- dev->pdev->device == PMC_DEVICE_S9)
+ if (aac_is_src(dev))
aac_define_int_mode(dev);
/*
* Ok now init the communication subsystem
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 7a1b8a2ce658..1c617ccfaf12 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -803,11 +803,11 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
if (aac_check_eeh_failure(dev))
return -EFAULT;
- /* Only set for first known interruptable command */
- if (down_interruptible(&fibptr->event_wait)) {
+ fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
+ if (down_interruptible(&fibptr->event_wait))
fibptr->done = 2;
- up(&fibptr->event_wait);
- }
+ fibptr->flags &= ~(FIB_CONTEXT_FLAG_WAIT);
+
spin_lock_irqsave(&fibptr->event_lock, flags);
if ((fibptr->done == 0) || (fibptr->done == 2)) {
fibptr->done = 2; /* Tell interrupt we aborted */
@@ -1513,6 +1513,8 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
struct scsi_cmnd *command_list;
int jafo = 0;
int bled;
+ u64 dmamask;
+ int num_of_fibs = 0;
/*
* Assumptions:
@@ -1546,10 +1548,20 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
/*
* Loop through the fibs, close the synchronous FIBS
*/
- for (retval = 1, index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
+ retval = 1;
+ num_of_fibs = aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
+ for (index = 0; index < num_of_fibs; index++) {
+
struct fib *fib = &aac->fibs[index];
- if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
- (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) {
+ __le32 XferState = fib->hw_fib_va->header.XferState;
+ bool is_response_expected = false;
+
+ if (!(XferState & cpu_to_le32(NoResponseExpected | Async)) &&
+ (XferState & cpu_to_le32(ResponseExpected)))
+ is_response_expected = true;
+
+ if (is_response_expected
+ || fib->flags & FIB_CONTEXT_FLAG_WAIT) {
unsigned long flagv;
spin_lock_irqsave(&fib->event_lock, flagv);
up(&fib->event_wait);
@@ -1580,21 +1592,27 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
aac_free_irq(aac);
kfree(aac->fsa_dev);
aac->fsa_dev = NULL;
+
+ dmamask = DMA_BIT_MASK(32);
quirks = aac_get_driver_ident(index)->quirks;
- if (quirks & AAC_QUIRK_31BIT) {
- if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(31)))) ||
- ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(31)))))
- goto out;
- } else {
- if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32)))) ||
- ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(32)))))
- goto out;
+ if (quirks & AAC_QUIRK_31BIT)
+ retval = pci_set_dma_mask(aac->pdev, dmamask);
+ else if (!(quirks & AAC_QUIRK_SRC))
+ retval = pci_set_dma_mask(aac->pdev, dmamask);
+ else
+ retval = pci_set_consistent_dma_mask(aac->pdev, dmamask);
+
+ if (quirks & AAC_QUIRK_31BIT && !retval) {
+ dmamask = DMA_BIT_MASK(31);
+ retval = pci_set_consistent_dma_mask(aac->pdev, dmamask);
}
+
+ if (retval)
+ goto out;
+
if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
goto out;
- if (quirks & AAC_QUIRK_31BIT)
- if ((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32))))
- goto out;
+
if (jafo) {
aac->thread = kthread_run(aac_command_thread, aac, "%s",
aac->name);
@@ -1768,8 +1786,6 @@ int aac_check_health(struct aac_dev * aac)
int BlinkLED;
unsigned long time_now, flagv = 0;
struct list_head * entry;
- struct Scsi_Host * host;
- int bled;
/* Extending the scope of fib_lock slightly to protect aac->in_reset */
if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
@@ -1881,19 +1897,6 @@ int aac_check_health(struct aac_dev * aac)
printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
- if (!aac_check_reset || ((aac_check_reset == 1) &&
- (aac->supplement_adapter_info.supported_options2 &
- AAC_OPTION_IGNORE_RESET)))
- goto out;
- host = aac->scsi_host_ptr;
- if (aac->thread->pid != current->pid)
- spin_lock_irqsave(host->host_lock, flagv);
- bled = aac_check_reset != 1 ? 1 : 0;
- _aac_reset_adapter(aac, bled, IOP_HWSOFT_RESET);
- if (aac->thread->pid != current->pid)
- spin_unlock_irqrestore(host->host_lock, flagv);
- return BlinkLED;
-
out:
aac->in_reset = 0;
return BlinkLED;
@@ -2483,7 +2486,7 @@ int aac_command_thread(void *data)
if ((time_before(next_check_jiffies,next_jiffies))
&& ((difference = next_check_jiffies - jiffies) <= 0)) {
next_check_jiffies = next_jiffies;
- if (aac_check_health(dev) == 0) {
+ if (aac_adapter_check_health(dev) == 0) {
difference = ((long)(unsigned)check_interval)
* HZ;
next_check_jiffies = jiffies + difference;
@@ -2496,7 +2499,7 @@ int aac_command_thread(void *data)
int ret;
/* Don't even try to talk to the adapter if it's sick */
- ret = aac_check_health(dev);
+ ret = aac_adapter_check_health(dev);
if (ret || !dev->queues)
break;
next_check_jiffies = jiffies
@@ -2588,10 +2591,7 @@ void aac_free_irq(struct aac_dev *dev)
int cpu;
cpu = cpumask_first(cpu_online_mask);
- if (dev->pdev->device == PMC_DEVICE_S6 ||
- dev->pdev->device == PMC_DEVICE_S7 ||
- dev->pdev->device == PMC_DEVICE_S8 ||
- dev->pdev->device == PMC_DEVICE_S9) {
+ if (aac_is_src(dev)) {
if (dev->max_msix > 1) {
for (i = 0; i < dev->max_msix; i++)
free_irq(pci_irq_vector(dev->pdev, i),
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 372a07533026..0f277df73af0 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -405,17 +405,23 @@ static int aac_slave_configure(struct scsi_device *sdev)
int chn, tid;
unsigned int depth = 0;
unsigned int set_timeout = 0;
+ bool set_qd_dev_type = false;
+ u8 devtype = 0;
chn = aac_logical_to_phys(sdev_channel(sdev));
tid = sdev_id(sdev);
- if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS &&
- aac->hba_map[chn][tid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
- depth = aac->hba_map[chn][tid].qd_limit;
+ if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS && aac->sa_firmware) {
+ devtype = aac->hba_map[chn][tid].devtype;
+
+ if (devtype == AAC_DEVTYPE_NATIVE_RAW)
+ depth = aac->hba_map[chn][tid].qd_limit;
+ else if (devtype == AAC_DEVTYPE_ARC_RAW)
+ set_qd_dev_type = true;
+
set_timeout = 1;
goto common_config;
}
-
if (aac->jbod && (sdev->type == TYPE_DISK))
sdev->removable = 1;
@@ -466,9 +472,26 @@ static int aac_slave_configure(struct scsi_device *sdev)
++num_lsu;
depth = (host->can_queue - num_one) / num_lsu;
+
+ if (sdev_channel(sdev) != NATIVE_CHANNEL)
+ goto common_config;
+
+ set_qd_dev_type = true;
+
}
common_config:
+
+ /*
+ * Check if SATA drive
+ */
+ if (set_qd_dev_type) {
+ if (strncmp(sdev->vendor, "ATA", 3) == 0)
+ depth = 32;
+ else
+ depth = 64;
+ }
+
/*
* Firmware has an individual device recovery time typically
* of 35 seconds, give us a margin.
@@ -601,6 +624,56 @@ static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg)
return aac_do_ioctl(dev, cmd, arg);
}
+static int get_num_of_incomplete_fibs(struct aac_dev *aac)
+{
+
+ unsigned long flags;
+ struct scsi_device *sdev = NULL;
+ struct Scsi_Host *shost = aac->scsi_host_ptr;
+ struct scsi_cmnd *scmnd = NULL;
+ struct device *ctrl_dev;
+
+ int mlcnt = 0;
+ int llcnt = 0;
+ int ehcnt = 0;
+ int fwcnt = 0;
+ int krlcnt = 0;
+
+ __shost_for_each_device(sdev, shost) {
+ spin_lock_irqsave(&sdev->list_lock, flags);
+ list_for_each_entry(scmnd, &sdev->cmd_list, list) {
+ switch (scmnd->SCp.phase) {
+ case AAC_OWNER_FIRMWARE:
+ fwcnt++;
+ break;
+ case AAC_OWNER_ERROR_HANDLER:
+ ehcnt++;
+ break;
+ case AAC_OWNER_LOWLEVEL:
+ llcnt++;
+ break;
+ case AAC_OWNER_MIDLEVEL:
+ mlcnt++;
+ break;
+ default:
+ krlcnt++;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&sdev->list_lock, flags);
+ }
+
+ ctrl_dev = &aac->pdev->dev;
+
+ dev_info(ctrl_dev, "outstanding cmd: midlevel-%d\n", mlcnt);
+ dev_info(ctrl_dev, "outstanding cmd: lowlevel-%d\n", llcnt);
+ dev_info(ctrl_dev, "outstanding cmd: error handler-%d\n", ehcnt);
+ dev_info(ctrl_dev, "outstanding cmd: firmware-%d\n", fwcnt);
+ dev_info(ctrl_dev, "outstanding cmd: kernel-%d\n", krlcnt);
+
+ return mlcnt + llcnt + ehcnt + fwcnt;
+}
+
static int aac_eh_abort(struct scsi_cmnd* cmd)
{
struct scsi_device * dev = cmd->device;
@@ -661,8 +734,8 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
(fib_callback) aac_hba_callback,
(void *) cmd);
- /* Wait up to 2 minutes for completion */
- for (count = 0; count < 120; ++count) {
+ /* Wait up to 15 secs for completion */
+ for (count = 0; count < 15; ++count) {
if (cmd->SCp.sent_command) {
ret = SUCCESS;
break;
@@ -754,6 +827,12 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
int count;
u32 bus, cid;
int ret = FAILED;
+ int status = 0;
+ __le32 supported_options2 = 0;
+ bool is_mu_reset;
+ bool is_ignore_reset;
+ bool is_doorbell_reset;
+
bus = aac_logical_to_phys(scmd_channel(cmd));
cid = scmd_id(cmd);
@@ -817,8 +896,8 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
(fib_callback) aac_hba_callback,
(void *) cmd);
- /* Wait up to 2 minutes for completion */
- for (count = 0; count < 120; ++count) {
+ /* Wait up to 15 seconds for completion */
+ for (count = 0; count < 15; ++count) {
if (cmd->SCp.sent_command) {
ret = SUCCESS;
break;
@@ -826,12 +905,10 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
msleep(1000);
}
- if (ret != SUCCESS)
- pr_err("%s: Host adapter reset request timed out\n",
- AAC_DRIVERNAME);
+ if (ret == SUCCESS)
+ goto out;
+
} else {
- struct scsi_cmnd *command;
- unsigned long flags;
/* Mark the assoc. FIB to not complete, eh handler does this */
for (count = 0;
@@ -846,68 +923,42 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
}
}
+ }
- pr_err("%s: Host adapter reset request. SCSI hang ?\n",
- AAC_DRIVERNAME);
-
- count = aac_check_health(aac);
- if (count)
- return count;
- /*
- * Wait for all commands to complete to this specific
- * target (block maximum 60 seconds).
- */
- for (count = 60; count; --count) {
- int active = aac->in_reset;
-
- if (active == 0)
- __shost_for_each_device(dev, host) {
- spin_lock_irqsave(&dev->list_lock, flags);
- list_for_each_entry(command, &dev->cmd_list,
- list) {
- if ((command != cmd) &&
- (command->SCp.phase ==
- AAC_OWNER_FIRMWARE)) {
- active++;
- break;
- }
- }
- spin_unlock_irqrestore(&dev->list_lock, flags);
- if (active)
- break;
+ pr_err("%s: Host adapter reset request. SCSI hang ?\n", AAC_DRIVERNAME);
- }
- /*
- * We can exit If all the commands are complete
- */
- if (active == 0)
- return SUCCESS;
- ssleep(1);
- }
- pr_err("%s: SCSI bus appears hung\n", AAC_DRIVERNAME);
+ /*
+ * Check the health of the controller
+ */
+ status = aac_adapter_check_health(aac);
+ if (status)
+ dev_err(&aac->pdev->dev, "Adapter health - %d\n", status);
- /*
- * This adapter needs a blind reset, only do so for
- * Adapters that support a register, instead of a commanded,
- * reset.
- */
- if (((aac->supplement_adapter_info.supported_options2 &
- AAC_OPTION_MU_RESET) ||
- (aac->supplement_adapter_info.supported_options2 &
- AAC_OPTION_DOORBELL_RESET)) &&
- aac_check_reset &&
- ((aac_check_reset != 1) ||
- !(aac->supplement_adapter_info.supported_options2 &
- AAC_OPTION_IGNORE_RESET))) {
- /* Bypass wait for command quiesce */
- aac_reset_adapter(aac, 2, IOP_HWSOFT_RESET);
- }
- ret = SUCCESS;
- }
+ count = get_num_of_incomplete_fibs(aac);
+ if (count == 0)
+ return SUCCESS;
+
+ /*
+ * Check if reset is supported by the firmware
+ */
+ supported_options2 = aac->supplement_adapter_info.supported_options2;
+ is_mu_reset = supported_options2 & AAC_OPTION_MU_RESET;
+ is_doorbell_reset = supported_options2 & AAC_OPTION_DOORBELL_RESET;
+ is_ignore_reset = supported_options2 & AAC_OPTION_IGNORE_RESET;
/*
- * Cause an immediate retry of the command with a ten second delay
- * after successful tur
+ * This adapter needs a blind reset, only do so for
+ * Adapters that support a register, instead of a commanded,
+ * reset.
*/
+ if ((is_mu_reset || is_doorbell_reset)
+ && aac_check_reset
+ && (aac_check_reset != -1 || !is_ignore_reset)) {
+ /* Bypass wait for command quiesce */
+ aac_reset_adapter(aac, 2, IOP_HWSOFT_RESET);
+ }
+ ret = SUCCESS;
+
+out:
return ret;
}
@@ -1365,10 +1416,7 @@ static void __aac_shutdown(struct aac_dev * aac)
kthread_stop(aac->thread);
}
aac_adapter_disable_int(aac);
- if (aac->pdev->device == PMC_DEVICE_S6 ||
- aac->pdev->device == PMC_DEVICE_S7 ||
- aac->pdev->device == PMC_DEVICE_S8 ||
- aac->pdev->device == PMC_DEVICE_S9) {
+ if (aac_is_src(aac)) {
if (aac->max_msix > 1) {
for (i = 0; i < aac->max_msix; i++) {
free_irq(pci_irq_vector(aac->pdev, i),
@@ -1403,6 +1451,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
int error = -ENODEV;
int unique_id = 0;
u64 dmamask;
+ int mask_bits = 0;
extern int aac_sync_mode;
/*
@@ -1426,18 +1475,32 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto out;
error = -ENODEV;
+ if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) {
+ error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (error) {
+ dev_err(&pdev->dev, "PCI 32 BIT dma mask set failed");
+ goto out_disable_pdev;
+ }
+ }
+
/*
* If the quirk31 bit is set, the adapter needs adapter
* to driver communication memory to be allocated below 2gig
*/
- if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
+ if (aac_drivers[index].quirks & AAC_QUIRK_31BIT) {
dmamask = DMA_BIT_MASK(31);
- else
+ mask_bits = 31;
+ } else {
dmamask = DMA_BIT_MASK(32);
+ mask_bits = 32;
+ }
- if (pci_set_dma_mask(pdev, dmamask) ||
- pci_set_consistent_dma_mask(pdev, dmamask))
+ error = pci_set_consistent_dma_mask(pdev, dmamask);
+ if (error) {
+ dev_err(&pdev->dev, "PCI %d B consistent dma mask set failed\n"
+ , mask_bits);
goto out_disable_pdev;
+ }
pci_set_master(pdev);
@@ -1501,15 +1564,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_deinit;
}
- /*
- * If we had set a smaller DMA mask earlier, set it to 4gig
- * now since the adapter can dma data to at least a 4gig
- * address space.
- */
- if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
- goto out_deinit;
-
aac->maximum_num_channels = aac_drivers[index].channels;
error = aac_get_adapter_info(aac);
if (error < 0)
@@ -1627,9 +1681,7 @@ static int aac_acquire_resources(struct aac_dev *dev)
aac_adapter_enable_int(dev);
- if ((dev->pdev->device == PMC_DEVICE_S7 ||
- dev->pdev->device == PMC_DEVICE_S8 ||
- dev->pdev->device == PMC_DEVICE_S9))
+ if (aac_is_src(dev))
aac_define_int_mode(dev);
if (dev->msi_enabled)
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 7b0410e0f569..48c2b2b34b72 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -694,33 +694,52 @@ static void aac_dump_fw_fib_iop_reset(struct aac_dev *dev)
0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}
-static void aac_send_iop_reset(struct aac_dev *dev, int bled)
+static bool aac_is_ctrl_up_and_running(struct aac_dev *dev)
{
- u32 var, reset_mask;
+ bool ctrl_up = true;
+ unsigned long status, start;
+ bool is_up = false;
- aac_dump_fw_fib_iop_reset(dev);
+ start = jiffies;
+ do {
+ schedule();
+ status = src_readl(dev, MUnit.OMR);
- bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
- 0, 0, 0, 0, 0, 0, &var,
- &reset_mask, NULL, NULL, NULL);
+ if (status == 0xffffffff)
+ status = 0;
- if ((bled || var != 0x00000001) && !dev->doorbell_mask)
- bled = -EINVAL;
- else if (dev->doorbell_mask) {
- reset_mask = dev->doorbell_mask;
- bled = 0;
- var = 0x00000001;
- }
+ if (status & KERNEL_BOOTING) {
+ start = jiffies;
+ continue;
+ }
+
+ if (time_after(jiffies, start+HZ*SOFT_RESET_TIME)) {
+ ctrl_up = false;
+ break;
+ }
+
+ is_up = status & KERNEL_UP_AND_RUNNING;
+
+ } while (!is_up);
+
+ return ctrl_up;
+}
+
+static void aac_notify_fw_of_iop_reset(struct aac_dev *dev)
+{
+ aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, NULL,
+ NULL, NULL, NULL, NULL);
+}
+
+static void aac_send_iop_reset(struct aac_dev *dev)
+{
+ aac_dump_fw_fib_iop_reset(dev);
+
+ aac_notify_fw_of_iop_reset(dev);
aac_set_intx_mode(dev);
- if (!bled && (dev->supplement_adapter_info.supported_options2 &
- AAC_OPTION_DOORBELL_RESET)) {
- src_writel(dev, MUnit.IDR, reset_mask);
- } else {
- src_writel(dev, MUnit.IDR, 0x100);
- }
- msleep(30000);
+ src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK);
}
static void aac_send_hardware_soft_reset(struct aac_dev *dev)
@@ -735,14 +754,14 @@ static void aac_send_hardware_soft_reset(struct aac_dev *dev)
static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
{
- unsigned long status, start;
+ bool is_ctrl_up;
+ int ret = 0;
if (bled < 0)
goto invalid_out;
if (bled)
- pr_err("%s%d: adapter kernel panic'd %x.\n",
- dev->name, dev->id, bled);
+ dev_err(&dev->pdev->dev, "adapter kernel panic'd %x.\n", bled);
/*
* When there is a BlinkLED, IOP_RESET has no effect
@@ -752,48 +771,55 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
- switch (reset_type) {
- case IOP_HWSOFT_RESET:
- aac_send_iop_reset(dev, bled);
+ dev_err(&dev->pdev->dev, "Controller reset type is %d\n", reset_type);
+
+ if (reset_type & HW_IOP_RESET) {
+ dev_info(&dev->pdev->dev, "Issuing IOP reset\n");
+ aac_send_iop_reset(dev);
+
/*
- * Check to see if KERNEL_UP_AND_RUNNING
- * Wait for the adapter to be up and running.
- * If !KERNEL_UP_AND_RUNNING issue HW Soft Reset
+ * Delay, or wait until the up-and-running state comes through
*/
- status = src_readl(dev, MUnit.OMR);
- if (dev->sa_firmware
- && !(status & KERNEL_UP_AND_RUNNING)) {
- start = jiffies;
- do {
- status = src_readl(dev, MUnit.OMR);
- if (time_after(jiffies,
- start+HZ*SOFT_RESET_TIME)) {
- aac_send_hardware_soft_reset(dev);
- start = jiffies;
- }
- } while (!(status & KERNEL_UP_AND_RUNNING));
+ is_ctrl_up = aac_is_ctrl_up_and_running(dev);
+ if (!is_ctrl_up)
+ dev_err(&dev->pdev->dev, "IOP reset failed\n");
+ else {
+ dev_info(&dev->pdev->dev, "IOP reset succeded\n");
+ goto set_startup;
}
- break;
- case HW_SOFT_RESET:
- if (dev->sa_firmware) {
- aac_send_hardware_soft_reset(dev);
- aac_set_intx_mode(dev);
- }
- break;
- default:
- aac_send_iop_reset(dev, bled);
- break;
}
-invalid_out:
+ if (!dev->sa_firmware) {
+ dev_err(&dev->pdev->dev, "ARC Reset attempt failed\n");
+ ret = -ENODEV;
+ goto out;
+ }
- if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
- return -ENODEV;
+ if (reset_type & HW_SOFT_RESET) {
+ dev_info(&dev->pdev->dev, "Issuing SOFT reset\n");
+ aac_send_hardware_soft_reset(dev);
+ dev->msi_enabled = 0;
+ is_ctrl_up = aac_is_ctrl_up_and_running(dev);
+ if (!is_ctrl_up) {
+ dev_err(&dev->pdev->dev, "SOFT reset failed\n");
+ ret = -ENODEV;
+ goto out;
+ } else
+ dev_info(&dev->pdev->dev, "SOFT reset succeded\n");
+ }
+
+set_startup:
if (startup_timeout < 300)
startup_timeout = 300;
- return 0;
+out:
+ return ret;
+
+invalid_out:
+ if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
+ ret = -ENODEV;
+goto out;
}
/**
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index f792420c533e..a75feebe6ad6 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -776,7 +776,7 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
* from/to alternative Ram.
*/
if (ATARIHW_PRESENT(ST_SCSI) && !ATARIHW_PRESENT(EXTD_DMA) &&
- m68k_num_memory > 1) {
+ m68k_realnum_memory > 1) {
atari_dma_buffer = atari_stram_alloc(STRAM_BUFFER_SIZE, "SCSI");
if (!atari_dma_buffer) {
pr_err(PFX "can't allocate ST-RAM double buffer\n");
diff --git a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
index ac1c0b631aca..e4469df9c469 100644
--- a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
+++ b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
@@ -3,7 +3,8 @@
* session resources such as connection id and qp resources.
*
* Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 1f424e40afdf..7e007e142aab 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -1,7 +1,8 @@
/* bnx2fc.h: QLogic Linux FCoE offload driver.
*
* Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -65,7 +66,7 @@
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
-#define BNX2FC_VERSION "2.10.3"
+#define BNX2FC_VERSION "2.11.8"
#define PFX "bnx2fc: "
diff --git a/drivers/scsi/bnx2fc/bnx2fc_constants.h b/drivers/scsi/bnx2fc/bnx2fc_constants.h
index 5b20efb661a5..9ed150307a39 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_constants.h
+++ b/drivers/scsi/bnx2fc/bnx2fc_constants.h
@@ -3,7 +3,8 @@
* session resources such as connection id and qp resources.
*
* Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.c b/drivers/scsi/bnx2fc/bnx2fc_debug.c
index c9e0bc7fad3b..47ba3ba1e03b 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_debug.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_debug.c
@@ -3,7 +3,8 @@
* session resources such as connection id and qp resources.
*
* Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.h b/drivers/scsi/bnx2fc/bnx2fc_debug.h
index 34fda3e04d27..76717acee3ab 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_debug.h
+++ b/drivers/scsi/bnx2fc/bnx2fc_debug.h
@@ -3,7 +3,8 @@
* session resources such as connection id and qp resources.
*
* Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index 68ca518d34b0..76e65a32f38c 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -4,7 +4,8 @@
* and responses.
*
* Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -61,13 +62,20 @@ int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)
struct fc_els_rrq rrq;
struct bnx2fc_rport *tgt = aborted_io_req->tgt;
- struct fc_lport *lport = tgt->rdata->local_port;
+ struct fc_lport *lport = NULL;
struct bnx2fc_els_cb_arg *cb_arg = NULL;
- u32 sid = tgt->sid;
- u32 r_a_tov = lport->r_a_tov;
+ u32 sid = 0;
+ u32 r_a_tov = 0;
unsigned long start = jiffies;
int rc;
+ if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))
+ return -EINVAL;
+
+ lport = tgt->rdata->local_port;
+ sid = tgt->sid;
+ r_a_tov = lport->r_a_tov;
+
BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n",
aborted_io_req->xid);
memset(&rrq, 0, sizeof(rrq));
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 902722dc4ce3..7dfe709a7138 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -4,7 +4,8 @@
* FIP/FCoE packets, listen to link events etc.
*
* Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -351,7 +352,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
} else {
- cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
+ cp = skb_put(skb, tlen);
}
memset(cp, 0, sizeof(*cp));
@@ -522,10 +523,12 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
struct fcoe_crc_eof crc_eof;
struct fc_frame *fp;
struct fc_lport *vn_port;
- struct fcoe_port *port;
+ struct fcoe_port *port, *phys_port;
u8 *mac = NULL;
u8 *dest_mac = NULL;
struct fcoe_hdr *hp;
+ struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
fr = fcoe_dev_from_skb(skb);
lport = fr->fr_dev;
@@ -561,8 +564,19 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
return;
}
+ phys_port = lport_priv(lport);
+ interface = phys_port->priv;
+ ctlr = bnx2fc_to_ctlr(interface);
+
fh = fc_frame_header_get(fp);
+ if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) {
+ BNX2FC_HBA_DBG(lport, "FC frame d_id mismatch with MAC %pM.\n",
+ dest_mac);
+ kfree_skb(skb);
+ return;
+ }
+
vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
if (vn_port) {
port = lport_priv(vn_port);
@@ -572,6 +586,14 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
return;
}
}
+ if (ctlr->state) {
+ if (!ether_addr_equal(mac, ctlr->dest_addr)) {
+ BNX2FC_HBA_DBG(lport, "Wrong source address: mac:%pM dest_addr:%pM.\n",
+ mac, ctlr->dest_addr);
+ kfree_skb(skb);
+ return;
+ }
+ }
if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
fh->fh_type == FC_TYPE_FCP) {
/* Drop FCP data. We don't handle this in the L2 path */
@@ -597,6 +619,18 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
return;
}
+ /*
+ * If the destination ID from the frame header does not match what we
+ * have on record for lport and the search for a NPIV port came up
+ * empty then this is not addressed to our port so simply drop it.
+ */
+ if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) {
+ BNX2FC_HBA_DBG(lport, "Dropping frame due to destination mismatch: lport->port_id=%x fh->d_id=%x.\n",
+ lport->port_id, ntoh24(fh->fh_d_id));
+ kfree_skb(skb);
+ return;
+ }
+
stats = per_cpu_ptr(lport->stats, smp_processor_id());
stats->RxFrames++;
stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
@@ -2105,6 +2139,9 @@ static uint bnx2fc_npiv_create_vports(struct fc_lport *lport,
{
struct fc_vport_identifiers vpid;
uint i, created = 0;
+ u64 wwnn = 0;
+ char wwpn_str[32];
+ char wwnn_str[32];
if (npiv_tbl->count > MAX_NPIV_ENTRIES) {
BNX2FC_HBA_DBG(lport, "Exceeded count max of npiv table\n");
@@ -2123,11 +2160,23 @@ static uint bnx2fc_npiv_create_vports(struct fc_lport *lport,
vpid.disable = false;
for (i = 0; i < npiv_tbl->count; i++) {
- vpid.node_name = wwn_to_u64(npiv_tbl->wwnn[i]);
+ wwnn = wwn_to_u64(npiv_tbl->wwnn[i]);
+ if (wwnn == 0) {
+ /*
+ * If we get a 0 element for the WWNN then assume
+ * the WWNN should be the same as the physical port.
+ */
+ wwnn = lport->wwnn;
+ }
+ vpid.node_name = wwnn;
vpid.port_name = wwn_to_u64(npiv_tbl->wwpn[i]);
scnprintf(vpid.symbolic_name, sizeof(vpid.symbolic_name),
"NPIV[%u]:%016llx-%016llx",
created, vpid.port_name, vpid.node_name);
+ fcoe_wwn_to_str(vpid.node_name, wwnn_str, sizeof(wwnn_str));
+ fcoe_wwn_to_str(vpid.port_name, wwpn_str, sizeof(wwpn_str));
+ BNX2FC_HBA_DBG(lport, "Creating vport %s:%s.\n", wwnn_str,
+ wwpn_str);
if (fc_vport_create(lport->host, 0, &vpid))
created++;
else
@@ -2524,6 +2573,11 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
bnx2fc_hba_destroy(hba);
}
+static void bnx2fc_rport_terminate_io(struct fc_rport *rport)
+{
+ /* This is a no-op */
+}
+
/**
* bnx2fc_fcoe_reset - Resets the fcoe
*
@@ -2860,7 +2914,7 @@ static struct fc_function_template bnx2fc_transport_function = {
.issue_fc_host_lip = bnx2fc_fcoe_reset,
- .terminate_rport_io = fc_rport_terminate_io,
+ .terminate_rport_io = bnx2fc_rport_terminate_io,
.vport_create = bnx2fc_vport_create,
.vport_delete = bnx2fc_vport_destroy,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 5ff9f89c17c7..913c750205ce 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -3,7 +3,8 @@
* with 57712 FCoE firmware.
*
* Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 898461b146cc..5b6153f23f01 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -2,7 +2,8 @@
* IO manager and SCSI IO processing.
*
* Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -1166,16 +1167,11 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
"not on active_q\n", io_req->xid);
/*
- * This condition can happen only due to the FW bug,
- * where we do not receive cleanup response from
- * the FW. Handle this case gracefully by erroring
- * back the IO request to SCSI-ml
+ * The IO is still with the FW.
+ * Return failure and let SCSI-ml retry eh_abort.
*/
- bnx2fc_scsi_done(io_req, DID_ABORT);
-
- kref_put(&io_req->refcount, bnx2fc_cmd_release);
spin_unlock_bh(&tgt->tgt_lock);
- return SUCCESS;
+ return FAILED;
}
/*
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 739bfb62aff6..59a2dfbcbc69 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -3,7 +3,8 @@
* session resources such as connection id and qp resources.
*
* Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index f32a66f89d25..03c104b47f31 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1909,7 +1909,8 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
bnx2i_ep_active_list_add(hba, bnx2i_ep);
- if (bnx2i_map_ep_dbell_regs(bnx2i_ep))
+ rc = bnx2i_map_ep_dbell_regs(bnx2i_ep);
+ if (rc)
goto del_active_ep;
mutex_unlock(&hba->net_dev_lock);
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index dab195f04da7..2029ad225121 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -794,18 +794,24 @@ csio_hw_dev_ready(struct csio_hw *hw)
{
uint32_t reg;
int cnt = 6;
+ int src_pf;
while (((reg = csio_rd_reg32(hw, PL_WHOAMI_A)) == 0xFFFFFFFF) &&
(--cnt != 0))
mdelay(100);
- if ((cnt == 0) && (((int32_t)(SOURCEPF_G(reg)) < 0) ||
- (SOURCEPF_G(reg) >= CSIO_MAX_PFN))) {
+ if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
+ src_pf = SOURCEPF_G(reg);
+ else
+ src_pf = T6_SOURCEPF_G(reg);
+
+ if ((cnt == 0) && (((int32_t)(src_pf) < 0) ||
+ (src_pf >= CSIO_MAX_PFN))) {
csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
return -EIO;
}
- hw->pfn = SOURCEPF_G(reg);
+ hw->pfn = src_pf;
return 0;
}
@@ -1581,10 +1587,16 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
unsigned int mtype = 0, maddr = 0;
uint32_t *cfg_data;
int value_to_add = 0;
+ const char *fw_cfg_file;
+
+ if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
+ fw_cfg_file = FW_CFG_NAME_T5;
+ else
+ fw_cfg_file = FW_CFG_NAME_T6;
- if (request_firmware(&cf, FW_CFG_NAME_T5, dev) < 0) {
+ if (request_firmware(&cf, fw_cfg_file, dev) < 0) {
csio_err(hw, "could not find config file %s, err: %d\n",
- FW_CFG_NAME_T5, ret);
+ fw_cfg_file, ret);
return -ENOENT;
}
@@ -1623,9 +1635,8 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
}
if (ret == 0) {
- csio_info(hw, "config file upgraded to %s\n",
- FW_CFG_NAME_T5);
- snprintf(path, 64, "%s%s", "/lib/firmware/", FW_CFG_NAME_T5);
+ csio_info(hw, "config file upgraded to %s\n", fw_cfg_file);
+ snprintf(path, 64, "%s%s", "/lib/firmware/", fw_cfg_file);
}
leave:
@@ -1886,6 +1897,19 @@ static struct fw_info fw_info_array[] = {
.intfver_iscsi = FW_INTFVER(T5, ISCSI),
.intfver_fcoe = FW_INTFVER(T5, FCOE),
},
+ }, {
+ .chip = CHELSIO_T6,
+ .fs_name = FW_CFG_NAME_T6,
+ .fw_mod_name = FW_FNAME_T6,
+ .fw_hdr = {
+ .chip = FW_HDR_CHIP_T6,
+ .fw_ver = __cpu_to_be32(FW_VERSION(T6)),
+ .intfver_nic = FW_INTFVER(T6, NIC),
+ .intfver_vnic = FW_INTFVER(T6, VNIC),
+ .intfver_ri = FW_INTFVER(T6, RI),
+ .intfver_iscsi = FW_INTFVER(T6, ISCSI),
+ .intfver_fcoe = FW_INTFVER(T6, FCOE),
+ },
}
};
@@ -2002,6 +2026,7 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset)
struct device *dev = &pci_dev->dev ;
const u8 *fw_data = NULL;
unsigned int fw_size = 0;
+ const char *fw_bin_file;
/* This is the firmware whose headers the driver was compiled
* against
@@ -2014,9 +2039,14 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset)
return -EINVAL;
}
- if (request_firmware(&fw, FW_FNAME_T5, dev) < 0) {
+ if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
+ fw_bin_file = FW_FNAME_T5;
+ else
+ fw_bin_file = FW_FNAME_T6;
+
+ if (request_firmware(&fw, fw_bin_file, dev) < 0) {
csio_err(hw, "could not find firmware image %s, err: %d\n",
- FW_FNAME_T5, ret);
+ fw_bin_file, ret);
} else {
fw_data = fw->data;
fw_size = fw->size;
@@ -2038,6 +2068,17 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset)
return ret;
}
+static int csio_hw_check_fwver(struct csio_hw *hw)
+{
+ if (csio_is_t6(hw->pdev->device & CSIO_HW_CHIP_MASK) &&
+ (hw->fwrev < CSIO_MIN_T6_FW)) {
+ csio_hw_print_fw_version(hw, "T6 unsupported fw");
+ return -1;
+ }
+
+ return 0;
+}
+
/*
* csio_hw_configure - Configure HW
* @hw - HW module
@@ -2105,6 +2146,10 @@ csio_hw_configure(struct csio_hw *hw)
if (rv != 0)
goto out;
+ rv = csio_hw_check_fwver(hw);
+ if (rv < 0)
+ goto out;
+
/* If the firmware doesn't support Configuration Files,
* return an error.
*/
@@ -2132,6 +2177,10 @@ csio_hw_configure(struct csio_hw *hw)
}
} else {
+ rv = csio_hw_check_fwver(hw);
+ if (rv < 0)
+ goto out;
+
if (hw->fw_state == CSIO_DEV_STATE_INIT) {
hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;
@@ -2241,9 +2290,14 @@ static void
csio_hw_intr_enable(struct csio_hw *hw)
{
uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
- uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
+ u32 pf = 0;
uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE_A);
+ if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
+ pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
+ else
+ pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
+
/*
* Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up
* by FW, so do nothing for INTX.
@@ -2293,7 +2347,12 @@ csio_hw_intr_enable(struct csio_hw *hw)
void
csio_hw_intr_disable(struct csio_hw *hw)
{
- uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
+ u32 pf = 0;
+
+ if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
+ pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
+ else
+ pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED))
return;
@@ -2918,6 +2977,8 @@ static void csio_cplsw_intr_handler(struct csio_hw *hw)
*/
static void csio_le_intr_handler(struct csio_hw *hw)
{
+ enum chip_type chip = CHELSIO_CHIP_VERSION(hw->chip_id);
+
static struct intr_info le_intr_info[] = {
{ LIPMISS_F, "LE LIP miss", -1, 0 },
{ LIP0_F, "LE 0 LIP error", -1, 0 },
@@ -2927,7 +2988,18 @@ static void csio_le_intr_handler(struct csio_hw *hw)
{ 0, NULL, 0, 0 }
};
- if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A, le_intr_info))
+ static struct intr_info t6_le_intr_info[] = {
+ { T6_LIPMISS_F, "LE LIP miss", -1, 0 },
+ { T6_LIP0_F, "LE 0 LIP error", -1, 0 },
+ { TCAMINTPERR_F, "LE parity error", -1, 1 },
+ { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
+ { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A,
+ (chip == CHELSIO_T5) ?
+ le_intr_info : t6_le_intr_info))
csio_hw_fatal_err(hw);
}
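
Both the T5 table and the new T6 table above follow csiostor's zero-terminated intr_info convention. A simplified sketch of how such a table is consumed (the real csio_handle_intr_status() also clears the cause register; field names assumed from the initializers above):

	static int handle_intr_status(struct csio_hw *hw, u32 cause,
				      const struct intr_info *info)
	{
		int fatal = 0;

		for (; info->msg; info++) {
			if (!(cause & info->mask))
				continue;
			csio_err(hw, "%s\n", info->msg);
			fatal |= info->fatal;
		}
		return fatal;
	}
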
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h
index 62758e830d3b..9acb89538e29 100644
--- a/drivers/scsi/csiostor/csio_hw.h
+++ b/drivers/scsi/csiostor/csio_hw.h
@@ -71,6 +71,7 @@
#define CSIO_MAX_CMD_PER_LUN 32
#define CSIO_MAX_DDP_BUF_SIZE (1024 * 1024)
#define CSIO_MAX_SECTOR_SIZE 128
+#define CSIO_MIN_T6_FW 0x01102D00 /* FW 1.16.45.0 */
/* Interrupts */
#define CSIO_EXTRA_MSI_IQS 2 /* Extra iqs for INTX/MSI mode
diff --git a/drivers/scsi/csiostor/csio_hw_chip.h b/drivers/scsi/csiostor/csio_hw_chip.h
index b56a11d817be..aaabdbe11d88 100644
--- a/drivers/scsi/csiostor/csio_hw_chip.h
+++ b/drivers/scsi/csiostor/csio_hw_chip.h
@@ -39,11 +39,15 @@
/* Define MACRO values */
#define CSIO_HW_T5 0x5000
#define CSIO_T5_FCOE_ASIC 0x5600
+#define CSIO_HW_T6 0x6000
+#define CSIO_T6_FCOE_ASIC 0x6600
#define CSIO_HW_CHIP_MASK 0xF000
#define T5_REGMAP_SIZE (332 * 1024)
#define FW_FNAME_T5 "cxgb4/t5fw.bin"
#define FW_CFG_NAME_T5 "cxgb4/t5-config.txt"
+#define FW_FNAME_T6 "cxgb4/t6fw.bin"
+#define FW_CFG_NAME_T6 "cxgb4/t6-config.txt"
#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
#define CHELSIO_CHIP_FPGA 0x100
@@ -51,12 +55,17 @@
#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
#define CHELSIO_T5 0x5
+#define CHELSIO_T6 0x6
enum chip_type {
T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
T5_FIRST_REV = T5_A0,
T5_LAST_REV = T5_A1,
+
+ T6_A0 = CHELSIO_CHIP_CODE(CHELSIO_T6, 0),
+ T6_FIRST_REV = T6_A0,
+ T6_LAST_REV = T6_A0,
};
static inline int csio_is_t5(uint16_t chip)
@@ -64,6 +73,11 @@ static inline int csio_is_t5(uint16_t chip)
return (chip == CSIO_HW_T5);
}
+static inline int csio_is_t6(uint16_t chip)
+{
+ return (chip == CSIO_HW_T6);
+}
+
/* Define MACRO DEFINITIONS */
#define CSIO_DEVICE(devid, idx) \
{ PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) }
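
The new T6 chip code packs the version and revision nibbles exactly like the T5 entries, so the existing extractor macros keep working; worked from the macros above:

	/* T6_A0 = CHELSIO_CHIP_CODE(CHELSIO_T6, 0) = (0x6 << 4) | 0 = 0x60 */
	/* CHELSIO_CHIP_RELEASE(T6_A0) = 0x60 & 0xf = 0x0, i.e. revision A0 */
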
diff --git a/drivers/scsi/csiostor/csio_hw_t5.c b/drivers/scsi/csiostor/csio_hw_t5.c
index 3267f4f627c9..f24def6c6fd1 100644
--- a/drivers/scsi/csiostor/csio_hw_t5.c
+++ b/drivers/scsi/csiostor/csio_hw_t5.c
@@ -71,27 +71,6 @@ csio_t5_set_mem_win(struct csio_hw *hw, uint32_t win)
static void
csio_t5_pcie_intr_handler(struct csio_hw *hw)
{
- static struct intr_info sysbus_intr_info[] = {
- { RNPP_F, "RXNP array parity error", -1, 1 },
- { RPCP_F, "RXPC array parity error", -1, 1 },
- { RCIP_F, "RXCIF array parity error", -1, 1 },
- { RCCP_F, "Rx completions control array parity error", -1, 1 },
- { RFTP_F, "RXFT array parity error", -1, 1 },
- { 0, NULL, 0, 0 }
- };
- static struct intr_info pcie_port_intr_info[] = {
- { TPCP_F, "TXPC array parity error", -1, 1 },
- { TNPP_F, "TXNP array parity error", -1, 1 },
- { TFTP_F, "TXFT array parity error", -1, 1 },
- { TCAP_F, "TXCA array parity error", -1, 1 },
- { TCIP_F, "TXCIF array parity error", -1, 1 },
- { RCAP_F, "RXCA array parity error", -1, 1 },
- { OTDD_F, "outbound request TLP discarded", -1, 1 },
- { RDPE_F, "Rx data parity error", -1, 1 },
- { TDUE_F, "Tx uncorrectable data error", -1, 1 },
- { 0, NULL, 0, 0 }
- };
-
static struct intr_info pcie_intr_info[] = {
{ MSTGRPPERR_F, "Master Response Read Queue parity error",
-1, 1 },
@@ -133,13 +112,7 @@ csio_t5_pcie_intr_handler(struct csio_hw *hw)
};
int fat;
- fat = csio_handle_intr_status(hw,
- PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
- sysbus_intr_info) +
- csio_handle_intr_status(hw,
- PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
- pcie_port_intr_info) +
- csio_handle_intr_status(hw, PCIE_INT_CAUSE_A, pcie_intr_info);
+ fat = csio_handle_intr_status(hw, PCIE_INT_CAUSE_A, pcie_intr_info);
if (fat)
csio_hw_fatal_err(hw);
}
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index dbe416ff46c2..ea0c31086cc6 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -952,8 +952,9 @@ static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
struct csio_hw *hw;
struct csio_lnode *ln;
- /* probe only T5 cards */
- if (!csio_is_t5((pdev->device & CSIO_HW_CHIP_MASK)))
+ /* probe only T5 and T6 cards */
+ if (!csio_is_t5((pdev->device & CSIO_HW_CHIP_MASK)) &&
+ !csio_is_t6((pdev->device & CSIO_HW_CHIP_MASK)))
return -ENODEV;
rv = csio_pci_init(pdev, &bars);
@@ -1253,3 +1254,4 @@ MODULE_LICENSE(CSIO_DRV_LICENSE);
MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
MODULE_VERSION(CSIO_DRV_VERSION);
MODULE_FIRMWARE(FW_FNAME_T5);
+MODULE_FIRMWARE(FW_FNAME_T6);
diff --git a/drivers/scsi/csiostor/csio_init.h b/drivers/scsi/csiostor/csio_init.h
index 5cc5d317a442..96b31e5af91e 100644
--- a/drivers/scsi/csiostor/csio_init.h
+++ b/drivers/scsi/csiostor/csio_init.h
@@ -50,7 +50,7 @@
#define CSIO_DRV_AUTHOR "Chelsio Communications"
#define CSIO_DRV_LICENSE "Dual BSD/GPL"
#define CSIO_DRV_DESC "Chelsio FCoE driver"
-#define CSIO_DRV_VERSION "1.0.0"
+#define CSIO_DRV_VERSION "1.0.0-ko"
extern struct fc_function_template csio_fc_transport_funcs;
extern struct fc_function_template csio_fc_transport_vport_funcs;
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index c00b2ff72b55..be5ee2d37815 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -238,14 +238,23 @@ csio_osname(uint8_t *buf, size_t buf_len)
}
static inline void
-csio_append_attrib(uint8_t **ptr, uint16_t type, uint8_t *val, uint16_t len)
+csio_append_attrib(uint8_t **ptr, uint16_t type, void *val, size_t val_len)
{
+ uint16_t len;
struct fc_fdmi_attr_entry *ae = (struct fc_fdmi_attr_entry *)*ptr;
+
+ if (WARN_ON(val_len > U16_MAX))
+ return;
+
+ len = val_len;
+
ae->type = htons(type);
len += 4; /* includes attribute type and length */
len = (len + 3) & ~3; /* should be multiple of 4 bytes */
ae->len = htons(len);
- memcpy(ae->value, val, len);
+ memcpy(ae->value, val, val_len);
+ if (len > val_len)
+ memset(ae->value + val_len, 0, len - val_len);
*ptr += len;
}
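A worked example of the length math above, since the rounding is easy to misread:

/* For val_len = 6:
 *   len = 6 + 4 = 10           (payload plus type/length header)
 *   len = (10 + 3) & ~3 = 12   (rounded up to a 4-byte multiple)
 * memcpy() places the 6 payload bytes and the trailing memset() zeroes
 * the pad, so no uninitialized bytes leak into the FDMI frame.
 */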
@@ -335,7 +344,7 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
numattrs++;
val = htonl(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED,
- (uint8_t *)&val,
+ &val,
FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN);
numattrs++;
@@ -346,23 +355,22 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
else
val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN);
csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED,
- (uint8_t *)&val,
- FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
+ &val, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
numattrs++;
mfs = ln->ln_sparm.csp.sp_bb_data;
csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_MAXFRAMESIZE,
- (uint8_t *)&mfs, FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN);
+ &mfs, sizeof(mfs));
numattrs++;
strcpy(buf, "csiostor");
csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_OSDEVICENAME, buf,
- (uint16_t)strlen(buf));
+ strlen(buf));
numattrs++;
if (!csio_hostname(buf, sizeof(buf))) {
csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_HOSTNAME,
- buf, (uint16_t)strlen(buf));
+ buf, strlen(buf));
numattrs++;
}
attrib_blk->numattrs = htonl(numattrs);
@@ -444,33 +452,32 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
strcpy(buf, "Chelsio Communications");
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MANUFACTURER, buf,
- (uint16_t)strlen(buf));
+ strlen(buf));
numattrs++;
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_SERIALNUMBER,
- hw->vpd.sn, (uint16_t)sizeof(hw->vpd.sn));
+ hw->vpd.sn, sizeof(hw->vpd.sn));
numattrs++;
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODEL, hw->vpd.id,
- (uint16_t)sizeof(hw->vpd.id));
+ sizeof(hw->vpd.id));
numattrs++;
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODELDESCRIPTION,
- hw->model_desc, (uint16_t)strlen(hw->model_desc));
+ hw->model_desc, strlen(hw->model_desc));
numattrs++;
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_HARDWAREVERSION,
- hw->hw_ver, (uint16_t)sizeof(hw->hw_ver));
+ hw->hw_ver, sizeof(hw->hw_ver));
numattrs++;
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_FIRMWAREVERSION,
- hw->fwrev_str, (uint16_t)strlen(hw->fwrev_str));
+ hw->fwrev_str, strlen(hw->fwrev_str));
numattrs++;
if (!csio_osname(buf, sizeof(buf))) {
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_OSNAMEVERSION,
- buf, (uint16_t)strlen(buf));
+ buf, strlen(buf));
numattrs++;
}
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD,
- (uint8_t *)&maxpayload,
- FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
+ &maxpayload, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
len = (uint32_t)(pld - (uint8_t *)cmd);
numattrs++;
attrib_blk->numattrs = htonl(numattrs);
@@ -1794,6 +1801,8 @@ csio_ln_mgmt_submit_req(struct csio_ioreq *io_req,
struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
int rv;
+ BUG_ON(pld_len > pld->len);
+
io_req->io_cbfn = io_cbfn; /* Upper layer callback handler */
io_req->fw_handle = (uintptr_t) (io_req);
io_req->eq_idx = mgmtm->eq_idx;
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index e8f18174f2e9..c0a17789752f 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -480,12 +480,14 @@ csio_wr_iq_create(struct csio_hw *hw, void *priv, int iq_idx,
flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
if (flq_idx != -1) {
+ enum chip_type chip = CHELSIO_CHIP_VERSION(hw->chip_id);
struct csio_q *flq = hw->wrm.q_arr[flq_idx];
iqp.fl0paden = 1;
iqp.fl0packen = flq->un.fl.packen ? 1 : 0;
iqp.fl0fbmin = X_FETCHBURSTMIN_64B;
- iqp.fl0fbmax = X_FETCHBURSTMAX_512B;
+ iqp.fl0fbmax = ((chip == CHELSIO_T5) ?
+ X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B);
iqp.fl0size = csio_q_size(hw, flq_idx) / CSIO_QCREDIT_SZ;
iqp.fl0addr = csio_q_pstart(hw, flq_idx);
}
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 1880eb6c68f7..7b09e7ddf35e 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -354,7 +354,7 @@ static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
struct l2t_entry *l2t = csk->l2t;
skb_reset_transport_header(skb);
- req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req));
+ req = __skb_push(skb, sizeof(*req));
req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) |
(req_completion ? F_WR_COMPL : 0));
req->wr_lo = htonl(V_WR_TID(csk->tid));
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 0aae094ab91c..a69a9ac836f5 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -644,7 +644,7 @@ static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
unsigned int wr_ulp_mode = 0, val;
bool imm = is_ofld_imm(skb);
- req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));
+ req = __skb_push(skb, sizeof(*req));
if (imm) {
req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
@@ -806,7 +806,7 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
cxgbi_sock_get(csk);
csk->tid = tid;
- cxgb4_insert_tid(lldi->tids, csk, tid);
+ cxgb4_insert_tid(lldi->tids, csk, tid, csk->csk_family);
cxgbi_sock_set_flag(csk, CTPF_HAS_TID);
free_atid(csk);
@@ -956,7 +956,8 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
if (status && status != CPL_ERR_TCAM_FULL &&
status != CPL_ERR_CONN_EXIST &&
status != CPL_ERR_ARP_MISS)
- cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl));
+ cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl),
+ csk->csk_family);
cxgbi_sock_get(csk);
spin_lock_bh(&csk->lock);
@@ -1590,7 +1591,8 @@ static void release_offload_resources(struct cxgbi_sock *csk)
free_atid(csk);
else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
lldi = cxgbi_cdev_priv(csk->cdev);
- cxgb4_remove_tid(lldi->tids, 0, csk->tid);
+ cxgb4_remove_tid(lldi->tids, 0, csk->tid,
+ csk->csk_family);
cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
cxgbi_sock_put(csk);
}
@@ -1606,6 +1608,7 @@ static int init_act_open(struct cxgbi_sock *csk)
struct neighbour *n = NULL;
void *daddr;
unsigned int step;
+ unsigned int rxq_idx;
unsigned int size, size6;
unsigned int linkspeed;
unsigned int rcv_winf, snd_winf;
@@ -1684,7 +1687,9 @@ static int init_act_open(struct cxgbi_sock *csk)
step = lldi->ntxq / lldi->nchan;
csk->txq_idx = cxgb4_port_idx(ndev) * step;
step = lldi->nrxq / lldi->nchan;
- csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step];
+ rxq_idx = (cxgb4_port_idx(ndev) * step) + (cdev->rxq_idx_cntr % step);
+ cdev->rxq_idx_cntr++;
+ csk->rss_qid = lldi->rxq_ids[rxq_idx];
linkspeed = ((struct port_info *)netdev_priv(ndev))->link_cfg.speed;
csk->snd_win = cxgb4i_snd_win;
csk->rcv_win = cxgb4i_rcv_win;
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 37f07aaab1e4..31a5816c2e8d 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -477,6 +477,7 @@ struct cxgbi_device {
unsigned int skb_rx_extra; /* for msg coalesced mode */
unsigned int tx_max_size;
unsigned int rx_max_size;
+ unsigned int rxq_idx_cntr;
struct cxgbi_ports_map pmap;
void (*dev_ddp_cleanup)(struct cxgbi_device *);
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 256af819377d..6d95e8e147e0 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -15,6 +15,8 @@
#ifndef _CXLFLASH_COMMON_H
#define _CXLFLASH_COMMON_H
+#include <linux/async.h>
+#include <linux/cdev.h>
#include <linux/irq_poll.h>
#include <linux/list.h>
#include <linux/rwsem.h>
@@ -85,7 +87,8 @@ enum cxlflash_init_state {
INIT_STATE_NONE,
INIT_STATE_PCI,
INIT_STATE_AFU,
- INIT_STATE_SCSI
+ INIT_STATE_SCSI,
+ INIT_STATE_CDEV
};
enum cxlflash_state {
@@ -115,6 +118,8 @@ struct cxlflash_cfg {
struct pci_device_id *dev_id;
struct Scsi_Host *host;
int num_fc_ports;
+ struct cdev cdev;
+ struct device *chardev;
ulong cxlflash_regs_pci;
@@ -142,8 +147,10 @@ struct cxlflash_cfg {
wait_queue_head_t tmf_waitq;
spinlock_t tmf_slock;
bool tmf_active;
+ bool ws_unmap; /* Write-same unmap supported */
wait_queue_head_t reset_waitq;
enum cxlflash_state state;
+ async_cookie_t async_reset_cookie;
};
struct afu_cmd {
@@ -155,7 +162,10 @@ struct afu_cmd {
struct list_head queue;
u32 hwq_index;
- u8 cmd_tmf:1;
+ u8 cmd_tmf:1,
+ cmd_aborted:1;
+
+ struct list_head list; /* Pending commands link */
/* As per the SISLITE spec the IOARCB EA has to be 16-byte aligned.
* However for performance reasons the IOARCB/IOASA should be
@@ -168,12 +178,20 @@ static inline struct afu_cmd *sc_to_afuc(struct scsi_cmnd *sc)
return PTR_ALIGN(scsi_cmd_priv(sc), __alignof__(struct afu_cmd));
}
+static inline struct afu_cmd *sc_to_afuci(struct scsi_cmnd *sc)
+{
+ struct afu_cmd *afuc = sc_to_afuc(sc);
+
+ INIT_LIST_HEAD(&afuc->queue);
+ return afuc;
+}
+
static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc)
{
struct afu_cmd *afuc = sc_to_afuc(sc);
memset(afuc, 0, sizeof(*afuc));
- return afuc;
+ return sc_to_afuci(sc);
}
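Splitting sc_to_afuci() out lets the hot queuecommand path skip the full memset while still guaranteeing that cmd->queue is a valid empty list head. flush_pending_cmds() below leans on exactly that invariant:

/* Invariant consumed in flush_pending_cmds():
 *
 *	if (!list_empty(&cmd->queue))	// already on an irqpoll doneq
 *		continue;		// cmd_complete() will finish it
 */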
struct hwq {
@@ -191,9 +209,10 @@ struct hwq {
struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */
ctx_hndl_t ctx_hndl; /* master's context handle */
u32 index; /* Index of this hwq */
+ struct list_head pending_cmds; /* Commands pending completion */
atomic_t hsq_credits;
- spinlock_t hsq_slock;
+ spinlock_t hsq_slock; /* Hardware send queue lock */
struct sisl_ioarcb *hsq_start;
struct sisl_ioarcb *hsq_end;
struct sisl_ioarcb *hsq_curr;
@@ -204,7 +223,6 @@ struct hwq {
bool toggle;
s64 room;
- spinlock_t rrin_slock; /* Lock to rrin queuing and cmd_room updates */
struct irq_poll irqpoll;
} __aligned(cache_line_size());
@@ -212,7 +230,7 @@ struct hwq {
struct afu {
struct hwq hwqs[CXLFLASH_MAX_HWQS];
int (*send_cmd)(struct afu *, struct afu_cmd *);
- void (*context_reset)(struct afu_cmd *);
+ int (*context_reset)(struct hwq *);
/* AFU HW */
struct cxlflash_afu_map __iomem *afu_map; /* entire MMIO map */
@@ -245,21 +263,31 @@ static inline bool afu_is_irqpoll_enabled(struct afu *afu)
return !!afu->irqpoll_weight;
}
-static inline bool afu_is_cmd_mode(struct afu *afu, u64 cmd_mode)
+static inline bool afu_has_cap(struct afu *afu, u64 cap)
{
u64 afu_cap = afu->interface_version >> SISL_INTVER_CAP_SHIFT;
- return afu_cap & cmd_mode;
+ return afu_cap & cap;
+}
+
+static inline bool afu_is_afu_debug(struct afu *afu)
+{
+ return afu_has_cap(afu, SISL_INTVER_CAP_AFU_DEBUG);
+}
+
+static inline bool afu_is_lun_provision(struct afu *afu)
+{
+ return afu_has_cap(afu, SISL_INTVER_CAP_LUN_PROVISION);
}
static inline bool afu_is_sq_cmd_mode(struct afu *afu)
{
- return afu_is_cmd_mode(afu, SISL_INTVER_CAP_SQ_CMD_MODE);
+ return afu_has_cap(afu, SISL_INTVER_CAP_SQ_CMD_MODE);
}
static inline bool afu_is_ioarrin_cmd_mode(struct afu *afu)
{
- return afu_is_cmd_mode(afu, SISL_INTVER_CAP_IOARRIN_CMD_MODE);
+ return afu_has_cap(afu, SISL_INTVER_CAP_IOARRIN_CMD_MODE);
}
static inline u64 lun_to_lunid(u64 lun)
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index a7d57c343492..077f62e208aa 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -34,6 +34,10 @@ MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
+static struct class *cxlflash_class;
+static u32 cxlflash_major;
+static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
+
/**
* process_cmd_err() - command error handler
* @cmd: AFU command that experienced the error.
@@ -151,9 +155,10 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
* cmd_complete() - command completion handler
* @cmd: AFU command that has completed.
*
- * Prepares and submits command that has either completed or timed out to
- * the SCSI stack. Checks AFU command back into command pool for non-internal
- * (cmd->scp populated) commands.
+ * For SCSI commands this routine prepares and submits commands that have
+ * either completed or timed out to the SCSI stack. For internal commands
+ * (TMF or AFU), this routine simply notifies the originator that the
+ * command has completed.
*/
static void cmd_complete(struct afu_cmd *cmd)
{
@@ -162,7 +167,11 @@ static void cmd_complete(struct afu_cmd *cmd)
struct afu *afu = cmd->parent;
struct cxlflash_cfg *cfg = afu->parent;
struct device *dev = &cfg->dev->dev;
- bool cmd_is_tmf;
+ struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
+
+ spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
+ list_del(&cmd->list);
+ spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
if (cmd->scp) {
scp = cmd->scp;
@@ -171,73 +180,124 @@ static void cmd_complete(struct afu_cmd *cmd)
else
scp->result = (DID_OK << 16);
- cmd_is_tmf = cmd->cmd_tmf;
-
dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
__func__, scp, scp->result, cmd->sa.ioasc);
-
scp->scsi_done(scp);
-
- if (cmd_is_tmf) {
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- cfg->tmf_active = false;
- wake_up_all_locked(&cfg->tmf_waitq);
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
- }
+ } else if (cmd->cmd_tmf) {
+ spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
+ cfg->tmf_active = false;
+ wake_up_all_locked(&cfg->tmf_waitq);
+ spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
} else
complete(&cmd->cevent);
}
/**
- * context_reset() - reset command owner context via specified register
- * @cmd: AFU command that timed out.
+ * flush_pending_cmds() - flush all pending commands on this hardware queue
+ * @hwq: Hardware queue to flush.
+ *
+ * The hardware send queue lock associated with this hardware queue must be
+ * held when calling this routine.
+ */
+static void flush_pending_cmds(struct hwq *hwq)
+{
+ struct cxlflash_cfg *cfg = hwq->afu->parent;
+ struct afu_cmd *cmd, *tmp;
+ struct scsi_cmnd *scp;
+ ulong lock_flags;
+
+ list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
+ /* Bypass command when on a doneq, cmd_complete() will handle */
+ if (!list_empty(&cmd->queue))
+ continue;
+
+ list_del(&cmd->list);
+
+ if (cmd->scp) {
+ scp = cmd->scp;
+ scp->result = (DID_IMM_RETRY << 16);
+ scp->scsi_done(scp);
+ } else {
+ cmd->cmd_aborted = true;
+
+ if (cmd->cmd_tmf) {
+ spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
+ cfg->tmf_active = false;
+ wake_up_all_locked(&cfg->tmf_waitq);
+ spin_unlock_irqrestore(&cfg->tmf_slock,
+ lock_flags);
+ } else
+ complete(&cmd->cevent);
+ }
+ }
+}
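A minimal caller sketch for the locking contract stated in the kernel-doc above; the term_mc() hunk later in this patch follows this exact pattern:

	ulong lock_flags;

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);	/* excludes senders */
	flush_pending_cmds(hwq);
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);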
+
+/**
+ * context_reset() - reset context via specified register
+ * @hwq: Hardware queue owning the context to be reset.
* @reset_reg: MMIO register to perform reset.
+ *
+ * When the reset is successful, the SISLite specification guarantees that
+ * the AFU has aborted all currently pending I/O. Accordingly, these commands
+ * must be flushed.
+ *
+ * Return: 0 on success, -errno on failure
*/
-static void context_reset(struct afu_cmd *cmd, __be64 __iomem *reset_reg)
+static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
{
- int nretry = 0;
- u64 rrin = 0x1;
- struct afu *afu = cmd->parent;
- struct cxlflash_cfg *cfg = afu->parent;
+ struct cxlflash_cfg *cfg = hwq->afu->parent;
struct device *dev = &cfg->dev->dev;
+ int rc = -ETIMEDOUT;
+ int nretry = 0;
+ u64 val = 0x1;
+ ulong lock_flags;
- dev_dbg(dev, "%s: cmd=%p\n", __func__, cmd);
+ dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);
- writeq_be(rrin, reset_reg);
+ spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
+
+ writeq_be(val, reset_reg);
do {
- rrin = readq_be(reset_reg);
- if (rrin != 0x1)
+ val = readq_be(reset_reg);
+ if ((val & 0x1) == 0x0) {
+ rc = 0;
break;
+ }
+
/* Double delay each time */
udelay(1 << nretry);
} while (nretry++ < MC_ROOM_RETRY_CNT);
- dev_dbg(dev, "%s: returning rrin=%016llx nretry=%d\n",
- __func__, rrin, nretry);
+ if (!rc)
+ flush_pending_cmds(hwq);
+
+ spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
+
+ dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
+ __func__, rc, val, nretry);
+ return rc;
}
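The udelay(1 << nretry) back-off doubles on every pass, so the worst-case busy-wait in the loop above is tightly bounded (MC_ROOM_RETRY_CNT is 10, per main.h):

/* Delays issued: 2^0, 2^1, ..., 2^10 microseconds
 * Total: sum(2^n, n = 0..10) = 2^11 - 1 = 2047 us, roughly 2 ms
 */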
/**
- * context_reset_ioarrin() - reset command owner context via IOARRIN register
- * @cmd: AFU command that timed out.
+ * context_reset_ioarrin() - reset context via IOARRIN register
+ * @hwq: Hardware queue owning the context to be reset.
+ *
+ * Return: 0 on success, -errno on failure
*/
-static void context_reset_ioarrin(struct afu_cmd *cmd)
+static int context_reset_ioarrin(struct hwq *hwq)
{
- struct afu *afu = cmd->parent;
- struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
-
- context_reset(cmd, &hwq->host_map->ioarrin);
+ return context_reset(hwq, &hwq->host_map->ioarrin);
}
/**
- * context_reset_sq() - reset command owner context w/ SQ Context Reset register
- * @cmd: AFU command that timed out.
+ * context_reset_sq() - reset context via SQ_CONTEXT_RESET register
+ * @hwq: Hardware queue owning the context to be reset.
+ *
+ * Return: 0 on success, -errno on failure
*/
-static void context_reset_sq(struct afu_cmd *cmd)
+static int context_reset_sq(struct hwq *hwq)
{
- struct afu *afu = cmd->parent;
- struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
-
- context_reset(cmd, &hwq->host_map->sq_ctx_reset);
+ return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
}
/**
@@ -261,7 +321,7 @@ static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
* To avoid the performance penalty of MMIO, spread the update of
* 'room' over multiple commands.
*/
- spin_lock_irqsave(&hwq->rrin_slock, lock_flags);
+ spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
if (--hwq->room < 0) {
room = readq_be(&hwq->host_map->cmd_room);
if (room <= 0) {
@@ -275,9 +335,10 @@ static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
hwq->room = room - 1;
}
+ list_add(&cmd->list, &hwq->pending_cmds);
writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
out:
- spin_unlock_irqrestore(&hwq->rrin_slock, lock_flags);
+ spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
return rc;
@@ -315,6 +376,8 @@ static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
hwq->hsq_curr++;
else
hwq->hsq_curr = hwq->hsq_start;
+
+ list_add(&cmd->list, &hwq->pending_cmds);
writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);
spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
@@ -332,8 +395,7 @@ out:
* @afu: AFU associated with the host.
* @cmd: AFU command that was sent.
*
- * Return:
- * 0 on success, -1 on timeout/error
+ * Return: 0 on success, -errno on failure
*/
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
@@ -343,15 +405,16 @@ static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
- if (!timeout) {
- afu->context_reset(cmd);
- rc = -1;
- }
+ if (!timeout)
+ rc = -ETIMEDOUT;
+
+ if (cmd->cmd_aborted)
+ rc = -EAGAIN;
if (unlikely(cmd->sa.ioasc != 0)) {
dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
__func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
- rc = -1;
+ rc = -EIO;
}
return rc;
@@ -396,25 +459,35 @@ static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
/**
* send_tmf() - sends a Task Management Function (TMF)
- * @afu: AFU to checkout from.
- * @scp: SCSI command from stack.
+ * @cfg: Internal structure associated with the host.
+ * @sdev: SCSI device destined for TMF.
* @tmfcmd: TMF command to send.
*
* Return:
- * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
+ * 0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure
*/
-static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
+static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
+ u64 tmfcmd)
{
- struct Scsi_Host *host = scp->device->host;
- struct cxlflash_cfg *cfg = shost_priv(host);
- struct afu_cmd *cmd = sc_to_afucz(scp);
+ struct afu *afu = cfg->afu;
+ struct afu_cmd *cmd = NULL;
struct device *dev = &cfg->dev->dev;
- int hwq_index = cmd_to_target_hwq(host, scp, afu);
- struct hwq *hwq = get_hwq(afu, hwq_index);
+ struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
+ char *buf = NULL;
ulong lock_flags;
int rc = 0;
ulong to;
+ buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
+ if (unlikely(!buf)) {
+ dev_err(dev, "%s: no memory for command\n", __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
+ INIT_LIST_HEAD(&cmd->queue);
+
/* When Task Management Function is active do not send another */
spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
if (cfg->tmf_active)
@@ -424,15 +497,14 @@ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
cfg->tmf_active = true;
spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
- cmd->scp = scp;
cmd->parent = afu;
cmd->cmd_tmf = true;
- cmd->hwq_index = hwq_index;
+ cmd->hwq_index = hwq->index;
cmd->rcb.ctx_id = hwq->ctx_hndl;
cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
- cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
- cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
+ cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel);
+ cmd->rcb.lun_id = lun_to_lunid(sdev->lun);
cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
SISL_REQ_FLAGS_SUP_UNDERRUN |
SISL_REQ_FLAGS_TMF_CMD);
@@ -453,12 +525,20 @@ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
cfg->tmf_slock,
to);
if (!to) {
- cfg->tmf_active = false;
dev_err(dev, "%s: TMF timed out\n", __func__);
- rc = -1;
- }
+ rc = -ETIMEDOUT;
+ } else if (cmd->cmd_aborted) {
+ dev_err(dev, "%s: TMF aborted\n", __func__);
+ rc = -EAGAIN;
+ } else if (cmd->sa.ioasc) {
+ dev_err(dev, "%s: TMF failed ioasc=%08x\n",
+ __func__, cmd->sa.ioasc);
+ rc = -EIO;
+ }
+ cfg->tmf_active = false;
spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
+ kfree(buf);
return rc;
}
@@ -485,7 +565,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
struct cxlflash_cfg *cfg = shost_priv(host);
struct afu *afu = cfg->afu;
struct device *dev = &cfg->dev->dev;
- struct afu_cmd *cmd = sc_to_afucz(scp);
+ struct afu_cmd *cmd = sc_to_afuci(scp);
struct scatterlist *sg = scsi_sglist(scp);
int hwq_index = cmd_to_target_hwq(host, scp, afu);
struct hwq *hwq = get_hwq(afu, hwq_index);
@@ -585,6 +665,20 @@ static void free_mem(struct cxlflash_cfg *cfg)
}
/**
+ * cxlflash_reset_sync() - synchronizing point for asynchronous resets
+ * @cfg: Internal structure associated with the host.
+ */
+static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
+{
+ if (cfg->async_reset_cookie == 0)
+ return;
+
+ /* Wait until all async calls prior to this cookie have completed */
+ async_synchronize_cookie(cfg->async_reset_cookie + 1);
+ cfg->async_reset_cookie = 0;
+}
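The "+ 1" above follows from the kernel async API semantics: async_synchronize_cookie(c) waits only for work scheduled before cookie c, so including the reset itself requires synchronizing one past its cookie:

/* Pairing used by this patch:
 *
 *	cookie = async_schedule(cxlflash_async_reset_host, cfg);
 *	...
 *	async_synchronize_cookie(cookie + 1);	// reset itself included
 */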
+
+/**
* stop_afu() - stops the AFU command timers and unmaps the MMIO space
* @cfg: Internal structure associated with the host.
*
@@ -600,6 +694,8 @@ static void stop_afu(struct cxlflash_cfg *cfg)
int i;
cancel_work_sync(&cfg->work_q);
+ if (!current_is_async())
+ cxlflash_reset_sync(cfg);
if (likely(afu)) {
while (atomic_read(&afu->cmds_active))
@@ -677,6 +773,7 @@ static void term_mc(struct cxlflash_cfg *cfg, u32 index)
struct afu *afu = cfg->afu;
struct device *dev = &cfg->dev->dev;
struct hwq *hwq;
+ ulong lock_flags;
if (!afu) {
dev_err(dev, "%s: returning with NULL afu\n", __func__);
@@ -694,6 +791,10 @@ static void term_mc(struct cxlflash_cfg *cfg, u32 index)
if (index != PRIMARY_HWQ)
WARN_ON(cxl_release_context(hwq->ctx));
hwq->ctx = NULL;
+
+ spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
+ flush_pending_cmds(hwq);
+ spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
}
/**
@@ -788,6 +889,46 @@ static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
}
/**
+ * cxlflash_get_minor() - gets the first available minor number
+ *
+ * Return: Unique minor number that can be used to create the character device.
+ */
+static int cxlflash_get_minor(void)
+{
+ int minor;
+ long bit;
+
+ bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
+ if (bit >= CXLFLASH_MAX_ADAPTERS)
+ return -1;
+
+ minor = bit & MINORMASK;
+ set_bit(minor, cxlflash_minor);
+ return minor;
+}
+
+/**
+ * cxlflash_put_minor() - releases the minor number
+ * @minor: Minor number that is no longer needed.
+ */
+static void cxlflash_put_minor(int minor)
+{
+ clear_bit(minor, cxlflash_minor);
+}
+
+/**
+ * cxlflash_release_chrdev() - release the character device for the host
+ * @cfg: Internal structure associated with the host.
+ */
+static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg)
+{
+ device_unregister(cfg->chardev);
+ cfg->chardev = NULL;
+ cdev_del(&cfg->cdev);
+ cxlflash_put_minor(MINOR(cfg->cdev.dev));
+}
+
+/**
* cxlflash_remove() - PCI entry point to tear down host
* @pdev: PCI device associated with the host.
*
@@ -822,6 +963,8 @@ static void cxlflash_remove(struct pci_dev *pdev)
cxlflash_stop_term_user_contexts(cfg);
switch (cfg->init_state) {
+ case INIT_STATE_CDEV:
+ cxlflash_release_chrdev(cfg);
case INIT_STATE_SCSI:
cxlflash_term_local_luns(cfg);
scsi_remove_host(cfg->host);
@@ -1690,6 +1833,18 @@ static int init_global(struct cxlflash_cfg *cfg)
SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
&hwq->ctrl_map->ctx_cap);
}
+
+ /*
+ * Determine write-same unmap support for host by evaluating the unmap
+ * sector support bit of the context control register associated with
+ * the primary hardware queue. Note that while this status is reflected
+ * in a context register, the outcome can be assumed to be host-wide.
+ */
+ hwq = get_hwq(afu, PRIMARY_HWQ);
+ reg = readq_be(&hwq->host_map->ctx_ctrl);
+ if (reg & SISL_CTX_CTRL_UNMAP_SECTOR)
+ cfg->ws_unmap = true;
+
/* Initialize heartbeat */
afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
out:
@@ -1722,7 +1877,10 @@ static int start_afu(struct cxlflash_cfg *cfg)
hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
hwq->hrrq_curr = hwq->hrrq_start;
hwq->toggle = 1;
+
+ /* Initialize spin locks */
spin_lock_init(&hwq->hrrq_slock);
+ spin_lock_init(&hwq->hsq_slock);
/* Initialize SQ */
if (afu_is_sq_cmd_mode(afu)) {
@@ -1731,7 +1889,6 @@ static int start_afu(struct cxlflash_cfg *cfg)
hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
hwq->hsq_curr = hwq->hsq_start;
- spin_lock_init(&hwq->hsq_slock);
atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
}
@@ -1821,6 +1978,7 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
hwq->afu = cfg->afu;
hwq->index = index;
+ INIT_LIST_HEAD(&hwq->pending_cmds);
if (index == PRIMARY_HWQ)
ctx = cxl_get_context(cfg->dev);
@@ -1984,7 +2142,6 @@ static int init_afu(struct cxlflash_cfg *cfg)
for (i = 0; i < afu->num_hwqs; i++) {
hwq = get_hwq(afu, i);
- spin_lock_init(&hwq->rrin_slock);
hwq->room = readq_be(&hwq->host_map->cmd_room);
}
@@ -2003,29 +2160,107 @@ err1:
}
/**
- * cxlflash_afu_sync() - builds and sends an AFU sync command
+ * afu_reset() - resets the AFU
+ * @cfg: Internal structure associated with the host.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int afu_reset(struct cxlflash_cfg *cfg)
+{
+ struct device *dev = &cfg->dev->dev;
+ int rc = 0;
+
+ /* Stop the context before the reset. Since the context is
+	 * no longer available, restart it after the reset is complete.
+ */
+ term_afu(cfg);
+
+ rc = init_afu(cfg);
+
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
+ return rc;
+}
+
+/**
+ * drain_ioctls() - wait until all currently executing ioctls have completed
+ * @cfg: Internal structure associated with the host.
+ *
+ * Obtain write access to the read/write semaphore that wraps ioctl
+ * handling to 'drain' ioctls currently executing.
+ */
+static void drain_ioctls(struct cxlflash_cfg *cfg)
+{
+ down_write(&cfg->ioctl_rwsem);
+ up_write(&cfg->ioctl_rwsem);
+}
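This drain works because every ioctl path holds the read side of ioctl_rwsem for its full duration (see cxlflash_chr_ioctl() later in this patch), so the write side cannot be acquired while any ioctl is in flight. A sketch of the pairing:

	/* Reader (each ioctl thread): */
	down_read(&cfg->ioctl_rwsem);
	/* ... ioctl work ... */
	up_read(&cfg->ioctl_rwsem);

	/* Drainer: returns only after all readers above have finished. */
	down_write(&cfg->ioctl_rwsem);
	up_write(&cfg->ioctl_rwsem);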
+
+/**
+ * cxlflash_async_reset_host() - asynchronous host reset handler
+ * @data: Private data provided while scheduling reset.
+ * @cookie: Cookie that can be used for checkpointing.
+ */
+static void cxlflash_async_reset_host(void *data, async_cookie_t cookie)
+{
+ struct cxlflash_cfg *cfg = data;
+ struct device *dev = &cfg->dev->dev;
+ int rc = 0;
+
+ if (cfg->state != STATE_RESET) {
+ dev_dbg(dev, "%s: Not performing a reset, state=%d\n",
+ __func__, cfg->state);
+ goto out;
+ }
+
+ drain_ioctls(cfg);
+ cxlflash_mark_contexts_error(cfg);
+ rc = afu_reset(cfg);
+ if (rc)
+ cfg->state = STATE_FAILTERM;
+ else
+ cfg->state = STATE_NORMAL;
+ wake_up_all(&cfg->reset_waitq);
+
+out:
+ scsi_unblock_requests(cfg->host);
+}
+
+/**
+ * cxlflash_schedule_async_reset() - schedule an asynchronous host reset
+ * @cfg: Internal structure associated with the host.
+ */
+static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg)
+{
+ struct device *dev = &cfg->dev->dev;
+
+ if (cfg->state != STATE_NORMAL) {
+ dev_dbg(dev, "%s: Not performing reset state=%d\n",
+ __func__, cfg->state);
+ return;
+ }
+
+ cfg->state = STATE_RESET;
+ scsi_block_requests(cfg->host);
+ cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host,
+ cfg);
+}
+
+/**
+ * send_afu_cmd() - builds and sends an internal AFU command
* @afu: AFU associated with the host.
- * @ctx_hndl_u: Identifies context requesting sync.
- * @res_hndl_u: Identifies resource requesting sync.
- * @mode: Type of sync to issue (lightweight, heavyweight, global).
+ * @rcb: Pre-populated IOARCB describing command to send.
*
- * The AFU can only take 1 sync command at a time. This routine enforces this
- * limitation by using a mutex to provide exclusive access to the AFU during
- * the sync. This design point requires calling threads to not be on interrupt
- * context due to the possibility of sleeping during concurrent sync operations.
+ * The AFU can only take one internal AFU command at a time. This limitation is
+ * enforced by using a mutex to provide exclusive access to the AFU during the
+ * operation. This design point requires calling threads not to be in interrupt
+ * context due to the possibility of sleeping during concurrent AFU operations.
*
- * AFU sync operations are only necessary and allowed when the device is
- * operating normally. When not operating normally, sync requests can occur as
- * part of cleaning up resources associated with an adapter prior to removal.
- * In this scenario, these requests are simply ignored (safe due to the AFU
- * going away).
+ * The command status is optionally passed back to the caller when the caller
+ * populates the IOASA field of the IOARCB with a pointer to an IOASA structure.
*
* Return:
- * 0 on success
- * -1 on failure
+ * 0 on success, -errno on failure
*/
-int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
- res_hndl_t res_hndl_u, u8 mode)
+static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
{
struct cxlflash_cfg *cfg = afu->parent;
struct device *dev = &cfg->dev->dev;
@@ -2033,6 +2268,7 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
char *buf = NULL;
int rc = 0;
+ int nretry = 0;
static DEFINE_MUTEX(sync_active);
if (cfg->state != STATE_NORMAL) {
@@ -2043,39 +2279,52 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
mutex_lock(&sync_active);
atomic_inc(&afu->cmds_active);
- buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
+ buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
if (unlikely(!buf)) {
dev_err(dev, "%s: no memory for command\n", __func__);
- rc = -1;
+ rc = -ENOMEM;
goto out;
}
cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
+
+retry:
+ memset(cmd, 0, sizeof(*cmd));
+ memcpy(&cmd->rcb, rcb, sizeof(*rcb));
+ INIT_LIST_HEAD(&cmd->queue);
init_completion(&cmd->cevent);
cmd->parent = afu;
cmd->hwq_index = hwq->index;
-
- dev_dbg(dev, "%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
-
- cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
cmd->rcb.ctx_id = hwq->ctx_hndl;
- cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
- cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;
-
- cmd->rcb.cdb[0] = 0xC0; /* AFU Sync */
- cmd->rcb.cdb[1] = mode;
- /* The cdb is aligned, no unaligned accessors required */
- *((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
- *((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);
+ dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n",
+ __func__, afu, cmd, cmd->rcb.cdb[0], nretry);
rc = afu->send_cmd(afu, cmd);
- if (unlikely(rc))
+ if (unlikely(rc)) {
+ rc = -ENOBUFS;
goto out;
+ }
rc = wait_resp(afu, cmd);
- if (unlikely(rc))
- rc = -1;
+ switch (rc) {
+ case -ETIMEDOUT:
+ rc = afu->context_reset(hwq);
+ if (rc) {
+ cxlflash_schedule_async_reset(cfg);
+ break;
+ }
+ /* fall through to retry */
+ case -EAGAIN:
+ if (++nretry < 2)
+ goto retry;
+ /* fall through to exit */
+ default:
+ break;
+ }
+
+ if (rcb->ioasa)
+ *rcb->ioasa = cmd->sa;
out:
atomic_dec(&afu->cmds_active);
mutex_unlock(&sync_active);
@@ -2085,38 +2334,88 @@ out:
}
/**
- * afu_reset() - resets the AFU
- * @cfg: Internal structure associated with the host.
+ * cxlflash_afu_sync() - builds and sends an AFU sync command
+ * @afu: AFU associated with the host.
+ * @ctx: Identifies context requesting sync.
+ * @res: Identifies resource requesting sync.
+ * @mode: Type of sync to issue (lightweight, heavyweight, global).
*
- * Return: 0 on success, -errno on failure
+ * AFU sync operations are only necessary and allowed when the device is
+ * operating normally. When not operating normally, sync requests can occur as
+ * part of cleaning up resources associated with an adapter prior to removal.
+ * In this scenario, these requests are simply ignored (safe due to the AFU
+ * going away).
+ *
+ * Return:
+ * 0 on success, -errno on failure
*/
-static int afu_reset(struct cxlflash_cfg *cfg)
+int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode)
{
+ struct cxlflash_cfg *cfg = afu->parent;
struct device *dev = &cfg->dev->dev;
- int rc = 0;
+ struct sisl_ioarcb rcb = { 0 };
- /* Stop the context before the reset. Since the context is
- * no longer available restart it after the reset is complete
- */
- term_afu(cfg);
+ dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n",
+ __func__, afu, ctx, res, mode);
- rc = init_afu(cfg);
+ rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
+ rcb.msi = SISL_MSI_RRQ_UPDATED;
+ rcb.timeout = MC_AFU_SYNC_TIMEOUT;
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
+ rcb.cdb[0] = SISL_AFU_CMD_SYNC;
+ rcb.cdb[1] = mode;
+ put_unaligned_be16(ctx, &rcb.cdb[2]);
+ put_unaligned_be32(res, &rcb.cdb[4]);
+
+ return send_afu_cmd(afu, &rcb);
}
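A worked example of the CDB assembled above, taking ctx = 5, res = 7 and a hypothetical mode byte of 0x01 (the real mode encodings live in sislite.h and are not shown in this hunk):

/* cdb[0]     = 0xC0         SISL_AFU_CMD_SYNC
 * cdb[1]     = 0x01         mode (assumed value)
 * cdb[2..3]  = 00 05        ctx, big endian (put_unaligned_be16)
 * cdb[4..7]  = 00 00 00 07  res, big endian (put_unaligned_be32)
 * cdb[8..15] = 00 ... 00    untouched; rcb was zero-initialized
 */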
/**
- * drain_ioctls() - wait until all currently executing ioctls have completed
- * @cfg: Internal structure associated with the host.
+ * cxlflash_eh_abort_handler() - abort a SCSI command
+ * @scp: SCSI command to abort.
*
- * Obtain write access to read/write semaphore that wraps ioctl
- * handling to 'drain' ioctls currently executing.
+ * CXL Flash devices do not support a single command abort. Reset the context
+ * as per the SISLite specification. Flush any pending commands in the hardware
+ * queue before the reset.
+ *
+ * Return: SUCCESS/FAILED as defined in scsi/scsi.h
*/
-static void drain_ioctls(struct cxlflash_cfg *cfg)
+static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp)
{
- down_write(&cfg->ioctl_rwsem);
- up_write(&cfg->ioctl_rwsem);
+ int rc = FAILED;
+ struct Scsi_Host *host = scp->device->host;
+ struct cxlflash_cfg *cfg = shost_priv(host);
+ struct afu_cmd *cmd = sc_to_afuc(scp);
+ struct device *dev = &cfg->dev->dev;
+ struct afu *afu = cfg->afu;
+ struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
+
+ dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
+ "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
+ scp->device->channel, scp->device->id, scp->device->lun,
+ get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
+
+ /* When the state is not normal, another reset/reload is in progress.
+ * Return failed and the mid-layer will invoke host reset handler.
+ */
+ if (cfg->state != STATE_NORMAL) {
+ dev_dbg(dev, "%s: Invalid state for abort, state=%d\n",
+ __func__, cfg->state);
+ goto out;
+ }
+
+ rc = afu->context_reset(hwq);
+ if (unlikely(rc))
+ goto out;
+
+ rc = SUCCESS;
+
+out:
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
+ return rc;
}
/**
@@ -2130,24 +2429,18 @@ static void drain_ioctls(struct cxlflash_cfg *cfg)
static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
{
int rc = SUCCESS;
- struct Scsi_Host *host = scp->device->host;
+ struct scsi_device *sdev = scp->device;
+ struct Scsi_Host *host = sdev->host;
struct cxlflash_cfg *cfg = shost_priv(host);
struct device *dev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
int rcr = 0;
- dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
- "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
- scp->device->channel, scp->device->id, scp->device->lun,
- get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
-
+ dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__,
+ host->host_no, sdev->channel, sdev->id, sdev->lun);
retry:
switch (cfg->state) {
case STATE_NORMAL:
- rcr = send_tmf(afu, scp, TMF_LUN_RESET);
+ rcr = send_tmf(cfg, sdev, TMF_LUN_RESET);
if (unlikely(rcr))
rc = FAILED;
break;
@@ -2184,13 +2477,7 @@ static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
struct cxlflash_cfg *cfg = shost_priv(host);
struct device *dev = &cfg->dev->dev;
- dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
- "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
- scp->device->channel, scp->device->id, scp->device->lun,
- get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
+ dev_dbg(dev, "%s: %d\n", __func__, host->host_no);
switch (cfg->state) {
case STATE_NORMAL:
@@ -2427,7 +2714,14 @@ static ssize_t lun_mode_store(struct device *dev,
static ssize_t ioctl_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
+ ssize_t bytes = 0;
+
+ bytes = scnprintf(buf, PAGE_SIZE,
+ "disk: %u\n", DK_CXLFLASH_VERSION_0);
+ bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
+ "host: %u\n", HT_CXLFLASH_VERSION_0);
+
+ return bytes;
}
/**
@@ -2833,6 +3127,7 @@ static struct scsi_host_template driver_template = {
.ioctl = cxlflash_ioctl,
.proc_name = CXLFLASH_NAME,
.queuecommand = cxlflash_queuecommand,
+ .eh_abort_handler = cxlflash_eh_abort_handler,
.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
.change_queue_depth = cxlflash_change_queue_depth,
@@ -2923,6 +3218,397 @@ static void cxlflash_worker_thread(struct work_struct *work)
}
/**
+ * cxlflash_chr_open() - character device open handler
+ * @inode: Device inode associated with this character device.
+ * @file: File pointer for this device.
+ *
+ * Only users with admin privileges are allowed to open the character device.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int cxlflash_chr_open(struct inode *inode, struct file *file)
+{
+ struct cxlflash_cfg *cfg;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev);
+ file->private_data = cfg;
+
+ return 0;
+}
+
+/**
+ * decode_hioctl() - translates encoded host ioctl to easily identifiable string
+ * @cmd: The host ioctl command to decode.
+ *
+ * Return: A string identifying the decoded host ioctl.
+ */
+static char *decode_hioctl(int cmd)
+{
+ switch (cmd) {
+ case HT_CXLFLASH_LUN_PROVISION:
+ return __stringify_1(HT_CXLFLASH_LUN_PROVISION);
+ }
+
+ return "UNKNOWN";
+}
+
+/**
+ * cxlflash_lun_provision() - host LUN provisioning handler
+ * @cfg: Internal structure associated with the host.
+ * @lunprov: Kernel copy of userspace ioctl data structure.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int cxlflash_lun_provision(struct cxlflash_cfg *cfg,
+ struct ht_cxlflash_lun_provision *lunprov)
+{
+ struct afu *afu = cfg->afu;
+ struct device *dev = &cfg->dev->dev;
+ struct sisl_ioarcb rcb;
+ struct sisl_ioasa asa;
+ __be64 __iomem *fc_port_regs;
+ u16 port = lunprov->port;
+ u16 scmd = lunprov->hdr.subcmd;
+ u16 type;
+ u64 reg;
+ u64 size;
+ u64 lun_id;
+ int rc = 0;
+
+ if (!afu_is_lun_provision(afu)) {
+ rc = -ENOTSUPP;
+ goto out;
+ }
+
+ if (port >= cfg->num_fc_ports) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ switch (scmd) {
+ case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN:
+ type = SISL_AFU_LUN_PROVISION_CREATE;
+ size = lunprov->size;
+ lun_id = 0;
+ break;
+ case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN:
+ type = SISL_AFU_LUN_PROVISION_DELETE;
+ size = 0;
+ lun_id = lunprov->lun_id;
+ break;
+ case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT:
+ fc_port_regs = get_fc_port_regs(cfg, port);
+
+ reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]);
+ lunprov->max_num_luns = reg;
+ reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]);
+ lunprov->cur_num_luns = reg;
+ reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]);
+ lunprov->max_cap_port = reg;
+ reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]);
+ lunprov->cur_cap_port = reg;
+
+ goto out;
+ default:
+ rc = -EINVAL;
+ goto out;
+ }
+
+ memset(&rcb, 0, sizeof(rcb));
+ memset(&asa, 0, sizeof(asa));
+ rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
+ rcb.lun_id = lun_id;
+ rcb.msi = SISL_MSI_RRQ_UPDATED;
+ rcb.timeout = MC_LUN_PROV_TIMEOUT;
+ rcb.ioasa = &asa;
+
+ rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION;
+ rcb.cdb[1] = type;
+ rcb.cdb[2] = port;
+ put_unaligned_be64(size, &rcb.cdb[8]);
+
+ rc = send_afu_cmd(afu, &rcb);
+ if (rc) {
+ dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
+ __func__, rc, asa.ioasc, asa.afu_extra);
+ goto out;
+ }
+
+ if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) {
+ lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo;
+ memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid));
+ }
+out:
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
+ return rc;
+}
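A sketch of a hypothetical userspace caller for the create subcommand; the field names come from this patch, but the uapi header layout and the size units are assumptions:

	struct ht_cxlflash_lun_provision lunprov = { 0 };

	lunprov.hdr.version = HT_CXLFLASH_VERSION_0;
	lunprov.hdr.subcmd = HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN;
	lunprov.port = 0;		/* first FC port */
	lunprov.size = 0x100000;	/* units are AFU-defined (assumed) */

	/* fd: open("/dev/cxlflash/cxlflash0", O_RDWR); needs CAP_SYS_ADMIN */
	if (ioctl(fd, HT_CXLFLASH_LUN_PROVISION, &lunprov) == 0)
		printf("LUN id %#llx\n", (unsigned long long)lunprov.lun_id);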
+
+/**
+ * cxlflash_afu_debug() - host AFU debug handler
+ * @cfg: Internal structure associated with the host.
+ * @afu_dbg: Kernel copy of userspace ioctl data structure.
+ *
+ * For debug requests requiring a data buffer, always provide an aligned
+ * (cache line) buffer to the AFU to appease any alignment requirements.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
+ struct ht_cxlflash_afu_debug *afu_dbg)
+{
+ struct afu *afu = cfg->afu;
+ struct device *dev = &cfg->dev->dev;
+ struct sisl_ioarcb rcb;
+ struct sisl_ioasa asa;
+ char *buf = NULL;
+ char *kbuf = NULL;
+ void __user *ubuf = (__force void __user *)afu_dbg->data_ea;
+ u16 req_flags = SISL_REQ_FLAGS_AFU_CMD;
+ u32 ulen = afu_dbg->data_len;
+ bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE;
+ int rc = 0;
+
+ if (!afu_is_afu_debug(afu)) {
+ rc = -ENOTSUPP;
+ goto out;
+ }
+
+ if (ulen) {
+ req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN;
+
+ if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (unlikely(!access_ok(is_write ? VERIFY_READ : VERIFY_WRITE,
+ ubuf, ulen))) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL);
+ if (unlikely(!buf)) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ kbuf = PTR_ALIGN(buf, cache_line_size());
+
+ if (is_write) {
+ req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
+
+ rc = copy_from_user(kbuf, ubuf, ulen);
+ if (unlikely(rc))
+ goto out;
+ }
+ }
+
+ memset(&rcb, 0, sizeof(rcb));
+ memset(&asa, 0, sizeof(asa));
+
+ rcb.req_flags = req_flags;
+ rcb.msi = SISL_MSI_RRQ_UPDATED;
+ rcb.timeout = MC_AFU_DEBUG_TIMEOUT;
+ rcb.ioasa = &asa;
+
+ if (ulen) {
+ rcb.data_len = ulen;
+ rcb.data_ea = (uintptr_t)kbuf;
+ }
+
+ rcb.cdb[0] = SISL_AFU_CMD_DEBUG;
+ memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd,
+ HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN);
+
+ rc = send_afu_cmd(afu, &rcb);
+ if (rc) {
+ dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
+ __func__, rc, asa.ioasc, asa.afu_extra);
+ goto out;
+ }
+
+ if (ulen && !is_write)
+ rc = copy_to_user(ubuf, kbuf, ulen);
+out:
+ kfree(buf);
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
+ return rc;
+}
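The kmalloc() plus PTR_ALIGN() pair above is the usual over-allocate-and-align idiom: kmalloc() makes no cache-line guarantee for arbitrary sizes, so the buffer is padded by cache_line_size() - 1 and the pointer rounded up. In isolation:

	void *raw = kmalloc(len + cache_line_size() - 1, GFP_KERNEL);
	void *aligned = PTR_ALIGN(raw, cache_line_size());

	/* ... hand "aligned" to the hardware (after a NULL check) ... */
	kfree(raw);	/* free the original pointer, never the aligned one */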
+
+/**
+ * cxlflash_chr_ioctl() - character device IOCTL handler
+ * @file: File pointer for this device.
+ * @cmd: IOCTL command.
+ * @arg: Userspace ioctl data structure.
+ *
+ * A read/write semaphore is used to implement a 'drain' of currently
+ * running ioctls. The read semaphore is taken at the beginning of each
+ * ioctl thread and released upon concluding execution. Additionally the
+ * semaphore should be released and then reacquired in any ioctl execution
+ * path which will wait for an event to occur that is outside the scope of
+ * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
+ * a thread simply needs to acquire the write semaphore.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ typedef int (*hioctl) (struct cxlflash_cfg *, void *);
+
+ struct cxlflash_cfg *cfg = file->private_data;
+ struct device *dev = &cfg->dev->dev;
+ char buf[sizeof(union cxlflash_ht_ioctls)];
+ void __user *uarg = (void __user *)arg;
+ struct ht_cxlflash_hdr *hdr;
+ size_t size = 0;
+ bool known_ioctl = false;
+ int idx = 0;
+ int rc = 0;
+ hioctl do_ioctl = NULL;
+
+ static const struct {
+ size_t size;
+ hioctl ioctl;
+ } ioctl_tbl[] = { /* NOTE: order matters here */
+ { sizeof(struct ht_cxlflash_lun_provision),
+ (hioctl)cxlflash_lun_provision },
+ { sizeof(struct ht_cxlflash_afu_debug),
+ (hioctl)cxlflash_afu_debug },
+ };
+
+ /* Hold read semaphore so we can drain if needed */
+ down_read(&cfg->ioctl_rwsem);
+
+ dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%lu\n",
+ __func__, cmd, idx, sizeof(ioctl_tbl));
+
+ switch (cmd) {
+ case HT_CXLFLASH_LUN_PROVISION:
+ case HT_CXLFLASH_AFU_DEBUG:
+ known_ioctl = true;
+		idx = _IOC_NR(cmd) - _IOC_NR(HT_CXLFLASH_LUN_PROVISION);
+ size = ioctl_tbl[idx].size;
+ do_ioctl = ioctl_tbl[idx].ioctl;
+
+ if (likely(do_ioctl))
+ break;
+
+ /* fall through */
+ default:
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (unlikely(copy_from_user(&buf, uarg, size))) {
+ dev_err(dev, "%s: copy_from_user() fail "
+ "size=%lu cmd=%d (%s) uarg=%p\n",
+ __func__, size, cmd, decode_hioctl(cmd), uarg);
+ rc = -EFAULT;
+ goto out;
+ }
+
+ hdr = (struct ht_cxlflash_hdr *)&buf;
+ if (hdr->version != HT_CXLFLASH_VERSION_0) {
+ dev_dbg(dev, "%s: Version %u not supported for %s\n",
+ __func__, hdr->version, decode_hioctl(cmd));
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) {
+ dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = do_ioctl(cfg, (void *)&buf);
+ if (likely(!rc))
+ if (unlikely(copy_to_user(uarg, &buf, size))) {
+ dev_err(dev, "%s: copy_to_user() fail "
+ "size=%lu cmd=%d (%s) uarg=%p\n",
+ __func__, size, cmd, decode_hioctl(cmd), uarg);
+ rc = -EFAULT;
+ }
+
+ /* fall through to exit */
+
+out:
+ up_read(&cfg->ioctl_rwsem);
+ if (unlikely(rc && known_ioctl))
+ dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
+ __func__, decode_hioctl(cmd), cmd, rc);
+ else
+ dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
+ __func__, decode_hioctl(cmd), cmd, rc);
+ return rc;
+}
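A worked example of the table lookup above, and the reason for the "order matters" note on ioctl_tbl, assuming the two command numbers are consecutive in _IOC_NR space:

/* cmd == HT_CXLFLASH_LUN_PROVISION -> idx = 0 -> cxlflash_lun_provision
 * cmd == HT_CXLFLASH_AFU_DEBUG     -> idx = 1 -> cxlflash_afu_debug
 */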
+
+/*
+ * Character device file operations
+ */
+static const struct file_operations cxlflash_chr_fops = {
+ .owner = THIS_MODULE,
+ .open = cxlflash_chr_open,
+ .unlocked_ioctl = cxlflash_chr_ioctl,
+ .compat_ioctl = cxlflash_chr_ioctl,
+};
+
+/**
+ * init_chrdev() - initialize the character device for the host
+ * @cfg: Internal structure associated with the host.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int init_chrdev(struct cxlflash_cfg *cfg)
+{
+ struct device *dev = &cfg->dev->dev;
+ struct device *char_dev;
+ dev_t devno;
+ int minor;
+ int rc = 0;
+
+ minor = cxlflash_get_minor();
+ if (unlikely(minor < 0)) {
+ dev_err(dev, "%s: Exhausted allowed adapters\n", __func__);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ devno = MKDEV(cxlflash_major, minor);
+ cdev_init(&cfg->cdev, &cxlflash_chr_fops);
+
+ rc = cdev_add(&cfg->cdev, devno, 1);
+ if (rc) {
+ dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc);
+ goto err1;
+ }
+
+ char_dev = device_create(cxlflash_class, NULL, devno,
+ NULL, "cxlflash%d", minor);
+ if (IS_ERR(char_dev)) {
+ rc = PTR_ERR(char_dev);
+ dev_err(dev, "%s: device_create failed rc=%d\n",
+ __func__, rc);
+ goto err2;
+ }
+
+ cfg->chardev = char_dev;
+out:
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
+ return rc;
+err2:
+ cdev_del(&cfg->cdev);
+err1:
+ cxlflash_put_minor(minor);
+ goto out;
+}
+
+/**
* cxlflash_probe() - PCI entry point to add host
* @pdev: PCI device associated with the host.
* @dev_id: PCI device id associated with device.
@@ -3032,6 +3718,13 @@ static int cxlflash_probe(struct pci_dev *pdev,
}
cfg->init_state = INIT_STATE_SCSI;
+ rc = init_chrdev(cfg);
+ if (rc) {
+ dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc);
+ goto out_remove;
+ }
+ cfg->init_state = INIT_STATE_CDEV;
+
if (wq_has_sleeper(&cfg->reset_waitq)) {
cfg->state = STATE_PROBED;
wake_up_all(&cfg->reset_waitq);
@@ -3134,6 +3827,63 @@ static void cxlflash_pci_resume(struct pci_dev *pdev)
scsi_unblock_requests(cfg->host);
}
+/**
+ * cxlflash_devnode() - provides devtmpfs for devices in the cxlflash class
+ * @dev: Character device.
+ * @mode: Mode that can be used to verify access.
+ *
+ * Return: Allocated string describing the devtmpfs structure.
+ */
+static char *cxlflash_devnode(struct device *dev, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev));
+}
+
+/**
+ * cxlflash_class_init() - create character device class
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int cxlflash_class_init(void)
+{
+ dev_t devno;
+ int rc = 0;
+
+ rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash");
+ if (unlikely(rc)) {
+ pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc);
+ goto out;
+ }
+
+ cxlflash_major = MAJOR(devno);
+
+ cxlflash_class = class_create(THIS_MODULE, "cxlflash");
+ if (IS_ERR(cxlflash_class)) {
+ rc = PTR_ERR(cxlflash_class);
+ pr_err("%s: class_create failed rc=%d\n", __func__, rc);
+ goto err;
+ }
+
+ cxlflash_class->devnode = cxlflash_devnode;
+out:
+ pr_debug("%s: returning rc=%d\n", __func__, rc);
+ return rc;
+err:
+ unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
+ goto out;
+}
+
+/**
+ * cxlflash_class_exit() - destroy character device class
+ */
+static void cxlflash_class_exit(void)
+{
+ dev_t devno = MKDEV(cxlflash_major, 0);
+
+ class_destroy(cxlflash_class);
+ unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
+}
+
static const struct pci_error_handlers cxlflash_err_handler = {
.error_detected = cxlflash_pci_error_detected,
.slot_reset = cxlflash_pci_slot_reset,
@@ -3159,10 +3909,23 @@ static struct pci_driver cxlflash_driver = {
*/
static int __init init_cxlflash(void)
{
+ int rc;
+
check_sizes();
cxlflash_list_init();
+ rc = cxlflash_class_init();
+ if (unlikely(rc))
+ goto out;
- return pci_register_driver(&cxlflash_driver);
+ rc = pci_register_driver(&cxlflash_driver);
+ if (unlikely(rc))
+ goto err;
+out:
+ pr_debug("%s: returning rc=%d\n", __func__, rc);
+ return rc;
+err:
+ cxlflash_class_exit();
+ goto out;
}
/**
@@ -3174,6 +3937,7 @@ static void __exit exit_cxlflash(void)
cxlflash_free_errpage();
pci_unregister_driver(&cxlflash_driver);
+ cxlflash_class_exit();
}
module_init(init_cxlflash);
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
index 49657f1f409e..880e348ed5c9 100644
--- a/drivers/scsi/cxlflash/main.h
+++ b/drivers/scsi/cxlflash/main.h
@@ -22,6 +22,7 @@
#define CXLFLASH_NAME "cxlflash"
#define CXLFLASH_ADAPTER_NAME "IBM POWER CXL Flash Adapter"
+#define CXLFLASH_MAX_ADAPTERS 32
#define PCI_DEVICE_ID_IBM_CORSA 0x04F0
#define PCI_DEVICE_ID_IBM_FLASH_GT 0x0600
@@ -40,6 +41,10 @@
/* FC defines */
#define FC_MTIP_CMDCONFIG 0x010
#define FC_MTIP_STATUS 0x018
+#define FC_MAX_NUM_LUNS 0x080 /* Max LUNs host can provision for port */
+#define FC_CUR_NUM_LUNS 0x088 /* Cur number LUNs provisioned for port */
+#define FC_MAX_CAP_PORT 0x090 /* Max capacity all LUNs for port (4K blocks) */
+#define FC_CUR_CAP_PORT 0x098 /* Cur capacity all LUNs for port (4K blocks) */
#define FC_PNAME 0x300
#define FC_CONFIG 0x320
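The FC_* values above are byte offsets into the per-port MMIO block, while the provisioning code walks that block as a __be64 array, hence the "/ 8" word indexing in cxlflash_lun_provision():

	/* byte offset 0x080 -> 64-bit word index 16 */
	reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]);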
@@ -62,6 +67,8 @@
/* AFU command timeout values */
#define MC_AFU_SYNC_TIMEOUT 5 /* 5 secs */
+#define MC_LUN_PROV_TIMEOUT 5 /* 5 secs */
+#define MC_AFU_DEBUG_TIMEOUT 5 /* 5 secs */
/* AFU command room retry limit */
#define MC_ROOM_RETRY_CNT 10
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
index a768360d2fa6..09daa86670fc 100644
--- a/drivers/scsi/cxlflash/sislite.h
+++ b/drivers/scsi/cxlflash/sislite.h
@@ -72,6 +72,13 @@ struct sisl_ioarcb {
u16 timeout; /* in units specified by req_flags */
u32 rsvd1;
u8 cdb[16]; /* must be in big endian */
+#define SISL_AFU_CMD_SYNC 0xC0 /* AFU sync command */
+#define SISL_AFU_CMD_LUN_PROVISION 0xD0 /* AFU LUN provision command */
+#define SISL_AFU_CMD_DEBUG 0xE0 /* AFU debug command */
+
+#define SISL_AFU_LUN_PROVISION_CREATE 0x00 /* LUN provision create type */
+#define SISL_AFU_LUN_PROVISION_DELETE 0x01 /* LUN provision delete type */
+
union {
u64 reserved; /* Reserved for IOARRIN mode */
struct sisl_ioasa *ioasa; /* IOASA EA for SQ Mode */
@@ -156,6 +163,7 @@ struct sisl_rc {
};
#define SISL_SENSE_DATA_LEN 20 /* Sense data length */
+#define SISL_WWID_DATA_LEN 16 /* WWID data length */
/*
* IOASA: 64 bytes & must follow IOARCB, min 16 byte alignment required,
@@ -167,7 +175,12 @@ struct sisl_ioasa {
u32 ioasc;
#define SISL_IOASC_GOOD_COMPLETION 0x00000000U
};
- u32 resid;
+
+ union {
+ u32 resid;
+ u32 lunid_hi;
+ };
+
u8 port;
u8 afu_extra;
/* when afu_rc=0x04, 0x14, 0x31 (_xxx_DMA_ERR):
@@ -190,7 +203,14 @@ struct sisl_ioasa {
u8 scsi_extra;
u8 fc_extra;
- u8 sense_data[SISL_SENSE_DATA_LEN];
+
+ union {
+ u8 sense_data[SISL_SENSE_DATA_LEN];
+ struct {
+ u32 lunid_lo;
+ u8 wwid[SISL_WWID_DATA_LEN];
+ };
+ };
/* These fields are defined by the SISlite architecture for the
* host to use as they see fit for their implementation.
@@ -263,6 +283,7 @@ struct sisl_host_map {
__be64 rrq_end; /* write sequence: start followed by end */
__be64 cmd_room;
__be64 ctx_ctrl; /* least significant byte or b56:63 is LISN# */
+#define SISL_CTX_CTRL_UNMAP_SECTOR 0x8000000000000000ULL /* b0 */
__be64 mbox_w; /* restricted use */
__be64 sq_start; /* Submission Queue (R/W): write sequence and */
__be64 sq_end; /* inclusion semantics are the same as RRQ */
@@ -392,6 +413,8 @@ struct sisl_global_regs {
#define SISL_INTVER_CAP_SQ_CMD_MODE 0x400000000000ULL
#define SISL_INTVER_CAP_RESERVED_CMD_MODE_A 0x200000000000ULL
#define SISL_INTVER_CAP_RESERVED_CMD_MODE_B 0x100000000000ULL
+#define SISL_INTVER_CAP_LUN_PROVISION 0x080000000000ULL
+#define SISL_INTVER_CAP_AFU_DEBUG 0x040000000000ULL
};
#define CXLFLASH_NUM_FC_PORTS_PER_BANK 2 /* fixed # of ports per bank */
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index fe9f17a6268b..ad0f9968ccfb 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -57,6 +57,19 @@ static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
}
/**
+ * marshal_udir_to_rele() - translate udirect to release structure
+ * @udirect: Source structure from which to translate/copy.
+ * @release: Destination structure for the translate/copy.
+ */
+static void marshal_udir_to_rele(struct dk_cxlflash_udirect *udirect,
+ struct dk_cxlflash_release *release)
+{
+ release->hdr = udirect->hdr;
+ release->context_id = udirect->context_id;
+ release->rsrc_handle = udirect->rsrc_handle;
+}
+
+/**
* cxlflash_free_errpage() - frees resources associated with global error page
*/
void cxlflash_free_errpage(void)
@@ -622,6 +635,7 @@ int _cxlflash_disk_release(struct scsi_device *sdev,
res_hndl_t rhndl = release->rsrc_handle;
int rc = 0;
+ int rcr = 0;
u64 ctxid = DECODE_CTXID(release->context_id),
rctxid = release->context_id;
@@ -686,8 +700,12 @@ int _cxlflash_disk_release(struct scsi_device *sdev,
rhte_f1->dw = 0;
dma_wmb(); /* Make RHT entry bottom-half clearing visible */
- if (!ctxi->err_recovery_active)
- cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
+ if (!ctxi->err_recovery_active) {
+ rcr = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
+ if (unlikely(rcr))
+ dev_dbg(dev, "%s: AFU sync failed rc=%d\n",
+ __func__, rcr);
+ }
break;
default:
WARN(1, "Unsupported LUN mode!");
@@ -1929,6 +1947,7 @@ static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
struct afu *afu = cfg->afu;
struct llun_info *lli = sdev->hostdata;
struct glun_info *gli = lli->parent;
+ struct dk_cxlflash_release rel = { { 0 }, 0 };
struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg;
@@ -1970,13 +1989,18 @@ static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
rsrc_handle = (rhte - ctxi->rht_start);
rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);
- cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);
last_lba = gli->max_lba;
pphys->hdr.return_flags = 0;
pphys->last_lba = last_lba;
pphys->rsrc_handle = rsrc_handle;
+ rc = cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);
+ if (unlikely(rc)) {
+ dev_dbg(dev, "%s: AFU sync failed rc=%d\n", __func__, rc);
+ goto err2;
+ }
+
out:
if (likely(ctxi))
put_context(ctxi);
@@ -1984,6 +2008,10 @@ out:
__func__, rsrc_handle, rc, last_lba);
return rc;
+err2:
+ marshal_udir_to_rele(pphys, &rel);
+ _cxlflash_disk_release(sdev, ctxi, &rel);
+ goto out;
err1:
cxlflash_lun_detach(gli);
goto out;
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index 90b5c19f81f0..bdfb93061460 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -446,6 +446,7 @@ static int write_same16(struct scsi_device *sdev,
while (left > 0) {
scsi_cmd[0] = WRITE_SAME_16;
+ scsi_cmd[1] = cfg->ws_unmap ? 0x8 : 0;
put_unaligned_be64(offset, &scsi_cmd[2]);
put_unaligned_be32(ws_limit < left ? ws_limit : left,
&scsi_cmd[10]);
@@ -594,7 +595,9 @@ static int grow_lxt(struct afu *afu,
rhte->lxt_cnt = my_new_size;
dma_wmb(); /* Make RHT entry's LXT table size update visible */
- cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
+ rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
+ if (unlikely(rc))
+ rc = -EAGAIN;
/* free old lxt if reallocated */
if (lxt != lxt_old)
@@ -673,8 +676,11 @@ static int shrink_lxt(struct afu *afu,
rhte->lxt_start = lxt;
dma_wmb(); /* Make RHT entry's LXT table update visible */
- if (needs_sync)
- cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
+ if (needs_sync) {
+ rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
+ if (unlikely(rc))
+ rc = -EAGAIN;
+ }
if (needs_ws) {
/*
@@ -792,6 +798,21 @@ int _cxlflash_vlun_resize(struct scsi_device *sdev,
rc = grow_lxt(afu, sdev, ctxid, rhndl, rhte, &new_size);
else if (new_size < rhte->lxt_cnt)
rc = shrink_lxt(afu, sdev, rhndl, rhte, ctxi, &new_size);
+ else {
+ /*
+ * Rare case where there is already sufficient space, just
+ * need to perform a translation sync with the AFU. This
+ * scenario likely follows a previous sync failure during
+ * a resize operation. Accordingly, perform the heavyweight
+ * form of translation sync as it is unknown which type of
+ * resize failed previously.
+ */
+ rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
+ if (unlikely(rc)) {
+ rc = -EAGAIN;
+ goto out;
+ }
+ }
resize->hdr.return_flags = 0;
resize->last_lba = (new_size * MC_CHUNK_SIZE * gli->blk_len);
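
The -EAGAIN returned on a failed translation sync is a retryable status: as the comment above notes, a subsequent resize can redo the sync. A hedged sketch of a caller honoring that contract (do_resize_ioctl() is a hypothetical wrapper, not part of this patch):

static int resize_with_retry(struct scsi_device *sdev,
			     struct dk_cxlflash_resize *resize, int tries)
{
	int rc;

	do {
		rc = do_resize_ioctl(sdev, resize); /* hypothetical */
	} while (rc == -EAGAIN && --tries > 0);

	return rc;
}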
@@ -1084,10 +1105,13 @@ static int clone_lxt(struct afu *afu,
{
struct cxlflash_cfg *cfg = afu->parent;
struct device *dev = &cfg->dev->dev;
- struct sisl_lxt_entry *lxt;
+ struct sisl_lxt_entry *lxt = NULL;
+ bool locked = false;
u32 ngrps;
u64 aun; /* chunk# allocated by block allocator */
- int i, j;
+ int j;
+ int i = 0;
+ int rc = 0;
ngrps = LXT_NUM_GROUPS(rhte_src->lxt_cnt);
@@ -1095,33 +1119,29 @@ static int clone_lxt(struct afu *afu,
/* allocate new LXTs for clone */
lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
GFP_KERNEL);
- if (unlikely(!lxt))
- return -ENOMEM;
+ if (unlikely(!lxt)) {
+ rc = -ENOMEM;
+ goto out;
+ }
/* copy over */
memcpy(lxt, rhte_src->lxt_start,
(sizeof(*lxt) * rhte_src->lxt_cnt));
- /* clone the LBAs in block allocator via ref_cnt */
+	/* clone the LBAs in block allocator via ref_cnt; note that the
+ * block allocator mutex must be held until it is established
+ * that this routine will complete without the need for a
+ * cleanup.
+ */
mutex_lock(&blka->mutex);
+ locked = true;
for (i = 0; i < rhte_src->lxt_cnt; i++) {
aun = (lxt[i].rlba_base >> MC_CHUNK_SHIFT);
if (ba_clone(&blka->ba_lun, aun) == -1ULL) {
- /* free the clones already made */
- for (j = 0; j < i; j++) {
- aun = (lxt[j].rlba_base >>
- MC_CHUNK_SHIFT);
- ba_free(&blka->ba_lun, aun);
- }
-
- mutex_unlock(&blka->mutex);
- kfree(lxt);
- return -EIO;
+ rc = -EIO;
+ goto err;
}
}
- mutex_unlock(&blka->mutex);
- } else {
- lxt = NULL;
}
/*
@@ -1136,10 +1156,31 @@ static int clone_lxt(struct afu *afu,
rhte->lxt_cnt = rhte_src->lxt_cnt;
dma_wmb(); /* Make RHT entry's LXT table size update visible */
- cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
+ rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
+ if (unlikely(rc)) {
+ rc = -EAGAIN;
+ goto err2;
+ }
- dev_dbg(dev, "%s: returning\n", __func__);
- return 0;
+out:
+ if (locked)
+ mutex_unlock(&blka->mutex);
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
+ return rc;
+err2:
+ /* Reset the RHTE */
+ rhte->lxt_cnt = 0;
+ dma_wmb();
+ rhte->lxt_start = NULL;
+ dma_wmb();
+err:
+ /* free the clones already made */
+ for (j = 0; j < i; j++) {
+ aun = (lxt[j].rlba_base >> MC_CHUNK_SHIFT);
+ ba_free(&blka->ba_lun, aun);
+ }
+ kfree(lxt);
+ goto out;
}
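
The reworked clone_lxt() follows a common unwind pattern: record how far the loop got (i), undo only the entries already cloned ([0, i)) on error, and funnel every path through one exit that drops the lock. A self-contained sketch of the idiom, with stubs standing in for ba_clone()/ba_free() and the block allocator mutex:

static int locked;			/* stands in for blka->mutex */
static int clone_one(int i) { return i == 3 ? -1 : 0; }	/* fails on #3 */
static void undo_one(int i) { (void)i; }

static int clone_all(int count)
{
	int i, j, rc = 0;

	locked = 1;
	for (i = 0; i < count; i++) {
		if (clone_one(i)) {
			rc = -5;	/* -EIO */
			goto err;
		}
	}
out:
	locked = 0;
	return rc;
err:
	for (j = 0; j < i; j++)
		undo_one(j);
	goto out;
}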
/**
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index c01b47e5b55a..0962fd544401 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -57,7 +57,6 @@
/* device handler flags */
#define ALUA_OPTIMIZE_STPG 0x01
#define ALUA_RTPG_EXT_HDR_UNSUPP 0x02
-#define ALUA_SYNC_STPG 0x04
/* State machine flags */
#define ALUA_PG_RUN_RTPG 0x10
#define ALUA_PG_RUN_STPG 0x20
@@ -70,7 +69,6 @@ MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than
static LIST_HEAD(port_group_list);
static DEFINE_SPINLOCK(port_group_lock);
static struct workqueue_struct *kaluad_wq;
-static struct workqueue_struct *kaluad_sync_wq;
struct alua_port_group {
struct kref kref;
@@ -380,8 +378,6 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
}
spin_lock_irqsave(&pg->lock, flags);
- if (sdev->synchronous_alua)
- pg->flags |= ALUA_SYNC_STPG;
if (pg_updated)
list_add_rcu(&h->node, &pg->dh_list);
spin_unlock_irqrestore(&pg->lock, flags);
@@ -785,7 +781,6 @@ static void alua_rtpg_work(struct work_struct *work)
int err = SCSI_DH_OK;
struct alua_queue_data *qdata, *tmp;
unsigned long flags;
- struct workqueue_struct *alua_wq = kaluad_wq;
spin_lock_irqsave(&pg->lock, flags);
sdev = pg->rtpg_sdev;
@@ -796,8 +791,6 @@ static void alua_rtpg_work(struct work_struct *work)
kref_put(&pg->kref, release_port_group);
return;
}
- if (pg->flags & ALUA_SYNC_STPG)
- alua_wq = kaluad_sync_wq;
pg->flags |= ALUA_PG_RUNNING;
if (pg->flags & ALUA_PG_RUN_RTPG) {
int state = pg->state;
@@ -810,7 +803,7 @@ static void alua_rtpg_work(struct work_struct *work)
pg->flags &= ~ALUA_PG_RUNNING;
pg->flags |= ALUA_PG_RUN_RTPG;
spin_unlock_irqrestore(&pg->lock, flags);
- queue_delayed_work(alua_wq, &pg->rtpg_work,
+ queue_delayed_work(kaluad_wq, &pg->rtpg_work,
pg->interval * HZ);
return;
}
@@ -822,7 +815,7 @@ static void alua_rtpg_work(struct work_struct *work)
pg->flags &= ~ALUA_PG_RUNNING;
pg->flags |= ALUA_PG_RUN_RTPG;
spin_unlock_irqrestore(&pg->lock, flags);
- queue_delayed_work(alua_wq, &pg->rtpg_work,
+ queue_delayed_work(kaluad_wq, &pg->rtpg_work,
pg->interval * HZ);
return;
}
@@ -839,7 +832,7 @@ static void alua_rtpg_work(struct work_struct *work)
pg->interval = 0;
pg->flags &= ~ALUA_PG_RUNNING;
spin_unlock_irqrestore(&pg->lock, flags);
- queue_delayed_work(alua_wq, &pg->rtpg_work,
+ queue_delayed_work(kaluad_wq, &pg->rtpg_work,
pg->interval * HZ);
return;
}
@@ -874,8 +867,6 @@ static bool alua_rtpg_queue(struct alua_port_group *pg,
{
int start_queue = 0;
unsigned long flags;
- struct workqueue_struct *alua_wq = kaluad_wq;
-
if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev))
return false;
@@ -900,12 +891,10 @@ static bool alua_rtpg_queue(struct alua_port_group *pg,
}
}
- if (pg->flags & ALUA_SYNC_STPG)
- alua_wq = kaluad_sync_wq;
spin_unlock_irqrestore(&pg->lock, flags);
if (start_queue) {
- if (queue_delayed_work(alua_wq, &pg->rtpg_work,
+ if (queue_delayed_work(kaluad_wq, &pg->rtpg_work,
msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS)))
sdev = NULL;
else
@@ -1166,16 +1155,11 @@ static int __init alua_init(void)
/* Temporary failure, bypass */
return SCSI_DH_DEV_TEMP_BUSY;
}
- kaluad_sync_wq = create_workqueue("kaluad_sync");
- if (!kaluad_sync_wq) {
- destroy_workqueue(kaluad_wq);
- return SCSI_DH_DEV_TEMP_BUSY;
- }
+
r = scsi_register_device_handler(&alua_dh);
if (r != 0) {
printk(KERN_ERR "%s: Failed to register scsi device handler",
ALUA_DH_NAME);
- destroy_workqueue(kaluad_sync_wq);
destroy_workqueue(kaluad_wq);
}
return r;
@@ -1184,7 +1168,6 @@ static int __init alua_init(void)
static void __exit alua_exit(void)
{
scsi_unregister_device_handler(&alua_dh);
- destroy_workqueue(kaluad_sync_wq);
destroy_workqueue(kaluad_wq);
}
diff --git a/drivers/scsi/dpt/dpti_i2o.h b/drivers/scsi/dpt/dpti_i2o.h
index bd9e31e16249..16fc380b5512 100644
--- a/drivers/scsi/dpt/dpti_i2o.h
+++ b/drivers/scsi/dpt/dpti_i2o.h
@@ -48,7 +48,7 @@
#include <linux/wait.h>
typedef wait_queue_head_t adpt_wait_queue_head_t;
#define ADPT_DECLARE_WAIT_QUEUE_HEAD(wait) DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait)
-typedef wait_queue_t adpt_wait_queue_t;
+typedef wait_queue_entry_t adpt_wait_queue_entry_t;
/*
* message structures
diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h
index b6030e3edd01..1da6407ee142 100644
--- a/drivers/scsi/esas2r/esas2r.h
+++ b/drivers/scsi/esas2r/esas2r.h
@@ -945,8 +945,8 @@ struct esas2r_adapter {
struct list_head vrq_mds_head;
struct esas2r_mem_desc *vrq_mds;
int num_vrqs;
- struct semaphore fm_api_semaphore;
- struct semaphore fs_api_semaphore;
+ struct mutex fm_api_mutex;
+ struct mutex fs_api_mutex;
struct semaphore nvram_semaphore;
struct atto_ioctl *local_atto_ioctl;
u8 fw_coredump_buff[ESAS2R_FWCOREDUMP_SZ];
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index 6432a50b26d8..5b14dd29b764 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -327,8 +327,8 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
esas2r_debug("new adapter %p, name %s", a, a->name);
spin_lock_init(&a->request_lock);
spin_lock_init(&a->fw_event_lock);
- sema_init(&a->fm_api_semaphore, 1);
- sema_init(&a->fs_api_semaphore, 1);
+ mutex_init(&a->fm_api_mutex);
+ mutex_init(&a->fs_api_mutex);
sema_init(&a->nvram_semaphore, 1);
esas2r_fw_event_off(a);
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index 2d4b7f049a68..97623002908f 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -110,7 +110,7 @@ static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
{
struct esas2r_request *rq;
- if (down_interruptible(&a->fm_api_semaphore)) {
+ if (mutex_lock_interruptible(&a->fm_api_mutex)) {
fi->status = FI_STAT_BUSY;
return;
}
@@ -173,7 +173,7 @@ all_done:
free_req:
esas2r_free_request(a, (struct esas2r_request *)rq);
free_sem:
- up(&a->fm_api_semaphore);
+ mutex_unlock(&a->fm_api_mutex);
return;
}
@@ -1962,7 +1962,7 @@ int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count)
(struct esas2r_ioctl_fs *)a->fs_api_buffer;
/* If another flash request is already in progress, return. */
- if (down_interruptible(&a->fs_api_semaphore)) {
+ if (mutex_lock_interruptible(&a->fs_api_mutex)) {
busy:
fs->status = ATTO_STS_OUT_OF_RSRC;
return -EBUSY;
@@ -1978,7 +1978,7 @@ busy:
rq = esas2r_alloc_request(a);
if (rq == NULL) {
esas2r_debug("esas2r_read_fs: out of requests");
- up(&a->fs_api_semaphore);
+ mutex_unlock(&a->fs_api_mutex);
goto busy;
}
@@ -2006,7 +2006,7 @@ busy:
;
dont_wait:
/* Free the request and keep going */
- up(&a->fs_api_semaphore);
+ mutex_unlock(&a->fs_api_mutex);
esas2r_free_request(a, (struct esas2r_request *)rq);
/* Pick up possible error code from above */
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 90939f66bc0d..85f9a3eba387 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -519,7 +519,7 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
* @skb: The receive skb
* @netdev: The associated net device
* @ptype: The packet_type structure which was used to register this handler
- * @orig_dev: The original net_device the the skb was received on.
+ * @orig_dev: The original net_device the skb was received on.
* (in case dev is a bond)
*
* Returns: 0 for success
@@ -542,7 +542,7 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
* @skb: The receive skb
* @netdev: The associated net device
* @ptype: The packet_type structure which was used to register this handler
- * @orig_dev: The original net_device the the skb was received on.
+ * @orig_dev: The original net_device the skb was received on.
* (in case dev is a bond)
*
* Returns: 0 for success
@@ -1543,7 +1543,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
cp = kmap_atomic(skb_frag_page(frag))
+ frag->page_offset;
} else {
- cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
+ cp = skb_put(skb, tlen);
}
memset(cp, 0, sizeof(*cp));
@@ -2258,7 +2258,7 @@ static int _fcoe_create(struct net_device *netdev, enum fip_mode fip_mode,
fcoe_interface_cleanup(fcoe);
mutex_unlock(&fcoe_config_mutex);
fcoe_ctlr_device_delete(ctlr_dev);
- goto out;
+ return rc;
}
/* Make this the "master" N_Port */
@@ -2299,7 +2299,7 @@ static int _fcoe_create(struct net_device *netdev, enum fip_mode fip_mode,
out_nodev:
rtnl_unlock();
mutex_unlock(&fcoe_config_mutex);
-out:
+
return rc;
}
@@ -2590,7 +2590,7 @@ module_exit(fcoe_exit);
* fcoe_flogi_resp() - FCoE specific FLOGI and FDISC response handler
* @seq: active sequence in the FLOGI or FDISC exchange
* @fp: response frame, or error encoded in a pointer (timeout)
- * @arg: pointer the the fcoe_ctlr structure
+ * @arg: pointer to the fcoe_ctlr structure
*
* This handles MAC address management for FCoE, then passes control on to
* the libfc FLOGI response handler.
@@ -2619,7 +2619,7 @@ done:
* fcoe_logo_resp() - FCoE specific LOGO response handler
* @seq: active sequence in the LOGO exchange
* @fp: response frame, or error encoded in a pointer (timeout)
- * @arg: pointer the the fcoe_ctlr structure
+ * @arg: pointer to the fcoe_ctlr structure
*
* This handles MAC address management for FCoE, then passes control on to
* the libfc LOGO response handler.
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 656463ff9ccb..fff6f1851dc1 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -626,7 +626,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
fh = (struct fc_frame_header *)skb->data;
op = *(u8 *)(fh + 1);
dlen = sizeof(struct fip_encaps) + skb->len; /* len before push */
- cap = (struct fip_encaps_head *)skb_push(skb, sizeof(*cap));
+ cap = skb_push(skb, sizeof(*cap));
memset(cap, 0, sizeof(*cap));
if (lport->point_to_multipoint) {
@@ -660,8 +660,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
if (op != ELS_LS_RJT) {
dlen += sizeof(*mac);
- mac = (struct fip_mac_desc *)skb_put(skb, sizeof(*mac));
- memset(mac, 0, sizeof(*mac));
+ mac = skb_put_zero(skb, sizeof(*mac));
mac->fd_desc.fip_dtype = FIP_DT_MAC;
mac->fd_desc.fip_dlen = sizeof(*mac) / FIP_BPW;
if (dtype != FIP_DT_FLOGI && dtype != FIP_DT_FDISC) {
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index d6498fabe628..5e3d909cfc53 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -632,6 +632,7 @@ static ssize_t fnic_reset_stats_write(struct file *file,
sizeof(struct io_path_stats) - sizeof(u64));
memset(fw_stats_p+1, 0,
sizeof(struct fw_stats) - sizeof(u64));
+ getnstimeofday(&stats->stats_timestamps.last_reset_time);
}
(*ppos)++;
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 245dcd95e11f..999fc7547560 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -65,6 +65,30 @@ void fnic_handle_link(struct work_struct *work)
fnic->link_status = vnic_dev_link_status(fnic->vdev);
fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
+ switch (vnic_dev_port_speed(fnic->vdev)) {
+ case DCEM_PORTSPEED_10G:
+ fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT;
+ fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT;
+ break;
+ case DCEM_PORTSPEED_25G:
+ fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT;
+ fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT;
+ break;
+ case DCEM_PORTSPEED_40G:
+ case DCEM_PORTSPEED_4x10G:
+ fc_host_speed(fnic->lport->host) = FC_PORTSPEED_40GBIT;
+ fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT;
+ break;
+ case DCEM_PORTSPEED_100G:
+ fc_host_speed(fnic->lport->host) = FC_PORTSPEED_100GBIT;
+ fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT;
+ break;
+ default:
+ fc_host_speed(fnic->lport->host) = FC_PORTSPEED_UNKNOWN;
+ fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
+ break;
+ }
+
if (old_link_status == fnic->link_status) {
if (!fnic->link_status) {
/* DOWN -> DOWN */
@@ -640,7 +664,7 @@ static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
eh = (struct ethhdr *)skb->data;
if (eh->h_proto == htons(ETH_P_8021Q)) {
memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
- eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
+ eh = skb_pull(skb, VLAN_HLEN);
skb_reset_mac_header(skb);
}
if (eh->h_proto == htons(ETH_P_FIP)) {
@@ -1000,8 +1024,7 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
if (!fnic->vlan_hw_insert) {
eth_hdr = (struct ethhdr *)skb_mac_header(skb);
- vlan_hdr = (struct vlan_ethhdr *)skb_push(skb,
- sizeof(*vlan_hdr) - sizeof(*eth_hdr));
+ vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr));
memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
@@ -1067,7 +1090,7 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
if (!fnic->vlan_hw_insert) {
eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
- vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
+ vlan_hdr = skb_push(skb, eth_hdr_len);
eth_hdr = (struct ethhdr *)vlan_hdr;
vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
@@ -1075,7 +1098,7 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
} else {
eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
- eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
+ eth_hdr = skb_push(skb, eth_hdr_len);
eth_hdr->h_proto = htons(ETH_P_FCOE);
fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
}
diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h
index c35b8f1889ea..e0bc659ed71f 100644
--- a/drivers/scsi/fnic/fnic_io.h
+++ b/drivers/scsi/fnic/fnic_io.h
@@ -66,4 +66,13 @@ struct fnic_io_req {
struct completion *dr_done; /* completion for device reset */
};
+enum fnic_port_speeds {
+ DCEM_PORTSPEED_NONE = 0,
+ DCEM_PORTSPEED_1G = 1000,
+ DCEM_PORTSPEED_10G = 10000,
+ DCEM_PORTSPEED_40G = 40000,
+ DCEM_PORTSPEED_4x10G = 41000,
+ DCEM_PORTSPEED_25G = 25000,
+ DCEM_PORTSPEED_100G = 100000,
+};
#endif /* _FNIC_IO_H_ */
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index ba58b7953263..aacadbf20b69 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -176,11 +176,21 @@ static void fnic_get_host_speed(struct Scsi_Host *shost)
/* Add in other values as they get defined in fw */
switch (port_speed) {
- case 10000:
+ case DCEM_PORTSPEED_10G:
fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
break;
+ case DCEM_PORTSPEED_25G:
+ fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
+ break;
+ case DCEM_PORTSPEED_40G:
+ case DCEM_PORTSPEED_4x10G:
+ fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
+ break;
+ case DCEM_PORTSPEED_100G:
+ fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
+ break;
default:
- fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+ fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
}
}
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index d048f3b5006f..6c0646d62dfb 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -466,15 +466,27 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
}
rp = rport->dd_data;
- if (!rp || rp->rp_state != RPORT_ST_READY) {
+ if (!rp || rp->rp_state == RPORT_ST_DELETE) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "returning DID_NO_CONNECT for IO as rport is removed\n");
+ "rport 0x%x removed, returning DID_NO_CONNECT\n",
+ rport->port_id);
+
atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
sc->result = DID_NO_CONNECT<<16;
done(sc);
return 0;
}
+ if (rp->rp_state != RPORT_ST_READY) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n",
+ rport->port_id, rp->rp_state);
+
+ sc->result = DID_IMM_RETRY << 16;
+ done(sc);
+ return 0;
+ }
+
if (lp->state != LPORT_ST_READY || !(lp->link_up))
return SCSI_MLQUEUE_HOST_BUSY;
@@ -633,6 +645,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
+ atomic64_set(&fnic->io_cmpl_skip, 0);
spin_lock_irqsave(&fnic->fnic_lock, flags);
diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h
index 88c73cccb015..e007feedbf72 100644
--- a/drivers/scsi/fnic/fnic_stats.h
+++ b/drivers/scsi/fnic/fnic_stats.h
@@ -16,6 +16,12 @@
*/
#ifndef _FNIC_STATS_H_
#define _FNIC_STATS_H_
+
+struct stats_timestamps {
+ struct timespec last_reset_time;
+ struct timespec last_read_time;
+};
+
struct io_path_stats {
atomic64_t active_ios;
atomic64_t max_active_ios;
@@ -110,6 +116,7 @@ struct misc_stats {
};
struct fnic_stats {
+ struct stats_timestamps stats_timestamps;
struct io_path_stats io_stats;
struct abort_stats abts_stats;
struct terminate_stats term_stats;
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index b5ac5381a0d7..4826f596cb31 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -219,7 +219,31 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
int buf_size = debug->buf_size;
struct timespec val1, val2;
+ getnstimeofday(&val1);
len = snprintf(debug->debug_buffer + len, buf_size - len,
+ "------------------------------------------\n"
+ "\t\tTime\n"
+ "------------------------------------------\n");
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "Current time : [%ld:%ld]\n"
+ "Last stats reset time: [%ld:%ld]\n"
+ "Last stats read time: [%ld:%ld]\n"
+ "delta since last reset: [%ld:%ld]\n"
+ "delta since last read: [%ld:%ld]\n",
+ val1.tv_sec, val1.tv_nsec,
+ stats->stats_timestamps.last_reset_time.tv_sec,
+ stats->stats_timestamps.last_reset_time.tv_nsec,
+ stats->stats_timestamps.last_read_time.tv_sec,
+ stats->stats_timestamps.last_read_time.tv_nsec,
+ timespec_sub(val1, stats->stats_timestamps.last_reset_time).tv_sec,
+ timespec_sub(val1, stats->stats_timestamps.last_reset_time).tv_nsec,
+ timespec_sub(val1, stats->stats_timestamps.last_read_time).tv_sec,
+ timespec_sub(val1, stats->stats_timestamps.last_read_time).tv_nsec);
+
+ stats->stats_timestamps.last_read_time = val1;
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
"------------------------------------------\n"
"\t\tIO Statistics\n"
"------------------------------------------\n");
diff --git a/drivers/scsi/hisi_sas/Kconfig b/drivers/scsi/hisi_sas/Kconfig
index 374a329b91fc..d42f29a5eb65 100644
--- a/drivers/scsi/hisi_sas/Kconfig
+++ b/drivers/scsi/hisi_sas/Kconfig
@@ -6,4 +6,12 @@ config SCSI_HISI_SAS
select BLK_DEV_INTEGRITY
depends on ATA
help
- This driver supports HiSilicon's SAS HBA
+	  This driver supports HiSilicon's SAS HBA, including support for
+	  platform device based controllers.
+
+config SCSI_HISI_SAS_PCI
+ tristate "HiSilicon SAS on PCI bus"
+ depends on SCSI_HISI_SAS
+ depends on PCI
+ help
+	  This driver supports HiSilicon's SAS HBA based on PCI devices.
diff --git a/drivers/scsi/hisi_sas/Makefile b/drivers/scsi/hisi_sas/Makefile
index c6d3a1b5fcb9..24623f228510 100644
--- a/drivers/scsi/hisi_sas/Makefile
+++ b/drivers/scsi/hisi_sas/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas_main.o
obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas_v1_hw.o hisi_sas_v2_hw.o
+obj-$(CONFIG_SCSI_HISI_SAS_PCI) += hisi_sas_v3_hw.o
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 4e28f32e90b0..a722f2bd72ab 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -18,6 +18,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
@@ -33,10 +34,24 @@
#define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
#define HISI_SAS_RESET_BIT 0
-#define HISI_SAS_STATUS_BUF_SZ \
- (sizeof(struct hisi_sas_err_record) + 1024)
-#define HISI_SAS_COMMAND_TABLE_SZ \
- (((sizeof(union hisi_sas_command_table)+3)/4)*4)
+#define HISI_SAS_STATUS_BUF_SZ (sizeof(struct hisi_sas_status_buffer))
+#define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table))
+
+#define hisi_sas_status_buf_addr(buf) \
+ (buf + offsetof(struct hisi_sas_slot_buf_table, status_buffer))
+#define hisi_sas_status_buf_addr_mem(slot) hisi_sas_status_buf_addr(slot->buf)
+#define hisi_sas_status_buf_addr_dma(slot) \
+ hisi_sas_status_buf_addr(slot->buf_dma)
+
+#define hisi_sas_cmd_hdr_addr(buf) \
+ (buf + offsetof(struct hisi_sas_slot_buf_table, command_header))
+#define hisi_sas_cmd_hdr_addr_mem(slot) hisi_sas_cmd_hdr_addr(slot->buf)
+#define hisi_sas_cmd_hdr_addr_dma(slot) hisi_sas_cmd_hdr_addr(slot->buf_dma)
+
+#define hisi_sas_sge_addr(buf) \
+ (buf + offsetof(struct hisi_sas_slot_buf_table, sge_page))
+#define hisi_sas_sge_addr_mem(slot) hisi_sas_sge_addr(slot->buf)
+#define hisi_sas_sge_addr_dma(slot) hisi_sas_sge_addr(slot->buf_dma)
#define HISI_SAS_MAX_SSP_RESP_SZ (sizeof(struct ssp_frame_hdr) + 1024)
#define HISI_SAS_MAX_SMP_RESP_SZ 1028
@@ -46,6 +61,12 @@
((type == SAS_EDGE_EXPANDER_DEVICE) || \
(type == SAS_FANOUT_EXPANDER_DEVICE))
+#define HISI_SAS_SATA_PROTOCOL_NONDATA 0x1
+#define HISI_SAS_SATA_PROTOCOL_PIO 0x2
+#define HISI_SAS_SATA_PROTOCOL_DMA 0x4
+#define HISI_SAS_SATA_PROTOCOL_FPDMA 0x8
+#define HISI_SAS_SATA_PROTOCOL_ATAPI 0x10
+
struct hisi_hba;
enum {
@@ -78,11 +99,11 @@ struct hisi_sas_phy {
struct work_struct phyup_ws;
u64 port_id; /* from hw */
u64 dev_sas_addr;
- u64 phy_type;
u64 frame_rcvd_size;
u8 frame_rcvd[32];
u8 phy_attached;
u8 reserved[3];
+ u32 phy_type;
enum sas_linkrate minimum_linkrate;
enum sas_linkrate maximum_linkrate;
};
@@ -102,20 +123,23 @@ struct hisi_sas_cq {
struct hisi_sas_dq {
struct hisi_hba *hisi_hba;
+ struct hisi_sas_slot *slot_prep;
+ spinlock_t lock;
int wr_point;
int id;
};
struct hisi_sas_device {
- enum sas_device_type dev_type;
struct hisi_hba *hisi_hba;
struct domain_device *sas_device;
+ struct hisi_sas_dq *dq;
+ struct list_head list;
u64 attached_phy;
- u64 device_id;
atomic64_t running_req;
- struct list_head list;
- u8 dev_status;
+ enum sas_device_type dev_type;
+ int device_id;
int sata_idx;
+ u8 dev_status;
};
struct hisi_sas_slot {
@@ -129,14 +153,10 @@ struct hisi_sas_slot {
int cmplt_queue_slot;
int idx;
int abort;
+ void *buf;
+ dma_addr_t buf_dma;
void *cmd_hdr;
dma_addr_t cmd_hdr_dma;
- void *status_buffer;
- dma_addr_t status_buffer_dma;
- void *command_table;
- dma_addr_t command_table_dma;
- struct hisi_sas_sge_page *sge_page;
- dma_addr_t sge_page_dma;
struct work_struct abort_slot;
struct timer_list internal_abort_timer;
};
@@ -154,9 +174,8 @@ struct hisi_sas_hw {
struct domain_device *device);
struct hisi_sas_device *(*alloc_dev)(struct domain_device *device);
void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no);
- int (*get_free_slot)(struct hisi_hba *hisi_hba, u32 dev_id,
- int *q, int *s);
- void (*start_delivery)(struct hisi_hba *hisi_hba);
+ int (*get_free_slot)(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq);
+ void (*start_delivery)(struct hisi_sas_dq *dq);
int (*prep_ssp)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot, int is_tmf,
struct hisi_sas_tmf_task *tmf);
@@ -179,6 +198,8 @@ struct hisi_sas_hw {
void (*free_device)(struct hisi_hba *hisi_hba,
struct hisi_sas_device *dev);
int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id);
+ void (*dereg_device)(struct hisi_hba *hisi_hba,
+ struct domain_device *device);
int (*soft_reset)(struct hisi_hba *hisi_hba);
int max_command_entries;
int complete_hdr_size;
@@ -188,7 +209,10 @@ struct hisi_hba {
/* This must be the first element, used by SHOST_TO_SAS_HA */
struct sas_ha_struct *p;
- struct platform_device *pdev;
+ struct platform_device *platform_dev;
+ struct pci_dev *pci_dev;
+ struct device *dev;
+
void __iomem *regs;
struct regmap *ctrl;
u32 ctrl_reset_reg;
@@ -217,12 +241,9 @@ struct hisi_hba {
struct hisi_sas_port port[HISI_SAS_MAX_PHYS];
int queue_count;
- struct hisi_sas_slot *slot_prep;
- struct dma_pool *sge_page_pool;
+ struct dma_pool *buffer_pool;
struct hisi_sas_device devices[HISI_SAS_MAX_DEVICES];
- struct dma_pool *command_table_pool;
- struct dma_pool *status_buffer_pool;
struct hisi_sas_cmd_hdr *cmd_hdr[HISI_SAS_MAX_QUEUES];
dma_addr_t cmd_hdr_dma[HISI_SAS_MAX_QUEUES];
void *complete_hdr[HISI_SAS_MAX_QUEUES];
@@ -334,7 +355,7 @@ struct hisi_sas_command_table_stp {
#define HISI_SAS_SGE_PAGE_CNT SG_CHUNK_SIZE
struct hisi_sas_sge_page {
struct hisi_sas_sge sge[HISI_SAS_SGE_PAGE_CNT];
-};
+} __aligned(16);
struct hisi_sas_command_table_ssp {
struct ssp_frame_hdr hdr;
@@ -353,9 +374,31 @@ union hisi_sas_command_table {
struct hisi_sas_command_table_ssp ssp;
struct hisi_sas_command_table_smp smp;
struct hisi_sas_command_table_stp stp;
+} __aligned(16);
+
+struct hisi_sas_status_buffer {
+ struct hisi_sas_err_record err;
+ u8 iu[1024];
+} __aligned(16);
+
+struct hisi_sas_slot_buf_table {
+ struct hisi_sas_status_buffer status_buffer;
+ union hisi_sas_command_table command_header;
+ struct hisi_sas_sge_page sge_page;
};
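
The slot_buf_table consolidates what were three separate dma_pool allocations into one; the hisi_sas_*_addr() macros earlier in this header recover each sub-buffer by adding offsetof() into the combined block, for the CPU and DMA addresses alike. A standalone sketch of the technique (field sizes are illustrative, not the real layout):

#include <stddef.h>
#include <stdlib.h>

struct buf_table {
	char status[1040];
	char command[1024];
	char sge[2048];
};

int main(void)
{
	char *buf = calloc(1, sizeof(struct buf_table));
	char *status, *cmd, *sge;

	if (!buf)
		return 1;
	status = buf + offsetof(struct buf_table, status);
	cmd = buf + offsetof(struct buf_table, command);
	sge = buf + offsetof(struct buf_table, sge);

	/* One allocation, three disjoint regions; the DMA address is
	 * derived the same way from buf_dma in the driver. */
	(void)status; (void)cmd; (void)sge;
	free(buf);
	return 0;
}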
+extern struct scsi_transport_template *hisi_sas_stt;
+extern struct scsi_host_template *hisi_sas_sht;
+
+extern void hisi_sas_init_add(struct hisi_hba *hisi_hba);
+extern int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost);
+extern void hisi_sas_free(struct hisi_hba *hisi_hba);
+extern u8 hisi_sas_get_ata_protocol(u8 cmd, int direction);
extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port);
+extern void hisi_sas_sata_done(struct sas_task *task,
+ struct hisi_sas_slot *slot);
+extern int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag);
+extern int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba);
extern int hisi_sas_probe(struct platform_device *pdev,
const struct hisi_sas_hw *ops);
extern int hisi_sas_remove(struct platform_device *pdev);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index d622db502ec9..4022c3f8295f 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -23,6 +23,97 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
+u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
+{
+ switch (cmd) {
+ case ATA_CMD_FPDMA_WRITE:
+ case ATA_CMD_FPDMA_READ:
+ case ATA_CMD_FPDMA_RECV:
+ case ATA_CMD_FPDMA_SEND:
+ case ATA_CMD_NCQ_NON_DATA:
+ return HISI_SAS_SATA_PROTOCOL_FPDMA;
+
+ case ATA_CMD_DOWNLOAD_MICRO:
+ case ATA_CMD_ID_ATA:
+ case ATA_CMD_PMP_READ:
+ case ATA_CMD_READ_LOG_EXT:
+ case ATA_CMD_PIO_READ:
+ case ATA_CMD_PIO_READ_EXT:
+ case ATA_CMD_PMP_WRITE:
+ case ATA_CMD_WRITE_LOG_EXT:
+ case ATA_CMD_PIO_WRITE:
+ case ATA_CMD_PIO_WRITE_EXT:
+ return HISI_SAS_SATA_PROTOCOL_PIO;
+
+ case ATA_CMD_DSM:
+ case ATA_CMD_DOWNLOAD_MICRO_DMA:
+ case ATA_CMD_PMP_READ_DMA:
+ case ATA_CMD_PMP_WRITE_DMA:
+ case ATA_CMD_READ:
+ case ATA_CMD_READ_EXT:
+ case ATA_CMD_READ_LOG_DMA_EXT:
+ case ATA_CMD_READ_STREAM_DMA_EXT:
+ case ATA_CMD_TRUSTED_RCV_DMA:
+ case ATA_CMD_TRUSTED_SND_DMA:
+ case ATA_CMD_WRITE:
+ case ATA_CMD_WRITE_EXT:
+ case ATA_CMD_WRITE_FUA_EXT:
+ case ATA_CMD_WRITE_QUEUED:
+ case ATA_CMD_WRITE_LOG_DMA_EXT:
+ case ATA_CMD_WRITE_STREAM_DMA_EXT:
+ return HISI_SAS_SATA_PROTOCOL_DMA;
+
+ case ATA_CMD_CHK_POWER:
+ case ATA_CMD_DEV_RESET:
+ case ATA_CMD_EDD:
+ case ATA_CMD_FLUSH:
+ case ATA_CMD_FLUSH_EXT:
+ case ATA_CMD_VERIFY:
+ case ATA_CMD_VERIFY_EXT:
+ case ATA_CMD_SET_FEATURES:
+ case ATA_CMD_STANDBY:
+ case ATA_CMD_STANDBYNOW1:
+ return HISI_SAS_SATA_PROTOCOL_NONDATA;
+ default:
+ if (direction == DMA_NONE)
+ return HISI_SAS_SATA_PROTOCOL_NONDATA;
+ return HISI_SAS_SATA_PROTOCOL_PIO;
+ }
+}
+EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
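
The hw layers (v2, and the new v3 below) consume this helper when filling STP command headers. A hedged usage sketch, not taken from this patch:

static bool cmd_is_ncq(u8 ata_cmd, int dma_dir)
{
	return hisi_sas_get_ata_protocol(ata_cmd, dma_dir) ==
	       HISI_SAS_SATA_PROTOCOL_FPDMA;
}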
+
+void hisi_sas_sata_done(struct sas_task *task,
+ struct hisi_sas_slot *slot)
+{
+ struct task_status_struct *ts = &task->task_status;
+ struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
+ struct hisi_sas_status_buffer *status_buf =
+ hisi_sas_status_buf_addr_mem(slot);
+ u8 *iu = &status_buf->iu[0];
+ struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;
+
+ resp->frame_len = sizeof(struct dev_to_host_fis);
+ memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
+
+ ts->buf_valid_size = sizeof(*resp);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
+
+int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
+{
+ struct ata_queued_cmd *qc = task->uldd_task;
+
+ if (qc) {
+ if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
+ qc->tf.command == ATA_CMD_FPDMA_READ) {
+ *tag = qc->tag;
+ return 1;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
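
When the command is NCQ, the returned tag is folded into the command header by the hw layer. A hedged sketch of that call shape (the dword and field position are illustrative, not the real v2/v3 register layout):

static u32 build_hdr_dw(struct sas_task *task)
{
	u32 dw = 0, tag;

	if (hisi_sas_get_ncq_tag(task, &tag))
		dw |= tag << 12;	/* illustrative field position */
	return dw;
}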
+
static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
return device->port->ha->lldd_ha;
@@ -79,7 +170,7 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
{
if (task) {
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
struct domain_device *device = task->dev;
struct hisi_sas_device *sas_dev = device->lldd_dev;
@@ -94,17 +185,9 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
atomic64_dec(&sas_dev->running_req);
}
- if (slot->command_table)
- dma_pool_free(hisi_hba->command_table_pool,
- slot->command_table, slot->command_table_dma);
-
- if (slot->status_buffer)
- dma_pool_free(hisi_hba->status_buffer_pool,
- slot->status_buffer, slot->status_buffer_dma);
+ if (slot->buf)
+ dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);
- if (slot->sge_page)
- dma_pool_free(hisi_hba->sge_page_pool, slot->sge_page,
- slot->sge_page_dma);
list_del_init(&slot->entry);
slot->task = NULL;
@@ -156,7 +239,7 @@ static void hisi_sas_slot_abort(struct work_struct *work)
struct scsi_cmnd *cmnd = task->uldd_task;
struct hisi_sas_tmf_task tmf_task;
struct scsi_lun lun;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
int tag = abort_slot->idx;
unsigned long flags;
@@ -179,17 +262,18 @@ out:
task->task_done(task);
}
-static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
- int is_tmf, struct hisi_sas_tmf_task *tmf,
- int *pass)
+static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq *dq,
+			      int is_tmf, struct hisi_sas_tmf_task *tmf,
+			      int *pass)
{
+ struct hisi_hba *hisi_hba = dq->hisi_hba;
struct domain_device *device = task->dev;
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_sas_port *port;
struct hisi_sas_slot *slot;
struct hisi_sas_cmd_hdr *cmd_hdr_base;
struct asd_sas_port *sas_port = device->port;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
unsigned long flags;
@@ -209,7 +293,7 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
if (DEV_IS_GONE(sas_dev)) {
if (sas_dev)
- dev_info(dev, "task prep: device %llu not ready\n",
+ dev_info(dev, "task prep: device %d not ready\n",
sas_dev->device_id);
else
dev_info(dev, "task prep: device %016llx not ready\n",
@@ -240,18 +324,24 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
} else
n_elem = task->num_scatter;
+ spin_lock_irqsave(&hisi_hba->lock, flags);
if (hisi_hba->hw->slot_index_alloc)
rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
device);
else
rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
- if (rc)
+ if (rc) {
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
goto err_out;
- rc = hisi_hba->hw->get_free_slot(hisi_hba, sas_dev->device_id,
- &dlvry_queue, &dlvry_queue_slot);
+ }
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+
+ rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
if (rc)
goto err_out_tag;
+ dlvry_queue = dq->id;
+ dlvry_queue_slot = dq->wr_point;
slot = &hisi_hba->slot_info[slot_idx];
memset(slot, 0, sizeof(struct hisi_sas_slot));
@@ -266,24 +356,15 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
task->lldd_task = slot;
INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);
- slot->status_buffer = dma_pool_alloc(hisi_hba->status_buffer_pool,
- GFP_ATOMIC,
- &slot->status_buffer_dma);
- if (!slot->status_buffer) {
+ slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
+ GFP_ATOMIC, &slot->buf_dma);
+ if (!slot->buf) {
rc = -ENOMEM;
goto err_out_slot_buf;
}
- memset(slot->status_buffer, 0, HISI_SAS_STATUS_BUF_SZ);
-
- slot->command_table = dma_pool_alloc(hisi_hba->command_table_pool,
- GFP_ATOMIC,
- &slot->command_table_dma);
- if (!slot->command_table) {
- rc = -ENOMEM;
- goto err_out_status_buf;
- }
- memset(slot->command_table, 0, HISI_SAS_COMMAND_TABLE_SZ);
memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
+ memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
+ memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
switch (task->task_proto) {
case SAS_PROTOCOL_SMP:
@@ -306,9 +387,7 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
if (rc) {
dev_err(dev, "task prep: rc = 0x%x\n", rc);
- if (slot->sge_page)
- goto err_out_sge;
- goto err_out_command_table;
+ goto err_out_buf;
}
list_add_tail(&slot->entry, &sas_dev->list);
@@ -316,26 +395,22 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
- hisi_hba->slot_prep = slot;
+ dq->slot_prep = slot;
atomic64_inc(&sas_dev->running_req);
++(*pass);
return 0;
-err_out_sge:
- dma_pool_free(hisi_hba->sge_page_pool, slot->sge_page,
- slot->sge_page_dma);
-err_out_command_table:
- dma_pool_free(hisi_hba->command_table_pool, slot->command_table,
- slot->command_table_dma);
-err_out_status_buf:
- dma_pool_free(hisi_hba->status_buffer_pool, slot->status_buffer,
- slot->status_buffer_dma);
+err_out_buf:
+ dma_pool_free(hisi_hba->buffer_pool, slot->buf,
+ slot->buf_dma);
err_out_slot_buf:
/* Nothing to be done */
err_out_tag:
+ spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_index_free(hisi_hba, slot_idx);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out:
dev_err(dev, "task prep: failed[%d]!\n", rc);
if (!sas_protocol_ata(task->task_proto))
@@ -353,20 +428,23 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
u32 pass = 0;
unsigned long flags;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
+ struct domain_device *device = task->dev;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ struct hisi_sas_dq *dq = sas_dev->dq;
if (unlikely(test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)))
return -EINVAL;
/* protect task_prep and start_delivery sequence */
- spin_lock_irqsave(&hisi_hba->lock, flags);
- rc = hisi_sas_task_prep(task, hisi_hba, is_tmf, tmf, &pass);
+ spin_lock_irqsave(&dq->lock, flags);
+ rc = hisi_sas_task_prep(task, dq, is_tmf, tmf, &pass);
if (rc)
dev_err(dev, "task exec: failed[%d]!\n", rc);
if (likely(pass))
- hisi_hba->hw->start_delivery(hisi_hba);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ hisi_hba->hw->start_delivery(dq);
+ spin_unlock_irqrestore(&dq->lock, flags);
return rc;
}
@@ -421,12 +499,16 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
spin_lock(&hisi_hba->lock);
for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
+ int queue = i % hisi_hba->queue_count;
+ struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
+
hisi_hba->devices[i].device_id = i;
sas_dev = &hisi_hba->devices[i];
sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
sas_dev->dev_type = device->dev_type;
sas_dev->hisi_hba = hisi_hba;
sas_dev->sas_device = device;
+ sas_dev->dq = dq;
INIT_LIST_HEAD(&hisi_hba->devices[i].list);
break;
}
@@ -441,7 +523,7 @@ static int hisi_sas_dev_found(struct domain_device *device)
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct domain_device *parent_dev = device->parent;
struct hisi_sas_device *sas_dev;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
if (hisi_hba->hw->alloc_dev)
sas_dev = hisi_hba->hw->alloc_dev(device);
@@ -622,19 +704,28 @@ static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
}
}
+static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
+ struct domain_device *device)
+{
+ if (hisi_hba->hw->dereg_device)
+ hisi_hba->hw->dereg_device(hisi_hba, device);
+}
+
static void hisi_sas_dev_gone(struct domain_device *device)
{
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
- struct device *dev = &hisi_hba->pdev->dev;
- u64 dev_id = sas_dev->device_id;
+ struct device *dev = hisi_hba->dev;
+ int dev_id = sas_dev->device_id;
- dev_info(dev, "found dev[%lld:%x] is gone\n",
+ dev_info(dev, "found dev[%d:%x] is gone\n",
sas_dev->device_id, sas_dev->dev_type);
hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
+ hisi_sas_dereg_device(hisi_hba, device);
+
hisi_hba->hw->free_device(hisi_hba, sas_dev);
device->lldd_dev = NULL;
memset(sas_dev, 0, sizeof(*sas_dev));
@@ -691,8 +782,13 @@ static void hisi_sas_task_done(struct sas_task *task)
static void hisi_sas_tmf_timedout(unsigned long data)
{
struct sas_task *task = (struct sas_task *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+ task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
- task->task_state_flags |= SAS_TASK_STATE_ABORTED;
complete(&task->slow_task->completion);
}
@@ -704,7 +800,7 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
{
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
struct sas_task *task;
int res, retry;
@@ -821,7 +917,7 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device)
struct ata_link *link;
int rc = TMF_RESP_FUNC_FAILED;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
int s = sizeof(struct host_to_dev_fis);
unsigned long flags;
@@ -879,7 +975,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
return -1;
if (!test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
struct sas_ha_struct *sas_ha = &hisi_hba->sha;
unsigned long flags;
@@ -912,7 +1008,7 @@ static int hisi_sas_abort_task(struct sas_task *task)
struct domain_device *device = task->dev;
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
int rc = TMF_RESP_FUNC_FAILED;
unsigned long flags;
@@ -961,9 +1057,10 @@ static int hisi_sas_abort_task(struct sas_task *task)
if (task->dev->dev_type == SAS_SATA_DEV) {
hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
+ hisi_sas_dereg_device(hisi_hba, device);
rc = hisi_sas_softreset_ata_disk(device);
}
- } else if (task->task_proto & SAS_PROTOCOL_SMP) {
+ } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
/* SMP */
struct hisi_sas_slot *slot = task->lldd_task;
u32 tag = slot->idx;
@@ -1027,6 +1124,10 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
return TMF_RESP_FUNC_FAILED;
sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
+ hisi_sas_internal_task_abort(hisi_hba, device,
+ HISI_SAS_INT_ABT_DEV, 0);
+ hisi_sas_dereg_device(hisi_hba, device);
+
rc = hisi_sas_debug_I_T_nexus_reset(device);
if (rc == TMF_RESP_FUNC_COMPLETE) {
@@ -1041,7 +1142,7 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
unsigned long flags;
int rc = TMF_RESP_FUNC_FAILED;
@@ -1054,6 +1155,7 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
HISI_SAS_INT_ABT_DEV, 0);
if (rc == TMF_RESP_FUNC_FAILED)
goto out;
+ hisi_sas_dereg_device(hisi_hba, device);
phy = sas_get_local_phy(device);
@@ -1077,7 +1179,7 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
}
out:
if (rc != TMF_RESP_FUNC_COMPLETE)
- dev_err(dev, "lu_reset: for device[%llx]:rc= %d\n",
+ dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
sas_dev->device_id, rc);
return rc;
}
@@ -1124,19 +1226,20 @@ static int hisi_sas_query_task(struct sas_task *task)
}
static int
-hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
+hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
struct sas_task *task, int abort_flag,
int task_tag)
{
struct domain_device *device = task->dev;
struct hisi_sas_device *sas_dev = device->lldd_dev;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
struct hisi_sas_port *port;
struct hisi_sas_slot *slot;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_cmd_hdr *cmd_hdr_base;
+ struct hisi_sas_dq *dq = sas_dev->dq;
int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
- unsigned long flags;
+ unsigned long flags, flags_dq;
if (unlikely(test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)))
return -EINVAL;
@@ -1147,14 +1250,22 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
port = to_hisi_sas_port(sas_port);
/* simply get a slot and send abort command */
+ spin_lock_irqsave(&hisi_hba->lock, flags);
rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
- if (rc)
+ if (rc) {
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
goto err_out;
- rc = hisi_hba->hw->get_free_slot(hisi_hba, sas_dev->device_id,
- &dlvry_queue, &dlvry_queue_slot);
+ }
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+
+ spin_lock_irqsave(&dq->lock, flags_dq);
+ rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
if (rc)
goto err_out_tag;
+ dlvry_queue = dq->id;
+ dlvry_queue_slot = dq->wr_point;
+
slot = &hisi_hba->slot_info[slot_idx];
memset(slot, 0, sizeof(struct hisi_sas_slot));
@@ -1181,17 +1292,21 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
- hisi_hba->slot_prep = slot;
+ dq->slot_prep = slot;
atomic64_inc(&sas_dev->running_req);
- /* send abort command to our chip */
- hisi_hba->hw->start_delivery(hisi_hba);
+ /* send abort command to the chip */
+ hisi_hba->hw->start_delivery(dq);
+ spin_unlock_irqrestore(&dq->lock, flags_dq);
return 0;
err_out_tag:
+ spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_index_free(hisi_hba, slot_idx);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ spin_unlock_irqrestore(&dq->lock, flags_dq);
err_out:
dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
@@ -1214,9 +1329,8 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
{
struct sas_task *task;
struct hisi_sas_device *sas_dev = device->lldd_dev;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
int res;
- unsigned long flags;
if (!hisi_hba->hw->prep_abort)
return -EOPNOTSUPP;
@@ -1233,11 +1347,8 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
task->slow_task->timer.expires = jiffies + msecs_to_jiffies(110);
add_timer(&task->slow_task->timer);
- /* Lock as we are alloc'ing a slot, which cannot be interrupted */
- spin_lock_irqsave(&hisi_hba->lock, flags);
res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
task, abort_flag, tag);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
if (res) {
del_timer(&task->slow_task->timer);
dev_err(dev, "internal task abort: executing internal task failed: %d\n",
@@ -1247,6 +1358,17 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
wait_for_completion(&task->slow_task->completion);
res = TMF_RESP_FUNC_FAILED;
+ /* Internal abort timed out */
+ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+ struct hisi_sas_slot *slot = task->lldd_task;
+
+ if (slot)
+ slot->task = NULL;
+ dev_err(dev, "internal task abort: timeout.\n");
+ }
+ }
+
if (task->task_status.resp == SAS_TASK_COMPLETE &&
task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
res = TMF_RESP_FUNC_COMPLETE;
@@ -1259,13 +1381,6 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
goto exit;
}
- /* Internal abort timed out */
- if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- dev_err(dev, "internal task abort: timeout.\n");
- }
- }
-
exit:
dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
"resp: 0x%x sts 0x%x\n",
@@ -1353,9 +1468,10 @@ void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
}
EXPORT_SYMBOL_GPL(hisi_sas_rescan_topology);
-static struct scsi_transport_template *hisi_sas_stt;
+struct scsi_transport_template *hisi_sas_stt;
+EXPORT_SYMBOL_GPL(hisi_sas_stt);
-static struct scsi_host_template hisi_sas_sht = {
+static struct scsi_host_template _hisi_sas_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.queuecommand = sas_queuecommand,
@@ -1375,6 +1491,8 @@ static struct scsi_host_template hisi_sas_sht = {
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
};
+struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
+EXPORT_SYMBOL_GPL(hisi_sas_sht);
static struct sas_domain_function_template hisi_sas_transport_ops = {
.lldd_dev_found = hisi_sas_dev_found,
@@ -1422,10 +1540,9 @@ void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
-static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
+int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
{
- struct platform_device *pdev = hisi_hba->pdev;
- struct device *dev = &pdev->dev;
+ struct device *dev = hisi_hba->dev;
int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
spin_lock_init(&hisi_hba->lock);
@@ -1468,16 +1585,9 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
goto err_out;
}
- s = HISI_SAS_STATUS_BUF_SZ;
- hisi_hba->status_buffer_pool = dma_pool_create("status_buffer",
- dev, s, 16, 0);
- if (!hisi_hba->status_buffer_pool)
- goto err_out;
-
- s = HISI_SAS_COMMAND_TABLE_SZ;
- hisi_hba->command_table_pool = dma_pool_create("command_table",
- dev, s, 16, 0);
- if (!hisi_hba->command_table_pool)
+ s = sizeof(struct hisi_sas_slot_buf_table);
+ hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
+ if (!hisi_hba->buffer_pool)
goto err_out;
s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
@@ -1512,11 +1622,6 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
if (!hisi_hba->slot_index_tags)
goto err_out;
- hisi_hba->sge_page_pool = dma_pool_create("status_sge", dev,
- sizeof(struct hisi_sas_sge_page), 16, 0);
- if (!hisi_hba->sge_page_pool)
- goto err_out;
-
s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
&hisi_hba->initial_fis_dma, GFP_KERNEL);
@@ -1542,10 +1647,11 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
err_out:
return -ENOMEM;
}
+EXPORT_SYMBOL_GPL(hisi_sas_alloc);
-static void hisi_sas_free(struct hisi_hba *hisi_hba)
+void hisi_sas_free(struct hisi_hba *hisi_hba)
{
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
for (i = 0; i < hisi_hba->queue_count; i++) {
@@ -1562,9 +1668,7 @@ static void hisi_sas_free(struct hisi_hba *hisi_hba)
hisi_hba->complete_hdr_dma[i]);
}
- dma_pool_destroy(hisi_hba->status_buffer_pool);
- dma_pool_destroy(hisi_hba->command_table_pool);
- dma_pool_destroy(hisi_hba->sge_page_pool);
+ dma_pool_destroy(hisi_hba->buffer_pool);
s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
if (hisi_hba->itct)
@@ -1598,6 +1702,7 @@ static void hisi_sas_free(struct hisi_hba *hisi_hba)
if (hisi_hba->wq)
destroy_workqueue(hisi_hba->wq);
}
+EXPORT_SYMBOL_GPL(hisi_sas_free);
static void hisi_sas_rst_work_handler(struct work_struct *work)
{
@@ -1607,65 +1712,99 @@ static void hisi_sas_rst_work_handler(struct work_struct *work)
hisi_sas_controller_reset(hisi_hba);
}
-static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
- const struct hisi_sas_hw *hw)
+int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
- struct resource *res;
- struct Scsi_Host *shost;
- struct hisi_hba *hisi_hba;
- struct device *dev = &pdev->dev;
- struct device_node *np = pdev->dev.of_node;
+ struct device *dev = hisi_hba->dev;
+ struct platform_device *pdev = hisi_hba->platform_dev;
+ struct device_node *np = pdev ? pdev->dev.of_node : NULL;
struct clk *refclk;
- shost = scsi_host_alloc(&hisi_sas_sht, sizeof(*hisi_hba));
- if (!shost) {
- dev_err(dev, "scsi host alloc failed\n");
- return NULL;
- }
- hisi_hba = shost_priv(shost);
-
- INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
- hisi_hba->hw = hw;
- hisi_hba->pdev = pdev;
- hisi_hba->shost = shost;
- SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
-
- init_timer(&hisi_hba->timer);
-
if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
- SAS_ADDR_SIZE))
- goto err_out;
+ SAS_ADDR_SIZE)) {
+ dev_err(dev, "could not get property sas-addr\n");
+ return -ENOENT;
+ }
if (np) {
+ /*
+ * These properties are only required for platform device-based
+ * controllers with DT firmware.
+ */
hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
"hisilicon,sas-syscon");
- if (IS_ERR(hisi_hba->ctrl))
- goto err_out;
+ if (IS_ERR(hisi_hba->ctrl)) {
+ dev_err(dev, "could not get syscon\n");
+ return -ENOENT;
+ }
if (device_property_read_u32(dev, "ctrl-reset-reg",
- &hisi_hba->ctrl_reset_reg))
- goto err_out;
+ &hisi_hba->ctrl_reset_reg)) {
+ dev_err(dev,
+ "could not get property ctrl-reset-reg\n");
+ return -ENOENT;
+ }
if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
- &hisi_hba->ctrl_reset_sts_reg))
- goto err_out;
+ &hisi_hba->ctrl_reset_sts_reg)) {
+ dev_err(dev,
+ "could not get property ctrl-reset-sts-reg\n");
+ return -ENOENT;
+ }
if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
- &hisi_hba->ctrl_clock_ena_reg))
- goto err_out;
+ &hisi_hba->ctrl_clock_ena_reg)) {
+ dev_err(dev,
+ "could not get property ctrl-clock-ena-reg\n");
+ return -ENOENT;
+ }
}
- refclk = devm_clk_get(&pdev->dev, NULL);
+ refclk = devm_clk_get(dev, NULL);
if (IS_ERR(refclk))
dev_dbg(dev, "no ref clk property\n");
else
hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
- if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy))
- goto err_out;
+ if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
+ dev_err(dev, "could not get property phy-count\n");
+ return -ENOENT;
+ }
if (device_property_read_u32(dev, "queue-count",
- &hisi_hba->queue_count))
+ &hisi_hba->queue_count)) {
+ dev_err(dev, "could not get property queue-count\n");
+ return -ENOENT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
+
+static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
+ const struct hisi_sas_hw *hw)
+{
+ struct resource *res;
+ struct Scsi_Host *shost;
+ struct hisi_hba *hisi_hba;
+ struct device *dev = &pdev->dev;
+
+ shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
+ if (!shost) {
+ dev_err(dev, "scsi host alloc failed\n");
+ return NULL;
+ }
+ hisi_hba = shost_priv(shost);
+
+ INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
+ hisi_hba->hw = hw;
+ hisi_hba->dev = dev;
+ hisi_hba->platform_dev = pdev;
+ hisi_hba->shost = shost;
+ SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
+
+ init_timer(&hisi_hba->timer);
+
+ if (hisi_sas_get_fw_info(hisi_hba) < 0)
goto err_out;
if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
@@ -1691,7 +1830,7 @@ err_out:
return NULL;
}
-static void hisi_sas_init_add(struct hisi_hba *hisi_hba)
+void hisi_sas_init_add(struct hisi_hba *hisi_hba)
{
int i;
@@ -1700,6 +1839,7 @@ static void hisi_sas_init_add(struct hisi_hba *hisi_hba)
hisi_hba->sas_addr,
SAS_ADDR_SIZE);
}
+EXPORT_SYMBOL_GPL(hisi_sas_init_add);
int hisi_sas_probe(struct platform_device *pdev,
const struct hisi_sas_hw *hw)
@@ -1743,7 +1883,7 @@ int hisi_sas_probe(struct platform_device *pdev,
shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
sha->sas_ha_name = DRV_NAME;
- sha->dev = &hisi_hba->pdev->dev;
+ sha->dev = hisi_hba->dev;
sha->lldd_module = THIS_MODULE;
sha->sas_addr = &hisi_hba->sas_addr[0];
sha->num_phys = hisi_hba->n_phy;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index fc1c1b2c1a19..08eca20b0b81 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -505,7 +505,7 @@ static void setup_itct_v1_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
struct domain_device *device = sas_dev->sas_device;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
u64 qw0, device_id = sas_dev->device_id;
struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
struct asd_sas_port *sas_port = device->port;
@@ -571,7 +571,7 @@ static int reset_hw_v1_hw(struct hisi_hba *hisi_hba)
int i;
unsigned long end_time;
u32 val;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
for (i = 0; i < hisi_hba->n_phy; i++) {
u32 phy_ctrl = hisi_sas_phy_read32(hisi_hba, i, PHY_CTRL);
@@ -756,7 +756,7 @@ static void init_reg_v1_hw(struct hisi_hba *hisi_hba)
static int hw_init_v1_hw(struct hisi_hba *hisi_hba)
{
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
int rc;
rc = reset_hw_v1_hw(hisi_hba);
@@ -900,22 +900,17 @@ static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id)
return bitmap;
}
-/**
- * This function allocates across all queues to load balance.
- * Slots are allocated from queues in a round-robin fashion.
- *
+/*
 * The call path to this function, up to and including writing the
 * write queue pointer, should be safe from interruption.
*/
-static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, u32 dev_id,
- int *q, int *s)
+static int
+get_free_slot_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
{
- struct device *dev = &hisi_hba->pdev->dev;
- struct hisi_sas_dq *dq;
+ struct device *dev = hisi_hba->dev;
+ int queue = dq->id;
u32 r, w;
- int queue = dev_id % hisi_hba->queue_count;
- dq = &hisi_hba->dq[queue];
w = dq->wr_point;
r = hisi_sas_read32_relaxed(hisi_hba,
DLVRY_Q_0_RD_PTR + (queue * 0x14));
@@ -924,16 +919,14 @@ static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, u32 dev_id,
return -EAGAIN;
}
- *q = queue;
- *s = w;
return 0;
}
-static void start_delivery_v1_hw(struct hisi_hba *hisi_hba)
+static void start_delivery_v1_hw(struct hisi_sas_dq *dq)
{
- int dlvry_queue = hisi_hba->slot_prep->dlvry_queue;
- int dlvry_queue_slot = hisi_hba->slot_prep->dlvry_queue_slot;
- struct hisi_sas_dq *dq = &hisi_hba->dq[dlvry_queue];
+ struct hisi_hba *hisi_hba = dq->hisi_hba;
+ int dlvry_queue = dq->slot_prep->dlvry_queue;
+ int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot;
dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
@@ -946,7 +939,8 @@ static int prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba,
struct scatterlist *scatter,
int n_elem)
{
- struct device *dev = &hisi_hba->pdev->dev;
+ struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot);
+ struct device *dev = hisi_hba->dev;
struct scatterlist *sg;
int i;
@@ -956,13 +950,8 @@ static int prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba,
return -EINVAL;
}
- slot->sge_page = dma_pool_alloc(hisi_hba->sge_page_pool, GFP_ATOMIC,
- &slot->sge_page_dma);
- if (!slot->sge_page)
- return -ENOMEM;
-
for_each_sg(scatter, sg, n_elem, i) {
- struct hisi_sas_sge *entry = &slot->sge_page->sge[i];
+ struct hisi_sas_sge *entry = &sge_page->sge[i];
entry->addr = cpu_to_le64(sg_dma_address(sg));
entry->page_ctrl_0 = entry->page_ctrl_1 = 0;
@@ -970,7 +959,7 @@ static int prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba,
entry->data_off = 0;
}
- hdr->prd_table_addr = cpu_to_le64(slot->sge_page_dma);
+ hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));
hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
@@ -983,7 +972,7 @@ static int prep_smp_v1_hw(struct hisi_hba *hisi_hba,
struct sas_task *task = slot->task;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct domain_device *device = task->dev;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
struct hisi_sas_port *port = slot->port;
struct scatterlist *sg_req, *sg_resp;
struct hisi_sas_device *sas_dev = device->lldd_dev;
@@ -1033,7 +1022,7 @@ static int prep_smp_v1_hw(struct hisi_hba *hisi_hba,
hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);
hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
- hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
+ hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
return 0;
@@ -1114,10 +1103,11 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
}
hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
- hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma);
- hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
+ hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
+ hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
- buf_cmd = slot->command_table + sizeof(struct ssp_frame_hdr);
+ buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) +
+ sizeof(struct ssp_frame_hdr);
if (task->ssp_task.enable_first_burst) {
fburst = (1 << 7);
dw2 |= 1 << CMD_HDR_FIRST_BURST_OFF;
@@ -1154,8 +1144,9 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot)
{
struct task_status_struct *ts = &task->task_status;
- struct hisi_sas_err_record_v1 *err_record = slot->status_buffer;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct hisi_sas_err_record_v1 *err_record =
+ hisi_sas_status_buf_addr_mem(slot);
+ struct device *dev = hisi_hba->dev;
switch (task->task_proto) {
case SAS_PROTOCOL_SSP:
@@ -1281,7 +1272,7 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
{
struct sas_task *task = slot->task;
struct hisi_sas_device *sas_dev;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
struct task_status_struct *ts;
struct domain_device *device;
enum exec_status sts;
@@ -1371,8 +1362,11 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
switch (task->task_proto) {
case SAS_PROTOCOL_SSP:
{
- struct ssp_response_iu *iu = slot->status_buffer +
- sizeof(struct hisi_sas_err_record);
+ struct hisi_sas_status_buffer *status_buffer =
+ hisi_sas_status_buf_addr_mem(slot);
+ struct ssp_response_iu *iu = (struct ssp_response_iu *)
+ &status_buffer->iu[0];
+
sas_ssp_task_response(dev, task, iu);
break;
}
@@ -1389,7 +1383,7 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
DMA_TO_DEVICE);
memcpy(to + sg_resp->offset,
- slot->status_buffer +
+ hisi_sas_status_buf_addr_mem(slot) +
sizeof(struct hisi_sas_err_record),
sg_dma_len(sg_resp));
kunmap_atomic(to);
@@ -1430,7 +1424,7 @@ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p)
{
struct hisi_sas_phy *phy = p;
struct hisi_hba *hisi_hba = phy->hisi_hba;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
struct asd_sas_phy *sas_phy = &phy->sas_phy;
int i, phy_no = sas_phy->id;
u32 irq_value, context, port_id, link_rate;
@@ -1511,7 +1505,7 @@ static irqreturn_t int_bcast_v1_hw(int irq, void *p)
struct hisi_hba *hisi_hba = phy->hisi_hba;
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct sas_ha_struct *sha = &hisi_hba->sha;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
int phy_no = sas_phy->id;
u32 irq_value;
irqreturn_t res = IRQ_HANDLED;
@@ -1538,7 +1532,7 @@ static irqreturn_t int_abnormal_v1_hw(int irq, void *p)
{
struct hisi_sas_phy *phy = p;
struct hisi_hba *hisi_hba = phy->hisi_hba;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
struct asd_sas_phy *sas_phy = &phy->sas_phy;
u32 irq_value, irq_mask_old;
int phy_no = sas_phy->id;
@@ -1641,7 +1635,7 @@ static irqreturn_t cq_interrupt_v1_hw(int irq, void *p)
static irqreturn_t fatal_ecc_int_v1_hw(int irq, void *p)
{
struct hisi_hba *hisi_hba = p;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
u32 ecc_int = hisi_sas_read32(hisi_hba, SAS_ECC_INTR);
if (ecc_int & SAS_ECC_INTR_DQ_ECC1B_MSK) {
@@ -1700,7 +1694,7 @@ static irqreturn_t fatal_ecc_int_v1_hw(int irq, void *p)
static irqreturn_t fatal_axi_int_v1_hw(int irq, void *p)
{
struct hisi_hba *hisi_hba = p;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
u32 axi_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC2);
u32 axi_info = hisi_sas_read32(hisi_hba, HGC_AXI_FIFO_ERR_INFO);
@@ -1738,7 +1732,7 @@ static irq_handler_t fatal_interrupts[HISI_SAS_MAX_QUEUES] = {
static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
{
- struct platform_device *pdev = hisi_hba->pdev;
+ struct platform_device *pdev = hisi_hba->platform_dev;
struct device *dev = &pdev->dev;
int i, j, irq, rc, idx;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index e241921bee10..551d103c27f1 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -554,12 +554,6 @@ enum {
#define DIR_TO_DEVICE 2
#define DIR_RESERVED 3
-#define SATA_PROTOCOL_NONDATA 0x1
-#define SATA_PROTOCOL_PIO 0x2
-#define SATA_PROTOCOL_DMA 0x4
-#define SATA_PROTOCOL_FPDMA 0x8
-#define SATA_PROTOCOL_ATAPI 0x10
-
#define ERR_ON_TX_PHASE(err_phase) (err_phase == 0x2 || \
err_phase == 0x4 || err_phase == 0x8 ||\
err_phase == 0x6 || err_phase == 0xa)
@@ -659,7 +653,7 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
static bool sata_index_alloc_v2_hw(struct hisi_hba *hisi_hba, int *idx)
{
unsigned int index;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
void *bitmap = hisi_hba->sata_dev_bitmap;
index = find_first_zero_bit(bitmap, HISI_MAX_SATA_SUPPORT_V2_HW);
@@ -695,6 +689,9 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
if (sata_dev && (i & 1))
continue;
if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
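+ /*
+ * Bind the device to a delivery queue up front (round-robin on
+ * device id); get_free_slot_v2_hw() no longer picks a queue per
+ * command.
+ */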
+ int queue = i % hisi_hba->queue_count;
+ struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
+
hisi_hba->devices[i].device_id = i;
sas_dev = &hisi_hba->devices[i];
sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
@@ -702,6 +699,7 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
sas_dev->hisi_hba = hisi_hba;
sas_dev->sas_device = device;
sas_dev->sata_idx = sata_idx;
+ sas_dev->dq = dq;
INIT_LIST_HEAD(&hisi_hba->devices[i].list);
break;
}
@@ -756,7 +754,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
struct domain_device *device = sas_dev->sas_device;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
u64 qw0, device_id = sas_dev->device_id;
struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
struct domain_device *parent_dev = device->parent;
@@ -809,7 +807,7 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
u64 dev_id = sas_dev->device_id;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
int i;
@@ -853,7 +851,7 @@ static int reset_hw_v2_hw(struct hisi_hba *hisi_hba)
int i, reset_val;
u32 val;
unsigned long end_time;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
/* The mask needs to be set depending on the number of phys */
if (hisi_hba->n_phy == 9)
@@ -989,7 +987,7 @@ static void phys_try_accept_stp_links_v2_hw(struct hisi_hba *hisi_hba)
static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
{
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
int i;
/* Global registers init */
@@ -1170,7 +1168,7 @@ static void set_link_timer_quirk(struct hisi_hba *hisi_hba)
static int hw_init_v2_hw(struct hisi_hba *hisi_hba)
{
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
int rc;
rc = reset_hw_v2_hw(hisi_hba);
@@ -1219,7 +1217,7 @@ static bool tx_fifo_is_empty_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
static bool axi_bus_is_idle_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
int i, max_loop = 1000;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
u32 status, axi_status, dfx_val, dfx_tx_val;
for (i = 0; i < max_loop; i++) {
@@ -1245,7 +1243,7 @@ static bool axi_bus_is_idle_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
static bool wait_io_done_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
int i, max_loop = 1000;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
u32 status, tx_dfx0;
for (i = 0; i < max_loop; i++) {
@@ -1283,7 +1281,7 @@ static bool allowed_disable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
static void disable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
u32 cfg, axi_val, dfx0_val, txid_auto;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
/* Close axi bus. */
axi_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
@@ -1454,22 +1452,17 @@ static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
return bitmap;
}
-/**
- * This function allocates across all queues to load balance.
- * Slots are allocated from queues in a round-robin fashion.
- *
+/*
 * The call path to this function, up to and including writing the
 * write queue pointer, should be safe from interruption.
*/
-static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, u32 dev_id,
- int *q, int *s)
+static int
+get_free_slot_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
{
- struct device *dev = &hisi_hba->pdev->dev;
- struct hisi_sas_dq *dq;
+ struct device *dev = hisi_hba->dev;
+ int queue = dq->id;
u32 r, w;
- int queue = dev_id % hisi_hba->queue_count;
- dq = &hisi_hba->dq[queue];
w = dq->wr_point;
r = hisi_sas_read32_relaxed(hisi_hba,
DLVRY_Q_0_RD_PTR + (queue * 0x14));
@@ -1479,16 +1472,14 @@ static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, u32 dev_id,
return -EAGAIN;
}
- *q = queue;
- *s = w;
return 0;
}
-static void start_delivery_v2_hw(struct hisi_hba *hisi_hba)
+static void start_delivery_v2_hw(struct hisi_sas_dq *dq)
{
- int dlvry_queue = hisi_hba->slot_prep->dlvry_queue;
- int dlvry_queue_slot = hisi_hba->slot_prep->dlvry_queue_slot;
- struct hisi_sas_dq *dq = &hisi_hba->dq[dlvry_queue];
+ struct hisi_hba *hisi_hba = dq->hisi_hba;
+ int dlvry_queue = dq->slot_prep->dlvry_queue;
+ int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot;
dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
@@ -1501,7 +1492,8 @@ static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
struct scatterlist *scatter,
int n_elem)
{
- struct device *dev = &hisi_hba->pdev->dev;
+ struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot);
+ struct device *dev = hisi_hba->dev;
struct scatterlist *sg;
int i;
@@ -1511,13 +1503,8 @@ static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
return -EINVAL;
}
- slot->sge_page = dma_pool_alloc(hisi_hba->sge_page_pool, GFP_ATOMIC,
- &slot->sge_page_dma);
- if (!slot->sge_page)
- return -ENOMEM;
-
for_each_sg(scatter, sg, n_elem, i) {
- struct hisi_sas_sge *entry = &slot->sge_page->sge[i];
+ struct hisi_sas_sge *entry = &sge_page->sge[i];
entry->addr = cpu_to_le64(sg_dma_address(sg));
entry->page_ctrl_0 = entry->page_ctrl_1 = 0;
@@ -1525,7 +1512,7 @@ static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
entry->data_off = 0;
}
- hdr->prd_table_addr = cpu_to_le64(slot->sge_page_dma);
+ hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));
hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
@@ -1538,7 +1525,7 @@ static int prep_smp_v2_hw(struct hisi_hba *hisi_hba,
struct sas_task *task = slot->task;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct domain_device *device = task->dev;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
struct hisi_sas_port *port = slot->port;
struct scatterlist *sg_req, *sg_resp;
struct hisi_sas_device *sas_dev = device->lldd_dev;
@@ -1589,7 +1576,7 @@ static int prep_smp_v2_hw(struct hisi_hba *hisi_hba,
hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);
hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
- hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
+ hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
return 0;
@@ -1663,10 +1650,11 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
}
hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
- hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma);
- hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
+ hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
+ hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
- buf_cmd = slot->command_table + sizeof(struct ssp_frame_hdr);
+ buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) +
+ sizeof(struct ssp_frame_hdr);
memcpy(buf_cmd, &task->ssp_task.LUN, 8);
if (!is_tmf) {
@@ -1692,20 +1680,6 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
return 0;
}
-static void sata_done_v2_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
- struct hisi_sas_slot *slot)
-{
- struct task_status_struct *ts = &task->task_status;
- struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
- struct dev_to_host_fis *d2h = slot->status_buffer +
- sizeof(struct hisi_sas_err_record);
-
- resp->frame_len = sizeof(struct dev_to_host_fis);
- memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
-
- ts->buf_valid_size = sizeof(*resp);
-}
-
#define TRANS_TX_ERR 0
#define TRANS_RX_ERR 1
#define DMA_TX_ERR 2
@@ -1907,7 +1881,8 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
int err_phase)
{
struct task_status_struct *ts = &task->task_status;
- struct hisi_sas_err_record_v2 *err_record = slot->status_buffer;
+ struct hisi_sas_err_record_v2 *err_record =
+ hisi_sas_status_buf_addr_mem(slot);
u32 trans_tx_fail_type = cpu_to_le32(err_record->trans_tx_fail_type);
u32 trans_rx_fail_type = cpu_to_le32(err_record->trans_rx_fail_type);
u16 dma_tx_err_type = cpu_to_le16(err_record->dma_tx_err_type);
@@ -2198,7 +2173,7 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
break;
}
}
- sata_done_v2_hw(hisi_hba, task, slot);
+ hisi_sas_sata_done(task, slot);
}
break;
default:
@@ -2211,7 +2186,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_device *sas_dev;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
struct task_status_struct *ts;
struct domain_device *device;
enum exec_status sts;
@@ -2296,8 +2271,10 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
switch (task->task_proto) {
case SAS_PROTOCOL_SSP:
{
- struct ssp_response_iu *iu = slot->status_buffer +
- sizeof(struct hisi_sas_err_record);
+ struct hisi_sas_status_buffer *status_buffer =
+ hisi_sas_status_buf_addr_mem(slot);
+ struct ssp_response_iu *iu = (struct ssp_response_iu *)
+ &status_buffer->iu[0];
sas_ssp_task_response(dev, task, iu);
break;
@@ -2315,7 +2292,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
DMA_TO_DEVICE);
memcpy(to + sg_resp->offset,
- slot->status_buffer +
+ hisi_sas_status_buf_addr_mem(slot) +
sizeof(struct hisi_sas_err_record),
sg_dma_len(sg_resp));
kunmap_atomic(to);
@@ -2326,7 +2303,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
{
ts->stat = SAM_STAT_GOOD;
- sata_done_v2_hw(hisi_hba, task, slot);
+ hisi_sas_sata_done(task, slot);
break;
}
default:
@@ -2344,7 +2321,9 @@ out:
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
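+ /*
+ * The completion path now holds dq->lock rather than hisi_hba->lock,
+ * so take the hba lock explicitly around slot/tag recycling.
+ */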
+ spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_task_free(hisi_hba, task, slot);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
sts = ts->stat;
if (task->task_done)
@@ -2353,78 +2332,6 @@ out:
return sts;
}
-static u8 get_ata_protocol(u8 cmd, int direction)
-{
- switch (cmd) {
- case ATA_CMD_FPDMA_WRITE:
- case ATA_CMD_FPDMA_READ:
- case ATA_CMD_FPDMA_RECV:
- case ATA_CMD_FPDMA_SEND:
- case ATA_CMD_NCQ_NON_DATA:
- return SATA_PROTOCOL_FPDMA;
-
- case ATA_CMD_DOWNLOAD_MICRO:
- case ATA_CMD_ID_ATA:
- case ATA_CMD_PMP_READ:
- case ATA_CMD_READ_LOG_EXT:
- case ATA_CMD_PIO_READ:
- case ATA_CMD_PIO_READ_EXT:
- case ATA_CMD_PMP_WRITE:
- case ATA_CMD_WRITE_LOG_EXT:
- case ATA_CMD_PIO_WRITE:
- case ATA_CMD_PIO_WRITE_EXT:
- return SATA_PROTOCOL_PIO;
-
- case ATA_CMD_DSM:
- case ATA_CMD_DOWNLOAD_MICRO_DMA:
- case ATA_CMD_PMP_READ_DMA:
- case ATA_CMD_PMP_WRITE_DMA:
- case ATA_CMD_READ:
- case ATA_CMD_READ_EXT:
- case ATA_CMD_READ_LOG_DMA_EXT:
- case ATA_CMD_READ_STREAM_DMA_EXT:
- case ATA_CMD_TRUSTED_RCV_DMA:
- case ATA_CMD_TRUSTED_SND_DMA:
- case ATA_CMD_WRITE:
- case ATA_CMD_WRITE_EXT:
- case ATA_CMD_WRITE_FUA_EXT:
- case ATA_CMD_WRITE_QUEUED:
- case ATA_CMD_WRITE_LOG_DMA_EXT:
- case ATA_CMD_WRITE_STREAM_DMA_EXT:
- return SATA_PROTOCOL_DMA;
-
- case ATA_CMD_CHK_POWER:
- case ATA_CMD_DEV_RESET:
- case ATA_CMD_EDD:
- case ATA_CMD_FLUSH:
- case ATA_CMD_FLUSH_EXT:
- case ATA_CMD_VERIFY:
- case ATA_CMD_VERIFY_EXT:
- case ATA_CMD_SET_FEATURES:
- case ATA_CMD_STANDBY:
- case ATA_CMD_STANDBYNOW1:
- return SATA_PROTOCOL_NONDATA;
- default:
- if (direction == DMA_NONE)
- return SATA_PROTOCOL_NONDATA;
- return SATA_PROTOCOL_PIO;
- }
-}
-
-static int get_ncq_tag_v2_hw(struct sas_task *task, u32 *tag)
-{
- struct ata_queued_cmd *qc = task->uldd_task;
-
- if (qc) {
- if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
- qc->tf.command == ATA_CMD_FPDMA_READ) {
- *tag = qc->tag;
- return 1;
- }
- }
- return 0;
-}
-
static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot)
{
@@ -2465,13 +2372,14 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
(task->ata_task.fis.control & ATA_SRST))
dw1 |= 1 << CMD_HDR_RESET_OFF;
- dw1 |= (get_ata_protocol(task->ata_task.fis.command, task->data_dir))
+ dw1 |= (hisi_sas_get_ata_protocol(
+ task->ata_task.fis.command, task->data_dir))
<< CMD_HDR_FRAME_TYPE_OFF;
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
hdr->dw1 = cpu_to_le32(dw1);
/* dw2 */
- if (task->ata_task.use_ncq && get_ncq_tag_v2_hw(task, &hdr_tag)) {
+ if (task->ata_task.use_ncq && hisi_sas_get_ncq_tag(task, &hdr_tag)) {
task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
}
@@ -2490,12 +2398,11 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
return rc;
}
-
hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
- hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma);
- hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
+ hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
+ hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
- buf_cmd = slot->command_table;
+ buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot);
if (likely(!task->ata_task.device_control_reg_update))
task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
@@ -2578,7 +2485,7 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
u32 port_id, link_rate, hard_phy_linkrate;
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd;
@@ -2765,7 +2672,7 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
{
struct hisi_hba *hisi_hba = p;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
u32 ent_msk, ent_tmp, irq_msk;
int phy_no = 0;
@@ -2825,7 +2732,7 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
static void
one_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, u32 irq_value)
{
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
u32 reg_val;
if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF)) {
@@ -2914,7 +2821,7 @@ static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba,
u32 irq_value)
{
u32 reg_val;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF)) {
reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR);
@@ -3064,7 +2971,7 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
{
struct hisi_hba *hisi_hba = p;
u32 irq_value, irq_msk, err_value;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0xfffffffe);
@@ -3162,13 +3069,14 @@ static void cq_tasklet_v2_hw(unsigned long val)
struct hisi_sas_complete_v2_hdr *complete_queue;
u32 rd_point = cq->rd_point, wr_point, dev_id;
int queue = cq->id;
+ struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
if (unlikely(hisi_hba->reject_stp_links_msk))
phys_try_accept_stp_links_v2_hw(hisi_hba);
complete_queue = hisi_hba->complete_hdr[queue];
- spin_lock(&hisi_hba->lock);
+ spin_lock(&dq->lock);
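+ /*
+ * Completions are now serialized on the per-queue lock instead of
+ * the global hisi_hba lock.
+ */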
wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
(0x14 * queue));
@@ -3218,7 +3126,7 @@ static void cq_tasklet_v2_hw(unsigned long val)
/* update rd_point */
cq->rd_point = rd_point;
hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
- spin_unlock(&hisi_hba->lock);
+ spin_unlock(&dq->lock);
}
static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
@@ -3239,7 +3147,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
struct hisi_sas_phy *phy = p;
struct hisi_hba *hisi_hba = phy->hisi_hba;
struct asd_sas_phy *sas_phy = &phy->sas_phy;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
struct hisi_sas_initial_fis *initial_fis;
struct dev_to_host_fis *fis;
u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate;
@@ -3341,7 +3249,7 @@ static irq_handler_t fatal_interrupts[HISI_SAS_FATAL_INT_NR] = {
*/
static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
{
- struct platform_device *pdev = hisi_hba->pdev;
+ struct platform_device *pdev = hisi_hba->platform_dev;
struct device *dev = &pdev->dev;
int i, irq, rc, irq_map[128];
@@ -3455,7 +3363,7 @@ static int hisi_sas_v2_init(struct hisi_hba *hisi_hba)
static void interrupt_disable_v2_hw(struct hisi_hba *hisi_hba)
{
- struct platform_device *pdev = hisi_hba->pdev;
+ struct platform_device *pdev = hisi_hba->platform_dev;
int i;
for (i = 0; i < hisi_hba->queue_count; i++)
@@ -3477,7 +3385,7 @@ static void interrupt_disable_v2_hw(struct hisi_hba *hisi_hba)
static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
{
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
u32 old_state, state;
int rc, cnt;
int phy_no;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
new file mode 100644
index 000000000000..83d2dca1c650
--- /dev/null
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -0,0 +1,1846 @@
+/*
+ * Copyright (c) 2017 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include "hisi_sas.h"
+#define DRV_NAME "hisi_sas_v3_hw"
+
+/* global registers requiring init */
+#define DLVRY_QUEUE_ENABLE 0x0
+#define IOST_BASE_ADDR_LO 0x8
+#define IOST_BASE_ADDR_HI 0xc
+#define ITCT_BASE_ADDR_LO 0x10
+#define ITCT_BASE_ADDR_HI 0x14
+#define IO_BROKEN_MSG_ADDR_LO 0x18
+#define IO_BROKEN_MSG_ADDR_HI 0x1c
+#define PHY_CONTEXT 0x20
+#define PHY_STATE 0x24
+#define PHY_PORT_NUM_MA 0x28
+#define PHY_CONN_RATE 0x30
+#define AXI_AHB_CLK_CFG 0x3c
+#define ITCT_CLR 0x44
+#define ITCT_CLR_EN_OFF 16
+#define ITCT_CLR_EN_MSK (0x1 << ITCT_CLR_EN_OFF)
+#define ITCT_DEV_OFF 0
+#define ITCT_DEV_MSK (0x7ff << ITCT_DEV_OFF)
+#define AXI_USER1 0x48
+#define AXI_USER2 0x4c
+#define IO_SATA_BROKEN_MSG_ADDR_LO 0x58
+#define IO_SATA_BROKEN_MSG_ADDR_HI 0x5c
+#define SATA_INITI_D2H_STORE_ADDR_LO 0x60
+#define SATA_INITI_D2H_STORE_ADDR_HI 0x64
+#define CFG_MAX_TAG 0x68
+#define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84
+#define HGC_SAS_TXFAIL_RETRY_CTRL 0x88
+#define HGC_GET_ITV_TIME 0x90
+#define DEVICE_MSG_WORK_MODE 0x94
+#define OPENA_WT_CONTI_TIME 0x9c
+#define I_T_NEXUS_LOSS_TIME 0xa0
+#define MAX_CON_TIME_LIMIT_TIME 0xa4
+#define BUS_INACTIVE_LIMIT_TIME 0xa8
+#define REJECT_TO_OPEN_LIMIT_TIME 0xac
+#define CFG_AGING_TIME 0xbc
+#define HGC_DFX_CFG2 0xc0
+#define CFG_ABT_SET_QUERY_IPTT 0xd4
+#define CFG_SET_ABORTED_IPTT_OFF 0
+#define CFG_SET_ABORTED_IPTT_MSK (0xfff << CFG_SET_ABORTED_IPTT_OFF)
+#define CFG_SET_ABORTED_EN_OFF 12
+#define CFG_ABT_SET_IPTT_DONE 0xd8
+#define CFG_ABT_SET_IPTT_DONE_OFF 0
+#define HGC_IOMB_PROC1_STATUS 0x104
+#define CFG_1US_TIMER_TRSH 0xcc
+#define CHNL_INT_STATUS 0x148
+#define INT_COAL_EN 0x19c
+#define OQ_INT_COAL_TIME 0x1a0
+#define OQ_INT_COAL_CNT 0x1a4
+#define ENT_INT_COAL_TIME 0x1a8
+#define ENT_INT_COAL_CNT 0x1ac
+#define OQ_INT_SRC 0x1b0
+#define OQ_INT_SRC_MSK 0x1b4
+#define ENT_INT_SRC1 0x1b8
+#define ENT_INT_SRC1_D2H_FIS_CH0_OFF 0
+#define ENT_INT_SRC1_D2H_FIS_CH0_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF)
+#define ENT_INT_SRC1_D2H_FIS_CH1_OFF 8
+#define ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
+#define ENT_INT_SRC2 0x1bc
+#define ENT_INT_SRC3 0x1c0
+#define ENT_INT_SRC3_WP_DEPTH_OFF 8
+#define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF 9
+#define ENT_INT_SRC3_RP_DEPTH_OFF 10
+#define ENT_INT_SRC3_AXI_OFF 11
+#define ENT_INT_SRC3_FIFO_OFF 12
+#define ENT_INT_SRC3_LM_OFF 14
+#define ENT_INT_SRC3_ITC_INT_OFF 15
+#define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF)
+#define ENT_INT_SRC3_ABT_OFF 16
+#define ENT_INT_SRC_MSK1 0x1c4
+#define ENT_INT_SRC_MSK2 0x1c8
+#define ENT_INT_SRC_MSK3 0x1cc
+#define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31
+#define CHNL_PHYUPDOWN_INT_MSK 0x1d0
+#define CHNL_ENT_INT_MSK 0x1d4
+#define HGC_COM_INT_MSK 0x1d8
+#define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
+#define SAS_ECC_INTR 0x1e8
+#define SAS_ECC_INTR_MSK 0x1ec
+#define HGC_ERR_STAT_EN 0x238
+#define DLVRY_Q_0_BASE_ADDR_LO 0x260
+#define DLVRY_Q_0_BASE_ADDR_HI 0x264
+#define DLVRY_Q_0_DEPTH 0x268
+#define DLVRY_Q_0_WR_PTR 0x26c
+#define DLVRY_Q_0_RD_PTR 0x270
+#define HYPER_STREAM_ID_EN_CFG 0xc80
+#define OQ0_INT_SRC_MSK 0xc90
+#define COMPL_Q_0_BASE_ADDR_LO 0x4e0
+#define COMPL_Q_0_BASE_ADDR_HI 0x4e4
+#define COMPL_Q_0_DEPTH 0x4e8
+#define COMPL_Q_0_WR_PTR 0x4ec
+#define COMPL_Q_0_RD_PTR 0x4f0
+#define AWQOS_AWCACHE_CFG 0xc84
+#define ARQOS_ARCACHE_CFG 0xc88
+
+/* phy registers requiring init */
+#define PORT_BASE (0x2000)
+#define PHY_CFG (PORT_BASE + 0x0)
+#define HARD_PHY_LINKRATE (PORT_BASE + 0x4)
+#define PHY_CFG_ENA_OFF 0
+#define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF)
+#define PHY_CFG_DC_OPT_OFF 2
+#define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF)
+#define PROG_PHY_LINK_RATE (PORT_BASE + 0x8)
+#define PHY_CTRL (PORT_BASE + 0x14)
+#define PHY_CTRL_RESET_OFF 0
+#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF)
+#define SL_CFG (PORT_BASE + 0x84)
+#define SL_CONTROL (PORT_BASE + 0x94)
+#define SL_CONTROL_NOTIFY_EN_OFF 0
+#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
+#define SL_CTA_OFF 17
+#define SL_CTA_MSK (0x1 << SL_CTA_OFF)
+#define TX_ID_DWORD0 (PORT_BASE + 0x9c)
+#define TX_ID_DWORD1 (PORT_BASE + 0xa0)
+#define TX_ID_DWORD2 (PORT_BASE + 0xa4)
+#define TX_ID_DWORD3 (PORT_BASE + 0xa8)
+#define TX_ID_DWORD4 (PORT_BASE + 0xac)
+#define TX_ID_DWORD5 (PORT_BASE + 0xb0)
+#define TX_ID_DWORD6 (PORT_BASE + 0xb4)
+#define TXID_AUTO (PORT_BASE + 0xb8)
+#define CT3_OFF 1
+#define CT3_MSK (0x1 << CT3_OFF)
+#define TX_HARDRST_OFF 2
+#define TX_HARDRST_MSK (0x1 << TX_HARDRST_OFF)
+#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
+#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc)
+#define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134)
+#define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138)
+#define SAS_STP_CON_TIMER_CFG (PORT_BASE + 0x13c)
+#define CHL_INT0 (PORT_BASE + 0x1b4)
+#define CHL_INT0_HOTPLUG_TOUT_OFF 0
+#define CHL_INT0_HOTPLUG_TOUT_MSK (0x1 << CHL_INT0_HOTPLUG_TOUT_OFF)
+#define CHL_INT0_SL_RX_BCST_ACK_OFF 1
+#define CHL_INT0_SL_RX_BCST_ACK_MSK (0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF)
+#define CHL_INT0_SL_PHY_ENABLE_OFF 2
+#define CHL_INT0_SL_PHY_ENABLE_MSK (0x1 << CHL_INT0_SL_PHY_ENABLE_OFF)
+#define CHL_INT0_NOT_RDY_OFF 4
+#define CHL_INT0_NOT_RDY_MSK (0x1 << CHL_INT0_NOT_RDY_OFF)
+#define CHL_INT0_PHY_RDY_OFF 5
+#define CHL_INT0_PHY_RDY_MSK (0x1 << CHL_INT0_PHY_RDY_OFF)
+#define CHL_INT1 (PORT_BASE + 0x1b8)
+#define CHL_INT1_DMAC_TX_ECC_ERR_OFF 15
+#define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
+#define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17
+#define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
+#define CHL_INT2 (PORT_BASE + 0x1bc)
+#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
+#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
+#define CHL_INT2_MSK (PORT_BASE + 0x1c8)
+#define CHL_INT_COAL_EN (PORT_BASE + 0x1d0)
+#define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0)
+#define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4)
+#define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8)
+#define PHYCTRL_PHY_ENA_MSK (PORT_BASE + 0x2bc)
+#define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0)
+#define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4)
+
+/* HW dma structures */
+/* Delivery queue header */
+/* dw0 */
+#define CMD_HDR_ABORT_FLAG_OFF 0
+#define CMD_HDR_ABORT_FLAG_MSK (0x3 << CMD_HDR_ABORT_FLAG_OFF)
+#define CMD_HDR_ABORT_DEVICE_TYPE_OFF 2
+#define CMD_HDR_ABORT_DEVICE_TYPE_MSK (0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF)
+#define CMD_HDR_RESP_REPORT_OFF 5
+#define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF)
+#define CMD_HDR_TLR_CTRL_OFF 6
+#define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF)
+#define CMD_HDR_PORT_OFF 18
+#define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF)
+#define CMD_HDR_PRIORITY_OFF 27
+#define CMD_HDR_PRIORITY_MSK (0x1 << CMD_HDR_PRIORITY_OFF)
+#define CMD_HDR_CMD_OFF 29
+#define CMD_HDR_CMD_MSK (0x7 << CMD_HDR_CMD_OFF)
+/* dw1 */
+#define CMD_HDR_UNCON_CMD_OFF 3
+#define CMD_HDR_DIR_OFF 5
+#define CMD_HDR_DIR_MSK (0x3 << CMD_HDR_DIR_OFF)
+#define CMD_HDR_RESET_OFF 7
+#define CMD_HDR_RESET_MSK (0x1 << CMD_HDR_RESET_OFF)
+#define CMD_HDR_VDTL_OFF 10
+#define CMD_HDR_VDTL_MSK (0x1 << CMD_HDR_VDTL_OFF)
+#define CMD_HDR_FRAME_TYPE_OFF 11
+#define CMD_HDR_FRAME_TYPE_MSK (0x1f << CMD_HDR_FRAME_TYPE_OFF)
+#define CMD_HDR_DEV_ID_OFF 16
+#define CMD_HDR_DEV_ID_MSK (0xffff << CMD_HDR_DEV_ID_OFF)
+/* dw2 */
+#define CMD_HDR_CFL_OFF 0
+#define CMD_HDR_CFL_MSK (0x1ff << CMD_HDR_CFL_OFF)
+#define CMD_HDR_NCQ_TAG_OFF 10
+#define CMD_HDR_NCQ_TAG_MSK (0x1f << CMD_HDR_NCQ_TAG_OFF)
+#define CMD_HDR_MRFL_OFF 15
+#define CMD_HDR_MRFL_MSK (0x1ff << CMD_HDR_MRFL_OFF)
+#define CMD_HDR_SG_MOD_OFF 24
+#define CMD_HDR_SG_MOD_MSK (0x3 << CMD_HDR_SG_MOD_OFF)
+/* dw3 */
+#define CMD_HDR_IPTT_OFF 0
+#define CMD_HDR_IPTT_MSK (0xffff << CMD_HDR_IPTT_OFF)
+/* dw6 */
+#define CMD_HDR_DIF_SGL_LEN_OFF 0
+#define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF)
+#define CMD_HDR_DATA_SGL_LEN_OFF 16
+#define CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF)
+/* dw7 */
+#define CMD_HDR_ADDR_MODE_SEL_OFF 15
+#define CMD_HDR_ADDR_MODE_SEL_MSK (1 << CMD_HDR_ADDR_MODE_SEL_OFF)
+#define CMD_HDR_ABORT_IPTT_OFF 16
+#define CMD_HDR_ABORT_IPTT_MSK (0xffff << CMD_HDR_ABORT_IPTT_OFF)
+
+/* Completion header */
+/* dw0 */
+#define CMPLT_HDR_CMPLT_OFF 0
+#define CMPLT_HDR_CMPLT_MSK (0x3 << CMPLT_HDR_CMPLT_OFF)
+#define CMPLT_HDR_ERROR_PHASE_OFF 2
+#define CMPLT_HDR_ERROR_PHASE_MSK (0xff << CMPLT_HDR_ERROR_PHASE_OFF)
+#define CMPLT_HDR_RSPNS_XFRD_OFF 10
+#define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
+#define CMPLT_HDR_ERX_OFF 12
+#define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF)
+#define CMPLT_HDR_ABORT_STAT_OFF 13
+#define CMPLT_HDR_ABORT_STAT_MSK (0x7 << CMPLT_HDR_ABORT_STAT_OFF)
+/* abort_stat */
+#define STAT_IO_NOT_VALID 0x1
+#define STAT_IO_NO_DEVICE 0x2
+#define STAT_IO_COMPLETE 0x3
+#define STAT_IO_ABORTED 0x4
+/* dw1 */
+#define CMPLT_HDR_IPTT_OFF 0
+#define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF)
+#define CMPLT_HDR_DEV_ID_OFF 16
+#define CMPLT_HDR_DEV_ID_MSK (0xffff << CMPLT_HDR_DEV_ID_OFF)
+/* dw3 */
+#define CMPLT_HDR_IO_IN_TARGET_OFF 17
+#define CMPLT_HDR_IO_IN_TARGET_MSK (0x1 << CMPLT_HDR_IO_IN_TARGET_OFF)
+
+/* ITCT header */
+/* qw0 */
+#define ITCT_HDR_DEV_TYPE_OFF 0
+#define ITCT_HDR_DEV_TYPE_MSK (0x3 << ITCT_HDR_DEV_TYPE_OFF)
+#define ITCT_HDR_VALID_OFF 2
+#define ITCT_HDR_VALID_MSK (0x1 << ITCT_HDR_VALID_OFF)
+#define ITCT_HDR_MCR_OFF 5
+#define ITCT_HDR_MCR_MSK (0xf << ITCT_HDR_MCR_OFF)
+#define ITCT_HDR_VLN_OFF 9
+#define ITCT_HDR_VLN_MSK (0xf << ITCT_HDR_VLN_OFF)
+#define ITCT_HDR_SMP_TIMEOUT_OFF 16
+#define ITCT_HDR_AWT_CONTINUE_OFF 25
+#define ITCT_HDR_PORT_ID_OFF 28
+#define ITCT_HDR_PORT_ID_MSK (0xf << ITCT_HDR_PORT_ID_OFF)
+/* qw2 */
+#define ITCT_HDR_INLT_OFF 0
+#define ITCT_HDR_INLT_MSK (0xffffULL << ITCT_HDR_INLT_OFF)
+#define ITCT_HDR_RTOLT_OFF 48
+#define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF)
+
+struct hisi_sas_complete_v3_hdr {
+ __le32 dw0;
+ __le32 dw1;
+ __le32 act;
+ __le32 dw3;
+};
+
+struct hisi_sas_err_record_v3 {
+ /* dw0 */
+ __le32 trans_tx_fail_type;
+
+ /* dw1 */
+ __le32 trans_rx_fail_type;
+
+ /* dw2 */
+ __le16 dma_tx_err_type;
+ __le16 sipc_rx_err_type;
+
+ /* dw3 */
+ __le32 dma_rx_err_type;
+};
+
+#define RX_DATA_LEN_UNDERFLOW_OFF 6
+#define RX_DATA_LEN_UNDERFLOW_MSK (1 << RX_DATA_LEN_UNDERFLOW_OFF)
+
+#define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096
+#define HISI_SAS_MSI_COUNT_V3_HW 32
+
+enum {
+ HISI_SAS_PHY_PHY_UPDOWN,
+ HISI_SAS_PHY_CHNL_INT,
+ HISI_SAS_PHY_INT_NR
+};
+
+#define DIR_NO_DATA 0
+#define DIR_TO_INI 1
+#define DIR_TO_DEVICE 2
+#define DIR_RESERVED 3
+
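+/*
+ * ATA commands presumably sent with the unconstrained bit
+ * (CMD_HDR_UNCON_CMD_OFF) set in dw1 of the command header.
+ */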
+#define CMD_IS_UNCONSTRAINT(cmd) \
+	(((cmd) == ATA_CMD_READ_LOG_EXT) || \
+	((cmd) == ATA_CMD_READ_LOG_DMA_EXT) || \
+	((cmd) == ATA_CMD_DEV_RESET))
+
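+/*
+ * Register accessors: global registers live at hisi_hba->regs + off;
+ * per-phy registers are banked at a 0x400 stride above that.
+ */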
+static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
+{
+ void __iomem *regs = hisi_hba->regs + off;
+
+ return readl(regs);
+}
+
+static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off)
+{
+ void __iomem *regs = hisi_hba->regs + off;
+
+ return readl_relaxed(regs);
+}
+
+static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val)
+{
+ void __iomem *regs = hisi_hba->regs + off;
+
+ writel(val, regs);
+}
+
+static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no,
+ u32 off, u32 val)
+{
+ void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;
+
+ writel(val, regs);
+}
+
+static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
+ int phy_no, u32 off)
+{
+ void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;
+
+ return readl(regs);
+}
+
+static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
+{
+ int i;
+
+ /* Global registers init */
+ hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
+ (u32)((1ULL << hisi_hba->queue_count) - 1));
+ hisi_sas_write32(hisi_hba, AXI_USER1, 0x0);
+ hisi_sas_write32(hisi_hba, AXI_USER2, 0x40000060);
+ hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
+ hisi_sas_write32(hisi_hba, CFG_1US_TIMER_TRSH, 0xd);
+ hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
+ hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
+ hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
+ hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffff);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff);
+ hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
+ hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0);
+ hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0);
+ hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30);
+ hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0);
+ hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0);
+ for (i = 0; i < hisi_hba->queue_count; i++)
+ hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);
+
+ hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 1);
+ hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);
+ hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff07fff);
+
+ for (i = 0; i < hisi_hba->n_phy; i++) {
+ hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x801);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
+ hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
+ hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x83f801fc);
+ hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199b4fa);
+ hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG,
+ 0xa0064);
+ hisi_sas_phy_write32(hisi_hba, i, SAS_STP_CON_TIMER_CFG,
+ 0xa0064);
+ }
+ for (i = 0; i < hisi_hba->queue_count; i++) {
+ /* Delivery queue */
+ hisi_sas_write32(hisi_hba,
+ DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14),
+ upper_32_bits(hisi_hba->cmd_hdr_dma[i]));
+
+ hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14),
+ lower_32_bits(hisi_hba->cmd_hdr_dma[i]));
+
+ hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14),
+ HISI_SAS_QUEUE_SLOTS);
+
+ /* Completion queue */
+ hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14),
+ upper_32_bits(hisi_hba->complete_hdr_dma[i]));
+
+ hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14),
+ lower_32_bits(hisi_hba->complete_hdr_dma[i]));
+
+ hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14),
+ HISI_SAS_QUEUE_SLOTS);
+ }
+
+ /* itct */
+ hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO,
+ lower_32_bits(hisi_hba->itct_dma));
+
+ hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI,
+ upper_32_bits(hisi_hba->itct_dma));
+
+ /* iost */
+ hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO,
+ lower_32_bits(hisi_hba->iost_dma));
+
+ hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI,
+ upper_32_bits(hisi_hba->iost_dma));
+
+ /* breakpoint */
+ hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO,
+ lower_32_bits(hisi_hba->breakpoint_dma));
+
+ hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI,
+ upper_32_bits(hisi_hba->breakpoint_dma));
+
+ /* SATA broken msg */
+ hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO,
+ lower_32_bits(hisi_hba->sata_breakpoint_dma));
+
+ hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI,
+ upper_32_bits(hisi_hba->sata_breakpoint_dma));
+
+ /* SATA initial fis */
+ hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO,
+ lower_32_bits(hisi_hba->initial_fis_dma));
+
+ hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI,
+ upper_32_bits(hisi_hba->initial_fis_dma));
+}
+
+static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
+
+ cfg &= ~PHY_CFG_DC_OPT_MSK;
+ cfg |= 1 << PHY_CFG_DC_OPT_OFF;
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
+}
+
+static void config_id_frame_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ struct sas_identify_frame identify_frame;
+ u32 *identify_buffer;
+
+ memset(&identify_frame, 0, sizeof(identify_frame));
+ identify_frame.dev_type = SAS_END_DEVICE;
+ identify_frame.frame_type = 0;
+ identify_frame._un1 = 1;
+ identify_frame.initiator_bits = SAS_PROTOCOL_ALL;
+ identify_frame.target_bits = SAS_PROTOCOL_NONE;
+ memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
+ memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
+ identify_frame.phy_id = phy_no;
+ identify_buffer = (u32 *)(&identify_frame);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
+ __swab32(identify_buffer[0]));
+ hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
+ __swab32(identify_buffer[1]));
+ hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
+ __swab32(identify_buffer[2]));
+ hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
+ __swab32(identify_buffer[3]));
+ hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
+ __swab32(identify_buffer[4]));
+ hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
+ __swab32(identify_buffer[5]));
+}
+
+static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_device *sas_dev)
+{
+ struct domain_device *device = sas_dev->sas_device;
+ struct device *dev = hisi_hba->dev;
+ u64 qw0, device_id = sas_dev->device_id;
+ struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
+ struct domain_device *parent_dev = device->parent;
+ struct asd_sas_port *sas_port = device->port;
+ struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
+
+ memset(itct, 0, sizeof(*itct));
+
+ /* qw0 */
+ qw0 = 0;
+ switch (sas_dev->dev_type) {
+ case SAS_END_DEVICE:
+ case SAS_EDGE_EXPANDER_DEVICE:
+ case SAS_FANOUT_EXPANDER_DEVICE:
+ qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF;
+ break;
+ case SAS_SATA_DEV:
+ case SAS_SATA_PENDING:
+ if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+ qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
+ else
+ qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
+ break;
+ default:
+ dev_warn(dev, "setup itct: unsupported dev type (%d)\n",
+ sas_dev->dev_type);
+ }
+
+ qw0 |= ((1 << ITCT_HDR_VALID_OFF) |
+ (device->linkrate << ITCT_HDR_MCR_OFF) |
+ (1 << ITCT_HDR_VLN_OFF) |
+ (0xfa << ITCT_HDR_SMP_TIMEOUT_OFF) |
+ (1 << ITCT_HDR_AWT_CONTINUE_OFF) |
+ (port->id << ITCT_HDR_PORT_ID_OFF));
+ itct->qw0 = cpu_to_le64(qw0);
+
+ /* qw1 */
+ memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE);
+ itct->sas_addr = __swab64(itct->sas_addr);
+
+ /* qw2 */
+ if (!dev_is_sata(device))
+ itct->qw2 = cpu_to_le64((5000ULL << ITCT_HDR_INLT_OFF) |
+ (0x1ULL << ITCT_HDR_RTOLT_OFF));
+}
+
+static void free_device_v3_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_device *sas_dev)
+{
+ u64 dev_id = sas_dev->device_id;
+ struct device *dev = hisi_hba->dev;
+ struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
+ u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+
+ /* clear the itct interrupt state */
+ if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ ENT_INT_SRC3_ITC_INT_MSK);
+
+ /* clear the itct table */
+ reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
+ reg_val |= ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
+ hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
+
+ udelay(10);
+ reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+ if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) {
+ dev_dbg(dev, "got clear ITCT done interrupt\n");
+
+ /* invalidate the itct state */
+ memset(itct, 0, sizeof(struct hisi_sas_itct));
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ ENT_INT_SRC3_ITC_INT_MSK);
+ hisi_hba->devices[dev_id].dev_type = SAS_PHY_UNUSED;
+ hisi_hba->devices[dev_id].dev_status = HISI_SAS_DEV_NORMAL;
+
+ /* clear the itct */
+ hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
+ dev_dbg(dev, "clear ITCT ok\n");
+ }
+}
+
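+/*
+ * Mark each of the device's outstanding slots as aborted via
+ * CFG_ABT_SET_QUERY_IPTT, then latch the update through
+ * CFG_ABT_SET_IPTT_DONE.
+ */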
+static void dereg_device_v3_hw(struct hisi_hba *hisi_hba,
+ struct domain_device *device)
+{
+ struct hisi_sas_slot *slot, *slot2;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ u32 cfg_abt_set_query_iptt;
+
+ cfg_abt_set_query_iptt = hisi_sas_read32(hisi_hba,
+ CFG_ABT_SET_QUERY_IPTT);
+ list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) {
+ cfg_abt_set_query_iptt &= ~CFG_SET_ABORTED_IPTT_MSK;
+ cfg_abt_set_query_iptt |= (1 << CFG_SET_ABORTED_EN_OFF) |
+ (slot->idx << CFG_SET_ABORTED_IPTT_OFF);
+ hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT,
+ cfg_abt_set_query_iptt);
+ }
+ cfg_abt_set_query_iptt &= ~(1 << CFG_SET_ABORTED_EN_OFF);
+ hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT,
+ cfg_abt_set_query_iptt);
+ hisi_sas_write32(hisi_hba, CFG_ABT_SET_IPTT_DONE,
+ 1 << CFG_ABT_SET_IPTT_DONE_OFF);
+}
+
+static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
+{
+ init_reg_v3_hw(hisi_hba);
+
+ return 0;
+}
+
+static void enable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
+
+ cfg |= PHY_CFG_ENA_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
+}
+
+static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
+
+ cfg &= ~PHY_CFG_ENA_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
+}
+
+static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ config_id_frame_v3_hw(hisi_hba, phy_no);
+ config_phy_opt_mode_v3_hw(hisi_hba, phy_no);
+ enable_phy_v3_hw(hisi_hba, phy_no);
+}
+
+static void stop_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ disable_phy_v3_hw(hisi_hba, phy_no);
+}
+
+static void start_phys_v3_hw(struct hisi_hba *hisi_hba)
+{
+ int i;
+
+ for (i = 0; i < hisi_hba->n_phy; i++)
+ start_phy_v3_hw(hisi_hba, i);
+}
+
+static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ u32 txid_auto;
+
+ stop_phy_v3_hw(hisi_hba, phy_no);
+ if (phy->identify.device_type == SAS_END_DEVICE) {
+ txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
+ hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
+ txid_auto | TX_HARDRST_MSK);
+ }
+ msleep(100);
+ start_phy_v3_hw(hisi_hba, phy_no);
+}
+
+static enum sas_linkrate phy_get_max_linkrate_v3_hw(void)
+{
+ return SAS_LINK_RATE_12_0_GBPS;
+}
+
+static void phys_init_v3_hw(struct hisi_hba *hisi_hba)
+{
+ start_phys_v3_hw(hisi_hba);
+}
+
+static void sl_notify_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ u32 sl_control;
+
+ sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
+ sl_control |= SL_CONTROL_NOTIFY_EN_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
+ msleep(1);
+ sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
+ sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
+}
+
+static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id)
+{
+ int i, bitmap = 0;
+ u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
+
+ for (i = 0; i < hisi_hba->n_phy; i++)
+ if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
+ bitmap |= 1 << i;
+
+ return bitmap;
+}
+
+/*
+ * The call path to this function, up to and including writing the
+ * write queue pointer, should be safe from interruption.
+ */
+static int
+get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
+{
+ struct device *dev = hisi_hba->dev;
+ int queue = dq->id;
+ u32 r, w;
+
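+ /*
+ * The ring is full when advancing the write pointer would land on
+ * the read pointer, i.e. r == (w + 1) % HISI_SAS_QUEUE_SLOTS.
+ */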
+ w = dq->wr_point;
+ r = hisi_sas_read32_relaxed(hisi_hba,
+ DLVRY_Q_0_RD_PTR + (queue * 0x14));
+ if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
+ dev_warn(dev, "full queue=%d r=%d w=%d\n\n",
+ queue, r, w);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
+{
+ struct hisi_hba *hisi_hba = dq->hisi_hba;
+ int dlvry_queue = dq->slot_prep->dlvry_queue;
+ int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot;
+
+ dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
+ hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
+ dq->wr_point);
+}
+
+static int prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot,
+ struct hisi_sas_cmd_hdr *hdr,
+ struct scatterlist *scatter,
+ int n_elem)
+{
+ struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot);
+ struct device *dev = hisi_hba->dev;
+ struct scatterlist *sg;
+ int i;
+
+ if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
+ dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
+ n_elem);
+ return -EINVAL;
+ }
+
+ for_each_sg(scatter, sg, n_elem, i) {
+ struct hisi_sas_sge *entry = &sge_page->sge[i];
+
+ entry->addr = cpu_to_le64(sg_dma_address(sg));
+ entry->page_ctrl_0 = entry->page_ctrl_1 = 0;
+ entry->data_len = cpu_to_le32(sg_dma_len(sg));
+ entry->data_off = 0;
+ }
+
+ hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));
+
+ hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
+
+ return 0;
+}
+
+static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot, int is_tmf,
+ struct hisi_sas_tmf_task *tmf)
+{
+ struct sas_task *task = slot->task;
+ struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
+ struct domain_device *device = task->dev;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ struct hisi_sas_port *port = slot->port;
+ struct sas_ssp_task *ssp_task = &task->ssp_task;
+ struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
+ int has_data = 0, rc, priority = is_tmf;
+ u8 *buf_cmd;
+ u32 dw1 = 0, dw2 = 0;
+
+ hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) |
+ (2 << CMD_HDR_TLR_CTRL_OFF) |
+ (port->id << CMD_HDR_PORT_OFF) |
+ (priority << CMD_HDR_PRIORITY_OFF) |
+ (1 << CMD_HDR_CMD_OFF)); /* ssp */
+
+ dw1 = 1 << CMD_HDR_VDTL_OFF;
+ if (is_tmf) {
+ dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
+ dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
+ } else {
+ dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF;
+ switch (scsi_cmnd->sc_data_direction) {
+ case DMA_TO_DEVICE:
+ has_data = 1;
+ dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
+ break;
+ case DMA_FROM_DEVICE:
+ has_data = 1;
+ dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
+ break;
+ default:
+ dw1 &= ~CMD_HDR_DIR_MSK;
+ }
+ }
+
+ /* map itct entry */
+ dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
+ hdr->dw1 = cpu_to_le32(dw1);
+
+ dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)
+ + 3) / 4) << CMD_HDR_CFL_OFF) |
+ ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) |
+ (2 << CMD_HDR_SG_MOD_OFF);
+ hdr->dw2 = cpu_to_le32(dw2);
+ hdr->transfer_tags = cpu_to_le32(slot->idx);
+
+ if (has_data) {
+ rc = prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
+ slot->n_elem);
+ if (rc)
+ return rc;
+ }
+
+ hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
+ hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
+ hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
+
+ buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) +
+ sizeof(struct ssp_frame_hdr);
+
+ memcpy(buf_cmd, &task->ssp_task.LUN, 8);
+ if (!is_tmf) {
+ buf_cmd[9] = ssp_task->task_attr | (ssp_task->task_prio << 3);
+ memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
+ } else {
+ buf_cmd[10] = tmf->tmf;
+ switch (tmf->tmf) {
+ case TMF_ABORT_TASK:
+ case TMF_QUERY_TASK:
+ buf_cmd[12] =
+ (tmf->tag_of_task_to_be_managed >> 8) & 0xff;
+ buf_cmd[13] =
+ tmf->tag_of_task_to_be_managed & 0xff;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int prep_smp_v3_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot)
+{
+ struct sas_task *task = slot->task;
+ struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
+ struct domain_device *device = task->dev;
+ struct device *dev = hisi_hba->dev;
+ struct hisi_sas_port *port = slot->port;
+ struct scatterlist *sg_req, *sg_resp;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ dma_addr_t req_dma_addr;
+ unsigned int req_len, resp_len;
+ int elem, rc;
+
+ /*
+ * DMA-map SMP request, response buffers
+ */
+ /* req */
+ sg_req = &task->smp_task.smp_req;
+ elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE);
+ if (!elem)
+ return -ENOMEM;
+ req_len = sg_dma_len(sg_req);
+ req_dma_addr = sg_dma_address(sg_req);
+
+ /* resp */
+ sg_resp = &task->smp_task.smp_resp;
+ elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE);
+ if (!elem) {
+ rc = -ENOMEM;
+ goto err_out_req;
+ }
+ resp_len = sg_dma_len(sg_resp);
+ if ((req_len & 0x3) || (resp_len & 0x3)) {
+ rc = -EINVAL;
+ goto err_out_resp;
+ }
+
+ /* create header */
+ /* dw0 */
+ hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
+ (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */
+ (2 << CMD_HDR_CMD_OFF)); /* smp */
+
+ /* map itct entry */
+ hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) |
+ (1 << CMD_HDR_FRAME_TYPE_OFF) |
+ (DIR_NO_DATA << CMD_HDR_DIR_OFF));
+
+ /* dw2 */
+ hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) |
+ (HISI_SAS_MAX_SMP_RESP_SZ / 4 <<
+ CMD_HDR_MRFL_OFF));
+
+ hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);
+
+ hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
+ hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
+
+ return 0;
+
+err_out_resp:
+ dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1,
+ DMA_FROM_DEVICE);
+err_out_req:
+ dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1,
+ DMA_TO_DEVICE);
+ return rc;
+}
+
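+/*
+ * For FPDMA (NCQ) reads and writes, fetch the hardware queue tag from
+ * the libata queued command; returns 1 if a tag was retrieved.
+ */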
+static int get_ncq_tag_v3_hw(struct sas_task *task, u32 *tag)
+{
+ struct ata_queued_cmd *qc = task->uldd_task;
+
+ if (qc) {
+ if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
+ qc->tf.command == ATA_CMD_FPDMA_READ) {
+ *tag = qc->tag;
+ return 1;
+ }
+ }
+ return 0;
+}
+
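+/*
+ * Build a SATA/STP command header: command type 3 for devices behind an
+ * expander (STP) and 4 for direct-attached SATA, with the NCQ tag
+ * folded into both the FIS sector count and dw2 when applicable.
+ */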
+static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot)
+{
+ struct sas_task *task = slot->task;
+ struct domain_device *device = task->dev;
+ struct domain_device *parent_dev = device->parent;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
+ struct asd_sas_port *sas_port = device->port;
+ struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
+ u8 *buf_cmd;
+ int has_data = 0, rc = 0, hdr_tag = 0;
+ u32 dw1 = 0, dw2 = 0;
+
+ hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
+ if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+ hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
+ else
+ hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF);
+
+ switch (task->data_dir) {
+ case DMA_TO_DEVICE:
+ has_data = 1;
+ dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
+ break;
+ case DMA_FROM_DEVICE:
+ has_data = 1;
+ dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
+ break;
+ default:
+ dw1 &= ~CMD_HDR_DIR_MSK;
+ }
+
+ if ((task->ata_task.fis.command == ATA_CMD_DEV_RESET) &&
+ (task->ata_task.fis.control & ATA_SRST))
+ dw1 |= 1 << CMD_HDR_RESET_OFF;
+
+ dw1 |= (hisi_sas_get_ata_protocol(
+ task->ata_task.fis.command, task->data_dir))
+ << CMD_HDR_FRAME_TYPE_OFF;
+ dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
+
+ if (CMD_IS_UNCONSTRAINT(task->ata_task.fis.command))
+ dw1 |= 1 << CMD_HDR_UNCON_CMD_OFF;
+
+ hdr->dw1 = cpu_to_le32(dw1);
+
+ /* dw2 */
+ if (task->ata_task.use_ncq && get_ncq_tag_v3_hw(task, &hdr_tag)) {
+ task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
+ dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
+ }
+
+ dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF |
+ 2 << CMD_HDR_SG_MOD_OFF;
+ hdr->dw2 = cpu_to_le32(dw2);
+
+ /* dw3 */
+ hdr->transfer_tags = cpu_to_le32(slot->idx);
+
+ if (has_data) {
+ rc = prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
+ slot->n_elem);
+ if (rc)
+ return rc;
+ }
+
+ hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
+ hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
+ hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
+
+ buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot);
+
+ if (likely(!task->ata_task.device_control_reg_update))
+ task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
+ /* fill in command FIS */
+ memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
+
+ return 0;
+}
+
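+/*
+ * Build an internal abort command (command type 5): dw0 selects the
+ * device type and abort scope, dw1 the ITCT device id, and dw7 the
+ * IPTT of the command to be aborted.
+ */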
+static int prep_abort_v3_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot,
+ int device_id, int abort_flag, int tag_to_abort)
+{
+ struct sas_task *task = slot->task;
+ struct domain_device *dev = task->dev;
+ struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
+ struct hisi_sas_port *port = slot->port;
+
+ /* dw0 */
+ hdr->dw0 = cpu_to_le32((5 << CMD_HDR_CMD_OFF) | /*abort*/
+ (port->id << CMD_HDR_PORT_OFF) |
+ ((dev_is_sata(dev) ? 1 : 0)
+ << CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
+ (abort_flag
+ << CMD_HDR_ABORT_FLAG_OFF));
+
+ /* dw1 */
+ hdr->dw1 = cpu_to_le32(device_id
+ << CMD_HDR_DEV_ID_OFF);
+
+ /* dw7 */
+ hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
+ hdr->transfer_tags = cpu_to_le32(slot->idx);
+
+ return 0;
+}
+
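+/*
+ * Phy-up handling: read the attached port id and negotiated link rate,
+ * use PHY_CONTEXT to tell SATA from SAS, record the initial D2H FIS or
+ * the received IDENTIFY address frame, and queue the phyup work.
+ */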
+static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
+{
+ int i, res = IRQ_HANDLED;
+ u32 context, port_id, link_rate, hard_phy_linkrate;
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct device *dev = hisi_hba->dev;
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
+
+ port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
+ port_id = (port_id >> (4 * phy_no)) & 0xf;
+ link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
+ link_rate = (link_rate >> (phy_no * 4)) & 0xf;
+
+ if (port_id == 0xf) {
+ dev_err(dev, "phyup: phy%d invalid portid\n", phy_no);
+ res = IRQ_NONE;
+ goto end;
+ }
+ sas_phy->linkrate = link_rate;
+ hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no,
+ HARD_PHY_LINKRATE);
+ phy->maximum_linkrate = hard_phy_linkrate & 0xf;
+ phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf;
+ phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
+
+ /* Check for SATA dev */
+ context = hisi_sas_read32(hisi_hba, PHY_CONTEXT);
+ if (context & (1 << phy_no)) {
+ struct hisi_sas_initial_fis *initial_fis;
+ struct dev_to_host_fis *fis;
+ u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
+
+ dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
+ initial_fis = &hisi_hba->initial_fis[phy_no];
+ fis = &initial_fis->fis;
+ sas_phy->oob_mode = SATA_OOB_MODE;
+ attached_sas_addr[0] = 0x50;
+ attached_sas_addr[7] = phy_no;
+ memcpy(sas_phy->attached_sas_addr,
+ attached_sas_addr,
+ SAS_ADDR_SIZE);
+ memcpy(sas_phy->frame_rcvd, fis,
+ sizeof(struct dev_to_host_fis));
+ phy->phy_type |= PORT_TYPE_SATA;
+ phy->identify.device_type = SAS_SATA_DEV;
+ phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
+ phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
+ } else {
+ u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
+ struct sas_identify_frame *id =
+ (struct sas_identify_frame *)frame_rcvd;
+
+ dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
+ for (i = 0; i < 6; i++) {
+ u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no,
+ RX_IDAF_DWORD0 + (i * 4));
+ frame_rcvd[i] = __swab32(idaf);
+ }
+ sas_phy->oob_mode = SAS_OOB_MODE;
+ memcpy(sas_phy->attached_sas_addr,
+ &id->sas_addr,
+ SAS_ADDR_SIZE);
+ phy->phy_type |= PORT_TYPE_SAS;
+ phy->identify.device_type = id->dev_type;
+ phy->frame_rcvd_size = sizeof(struct sas_identify_frame);
+ if (phy->identify.device_type == SAS_END_DEVICE)
+ phy->identify.target_port_protocols =
+ SAS_PROTOCOL_SSP;
+ else if (phy->identify.device_type != SAS_PHY_UNUSED)
+ phy->identify.target_port_protocols =
+ SAS_PROTOCOL_SMP;
+ }
+
+ phy->port_id = port_id;
+ phy->phy_attached = 1;
+ queue_work(hisi_hba->wq, &phy->phyup_ws);
+
+end:
+ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
+ CHL_INT0_SL_PHY_ENABLE_MSK);
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);
+
+ return res;
+}
+
+static int phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
+{
+ int res = IRQ_HANDLED;
+ u32 phy_state, sl_ctrl, txid_auto;
+ struct device *dev = hisi_hba->dev;
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
+
+ phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
+ dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state);
+ hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & (1 << phy_no)) ? 1 : 0);
+
+ sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
+ hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL,
+ sl_ctrl & ~SL_CTA_MSK);
+
+ txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
+ hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
+ txid_auto | CT3_MSK);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK);
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0);
+
+ return res;
+}
+
+static void phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
+{
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
+ sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
+ CHL_INT0_SL_RX_BCST_ACK_MSK);
+ hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
+}
+
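+/*
+ * CHNL_INT_STATUS holds one nibble per phy; bit 0 of each nibble flags
+ * a phy event, so mask with 0x11111111 and walk the phys, dispatching
+ * phy up/down or broadcast according to CHL_INT0 and the current
+ * PHY_STATE.
+ */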
+static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p)
+{
+ struct hisi_hba *hisi_hba = p;
+ u32 irq_msk;
+ int phy_no = 0;
+ irqreturn_t res = IRQ_NONE;
+
+ irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
+ & 0x11111111;
+ while (irq_msk) {
+ if (irq_msk & 1) {
+ u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT0);
+ u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
+ int rdy = phy_state & (1 << phy_no);
+
+ if (rdy) {
+ if (irq_value & CHL_INT0_SL_PHY_ENABLE_MSK)
+ /* phy up */
+ if (phy_up_v3_hw(phy_no, hisi_hba)
+ == IRQ_HANDLED)
+ res = IRQ_HANDLED;
+ if (irq_value & CHL_INT0_SL_RX_BCST_ACK_MSK)
+ /* phy bcast */
+ phy_bcast_v3_hw(phy_no, hisi_hba);
+ } else {
+ if (irq_value & CHL_INT0_NOT_RDY_MSK)
+ /* phy down */
+ if (phy_down_v3_hw(phy_no, hisi_hba)
+ == IRQ_HANDLED)
+ res = IRQ_HANDLED;
+ }
+ }
+ irq_msk >>= 4;
+ phy_no++;
+ }
+
+ return res;
+}
+
+static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
+{
+ struct hisi_hba *hisi_hba = p;
+ struct device *dev = hisi_hba->dev;
+ u32 ent_msk, ent_tmp, irq_msk;
+ int phy_no = 0;
+
+ ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
+ ent_tmp = ent_msk;
+ ent_msk |= ENT_INT_SRC_MSK3_ENT95_MSK_MSK;
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_msk);
+
+ irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
+ & 0xeeeeeeee;
+
+ while (irq_msk) {
+ u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT0);
+ u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT1);
+ u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT2);
+
+ if ((irq_msk & (4 << (phy_no * 4))) &&
+ irq_value1) {
+ if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
+ CHL_INT1_DMAC_TX_ECC_ERR_MSK))
+ panic("%s: DMAC RX/TX ecc bad error! (0x%x)",
+ dev_name(dev), irq_value1);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT1, irq_value1);
+ }
+
+ if (irq_msk & (8 << (phy_no * 4)) && irq_value2)
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT2, irq_value2);
+
+ if (irq_msk & (2 << (phy_no * 4)) && irq_value0) {
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT0, irq_value0
+ & (~CHL_INT0_HOTPLUG_TOUT_MSK)
+ & (~CHL_INT0_SL_PHY_ENABLE_MSK)
+ & (~CHL_INT0_NOT_RDY_MSK));
+ }
+ irq_msk &= ~(0xe << (phy_no * 4));
+ phy_no++;
+ }
+
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_tmp);
+
+ return IRQ_HANDLED;
+}
+
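+/*
+ * Decode the hardware error record from the slot's status buffer into
+ * libsas status: an RX underflow becomes SAS_DATA_UNDERRUN (with the
+ * residual taken from the record), an IO still in the target marks the
+ * slot for abort, and anything else is reported as an open-reject.
+ */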
+static void
+slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
+ struct hisi_sas_slot *slot)
+{
+ struct task_status_struct *ts = &task->task_status;
+ struct hisi_sas_complete_v3_hdr *complete_queue =
+ hisi_hba->complete_hdr[slot->cmplt_queue];
+ struct hisi_sas_complete_v3_hdr *complete_hdr =
+ &complete_queue[slot->cmplt_queue_slot];
+ struct hisi_sas_err_record_v3 *record =
+ hisi_sas_status_buf_addr_mem(slot);
+ u32 dma_rx_err_type = record->dma_rx_err_type;
+ u32 trans_tx_fail_type = record->trans_tx_fail_type;
+
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SSP:
+ if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
+ ts->residual = trans_tx_fail_type;
+ ts->stat = SAS_DATA_UNDERRUN;
+ } else if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
+ ts->stat = SAS_QUEUE_FULL;
+ slot->abort = 1;
+ } else {
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ }
+ break;
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+ if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
+ ts->residual = trans_tx_fail_type;
+ ts->stat = SAS_DATA_UNDERRUN;
+ } else if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
+ ts->stat = SAS_PHY_DOWN;
+ slot->abort = 1;
+ } else {
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ }
+ hisi_sas_sata_done(task, slot);
+ break;
+ case SAS_PROTOCOL_SMP:
+ ts->stat = SAM_STAT_CHECK_CONDITION;
+ break;
+ default:
+ break;
+ }
+}
+
+static int
+slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
+{
+ struct sas_task *task = slot->task;
+ struct hisi_sas_device *sas_dev;
+ struct device *dev = hisi_hba->dev;
+ struct task_status_struct *ts;
+ struct domain_device *device;
+ enum exec_status sts;
+ struct hisi_sas_complete_v3_hdr *complete_queue =
+ hisi_hba->complete_hdr[slot->cmplt_queue];
+ struct hisi_sas_complete_v3_hdr *complete_hdr =
+ &complete_queue[slot->cmplt_queue_slot];
+ int aborted;
+ unsigned long flags;
+
+ if (unlikely(!task || !task->lldd_task || !task->dev))
+ return -EINVAL;
+
+ ts = &task->task_status;
+ device = task->dev;
+ sas_dev = device->lldd_dev;
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
+ task->task_state_flags &=
+ ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ memset(ts, 0, sizeof(*ts));
+ ts->resp = SAS_TASK_COMPLETE;
+ if (unlikely(aborted)) {
+ ts->stat = SAS_ABORTED_TASK;
+ hisi_sas_slot_task_free(hisi_hba, task, slot);
+ return -1;
+ }
+
+ if (unlikely(!sas_dev)) {
+ dev_dbg(dev, "slot complete: port has not device\n");
+ ts->stat = SAS_PHY_DOWN;
+ goto out;
+ }
+
+ /*
+ * Use SAS+TMF status codes
+ */
+ switch ((complete_hdr->dw0 & CMPLT_HDR_ABORT_STAT_MSK)
+ >> CMPLT_HDR_ABORT_STAT_OFF) {
+ case STAT_IO_ABORTED:
+ /* this IO has been aborted by abort command */
+ ts->stat = SAS_ABORTED_TASK;
+ goto out;
+ case STAT_IO_COMPLETE:
+ /* internal abort command complete */
+ ts->stat = TMF_RESP_FUNC_SUCC;
+ goto out;
+ case STAT_IO_NO_DEVICE:
+ ts->stat = TMF_RESP_FUNC_COMPLETE;
+ goto out;
+ case STAT_IO_NOT_VALID:
+ /*
+ * abort single IO, the controller can't find the IO
+ */
+ ts->stat = TMF_RESP_FUNC_FAILED;
+ goto out;
+ default:
+ break;
+ }
+
+ /* check for erroneous completion */
+ if ((complete_hdr->dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
+ slot_err_v3_hw(hisi_hba, task, slot);
+ if (unlikely(slot->abort))
+ return ts->stat;
+ goto out;
+ }
+
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SSP: {
+ struct ssp_response_iu *iu =
+ hisi_sas_status_buf_addr_mem(slot) +
+ sizeof(struct hisi_sas_err_record);
+
+ sas_ssp_task_response(dev, task, iu);
+ break;
+ }
+ case SAS_PROTOCOL_SMP: {
+ struct scatterlist *sg_resp = &task->smp_task.smp_resp;
+ void *to;
+
+ ts->stat = SAM_STAT_GOOD;
+ to = kmap_atomic(sg_page(sg_resp));
+
+ dma_unmap_sg(dev, &task->smp_task.smp_resp, 1,
+ DMA_FROM_DEVICE);
+ dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
+ DMA_TO_DEVICE);
+ memcpy(to + sg_resp->offset,
+ hisi_sas_status_buf_addr_mem(slot) +
+ sizeof(struct hisi_sas_err_record),
+ sg_dma_len(sg_resp));
+ kunmap_atomic(to);
+ break;
+ }
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+ ts->stat = SAM_STAT_GOOD;
+ hisi_sas_sata_done(task, slot);
+ break;
+ default:
+ ts->stat = SAM_STAT_CHECK_CONDITION;
+ break;
+ }
+
+ if (!slot->port->port_attached) {
+ dev_err(dev, "slot complete: port %d has removed\n",
+ slot->port->sas_port.id);
+ ts->stat = SAS_PHY_DOWN;
+ }
+
+out:
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ spin_lock_irqsave(&hisi_hba->lock, flags);
+ hisi_sas_slot_task_free(hisi_hba, task, slot);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ sts = ts->stat;
+
+ if (task->task_done)
+ task->task_done(task);
+
+ return sts;
+}
+
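+/*
+ * Consume completions between the cached read pointer and the hardware
+ * write pointer. For NCQ the 'act' field is a bitmap of completed
+ * commands whose 12-bit IPTTs are packed five per quadword in the
+ * device's ITCT entry; otherwise the IPTT is taken directly from dw1.
+ */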
+static void cq_tasklet_v3_hw(unsigned long val)
+{
+ struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
+ struct hisi_hba *hisi_hba = cq->hisi_hba;
+ struct hisi_sas_slot *slot;
+ struct hisi_sas_itct *itct;
+ struct hisi_sas_complete_v3_hdr *complete_queue;
+ u32 rd_point = cq->rd_point, wr_point, dev_id;
+ int queue = cq->id;
+ struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
+
+ complete_queue = hisi_hba->complete_hdr[queue];
+
+ spin_lock(&dq->lock);
+ wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
+ (0x14 * queue));
+
+ while (rd_point != wr_point) {
+ struct hisi_sas_complete_v3_hdr *complete_hdr;
+ int iptt;
+
+ complete_hdr = &complete_queue[rd_point];
+
+ /* Check for NCQ completion */
+ if (complete_hdr->act) {
+ u32 act_tmp = complete_hdr->act;
+ int ncq_tag_count = ffs(act_tmp);
+
+ dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >>
+ CMPLT_HDR_DEV_ID_OFF;
+ itct = &hisi_hba->itct[dev_id];
+
+ /* The NCQ tags are held in the itct header */
+ while (ncq_tag_count) {
+ __le64 *ncq_tag = &itct->qw4_15[0];
+
+ ncq_tag_count -= 1;
+ iptt = (ncq_tag[ncq_tag_count / 5]
+ >> (ncq_tag_count % 5) * 12) & 0xfff;
+
+ slot = &hisi_hba->slot_info[iptt];
+ slot->cmplt_queue_slot = rd_point;
+ slot->cmplt_queue = queue;
+ slot_complete_v3_hw(hisi_hba, slot);
+
+ act_tmp &= ~(1 << ncq_tag_count);
+ ncq_tag_count = ffs(act_tmp);
+ }
+ } else {
+ iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
+ slot = &hisi_hba->slot_info[iptt];
+ slot->cmplt_queue_slot = rd_point;
+ slot->cmplt_queue = queue;
+ slot_complete_v3_hw(hisi_hba, slot);
+ }
+
+ if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
+ rd_point = 0;
+ }
+
+ /* update rd_point */
+ cq->rd_point = rd_point;
+ hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
+ spin_unlock(&dq->lock);
+}
+
+static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
+{
+ struct hisi_sas_cq *cq = p;
+ struct hisi_hba *hisi_hba = cq->hisi_hba;
+ int queue = cq->id;
+
+ hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
+
+ tasklet_schedule(&cq->tasklet);
+
+ return IRQ_HANDLED;
+}
+
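+/*
+ * MSI layout used here: vector 1 services phy up/down/broadcast,
+ * vector 2 the channel interrupts, and vectors 16 onwards one
+ * completion queue each (vectors 0 and 3-15 are not requested).
+ */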
+static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
+{
+ struct device *dev = hisi_hba->dev;
+ struct pci_dev *pdev = hisi_hba->pci_dev;
+ int vectors, rc;
+ int i, k;
+ int max_msi = HISI_SAS_MSI_COUNT_V3_HW;
+
+ vectors = pci_alloc_irq_vectors(hisi_hba->pci_dev, 1,
+ max_msi, PCI_IRQ_MSI);
+ if (vectors < max_msi) {
+ dev_err(dev, "could not allocate all msi (%d)\n", vectors);
+ return -ENOENT;
+ }
+
+ rc = devm_request_irq(dev, pci_irq_vector(pdev, 1),
+ int_phy_up_down_bcast_v3_hw, 0,
+ DRV_NAME " phy", hisi_hba);
+ if (rc) {
+ dev_err(dev, "could not request phy interrupt, rc=%d\n", rc);
+ rc = -ENOENT;
+ goto free_irq_vectors;
+ }
+
+ rc = devm_request_irq(dev, pci_irq_vector(pdev, 2),
+ int_chnl_int_v3_hw, 0,
+ DRV_NAME " channel", hisi_hba);
+ if (rc) {
+ dev_err(dev, "could not request chnl interrupt, rc=%d\n", rc);
+ rc = -ENOENT;
+ goto free_phy_irq;
+ }
+
+ /* Init tasklets for cq only */
+ for (i = 0; i < hisi_hba->queue_count; i++) {
+ struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+ struct tasklet_struct *t = &cq->tasklet;
+
+ rc = devm_request_irq(dev, pci_irq_vector(pdev, i+16),
+ cq_interrupt_v3_hw, 0,
+ DRV_NAME " cq", cq);
+ if (rc) {
+ dev_err(dev,
+ "could not request cq%d interrupt, rc=%d\n",
+ i, rc);
+ rc = -ENOENT;
+ goto free_cq_irqs;
+ }
+
+ tasklet_init(t, cq_tasklet_v3_hw, (unsigned long)cq);
+ }
+
+ return 0;
+
+free_cq_irqs:
+ for (k = 0; k < i; k++) {
+ struct hisi_sas_cq *cq = &hisi_hba->cq[k];
+
+ free_irq(pci_irq_vector(pdev, k+16), cq);
+ }
+ free_irq(pci_irq_vector(pdev, 2), hisi_hba);
+free_phy_irq:
+ free_irq(pci_irq_vector(pdev, 1), hisi_hba);
+free_irq_vectors:
+ pci_free_irq_vectors(pdev);
+ return rc;
+}
+
+static int hisi_sas_v3_init(struct hisi_hba *hisi_hba)
+{
+ int rc;
+
+ rc = hw_init_v3_hw(hisi_hba);
+ if (rc)
+ return rc;
+
+ rc = interrupt_init_v3_hw(hisi_hba);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static const struct hisi_sas_hw hisi_sas_v3_hw = {
+ .hw_init = hisi_sas_v3_init,
+ .setup_itct = setup_itct_v3_hw,
+ .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW,
+ .get_wideport_bitmap = get_wideport_bitmap_v3_hw,
+ .complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr),
+ .free_device = free_device_v3_hw,
+ .sl_notify = sl_notify_v3_hw,
+ .prep_ssp = prep_ssp_v3_hw,
+ .prep_smp = prep_smp_v3_hw,
+ .prep_stp = prep_ata_v3_hw,
+ .prep_abort = prep_abort_v3_hw,
+ .get_free_slot = get_free_slot_v3_hw,
+ .start_delivery = start_delivery_v3_hw,
+ .slot_complete = slot_complete_v3_hw,
+ .phys_init = phys_init_v3_hw,
+ .phy_enable = enable_phy_v3_hw,
+ .phy_disable = disable_phy_v3_hw,
+ .phy_hard_reset = phy_hard_reset_v3_hw,
+ .phy_get_max_linkrate = phy_get_max_linkrate_v3_hw,
+ .dereg_device = dereg_device_v3_hw,
+};
+
+static struct Scsi_Host *
+hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost;
+ struct hisi_hba *hisi_hba;
+ struct device *dev = &pdev->dev;
+
+ shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
+ if (!shost)
+ goto err_out;
+ hisi_hba = shost_priv(shost);
+
+ hisi_hba->hw = &hisi_sas_v3_hw;
+ hisi_hba->pci_dev = pdev;
+ hisi_hba->dev = dev;
+ hisi_hba->shost = shost;
+ SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
+
+ init_timer(&hisi_hba->timer);
+
+ if (hisi_sas_get_fw_info(hisi_hba) < 0)
+ goto err_out;
+
+ if (hisi_sas_alloc(hisi_hba, shost)) {
+ hisi_sas_free(hisi_hba);
+ goto err_out;
+ }
+
+ return shost;
+err_out:
+ dev_err(dev, "shost alloc failed\n");
+ return NULL;
+}
+
+static int
+hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct Scsi_Host *shost;
+ struct hisi_hba *hisi_hba;
+ struct device *dev = &pdev->dev;
+ struct asd_sas_phy **arr_phy;
+ struct asd_sas_port **arr_port;
+ struct sas_ha_struct *sha;
+ int rc, phy_nr, port_nr, i;
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ goto err_out;
+
+ pci_set_master(pdev);
+
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc)
+ goto err_out_disable_device;
+
+ if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
+ (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
+ if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
+ (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
+ dev_err(dev, "No usable DMA addressing method\n");
+ rc = -EIO;
+ goto err_out_regions;
+ }
+ }
+
+ shost = hisi_sas_shost_alloc_pci(pdev);
+ if (!shost) {
+ rc = -ENOMEM;
+ goto err_out_regions;
+ }
+
+ sha = SHOST_TO_SAS_HA(shost);
+ hisi_hba = shost_priv(shost);
+ dev_set_drvdata(dev, sha);
+
+ hisi_hba->regs = pcim_iomap(pdev, 5, 0);
+ if (!hisi_hba->regs) {
+ dev_err(dev, "cannot map register.\n");
+ rc = -ENOMEM;
+ goto err_out_ha;
+ }
+
+ phy_nr = port_nr = hisi_hba->n_phy;
+
+ arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
+ arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
+ if (!arr_phy || !arr_port) {
+ rc = -ENOMEM;
+ goto err_out_ha;
+ }
+
+ sha->sas_phy = arr_phy;
+ sha->sas_port = arr_port;
+ sha->core.shost = shost;
+ sha->lldd_ha = hisi_hba;
+
+ shost->transportt = hisi_sas_stt;
+ shost->max_id = HISI_SAS_MAX_DEVICES;
+ shost->max_lun = ~0;
+ shost->max_channel = 1;
+ shost->max_cmd_len = 16;
+ shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
+ shost->can_queue = hisi_hba->hw->max_command_entries;
+ shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
+
+ sha->sas_ha_name = DRV_NAME;
+ sha->dev = dev;
+ sha->lldd_module = THIS_MODULE;
+ sha->sas_addr = &hisi_hba->sas_addr[0];
+ sha->num_phys = hisi_hba->n_phy;
+ sha->core.shost = hisi_hba->shost;
+
+ for (i = 0; i < hisi_hba->n_phy; i++) {
+ sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
+ sha->sas_port[i] = &hisi_hba->port[i].sas_port;
+ }
+
+ hisi_sas_init_add(hisi_hba);
+
+ rc = scsi_add_host(shost, dev);
+ if (rc)
+ goto err_out_ha;
+
+ rc = sas_register_ha(sha);
+ if (rc)
+ goto err_out_register_ha;
+
+ rc = hisi_hba->hw->hw_init(hisi_hba);
+ if (rc)
+ goto err_out_register_ha;
+
+ scsi_scan_host(shost);
+
+ return 0;
+
+err_out_register_ha:
+ scsi_remove_host(shost);
+err_out_ha:
+ scsi_host_put(shost);
+err_out_regions:
+ pci_release_regions(pdev);
+err_out_disable_device:
+ pci_disable_device(pdev);
+err_out:
+ return rc;
+}
+
+static void
+hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
+{
+ int i;
+
+ free_irq(pci_irq_vector(pdev, 1), hisi_hba);
+ free_irq(pci_irq_vector(pdev, 2), hisi_hba);
+ for (i = 0; i < hisi_hba->queue_count; i++) {
+ struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+
+ free_irq(pci_irq_vector(pdev, i+16), cq);
+ }
+ pci_free_irq_vectors(pdev);
+}
+
+static void hisi_sas_v3_remove(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sas_ha_struct *sha = dev_get_drvdata(dev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+
+ sas_unregister_ha(sha);
+ sas_remove_host(sha->core.shost);
+
+ hisi_sas_free(hisi_hba);
+ hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+enum {
+ /* instances of the controller */
+ hip08,
+};
+
+static const struct pci_device_id sas_v3_pci_table[] = {
+ { PCI_VDEVICE(HUAWEI, 0xa230), hip08 },
+ {}
+};
+
+static struct pci_driver sas_v3_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = sas_v3_pci_table,
+ .probe = hisi_sas_v3_probe,
+ .remove = hisi_sas_v3_remove,
+};
+
+module_pci_driver(sas_v3_pci_driver);
+
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
+MODULE_DESCRIPTION("HISILICON SAS controller v3 hw driver based on pci device");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 73daace478cb..8914eab84337 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -60,7 +60,7 @@
* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
* with an optional trailing '-' followed by a byte value (0-255).
*/
-#define HPSA_DRIVER_VERSION "3.4.18-0"
+#define HPSA_DRIVER_VERSION "3.4.20-0"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
@@ -258,7 +258,6 @@ static int hpsa_scan_finished(struct Scsi_Host *sh,
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
-static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);
@@ -326,7 +325,7 @@ static inline bool hpsa_is_cmd_idle(struct CommandList *c)
static inline bool hpsa_is_pending_event(struct CommandList *c)
{
- return c->abort_pending || c->reset_pending;
+ return c->reset_pending;
}
/* extract sense key, asc, and ascq from sense data. -1 means invalid. */
@@ -581,12 +580,6 @@ static u32 soft_unresettable_controller[] = {
0x409D0E11, /* Smart Array 6400 EM */
};
-static u32 needs_abort_tags_swizzled[] = {
- 0x323D103C, /* Smart Array P700m */
- 0x324a103C, /* Smart Array P712m */
- 0x324b103C, /* SmartArray P711m */
-};
-
static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
int i;
@@ -615,12 +608,6 @@ static int ctlr_is_resettable(u32 board_id)
ctlr_is_soft_resettable(board_id);
}
-static int ctlr_needs_abort_tags_swizzled(u32 board_id)
-{
- return board_id_in_array(needs_abort_tags_swizzled,
- ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
-}
-
static ssize_t host_show_resettable(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -928,8 +915,8 @@ static struct device_attribute *hpsa_shost_attrs[] = {
NULL,
};
-#define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_ABORTS + \
- HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)
+#define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_DRIVER +\
+ HPSA_MAX_CONCURRENT_PASSTHRUS)
static struct scsi_host_template hpsa_driver_template = {
.module = THIS_MODULE,
@@ -941,7 +928,6 @@ static struct scsi_host_template hpsa_driver_template = {
.change_queue_depth = hpsa_change_queue_depth,
.this_id = -1,
.use_clustering = ENABLE_CLUSTERING,
- .eh_abort_handler = hpsa_eh_abort_handler,
.eh_device_reset_handler = hpsa_eh_device_reset_handler,
.ioctl = hpsa_ioctl,
.slave_alloc = hpsa_slave_alloc,
@@ -1110,6 +1096,7 @@ static int is_firmware_flash_cmd(u8 *cdb)
*/
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
+#define HPSA_EVENT_MONITOR_INTERVAL (15 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
struct CommandList *c)
{
@@ -1859,10 +1846,13 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h,
* A reset can cause a device status to change
* re-schedule the scan to see what happened.
*/
+ spin_lock_irqsave(&h->reset_lock, flags);
if (h->reset_in_progress) {
h->drv_req_rescan = 1;
+ spin_unlock_irqrestore(&h->reset_lock, flags);
return;
}
+ spin_unlock_irqrestore(&h->reset_lock, flags);
added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
@@ -2066,10 +2056,13 @@ static int hpsa_slave_configure(struct scsi_device *sdev)
sd = sdev->hostdata;
sdev->no_uld_attach = !sd || !sd->expose_device;
- if (sd)
- queue_depth = sd->queue_depth != 0 ?
- sd->queue_depth : sdev->host->can_queue;
- else
+ if (sd) {
+ if (sd->external)
+ queue_depth = EXTERNAL_QD;
+ else
+ queue_depth = sd->queue_depth != 0 ?
+ sd->queue_depth : sdev->host->can_queue;
+ } else
queue_depth = sdev->host->can_queue;
scsi_change_queue_depth(sdev, queue_depth);
@@ -2354,26 +2347,12 @@ static void hpsa_cmd_resolve_events(struct ctlr_info *h,
bool do_wake = false;
/*
- * Prevent the following race in the abort handler:
- *
- * 1. LLD is requested to abort a SCSI command
- * 2. The SCSI command completes
- * 3. The struct CommandList associated with step 2 is made available
- * 4. New I/O request to LLD to another LUN re-uses struct CommandList
- * 5. Abort handler follows scsi_cmnd->host_scribble and
- * finds struct CommandList and tries to aborts it
- * Now we have aborted the wrong command.
- *
- * Reset c->scsi_cmd here so that the abort or reset handler will know
+ * Reset c->scsi_cmd here so that the reset handler will know
* this command has completed. Then, check to see if the handler is
* waiting for this command, and, if so, wake it.
*/
c->scsi_cmd = SCSI_CMD_IDLE;
mb(); /* Declare command idle before checking for pending events. */
- if (c->abort_pending) {
- do_wake = true;
- c->abort_pending = false;
- }
if (c->reset_pending) {
unsigned long flags;
struct hpsa_scsi_dev_t *dev;
@@ -2416,20 +2395,6 @@ static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
}
-static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd)
-{
- cmd->result = DID_ABORT << 16;
-}
-
-static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c,
- struct scsi_cmnd *cmd)
-{
- hpsa_set_scsi_cmd_aborted(cmd);
- dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
- c->Request.CDB, c->err_info->ScsiStatus);
- hpsa_cmd_resolve_and_free(h, c);
-}
-
static void process_ioaccel2_completion(struct ctlr_info *h,
struct CommandList *c, struct scsi_cmnd *cmd,
struct hpsa_scsi_dev_t *dev)
@@ -2554,12 +2519,9 @@ static void complete_scsi_command(struct CommandList *cp)
return hpsa_cmd_free_and_done(h, cp, cmd);
}
- if ((unlikely(hpsa_is_pending_event(cp)))) {
+ if ((unlikely(hpsa_is_pending_event(cp))))
if (cp->reset_pending)
return hpsa_cmd_free_and_done(h, cp, cmd);
- if (cp->abort_pending)
- return hpsa_cmd_abort_and_free(h, cp, cmd);
- }
if (cp->cmd_type == CMD_IOACCEL2)
return process_ioaccel2_completion(h, cp, cmd, dev);
@@ -2679,8 +2641,8 @@ static void complete_scsi_command(struct CommandList *cp)
cp->Request.CDB);
break;
case CMD_ABORTED:
- /* Return now to avoid calling scsi_done(). */
- return hpsa_cmd_abort_and_free(h, cp, cmd);
+ cmd->result = DID_ABORT << 16;
+ break;
case CMD_ABORT_FAILED:
cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
@@ -3090,7 +3052,7 @@ static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
if (unlikely(rc))
atomic_set(&dev->reset_cmds_out, 0);
else
- wait_for_device_to_become_ready(h, scsi3addr, 0);
+ rc = wait_for_device_to_become_ready(h, scsi3addr, 0);
mutex_unlock(&h->reset_mutex);
return rc;
@@ -3165,7 +3127,7 @@ static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
le16_to_cpu(map_buff->layout_map_count));
dev_info(&h->pdev->dev, "flags = 0x%x\n",
le16_to_cpu(map_buff->flags));
- dev_info(&h->pdev->dev, "encrypytion = %s\n",
+ dev_info(&h->pdev->dev, "encryption = %s\n",
le16_to_cpu(map_buff->flags) &
RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
dev_info(&h->pdev->dev, "dekindex = %u\n",
@@ -3353,6 +3315,11 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h,
bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
+ if (encl_dev->target == -1 || encl_dev->lun == -1) {
+ rc = IO_OK;
+ goto out;
+ }
+
if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) {
rc = IO_OK;
goto out;
@@ -3781,53 +3748,6 @@ static unsigned char hpsa_volume_offline(struct ctlr_info *h,
return HPSA_LV_OK;
}
-/*
- * Find out if a logical device supports aborts by simply trying one.
- * Smart Array may claim not to support aborts on logical drives, but
- * if a MSA2000 * is connected, the drives on that will be presented
- * by the Smart Array as logical drives, and aborts may be sent to
- * those devices successfully. So the simplest way to find out is
- * to simply try an abort and see how the device responds.
- */
-static int hpsa_device_supports_aborts(struct ctlr_info *h,
- unsigned char *scsi3addr)
-{
- struct CommandList *c;
- struct ErrorInfo *ei;
- int rc = 0;
-
- u64 tag = (u64) -1; /* bogus tag */
-
- /* Assume that physical devices support aborts */
- if (!is_logical_dev_addr_mode(scsi3addr))
- return 1;
-
- c = cmd_alloc(h);
-
- (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
- (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
- DEFAULT_TIMEOUT);
- /* no unmap needed here because no data xfer. */
- ei = c->err_info;
- switch (ei->CommandStatus) {
- case CMD_INVALID:
- rc = 0;
- break;
- case CMD_UNABORTABLE:
- case CMD_ABORT_FAILED:
- rc = 1;
- break;
- case CMD_TMF_STATUS:
- rc = hpsa_evaluate_tmf_status(h, c);
- break;
- default:
- rc = 0;
- break;
- }
- cmd_free(h, c);
- return rc;
-}
-
static int hpsa_update_device_info(struct ctlr_info *h,
unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
unsigned char *is_OBDR_device)
@@ -3907,6 +3827,9 @@ static int hpsa_update_device_info(struct ctlr_info *h,
this_device->queue_depth = h->nr_cmds;
}
+ if (this_device->external)
+ this_device->queue_depth = EXTERNAL_QD;
+
if (is_OBDR_device) {
/* See if this is a One-Button-Disaster-Recovery device
* by looking for "$DR-10" at offset 43 in inquiry data.
@@ -3924,31 +3847,6 @@ bail_out:
return rc;
}
-static void hpsa_update_device_supports_aborts(struct ctlr_info *h,
- struct hpsa_scsi_dev_t *dev, u8 *scsi3addr)
-{
- unsigned long flags;
- int rc, entry;
- /*
- * See if this device supports aborts. If we already know
- * the device, we already know if it supports aborts, otherwise
- * we have to find out if it supports aborts by trying one.
- */
- spin_lock_irqsave(&h->devlock, flags);
- rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry);
- if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) &&
- entry >= 0 && entry < h->ndevices) {
- dev->supports_aborts = h->dev[entry]->supports_aborts;
- spin_unlock_irqrestore(&h->devlock, flags);
- } else {
- spin_unlock_irqrestore(&h->devlock, flags);
- dev->supports_aborts =
- hpsa_device_supports_aborts(h, scsi3addr);
- if (dev->supports_aborts < 0)
- dev->supports_aborts = 0;
- }
-}
-
/*
* Helper function to assign bus, target, lun mapping of devices.
* Logical drive target and lun are assigned at this time, but
@@ -3986,35 +3884,6 @@ static void figure_bus_target_lun(struct ctlr_info *h,
0, lunid & 0x3fff);
}
-
-/*
- * Get address of physical disk used for an ioaccel2 mode command:
- * 1. Extract ioaccel2 handle from the command.
- * 2. Find a matching ioaccel2 handle from list of physical disks.
- * 3. Return:
- * 1 and set scsi3addr to address of matching physical
- * 0 if no matching physical disk was found.
- */
-static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
- struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
-{
- struct io_accel2_cmd *c2 =
- &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&h->devlock, flags);
- for (i = 0; i < h->ndevices; i++)
- if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) {
- memcpy(scsi3addr, h->dev[i]->scsi3addr,
- sizeof(h->dev[i]->scsi3addr));
- spin_unlock_irqrestore(&h->devlock, flags);
- return 1;
- }
- spin_unlock_irqrestore(&h->devlock, flags);
- return 0;
-}
-
static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
int i, int nphysicals, int nlocal_logicals)
{
@@ -4115,14 +3984,6 @@ static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
int rc;
struct ext_report_lun_entry *rle;
- /*
- * external targets don't support BMIC
- */
- if (dev->external) {
- dev->queue_depth = 7;
- return;
- }
-
rle = &rlep->LUN[rle_index];
dev->ioaccel_handle = rle->ioaccel_handle;
@@ -4387,7 +4248,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
}
figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
- hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
this_device = currentsd[ncurrent];
/* Turn on discovery_polling if there are ext target devices.
@@ -4584,7 +4444,55 @@ sglist_finished:
return 0;
}
-#define IO_ACCEL_INELIGIBLE (1)
+#define BUFLEN 128
+static inline void warn_zero_length_transfer(struct ctlr_info *h,
+ u8 *cdb, int cdb_len,
+ const char *func)
+{
+ char buf[BUFLEN];
+ int outlen;
+ int i;
+
+ outlen = scnprintf(buf, BUFLEN,
+ "%s: Blocking zero-length request: CDB:", func);
+ for (i = 0; i < cdb_len; i++)
+ outlen += scnprintf(buf+outlen, BUFLEN - outlen,
+ "%02hhx", cdb[i]);
+ dev_warn(&h->pdev->dev, "%s\n", buf);
+}
+
+#define IO_ACCEL_INELIGIBLE 1
+/* zero-length transfers trigger hardware errors. */
+static bool is_zero_length_transfer(u8 *cdb)
+{
+ u32 block_cnt;
+
+ /* Block zero-length transfer sizes on certain commands. */
+ switch (cdb[0]) {
+ case READ_10:
+ case WRITE_10:
+ case VERIFY: /* 0x2F */
+ case WRITE_VERIFY: /* 0x2E */
+ block_cnt = get_unaligned_be16(&cdb[7]);
+ break;
+ case READ_12:
+ case WRITE_12:
+ case VERIFY_12: /* 0xAF */
+ case WRITE_VERIFY_12: /* 0xAE */
+ block_cnt = get_unaligned_be32(&cdb[6]);
+ break;
+ case READ_16:
+ case WRITE_16:
+ case VERIFY_16: /* 0x8F */
+ block_cnt = get_unaligned_be32(&cdb[10]);
+ break;
+ default:
+ return false;
+ }
+
+ return block_cnt == 0;
+}
+
static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
{
int is_write = 0;
@@ -4651,6 +4559,12 @@ static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
+ if (is_zero_length_transfer(cdb)) {
+ warn_zero_length_transfer(h, cdb, cdb_len, __func__);
+ atomic_dec(&phys_disk->ioaccel_cmds_out);
+ return IO_ACCEL_INELIGIBLE;
+ }
+
if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
atomic_dec(&phys_disk->ioaccel_cmds_out);
return IO_ACCEL_INELIGIBLE;
@@ -4815,6 +4729,12 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
+ if (is_zero_length_transfer(cdb)) {
+ warn_zero_length_transfer(h, cdb, cdb_len, __func__);
+ atomic_dec(&phys_disk->ioaccel_cmds_out);
+ return IO_ACCEL_INELIGIBLE;
+ }
+
if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
atomic_dec(&phys_disk->ioaccel_cmds_out);
return IO_ACCEL_INELIGIBLE;
@@ -5460,9 +5380,7 @@ static void hpsa_command_resubmit_worker(struct work_struct *work)
return hpsa_cmd_free_and_done(c->h, c, cmd);
}
if (c->reset_pending)
- return hpsa_cmd_resolve_and_free(c->h, c);
- if (c->abort_pending)
- return hpsa_cmd_abort_and_free(c->h, c, cmd);
+ return hpsa_cmd_free_and_done(c->h, c, cmd);
if (c->cmd_type == CMD_IOACCEL2) {
struct ctlr_info *h = c->h;
struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
@@ -5613,10 +5531,14 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
/*
* Do the scan after a reset completion
*/
+ spin_lock_irqsave(&h->reset_lock, flags);
if (h->reset_in_progress) {
h->drv_req_rescan = 1;
+ spin_unlock_irqrestore(&h->reset_lock, flags);
+ hpsa_scan_complete(h);
return;
}
+ spin_unlock_irqrestore(&h->reset_lock, flags);
hpsa_update_scsi_devices(h);
@@ -5828,24 +5750,37 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h,
*/
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
{
- int rc;
+ int rc = SUCCESS;
struct ctlr_info *h;
struct hpsa_scsi_dev_t *dev;
u8 reset_type;
char msg[48];
+ unsigned long flags;
/* find the controller to which the command to be aborted was sent */
h = sdev_to_hba(scsicmd->device);
if (h == NULL) /* paranoia */
return FAILED;
- if (lockup_detected(h))
- return FAILED;
+ spin_lock_irqsave(&h->reset_lock, flags);
+ h->reset_in_progress = 1;
+ spin_unlock_irqrestore(&h->reset_lock, flags);
+
+ if (lockup_detected(h)) {
+ rc = FAILED;
+ goto return_reset_status;
+ }
dev = scsicmd->device->hostdata;
if (!dev) {
dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
- return FAILED;
+ rc = FAILED;
+ goto return_reset_status;
+ }
+
+ if (dev->devtype == TYPE_ENCLOSURE) {
+ rc = SUCCESS;
+ goto return_reset_status;
}
/* if controller locked up, we can guarantee command won't complete */
@@ -5854,7 +5789,8 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
"cmd %d RESET FAILED, lockup detected",
hpsa_get_cmd_index(scsicmd));
hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
- return FAILED;
+ rc = FAILED;
+ goto return_reset_status;
}
/* this reset request might be the result of a lockup; check */
@@ -5863,12 +5799,15 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
"cmd %d RESET FAILED, new lockup detected",
hpsa_get_cmd_index(scsicmd));
hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
- return FAILED;
+ rc = FAILED;
+ goto return_reset_status;
}
/* Do not attempt on controller */
- if (is_hba_lunid(dev->scsi3addr))
- return SUCCESS;
+ if (is_hba_lunid(dev->scsi3addr)) {
+ rc = SUCCESS;
+ goto return_reset_status;
+ }
if (is_logical_dev_addr_mode(dev->scsi3addr))
reset_type = HPSA_DEVICE_RESET_MSG;
@@ -5879,446 +5818,26 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
- h->reset_in_progress = 1;
-
/* send a reset to the SCSI LUN which the command was sent to */
rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type,
DEFAULT_REPLY_QUEUE);
+ if (rc == 0)
+ rc = SUCCESS;
+ else
+ rc = FAILED;
+
sprintf(msg, "reset %s %s",
reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
- rc == 0 ? "completed successfully" : "failed");
+ rc == SUCCESS ? "completed successfully" : "failed");
hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
- h->reset_in_progress = 0;
- return rc == 0 ? SUCCESS : FAILED;
-}
-
-static void swizzle_abort_tag(u8 *tag)
-{
- u8 original_tag[8];
-
- memcpy(original_tag, tag, 8);
- tag[0] = original_tag[3];
- tag[1] = original_tag[2];
- tag[2] = original_tag[1];
- tag[3] = original_tag[0];
- tag[4] = original_tag[7];
- tag[5] = original_tag[6];
- tag[6] = original_tag[5];
- tag[7] = original_tag[4];
-}
-
-static void hpsa_get_tag(struct ctlr_info *h,
- struct CommandList *c, __le32 *taglower, __le32 *tagupper)
-{
- u64 tag;
- if (c->cmd_type == CMD_IOACCEL1) {
- struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
- &h->ioaccel_cmd_pool[c->cmdindex];
- tag = le64_to_cpu(cm1->tag);
- *tagupper = cpu_to_le32(tag >> 32);
- *taglower = cpu_to_le32(tag);
- return;
- }
- if (c->cmd_type == CMD_IOACCEL2) {
- struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
- &h->ioaccel2_cmd_pool[c->cmdindex];
- /* upper tag not used in ioaccel2 mode */
- memset(tagupper, 0, sizeof(*tagupper));
- *taglower = cm2->Tag;
- return;
- }
- tag = le64_to_cpu(c->Header.tag);
- *tagupper = cpu_to_le32(tag >> 32);
- *taglower = cpu_to_le32(tag);
-}
-
-static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
- struct CommandList *abort, int reply_queue)
-{
- int rc = IO_OK;
- struct CommandList *c;
- struct ErrorInfo *ei;
- __le32 tagupper, taglower;
-
- c = cmd_alloc(h);
-
- /* fill_cmd can't fail here, no buffer to map */
- (void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag,
- 0, 0, scsi3addr, TYPE_MSG);
- if (h->needs_abort_tags_swizzled)
- swizzle_abort_tag(&c->Request.CDB[4]);
- (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
- hpsa_get_tag(h, abort, &taglower, &tagupper);
- dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
- __func__, tagupper, taglower);
- /* no unmap needed here because no data xfer. */
-
- ei = c->err_info;
- switch (ei->CommandStatus) {
- case CMD_SUCCESS:
- break;
- case CMD_TMF_STATUS:
- rc = hpsa_evaluate_tmf_status(h, c);
- break;
- case CMD_UNABORTABLE: /* Very common, don't make noise. */
- rc = -1;
- break;
- default:
- dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
- __func__, tagupper, taglower);
- hpsa_scsi_interpret_error(h, c);
- rc = -1;
- break;
- }
- cmd_free(h, c);
- dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
- __func__, tagupper, taglower);
- return rc;
-}
-
-static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h,
- struct CommandList *command_to_abort, int reply_queue)
-{
- struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
- struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
- struct io_accel2_cmd *c2a =
- &h->ioaccel2_cmd_pool[command_to_abort->cmdindex];
- struct scsi_cmnd *scmd = command_to_abort->scsi_cmd;
- struct hpsa_scsi_dev_t *dev = scmd->device->hostdata;
- if (!dev)
- return;
-
- /*
- * We're overlaying struct hpsa_tmf_struct on top of something which
- * was allocated as a struct io_accel2_cmd, so we better be sure it
- * actually fits, and doesn't overrun the error info space.
- */
- BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) >
- sizeof(struct io_accel2_cmd));
- BUG_ON(offsetof(struct io_accel2_cmd, error_data) <
- offsetof(struct hpsa_tmf_struct, error_len) +
- sizeof(ac->error_len));
-
- c->cmd_type = IOACCEL2_TMF;
- c->scsi_cmd = SCSI_CMD_BUSY;
-
- /* Adjust the DMA address to point to the accelerated command buffer */
- c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
- (c->cmdindex * sizeof(struct io_accel2_cmd));
- BUG_ON(c->busaddr & 0x0000007F);
-
- memset(ac, 0, sizeof(*c2)); /* yes this is correct */
- ac->iu_type = IOACCEL2_IU_TMF_TYPE;
- ac->reply_queue = reply_queue;
- ac->tmf = IOACCEL2_TMF_ABORT;
- ac->it_nexus = cpu_to_le32(dev->ioaccel_handle);
- memset(ac->lun_id, 0, sizeof(ac->lun_id));
- ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT);
- ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag));
- ac->error_ptr = cpu_to_le64(c->busaddr +
- offsetof(struct io_accel2_cmd, error_data));
- ac->error_len = cpu_to_le32(sizeof(c2->error_data));
-}
-
-/* ioaccel2 path firmware cannot handle abort task requests.
- * Change abort requests to physical target reset, and send to the
- * address of the physical disk used for the ioaccel 2 command.
- * Return 0 on success (IO_OK)
- * -1 on failure
- */
-
-static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
- unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
-{
- int rc = IO_OK;
- struct scsi_cmnd *scmd; /* scsi command within request being aborted */
- struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
- unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
- unsigned char *psa = &phys_scsi3addr[0];
-
- /* Get a pointer to the hpsa logical device. */
- scmd = abort->scsi_cmd;
- dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
- if (dev == NULL) {
- dev_warn(&h->pdev->dev,
- "Cannot abort: no device pointer for command.\n");
- return -1; /* not abortable */
- }
-
- if (h->raid_offload_debug > 0)
- dev_info(&h->pdev->dev,
- "scsi %d:%d:%d:%d %s scsi3addr 0x%8phN\n",
- h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
- "Reset as abort", scsi3addr);
-
- if (!dev->offload_enabled) {
- dev_warn(&h->pdev->dev,
- "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
- return -1; /* not abortable */
- }
-
- /* Incoming scsi3addr is logical addr. We need physical disk addr. */
- if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
- dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
- return -1; /* not abortable */
- }
-
- /* send the reset */
- if (h->raid_offload_debug > 0)
- dev_info(&h->pdev->dev,
- "Reset as abort: Resetting physical device at scsi3addr 0x%8phN\n",
- psa);
- rc = hpsa_do_reset(h, dev, psa, HPSA_PHYS_TARGET_RESET, reply_queue);
- if (rc != 0) {
- dev_warn(&h->pdev->dev,
- "Reset as abort: Failed on physical device at scsi3addr 0x%8phN\n",
- psa);
- return rc; /* failed to reset */
- }
-
- /* wait for device to recover */
- if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
- dev_warn(&h->pdev->dev,
- "Reset as abort: Failed: Device never recovered from reset: 0x%8phN\n",
- psa);
- return -1; /* failed to recover */
- }
-
- /* device recovered */
- dev_info(&h->pdev->dev,
- "Reset as abort: Device recovered from reset: scsi3addr 0x%8phN\n",
- psa);
-
- return rc; /* success */
-}
-
-static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
- struct CommandList *abort, int reply_queue)
-{
- int rc = IO_OK;
- struct CommandList *c;
- __le32 taglower, tagupper;
- struct hpsa_scsi_dev_t *dev;
- struct io_accel2_cmd *c2;
-
- dev = abort->scsi_cmd->device->hostdata;
- if (!dev)
- return -1;
-
- if (!dev->offload_enabled && !dev->hba_ioaccel_enabled)
- return -1;
-
- c = cmd_alloc(h);
- setup_ioaccel2_abort_cmd(c, h, abort, reply_queue);
- c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
- (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
- hpsa_get_tag(h, abort, &taglower, &tagupper);
- dev_dbg(&h->pdev->dev,
- "%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n",
- __func__, tagupper, taglower);
- /* no unmap needed here because no data xfer. */
-
- dev_dbg(&h->pdev->dev,
- "%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n",
- __func__, tagupper, taglower, c2->error_data.serv_response);
- switch (c2->error_data.serv_response) {
- case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
- case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
- rc = 0;
- break;
- case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
- case IOACCEL2_SERV_RESPONSE_FAILURE:
- case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
- rc = -1;
- break;
- default:
- dev_warn(&h->pdev->dev,
- "%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n",
- __func__, tagupper, taglower,
- c2->error_data.serv_response);
- rc = -1;
- }
- cmd_free(h, c);
- dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
- tagupper, taglower);
+return_reset_status:
+ spin_lock_irqsave(&h->reset_lock, flags);
+ h->reset_in_progress = 0;
+ spin_unlock_irqrestore(&h->reset_lock, flags);
return rc;
}
-static int hpsa_send_abort_both_ways(struct ctlr_info *h,
- struct hpsa_scsi_dev_t *dev, struct CommandList *abort, int reply_queue)
-{
- /*
- * ioccelerator mode 2 commands should be aborted via the
- * accelerated path, since RAID path is unaware of these commands,
- * but not all underlying firmware can handle abort TMF.
- * Change abort to physical device reset when abort TMF is unsupported.
- */
- if (abort->cmd_type == CMD_IOACCEL2) {
- if ((HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags) ||
- dev->physical_device)
- return hpsa_send_abort_ioaccel2(h, abort,
- reply_queue);
- else
- return hpsa_send_reset_as_abort_ioaccel2(h,
- dev->scsi3addr,
- abort, reply_queue);
- }
- return hpsa_send_abort(h, dev->scsi3addr, abort, reply_queue);
-}
-
-/* Find out which reply queue a command was meant to return on */
-static int hpsa_extract_reply_queue(struct ctlr_info *h,
- struct CommandList *c)
-{
- if (c->cmd_type == CMD_IOACCEL2)
- return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue;
- return c->Header.ReplyQueue;
-}
-
-/*
- * Limit concurrency of abort commands to prevent
- * over-subscription of commands
- */
-static inline int wait_for_available_abort_cmd(struct ctlr_info *h)
-{
-#define ABORT_CMD_WAIT_MSECS 5000
- return !wait_event_timeout(h->abort_cmd_wait_queue,
- atomic_dec_if_positive(&h->abort_cmds_available) >= 0,
- msecs_to_jiffies(ABORT_CMD_WAIT_MSECS));
-}
-
-/* Send an abort for the specified command.
- * If the device and controller support it,
- * send a task abort request.
- */
-static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
-{
-
- int rc;
- struct ctlr_info *h;
- struct hpsa_scsi_dev_t *dev;
- struct CommandList *abort; /* pointer to command to be aborted */
- struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */
- char msg[256]; /* For debug messaging. */
- int ml = 0;
- __le32 tagupper, taglower;
- int refcount, reply_queue;
-
- if (sc == NULL)
- return FAILED;
-
- if (sc->device == NULL)
- return FAILED;
-
- /* Find the controller of the command to be aborted */
- h = sdev_to_hba(sc->device);
- if (h == NULL)
- return FAILED;
-
- /* Find the device of the command to be aborted */
- dev = sc->device->hostdata;
- if (!dev) {
- dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
- msg);
- return FAILED;
- }
-
- /* If controller locked up, we can guarantee command won't complete */
- if (lockup_detected(h)) {
- hpsa_show_dev_msg(KERN_WARNING, h, dev,
- "ABORT FAILED, lockup detected");
- return FAILED;
- }
-
- /* This is a good time to check if controller lockup has occurred */
- if (detect_controller_lockup(h)) {
- hpsa_show_dev_msg(KERN_WARNING, h, dev,
- "ABORT FAILED, new lockup detected");
- return FAILED;
- }
-
- /* Check that controller supports some kind of task abort */
- if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
- !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
- return FAILED;
-
- memset(msg, 0, sizeof(msg));
- ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p",
- h->scsi_host->host_no, sc->device->channel,
- sc->device->id, sc->device->lun,
- "Aborting command", sc);
-
- /* Get SCSI command to be aborted */
- abort = (struct CommandList *) sc->host_scribble;
- if (abort == NULL) {
- /* This can happen if the command already completed. */
- return SUCCESS;
- }
- refcount = atomic_inc_return(&abort->refcount);
- if (refcount == 1) { /* Command is done already. */
- cmd_free(h, abort);
- return SUCCESS;
- }
-
- /* Don't bother trying the abort if we know it won't work. */
- if (abort->cmd_type != CMD_IOACCEL2 &&
- abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) {
- cmd_free(h, abort);
- return FAILED;
- }
-
- /*
- * Check that we're aborting the right command.
- * It's possible the CommandList already completed and got re-used.
- */
- if (abort->scsi_cmd != sc) {
- cmd_free(h, abort);
- return SUCCESS;
- }
-
- abort->abort_pending = true;
- hpsa_get_tag(h, abort, &taglower, &tagupper);
- reply_queue = hpsa_extract_reply_queue(h, abort);
- ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
- as = abort->scsi_cmd;
- if (as != NULL)
- ml += sprintf(msg+ml,
- "CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ",
- as->cmd_len, as->cmnd[0], as->cmnd[1],
- as->serial_number);
- dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg);
- hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command");
-
- /*
- * Command is in flight, or possibly already completed
- * by the firmware (but not to the scsi mid layer) but we can't
- * distinguish which. Send the abort down.
- */
- if (wait_for_available_abort_cmd(h)) {
- dev_warn(&h->pdev->dev,
- "%s FAILED, timeout waiting for an abort command to become available.\n",
- msg);
- cmd_free(h, abort);
- return FAILED;
- }
- rc = hpsa_send_abort_both_ways(h, dev, abort, reply_queue);
- atomic_inc(&h->abort_cmds_available);
- wake_up_all(&h->abort_cmd_wait_queue);
- if (rc != 0) {
- dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg);
- hpsa_show_dev_msg(KERN_WARNING, h, dev,
- "FAILED to abort command");
- cmd_free(h, abort);
- return FAILED;
- }
- dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg);
- wait_event(h->event_sync_wait_queue,
- abort->scsi_cmd != sc || lockup_detected(h));
- cmd_free(h, abort);
- return !lockup_detected(h) ? SUCCESS : FAILED;
-}
-
/*
* For operations with an associated SCSI command, a command block is allocated
* at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the
@@ -6364,9 +5883,7 @@ static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
{
/*
* Release our reference to the block. We don't need to do anything
- * else to free it, because it is accessed by index. (There's no point
- * in checking the result of the decrement, since we cannot guarantee
- * that there isn't a concurrent abort which is also accessing it.)
+ * else to free it, because it is accessed by index.
*/
(void)atomic_dec(&c->refcount);
}
@@ -6905,7 +6422,6 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
int cmd_type)
{
int pci_dir = XFER_NONE;
- u64 tag; /* for commands to be aborted */
c->cmd_type = CMD_IOCTL_PEND;
c->scsi_cmd = SCSI_CMD_BUSY;
@@ -7089,27 +6605,6 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
c->Request.CDB[6] = 0x00;
c->Request.CDB[7] = 0x00;
break;
- case HPSA_ABORT_MSG:
- memcpy(&tag, buff, sizeof(tag));
- dev_dbg(&h->pdev->dev,
- "Abort Tag:0x%016llx using rqst Tag:0x%016llx",
- tag, c->Header.tag);
- c->Request.CDBLen = 16;
- c->Request.type_attr_dir =
- TYPE_ATTR_DIR(cmd_type,
- ATTR_SIMPLE, XFER_WRITE);
- c->Request.Timeout = 0; /* Don't time out */
- c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
- c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
- c->Request.CDB[2] = 0x00; /* reserved */
- c->Request.CDB[3] = 0x00; /* reserved */
- /* Tag to abort goes in CDB[4]-CDB[11] */
- memcpy(&c->Request.CDB[4], &tag, sizeof(tag));
- c->Request.CDB[12] = 0x00; /* reserved */
- c->Request.CDB[13] = 0x00; /* reserved */
- c->Request.CDB[14] = 0x00; /* reserved */
- c->Request.CDB[15] = 0x00; /* reserved */
- break;
default:
dev_warn(&h->pdev->dev, "unknown message type %d\n",
cmd);
@@ -8067,9 +7562,6 @@ static int hpsa_pci_init(struct ctlr_info *h)
h->product_name = products[prod_index].product_name;
h->access = *(products[prod_index].access);
- h->needs_abort_tags_swizzled =
- ctlr_needs_abort_tags_swizzled(h->board_id);
-
pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
@@ -8627,41 +8119,79 @@ out:
return rc;
}
-static void hpsa_rescan_ctlr_worker(struct work_struct *work)
+static void hpsa_perform_rescan(struct ctlr_info *h)
{
+ struct Scsi_Host *sh = NULL;
unsigned long flags;
- struct ctlr_info *h = container_of(to_delayed_work(work),
- struct ctlr_info, rescan_ctlr_work);
-
-
- if (h->remove_in_progress)
- return;
/*
* Do the scan after the reset
*/
+ spin_lock_irqsave(&h->reset_lock, flags);
if (h->reset_in_progress) {
h->drv_req_rescan = 1;
+ spin_unlock_irqrestore(&h->reset_lock, flags);
return;
}
+ spin_unlock_irqrestore(&h->reset_lock, flags);
+
+ sh = scsi_host_get(h->scsi_host);
+ if (sh != NULL) {
+ hpsa_scan_start(sh);
+ scsi_host_put(sh);
+ h->drv_req_rescan = 0;
+ }
+}
+
+/*
+ * watch for controller events
+ */
+static void hpsa_event_monitor_worker(struct work_struct *work)
+{
+ struct ctlr_info *h = container_of(to_delayed_work(work),
+ struct ctlr_info, event_monitor_work);
+ unsigned long flags;
+
+ spin_lock_irqsave(&h->lock, flags);
+ if (h->remove_in_progress) {
+ spin_unlock_irqrestore(&h->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&h->lock, flags);
- if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
- scsi_host_get(h->scsi_host);
+ if (hpsa_ctlr_needs_rescan(h)) {
hpsa_ack_ctlr_events(h);
- hpsa_scan_start(h->scsi_host);
- scsi_host_put(h->scsi_host);
+ hpsa_perform_rescan(h);
+ }
+
+ spin_lock_irqsave(&h->lock, flags);
+ if (!h->remove_in_progress)
+ schedule_delayed_work(&h->event_monitor_work,
+ HPSA_EVENT_MONITOR_INTERVAL);
+ spin_unlock_irqrestore(&h->lock, flags);
+}
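hpsa_event_monitor_worker re-arms itself under h->lock, the same lock the remove path holds when it sets remove_in_progress before calling cancel_delayed_work_sync(); that ordering keeps the work item from slipping back onto the queue mid-teardown. A condensed sketch of the pattern, with illustrative names (struct demo is not an hpsa type):

#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo {
	spinlock_t lock;
	int remove_in_progress;
	struct delayed_work monitor;
};

#define DEMO_INTERVAL (30 * HZ)	/* stand-in for HPSA_EVENT_MONITOR_INTERVAL */

static void demo_monitor_worker(struct work_struct *work)
{
	struct demo *d = container_of(to_delayed_work(work),
				      struct demo, monitor);
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	if (d->remove_in_progress) {		/* teardown already started */
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&d->lock, flags);

	/* ... poll controller events here ... */

	spin_lock_irqsave(&d->lock, flags);
	if (!d->remove_in_progress)		/* re-arm only while alive */
		schedule_delayed_work(&d->monitor, DEMO_INTERVAL);
	spin_unlock_irqrestore(&d->lock, flags);
}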
+
+static void hpsa_rescan_ctlr_worker(struct work_struct *work)
+{
+ unsigned long flags;
+ struct ctlr_info *h = container_of(to_delayed_work(work),
+ struct ctlr_info, rescan_ctlr_work);
+
+ spin_lock_irqsave(&h->lock, flags);
+ if (h->remove_in_progress) {
+ spin_unlock_irqrestore(&h->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&h->lock, flags);
+
+ if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) {
+ hpsa_perform_rescan(h);
} else if (h->discovery_polling) {
hpsa_disable_rld_caching(h);
if (hpsa_luns_changed(h)) {
- struct Scsi_Host *sh = NULL;
-
dev_info(&h->pdev->dev,
"driver discovery polling rescan.\n");
- sh = scsi_host_get(h->scsi_host);
- if (sh != NULL) {
- hpsa_scan_start(sh);
- scsi_host_put(sh);
- }
+ hpsa_perform_rescan(h);
}
}
spin_lock_irqsave(&h->lock, flags);
@@ -8750,8 +8280,8 @@ reinit_after_soft_reset:
spin_lock_init(&h->lock);
spin_lock_init(&h->offline_device_lock);
spin_lock_init(&h->scan_lock);
+ spin_lock_init(&h->reset_lock);
atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
- atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS);
/* Allocate and clear per-cpu variable lockup_detected */
h->lockup_detected = alloc_percpu(u32);
@@ -8803,7 +8333,6 @@ reinit_after_soft_reset:
if (rc)
goto clean5; /* cmd, irq, shost, pci, lu, aer/h */
init_waitqueue_head(&h->scan_wait_queue);
- init_waitqueue_head(&h->abort_cmd_wait_queue);
init_waitqueue_head(&h->event_sync_wait_queue);
mutex_init(&h->reset_mutex);
h->scan_finished = 1; /* no scan currently in progress */
@@ -8926,6 +8455,9 @@ reinit_after_soft_reset:
INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
h->heartbeat_sample_interval);
+ INIT_DELAYED_WORK(&h->event_monitor_work, hpsa_event_monitor_worker);
+ schedule_delayed_work(&h->event_monitor_work,
+ HPSA_EVENT_MONITOR_INTERVAL);
return 0;
clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
@@ -9094,6 +8626,7 @@ static void hpsa_remove_one(struct pci_dev *pdev)
spin_unlock_irqrestore(&h->lock, flags);
cancel_delayed_work_sync(&h->monitor_ctlr_work);
cancel_delayed_work_sync(&h->rescan_ctlr_work);
+ cancel_delayed_work_sync(&h->event_monitor_work);
destroy_workqueue(h->rescan_ctlr_wq);
destroy_workqueue(h->resubmit_wq);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 6f04f2ad4125..1c49741bc639 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -57,6 +57,7 @@ struct hpsa_sas_phy {
bool added_to_port;
};
+#define EXTERNAL_QD 7
struct hpsa_scsi_dev_t {
unsigned int devtype;
int bus, target, lun; /* as presented to the OS */
@@ -244,6 +245,7 @@ struct ctlr_info {
u32 __percpu *lockup_detected;
struct delayed_work monitor_ctlr_work;
struct delayed_work rescan_ctlr_work;
+ struct delayed_work event_monitor_work;
int remove_in_progress;
/* Address of h->q[x] is passed to intr handler to know which queue */
u8 q[MAX_REPLY_QUEUES];
@@ -296,11 +298,11 @@ struct ctlr_info {
struct workqueue_struct *resubmit_wq;
struct workqueue_struct *rescan_ctlr_wq;
atomic_t abort_cmds_available;
- wait_queue_head_t abort_cmd_wait_queue;
wait_queue_head_t event_sync_wait_queue;
struct mutex reset_mutex;
u8 reset_in_progress;
struct hpsa_sas_node *sas_host;
+ spinlock_t reset_lock;
};
struct offline_device_entry {
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 5961705eef76..078afe448115 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -809,10 +809,7 @@ struct bmic_identify_physical_device {
u8 max_temperature_degreesC;
u8 logical_blocks_per_phys_block_exp; /* phyblocksize = 512*2^exp */
__le16 current_queue_depth_limit;
- u8 switch_name[10];
- __le16 switch_port;
- u8 alternate_paths_switch_name[40];
- u8 alternate_paths_switch_port[8];
+ u8 reserved_switch_stuff[60];
__le16 power_on_hours; /* valid only if gas gauge supported */
__le16 percent_endurance_used; /* valid only if gas gauge supported. */
#define BMIC_PHYS_DRIVE_SSD_WEAROUT(idphydrv) \
@@ -828,11 +825,22 @@ struct bmic_identify_physical_device {
(idphydrv->smart_carrier_authentication == 0x01)
u8 smart_carrier_app_fw_version;
u8 smart_carrier_bootloader_fw_version;
+ u8 sanitize_support_flags;
+ u8 drive_key_flags;
u8 encryption_key_name[64];
__le32 misc_drive_flags;
__le16 dek_index;
- u8 padding[112];
-};
+ __le16 hba_drive_encryption_flags;
+ __le16 max_overwrite_time;
+ __le16 max_block_erase_time;
+ __le16 max_crypto_erase_time;
+ u8 device_connector_info[5];
+ u8 connector_name[8][8];
+ u8 page_83_id[16];
+ u8 max_link_rate[256];
+ u8 neg_phys_link_rate[256];
+ u8 box_conn_name[8];
+} __attribute((aligned(512)));
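Firmware fills bmic_identify_physical_device by fixed byte offsets, so the field additions above are only safe if the overall layout still matches what the controller writes; the aligned(512) attribute rounds sizeof up to a multiple of 512. Not part of this patch, but a cheap compile-time guard would look like the following, where BMIC_ID_PHYS_DEV_LEN is a hypothetical constant that would have to come from the firmware spec:

#include <linux/bug.h>

#define BMIC_ID_PHYS_DEV_LEN	1024	/* illustrative value, not from the spec */

static inline void bmic_identify_layout_check(void)
{
	/* Fires at build time if a field change alters the struct size. */
	BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) !=
		     BMIC_ID_PHYS_DEV_LEN);
}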
struct bmic_sense_subsystem_info {
u8 primary_slot_number;
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index db17ad15b0c1..7226226f7383 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -800,7 +800,7 @@ static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
hptiop_finish_scsi_req(hba, tag, req);
}
-void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
+static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
struct hpt_iop_request_header __iomem *req;
struct hpt_iop_request_ioctl_command __iomem *p;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 26cd3c28186a..cc4e05be8d4a 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -4935,7 +4935,7 @@ static struct vio_device_id ibmvfc_device_table[] = {
};
MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
-static struct dev_pm_ops ibmvfc_pm_ops = {
+static const struct dev_pm_ops ibmvfc_pm_ops = {
.resume = ibmvfc_resume
};
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 1deb0a9f14a6..da22b3665cb0 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -2336,7 +2336,7 @@ static struct vio_device_id ibmvscsi_device_table[] = {
};
MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);
-static struct dev_pm_ops ibmvscsi_pm_ops = {
+static const struct dev_pm_ops ibmvscsi_pm_ops = {
.resume = ibmvscsi_resume
};
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index abf6026645dd..659ab483d716 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -3934,10 +3934,6 @@ static const struct target_core_fabric_ops ibmvscsis_ops = {
static void ibmvscsis_dev_release(struct device *dev) {};
-static struct class_attribute ibmvscsis_class_attrs[] = {
- __ATTR_NULL,
-};
-
static struct device_attribute dev_attr_system_id =
__ATTR(system_id, S_IRUGO, system_id_show, NULL);
@@ -3957,7 +3953,6 @@ ATTRIBUTE_GROUPS(ibmvscsis_dev);
static struct class ibmvscsis_class = {
.name = "ibmvscsis",
.dev_release = ibmvscsis_dev_release,
- .class_attrs = ibmvscsis_class_attrs,
.dev_groups = ibmvscsis_dev_groups,
};
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 3419e1bcdff6..67621308eb9c 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -301,13 +301,13 @@ static uint32_t ips_statupd_copperhead_memio(ips_ha_t *);
static uint32_t ips_statupd_morpheus(ips_ha_t *);
static ips_scb_t *ips_getscb(ips_ha_t *);
static void ips_putq_scb_head(ips_scb_queue_t *, ips_scb_t *);
-static void ips_putq_wait_tail(ips_wait_queue_t *, struct scsi_cmnd *);
+static void ips_putq_wait_tail(ips_wait_queue_entry_t *, struct scsi_cmnd *);
static void ips_putq_copp_tail(ips_copp_queue_t *,
ips_copp_wait_item_t *);
static ips_scb_t *ips_removeq_scb_head(ips_scb_queue_t *);
static ips_scb_t *ips_removeq_scb(ips_scb_queue_t *, ips_scb_t *);
-static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_t *);
-static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_t *,
+static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *);
+static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *,
struct scsi_cmnd *);
static ips_copp_wait_item_t *ips_removeq_copp(ips_copp_queue_t *,
ips_copp_wait_item_t *);
@@ -2871,7 +2871,7 @@ ips_removeq_scb(ips_scb_queue_t * queue, ips_scb_t * item)
/* ASSUMED to be called from within the HA lock */
/* */
/****************************************************************************/
-static void ips_putq_wait_tail(ips_wait_queue_t *queue, struct scsi_cmnd *item)
+static void ips_putq_wait_tail(ips_wait_queue_entry_t *queue, struct scsi_cmnd *item)
{
METHOD_TRACE("ips_putq_wait_tail", 1);
@@ -2902,7 +2902,7 @@ static void ips_putq_wait_tail(ips_wait_queue_t *queue, struct scsi_cmnd *item)
/* ASSUMED to be called from within the HA lock */
/* */
/****************************************************************************/
-static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_t *queue)
+static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *queue)
{
struct scsi_cmnd *item;
@@ -2936,7 +2936,7 @@ static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_t *queue)
/* ASSUMED to be called from within the HA lock */
/* */
/****************************************************************************/
-static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_t *queue,
+static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *queue,
struct scsi_cmnd *item)
{
struct scsi_cmnd *p;
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index b782bb60baf0..366be3b2f9b4 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -989,7 +989,7 @@ typedef struct ips_wait_queue {
struct scsi_cmnd *head;
struct scsi_cmnd *tail;
int count;
-} ips_wait_queue_t;
+} ips_wait_queue_entry_t;
typedef struct ips_copp_wait_item {
struct scsi_cmnd *scsi_cmd;
@@ -1035,7 +1035,7 @@ typedef struct ips_ha {
ips_stat_t sp; /* Status packer pointer */
struct ips_scb *scbs; /* Array of all CCBS */
struct ips_scb *scb_freelist; /* SCB free list */
- ips_wait_queue_t scb_waitlist; /* Pending SCB list */
+ ips_wait_queue_entry_t scb_waitlist; /* Pending SCB list */
ips_copp_queue_t copp_waitlist; /* Pending PT list */
ips_scb_queue_t scb_activelist; /* Active SCB list */
IPS_IO_CMD *dummy; /* dummy command */
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c
index d623d084b7ec..dbadbc81b24b 100644
--- a/drivers/scsi/libfc/fc_libfc.c
+++ b/drivers/scsi/libfc/fc_libfc.c
@@ -178,7 +178,7 @@ void fc_fill_hdr(struct fc_frame *fp, const struct fc_frame *in_fp,
fill = -fr_len(fp) & 3;
if (fill) {
/* TODO, this may be a problem with fragmented skb */
- memset(skb_put(fp_skb(fp), fill), 0, fill);
+ skb_put_zero(fp_skb(fp), fill);
f_ctl |= fill;
}
fr_eof(fp) = FC_EOF_T;
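skb_put_zero() folds the memset-after-skb_put idiom into a single call; both spellings append fill zeroed bytes of tailroom. A minimal sketch (pad_frame is a hypothetical wrapper, not a libfc function):

#include <linux/skbuff.h>

static void pad_frame(struct sk_buff *skb, unsigned int fill)
{
	/* Old idiom, equivalent to the line below:
	 *	memset(skb_put(skb, fill), 0, fill);
	 */
	skb_put_zero(skb, fill);
}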
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index dd6828f7f772..42381adf0769 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2556,7 +2556,7 @@ iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
* the array. */
if (items)
num_arrays++;
- q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);
+ q->pool = kvzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);
if (q->pool == NULL)
return -ENOMEM;
@@ -2590,7 +2590,7 @@ void iscsi_pool_free(struct iscsi_pool *q)
for (i = 0; i < q->max; i++)
kfree(q->pool[i]);
- kfree(q->pool);
+ kvfree(q->pool);
}
EXPORT_SYMBOL_GPL(iscsi_pool_free);
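q->pool is a single allocation of num_arrays * max pointers, which for large pools can exceed what a fragmented page allocator will return contiguously. kvzalloc() tries kmalloc first and falls back to vmalloc, and such memory must be released with kvfree(), which routes to kfree() or vfree() as appropriate; hence both call sites change together. A minimal sketch of the pairing (alloc_pool/free_pool are illustrative names):

#include <linux/mm.h>

static void **alloc_pool(size_t nr)
{
	/* Physically contiguous if small, vmalloc-backed if large. */
	return kvzalloc(nr * sizeof(void *), GFP_KERNEL);
}

static void free_pool(void **pool)
{
	/* Must be kvfree(): plain kfree() on a vmalloc'ed pointer is a bug. */
	kvfree(pool);
}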
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index aadbd5314c5c..c0d0d979b76d 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -27,30 +27,38 @@
#include "sas_internal.h"
#include "sas_dump.h"
-void sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
+int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
{
+ int rc = 0;
+
if (!test_bit(SAS_HA_REGISTERED, &ha->state))
- return;
+ return 0;
if (test_bit(SAS_HA_DRAINING, &ha->state)) {
/* add it to the defer list, if not already pending */
if (list_empty(&sw->drain_node))
list_add(&sw->drain_node, &ha->defer_q);
} else
- scsi_queue_work(ha->core.shost, &sw->work);
+ rc = scsi_queue_work(ha->core.shost, &sw->work);
+
+ return rc;
}
-static void sas_queue_event(int event, unsigned long *pending,
+static int sas_queue_event(int event, unsigned long *pending,
struct sas_work *work,
struct sas_ha_struct *ha)
{
+ int rc = 0;
+
if (!test_and_set_bit(event, pending)) {
unsigned long flags;
spin_lock_irqsave(&ha->lock, flags);
- sas_queue_work(ha, work);
+ rc = sas_queue_work(ha, work);
spin_unlock_irqrestore(&ha->lock, flags);
}
+
+ return rc;
}
@@ -116,32 +124,32 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
mutex_unlock(&ha->disco_mutex);
}
-static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event)
+static int notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event)
{
BUG_ON(event >= HA_NUM_EVENTS);
- sas_queue_event(event, &sas_ha->pending,
- &sas_ha->ha_events[event].work, sas_ha);
+ return sas_queue_event(event, &sas_ha->pending,
+ &sas_ha->ha_events[event].work, sas_ha);
}
-static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
+static int notify_port_event(struct asd_sas_phy *phy, enum port_event event)
{
struct sas_ha_struct *ha = phy->ha;
BUG_ON(event >= PORT_NUM_EVENTS);
- sas_queue_event(event, &phy->port_events_pending,
- &phy->port_events[event].work, ha);
+ return sas_queue_event(event, &phy->port_events_pending,
+ &phy->port_events[event].work, ha);
}
-void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
+int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
{
struct sas_ha_struct *ha = phy->ha;
BUG_ON(event >= PHY_NUM_EVENTS);
- sas_queue_event(event, &phy->phy_events_pending,
- &phy->phy_events[event].work, ha);
+ return sas_queue_event(event, &phy->phy_events_pending,
+ &phy->phy_events[event].work, ha);
}
int sas_init_events(struct sas_ha_struct *sas_ha)
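With the void returns gone, event notification now reports whether work was put on the queue: sas_queue_event() passes back the scsi_queue_work() result (1 when newly queued) and returns 0 when the event bit was already pending, the work was deferred to the drain list, or the HA is unregistered. A hypothetical call site that logs the not-queued case (demo_phy_up is illustrative; in this tree LLDDs normally reach this path through the sas_ha notify hooks):

#include <scsi/libsas.h>

static void demo_phy_up(struct asd_sas_phy *phy)
{
	if (!sas_notify_phy_event(phy, PHYE_OOB_DONE))
		pr_warn("sas: PHYE_OOB_DONE not queued\n");
}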
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index b306b7843d99..a216c957b639 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -76,7 +76,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work);
void sas_porte_link_reset_err(struct work_struct *work);
void sas_porte_timer_event(struct work_struct *work);
void sas_porte_hard_reset(struct work_struct *work);
-void sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw);
+int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw);
int sas_notify_lldd_dev_found(struct domain_device *);
void sas_notify_lldd_dev_gone(struct domain_device *);
@@ -85,7 +85,7 @@ int sas_smp_phy_control(struct domain_device *dev, int phy_id,
enum phy_func phy_func, struct sas_phy_linkrates *);
int sas_smp_get_phy_events(struct sas_phy *phy);
-void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event);
+int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event);
void sas_device_set_phy(struct domain_device *dev, struct sas_port *port);
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index f2c0ba6ced78..562dc0139735 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -756,6 +756,7 @@ struct lpfc_hba {
uint8_t nvmet_support; /* driver supports NVMET */
#define LPFC_NVMET_MAX_PORTS 32
uint8_t mds_diags_support;
+ uint32_t initial_imax;
/* HBA Config Parameters */
uint32_t cfg_ack0;
@@ -777,6 +778,7 @@ struct lpfc_hba {
uint32_t cfg_poll_tmo;
uint32_t cfg_task_mgmt_tmo;
uint32_t cfg_use_msi;
+ uint32_t cfg_auto_imax;
uint32_t cfg_fcp_imax;
uint32_t cfg_fcp_cpu_map;
uint32_t cfg_fcp_io_channel;
@@ -913,16 +915,16 @@ struct lpfc_hba {
/*
* stat counters
*/
- uint64_t fc4ScsiInputRequests;
- uint64_t fc4ScsiOutputRequests;
- uint64_t fc4ScsiControlRequests;
- uint64_t fc4ScsiIoCmpls;
- uint64_t fc4NvmeInputRequests;
- uint64_t fc4NvmeOutputRequests;
- uint64_t fc4NvmeControlRequests;
- uint64_t fc4NvmeIoCmpls;
- uint64_t fc4NvmeLsRequests;
- uint64_t fc4NvmeLsCmpls;
+ atomic_t fc4ScsiInputRequests;
+ atomic_t fc4ScsiOutputRequests;
+ atomic_t fc4ScsiControlRequests;
+ atomic_t fc4ScsiIoCmpls;
+ atomic_t fc4NvmeInputRequests;
+ atomic_t fc4NvmeOutputRequests;
+ atomic_t fc4NvmeControlRequests;
+ atomic_t fc4NvmeIoCmpls;
+ atomic_t fc4NvmeLsRequests;
+ atomic_t fc4NvmeLsCmpls;
uint64_t bg_guard_err_cnt;
uint64_t bg_apptag_err_cnt;
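The counter conversion above matters because these statistics are bumped from submission and completion paths on any CPU with no lock held: plain u64 increments are read-modify-write sequences that can lose updates, and 64-bit reads can tear on 32-bit hosts. atomic_t trades that for a 32-bit value. The resulting access pattern, sketched with an illustrative counter:

#include <linux/atomic.h>

static atomic_t demo_io_cmpls = ATOMIC_INIT(0);

static void demo_io_done(void)
{
	atomic_inc(&demo_io_cmpls);	/* safe from any CPU or context */
}

static int demo_read_stat(void)
{
	return atomic_read(&demo_io_cmpls);	/* lock-free snapshot */
}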
@@ -1050,6 +1052,7 @@ struct lpfc_hba {
uint8_t temp_sensor_support;
/* Fields used for heart beat. */
+ unsigned long last_eqdelay_time;
unsigned long last_completion_time;
unsigned long skipped_hb;
struct timer_list hb_tmofunc;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index bb2d9e238225..4ed48ed38e79 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -148,9 +148,9 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = vport->phba;
struct lpfc_nvmet_tgtport *tgtp;
struct nvme_fc_local_port *localport;
- struct lpfc_nvme_lport *lport;
- struct lpfc_nvme_rport *rport;
+ struct lpfc_nodelist *ndlp;
struct nvme_fc_remote_port *nrport;
+ uint64_t data1, data2, data3, tot;
char *statep;
int len = 0;
@@ -171,7 +171,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
else
statep = "INIT";
len += snprintf(buf + len, PAGE_SIZE - len,
- "NVME Target: Enabled State %s\n",
+ "NVME Target Enabled State %s\n",
statep);
len += snprintf(buf + len, PAGE_SIZE - len,
"%s%d WWPN x%llx WWNN x%llx DID x%06x\n",
@@ -245,11 +245,21 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
atomic_read(&tgtp->xmt_abort_rsp),
atomic_read(&tgtp->xmt_abort_rsp_error));
+ spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
+ spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
+ tot = phba->sli4_hba.nvmet_xri_cnt -
+ (phba->sli4_hba.nvmet_ctx_get_cnt +
+ phba->sli4_hba.nvmet_ctx_put_cnt);
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
+
len += snprintf(buf + len, PAGE_SIZE - len,
- "IO_CTX: %08x outstanding %08x total %x",
- phba->sli4_hba.nvmet_ctx_cnt,
+ "IO_CTX: %08x WAIT: cur %08x tot %08x\n"
+ "CTX Outstanding %08llx\n",
+ phba->sli4_hba.nvmet_xri_cnt,
phba->sli4_hba.nvmet_io_wait_cnt,
- phba->sli4_hba.nvmet_io_wait_total);
+ phba->sli4_hba.nvmet_io_wait_total,
+ tot);
len += snprintf(buf+len, PAGE_SIZE-len, "\n");
return len;
@@ -265,7 +275,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n");
spin_lock_irq(shost->host_lock);
- lport = (struct lpfc_nvme_lport *)localport->private;
/* Port state is only one of two values for now. */
if (localport->port_id)
@@ -281,9 +290,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
wwn_to_u64(vport->fc_nodename.u.wwn),
localport->port_id, statep);
- list_for_each_entry(rport, &lport->rport_list, list) {
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!ndlp->nrport)
+ continue;
+
/* local short-hand pointer. */
- nrport = rport->remoteport;
+ nrport = ndlp->nrport->remoteport;
/* Port state is only one of two values for now. */
switch (nrport->port_state) {
@@ -311,25 +323,23 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
len += snprintf(buf + len, PAGE_SIZE - len, "DID x%06x ",
nrport->port_id);
- switch (nrport->port_role) {
- case FC_PORT_ROLE_NVME_INITIATOR:
+ /* An NVME rport can have multiple roles. */
+ if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR)
len += snprintf(buf + len, PAGE_SIZE - len,
"INITIATOR ");
- break;
- case FC_PORT_ROLE_NVME_TARGET:
+ if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET)
len += snprintf(buf + len, PAGE_SIZE - len,
"TARGET ");
- break;
- case FC_PORT_ROLE_NVME_DISCOVERY:
+ if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY)
len += snprintf(buf + len, PAGE_SIZE - len,
- "DISCOVERY ");
- break;
- default:
+ "DISCSRVC ");
+ if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
+ FC_PORT_ROLE_NVME_TARGET |
+ FC_PORT_ROLE_NVME_DISCOVERY))
len += snprintf(buf + len, PAGE_SIZE - len,
- "UNKNOWN_ROLE x%x",
+ "UNKNOWN ROLE x%x",
nrport->port_role);
- break;
- }
+
len += snprintf(buf + len, PAGE_SIZE - len, "%s ", statep);
/* Terminate the string. */
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
@@ -338,19 +348,21 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n");
len += snprintf(buf+len, PAGE_SIZE-len,
- "LS: Xmt %016llx Cmpl %016llx\n",
- phba->fc4NvmeLsRequests,
- phba->fc4NvmeLsCmpls);
-
+ "LS: Xmt %016x Cmpl %016x\n",
+ atomic_read(&phba->fc4NvmeLsRequests),
+ atomic_read(&phba->fc4NvmeLsCmpls));
+
+ tot = atomic_read(&phba->fc4NvmeIoCmpls);
+ data1 = atomic_read(&phba->fc4NvmeInputRequests);
+ data2 = atomic_read(&phba->fc4NvmeOutputRequests);
+ data3 = atomic_read(&phba->fc4NvmeControlRequests);
len += snprintf(buf+len, PAGE_SIZE-len,
"FCP: Rd %016llx Wr %016llx IO %016llx\n",
- phba->fc4NvmeInputRequests,
- phba->fc4NvmeOutputRequests,
- phba->fc4NvmeControlRequests);
+ data1, data2, data3);
len += snprintf(buf+len, PAGE_SIZE-len,
- " Cmpl %016llx\n", phba->fc4NvmeIoCmpls);
-
+ " Cmpl %016llx Outstanding %016llx\n",
+ tot, (data1 + data2 + data3) - tot);
return len;
}
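The new Outstanding figure is derived rather than tracked: every issued command lands in exactly one of the Rd/Wr/IO (control) counters, so issued minus completed is the in-flight count. A worked example with hypothetical snapshot values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical counter snapshots. */
	uint64_t rd = 1000, wr = 800, ctl = 50, cmpl = 1825;

	printf(" Cmpl %016llx Outstanding %016llx\n",
	       (unsigned long long)cmpl,
	       (unsigned long long)((rd + wr + ctl) - cmpl));
	/* -> " Cmpl 0000000000000721 Outstanding 0000000000000019" (25 in flight) */
	return 0;
}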
@@ -1342,6 +1354,8 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
goto board_mode_out;
}
wait_for_completion(&online_compl);
+ if (status)
+ status = -EIO;
} else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
@@ -3198,9 +3212,12 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
shost = lpfc_shost_from_vport(vport);
spin_lock_irq(shost->host_lock);
- list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
- if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport)
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
+ if (ndlp->rport)
ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
+ }
spin_unlock_irq(shost->host_lock);
}
@@ -4467,9 +4484,11 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
phba->cfg_fcp_imax = (uint32_t)val;
+ phba->initial_imax = phba->cfg_fcp_imax;
for (i = 0; i < phba->io_channel_irqs; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
- lpfc_modify_hba_eq_delay(phba, i);
+ lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT,
+ val);
return strlen(buf);
}
@@ -4524,6 +4543,16 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR,
lpfc_fcp_imax_show, lpfc_fcp_imax_store);
+/*
+ * lpfc_auto_imax: Controls automatic interrupt coalescing (auto imax) support.
+ * 0 No auto_imax support
+ * 1 auto imax on
+ * When enabled, auto imax adjusts fcp_imax on a per-EQ basis, using
+ * the EQ Delay Multiplier, based on the activity of that EQ.
+ * Value range [0,1]. Default value is 1.
+ */
+LPFC_ATTR_RW(auto_imax, 1, 0, 1, "Enable Auto imax");
+
/**
* lpfc_state_show - Display current driver CPU affinity
* @dev: class converted to a Scsi_host structure.
@@ -5150,6 +5179,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_task_mgmt_tmo,
&dev_attr_lpfc_use_msi,
&dev_attr_lpfc_nvme_oas,
+ &dev_attr_lpfc_auto_imax,
&dev_attr_lpfc_fcp_imax,
&dev_attr_lpfc_fcp_cpu_map,
&dev_attr_lpfc_fcp_io_channel,
@@ -6168,6 +6198,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN);
lpfc_use_msi_init(phba, lpfc_use_msi);
lpfc_nvme_oas_init(phba, lpfc_nvme_oas);
+ lpfc_auto_imax_init(phba, lpfc_auto_imax);
lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
@@ -6212,6 +6243,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP;
}
+ if (phba->cfg_auto_imax && !phba->cfg_fcp_imax)
+ phba->cfg_auto_imax = 0;
+ phba->initial_imax = phba->cfg_fcp_imax;
+
/* A value of 0 means use the number of CPUs found in the system */
if (phba->cfg_fcp_io_channel == 0)
phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu;
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 24ce96dcc94d..9c0c1463057d 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -503,26 +503,23 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
Did, vport->fc_flag, vport->fc_rscn_id_cnt);
/*
- * This NPortID was previously a FCP target,
+ * This NPortID was previously a FCP/NVMe target,
* Don't even bother to send GFF_ID.
*/
ndlp = lpfc_findnode_did(vport, Did);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp))
- ndlp->nlp_fc4_type = fc4_type;
-
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
- ndlp->nlp_fc4_type = fc4_type;
-
- if (ndlp->nlp_type & NLP_FCP_TARGET)
- lpfc_setup_disc_node(vport, Did);
-
- else if (lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID,
- 0, Did) == 0)
- vport->num_disc_nodes++;
-
- else
- lpfc_setup_disc_node(vport, Did);
- }
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+ (ndlp->nlp_type &
+ (NLP_FCP_TARGET | NLP_NVME_TARGET))) {
+ if (fc4_type == FC_TYPE_FCP)
+ ndlp->nlp_fc4_type |= NLP_FC4_FCP;
+ if (fc4_type == FC_TYPE_NVME)
+ ndlp->nlp_fc4_type |= NLP_FC4_NVME;
+ lpfc_setup_disc_node(vport, Did);
+ } else if (lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID,
+ 0, Did) == 0)
+ vport->num_disc_nodes++;
+ else
+ lpfc_setup_disc_node(vport, Did);
} else {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"Skip2 GID_FTrsp: did:x%x flg:x%x cnt:%d",
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 4bcb92c844ca..5cc8b0f7d885 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -323,7 +323,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
raw_index = phba->hbq_get[i];
getidx = le32_to_cpu(raw_index);
len += snprintf(buf+len, size-len,
- "entrys:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n",
+ "entries:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n",
hbqs->entry_count, hbqs->buffer_count, hbqs->hbqPutIdx,
hbqs->next_hbqPutIdx, hbqs->local_hbqGetIdx, getidx);
@@ -550,8 +550,6 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
struct lpfc_nodelist *ndlp;
unsigned char *statep;
struct nvme_fc_local_port *localport;
- struct lpfc_nvme_lport *lport;
- struct lpfc_nvme_rport *rport;
struct lpfc_nvmet_tgtport *tgtp;
struct nvme_fc_remote_port *nrport;
@@ -623,6 +621,13 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
ndlp->nlp_sid);
if (ndlp->nlp_type & NLP_FCP_INITIATOR)
len += snprintf(buf+len, size-len, "FCP_INITIATOR ");
+ if (ndlp->nlp_type & NLP_NVME_TARGET)
+ len += snprintf(buf + len,
+ size - len, "NVME_TGT sid:%d ",
+ NLP_NO_SID);
+ if (ndlp->nlp_type & NLP_NVME_INITIATOR)
+ len += snprintf(buf + len,
+ size - len, "NVME_INITIATOR ");
len += snprintf(buf+len, size-len, "usgmap:%x ",
ndlp->nlp_usg_map);
len += snprintf(buf+len, size-len, "refcnt:%x",
@@ -660,7 +665,6 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
goto out_exit;
spin_lock_irq(shost->host_lock);
- lport = (struct lpfc_nvme_lport *)localport->private;
/* Port state is only one of two values for now. */
if (localport->port_id)
@@ -673,9 +677,12 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
localport->port_id, statep);
len += snprintf(buf + len, size - len, "\tRport List:\n");
- list_for_each_entry(rport, &lport->rport_list, list) {
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
/* local short-hand pointer. */
- nrport = rport->remoteport;
+ if (!ndlp->nrport)
+ continue;
+
+ nrport = ndlp->nrport->remoteport;
/* Port state is only one of two values for now. */
switch (nrport->port_state) {
@@ -698,26 +705,23 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
nrport->port_name);
len += snprintf(buf + len, size - len, "WWNN x%llx ",
nrport->node_name);
- switch (nrport->port_role) {
- case FC_PORT_ROLE_NVME_INITIATOR:
+
+ /* An NVME rport can have multiple roles. */
+ if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR)
len += snprintf(buf + len, size - len,
- "NVME INITIATOR ");
- break;
- case FC_PORT_ROLE_NVME_TARGET:
+ "INITIATOR ");
+ if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET)
len += snprintf(buf + len, size - len,
- "NVME TARGET ");
- break;
- case FC_PORT_ROLE_NVME_DISCOVERY:
+ "TARGET ");
+ if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY)
len += snprintf(buf + len, size - len,
- "NVME DISCOVERY ");
- break;
- default:
+ "DISCSRVC ");
+ if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
+ FC_PORT_ROLE_NVME_TARGET |
+ FC_PORT_ROLE_NVME_DISCOVERY))
len += snprintf(buf + len, size - len,
"UNKNOWN ROLE x%x",
nrport->port_role);
- break;
- }
-
/* Terminate the string. */
len += snprintf(buf + len, size - len, "\n");
}
@@ -746,6 +750,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
struct lpfc_hba *phba = vport->phba;
struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+ uint64_t tot, data1, data2, data3;
int len = 0;
int cnt;
@@ -843,11 +848,21 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
}
+ spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
+ spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
+ tot = phba->sli4_hba.nvmet_xri_cnt -
+ (phba->sli4_hba.nvmet_ctx_get_cnt +
+ phba->sli4_hba.nvmet_ctx_put_cnt);
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
+
len += snprintf(buf + len, size - len,
- "IO_CTX: %08x outstanding %08x total %08x\n",
- phba->sli4_hba.nvmet_ctx_cnt,
+ "IO_CTX: %08x WAIT: cur %08x tot %08x\n"
+ "CTX Outstanding %08llx\n",
+ phba->sli4_hba.nvmet_xri_cnt,
phba->sli4_hba.nvmet_io_wait_cnt,
- phba->sli4_hba.nvmet_io_wait_total);
+ phba->sli4_hba.nvmet_io_wait_total,
+ tot);
} else {
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return len;
@@ -856,18 +871,22 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
"\nNVME Lport Statistics\n");
len += snprintf(buf + len, size - len,
- "LS: Xmt %016llx Cmpl %016llx\n",
- phba->fc4NvmeLsRequests,
- phba->fc4NvmeLsCmpls);
+ "LS: Xmt %016x Cmpl %016x\n",
+ atomic_read(&phba->fc4NvmeLsRequests),
+ atomic_read(&phba->fc4NvmeLsCmpls));
+
+ tot = atomic_read(&phba->fc4NvmeIoCmpls);
+ data1 = atomic_read(&phba->fc4NvmeInputRequests);
+ data2 = atomic_read(&phba->fc4NvmeOutputRequests);
+ data3 = atomic_read(&phba->fc4NvmeControlRequests);
len += snprintf(buf + len, size - len,
"FCP: Rd %016llx Wr %016llx IO %016llx\n",
- phba->fc4NvmeInputRequests,
- phba->fc4NvmeOutputRequests,
- phba->fc4NvmeControlRequests);
+ data1, data2, data3);
len += snprintf(buf + len, size - len,
- " Cmpl %016llx\n", phba->fc4NvmeIoCmpls);
+ " Cmpl %016llx Outstanding %016llx\n",
+ tot, (data1 + data2 + data3) - tot);
}
return len;
@@ -1949,10 +1968,6 @@ lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf,
if (nbytes > 64)
nbytes = 64;
- /* Protect copy from user */
- if (!access_ok(VERIFY_READ, buf, nbytes))
- return -EFAULT;
-
memset(mybuf, 0, sizeof(mybuf));
if (copy_from_user(mybuf, buf, nbytes))
@@ -2037,10 +2052,6 @@ lpfc_debugfs_nvmektime_write(struct file *file, const char __user *buf,
if (nbytes > 64)
nbytes = 64;
- /* Protect copy from user */
- if (!access_ok(VERIFY_READ, buf, nbytes))
- return -EFAULT;
-
memset(mybuf, 0, sizeof(mybuf));
if (copy_from_user(mybuf, buf, nbytes))
@@ -2169,10 +2180,6 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
if (nbytes > 64)
nbytes = 64;
- /* Protect copy from user */
- if (!access_ok(VERIFY_READ, buf, nbytes))
- return -EFAULT;
-
memset(mybuf, 0, sizeof(mybuf));
if (copy_from_user(mybuf, buf, nbytes))
@@ -2280,10 +2287,6 @@ lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf,
if (nbytes > 64)
nbytes = 64;
- /* Protect copy from user */
- if (!access_ok(VERIFY_READ, buf, nbytes))
- return -EFAULT;
-
memset(mybuf, 0, sizeof(mybuf));
if (copy_from_user(mybuf, buf, nbytes))
@@ -2354,10 +2357,6 @@ static int lpfc_idiag_cmd_get(const char __user *buf, size_t nbytes,
int i;
size_t bsize;
- /* Protect copy from user */
- if (!access_ok(VERIFY_READ, buf, nbytes))
- return -EFAULT;
-
memset(mybuf, 0, sizeof(mybuf));
memset(idiag_cmd, 0, sizeof(*idiag_cmd));
bsize = min(nbytes, (sizeof(mybuf)-1));
@@ -3249,9 +3248,9 @@ __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype,
len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"\n%s EQ info: EQ-STAT[max:x%x noE:x%x "
- "bs:x%x proc:x%llx]\n",
+ "bs:x%x proc:x%llx eqd %d]\n",
eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3,
- (unsigned long long)qp->q_cnt_4);
+ (unsigned long long)qp->q_cnt_4, qp->q_mode);
len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"EQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
"HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 8e532b39ae93..6d1d6f691df4 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -2168,6 +2168,19 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_fc4_type, ndlp->nlp_DID);
return 1;
}
+
+ /* SLI3 ports don't support NVME. If this rport is a strict NVME
+ * FC4 type, implicitly LOGO.
+ */
+ if (phba->sli_rev == LPFC_SLI_REV3 &&
+ ndlp->nlp_fc4_type == NLP_FC4_NVME) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n",
+				 ndlp->nlp_fc4_type);
+ lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
+ return 1;
+ }
+
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, elscmd);
if (!elsiocb)
@@ -2268,7 +2281,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* The driver supports 2 FC4 types. Make sure
* a PRLI is issued for all types before exiting.
*/
- if (local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME))
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME))
goto send_next_prli;
return 0;
@@ -3332,6 +3346,19 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*/
switch (stat.un.b.lsRjtRsnCode) {
case LSRJT_UNABLE_TPC:
+ /* The driver has a VALID PLOGI but the rport has
+ * rejected the PRLI - can't do it now. Delay
+ * for 1 second and try again - don't care about
+ * the explanation.
+ */
+ if (cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) {
+ delay = 1000;
+ maxretry = lpfc_max_els_tries + 1;
+ retry = 1;
+ break;
+ }
+
+ /* Legacy bug fix code for targets with PLOGI delays. */
if (stat.un.b.lsRjtRsnCodeExp ==
LSEXP_CMD_IN_PROGRESS) {
if (cmd == ELS_CMD_PLOGI) {
@@ -3350,9 +3377,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
retry = 1;
break;
}
- if ((cmd == ELS_CMD_PLOGI) ||
- (cmd == ELS_CMD_PRLI) ||
- (cmd == ELS_CMD_NVMEPRLI)) {
+ if (cmd == ELS_CMD_PLOGI) {
delay = 1000;
maxretry = lpfc_max_els_tries + 1;
retry = 1;
@@ -5678,27 +5703,13 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
rjt_err = LSRJT_CMD_UNSUPPORTED;
goto rjt;
}
- if (beacon->lcb_frequency == 0) {
- rjt_err = LSRJT_CMD_UNSUPPORTED;
- goto rjt;
- }
- if ((beacon->lcb_type != LPFC_LCB_GREEN) &&
- (beacon->lcb_type != LPFC_LCB_AMBER)) {
- rjt_err = LSRJT_CMD_UNSUPPORTED;
- goto rjt;
- }
- if ((beacon->lcb_sub_command != LPFC_LCB_ON) &&
- (beacon->lcb_sub_command != LPFC_LCB_OFF)) {
- rjt_err = LSRJT_CMD_UNSUPPORTED;
- goto rjt;
- }
- if ((beacon->lcb_sub_command == LPFC_LCB_ON) &&
- (beacon->lcb_type != LPFC_LCB_GREEN) &&
- (beacon->lcb_type != LPFC_LCB_AMBER)) {
+ if (beacon->lcb_sub_command != LPFC_LCB_ON &&
+ beacon->lcb_sub_command != LPFC_LCB_OFF) {
rjt_err = LSRJT_CMD_UNSUPPORTED;
goto rjt;
}
- if (be16_to_cpu(beacon->lcb_duration) != 0) {
+ if (beacon->lcb_sub_command == LPFC_LCB_ON &&
+ be16_to_cpu(beacon->lcb_duration) != 0) {
rjt_err = LSRJT_CMD_UNSUPPORTED;
goto rjt;
}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 3ffcd9215ca8..aa5e5ff56dfb 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -4167,14 +4167,14 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_unregister_remote_port(ndlp);
}
- /* Notify the NVME transport of this rport's loss on the
- * Initiator. For NVME Target, should upcall transport
- * in the else clause when API available.
- */
if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
vport->phba->nport_event_cnt++;
if (vport->phba->nvmet_support == 0)
+ /* Start devloss */
lpfc_nvme_unregister_port(vport, ndlp);
+ else
+ /* NVMET has no upcall. */
+ lpfc_nlp_put(ndlp);
}
}
@@ -4182,8 +4182,10 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (new_state == NLP_STE_MAPPED_NODE ||
new_state == NLP_STE_UNMAPPED_NODE) {
- if ((ndlp->nlp_fc4_type & NLP_FC4_FCP) ||
- (ndlp->nlp_DID == Fabric_DID)) {
+ if (ndlp->nlp_fc4_type & NLP_FC4_FCP ||
+ ndlp->nlp_DID == Fabric_DID ||
+ ndlp->nlp_DID == NameServer_DID ||
+ ndlp->nlp_DID == FDMI_DID) {
vport->phba->nport_event_cnt++;
/*
* Tell the fc transport about the port, if we haven't
@@ -4192,7 +4194,8 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_register_remote_port(vport, ndlp);
}
/* Notify the NVME transport of this new rport. */
- if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
+ if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
+ ndlp->nlp_fc4_type & NLP_FC4_NVME) {
if (vport->phba->nvmet_support == 0) {
/* Register this rport with the transport.
* Initiators take the NDLP ref count in
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index e0a5fce416ae..bb4715705fa3 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -197,6 +197,7 @@ struct lpfc_sli_intf {
/* Delay Multiplier constant */
#define LPFC_DMULT_CONST 651042
+#define LPFC_DMULT_MAX 1023
/* Configuration of Interrupts / sec for entire HBA port */
#define LPFC_MIN_IMAX 5000
@@ -657,6 +658,15 @@ struct lpfc_register {
#define LPFC_CTL_PORT_ER1_OFFSET 0x40C
#define LPFC_CTL_PORT_ER2_OFFSET 0x410
+#define LPFC_CTL_PORT_EQ_DELAY_OFFSET 0x418
+#define lpfc_sliport_eqdelay_delay_SHIFT 16
+#define lpfc_sliport_eqdelay_delay_MASK 0xffff
+#define lpfc_sliport_eqdelay_delay_WORD word0
+#define lpfc_sliport_eqdelay_id_SHIFT 0
+#define lpfc_sliport_eqdelay_id_MASK 0xfff
+#define lpfc_sliport_eqdelay_id_WORD word0
+#define LPFC_SEC_TO_USEC 1000000
+
/* The following Registers apply to SLI4 if_type 0 UCNAs. They typically
* reside in BAR 2.
*/
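The _SHIFT/_MASK/_WORD triplets above are consumed by lpfc's generic bf_set()/bf_get() macros. The sketch below demonstrates how the EQ id and delay pack into word0; the macro definitions are reproduced to match the lpfc_hw4.h style and should be treated as illustrative rather than verbatim:

#include <stdint.h>
#include <stdio.h>

#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = (((value) & name##_MASK) << name##_SHIFT) | \
			      ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

#define lpfc_sliport_eqdelay_delay_SHIFT 16
#define lpfc_sliport_eqdelay_delay_MASK  0xffff
#define lpfc_sliport_eqdelay_delay_WORD  word0
#define lpfc_sliport_eqdelay_id_SHIFT    0
#define lpfc_sliport_eqdelay_id_MASK     0xfff
#define lpfc_sliport_eqdelay_id_WORD     word0

struct lpfc_register { uint32_t word0; };

int main(void)
{
	struct lpfc_register reg = { 0 };

	/* EQ 5, 20us between interrupts: delay in the high half-word,
	 * queue id in the low 12 bits. */
	bf_set(lpfc_sliport_eqdelay_id, &reg, 5);
	bf_set(lpfc_sliport_eqdelay_delay, &reg, 20);
	printf("word0 = 0x%08x (id=%u delay=%u)\n", (unsigned)reg.word0,
	       (unsigned)bf_get(lpfc_sliport_eqdelay_id, &reg),
	       (unsigned)bf_get(lpfc_sliport_eqdelay_delay, &reg));
	/* -> word0 = 0x00140005 (id=5 delay=20) */
	return 0;
}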
@@ -3258,6 +3268,10 @@ struct lpfc_sli4_parameters {
#define cfg_xib_SHIFT 4
#define cfg_xib_MASK 0x00000001
#define cfg_xib_WORD word19
+#define cfg_eqdr_SHIFT 8
+#define cfg_eqdr_MASK 0x00000001
+#define cfg_eqdr_WORD word19
+#define LPFC_NODELAY_MAX_IO 32
};
#define LPFC_SET_UE_RECOVERY 0x10
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 9add9473cae5..491aa95eb0f6 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1249,6 +1249,12 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
int retval, i;
struct lpfc_sli *psli = &phba->sli;
LIST_HEAD(completions);
+ struct lpfc_queue *qp;
+ unsigned long time_elapsed;
+ uint32_t tick_cqe, max_cqe, val;
+ uint64_t tot, data1, data2, data3;
+ struct lpfc_register reg_data;
+ void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr;
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
@@ -1263,6 +1269,98 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
(phba->pport->fc_flag & FC_OFFLINE_MODE))
return;
+ if (phba->cfg_auto_imax) {
+ if (!phba->last_eqdelay_time) {
+ phba->last_eqdelay_time = jiffies;
+ goto skip_eqdelay;
+ }
+ time_elapsed = jiffies - phba->last_eqdelay_time;
+ phba->last_eqdelay_time = jiffies;
+
+ tot = 0xffff;
+ /* Check outstanding IO count */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ if (phba->nvmet_support) {
+ spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
+ spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
+ tot = phba->sli4_hba.nvmet_xri_cnt -
+ (phba->sli4_hba.nvmet_ctx_get_cnt +
+ phba->sli4_hba.nvmet_ctx_put_cnt);
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
+ } else {
+ tot = atomic_read(&phba->fc4NvmeIoCmpls);
+ data1 = atomic_read(
+ &phba->fc4NvmeInputRequests);
+ data2 = atomic_read(
+ &phba->fc4NvmeOutputRequests);
+ data3 = atomic_read(
+ &phba->fc4NvmeControlRequests);
+ tot = (data1 + data2 + data3) - tot;
+ }
+ }
+
+ /* Interrupts per sec per EQ */
+ val = phba->cfg_fcp_imax / phba->io_channel_irqs;
+ tick_cqe = val / CONFIG_HZ; /* Per tick per EQ */
+
+ /* Assume 1 CQE/ISR, calc max CQEs allowed for time duration */
+ max_cqe = time_elapsed * tick_cqe;
+
+ for (i = 0; i < phba->io_channel_irqs; i++) {
+ /* Fast-path EQ */
+ qp = phba->sli4_hba.hba_eq[i];
+ if (!qp)
+ continue;
+
+ /* Use no EQ delay if we don't have many outstanding
+ * IOs, or if we are only processing 1 CQE/ISR or less.
+ * Otherwise, assume we can process up to lpfc_fcp_imax
+ * interrupts per HBA.
+ */
+ if (tot < LPFC_NODELAY_MAX_IO ||
+ qp->EQ_cqe_cnt <= max_cqe)
+ val = 0;
+ else
+ val = phba->cfg_fcp_imax;
+
+ if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
+ /* Use EQ Delay Register method */
+
+ /* Convert for EQ Delay register */
+ if (val) {
+ /* First, interrupts per sec per EQ */
+ val = phba->cfg_fcp_imax /
+ phba->io_channel_irqs;
+
+ /* us delay between each interrupt */
+ val = LPFC_SEC_TO_USEC / val;
+ }
+ if (val != qp->q_mode) {
+ reg_data.word0 = 0;
+ bf_set(lpfc_sliport_eqdelay_id,
+ &reg_data, qp->queue_id);
+ bf_set(lpfc_sliport_eqdelay_delay,
+ &reg_data, val);
+ writel(reg_data.word0, eqdreg);
+ }
+ } else {
+ /* Use mbox command method */
+ if (val != qp->q_mode)
+ lpfc_modify_hba_eq_delay(phba, i,
+ 1, val);
+ }
+
+ /*
+			 * val is cfg_fcp_imax or 0 for the mbox method, or the
+			 * usec delay between interrupts for the EQDR method.
+ */
+ qp->q_mode = val;
+ qp->EQ_cqe_cnt = 0;
+ }
+ }
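To make the heuristic concrete, here is the arithmetic above worked through for one illustrative configuration (the numbers are examples, not values taken from this patch):

#include <stdio.h>

int main(void)
{
	unsigned int cfg_fcp_imax = 150000;	/* interrupts/sec, whole HBA */
	unsigned int io_channel_irqs = 4;	/* number of fast-path EQs */
	unsigned int hz = 250;			/* CONFIG_HZ */
	unsigned long time_elapsed = 1250;	/* jiffies since last pass (5s) */

	unsigned int val = cfg_fcp_imax / io_channel_irqs;	/* 37500 int/s/EQ */
	unsigned int tick_cqe = val / hz;			/* 150 CQEs/tick/EQ */
	unsigned long max_cqe = time_elapsed * tick_cqe;	/* 187500 CQE budget */

	/* EQDR method: the register takes usec between interrupts. */
	unsigned int eqd_usec = 1000000 / val;		/* LPFC_SEC_TO_USEC / val = 26 */

	printf("rate=%u/s tick=%u budget=%lu eqd=%uus\n",
	       val, tick_cqe, max_cqe, eqd_usec);
	return 0;
}

An EQ whose EQ_cqe_cnt stayed at or under the budget (or a port with fewer than LPFC_NODELAY_MAX_IO outstanding IOs) gets val = 0, i.e. no coalescing delay.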
+
+skip_eqdelay:
spin_lock_irq(&phba->pport->work_port_lock);
if (time_after(phba->last_completion_time +
@@ -2707,13 +2805,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY);
- if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
- /* Remove the NVME transport reference now and
- * continue to remove the node.
- */
- lpfc_nlp_put(ndlp);
- }
-
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RM);
}
@@ -3392,7 +3483,6 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
/* For NVMET, ALL remaining XRIs are dedicated for IO processing */
nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
-
if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
/* els xri-sgl expanded */
xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
@@ -3596,14 +3686,6 @@ lpfc_get_wwpn(struct lpfc_hba *phba)
LPFC_MBOXQ_t *mboxq;
MAILBOX_t *mb;
- if (phba->sli_rev < LPFC_SLI_REV4) {
- /* Reset the port first */
- lpfc_sli_brdrestart(phba);
- rc = lpfc_sli_chipset_init(phba);
- if (rc)
- return (uint64_t)-1;
- }
-
mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
GFP_KERNEL);
if (!mboxq)
@@ -3757,8 +3839,19 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
int i;
uint64_t wwn;
bool use_no_reset_hba = false;
+ int rc;
- wwn = lpfc_get_wwpn(phba);
+ if (lpfc_no_hba_reset_cnt) {
+ if (phba->sli_rev < LPFC_SLI_REV4 &&
+ dev == &phba->pcidev->dev) {
+ /* Reset the port first */
+ lpfc_sli_brdrestart(phba);
+ rc = lpfc_sli_chipset_init(phba);
+ if (rc)
+ return NULL;
+ }
+ wwn = lpfc_get_wwpn(phba);
+ }
for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
if (wwn == lpfc_no_hba_reset[i]) {
@@ -5837,7 +5930,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
- INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_get_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_put_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
/* Fast-path XRI aborted CQ Event work queue list */
@@ -5846,7 +5940,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* This abort list used by worker thread */
spin_lock_init(&phba->sli4_hba.sgl_list_lock);
- spin_lock_init(&phba->sli4_hba.nvmet_io_lock);
+ spin_lock_init(&phba->sli4_hba.nvmet_ctx_get_lock);
+ spin_lock_init(&phba->sli4_hba.nvmet_ctx_put_lock);
spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
/*
@@ -6731,6 +6826,16 @@ lpfc_create_shost(struct lpfc_hba *phba)
phba->fc_arbtov = FF_DEF_ARBTOV;
atomic_set(&phba->sdev_cnt, 0);
+ atomic_set(&phba->fc4ScsiInputRequests, 0);
+ atomic_set(&phba->fc4ScsiOutputRequests, 0);
+ atomic_set(&phba->fc4ScsiControlRequests, 0);
+ atomic_set(&phba->fc4ScsiIoCmpls, 0);
+ atomic_set(&phba->fc4NvmeInputRequests, 0);
+ atomic_set(&phba->fc4NvmeOutputRequests, 0);
+ atomic_set(&phba->fc4NvmeControlRequests, 0);
+ atomic_set(&phba->fc4NvmeIoCmpls, 0);
+ atomic_set(&phba->fc4NvmeLsRequests, 0);
+ atomic_set(&phba->fc4NvmeLsCmpls, 0);
vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
if (!vport)
return -ENODEV;
@@ -7247,6 +7352,9 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
break;
case LPFC_SLI_INTF_IF_TYPE_2:
+ phba->sli4_hba.u.if_type2.EQDregaddr =
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_EQ_DELAY_OFFSET;
phba->sli4_hba.u.if_type2.ERR1regaddr =
phba->sli4_hba.conf_regs_memmap_p +
LPFC_CTL_PORT_ER1_OFFSET;
@@ -8773,7 +8881,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
}
for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
- lpfc_modify_hba_eq_delay(phba, qidx);
+ lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
+ phba->cfg_fcp_imax);
return 0;
@@ -9655,6 +9764,7 @@ static int
lpfc_sli4_enable_msix(struct lpfc_hba *phba)
{
int vectors, rc, index;
+ char *name;
/* Set up MSI-X multi-message vectors */
vectors = phba->io_channel_irqs;
@@ -9673,9 +9783,9 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
/* Assign MSI-X vectors to interrupt handlers */
for (index = 0; index < vectors; index++) {
- memset(&phba->sli4_hba.handler_name[index], 0, 16);
- snprintf((char *)&phba->sli4_hba.handler_name[index],
- LPFC_SLI4_HANDLER_NAME_SZ,
+ name = phba->sli4_hba.hba_eq_hdl[index].handler_name;
+ memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
+ snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
LPFC_DRIVER_HANDLER_NAME"%d", index);
phba->sli4_hba.hba_eq_hdl[index].idx = index;
@@ -9684,12 +9794,12 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
if (phba->cfg_fof && (index == (vectors - 1)))
rc = request_irq(pci_irq_vector(phba->pcidev, index),
&lpfc_sli4_fof_intr_handler, 0,
- (char *)&phba->sli4_hba.handler_name[index],
+ name,
&phba->sli4_hba.hba_eq_hdl[index]);
else
rc = request_irq(pci_irq_vector(phba->pcidev, index),
&lpfc_sli4_hba_intr_handler, 0,
- (char *)&phba->sli4_hba.handler_name[index],
+ name,
&phba->sli4_hba.hba_eq_hdl[index]);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -10241,6 +10351,9 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
if (bf_get(cfg_xib, mbx_sli4_parameters) && phba->cfg_suppress_rsp)
phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
+ if (bf_get(cfg_eqdr, mbx_sli4_parameters))
+ phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
+
/* Make sure that sge_supp_len can be handled by the driver */
if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 8008c8205fb6..0a0a1b92d01d 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -186,13 +186,12 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
/* Remove this rport from the lport's list - memory is owned by the
* transport. Remove the ndlp reference for the NVME transport before
- * calling state machine to remove the node, this is devloss = 0
- * semantics.
+ * calling state machine to remove the node.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6146 remoteport delete complete %p\n",
remoteport);
- list_del(&rport->list);
+ ndlp->nrport = NULL;
lpfc_nlp_put(ndlp);
rport_err:
@@ -212,7 +211,7 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_dmabuf *buf_ptr;
struct lpfc_nodelist *ndlp;
- vport->phba->fc4NvmeLsCmpls++;
+ atomic_inc(&vport->phba->fc4NvmeLsCmpls);
pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
@@ -479,7 +478,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
&pnvme_lsreq->rspdma);
- vport->phba->fc4NvmeLsRequests++;
+ atomic_inc(&vport->phba->fc4NvmeLsRequests);
/* Hardcode the wait to 30 seconds. Connections are failing otherwise.
* This code allows it all to work.
@@ -774,7 +773,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
wcqe);
return;
}
- phba->fc4NvmeIoCmpls++;
+ atomic_inc(&phba->fc4NvmeIoCmpls);
nCmd = lpfc_ncmd->nvmeCmd;
rport = lpfc_ncmd->nrport;
@@ -999,7 +998,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
NVME_WRITE_CMD);
- phba->fc4NvmeOutputRequests++;
+ atomic_inc(&phba->fc4NvmeOutputRequests);
} else {
/* Word 7 */
bf_set(wqe_cmnd, &wqe->generic.wqe_com,
@@ -1020,7 +1019,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
NVME_READ_CMD);
- phba->fc4NvmeInputRequests++;
+ atomic_inc(&phba->fc4NvmeInputRequests);
}
} else {
/* Word 4 */
@@ -1041,7 +1040,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
/* Word 11 */
bf_set(wqe_cmd_type, &wqe->generic.wqe_com, NVME_READ_CMD);
- phba->fc4NvmeControlRequests++;
+ atomic_inc(&phba->fc4NvmeControlRequests);
}
/*
* Finish initializing those WQE fields that are independent
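
[Editor's note: the conversions above turn the fc4Nvme* statistics from plain increments into atomic_inc() calls, since LS and IO completions run concurrently on several CPUs. A small C11 sketch of why that matters, using a hypothetical counter name:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-in for one of the fc4Nvme* counters. */
static atomic_uint ls_cmpls;

static void *completion_path(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++)
		atomic_fetch_add_explicit(&ls_cmpls, 1,
					  memory_order_relaxed);
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, completion_path, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);

	/* A plain `ls_cmpls++` here would race and undercount. */
	printf("completions: %u\n", atomic_load(&ls_cmpls));
	return 0;
}
]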
@@ -1362,6 +1361,13 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
return 0;
out_free_nvme_buf:
+ if (lpfc_ncmd->nvmeCmd->sg_cnt) {
+ if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
+ atomic_dec(&phba->fc4NvmeOutputRequests);
+ else
+ atomic_dec(&phba->fc4NvmeInputRequests);
+ } else
+ atomic_dec(&phba->fc4NvmeControlRequests);
lpfc_release_nvme_buf(phba, lpfc_ncmd);
out_fail:
return ret;
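
[Editor's note: the new out_free_nvme_buf error path undoes whichever request counter lpfc_nvme_prep_io_cmd() bumped, keyed on IO direction, so the statistics stay balanced when a submit fails. A sketch of that count-then-roll-back shape, with hypothetical counter and direction names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum io_dir { IO_WRITE, IO_READ, IO_CTRL };

static atomic_uint out_reqs, in_reqs, ctrl_reqs; /* hypothetical */

static atomic_uint *counter_for(enum io_dir dir)
{
	switch (dir) {
	case IO_WRITE: return &out_reqs;
	case IO_READ:  return &in_reqs;
	default:       return &ctrl_reqs;
	}
}

/* Submit path: count first, roll the same counter back on failure
 * so the statistics stay balanced, mirroring out_free_nvme_buf. */
static bool submit(enum io_dir dir, bool will_fail)
{
	atomic_fetch_add(counter_for(dir), 1);
	if (will_fail) {
		atomic_fetch_sub(counter_for(dir), 1); /* undo */
		return false;
	}
	return true;
}

int main(void)
{
	submit(IO_WRITE, false);
	submit(IO_READ, true);	/* rolled back */
	printf("out=%u in=%u ctrl=%u\n",
	       atomic_load(&out_reqs), atomic_load(&in_reqs),
	       atomic_load(&ctrl_reqs));
	return 0;
}
]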
@@ -1421,7 +1427,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_lport *lport;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
- struct lpfc_nodelist *ndlp;
struct lpfc_nvme_rport *rport;
struct lpfc_nvme_buf *lpfc_nbuf;
struct lpfc_iocbq *abts_buf;
@@ -1443,38 +1448,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
pnvme_rport->port_id,
pnvme_fcreq);
- /*
- * Catch race where our node has transitioned, but the
- * transport is still transitioning.
- */
- ndlp = rport->ndlp;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_ABTS,
- "6054 rport %p, ndlp %p, DID x%06x ndlp "
- " not ready.\n",
- rport, ndlp, pnvme_rport->port_id);
-
- ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
- if (!ndlp) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
- "6055 Could not find node for "
- "DID %x\n",
- pnvme_rport->port_id);
- return;
- }
- }
-
- /* The remote node has to be ready to send an abort. */
- if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) &&
- !(ndlp->nlp_type & NLP_NVME_TARGET)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
- "6048 rport %p, DID x%06x not ready for "
- "IO. State x%x, Type x%x\n",
- rport, pnvme_rport->port_id,
- ndlp->nlp_state, ndlp->nlp_type);
- return;
- }
-
/* If the hba is getting reset, this flag is set. It is
* cleared when the reset is complete and rings reestablished.
*/
@@ -1535,7 +1508,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
nvmereq_wqe->sli4_xritag,
- nvmereq_wqe->hba_wqidx, ndlp->nlp_DID);
+ nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);
/* Outstanding abort is in progress */
if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
@@ -2208,7 +2181,6 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
lport = (struct lpfc_nvme_lport *)localport->private;
vport->localport = localport;
lport->vport = vport;
- INIT_LIST_HEAD(&lport->rport_list);
vport->nvmei_support = 1;
len = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max);
vport->phba->total_nvme_bufs += len;
@@ -2233,7 +2205,6 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
#if (IS_ENABLED(CONFIG_NVME_FC))
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
- struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL;
int ret;
if (vport->nvmei_support == 0)
@@ -2246,19 +2217,6 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
"6011 Destroying NVME localport %p\n",
localport);
- list_for_each_entry_safe(rport, rport_next, &lport->rport_list, list) {
- /* The last node ref has to get released now before the rport
- * private memory area is released by the transport.
- */
- list_del(&rport->list);
-
- init_completion(&rport->rport_unreg_done);
- ret = nvme_fc_unregister_remoteport(rport->remoteport);
- if (ret)
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
- "6008 rport fail destroy %x\n", ret);
- wait_for_completion_timeout(&rport->rport_unreg_done, 5);
- }
/* lport's rport list is clear. Unregister
* lport and release resources.
@@ -2340,99 +2298,68 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
localport = vport->localport;
lport = (struct lpfc_nvme_lport *)localport->private;
- if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) {
-
- /* The driver isn't expecting the rport wwn to change
- * but it might get a different DID on a different
- * fabric.
+ /* NVME rports are not preserved across devloss.
+ * Just register this instance. Note, rpinfo->dev_loss_tmo
+ * is left 0 to indicate accept transport defaults. The
+ * driver communicates port role capabilities consistent
+ * with the PRLI response data.
+ */
+ memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info));
+ rpinfo.port_id = ndlp->nlp_DID;
+ if (ndlp->nlp_type & NLP_NVME_TARGET)
+ rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
+ if (ndlp->nlp_type & NLP_NVME_INITIATOR)
+ rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
+
+ if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
+ rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
+
+ rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
+ rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
+ ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
+ if (!ret) {
+ /* If the ndlp already has an nrport, this is just
+ * a resume of the existing rport. Else this is a
+ * new rport.
*/
- list_for_each_entry(rport, &lport->rport_list, list) {
- if (rport->remoteport->port_name !=
- wwn_to_u64(ndlp->nlp_portname.u.wwn))
- continue;
- lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
- "6035 lport %p, found matching rport "
- "at wwpn 0x%llx, Data: x%x x%x x%x "
- "x%06x\n",
- lport,
- rport->remoteport->port_name,
- rport->remoteport->port_id,
- rport->remoteport->port_role,
+ rport = remote_port->private;
+ if (ndlp->nrport == rport) {
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+ LOG_NVME_DISC,
+ "6014 Rebinding lport to "
+ "rport wwpn 0x%llx, "
+ "Data: x%x x%x x%x x%06x\n",
+ remote_port->port_name,
+ remote_port->port_id,
+ remote_port->port_role,
ndlp->nlp_type,
ndlp->nlp_DID);
- remote_port = rport->remoteport;
- if ((remote_port->port_id == 0) &&
- (remote_port->port_role ==
- FC_PORT_ROLE_NVME_DISCOVERY)) {
- remote_port->port_id = ndlp->nlp_DID;
- remote_port->port_role &=
- ~FC_PORT_ROLE_NVME_DISCOVERY;
- if (ndlp->nlp_type & NLP_NVME_TARGET)
- remote_port->port_role |=
- FC_PORT_ROLE_NVME_TARGET;
- if (ndlp->nlp_type & NLP_NVME_INITIATOR)
- remote_port->port_role |=
- FC_PORT_ROLE_NVME_INITIATOR;
-
- lpfc_printf_vlog(ndlp->vport, KERN_INFO,
- LOG_NVME_DISC,
- "6014 Rebinding lport to "
- "rport wwpn 0x%llx, "
- "Data: x%x x%x x%x x%06x\n",
- remote_port->port_name,
- remote_port->port_id,
- remote_port->port_role,
- ndlp->nlp_type,
- ndlp->nlp_DID);
- }
- return 0;
- }
-
- /* NVME rports are not preserved across devloss.
- * Just register this instance.
- */
- rpinfo.port_id = ndlp->nlp_DID;
- rpinfo.port_role = 0;
- if (ndlp->nlp_type & NLP_NVME_TARGET)
- rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
- if (ndlp->nlp_type & NLP_NVME_INITIATOR)
- rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
- rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
- rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
- ret = nvme_fc_register_remoteport(localport, &rpinfo,
- &remote_port);
- if (!ret) {
- rport = remote_port->private;
+ } else {
+ /* New rport. */
rport->remoteport = remote_port;
rport->lport = lport;
rport->ndlp = lpfc_nlp_get(ndlp);
if (!rport->ndlp)
return -1;
ndlp->nrport = rport;
- INIT_LIST_HEAD(&rport->list);
- list_add_tail(&rport->list, &lport->rport_list);
lpfc_printf_vlog(vport, KERN_INFO,
LOG_NVME_DISC | LOG_NODE,
- "6022 Binding new rport to lport %p "
- "Rport WWNN 0x%llx, Rport WWPN 0x%llx "
- "DID x%06x Role x%x\n",
+ "6022 Binding new rport to "
+ "lport %p Rport WWNN 0x%llx, "
+ "Rport WWPN 0x%llx DID "
+ "x%06x Role x%x\n",
lport,
rpinfo.node_name, rpinfo.port_name,
rpinfo.port_id, rpinfo.port_role);
- } else {
- lpfc_printf_vlog(vport, KERN_ERR,
- LOG_NVME_DISC | LOG_NODE,
- "6031 RemotePort Registration failed "
- "err: %d, DID x%06x\n",
- ret, ndlp->nlp_DID);
}
} else {
- ret = -EINVAL;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6027 Unknown nlp_type x%x on DID x%06x "
- "ndlp %p. Not Registering nvme rport\n",
- ndlp->nlp_type, ndlp->nlp_DID, ndlp);
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_NVME_DISC | LOG_NODE,
+ "6031 RemotePort Registration failed "
+ "err: %d, DID x%06x\n",
+ ret, ndlp->nlp_DID);
}
+
return ret;
#else
return 0;
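
[Editor's note: the rewritten registration path always calls nvme_fc_register_remoteport() and then distinguishes a rebind (the ndlp already points at the returned private area) from a genuinely new rport that needs a node reference and back-pointers. A pared-down sketch under simplified, hypothetical structures:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical, pared-down shapes of the driver structures. */
struct node;
struct rport { struct node *ndlp; };
struct node  { struct rport *nrport; };

/* Stand-in for nvme_fc_register_remoteport(): returns the rport
 * private area, reusing an existing one when the transport still
 * tracks this remote port. */
static struct rport *register_remoteport(struct rport *existing)
{
	return existing ? existing : calloc(1, sizeof(struct rport));
}

static void bind_port(struct node *ndlp, struct rport *existing)
{
	struct rport *rport = register_remoteport(existing);

	if (ndlp->nrport == rport) {
		printf("rebind: resume existing rport\n");
	} else {
		/* New rport: take a node reference and link both ways. */
		rport->ndlp = ndlp;
		ndlp->nrport = rport;
		printf("new rport bound\n");
	}
}

int main(void)
{
	struct node n = { 0 };

	bind_port(&n, NULL);		/* first registration: new rport */
	bind_port(&n, n.nrport);	/* re-registration: rebind path  */
	return 0;
}
]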
@@ -2460,7 +2387,6 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_nvme_lport *lport;
struct lpfc_nvme_rport *rport;
struct nvme_fc_remote_port *remoteport;
- unsigned long wait_tmo;
localport = vport->localport;
@@ -2491,6 +2417,10 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
*/
if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) {
init_completion(&rport->rport_unreg_done);
+
+ /* No concern about the role change on the nvme remoteport.
+ * The transport will update it.
+ */
ret = nvme_fc_unregister_remoteport(remoteport);
if (ret != 0) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
@@ -2499,17 +2429,6 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ret, remoteport->port_state);
}
- /* Wait for the driver's delete completion routine to finish
- * before proceeding. This guarantees the transport and driver
- * have completed the unreg process.
- */
- wait_tmo = msecs_to_jiffies(5000);
- ret = wait_for_completion_timeout(&rport->rport_unreg_done,
- wait_tmo);
- if (ret == 0) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
- "6169 Unreg nvme wait timeout\n");
- }
}
return;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index ec32f45daa66..d192bb268f99 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -35,13 +35,11 @@ struct lpfc_nvme_qhandle {
/* Declare nvme-based local and remote port definitions. */
struct lpfc_nvme_lport {
struct lpfc_vport *vport;
- struct list_head rport_list;
struct completion lport_unreg_done;
/* Add stats counters here */
};
struct lpfc_nvme_rport {
- struct list_head list;
struct lpfc_nvme_lport *lport;
struct nvme_fc_remote_port *remoteport;
struct lpfc_nodelist *ndlp;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 518b15e6f222..fbeec344c6cc 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -112,6 +112,15 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
status = bf_get(lpfc_wcqe_c_status, wcqe);
result = wcqe->parameter;
+ ctxp = cmdwqe->context2;
+
+ if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6410 NVMET LS cmpl state mismatch IO x%x: "
+ "%d %d\n",
+ ctxp->oxid, ctxp->state, ctxp->entry_cnt);
+ }
+
if (!phba->targetport)
goto out;
@@ -123,15 +132,14 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
out:
- ctxp = cmdwqe->context2;
rsp = &ctxp->ctx.ls_req;
lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
ctxp->oxid, status, result);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
- "6038 %s: Entrypoint: ctx %p status %x/%x\n", __func__,
- ctxp, status, result);
+ "6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
+ status, result, ctxp->oxid);
lpfc_nlp_put(cmdwqe->context1);
cmdwqe->context2 = NULL;
@@ -162,7 +170,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
struct lpfc_nvmet_tgtport *tgtp;
struct fc_frame_header *fc_hdr;
struct rqb_dmabuf *nvmebuf;
- struct lpfc_dmabuf *hbufp;
uint32_t *payload;
uint32_t size, oxid, sid, rc;
unsigned long iflag;
@@ -173,11 +180,16 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
ctxp->txrdy = NULL;
ctxp->txrdy_phys = 0;
}
+
+ if (ctxp->state == LPFC_NVMET_STE_FREE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6411 NVMET free, already free IO x%x: %d %d\n",
+ ctxp->oxid, ctxp->state, ctxp->entry_cnt);
+ }
ctxp->state = LPFC_NVMET_STE_FREE;
spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
if (phba->sli4_hba.nvmet_io_wait_cnt) {
- hbufp = &nvmebuf->hbuf;
list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
nvmebuf, struct rqb_dmabuf,
hbuf.list);
@@ -193,7 +205,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
sid = sli4_sid_from_fc_hdr(fc_hdr);
ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
- memset(ctxp, 0, sizeof(ctxp->ctx));
ctxp->wqeq = NULL;
ctxp->txrdy = NULL;
ctxp->offset = 0;
@@ -256,11 +267,11 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
}
spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
- spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
+ spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_put_lock, iflag);
list_add_tail(&ctx_buf->list,
- &phba->sli4_hba.lpfc_nvmet_ctx_list);
- phba->sli4_hba.nvmet_ctx_cnt++;
- spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
+ &phba->sli4_hba.lpfc_nvmet_ctx_put_list);
+ phba->sli4_hba.nvmet_ctx_put_cnt++;
+ spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_put_lock, iflag);
#endif
}
@@ -580,8 +591,17 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
int rc;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
- "6023 %s: Entrypoint ctx %p %p\n", __func__,
- ctxp, tgtport);
+ "6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
+
+ if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
+ (ctxp->entry_cnt != 1)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6412 NVMET LS rsp state mismatch "
+ "oxid x%x: %d %d\n",
+ ctxp->oxid, ctxp->state, ctxp->entry_cnt);
+ }
+ ctxp->state = LPFC_NVMET_STE_LS_RSP;
+ ctxp->entry_cnt++;
nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
rsp->rsplen);
@@ -751,15 +771,14 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
unsigned long flags;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6103 Abort op: oxri x%x flg x%x cnt %d\n",
- ctxp->oxid, ctxp->flag, ctxp->entry_cnt);
+ "6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
+ ctxp->oxid, ctxp->flag, ctxp->state);
- lpfc_nvmeio_data(phba, "NVMET FCP ABRT: "
- "xri x%x flg x%x cnt x%x\n",
- ctxp->oxid, ctxp->flag, ctxp->entry_cnt);
+ lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
+ ctxp->oxid, ctxp->flag, ctxp->state);
atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
- ctxp->entry_cnt++;
+
spin_lock_irqsave(&ctxp->ctxlock, flags);
/* Since iaab/iaar are NOT set, we need to check
@@ -770,12 +789,17 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
return;
}
ctxp->flag |= LPFC_NVMET_ABORT_OP;
- if (ctxp->flag & LPFC_NVMET_IO_INP)
- lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
- ctxp->oxid);
- else
+
+ /* A state of LPFC_NVMET_STE_RCV means we have just received
+ * the NVME command and have not yet started processing it
+ * (no IO WQEs have been issued on this exchange).
+ */
+ if (ctxp->state == LPFC_NVMET_STE_RCV)
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
ctxp->oxid);
+ else
+ lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
+ ctxp->oxid);
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
}
@@ -790,6 +814,13 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
unsigned long flags;
bool aborting = false;
+ if (ctxp->state != LPFC_NVMET_STE_DONE &&
+ ctxp->state != LPFC_NVMET_STE_ABORT) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6413 NVMET release bad state %d %d oxid x%x\n",
+ ctxp->state, ctxp->entry_cnt, ctxp->oxid);
+ }
+
spin_lock_irqsave(&ctxp->ctxlock, flags);
if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
(ctxp->flag & LPFC_NVMET_XBUSY)) {
@@ -828,37 +859,55 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = {
.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
};
-void
+static void
lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
{
struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
unsigned long flags;
- list_for_each_entry_safe(
- ctx_buf, next_ctx_buf,
- &phba->sli4_hba.lpfc_nvmet_ctx_list, list) {
- spin_lock_irqsave(
- &phba->sli4_hba.abts_nvme_buf_list_lock, flags);
+ spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, flags);
+ spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
+ list_for_each_entry_safe(ctx_buf, next_ctx_buf,
+ &phba->sli4_hba.lpfc_nvmet_ctx_get_list, list) {
+ spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ list_del_init(&ctx_buf->list);
+ spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ __lpfc_clear_active_sglq(phba,
+ ctx_buf->sglq->sli4_lxritag);
+ ctx_buf->sglq->state = SGL_FREED;
+ ctx_buf->sglq->ndlp = NULL;
+
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
+ list_add_tail(&ctx_buf->sglq->list,
+ &phba->sli4_hba.lpfc_nvmet_sgl_list);
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
+
+ lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
+ kfree(ctx_buf->context);
+ }
+ list_for_each_entry_safe(ctx_buf, next_ctx_buf,
+ &phba->sli4_hba.lpfc_nvmet_ctx_put_list, list) {
+ spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
list_del_init(&ctx_buf->list);
- spin_unlock_irqrestore(
- &phba->sli4_hba.abts_nvme_buf_list_lock, flags);
+ spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
__lpfc_clear_active_sglq(phba,
ctx_buf->sglq->sli4_lxritag);
ctx_buf->sglq->state = SGL_FREED;
ctx_buf->sglq->ndlp = NULL;
- spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags);
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
list_add_tail(&ctx_buf->sglq->list,
&phba->sli4_hba.lpfc_nvmet_sgl_list);
- spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
- flags);
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
kfree(ctx_buf->context);
}
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+ spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, flags);
}
-int
+static int
lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
{
struct lpfc_nvmet_ctxbuf *ctx_buf;
@@ -891,6 +940,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
return -ENOMEM;
}
ctx_buf->context->ctxbuf = ctx_buf;
+ ctx_buf->context->state = LPFC_NVMET_STE_FREE;
ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
if (!ctx_buf->iocbq) {
@@ -926,12 +976,12 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
"6407 Ran out of NVMET XRIs\n");
return -ENOMEM;
}
- spin_lock(&phba->sli4_hba.nvmet_io_lock);
+ spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
list_add_tail(&ctx_buf->list,
- &phba->sli4_hba.lpfc_nvmet_ctx_list);
- spin_unlock(&phba->sli4_hba.nvmet_io_lock);
+ &phba->sli4_hba.lpfc_nvmet_ctx_get_list);
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
}
- phba->sli4_hba.nvmet_ctx_cnt = phba->sli4_hba.nvmet_xri_cnt;
+ phba->sli4_hba.nvmet_ctx_get_cnt = phba->sli4_hba.nvmet_xri_cnt;
return 0;
}
@@ -1103,7 +1153,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
}
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6318 XB aborted %x flg x%x (%x)\n",
+ "6318 XB aborted oxid %x flg x%x (%x)\n",
ctxp->oxid, ctxp->flag, released);
if (released)
lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
@@ -1253,7 +1303,8 @@ dropit:
ctxp->oxid = oxid;
ctxp->sid = sid;
ctxp->wqeq = NULL;
- ctxp->state = LPFC_NVMET_STE_RCV;
+ ctxp->state = LPFC_NVMET_STE_LS_RCV;
+ ctxp->entry_cnt = 1;
ctxp->rqb_buffer = (void *)nvmebuf;
lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
@@ -1268,8 +1319,8 @@ dropit:
payload, size);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
- "6037 %s: ctx %p sz %d rc %d: %08x %08x %08x "
- "%08x %08x %08x\n", __func__, ctxp, size, rc,
+ "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
+ "%08x %08x %08x\n", size, rc,
*payload, *(payload+1), *(payload+2),
*(payload+3), *(payload+4), *(payload+5));
@@ -1337,13 +1388,31 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
goto dropit;
}
- spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
- if (phba->sli4_hba.nvmet_ctx_cnt) {
- list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_list,
+ spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, iflag);
+ if (phba->sli4_hba.nvmet_ctx_get_cnt) {
+ list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_get_list,
ctx_buf, struct lpfc_nvmet_ctxbuf, list);
- phba->sli4_hba.nvmet_ctx_cnt--;
+ phba->sli4_hba.nvmet_ctx_get_cnt--;
+ } else {
+ spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
+ if (phba->sli4_hba.nvmet_ctx_put_cnt) {
+ list_splice(&phba->sli4_hba.lpfc_nvmet_ctx_put_list,
+ &phba->sli4_hba.lpfc_nvmet_ctx_get_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_put_list);
+ phba->sli4_hba.nvmet_ctx_get_cnt =
+ phba->sli4_hba.nvmet_ctx_put_cnt;
+ phba->sli4_hba.nvmet_ctx_put_cnt = 0;
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+
+ list_remove_head(
+ &phba->sli4_hba.lpfc_nvmet_ctx_get_list,
+ ctx_buf, struct lpfc_nvmet_ctxbuf, list);
+ phba->sli4_hba.nvmet_ctx_get_cnt--;
+ } else {
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+ }
}
- spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
+ spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, iflag);
fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
oxid = be16_to_cpu(fc_hdr->fh_ox_id);
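
[Editor's note: the ctx get/put split above replaces one contended lock and list with a consumer-side get list and a producer-side put list; the receive path splices the put list across only when its own list runs dry, taking both locks only in that slow case. A userspace sketch of the scheme with pthread mutexes, same lock order as the patch (get lock outer, put lock inner):

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct ctx { struct ctx *next; };

/* Two lists, two locks: producers (frees) touch only put_lock,
 * the consumer (receive path) mostly touches only get_lock. */
static struct ctx *get_list, *put_list;
static pthread_mutex_t get_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t put_lock = PTHREAD_MUTEX_INITIALIZER;

static void ctx_put(struct ctx *c)		/* completion path */
{
	pthread_mutex_lock(&put_lock);
	c->next = put_list;
	put_list = c;
	pthread_mutex_unlock(&put_lock);
}

static struct ctx *ctx_get(void)		/* receive path */
{
	struct ctx *c;

	pthread_mutex_lock(&get_lock);
	if (!get_list) {
		/* Get list empty: splice the whole put list over. */
		pthread_mutex_lock(&put_lock);
		get_list = put_list;
		put_list = NULL;
		pthread_mutex_unlock(&put_lock);
	}
	c = get_list;
	if (c)
		get_list = c->next;
	pthread_mutex_unlock(&get_lock);
	return c;
}

int main(void)
{
	struct ctx pool[2];

	ctx_put(&pool[0]);
	ctx_put(&pool[1]);
	printf("got %p, %p, %p\n", (void *)ctx_get(),
	       (void *)ctx_get(), (void *)ctx_get()); /* last is NULL */
	return 0;
}
]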
@@ -1383,7 +1452,11 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
sid = sli4_sid_from_fc_hdr(fc_hdr);
ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
- memset(ctxp, 0, sizeof(ctxp->ctx));
+ if (ctxp->state != LPFC_NVMET_STE_FREE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6414 NVMET Context corrupt %d %d oxid x%x\n",
+ ctxp->state, ctxp->entry_cnt, ctxp->oxid);
+ }
ctxp->wqeq = NULL;
ctxp->txrdy = NULL;
ctxp->offset = 0;
@@ -1547,9 +1620,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
if (!lpfc_is_link_up(phba)) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
- "6104 lpfc_nvmet_prep_ls_wqe: link err: "
- "NPORT x%x oxid:x%x\n",
- ctxp->sid, ctxp->oxid);
+ "6104 NVMET prep LS wqe: link err: "
+ "NPORT x%x oxid:x%x ste %d\n",
+ ctxp->sid, ctxp->oxid, ctxp->state);
return NULL;
}
@@ -1557,9 +1630,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
nvmewqe = lpfc_sli_get_iocbq(phba);
if (nvmewqe == NULL) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
- "6105 lpfc_nvmet_prep_ls_wqe: No WQE: "
- "NPORT x%x oxid:x%x\n",
- ctxp->sid, ctxp->oxid);
+ "6105 NVMET prep LS wqe: No WQE: "
+ "NPORT x%x oxid x%x ste %d\n",
+ ctxp->sid, ctxp->oxid, ctxp->state);
return NULL;
}
@@ -1568,9 +1641,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
- "6106 lpfc_nvmet_prep_ls_wqe: No ndlp: "
- "NPORT x%x oxid:x%x\n",
- ctxp->sid, ctxp->oxid);
+ "6106 NVMET prep LS wqe: No ndlp: "
+ "NPORT x%x oxid x%x ste %d\n",
+ ctxp->sid, ctxp->oxid, ctxp->state);
goto nvme_wqe_free_wqeq_exit;
}
ctxp->wqeq = nvmewqe;
@@ -1642,9 +1715,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
- /* Xmit NVME response to remote NPORT <did> */
+ /* Xmit NVMET response to remote NPORT <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
- "6039 Xmit NVME LS response to remote "
+ "6039 Xmit NVMET LS response to remote "
"NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
rspsize);
@@ -1676,9 +1749,9 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
if (!lpfc_is_link_up(phba)) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6107 lpfc_nvmet_prep_fcp_wqe: link err:"
- "NPORT x%x oxid:x%x\n", ctxp->sid,
- ctxp->oxid);
+ "6107 NVMET prep FCP wqe: link err:"
+ "NPORT x%x oxid x%x ste %d\n",
+ ctxp->sid, ctxp->oxid, ctxp->state);
return NULL;
}
@@ -1687,17 +1760,18 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6108 lpfc_nvmet_prep_fcp_wqe: no ndlp: "
- "NPORT x%x oxid:x%x\n",
- ctxp->sid, ctxp->oxid);
+ "6108 NVMET prep FCP wqe: no ndlp: "
+ "NPORT x%x oxid x%x ste %d\n",
+ ctxp->sid, ctxp->oxid, ctxp->state);
return NULL;
}
if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6109 lpfc_nvmet_prep_fcp_wqe: seg cnt err: "
- "NPORT x%x oxid:x%x cnt %d\n",
- ctxp->sid, ctxp->oxid, phba->cfg_nvme_seg_cnt);
+ "6109 NVMET prep FCP wqe: seg cnt err: "
+ "NPORT x%x oxid x%x ste %d cnt %d\n",
+ ctxp->sid, ctxp->oxid, ctxp->state,
+ phba->cfg_nvme_seg_cnt);
return NULL;
}
@@ -1708,9 +1782,9 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
nvmewqe = ctxp->ctxbuf->iocbq;
if (nvmewqe == NULL) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6110 lpfc_nvmet_prep_fcp_wqe: No "
- "WQE: NPORT x%x oxid:x%x\n",
- ctxp->sid, ctxp->oxid);
+ "6110 NVMET prep FCP wqe: No "
+ "WQE: NPORT x%x oxid x%x ste %d\n",
+ ctxp->sid, ctxp->oxid, ctxp->state);
return NULL;
}
ctxp->wqeq = nvmewqe;
@@ -1722,13 +1796,12 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
/* Sanity check */
if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
(ctxp->entry_cnt == 1)) ||
- ((ctxp->state == LPFC_NVMET_STE_DATA) &&
- (ctxp->entry_cnt > 1))) {
+ (ctxp->state == LPFC_NVMET_STE_DATA)) {
wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6111 Wrong state %s: %d cnt %d\n",
- __func__, ctxp->state, ctxp->entry_cnt);
+ "6111 Wrong state NVMET FCP: %d cnt %d\n",
+ ctxp->state, ctxp->entry_cnt);
return NULL;
}
@@ -1832,7 +1905,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
}
- ctxp->state = LPFC_NVMET_STE_DATA;
break;
case NVMET_FCOP_WRITEDATA:
@@ -1923,7 +1995,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = 0;
sgl++;
- ctxp->state = LPFC_NVMET_STE_DATA;
atomic_inc(&tgtp->xmt_fcp_write);
break;
@@ -1980,7 +2051,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com,
FCP_COMMAND_TRSP);
bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
- ctxp->state = LPFC_NVMET_STE_RSP;
if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
/* Good response - all zero's on wire */
@@ -2029,6 +2099,8 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
sgl++;
ctxp->offset += cnt;
}
+ ctxp->state = LPFC_NVMET_STE_DATA;
+ ctxp->entry_cnt++;
return nvmewqe;
}
@@ -2124,10 +2196,6 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
status = bf_get(lpfc_wcqe_c_status, wcqe);
result = wcqe->parameter;
- tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- if (ctxp->flag & LPFC_NVMET_ABORT_OP)
- atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
-
if (!ctxp) {
/* if context is clear, related io already completed */
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
@@ -2137,6 +2205,10 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
return;
}
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ if (ctxp->flag & LPFC_NVMET_ABORT_OP)
+ atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
+
/* Sanity check */
if (ctxp->state != LPFC_NVMET_STE_ABORT) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
@@ -2206,17 +2278,32 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
atomic_inc(&tgtp->xmt_ls_abort_cmpl);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6083 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n",
+ "6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
ctxp, wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);
- if (ctxp) {
- cmdwqe->context2 = NULL;
- cmdwqe->context3 = NULL;
- lpfc_sli_release_iocbq(phba, cmdwqe);
- kfree(ctxp);
- } else
+ if (!ctxp) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+ "6415 NVMET LS Abort No ctx: WCQE: "
+ "%08x %08x %08x %08x\n",
+ wcqe->word0, wcqe->total_data_placed,
+ result, wcqe->word3);
+
lpfc_sli_release_iocbq(phba, cmdwqe);
+ return;
+ }
+
+ if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6416 NVMET LS abort cmpl state mismatch: "
+ "oxid x%x: %d %d\n",
+ ctxp->oxid, ctxp->state, ctxp->entry_cnt);
+ }
+
+ cmdwqe->context2 = NULL;
+ cmdwqe->context3 = NULL;
+ lpfc_sli_release_iocbq(phba, cmdwqe);
+ kfree(ctxp);
}
static int
@@ -2240,7 +2327,7 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
atomic_inc(&tgtp->xmt_abort_rsp_error);
- lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6134 Drop ABTS - wrong NDLP state x%x.\n",
(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
@@ -2250,7 +2337,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
abts_wqeq = ctxp->wqeq;
wqe_abts = &abts_wqeq->wqe;
- ctxp->state = LPFC_NVMET_STE_ABORT;
/*
* Since we zero the whole WQE, we need to ensure we set the WQE fields
@@ -2338,7 +2424,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
atomic_inc(&tgtp->xmt_abort_rsp_error);
- lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6160 Drop ABORT - wrong NDLP state x%x.\n",
(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
@@ -2351,7 +2437,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
if (!ctxp->abort_wqeq) {
atomic_inc(&tgtp->xmt_abort_rsp_error);
- lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6161 ABORT failed: No wqeqs: "
"xri: x%x\n", ctxp->oxid);
/* No failure to an ABTS request. */
@@ -2437,6 +2523,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
abts_wqeq->iocb_cmpl = 0;
abts_wqeq->iocb_flag |= LPFC_IO_NVME;
abts_wqeq->context2 = ctxp;
+ abts_wqeq->vport = phba->pport;
rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
spin_unlock_irqrestore(&phba->hbalock, flags);
if (rc == WQE_SUCCESS) {
@@ -2471,6 +2558,15 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
ctxp->wqeq->hba_wqidx = 0;
}
+ if (ctxp->state == LPFC_NVMET_STE_FREE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
+ ctxp->state, ctxp->entry_cnt, ctxp->oxid);
+ rc = WQE_BUSY;
+ goto aerr;
+ }
+ ctxp->state = LPFC_NVMET_STE_ABORT;
+ ctxp->entry_cnt++;
rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
if (rc == 0)
goto aerr;
@@ -2487,10 +2583,9 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
}
aerr:
- atomic_inc(&tgtp->xmt_abort_rsp_error);
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
atomic_inc(&tgtp->xmt_abort_rsp_error);
- lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
ctxp->oxid, rc);
return 1;
@@ -2507,12 +2602,24 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
unsigned long flags;
int rc;
+ if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
+ (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
+ ctxp->state = LPFC_NVMET_STE_LS_ABORT;
+ ctxp->entry_cnt++;
+ } else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6418 NVMET LS abort state mismatch "
+ "IO x%x: %d %d\n",
+ ctxp->oxid, ctxp->state, ctxp->entry_cnt);
+ ctxp->state = LPFC_NVMET_STE_LS_ABORT;
+ }
+
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
if (!ctxp->wqeq) {
/* Issue ABTS for this WQE based on iotag */
ctxp->wqeq = lpfc_sli_get_iocbq(phba);
if (!ctxp->wqeq) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6068 Abort failed: No wqeqs: "
"xri: x%x\n", xri);
/* No failure to an ABTS request. */
@@ -2523,7 +2630,10 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
abts_wqeq = ctxp->wqeq;
wqe_abts = &abts_wqeq->wqe;
- lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
+ if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
+ rc = WQE_BUSY;
+ goto out;
+ }
spin_lock_irqsave(&phba->hbalock, flags);
abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
@@ -2535,13 +2645,13 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
atomic_inc(&tgtp->xmt_abort_unsol);
return 0;
}
-
+out:
atomic_inc(&tgtp->xmt_abort_rsp_error);
abts_wqeq->context2 = NULL;
abts_wqeq->context3 = NULL;
lpfc_sli_release_iocbq(phba, abts_wqeq);
kfree(ctxp);
- lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6056 Failed to Issue ABTS. Status x%x\n", rc);
return 0;
}
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 6eb2f5d8d4ed..e675ef17be08 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -93,12 +93,14 @@ struct lpfc_nvmet_rcv_ctx {
uint16_t cpu;
uint16_t state;
/* States */
-#define LPFC_NVMET_STE_FREE 0
-#define LPFC_NVMET_STE_RCV 1
-#define LPFC_NVMET_STE_DATA 2
-#define LPFC_NVMET_STE_ABORT 3
-#define LPFC_NVMET_STE_RSP 4
-#define LPFC_NVMET_STE_DONE 5
+#define LPFC_NVMET_STE_LS_RCV 1
+#define LPFC_NVMET_STE_LS_ABORT 2
+#define LPFC_NVMET_STE_LS_RSP 3
+#define LPFC_NVMET_STE_RCV 4
+#define LPFC_NVMET_STE_DATA 5
+#define LPFC_NVMET_STE_ABORT 6
+#define LPFC_NVMET_STE_DONE 7
+#define LPFC_NVMET_STE_FREE 0xff
uint16_t flag;
#define LPFC_NVMET_IO_INP 0x1 /* IO is in progress on exchange */
#define LPFC_NVMET_ABORT_OP 0x2 /* Abort WQE issued on exchange */
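
[Editor's note: the header now gives LS and FCP exchanges separate states, moves STE_FREE to 0xff so a zeroed context can no longer masquerade as free, and pairs each state with an entry_cnt so mismatched transitions can be logged rather than silently continued. A sketch of that transition check, with values mirroring the LS side of the new table:

#include <stdio.h>

/* Hypothetical mirror of the LS-side states from the header. */
enum ste { STE_LS_RCV = 1, STE_LS_ABORT, STE_LS_RSP, STE_FREE = 0xff };

struct rcv_ctx {
	enum ste state;
	int entry_cnt;
	unsigned int oxid;
};

/* Check the expected (state, entry_cnt) pair before moving on,
 * logging but not failing on a mismatch -- the same spirit as the
 * "641x ... state mismatch" messages added by the patch. */
static void transition(struct rcv_ctx *c, enum ste expect,
		       int expect_cnt, enum ste next)
{
	if (c->state != expect || c->entry_cnt != expect_cnt)
		fprintf(stderr, "state mismatch oxid x%x: %d %d\n",
			c->oxid, c->state, c->entry_cnt);
	c->state = next;
	c->entry_cnt++;
}

int main(void)
{
	struct rcv_ctx c = { .state = STE_LS_RCV, .entry_cnt = 1 };

	transition(&c, STE_LS_RCV, 1, STE_LS_RSP);	/* ok        */
	transition(&c, STE_LS_RCV, 1, STE_LS_ABORT);	/* logs skew */
	return 0;
}
]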
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 54fd0c81ceaf..cfe1d01eb73f 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -3931,7 +3931,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct Scsi_Host *shost;
uint32_t logit = LOG_FCP;
- phba->fc4ScsiIoCmpls++;
+ atomic_inc(&phba->fc4ScsiIoCmpls);
/* Sanity check on return of outstanding command */
cmd = lpfc_cmd->pCmd;
@@ -4250,19 +4250,19 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
vport->cfg_first_burst_size;
}
fcp_cmnd->fcpCntl3 = WRITE_DATA;
- phba->fc4ScsiOutputRequests++;
+ atomic_inc(&phba->fc4ScsiOutputRequests);
} else {
iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
iocb_cmd->ulpPU = PARM_READ_CHECK;
fcp_cmnd->fcpCntl3 = READ_DATA;
- phba->fc4ScsiInputRequests++;
+ atomic_inc(&phba->fc4ScsiInputRequests);
}
} else {
iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
iocb_cmd->un.fcpi.fcpi_parm = 0;
iocb_cmd->ulpPU = 0;
fcp_cmnd->fcpCntl3 = 0;
- phba->fc4ScsiControlRequests++;
+ atomic_inc(&phba->fc4ScsiControlRequests);
}
if (phba->sli_rev == 3 &&
!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
@@ -4640,7 +4640,16 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
(uint32_t)
(cmnd->request->timeout / 1000));
-
+ switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
+ case WRITE_DATA:
+ atomic_dec(&phba->fc4ScsiOutputRequests);
+ break;
+ case READ_DATA:
+ atomic_dec(&phba->fc4ScsiInputRequests);
+ break;
+ default:
+ atomic_dec(&phba->fc4ScsiControlRequests);
+ }
goto out_host_busy_free_buf;
}
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index d6b184839bc2..e948ea05fd33 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -968,6 +968,7 @@ __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
list_remove_head(lpfc_els_sgl_list, sglq,
struct lpfc_sglq, list);
if (sglq == start_sglq) {
+ list_add_tail(&sglq->list, lpfc_els_sgl_list);
sglq = NULL;
break;
} else
@@ -4302,7 +4303,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
/* Perform FCoE PCI function reset before freeing queue memory */
rc = lpfc_pci_function_reset(phba);
- lpfc_sli4_queue_destroy(phba);
/* Restore PCI cmd register */
pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
@@ -4427,6 +4427,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
pci_disable_pcie_error_reporting(phba->pcidev);
lpfc_hba_down_post(phba);
+ lpfc_sli4_queue_destroy(phba);
return rc;
}
@@ -6926,18 +6927,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
cnt = phba->cfg_iocb_cnt * 1024;
/* We need 1 iocbq for every SGL, for IO processing */
cnt += phba->sli4_hba.nvmet_xri_cnt;
- /* Initialize and populate the iocb list per host */
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2821 initialize iocb list %d total %d\n",
- phba->cfg_iocb_cnt, cnt);
- rc = lpfc_init_iocb_list(phba, cnt);
- if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "1413 Failed to init iocb list.\n");
- goto out_destroy_queue;
- }
-
- lpfc_nvmet_create_targetport(phba);
} else {
/* update host scsi xri-sgl sizes and mappings */
rc = lpfc_sli4_scsi_sgl_update(phba);
@@ -6958,18 +6947,24 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
}
cnt = phba->cfg_iocb_cnt * 1024;
+ }
+
+ if (!phba->sli.iocbq_lookup) {
/* Initialize and populate the iocb list per host */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2820 initialize iocb list %d total %d\n",
+ "2821 initialize iocb list %d total %d\n",
phba->cfg_iocb_cnt, cnt);
rc = lpfc_init_iocb_list(phba, cnt);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "6301 Failed to init iocb list.\n");
+ "1413 Failed to init iocb list.\n");
goto out_destroy_queue;
}
}
+ if (phba->nvmet_support)
+ lpfc_nvmet_create_targetport(phba);
+
if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
/* Post initial buffers to all RQs created */
for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
@@ -7512,7 +7507,8 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
"(%d):0308 Mbox cmd issue - BUSY Data: "
"x%x x%x x%x x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0xffffff,
- mbx->mbxCommand, phba->pport->port_state,
+ mbx->mbxCommand,
+ phba->pport ? phba->pport->port_state : 0xff,
psli->sli_flag, flag);
psli->slistat.mbox_busy++;
@@ -7564,7 +7560,8 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
"x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0,
- mbx->mbxCommand, phba->pport->port_state,
+ mbx->mbxCommand,
+ phba->pport ? phba->pport->port_state : 0xff,
psli->sli_flag, flag);
if (mbx->mbxCommand != MBX_HEARTBEAT) {
@@ -10950,6 +10947,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocbq;
struct lpfc_iocbq *abtsiocb;
+ struct lpfc_sli_ring *pring_s4;
IOCB_t *cmd = NULL;
int errcnt = 0, ret_val = 0;
int i;
@@ -11003,8 +11001,15 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
/* Setup callback routine and issue the command. */
abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
- ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
- abtsiocb, 0);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
+ if (!pring_s4)
+ continue;
+ ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
+ abtsiocb, 0);
+ } else
+ ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
+ abtsiocb, 0);
if (ret_val == IOCB_ERROR) {
lpfc_sli_release_iocbq(phba, abtsiocb);
errcnt++;
@@ -13256,6 +13261,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"6126 Receive Frame Truncated!!\n");
+ /* Fall through */
case FC_STATUS_RQ_SUCCESS:
lpfc_sli4_rq_release(hrq, drq);
spin_lock_irqsave(&phba->hbalock, iflags);
@@ -13466,6 +13472,7 @@ process_cq:
/* Track the max number of CQEs processed in 1 EQ */
if (ecount > cq->CQ_max_cqe)
cq->CQ_max_cqe = ecount;
+ cq->assoc_qp->EQ_cqe_cnt += ecount;
/* Catch the no cq entry condition */
if (unlikely(ecount == 0))
@@ -13547,6 +13554,9 @@ lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
return;
}
+ /* Save EQ associated with this CQ */
+ cq->assoc_qp = phba->sli4_hba.fof_eq;
+
/* Process all the entries to the OAS CQ */
while ((cqe = lpfc_sli4_cq_get(cq))) {
workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
@@ -13557,6 +13567,7 @@ lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
/* Track the max number of CQEs processed in 1 EQ */
if (ecount > cq->CQ_max_cqe)
cq->CQ_max_cqe = ecount;
+ cq->assoc_qp->EQ_cqe_cnt += ecount;
/* Catch the no cq entry condition */
if (unlikely(ecount == 0))
@@ -13617,7 +13628,6 @@ lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
/* Check device state for handling interrupt */
if (unlikely(lpfc_intr_state_check(phba))) {
- eq->EQ_badstate++;
/* Check again for link_state with lock held */
spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->link_state < LPFC_LINK_DOWN)
@@ -13729,7 +13739,6 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
/* Check device state for handling interrupt */
if (unlikely(lpfc_intr_state_check(phba))) {
- fpeq->EQ_badstate++;
/* Check again for link_state with lock held */
spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->link_state < LPFC_LINK_DOWN)
@@ -13988,14 +13997,15 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
* fails this function will return -ENXIO.
**/
int
-lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq)
+lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
+ uint32_t numq, uint32_t imax)
{
struct lpfc_mbx_modify_eq_delay *eq_delay;
LPFC_MBOXQ_t *mbox;
struct lpfc_queue *eq;
int cnt, rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
- uint32_t result;
+ uint32_t result, val;
int qidx;
union lpfc_sli4_cfg_shdr *shdr;
uint16_t dmult;
@@ -14014,22 +14024,45 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq)
eq_delay = &mbox->u.mqe.un.eq_delay;
/* Calculate delay multiplier from maximum interrupts per second */
- result = phba->cfg_fcp_imax / phba->io_channel_irqs;
+ result = imax / phba->io_channel_irqs;
if (result > LPFC_DMULT_CONST || result == 0)
dmult = 0;
else
dmult = LPFC_DMULT_CONST/result - 1;
+ if (dmult > LPFC_DMULT_MAX)
+ dmult = LPFC_DMULT_MAX;
cnt = 0;
for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) {
eq = phba->sli4_hba.hba_eq[qidx];
if (!eq)
continue;
+ eq->q_mode = imax;
eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
eq_delay->u.request.eq[cnt].phase = 0;
eq_delay->u.request.eq[cnt].delay_multi = dmult;
cnt++;
- if (cnt >= LPFC_MAX_EQ_DELAY_EQID_CNT)
+
+ /* q_mode is only used for auto_imax */
+ if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
+ /* Use EQ Delay Register method for q_mode */
+
+ /* Convert for EQ Delay register */
+ val = phba->cfg_fcp_imax;
+ if (val) {
+ /* First, interrupts per sec per EQ */
+ val = phba->cfg_fcp_imax /
+ phba->io_channel_irqs;
+
+ /* us delay between each interrupt */
+ val = LPFC_SEC_TO_USEC / val;
+ }
+ eq->q_mode = val;
+ } else {
+ eq->q_mode = imax;
+ }
+
+ if (cnt >= numq)
break;
}
eq_delay->u.request.num_eq = cnt;
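
[Editor's note: lpfc_modify_hba_eq_delay() now takes the interrupt ceiling and queue count as parameters and encodes the ceiling two ways: as a delay multiplier for the mailbox method, or as a microsecond gap per interrupt when the EQ Delay Register is available. A sketch of both conversions; the DMULT_* values here are assumed stand-ins for the LPFC_DMULT_* constants:

#include <stdio.h>

#define DMULT_CONST 651042	/* assumed stand-ins for the */
#define DMULT_MAX   1023	/* LPFC_DMULT_* constants    */
#define SEC_TO_USEC 1000000

/* Convert a per-port interrupt ceiling into the two hardware
 * encodings used by the patch: a delay multiplier (mailbox
 * method) and a microsecond gap (EQ Delay Register method). */
static void eq_delay(unsigned int imax, unsigned int nirqs)
{
	unsigned int per_eq = nirqs ? imax / nirqs : 0;
	unsigned int dmult, usec;

	if (per_eq == 0 || per_eq > DMULT_CONST)
		dmult = 0;
	else
		dmult = DMULT_CONST / per_eq - 1;
	if (dmult > DMULT_MAX)
		dmult = DMULT_MAX;

	usec = per_eq ? SEC_TO_USEC / per_eq : 0;

	printf("imax=%u irqs=%u -> dmult=%u, %u us between interrupts\n",
	       imax, nirqs, dmult, usec);
}

int main(void)
{
	eq_delay(40000, 4);	/* 10000 ints/s per EQ -> 100 us gaps */
	eq_delay(0, 4);		/* 0 disables coalescing              */
	return 0;
}
]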
@@ -16126,9 +16159,6 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
return rc;
}
-static char *lpfc_rctl_names[] = FC_RCTL_NAMES_INIT;
-static char *lpfc_type_names[] = FC_TYPE_NAMES_INIT;
-
/**
* lpfc_fc_frame_check - Check that this frame is a valid frame to handle
* @phba: pointer to lpfc_hba struct that the frame was received on
@@ -16203,22 +16233,18 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
}
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "2538 Received frame rctl:%s (x%x), type:%s (x%x), "
+ "2538 Received frame rctl:x%x, type:x%x, "
"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
- (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS) ? "MDS Diags" :
- lpfc_rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
- (fc_hdr->fh_type == FC_TYPE_VENDOR_UNIQUE) ?
- "Vendor Unique" : lpfc_type_names[fc_hdr->fh_type],
- fc_hdr->fh_type, be32_to_cpu(header[0]),
- be32_to_cpu(header[1]), be32_to_cpu(header[2]),
- be32_to_cpu(header[3]), be32_to_cpu(header[4]),
- be32_to_cpu(header[5]), be32_to_cpu(header[6]));
+ fc_hdr->fh_r_ctl, fc_hdr->fh_type,
+ be32_to_cpu(header[0]), be32_to_cpu(header[1]),
+ be32_to_cpu(header[2]), be32_to_cpu(header[3]),
+ be32_to_cpu(header[4]), be32_to_cpu(header[5]),
+ be32_to_cpu(header[6]));
return 0;
drop:
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
- "2539 Dropped frame rctl:%s type:%s\n",
- lpfc_rctl_names[fc_hdr->fh_r_ctl],
- lpfc_type_names[fc_hdr->fh_type]);
+ "2539 Dropped frame rctl:x%x type:x%x\n",
+ fc_hdr->fh_r_ctl, fc_hdr->fh_type);
return 1;
}
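
[Editor's note: dropping lpfc_rctl_names[]/lpfc_type_names[] sidesteps indexing a fixed table with wire bytes (fh_r_ctl, fh_type) that can exceed it; the log now prints raw hex instead. A sketch of the underlying hazard and a bounds-checked middle ground, with a hypothetical three-entry table:

#include <stdio.h>

/* A fixed name table indexed by a wire byte can be overrun by
 * values the table doesn't cover (e.g. vendor-specific codes);
 * hypothetical three-entry table for illustration. */
static const char *rctl_names[] = { "uncat", "sol-data", "unsol-ctl" };

static void log_frame(unsigned char r_ctl)
{
	/* Safe variant: fall back to raw hex instead of indexing
	 * blindly -- the patch goes further and logs only hex. */
	if (r_ctl < sizeof(rctl_names) / sizeof(rctl_names[0]))
		printf("rctl:%s (x%x)\n", rctl_names[r_ctl], r_ctl);
	else
		printf("rctl:x%x\n", r_ctl);
}

int main(void)
{
	log_frame(0x01);	/* named    */
	log_frame(0xF4);	/* hex only */
	return 0;
}
]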
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 9085306ddd78..a3b1b5145d2b 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -321,6 +321,7 @@ struct lpfc_sli {
#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */
#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */
#define LPFC_SLI_SUPPRESS_RSP 0x4000 /* Suppress RSP feature is supported */
+#define LPFC_SLI_USE_EQDR 0x8000 /* EQ Delay Register is supported */
struct lpfc_sli_ring *sli3_ring;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index cf863db27700..7a1d74e9e877 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -168,7 +168,7 @@ struct lpfc_queue {
struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
struct lpfc_rqb *rqbp; /* ptr to RQ buffers */
- uint16_t sgl_list_cnt;
+ uint32_t q_mode;
uint16_t db_format;
#define LPFC_DB_RING_FORMAT 0x01
#define LPFC_DB_LIST_FORMAT 0x02
@@ -181,7 +181,7 @@ struct lpfc_queue {
/* defines for EQ stats */
#define EQ_max_eqe q_cnt_1
#define EQ_no_entry q_cnt_2
-#define EQ_badstate q_cnt_3
+#define EQ_cqe_cnt q_cnt_3
#define EQ_processed q_cnt_4
/* defines for CQ stats */
@@ -407,8 +407,10 @@ struct lpfc_max_cfg_param {
struct lpfc_hba;
/* SLI4 HBA multi-fcp queue handler struct */
+#define LPFC_SLI4_HANDLER_NAME_SZ 16
struct lpfc_hba_eq_hdl {
uint32_t idx;
+ char handler_name[LPFC_SLI4_HANDLER_NAME_SZ];
struct lpfc_hba *phba;
atomic_t hba_eq_in_use;
struct cpumask *cpumask;
@@ -480,7 +482,6 @@ struct lpfc_sli4_lnk_info {
#define LPFC_SLI4_HANDLER_CNT (LPFC_HBA_IO_CHAN_MAX+ \
LPFC_FOF_IO_CHAN_NUM)
-#define LPFC_SLI4_HANDLER_NAME_SZ 16
/* Used for IRQ vector to CPU mapping */
struct lpfc_vector_map_info {
@@ -522,6 +523,7 @@ struct lpfc_sli4_hba {
#define SLIPORT_ERR2_REG_FAILURE_CQ 0x4
#define SLIPORT_ERR2_REG_FAILURE_BUS 0x5
#define SLIPORT_ERR2_REG_FAILURE_RQ 0x6
+ void __iomem *EQDregaddr;
} if_type2;
} u;
@@ -548,7 +550,6 @@ struct lpfc_sli4_hba {
uint32_t ue_to_rp;
struct lpfc_register sli_intf;
struct lpfc_pc_sli4_params pc_sli4_params;
- uint8_t handler_name[LPFC_SLI4_HANDLER_CNT][LPFC_SLI4_HANDLER_NAME_SZ];
struct lpfc_hba_eq_hdl *hba_eq_hdl; /* HBA per-WQ handle */
/* Pointers to the constructed SLI4 queues */
@@ -620,7 +621,8 @@ struct lpfc_sli4_hba {
uint16_t scsi_xri_start;
uint16_t els_xri_cnt;
uint16_t nvmet_xri_cnt;
- uint16_t nvmet_ctx_cnt;
+ uint16_t nvmet_ctx_get_cnt;
+ uint16_t nvmet_ctx_put_cnt;
uint16_t nvmet_io_wait_cnt;
uint16_t nvmet_io_wait_total;
struct list_head lpfc_els_sgl_list;
@@ -629,7 +631,8 @@ struct lpfc_sli4_hba {
struct list_head lpfc_abts_nvmet_ctx_list;
struct list_head lpfc_abts_scsi_buf_list;
struct list_head lpfc_abts_nvme_buf_list;
- struct list_head lpfc_nvmet_ctx_list;
+ struct list_head lpfc_nvmet_ctx_get_list;
+ struct list_head lpfc_nvmet_ctx_put_list;
struct list_head lpfc_nvmet_io_wait_list;
struct lpfc_sglq **lpfc_sglq_active_list;
struct list_head lpfc_rpi_hdr_list;
@@ -661,7 +664,8 @@ struct lpfc_sli4_hba {
spinlock_t abts_nvme_buf_list_lock; /* list of aborted SCSI IOs */
spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
spinlock_t sgl_list_lock; /* list of aborted els IOs */
- spinlock_t nvmet_io_lock;
+ spinlock_t nvmet_ctx_get_lock; /* list of avail XRI contexts */
+ spinlock_t nvmet_ctx_put_lock; /* list of avail XRI contexts */
spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */
uint32_t physical_port;
@@ -755,7 +759,8 @@ struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
uint32_t);
void lpfc_sli4_queue_free(struct lpfc_queue *);
int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
-int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq);
+int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
+ uint32_t numq, uint32_t imax);
int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t, uint32_t);
int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c2653244221c..c6a24c3e2d5e 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "11.2.0.14"
+#define LPFC_DRIVER_VERSION "11.4.0.1"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index 4cf9ed96414f..544d6f7e6138 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -574,7 +574,7 @@ mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen)
kioc->pool_index = right_pool;
kioc->free_buf = 1;
- kioc->buf_vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL,
+ kioc->buf_vaddr = pci_pool_alloc(pool->handle, GFP_ATOMIC,
&kioc->buf_paddr);
spin_unlock_irqrestore(&pool->lock, flags);
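
[Editor's note: the megaraid change swaps GFP_KERNEL for GFP_ATOMIC because the allocation runs with pool->lock, a spinlock, held, where sleeping is not allowed. A loose userspace analogy; both stand-in allocators are just malloc here, whereas in the kernel only the non-sleeping one is legal under a spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in allocators: in the kernel the distinction is
 * GFP_KERNEL (may sleep) vs GFP_ATOMIC (may not); sleeping while
 * holding a spinlock is the bug the megaraid hunk fixes. */
static void *alloc_may_sleep(size_t n) { return malloc(n); }
static void *alloc_atomic(size_t n)    { return malloc(n); }

int main(void)
{
	void *buf;

	pthread_mutex_lock(&pool_lock);
	/* WRONG in spinlock context: buf = alloc_may_sleep(64); */
	buf = alloc_atomic(64);	/* non-sleeping allocation instead */
	pthread_mutex_unlock(&pool_lock);

	printf("allocated %p under the lock\n", buf);
	free(buf);
	return 0;
}
]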
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index a5d872664257..22998cbd538f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2859,7 +2859,7 @@ _scsih_internal_device_block(struct scsi_device *sdev,
sas_device_priv_data->sas_target->handle);
sas_device_priv_data->block = 1;
- r = scsi_internal_device_block(sdev, false);
+ r = scsi_internal_device_block_nowait(sdev);
if (r == -EINVAL)
sdev_printk(KERN_WARNING, sdev,
"device_block failed with return(%d) for handle(0x%04x)\n",
@@ -2883,7 +2883,7 @@ _scsih_internal_device_unblock(struct scsi_device *sdev,
sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
"handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
sas_device_priv_data->block = 0;
- r = scsi_internal_device_unblock(sdev, SDEV_RUNNING);
+ r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
if (r == -EINVAL) {
/* The device has been set to SDEV_RUNNING by SD layer during
* device addition but the request queue is still stopped by
@@ -2895,14 +2895,14 @@ _scsih_internal_device_unblock(struct scsi_device *sdev,
"performing a block followed by an unblock\n",
r, sas_device_priv_data->sas_target->handle);
sas_device_priv_data->block = 1;
- r = scsi_internal_device_block(sdev, false);
+ r = scsi_internal_device_block_nowait(sdev);
if (r)
sdev_printk(KERN_WARNING, sdev, "retried device_block "
"failed with return(%d) for handle(0x%04x)\n",
r, sas_device_priv_data->sas_target->handle);
sas_device_priv_data->block = 0;
- r = scsi_internal_device_unblock(sdev, SDEV_RUNNING);
+ r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
if (r)
sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
" failed with return(%d) for handle(0x%04x)\n",
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 8a1b94816419..a4f28b7e4c65 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -446,7 +446,7 @@ static void _put_request(struct request *rq)
* code paths.
*/
if (unlikely(rq->bio))
- blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
+ blk_end_request(rq, BLK_STS_IOERR, blk_rq_bytes(rq));
else
blk_put_request(rq);
}
@@ -474,10 +474,10 @@ void osd_end_request(struct osd_request *or)
EXPORT_SYMBOL(osd_end_request);
static void _set_error_resid(struct osd_request *or, struct request *req,
- int error)
+ blk_status_t error)
{
or->async_error = error;
- or->req_errors = scsi_req(req)->result ? : error;
+ or->req_errors = scsi_req(req)->result;
or->sense_len = scsi_req(req)->sense_len;
if (or->sense_len)
memcpy(or->sense, scsi_req(req)->sense, or->sense_len);
@@ -489,17 +489,19 @@ static void _set_error_resid(struct osd_request *or, struct request *req,
int osd_execute_request(struct osd_request *or)
{
- int error;
-
blk_execute_rq(or->request->q, NULL, or->request, 0);
- error = scsi_req(or->request)->result ? -EIO : 0;
- _set_error_resid(or, or->request, error);
- return error;
+ if (scsi_req(or->request)->result) {
+ _set_error_resid(or, or->request, BLK_STS_IOERR);
+ return -EIO;
+ }
+
+ _set_error_resid(or, or->request, BLK_STS_OK);
+ return 0;
}
EXPORT_SYMBOL(osd_execute_request);
-static void osd_request_async_done(struct request *req, int error)
+static void osd_request_async_done(struct request *req, blk_status_t error)
{
struct osd_request *or = req->end_io_data;
@@ -1572,13 +1574,9 @@ static struct request *_make_request(struct request_queue *q, bool has_write,
flags);
if (IS_ERR(req))
return req;
- scsi_req_init(req);
for_each_bio(bio) {
- struct bio *bounce_bio = bio;
-
- blk_queue_bounce(req->q, &bounce_bio);
- ret = blk_rq_append_bio(req, bounce_bio);
+ ret = blk_rq_append_bio(req, bio);
if (ret)
return ERR_PTR(ret);
}
@@ -1617,7 +1615,6 @@ static int _init_blk_request(struct osd_request *or,
ret = PTR_ERR(req);
goto out;
}
- scsi_req_init(req);
or->in.req = or->request->next_rq = req;
}
} else if (has_in)
@@ -1914,7 +1911,7 @@ analyze:
/* scsi sense is Empty, the request was never issued to target
* linux return code might tell us what happened.
*/
- if (or->async_error == -ENOMEM)
+ if (or->async_error == BLK_STS_RESOURCE)
osi->osd_err_pri = OSD_ERR_PRI_RESOURCE;
else
osi->osd_err_pri = OSD_ERR_PRI_UNREACHABLE;
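
[Editor's note: the osd conversion separates the blk_status_t recorded on the request from the errno returned to callers; a failed execution records BLK_STS_IOERR and returns -EIO. A compact sketch of that split, with a hypothetical request shape:

#include <stdio.h>

/* Hypothetical mirror of the blk_status_t split: the block-layer
 * status recorded on the request vs the errno handed to callers. */
enum blk_status { BLK_STS_OK = 0, BLK_STS_IOERR, BLK_STS_RESOURCE };

struct osd_req { enum blk_status async_error; int result; };

static int execute(struct osd_req *or)
{
	if (or->result) {
		or->async_error = BLK_STS_IOERR; /* status for the request */
		return -5;			 /* -EIO for the caller    */
	}
	or->async_error = BLK_STS_OK;
	return 0;
}

int main(void)
{
	struct osd_req ok = { .result = 0 }, bad = { .result = 2 };

	printf("ok: rc=%d status=%d\n", execute(&ok), ok.async_error);
	printf("bad: rc=%d status=%d\n", execute(&bad), bad.async_error);
	return 0;
}
]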
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 67cbed92f07d..929ee7e88120 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -320,7 +320,7 @@ static int osst_chk_result(struct osst_tape * STp, struct osst_request * SRpnt)
/* Wakeup from interrupt */
-static void osst_end_async(struct request *req, int update)
+static void osst_end_async(struct request *req, blk_status_t status)
{
struct scsi_request *rq = scsi_req(req);
struct osst_request *SRpnt = req->end_io_data;
@@ -373,7 +373,6 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
return DRIVER_ERROR << 24;
rq = scsi_req(req);
- scsi_req_init(req);
req->rq_flags |= RQF_QUIET;
SRpnt->bio = NULL;
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
index 8c65e3b034dc..7d91e53562f8 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
@@ -1,5 +1,5 @@
/* QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
index 617529b058f4..f9c50faa748e 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
@@ -1,5 +1,5 @@
/* QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.c b/drivers/scsi/qedf/drv_scsi_fw_funcs.c
index 11e0cc082ec0..5d5095e3d96d 100644
--- a/drivers/scsi/qedf/drv_scsi_fw_funcs.c
+++ b/drivers/scsi/qedf/drv_scsi_fw_funcs.c
@@ -1,5 +1,5 @@
/* QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.h b/drivers/scsi/qedf/drv_scsi_fw_funcs.h
index 9cb45410bc45..8fbe6e4d0b4f 100644
--- a/drivers/scsi/qedf/drv_scsi_fw_funcs.h
+++ b/drivers/scsi/qedf/drv_scsi_fw_funcs.h
@@ -1,5 +1,5 @@
/* QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 07ee88200e91..4d038926a455 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/qedf_attr.c b/drivers/scsi/qedf/qedf_attr.c
index 47720611ad2c..fa6727685627 100644
--- a/drivers/scsi/qedf/qedf_attr.c
+++ b/drivers/scsi/qedf/qedf_attr.c
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
@@ -8,6 +8,25 @@
*/
#include "qedf.h"
+inline bool qedf_is_vport(struct qedf_ctx *qedf)
+{
+ return qedf->lport->vport != NULL;
+}
+
+/* Get base qedf for physical port from vport */
+static struct qedf_ctx *qedf_get_base_qedf(struct qedf_ctx *qedf)
+{
+ struct fc_lport *lport;
+ struct fc_lport *base_lport;
+
+ if (!(qedf_is_vport(qedf)))
+ return NULL;
+
+ lport = qedf->lport;
+ base_lport = shost_priv(vport_to_shost(lport->vport));
+ return lport_priv(base_lport);
+}
+
static ssize_t
qedf_fcoe_mac_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -26,34 +45,34 @@ qedf_fcoe_mac_show(struct device *dev,
return scnprintf(buf, PAGE_SIZE, "%pM\n", fcoe_mac);
}
+static ssize_t
+qedf_fka_period_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fc_lport *lport = shost_priv(class_to_shost(dev));
+ struct qedf_ctx *qedf = lport_priv(lport);
+ int fka_period = -1;
+
+ if (qedf_is_vport(qedf))
+ qedf = qedf_get_base_qedf(qedf);
+
+ if (qedf->ctlr.sel_fcf)
+ fka_period = qedf->ctlr.sel_fcf->fka_period;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", fka_period);
+}
+
static DEVICE_ATTR(fcoe_mac, S_IRUGO, qedf_fcoe_mac_show, NULL);
+static DEVICE_ATTR(fka_period, S_IRUGO, qedf_fka_period_show, NULL);
struct device_attribute *qedf_host_attrs[] = {
&dev_attr_fcoe_mac,
+ &dev_attr_fka_period,
NULL,
};
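
Attributes listed in qedf_host_attrs are surfaced by the SCSI midlayer under /sys/class/scsi_host/hostN/. A minimal userspace sketch that reads the new fka_period node; the host0 path is an assumption about the local setup:

/* Read the fka_period attribute exported by qedf; -1 means no FCF
 * has been selected yet. Adjust host0 for your system.
 */
#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/class/scsi_host/host0/fka_period", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("FKA period: %s", buf);
	fclose(f);
	return 0;
}
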
extern const struct qed_fcoe_ops *qed_ops;
-inline bool qedf_is_vport(struct qedf_ctx *qedf)
-{
- return (!(qedf->lport->vport == NULL));
-}
-
-/* Get base qedf for physical port from vport */
-static struct qedf_ctx *qedf_get_base_qedf(struct qedf_ctx *qedf)
-{
- struct fc_lport *lport;
- struct fc_lport *base_lport;
-
- if (!(qedf_is_vport(qedf)))
- return NULL;
-
- lport = qedf->lport;
- base_lport = shost_priv(vport_to_shost(lport->vport));
- return (struct qedf_ctx *)(lport_priv(base_lport));
-}
-
void qedf_capture_grc_dump(struct qedf_ctx *qedf)
{
struct qedf_ctx *base_qedf;
diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h
index 7d173f48a81e..50083cae84c3 100644
--- a/drivers/scsi/qedf/qedf_dbg.h
+++ b/drivers/scsi/qedf/qedf_dbg.h
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c
index 00a1d6405ebe..2b1ef3075e93 100644
--- a/drivers/scsi/qedf/qedf_debugfs.c
+++ b/drivers/scsi/qedf/qedf_debugfs.c
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016 QLogic Corporation
+ * Copyright (c) 2016-2017 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index 90627033bde6..eb07f1de8afa 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
@@ -44,7 +44,7 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
goto els_err;
}
- if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
+ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op);
rc = -EINVAL;
goto els_err;
@@ -225,7 +225,7 @@ int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
fcport = aborted_io_req->fcport;
/* Check that fcport is still offloaded */
- if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
+ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
return -EINVAL;
}
@@ -550,7 +550,7 @@ static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
fcport = orig_io_req->fcport;
/* Check that fcport is still offloaded */
- if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
+ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
return -EINVAL;
}
diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c
index e10b91cc3c62..aefd24ca9604 100644
--- a/drivers/scsi/qedf/qedf_fip.c
+++ b/drivers/scsi/qedf/qedf_fip.c
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
@@ -125,8 +125,7 @@ void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
sub = fiph->fip_subcode;
if (!qedf->vlan_hw_insert) {
- vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, sizeof(*vlan_hdr)
- - sizeof(*eth_hdr));
+ vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr));
memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
@@ -156,10 +155,9 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
struct fip_wwn_desc *wp;
struct fip_vn_desc *vp;
size_t rlen, dlen;
- uint32_t cvl_port_id;
- __u8 cvl_mac[ETH_ALEN];
u16 op;
u8 sub;
+ bool do_reset = false;
eth_hdr = (struct ethhdr *)skb_mac_header(skb);
fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2);
@@ -190,8 +188,6 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
return;
}
- cvl_port_id = 0;
- memset(cvl_mac, 0, ETH_ALEN);
/*
* We need to loop through the CVL descriptors to determine
* if we want to reset the fcoe link
@@ -205,7 +201,9 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
mp = (struct fip_mac_desc *)desc;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
"fd_mac=%pM\n", mp->fd_mac);
- ether_addr_copy(cvl_mac, mp->fd_mac);
+ if (ether_addr_equal(mp->fd_mac,
+ qedf->ctlr.sel_fcf->fcf_mac))
+ do_reset = true;
break;
case FIP_DT_NAME:
wp = (struct fip_wwn_desc *)desc;
@@ -217,7 +215,9 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
vp = (struct fip_vn_desc *)desc;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
"fd_fc_id=%x.\n", ntoh24(vp->fd_fc_id));
- cvl_port_id = ntoh24(vp->fd_fc_id);
+ if (ntoh24(vp->fd_fc_id) ==
+ qedf->lport->port_id)
+ do_reset = true;
break;
default:
/* Ignore anything else */
@@ -228,11 +228,8 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
}
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
- "cvl_port_id=%06x cvl_mac=%pM.\n", cvl_port_id,
- cvl_mac);
- if (cvl_port_id == qedf->lport->port_id &&
- ether_addr_equal(cvl_mac,
- qedf->ctlr.sel_fcf->fcf_mac)) {
+ "do_reset=%d.\n", do_reset);
+ if (do_reset) {
fcoe_ctlr_link_down(&qedf->ctlr);
qedf_wait_for_upload(qedf);
fcoe_ctlr_link_up(&qedf->ctlr);
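
The rework changes the post-loop AND of cvl_port_id and cvl_mac into a do_reset flag that is set as soon as either descriptor matches. A standalone sketch of the resulting decision, with illustrative names rather than the driver's:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint32_t ntoh24(const uint8_t *p)
{
	return (p[0] << 16) | (p[1] << 8) | p[2];
}

/* Reset if the CVL names our selected FCF MAC or our own port ID. */
static bool cvl_wants_reset(const uint8_t *desc_mac, const uint8_t *fcf_mac,
			    const uint8_t *desc_fc_id, uint32_t my_port_id)
{
	return memcmp(desc_mac, fcf_mac, 6) == 0 ||
	       ntoh24(desc_fc_id) == my_port_id;
}

int main(void)
{
	uint8_t fcf[6] = {0x0e, 0xfc, 0x00, 0x01, 0x02, 0x03};
	uint8_t id[3] = {0x01, 0x02, 0x03};

	printf("reset=%d\n", cvl_wants_reset(fcf, fcf, id, 0x010203));
	return 0;
}
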
diff --git a/drivers/scsi/qedf/qedf_hsi.h b/drivers/scsi/qedf/qedf_hsi.h
index dfd65dec2874..7faef80c5f7a 100644
--- a/drivers/scsi/qedf/qedf_hsi.h
+++ b/drivers/scsi/qedf/qedf_hsi.h
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 1d7f90d0adc1..ded386036c27 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
@@ -1041,10 +1041,13 @@ static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
}
- memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
- if (fcp_sns_len)
- memcpy(sc_cmd->sense_buffer, sense_data,
- fcp_sns_len);
+ /* The sense buffer can be NULL for TMF commands */
+ if (sc_cmd->sense_buffer) {
+ memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ if (fcp_sns_len)
+ memcpy(sc_cmd->sense_buffer, sense_data,
+ fcp_sns_len);
+ }
}
static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
@@ -1476,8 +1479,8 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
{
struct fc_lport *lport;
struct qedf_rport *fcport = io_req->fcport;
- struct fc_rport_priv *rdata = fcport->rdata;
- struct qedf_ctx *qedf = fcport->qedf;
+ struct fc_rport_priv *rdata;
+ struct qedf_ctx *qedf;
u16 xid;
u32 r_a_tov = 0;
int rc = 0;
@@ -1485,15 +1488,18 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
struct fcoe_wqe *sqe;
u16 sqe_idx;
- r_a_tov = rdata->r_a_tov;
- lport = qedf->lport;
-
+ /* Sanity check qedf_rport before dereferencing any pointers */
if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
- QEDF_ERR(&(qedf->dbg_ctx), "tgt not offloaded\n");
+ QEDF_ERR(NULL, "tgt not offloaded\n");
rc = 1;
goto abts_err;
}
+ rdata = fcport->rdata;
+ r_a_tov = rdata->r_a_tov;
+ qedf = fcport->qedf;
+ lport = qedf->lport;
+
if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
rc = 1;
@@ -1729,6 +1735,13 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
return SUCCESS;
}
+ /* Sanity check qedf_rport before dereferencing any pointers */
+ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+ QEDF_ERR(NULL, "tgt not offloaded\n");
+ rc = 1;
+ return SUCCESS;
+ }
+
qedf = fcport->qedf;
if (!qedf) {
QEDF_ERR(NULL, "qedf is NULL.\n");
@@ -1837,7 +1850,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
return FAILED;
}
- if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
+ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
rc = FAILED;
return FAILED;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index a5c97342fd5d..b58bba4604e8 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
@@ -22,6 +22,7 @@
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include "qedf.h"
+#include <uapi/linux/pci_regs.h>
const struct qed_fcoe_ops *qed_ops;
@@ -94,7 +95,7 @@ module_param_named(dp_module, qedf_dp_module, uint, S_IRUGO);
MODULE_PARM_DESC(dp_module, " bit flags control for verbose printk passed "
"qed module during probe.");
-static uint qedf_dp_level;
+static uint qedf_dp_level = QED_LEVEL_NOTICE;
module_param_named(dp_level, qedf_dp_level, uint, S_IRUGO);
MODULE_PARM_DESC(dp_level, " printk verbosity control passed to qed module "
"during probe (0-3: 0 more verbose).");
@@ -441,7 +442,8 @@ static void qedf_link_update(void *dev, struct qed_link_output *link)
qedf_update_link_speed(qedf, link);
if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) {
- QEDF_ERR(&(qedf->dbg_ctx), "DCBx done.\n");
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "DCBx done.\n");
if (atomic_read(&qedf->link_down_tmo_valid) > 0)
queue_delayed_work(qedf->link_update_wq,
&qedf->link_recovery, 0);
@@ -627,6 +629,16 @@ static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
}
+static int qedf_eh_bus_reset(struct scsi_cmnd *sc_cmd)
+{
+ QEDF_ERR(NULL, "BUS RESET Issued...\n");
+ /*
+ * Essentially a no-op but return SUCCESS to prevent
+ * unnecessary escalation to the host reset handler.
+ */
+ return SUCCESS;
+}
+
void qedf_wait_for_upload(struct qedf_ctx *qedf)
{
while (1) {
@@ -639,27 +651,17 @@ void qedf_wait_for_upload(struct qedf_ctx *qedf)
}
}
-/* Reset the host by gracefully logging out and then logging back in */
-static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd)
+/* Performs soft reset of qedf_ctx by simulating a link down/up */
+static void qedf_ctx_soft_reset(struct fc_lport *lport)
{
- struct fc_lport *lport;
struct qedf_ctx *qedf;
- lport = shost_priv(sc_cmd->device->host);
-
if (lport->vport) {
QEDF_ERR(NULL, "Cannot issue host reset on NPIV port.\n");
- return SUCCESS;
+ return;
}
- qedf = (struct qedf_ctx *)lport_priv(lport);
-
- if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN ||
- test_bit(QEDF_UNLOADING, &qedf->flags) ||
- test_bit(QEDF_DBG_STOP_IO, &qedf->flags))
- return FAILED;
-
- QEDF_ERR(&(qedf->dbg_ctx), "HOST RESET Issued...");
+ qedf = lport_priv(lport);
/* For host reset, essentially do a soft link up/down */
atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
@@ -671,6 +673,24 @@ static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd)
qedf->vlan_id = 0;
queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
0);
+}
+
+/* Reset the host by gracefully logging out and then logging back in */
+static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd)
+{
+ struct fc_lport *lport;
+ struct qedf_ctx *qedf;
+
+ lport = shost_priv(sc_cmd->device->host);
+ qedf = lport_priv(lport);
+
+ if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN ||
+ test_bit(QEDF_UNLOADING, &qedf->flags))
+ return FAILED;
+
+ QEDF_ERR(&(qedf->dbg_ctx), "HOST RESET Issued...");
+
+ qedf_ctx_soft_reset(lport);
return SUCCESS;
}
@@ -688,7 +708,7 @@ static struct scsi_host_template qedf_host_template = {
.module = THIS_MODULE,
.name = QEDF_MODULE_NAME,
.this_id = -1,
- .cmd_per_lun = 3,
+ .cmd_per_lun = 32,
.use_clustering = ENABLE_CLUSTERING,
.max_sectors = 0xffff,
.queuecommand = qedf_queuecommand,
@@ -696,11 +716,13 @@ static struct scsi_host_template qedf_host_template = {
.eh_abort_handler = qedf_eh_abort,
.eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */
.eh_target_reset_handler = qedf_eh_target_reset, /* target reset */
+ .eh_bus_reset_handler = qedf_eh_bus_reset,
.eh_host_reset_handler = qedf_eh_host_reset,
.slave_configure = qedf_slave_configure,
.dma_boundary = QED_HW_DMA_BOUNDARY,
.sg_tablesize = QEDF_MAX_BDS_PER_CMD,
.can_queue = FCOE_PARAMS_NUM_TASKS,
+ .change_queue_depth = scsi_change_queue_depth,
};
static int qedf_get_paged_crc_eof(struct sk_buff *skb, int tlen)
@@ -874,7 +896,7 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
} else {
- cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
+ cp = skb_put(skb, tlen);
}
memset(cp, 0, sizeof(*cp));
@@ -950,25 +972,21 @@ static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
sizeof(void *);
fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE;
- fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
- &fcport->sq_dma, GFP_KERNEL);
+ fcport->sq = dma_zalloc_coherent(&qedf->pdev->dev,
+ fcport->sq_mem_size, &fcport->sq_dma, GFP_KERNEL);
if (!fcport->sq) {
- QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send "
- "queue.\n");
+ QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n");
rval = 1;
goto out;
}
- memset(fcport->sq, 0, fcport->sq_mem_size);
- fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
+ fcport->sq_pbl = dma_zalloc_coherent(&qedf->pdev->dev,
fcport->sq_pbl_size, &fcport->sq_pbl_dma, GFP_KERNEL);
if (!fcport->sq_pbl) {
- QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send "
- "queue PBL.\n");
+ QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n");
rval = 1;
goto out_free_sq;
}
- memset(fcport->sq_pbl, 0, fcport->sq_pbl_size);
/* Create PBL */
num_pages = fcport->sq_mem_size / QEDF_PAGE_SIZE;
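
This hunk, and the qedf_alloc_bdq and qedf_alloc_global_queues hunks further down, all make the same conversion: dma_alloc_coherent() plus a follow-up memset() collapses into dma_zalloc_coherent(), which hands back zeroed memory. The shape matches preferring calloc() over malloc()+memset() in userspace; a tiny runnable analogy:

#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t n = 4096;

	/* Before: allocate, then zero in a second step. */
	void *a = malloc(n);
	if (a)
		memset(a, 0, n);

	/* After: one call that returns zeroed memory. */
	void *b = calloc(1, n);

	free(a);
	free(b);
	return 0;
}
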
@@ -1334,6 +1352,59 @@ static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
ether_addr_copy(qedf->ctlr.ctl_src_addr, qedf->mac);
}
+static void qedf_setup_fdmi(struct qedf_ctx *qedf)
+{
+ struct fc_lport *lport = qedf->lport;
+ struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
+ u8 buf[8];
+ int i, pos;
+
+ /*
+ * fdmi_enabled needs to be set for libfc to execute FDMI registration.
+ */
+ lport->fdmi_enabled = 1;
+
+ /*
+ * Set up the fc_host attributes that will be used to fill
+ * in the FDMI information.
+ */
+
+ /* Get the PCI-e Device Serial Number Capability */
+ pos = pci_find_ext_capability(qedf->pdev, PCI_EXT_CAP_ID_DSN);
+ if (pos) {
+ pos += 4;
+ for (i = 0; i < 8; i++)
+ pci_read_config_byte(qedf->pdev, pos + i, &buf[i]);
+
+ snprintf(fc_host->serial_number,
+ sizeof(fc_host->serial_number),
+ "%02X%02X%02X%02X%02X%02X%02X%02X",
+ buf[7], buf[6], buf[5], buf[4],
+ buf[3], buf[2], buf[1], buf[0]);
+ } else {
+ snprintf(fc_host->serial_number,
+ sizeof(fc_host->serial_number), "Unknown");
+ }
+
+ snprintf(fc_host->manufacturer,
+ sizeof(fc_host->manufacturer), "%s", "Cavium Inc.");
+
+ snprintf(fc_host->model, sizeof(fc_host->model), "%s", "QL41000");
+
+ snprintf(fc_host->model_description, sizeof(fc_host->model_description),
+ "%s", "QLogic FastLinQ QL41000 Series 10/25/40/50GGbE Controller"
+ "(FCoE)");
+
+ snprintf(fc_host->hardware_version, sizeof(fc_host->hardware_version),
+ "Rev %d", qedf->pdev->revision);
+
+ snprintf(fc_host->driver_version, sizeof(fc_host->driver_version),
+ "%s", QEDF_VERSION);
+
+ snprintf(fc_host->firmware_version, sizeof(fc_host->firmware_version),
+ "%d.%d.%d.%d", FW_MAJOR_VERSION, FW_MINOR_VERSION,
+ FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
+}
+
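
qedf_setup_fdmi() walks PCI config space for the Device Serial Number extended capability and formats the eight serial bytes most significant first. The same walk can be done from userspace against sysfs; a hedged sketch, where the device path is an assumption and reading past 256 bytes of config space needs root:

/* Locate the PCIe DSN extended capability (ID 0x0003) and print the
 * serial the same way the driver does, high byte first.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t cfg[4096];
	FILE *f = fopen("/sys/bus/pci/devices/0000:01:00.0/config", "rb");
	size_t len;
	unsigned int off = 0x100;	/* extended caps start here */

	if (!f) {
		perror("fopen");
		return 1;
	}
	len = fread(cfg, 1, sizeof(cfg), f);
	fclose(f);

	while (off && off + 12 <= len) {
		uint32_t hdr = cfg[off] | cfg[off + 1] << 8 |
			       cfg[off + 2] << 16 |
			       (uint32_t)cfg[off + 3] << 24;

		if ((hdr & 0xffff) == 0x0003) {	/* PCI_EXT_CAP_ID_DSN */
			printf("serial: ");
			for (int i = 7; i >= 0; i--)
				printf("%02X", cfg[off + 4 + i]);
			printf("\n");
			return 0;
		}
		off = hdr >> 20;	/* next capability offset */
	}
	fprintf(stderr, "DSN capability not found\n");
	return 1;
}
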
static int qedf_lport_setup(struct qedf_ctx *qedf)
{
struct fc_lport *lport = qedf->lport;
@@ -1377,6 +1448,8 @@ static int qedf_lport_setup(struct qedf_ctx *qedf)
snprintf(fc_host_symbolic_name(lport->host), 256,
"QLogic %s v%s", QEDF_MODULE_NAME, QEDF_VERSION);
+ qedf_setup_fdmi(qedf);
+
return 0;
}
@@ -1613,8 +1686,7 @@ static int qedf_fcoe_reset(struct Scsi_Host *shost)
{
struct fc_lport *lport = shost_priv(shost);
- fc_fabric_logoff(lport);
- fc_fabric_login(lport);
+ qedf_ctx_soft_reset(lport);
return 0;
}
@@ -1979,6 +2051,8 @@ static int qedf_setup_int(struct qedf_ctx *qedf)
* Learn interrupt configuration
*/
rc = qed_ops->common->set_fp_int(qedf->cdev, num_online_cpus());
+ if (rc <= 0)
+ return 0;
rc = qed_ops->common->get_fp_int(qedf->cdev, &qedf->int_info);
if (rc)
@@ -2011,6 +2085,8 @@ static void qedf_recv_frame(struct qedf_ctx *qedf,
u8 *dest_mac = NULL;
struct fcoe_hdr *hp;
struct qedf_rport *fcport;
+ struct fc_lport *vn_port;
+ u32 f_ctl;
lport = qedf->lport;
if (lport == NULL || lport->state == LPORT_ST_DISABLED) {
@@ -2047,6 +2123,10 @@ static void qedf_recv_frame(struct qedf_ctx *qedf,
fh = fc_frame_header_get(fp);
+ /*
+ * Invalid frame filters.
+ */
+
if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
fh->fh_type == FC_TYPE_FCP) {
/* Drop FCP data. We don't handle this in the L2 path. */
@@ -2072,6 +2152,45 @@ static void qedf_recv_frame(struct qedf_ctx *qedf,
return;
}
+ if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
+ "FC frame d_id mismatch with MAC %pM.\n", dest_mac);
+ return;
+ }
+
+ if (qedf->ctlr.state) {
+ if (!ether_addr_equal(mac, qedf->ctlr.dest_addr)) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
+ "Wrong source address: mac:%pM dest_addr:%pM.\n",
+ mac, qedf->ctlr.dest_addr);
+ kfree_skb(skb);
+ return;
+ }
+ }
+
+ vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
+
+ /*
+ * If the destination ID from the frame header does not match what we
+ * have on record for lport and the search for a NPIV port came up
+ * empty then this is not addressed to our port so simply drop it.
+ */
+ if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
+ "Dropping frame due to destination mismatch: lport->port_id=%x fh->d_id=%x.\n",
+ lport->port_id, ntoh24(fh->fh_d_id));
+ kfree_skb(skb);
+ return;
+ }
+
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ if ((fh->fh_type == FC_TYPE_BLS) && (f_ctl & FC_FC_SEQ_CTX) &&
+ (f_ctl & FC_FC_EX_CTX)) {
+ /* Drop incoming ABTS response that has both SEQ/EX CTX set */
+ kfree_skb(skb);
+ return;
+ }
+
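
The first new filter relies on FPMA addressing, where the low three bytes of the destination MAC encode the 24-bit FC ID, so a mismatch with the frame header's d_id marks a stray frame. A standalone sketch of that comparison; the helper and names are illustrative, not the driver's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t ntoh24(const uint8_t *p)
{
	return (p[0] << 16) | (p[1] << 8) | p[2];
}

/* FPMA MACs embed the 24-bit FC ID in their low three bytes. */
static bool dmac_matches_did(const uint8_t dest_mac[6],
			     const uint8_t fh_d_id[3])
{
	return ntoh24(&dest_mac[3]) == ntoh24(fh_d_id);
}

int main(void)
{
	uint8_t mac[6] = {0x0e, 0xfc, 0x00, 0x01, 0x02, 0x03};
	uint8_t did[3] = {0x01, 0x02, 0x03};

	printf("match=%d\n", dmac_matches_did(mac, did));
	return 0;
}
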
/*
* If a connection is uploading, drop incoming FCoE frames as there
* is a small window where we could try to return a frame while libfc
@@ -2117,7 +2236,7 @@ static void qedf_ll2_process_skb(struct work_struct *work)
/* Undo VLAN encapsulation */
if (eh->h_proto == htons(ETH_P_8021Q)) {
memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
- eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
+ eh = skb_pull(skb, VLAN_HLEN);
skb_reset_mac_header(skb);
}
@@ -2474,14 +2593,12 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf)
}
/* Allocate list of PBL pages */
- qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev,
+ qedf->bdq_pbl_list = dma_zalloc_coherent(&qedf->pdev->dev,
QEDF_PAGE_SIZE, &qedf->bdq_pbl_list_dma, GFP_KERNEL);
if (!qedf->bdq_pbl_list) {
- QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL "
- "pages.\n");
+ QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n");
return -ENOMEM;
}
- memset(qedf->bdq_pbl_list, 0, QEDF_PAGE_SIZE);
/*
* Now populate PBL list with pages that contain pointers to the
@@ -2548,8 +2665,9 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
qedf->global_queues[i] = kzalloc(sizeof(struct global_queue),
GFP_KERNEL);
if (!qedf->global_queues[i]) {
- QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocation "
+ QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocate "
"global queue %d.\n", i);
+ status = -ENOMEM;
goto mem_alloc_failure;
}
@@ -2565,32 +2683,26 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE);
qedf->global_queues[i]->cq =
- dma_alloc_coherent(&qedf->pdev->dev,
+ dma_zalloc_coherent(&qedf->pdev->dev,
qedf->global_queues[i]->cq_mem_size,
&qedf->global_queues[i]->cq_dma, GFP_KERNEL);
if (!qedf->global_queues[i]->cq) {
- QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
- "cq.\n");
+ QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n");
status = -ENOMEM;
goto mem_alloc_failure;
}
- memset(qedf->global_queues[i]->cq, 0,
- qedf->global_queues[i]->cq_mem_size);
qedf->global_queues[i]->cq_pbl =
- dma_alloc_coherent(&qedf->pdev->dev,
+ dma_zalloc_coherent(&qedf->pdev->dev,
qedf->global_queues[i]->cq_pbl_size,
&qedf->global_queues[i]->cq_pbl_dma, GFP_KERNEL);
if (!qedf->global_queues[i]->cq_pbl) {
- QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
- "cq PBL.\n");
+ QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n");
status = -ENOMEM;
goto mem_alloc_failure;
}
- memset(qedf->global_queues[i]->cq_pbl, 0,
- qedf->global_queues[i]->cq_pbl_size);
/* Create PBL */
num_pages = qedf->global_queues[i]->cq_mem_size /
@@ -2683,8 +2795,7 @@ static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf)
cq_mem_size = ALIGN(cq_mem_size, QEDF_PAGE_SIZE);
cq_num_entries = cq_mem_size / sizeof(struct fcoe_cqe);
- memset(&(qedf->pf_params), 0,
- sizeof(qedf->pf_params));
+ memset(&(qedf->pf_params), 0, sizeof(qedf->pf_params));
/* Setup the value for fcoe PF */
qedf->pf_params.fcoe_pf_params.num_cons = QEDF_MAX_SESSIONS;
@@ -2954,7 +3065,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
"WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn);
sprintf(host_buf, "host_%d", host->host_no);
- qed_ops->common->set_id(qedf->cdev, host_buf, QEDF_VERSION);
+ qed_ops->common->set_name(qedf->cdev, host_buf);
/* Set xid max values */
diff --git a/drivers/scsi/qedf/qedf_version.h b/drivers/scsi/qedf/qedf_version.h
index 4ae5f537a440..6fa442061c32 100644
--- a/drivers/scsi/qedf/qedf_version.h
+++ b/drivers/scsi/qedf/qedf_version.h
@@ -1,15 +1,15 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
-#define QEDF_VERSION "8.10.7.0"
+#define QEDF_VERSION "8.18.22.0"
#define QEDF_DRIVER_MAJOR_VER 8
-#define QEDF_DRIVER_MINOR_VER 10
-#define QEDF_DRIVER_REV_VER 7
+#define QEDF_DRIVER_MINOR_VER 18
+#define QEDF_DRIVER_REV_VER 22
#define QEDF_DRIVER_ENG_VER 0
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 507512cc478b..19254bd739d9 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -333,7 +333,7 @@ static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
/* Obtain buffer address from rqe_opaque */
idx = cqe->rqe_opaque.lo;
- if ((idx < 0) || (idx > (QEDI_BDQ_NUM - 1))) {
+ if (idx > (QEDI_BDQ_NUM - 1)) {
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"wrong idx %d returned by FW, dropping the unsolicited pkt\n",
idx);
@@ -370,7 +370,7 @@ static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
/* Obtain buffer address from rqe_opaque */
idx = cqe->rqe_opaque.lo;
- if ((idx < 0) || (idx > (QEDI_BDQ_NUM - 1))) {
+ if (idx > (QEDI_BDQ_NUM - 1)) {
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"wrong idx %d returned by FW, dropping the unsolicited pkt\n",
idx);
@@ -2100,14 +2100,16 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
/* Update header info */
SET_FIELD(cmd_pdu_header.flags_attr, ISCSI_CMD_HDR_ATTR,
ISCSI_ATTR_SIMPLE);
- if (sc->sc_data_direction == DMA_TO_DEVICE) {
- SET_FIELD(cmd_pdu_header.flags_attr,
- ISCSI_CMD_HDR_WRITE, 1);
- task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE;
- } else {
- SET_FIELD(cmd_pdu_header.flags_attr,
- ISCSI_CMD_HDR_READ, 1);
- task_type = ISCSI_TASK_TYPE_INITIATOR_READ;
+ if (hdr->cdb[0] != TEST_UNIT_READY) {
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+ SET_FIELD(cmd_pdu_header.flags_attr,
+ ISCSI_CMD_HDR_WRITE, 1);
+ task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE;
+ } else {
+ SET_FIELD(cmd_pdu_header.flags_attr,
+ ISCSI_CMD_HDR_READ, 1);
+ task_type = ISCSI_TASK_TYPE_INITIATOR_READ;
+ }
}
cmd_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
@@ -2118,7 +2120,7 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
cmd_pdu_header.expected_transfer_length = cpu_to_be32(hdr->data_length);
cmd_pdu_header.hdr_second_dword = ntoh24(hdr->dlength);
cmd_pdu_header.cmd_sn = be32_to_cpu(hdr->cmdsn);
- cmd_pdu_header.opcode = hdr->opcode;
+ cmd_pdu_header.hdr_first_byte = hdr->opcode;
qedi_cpy_scsi_cdb(sc, (u32 *)cmd_pdu_header.cdb);
/* Fill tx AHS and rx buffer */
diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c
index fd354d4e03eb..7df32a68bd54 100644
--- a/drivers/scsi/qedi/qedi_fw_api.c
+++ b/drivers/scsi/qedi/qedi_fw_api.c
@@ -578,7 +578,8 @@ int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params,
(struct iscsi_common_hdr *)cmd_header,
tx_sgl_params, cmd_params,
dif_task_params);
- else if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_READ))
+ else if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_READ) ||
+ (task_params->rx_io_size == 0 && task_params->tx_io_size == 0))
return init_rw_iscsi_task(task_params,
ISCSI_TASK_TYPE_INITIATOR_READ,
conn_params,
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 87f0af358b33..80edd28b635f 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -1466,9 +1466,6 @@ static const struct {
{ ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR,
"out of sge error"
},
- { ISCSI_CONN_ERROR_TCP_SEG_PROC_IP_OPTIONS_ERROR,
- "tcp seg ip options error"
- },
{ ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR,
"tcp ip fragment error"
},
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 879d3b7462f9..5f5a4ef2e529 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1842,7 +1842,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
qedi->mac);
sprintf(host_buf, "host_%d", qedi->shost->host_no);
- qedi_ops->common->set_id(qedi->cdev, host_buf, QEDI_MODULE_VERSION);
+ qedi_ops->common->set_name(qedi->cdev, host_buf);
qedi_ops->register_ops(qedi->cdev, &qedi_cb_ops, qedi);
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 634254a52301..8a29fb09db14 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -3390,7 +3390,7 @@ qla1280_isp_cmd(struct scsi_qla_host *ha)
* On PCI bus, order reverses and write of 6 posts, then index 5,
* causing chip to issue full queue of stale commands
* The mmiowb() prevents future writes from crossing the barrier.
- * See Documentation/DocBook/deviceiobook.tmpl for more information.
+ * See Documentation/driver-api/device-io.rst for more information.
*/
WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
mmiowb();
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index de952935b5d2..036cc3f217b1 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -2,6 +2,7 @@ config SCSI_QLA_FC
tristate "QLogic QLA2XXX Fibre Channel Support"
depends on PCI && SCSI
depends on SCSI_FC_ATTRS
+ depends on NVME_FC || !NVME_FC
select FW_LOADER
select BTREE
---help---
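
The added dependency reads like a tautology, but `depends on NVME_FC || !NVME_FC` is the standard Kconfig idiom for "no stronger than": SCSI_QLA_FC may be y only when NVME_FC is y or disabled, and is capped at m when NVME_FC=m, so a built-in qla2xxx can never reference symbols in a modular NVMe-FC transport.
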
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index 44def6bb4bb0..0b767a0bb308 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,6 +1,6 @@
qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
- qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o
+ qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o qla_nvme.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 7c8d6c54ab70..08a1feb3a195 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -769,7 +769,7 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
did.b.area = (type & 0x0000ff00) >> 8;
did.b.al_pa = (type & 0x000000ff);
- ql_log(ql_log_info, vha, 0x70e3, "portid=%02x%02x%02x done\n",
+ ql_log(ql_log_info, vha, 0xd04d, "portid=%02x%02x%02x done\n",
did.b.domain, did.b.area, did.b.al_pa);
ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);
@@ -929,7 +929,7 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
iter->name, ret);
else
ql_dbg(ql_dbg_init, vha, 0x00f4,
- "Successfully created sysfs %s binary attribure.\n",
+ "Successfully created sysfs %s binary attribute.\n",
iter->name);
}
}
@@ -2096,7 +2096,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
}
if (qos) {
- qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx);
+ qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx, true);
if (!qpair)
ql_log(ql_log_warn, vha, 0x7084,
"Can't create qpair for VP[%d]\n",
@@ -2289,7 +2289,7 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
- fc_host_supported_classes(vha->host) = ha->tgt.enable_class_2 ?
+ fc_host_supported_classes(vha->host) = ha->base_qpair->enable_class_2 ?
(FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index ca3420de5a01..2ea0ef93f5cb 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -293,7 +293,7 @@ qla2x00_process_els(struct bsg_job *bsg_job)
if (bsg_job->request_payload.sg_cnt > 1 ||
bsg_job->reply_payload.sg_cnt > 1) {
ql_dbg(ql_dbg_user, vha, 0x7002,
- "Multiple SG's are not suppored for ELS requests, "
+ "Multiple SG's are not supported for ELS requests, "
"request_sg_cnt=%x reply_sg_cnt=%x.\n",
bsg_job->request_payload.sg_cnt,
bsg_job->reply_payload.sg_cnt);
@@ -2135,7 +2135,7 @@ qla8044_serdes_op(struct bsg_job *bsg_job)
bsg_reply->reply_payload_rcv_len = sizeof(sr);
break;
default:
- ql_dbg(ql_dbg_user, vha, 0x70cf,
+ ql_dbg(ql_dbg_user, vha, 0x7020,
"Unknown serdes cmd %x.\n", sr.cmd);
rval = -EINVAL;
break;
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 88748a6ab73f..26751d34bcf2 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -15,9 +15,10 @@
* | | | 0x015b-0x0160 |
* | | | 0x016e |
* | Mailbox commands | 0x1199 | 0x1193 |
- * | Device Discovery | 0x2004 | 0x2016 |
- * | | | 0x2011-0x2012, |
- * | | | 0x2099-0x20a4 |
+ * | Device Discovery | 0x2134 | 0x210e-0x2116 |
+ * | | | 0x211a |
+ * | | | 0x211c-0x2128 |
+ * | | | 0x212a-0x2130 |
* | Queue Command and IO tracing | 0x3074 | 0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x303d-0x3041 |
@@ -59,10 +60,10 @@
* | | | 0xb13c-0xb140 |
* | | | 0xb149 |
* | MultiQ | 0xc010 | |
- * | Misc | 0xd301 | 0xd031-0xd0ff |
+ * | Misc | 0xd302 | 0xd031-0xd0ff |
* | | | 0xd101-0xd1fe |
* | | | 0xd214-0xd2fe |
- * | Target Mode | 0xe080 | |
+ * | Target Mode | 0xe081 | |
* | Target Mode Management | 0xf09b | 0xf002 |
* | | | 0xf046-0xf049 |
* | Target Mode Task Management | 0x1000d | |
@@ -498,6 +499,50 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
}
static inline void *
+qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+{
+ struct qla2xxx_offld_chain *c = ptr;
+
+ if (!ha->exlogin_buf)
+ return ptr;
+
+ *last_chain = &c->type;
+
+ c->type = cpu_to_be32(DUMP_CHAIN_EXLOGIN);
+ c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
+ ha->exlogin_size);
+ c->size = cpu_to_be32(ha->exlogin_size);
+ c->addr = cpu_to_be64(ha->exlogin_buf_dma);
+
+ ptr += sizeof(struct qla2xxx_offld_chain);
+ memcpy(ptr, ha->exlogin_buf, ha->exlogin_size);
+
+ return (char *)ptr + cpu_to_be32(c->size);
+}
+
+static inline void *
+qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+{
+ struct qla2xxx_offld_chain *c = ptr;
+
+ if (!ha->exchoffld_buf)
+ return ptr;
+
+ *last_chain = &c->type;
+
+ c->type = cpu_to_be32(DUMP_CHAIN_EXCHG);
+ c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
+ ha->exchoffld_size);
+ c->size = cpu_to_be32(ha->exchoffld_size);
+ c->addr = cpu_to_be64(ha->exchoffld_buf_dma);
+
+ ptr += sizeof(struct qla2xxx_offld_chain);
+ memcpy(ptr, ha->exchoffld_buf, ha->exchoffld_size);
+
+ return (char *)ptr + cpu_to_be32(c->size);
+}
+
+static inline void *
qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
uint32_t **last_chain)
{
@@ -1606,6 +1651,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
+ nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
if (last_chain) {
ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
*last_chain |= htonl(DUMP_CHAIN_LAST);
@@ -1932,6 +1978,8 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
+ nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
+ nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain);
if (last_chain) {
ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
*last_chain |= htonl(DUMP_CHAIN_LAST);
@@ -2443,6 +2491,8 @@ copy_queue:
nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
+ nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
+ nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain);
if (last_chain) {
ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
*last_chain |= htonl(DUMP_CHAIN_LAST);
@@ -2713,3 +2763,104 @@ ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
buf + cnt, min(16U, size - cnt), false);
}
}
+
+/*
+ * This function formats and logs log messages. It is to be used when
+ * a qpair is available. It formats the message and logs it to the
+ * messages file. All messages are logged irrespective of the value
+ * of ql2xextended_error_logging.
+ * parameters:
+ * level: The level of the log message to be printed in the
+ * messages file.
+ * qpair: Pointer to the qla_qpair the message relates to.
+ * id: This is a unique id for the level. It identifies the
+ * part of the code from where the message originated.
+ * fmt: The format string of the message to be displayed.
+ */
+void
+ql_log_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
+ const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char pbuf[128];
+
+ if (level > ql_errlev)
+ return;
+
+ if (qpair != NULL) {
+ const struct pci_dev *pdev = qpair->pdev;
+ /* <module-name> [<pci-name>]-<msg-id>: Message */
+ snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: ",
+ QL_MSGHDR, dev_name(&(pdev->dev)), id);
+ } else {
+ snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
+ QL_MSGHDR, "0000:00:00.0", id);
+ }
+ pbuf[sizeof(pbuf) - 1] = 0;
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ switch (level) {
+ case ql_log_fatal: /* FATAL LOG */
+ pr_crit("%s%pV", pbuf, &vaf);
+ break;
+ case ql_log_warn:
+ pr_err("%s%pV", pbuf, &vaf);
+ break;
+ case ql_log_info:
+ pr_warn("%s%pV", pbuf, &vaf);
+ break;
+ default:
+ pr_info("%s%pV", pbuf, &vaf);
+ break;
+ }
+
+ va_end(va);
+}
+
+/*
+ * This function formats and logs debug information. It is to be used
+ * when a qpair is available. It formats the message and logs it to
+ * the messages file.
+ * parameters:
+ * level: The level of the debug message to be printed.
+ * If ql2xextended_error_logging is set appropriately,
+ * this message will appear in the messages file.
+ * qpair: Pointer to the qla_qpair the message relates to.
+ * id: This is a unique identifier for the level. It identifies the
+ * part of the code from where the message originated.
+ * fmt: The format string of the message to be displayed.
+ */
+void
+ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
+ const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+
+ if (!ql_mask_match(level))
+ return;
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ if (qpair != NULL) {
+ const struct pci_dev *pdev = qpair->pdev;
+ /* <module-name> [<pci-name>]-<msg-id>: Message */
+ pr_warn("%s [%s]-%04x: %pV",
+ QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
+ &vaf);
+ } else {
+ pr_warn("%s [%s]-%04x: : %pV",
+ QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
+ }
+
+ va_end(va);
+
+}
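
ql_log_qp() and ql_dbg_qp() follow the usual varargs-wrapper pattern: build a prefix from the qpair's PCI device name, then forward the caller's format and va_list to printk through struct va_format and %pV. A runnable userspace analogy of the same shape, with printf standing in for printk and illustrative names:

#include <stdarg.h>
#include <stdio.h>

/* Prefix the message with a fixed header, then forward the va_list,
 * mirroring how ql_log_qp hands fmt/va to printk via %pV.
 */
static void qp_log(const char *dev, int id, const char *fmt, ...)
{
	va_list va;

	printf("qla2xxx [%s]-%04x: ", dev ? dev : "0000:00:00.0", id);
	va_start(va, fmt);
	vprintf(fmt, va);
	va_end(va);
}

int main(void)
{
	qp_log("0000:03:00.0", 0x3074, "qpair %d online\n", 1);
	return 0;
}
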
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index c6bffe929fe7..8877aa97d829 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -232,6 +232,15 @@ struct qla2xxx_fce_chain {
uint32_t eregs[8];
};
+/* used by exchange off load and extended login offload */
+struct qla2xxx_offld_chain {
+ uint32_t type;
+ uint32_t chain_size;
+
+ uint32_t size;
+ u64 addr;
+};
+
struct qla2xxx_mq_chain {
uint32_t type;
uint32_t chain_size;
@@ -258,6 +267,8 @@ struct qla2xxx_mqueue_chain {
#define DUMP_CHAIN_FCE 0x7FFFFAF0
#define DUMP_CHAIN_MQ 0x7FFFFAF1
#define DUMP_CHAIN_QUEUE 0x7FFFFAF2
+#define DUMP_CHAIN_EXLOGIN 0x7FFFFAF3
+#define DUMP_CHAIN_EXCHG 0x7FFFFAF4
#define DUMP_CHAIN_LAST 0x80000000
struct qla2xxx_fw_dump {
@@ -313,12 +324,18 @@ void __attribute__((format (printf, 4, 5)))
ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...);
void __attribute__((format (printf, 4, 5)))
ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_dbg_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...);
+
void __attribute__((format (printf, 4, 5)))
ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...);
void __attribute__((format (printf, 4, 5)))
ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...);
+
/* Debug Levels */
/* The 0x40000000 is the max value any debug level can have
* as ql2xextended_error_logging is of type signed int
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index eddbc1218a39..0730b10b4280 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -37,6 +37,7 @@
#include "qla_bsg.h"
#include "qla_nx.h"
#include "qla_nx2.h"
+#include "qla_nvme.h"
#define QLA2XXX_DRIVER_NAME "qla2xxx"
#define QLA2XXX_APIDEV "ql2xapidev"
#define QLA2XXX_MANUFACTURER "QLogic Corporation"
@@ -252,6 +253,8 @@
#define NPH_F_PORT 0x7fe /* FFFFFE */
#define NPH_IP_BROADCAST 0x7ff /* FFFFFF */
+#define NPH_SNS_LID(ha) (IS_FWI2_CAPABLE(ha) ? NPH_SNS : SIMPLE_NAME_SERVER)
+
#define MAX_CMDSZ 16 /* SCSI maximum CDB size. */
#include "qla_fw.h"
@@ -284,7 +287,7 @@ struct name_list_extended {
#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/
#define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */
#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/
-#define EXTENDED_EXCH_ENTRY_CNT 32768 /* Entries for offload case */
+#define FW_DEF_EXCHANGES_CNT 2048
struct req_que;
struct qla_tgt_sess;
@@ -341,6 +344,7 @@ struct srb_iocb {
#define SRB_LOGIN_RETRIED BIT_0
#define SRB_LOGIN_COND_PLOGI BIT_1
#define SRB_LOGIN_SKIP_PRLI BIT_2
+#define SRB_LOGIN_NVME_PRLI BIT_3
uint16_t data[2];
u32 iop[2];
} logio;
@@ -409,6 +413,21 @@ struct srb_iocb {
struct {
struct imm_ntfy_from_isp *ntfy;
} nack;
+ struct {
+ __le16 comp_status;
+ uint16_t rsp_pyld_len;
+ uint8_t aen_op;
+ void *desc;
+
+ /* These are only used with ls4 requests */
+ int cmd_len;
+ int rsp_len;
+ dma_addr_t cmd_dma;
+ dma_addr_t rsp_dma;
+ enum nvmefc_fcp_datadir dir;
+ uint32_t dl;
+ uint32_t timeout_sec;
+ } nvme;
} u;
struct timer_list timer;
@@ -434,9 +453,24 @@ struct srb_iocb {
#define SRB_NACK_PLOGI 16
#define SRB_NACK_PRLI 17
#define SRB_NACK_LOGO 18
+#define SRB_NVME_CMD 19
+#define SRB_NVME_LS 20
+#define SRB_PRLI_CMD 21
+
+enum {
+ TYPE_SRB,
+ TYPE_TGT_CMD,
+};
typedef struct srb {
+ /*
+ * Do not move cmd_type field, it needs to
+ * line up with qla_tgt_cmd->cmd_type
+ */
+ uint8_t cmd_type;
+ uint8_t pad[3];
atomic_t ref_count;
+ wait_queue_head_t nvme_ls_waitQ;
struct fc_port *fcport;
struct scsi_qla_host *vha;
uint32_t handle;
@@ -1075,6 +1109,7 @@ struct mbx_cmd_32 {
#define MBX_1 BIT_1
#define MBX_0 BIT_0
+#define RNID_TYPE_PORT_LOGIN 0x7
#define RNID_TYPE_SET_VERSION 0x9
#define RNID_TYPE_ASIC_TEMP 0xC
@@ -2139,6 +2174,7 @@ typedef struct {
uint8_t fabric_port_name[WWN_SIZE];
uint16_t fp_speed;
uint8_t fc4_type;
+ uint8_t fc4f_nvme; /* nvme fc4 feature bits */
} sw_info_t;
/* FCP-4 types */
@@ -2167,7 +2203,8 @@ typedef enum {
FCT_SWITCH,
FCT_BROADCAST,
FCT_INITIATOR,
- FCT_TARGET
+ FCT_TARGET,
+ FCT_NVME
} fc_port_type_t;
enum qla_sess_deletion {
@@ -2224,10 +2261,12 @@ enum fcport_mgt_event {
FCME_RSCN,
FCME_GIDPN_DONE,
FCME_PLOGI_DONE, /* Initiator side sent LLIOCB */
+ FCME_PRLI_DONE,
FCME_GNL_DONE,
FCME_GPSC_DONE,
FCME_GPDB_DONE,
FCME_GPNID_DONE,
+ FCME_GFFID_DONE,
FCME_DELETE_DONE,
};
@@ -2261,6 +2300,17 @@ typedef struct fc_port {
unsigned int login_pause:1;
unsigned int login_succ:1;
+ struct work_struct nvme_del_work;
+ atomic_t nvme_ref_count;
+ wait_queue_head_t nvme_waitQ;
+ uint32_t nvme_prli_service_param;
+#define NVME_PRLI_SP_CONF BIT_7
+#define NVME_PRLI_SP_INITIATOR BIT_5
+#define NVME_PRLI_SP_TARGET BIT_4
+#define NVME_PRLI_SP_DISCOVERY BIT_3
+ uint8_t nvme_flag;
+#define NVME_FLAG_REGISTERED 4
+
struct fc_port *conflict;
unsigned char logout_completed;
int generation;
@@ -2293,6 +2343,7 @@ typedef struct fc_port {
u32 supported_classes;
uint8_t fc4_type;
+ uint8_t fc4f_nvme;
uint8_t scan_state;
unsigned long last_queue_full;
@@ -2300,6 +2351,8 @@ typedef struct fc_port {
uint16_t port_id;
+ struct nvme_fc_remote_port *nvme_remote_port;
+
unsigned long retry_delay_timestamp;
struct qla_tgt_sess *tgt_session;
struct ct_sns_desc ct_desc;
@@ -2732,7 +2785,7 @@ struct ct_sns_req {
struct {
uint8_t reserved;
- uint8_t port_name[3];
+ uint8_t port_id[3];
} gff_id;
struct {
@@ -2814,6 +2867,7 @@ struct ct_sns_rsp {
} gpsc;
#define GFF_FCP_SCSI_OFFSET 7
+#define GFF_NVME_OFFSET 23 /* type = 28h */
struct {
uint8_t fc4_features[128];
} gff_id;
@@ -3039,6 +3093,7 @@ enum qla_work_type {
QLA_EVT_GPNID_DONE,
QLA_EVT_NEW_SESS,
QLA_EVT_GPDB,
+ QLA_EVT_PRLI,
QLA_EVT_GPSC,
QLA_EVT_UPD_FCPORT,
QLA_EVT_GNL,
@@ -3169,6 +3224,21 @@ struct qla_tc_param {
#define QLA_PRECONFIG_VPORTS 32
#define QLA_MAX_VPORTS_QLA24XX 128
#define QLA_MAX_VPORTS_QLA25XX 256
+
+struct qla_tgt_counters {
+ uint64_t qla_core_sbt_cmd;
+ uint64_t core_qla_que_buf;
+ uint64_t qla_core_ret_ctio;
+ uint64_t core_qla_snd_status;
+ uint64_t qla_core_ret_sta_ctio;
+ uint64_t core_qla_free_cmd;
+ uint64_t num_q_full_sent;
+ uint64_t num_alloc_iocb_failed;
+ uint64_t num_term_xchg_sent;
+};
+
+struct qla_qpair;
+
/* Response queue data structure */
struct rsp_que {
dma_addr_t dma;
@@ -3188,6 +3258,7 @@ struct rsp_que {
struct qla_msix_entry *msix;
struct req_que *req;
srb_t *status_srb; /* status continuation entry */
+ struct qla_qpair *qpair;
dma_addr_t dma_fx00;
response_t *ring_fx00;
@@ -3228,6 +3299,15 @@ struct req_que {
struct qla_qpair {
spinlock_t qp_lock;
atomic_t ref_count;
+ uint32_t lun_cnt;
+ /*
+ * For qpair 0, qp_lock_ptr will point at hardware_lock due to
+ * legacy code. For other Qpair(s), it will point at qp_lock.
+ */
+ spinlock_t *qp_lock_ptr;
+ struct scsi_qla_host *vha;
+ u32 chip_reset;
+
/* distill these fields down to 'online=0/1'
* ha->flags.eeh_busy
* ha->flags.pci_channel_io_perm_failure
@@ -3237,14 +3317,18 @@ struct qla_qpair {
/* move vha->flags.difdix_supported here */
uint32_t difdix_supported:1;
uint32_t delete_in_progress:1;
+ uint32_t fw_started:1;
+ uint32_t enable_class_2:1;
+ uint32_t enable_explicit_conf:1;
+ uint32_t use_shadow_reg:1;
uint16_t id; /* qp number used with FW */
- uint16_t num_active_cmd; /* cmds down at firmware */
- cpumask_t cpu_mask; /* CPU mask for cpu affinity operation */
uint16_t vp_idx; /* vport ID */
-
mempool_t *srb_mempool;
+ struct pci_dev *pdev;
+ void (*reqq_start_iocbs)(struct qla_qpair *);
+
/* to do: New driver: move queues to here instead of pointers */
struct req_que *req;
struct rsp_que *rsp;
@@ -3253,7 +3337,9 @@ struct qla_qpair {
struct qla_hw_data *hw;
struct work_struct q_work;
struct list_head qp_list_elem; /* vha->qp_list */
- struct scsi_qla_host *vha;
+ struct list_head hints_list;
+ uint16_t cpuid;
+ struct qla_tgt_counters tgt_counters;
};
/* Place holder for FW buffer parameters */
@@ -3272,8 +3358,6 @@ struct scsi_qlt_host {
struct qlt_hw_data {
/* Protected by hw lock */
- uint32_t enable_class_2:1;
- uint32_t enable_explicit_conf:1;
uint32_t node_name_set:1;
dma_addr_t atio_dma; /* Physical address. */
@@ -3285,9 +3369,6 @@ struct qlt_hw_data {
uint32_t __iomem *atio_q_out;
struct qla_tgt_func_tmpl *tgt_ops;
- struct qla_tgt_cmd *cmds[DEFAULT_OUTSTANDING_COMMANDS];
- uint16_t current_handle;
-
struct qla_tgt_vp_map *tgt_vp_map;
int saved_set;
@@ -3302,6 +3383,7 @@ struct qlt_hw_data {
struct dentry *dfs_tgt_sess;
struct dentry *dfs_tgt_port_database;
+ struct dentry *dfs_naqp;
struct list_head q_full_list;
uint32_t num_pend_cmds;
@@ -3310,7 +3392,8 @@ struct qlt_hw_data {
spinlock_t q_full_lock;
uint32_t leak_exchg_thresh_hold;
spinlock_t sess_lock;
- int rspq_vector_cpuid;
+ int num_act_qpairs;
+#define DEFAULT_NAQP 2
spinlock_t atio_lock ____cacheline_aligned;
struct btree_head32 host_map;
};
@@ -3591,6 +3674,10 @@ struct qla_hw_data {
#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha))
#define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
#define IS_FAWWN_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_EXCHG_OFFLD_CAPABLE(ha) \
+ (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_EXLOGIN_OFFLD_CAPABLE(ha) \
+ (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
/* HBA serial number */
uint8_t serial0;
@@ -3927,24 +4014,11 @@ struct qla_hw_data {
struct work_struct board_disable;
struct mr_data_fx00 mr;
- uint32_t chip_reset;
struct qlt_hw_data tgt;
int allow_cna_fw_dump;
};
-struct qla_tgt_counters {
- uint64_t qla_core_sbt_cmd;
- uint64_t core_qla_que_buf;
- uint64_t qla_core_ret_ctio;
- uint64_t core_qla_snd_status;
- uint64_t qla_core_ret_sta_ctio;
- uint64_t core_qla_free_cmd;
- uint64_t num_q_full_sent;
- uint64_t num_alloc_iocb_failed;
- uint64_t num_term_xchg_sent;
-};
-
/*
* Qlogic scsi host structure
*/
@@ -3973,6 +4047,9 @@ typedef struct scsi_qla_host {
uint32_t fw_tgt_reported:1;
uint32_t bbcr_enable:1;
uint32_t qpairs_available:1;
+ uint32_t qpairs_req_created:1;
+ uint32_t qpairs_rsp_created:1;
+ uint32_t nvme_enabled:1;
} flags;
atomic_t loop_state;
@@ -4017,7 +4094,6 @@ typedef struct scsi_qla_host {
#define PFLG_DISCONNECTED 0 /* PCI device removed */
#define PFLG_DRIVER_REMOVING 1 /* PCI driver .remove */
#define PFLG_DRIVER_PROBING 2 /* PCI driver .probe */
-#define PCI_ERR 30
uint32_t device_flags;
#define SWITCH_FOUND BIT_0
@@ -4052,6 +4128,13 @@ typedef struct scsi_qla_host {
uint8_t port_name[WWN_SIZE];
uint8_t fabric_node_name[WWN_SIZE];
+ struct nvme_fc_local_port *nvme_local_port;
+ atomic_t nvme_ref_count;
+ wait_queue_head_t nvme_waitQ;
+ struct list_head nvme_rport_list;
+ atomic_t nvme_active_aen_cnt;
+ uint16_t nvme_last_rptd_aen;
+
uint16_t fcoe_vlan_id;
uint16_t fcoe_fcf_idx;
uint8_t fcoe_vn_port_mac[6];
@@ -4108,10 +4191,8 @@ typedef struct scsi_qla_host {
struct fc_host_statistics fc_host_stat;
struct qla_statistics qla_stats;
struct bidi_statistics bidi_stats;
-
atomic_t vref_count;
struct qla8044_reset_template reset_tmplt;
- struct qla_tgt_counters tgt_counters;
uint16_t bbcr;
struct name_list_extended gnl;
/* Count of active session/fcport */
@@ -4156,6 +4237,26 @@ struct qla2_sgx {
srb_t *sp;
};
+#define QLA_FW_STARTED(_ha) { \
+ int i; \
+ _ha->flags.fw_started = 1; \
+ _ha->base_qpair->fw_started = 1; \
+ for (i = 0; i < _ha->max_qpairs; i++) { \
+ if (_ha->queue_pair_map[i]) \
+ _ha->queue_pair_map[i]->fw_started = 1; \
+ } \
+}
+
+#define QLA_FW_STOPPED(_ha) { \
+ int i; \
+ _ha->flags.fw_started = 0; \
+ _ha->base_qpair->fw_started = 0; \
+ for (i = 0; i < _ha->max_qpairs; i++) { \
+ if (_ha->queue_pair_map[i]) \
+ _ha->queue_pair_map[i]->fw_started = 0; \
+ } \
+}
+
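
One note on the QLA_FW_STARTED/QLA_FW_STOPPED shape: bare-brace statement macros can mis-parse when used in an unbraced if/else, and the conventional hardening is do { ... } while (0). A minimal standalone illustration; the structure and macro here are stand-ins, not the driver's:

#include <stdio.h>

struct qp { int fw_started; };

#define MAX_QP 4

/* do { } while (0) keeps the macro a single statement, so it stays
 * safe inside an unbraced if/else body.
 */
#define SET_FW_STARTED(qps, val) do {			\
	for (int _i = 0; _i < MAX_QP; _i++)		\
		if ((qps)[_i])				\
			(qps)[_i]->fw_started = (val);	\
} while (0)

int main(void)
{
	struct qp q0 = {0}, q1 = {0};
	struct qp *qps[MAX_QP] = { &q0, &q1, NULL, NULL };

	if (1)
		SET_FW_STARTED(qps, 1);
	else
		printf("unreachable\n");

	printf("%d %d\n", q0.fw_started, q1.fw_started);
	return 0;
}
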
/*
* Macros to help code, maintain, etc.
*/
@@ -4199,6 +4300,25 @@ struct qla2_sgx {
#define QLA_QPAIR_MARK_NOT_BUSY(__qpair) \
atomic_dec(&__qpair->ref_count); \
+
+#define QLA_ENA_CONF(_ha) {\
+ int i;\
+ _ha->base_qpair->enable_explicit_conf = 1; \
+ for (i = 0; i < _ha->max_qpairs; i++) { \
+ if (_ha->queue_pair_map[i]) \
+ _ha->queue_pair_map[i]->enable_explicit_conf = 1; \
+ } \
+}
+
+#define QLA_DIS_CONF(_ha) {\
+ int i;\
+ _ha->base_qpair->enable_explicit_conf = 0; \
+ for (i = 0; i < _ha->max_qpairs; i++) { \
+ if (_ha->queue_pair_map[i]) \
+ _ha->queue_pair_map[i]->enable_explicit_conf = 0; \
+ } \
+}
+
/*
* qla2x00 local function return status codes
*/
@@ -4253,6 +4373,10 @@ enum nexus_wait_type {
WAIT_LUN,
};
+#define USER_CTRL_IRQ(_ha) (ql2xuctrlirq && QLA_TGT_MODE_ENABLED() && \
+ (IS_QLA27XX(_ha) || IS_QLA83XX(_ha)))
+
+#include "qla_target.h"
#include "qla_gbl.h"
#include "qla_dbg.h"
#include "qla_inline.h"
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 989e17b0758c..d231e7156134 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -70,7 +70,7 @@ qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
qla2x00_gid_list_size(ha),
&gid_list_dma, GFP_KERNEL);
if (!gid_list) {
- ql_dbg(ql_dbg_user, vha, 0x705c,
+ ql_dbg(ql_dbg_user, vha, 0x7018,
"DMA allocation failed for %u\n",
qla2x00_gid_list_size(ha));
return 0;
@@ -164,26 +164,56 @@ static int
qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
{
struct scsi_qla_host *vha = s->private;
+ struct qla_qpair *qpair = vha->hw->base_qpair;
+ uint64_t qla_core_sbt_cmd, core_qla_que_buf, qla_core_ret_ctio,
+ core_qla_snd_status, qla_core_ret_sta_ctio, core_qla_free_cmd,
+ num_q_full_sent, num_alloc_iocb_failed, num_term_xchg_sent;
+ u16 i;
+
+ qla_core_sbt_cmd = qpair->tgt_counters.qla_core_sbt_cmd;
+ core_qla_que_buf = qpair->tgt_counters.core_qla_que_buf;
+ qla_core_ret_ctio = qpair->tgt_counters.qla_core_ret_ctio;
+ core_qla_snd_status = qpair->tgt_counters.core_qla_snd_status;
+ qla_core_ret_sta_ctio = qpair->tgt_counters.qla_core_ret_sta_ctio;
+ core_qla_free_cmd = qpair->tgt_counters.core_qla_free_cmd;
+ num_q_full_sent = qpair->tgt_counters.num_q_full_sent;
+ num_alloc_iocb_failed = qpair->tgt_counters.num_alloc_iocb_failed;
+ num_term_xchg_sent = qpair->tgt_counters.num_term_xchg_sent;
+
+ for (i = 0; i < vha->hw->max_qpairs; i++) {
+ qpair = vha->hw->queue_pair_map[i];
+ qla_core_sbt_cmd += qpair->tgt_counters.qla_core_sbt_cmd;
+ core_qla_que_buf += qpair->tgt_counters.core_qla_que_buf;
+ qla_core_ret_ctio += qpair->tgt_counters.qla_core_ret_ctio;
+ core_qla_snd_status += qpair->tgt_counters.core_qla_snd_status;
+ qla_core_ret_sta_ctio +=
+ qpair->tgt_counters.qla_core_ret_sta_ctio;
+ core_qla_free_cmd += qpair->tgt_counters.core_qla_free_cmd;
+ num_q_full_sent += qpair->tgt_counters.num_q_full_sent;
+ num_alloc_iocb_failed +=
+ qpair->tgt_counters.num_alloc_iocb_failed;
+ num_term_xchg_sent += qpair->tgt_counters.num_term_xchg_sent;
+ }
seq_puts(s, "Target Counters\n");
seq_printf(s, "qla_core_sbt_cmd = %lld\n",
- vha->tgt_counters.qla_core_sbt_cmd);
+ qla_core_sbt_cmd);
seq_printf(s, "qla_core_ret_sta_ctio = %lld\n",
- vha->tgt_counters.qla_core_ret_sta_ctio);
+ qla_core_ret_sta_ctio);
seq_printf(s, "qla_core_ret_ctio = %lld\n",
- vha->tgt_counters.qla_core_ret_ctio);
+ qla_core_ret_ctio);
seq_printf(s, "core_qla_que_buf = %lld\n",
- vha->tgt_counters.core_qla_que_buf);
+ core_qla_que_buf);
seq_printf(s, "core_qla_snd_status = %lld\n",
- vha->tgt_counters.core_qla_snd_status);
+ core_qla_snd_status);
seq_printf(s, "core_qla_free_cmd = %lld\n",
- vha->tgt_counters.core_qla_free_cmd);
+ core_qla_free_cmd);
seq_printf(s, "num alloc iocb failed = %lld\n",
- vha->tgt_counters.num_alloc_iocb_failed);
+ num_alloc_iocb_failed);
seq_printf(s, "num term exchange sent = %lld\n",
- vha->tgt_counters.num_term_xchg_sent);
+ num_term_xchg_sent);
seq_printf(s, "num Q full sent = %lld\n",
- vha->tgt_counters.num_q_full_sent);
+ num_q_full_sent);
/* DIF stats */
seq_printf(s, "DIF Inp Bytes = %lld\n",
@@ -314,6 +344,81 @@ static const struct file_operations dfs_fce_ops = {
.release = qla2x00_dfs_fce_release,
};
+static int
+qla_dfs_naqp_show(struct seq_file *s, void *unused)
+{
+ struct scsi_qla_host *vha = s->private;
+ struct qla_hw_data *ha = vha->hw;
+
+ seq_printf(s, "%d\n", ha->tgt.num_act_qpairs);
+ return 0;
+}
+
+static int
+qla_dfs_naqp_open(struct inode *inode, struct file *file)
+{
+ struct scsi_qla_host *vha = inode->i_private;
+
+ return single_open(file, qla_dfs_naqp_show, vha);
+}
+
+static ssize_t
+qla_dfs_naqp_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ struct seq_file *s = file->private_data;
+ struct scsi_qla_host *vha = s->private;
+ struct qla_hw_data *ha = vha->hw;
+ char *buf;
+ int rc = 0;
+ unsigned long num_act_qp;
+
+ if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha))) {
+ pr_err("host%ld: this adapter does not support Multi Q.",
+ vha->host_no);
+ return -EINVAL;
+ }
+
+ if (!vha->flags.qpairs_available) {
+ pr_err("host%ld: Driver is not setup with Multi Q.",
+ vha->host_no);
+ return -EINVAL;
+ }
+ buf = memdup_user_nul(buffer, count);
+ if (IS_ERR(buf)) {
+ pr_err("host%ld: fail to copy user buffer.",
+ vha->host_no);
+ return PTR_ERR(buf);
+ }
+
+ num_act_qp = simple_strtoul(buf, NULL, 0);
+
+ if (num_act_qp >= vha->hw->max_qpairs) {
+ pr_err("User set invalid number of qpairs %lu. Max = %d",
+ num_act_qp, vha->hw->max_qpairs);
+ rc = -EINVAL;
+ goto out_free;
+ }
+
+ if (num_act_qp != ha->tgt.num_act_qpairs) {
+ ha->tgt.num_act_qpairs = num_act_qp;
+ qlt_clr_qp_table(vha);
+ }
+ rc = count;
+out_free:
+ kfree(buf);
+ return rc;
+}
+
+static const struct file_operations dfs_naqp_ops = {
+ .open = qla_dfs_naqp_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = qla_dfs_naqp_write,
+};
+
+
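
Reading the new naqp node returns the active qpair count; writing a value below max_qpairs updates it and clears the qpair table. A small userspace sketch, where the debugfs path is an assumption about where qla2xxx places its per-host directory:

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/qla2xxx/qla2xxx_0/naqp";
	FILE *f = fopen(path, "r+");
	char buf[16];

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("current naqp: %s", buf);
	rewind(f);	/* required when switching from read to write */
	fprintf(f, "2\n");	/* request two active qpairs */
	fclose(f);
	return 0;
}
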
int
qla2x00_dfs_setup(scsi_qla_host_t *vha)
{
@@ -370,7 +475,7 @@ create_nodes:
ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_port_database_ops);
if (!ha->tgt.dfs_tgt_port_database) {
- ql_log(ql_log_warn, vha, 0xffff,
+ ql_log(ql_log_warn, vha, 0xd03f,
"Unable to create debugFS tgt_port_database node.\n");
goto out;
}
@@ -386,11 +491,20 @@ create_nodes:
ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops);
if (!ha->tgt.dfs_tgt_sess) {
- ql_log(ql_log_warn, vha, 0xffff,
- "Unable to create debugFS tgt_sess node.\n");
+ ql_log(ql_log_warn, vha, 0xd040,
+ "Unable to create debugFS tgt_sess node.\n");
goto out;
}
+ if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
+ ha->tgt.dfs_naqp = debugfs_create_file("naqp",
+ 0400, ha->dfs_dir, vha, &dfs_naqp_ops);
+ if (!ha->tgt.dfs_naqp) {
+ ql_log(ql_log_warn, vha, 0xd011,
+ "Unable to create debugFS naqp node.\n");
+ goto out;
+ }
+ }
out:
return 0;
}
@@ -400,6 +514,11 @@ qla2x00_dfs_remove(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
+ if (ha->tgt.dfs_naqp) {
+ debugfs_remove(ha->tgt.dfs_naqp);
+ ha->tgt.dfs_naqp = NULL;
+ }
+
if (ha->tgt.dfs_tgt_sess) {
debugfs_remove(ha->tgt.dfs_tgt_sess);
ha->tgt.dfs_tgt_sess = NULL;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 1f808928763b..b9c9886e8b1d 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -7,6 +7,9 @@
#ifndef __QLA_FW_H
#define __QLA_FW_H
+#include <linux/nvme.h>
+#include <linux/nvme-fc.h>
+
#define MBS_CHECKSUM_ERROR 0x4010
#define MBS_INVALID_PRODUCT_KEY 0x4020
@@ -37,6 +40,12 @@ struct port_database_24xx {
#define PDF_CLASS_2 BIT_4
#define PDF_HARD_ADDR BIT_1
+ /*
+ * For NVMe, the login_state field has been
+ * split into nibbles.
+ * The lower nibble is for FCP.
+ * The upper nibble is for NVMe.
+ */
uint8_t current_login_state;
uint8_t last_login_state;
#define PDS_PLOGI_PENDING 0x03
@@ -69,7 +78,11 @@ struct port_database_24xx {
uint8_t port_name[WWN_SIZE];
uint8_t node_name[WWN_SIZE];
- uint8_t reserved_3[24];
+ uint8_t reserved_3[4];
+ uint16_t prli_nvme_svc_param_word_0; /* Bits 15-0 of word 0 */
+ uint16_t prli_nvme_svc_param_word_3; /* Bits 15-0 of word 3 */
+ uint16_t nvme_first_burst_size;
+ uint8_t reserved_4[14];
};
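/*
 * Editor's sketch -- not part of this patch: how a consumer splits the
 * combined login_state field described above (this mirrors the
 * current_login_state handling added to qla24xx_handle_gnl_done_event
 * in qla_init.c further down in this patch):
 */
static inline u8 fcp_login_state(u8 login_state)
{
	return login_state & 0xf;	/* lower nibble: FCP */
}

static inline u8 nvme_login_state(u8 login_state)
{
	return login_state >> 4;	/* upper nibble: NVMe */
}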
/*
@@ -593,9 +606,14 @@ struct sts_entry_24xx {
uint32_t residual_len; /* FW calc residual transfer length. */
- uint16_t reserved_1;
+ union {
+ uint16_t reserved_1;
+ uint16_t nvme_rsp_pyld_len;
+ };
+
uint16_t state_flags; /* State flags. */
#define SF_TRANSFERRED_DATA BIT_11
+#define SF_NVME_ERSP BIT_6
#define SF_FCP_RSP_DMA BIT_0
uint16_t retry_delay;
@@ -605,8 +623,16 @@ struct sts_entry_24xx {
uint32_t rsp_residual_count; /* FCP RSP residual count. */
uint32_t sense_len; /* FCP SENSE length. */
- uint32_t rsp_data_len; /* FCP response data length. */
- uint8_t data[28]; /* FCP response/sense information. */
+
+ union {
+ struct {
+ uint32_t rsp_data_len; /* FCP response data length */
+ uint8_t data[28]; /* FCP rsp/sense information */
+ };
+ struct nvme_fc_ersp_iu nvme_ersp;
+ uint8_t nvme_ersp_data[32];
+ };
+
/*
* If DIF Error is set in comp_status, these additional fields are
* defined:
@@ -819,6 +845,7 @@ struct logio_entry_24xx {
#define LCF_CLASS_2 BIT_8 /* Enable class 2 during PLOGI. */
#define LCF_FREE_NPORT BIT_7 /* Release NPORT handle after LOGO. */
#define LCF_EXPL_LOGO BIT_6 /* Perform an explicit LOGO. */
+#define LCF_NVME_PRLI BIT_6 /* Perform NVME FC4 PRLI */
#define LCF_SKIP_PRLI BIT_5 /* Skip PRLI after PLOGI. */
#define LCF_IMPL_LOGO_ALL BIT_5 /* Implicit LOGO to all ports. */
#define LCF_COND_PLOGI BIT_4 /* PLOGI only if not logged-in. */
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 5b2451745e9f..cadb6e3baacc 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -10,6 +10,17 @@
#include <linux/interrupt.h>
/*
+ * Global functions prototype in qla_nvme.c source file.
+ */
+extern void qla_nvme_register_hba(scsi_qla_host_t *);
+extern int qla_nvme_register_remote(scsi_qla_host_t *, fc_port_t *);
+extern void qla_nvme_delete(scsi_qla_host_t *);
+extern void qla_nvme_abort(struct qla_hw_data *, srb_t *sp);
+extern void qla24xx_nvme_ls4_iocb(scsi_qla_host_t *, struct pt_ls4_request *,
+ struct req_que *);
+extern void qla24xx_async_gffid_sp_done(void *, int);
+
+/*
* Global Function Prototypes in qla_init.c source file.
*/
extern int qla2x00_initialize_adapter(scsi_qla_host_t *);
@@ -77,8 +88,7 @@ struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *,
enum qla_work_type);
extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *);
int qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e);
-extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
-extern void *qla2x00_alloc_iocbs_ready(struct scsi_qla_host *, srb_t *);
+extern void *qla2x00_alloc_iocbs_ready(struct qla_qpair *, srb_t *);
extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
extern fc_port_t *
@@ -96,10 +106,11 @@ qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
extern int qla2x00_init_rings(scsi_qla_host_t *);
extern uint8_t qla27xx_find_valid_image(struct scsi_qla_host *);
extern struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *,
- int, int);
+ int, int, bool);
extern int qla2xxx_delete_qpair(struct scsi_qla_host *, struct qla_qpair *);
void qla2x00_fcport_event_handler(scsi_qla_host_t *, struct event_arg *);
int qla24xx_async_gpdb(struct scsi_qla_host *, fc_port_t *, u8);
+int qla24xx_async_prli(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
struct imm_ntfy_from_isp *, int);
int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *,
@@ -137,8 +148,11 @@ extern int ql2xmdcapmask;
extern int ql2xmdenable;
extern int ql2xexlogins;
extern int ql2xexchoffld;
+extern int ql2xiniexchg;
extern int ql2xfwholdabts;
extern int ql2xmvasynctoatio;
+extern int ql2xuctrlirq;
+extern int ql2xnvmeenable;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -254,7 +268,8 @@ extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
extern int qla2xxx_dif_start_scsi_mq(srb_t *);
extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
-extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
+extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
+extern void *__qla2x00_alloc_iocbs(struct qla_qpair *, srb_t *);
extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *,
uint32_t *, uint16_t, struct qla_tc_param *);
@@ -604,7 +619,7 @@ extern int qla2x00_gpn_id(scsi_qla_host_t *, sw_info_t *);
extern int qla2x00_gnn_id(scsi_qla_host_t *, sw_info_t *);
extern void qla2x00_gff_id(scsi_qla_host_t *, sw_info_t *);
extern int qla2x00_rft_id(scsi_qla_host_t *);
-extern int qla2x00_rff_id(scsi_qla_host_t *);
+extern int qla2x00_rff_id(scsi_qla_host_t *, u8);
extern int qla2x00_rnn_id(scsi_qla_host_t *);
extern int qla2x00_rsnn_nn(scsi_qla_host_t *);
extern void *qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
@@ -630,7 +645,8 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *, struct event_arg *);
int qla24xx_post_gpsc_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *);
int qla2x00_mgmt_svr_login(scsi_qla_host_t *);
-
+void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea);
+int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport);
/*
* Global Function Prototypes in qla_attr.c source file.
*/
@@ -662,9 +678,9 @@ extern int qla25xx_request_irq(struct qla_hw_data *, struct qla_qpair *,
extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *);
extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *);
extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
- uint16_t, int, uint8_t);
+ uint16_t, int, uint8_t, bool);
extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t,
- uint16_t, struct qla_qpair *);
+ uint16_t, struct qla_qpair *, bool);
extern void qla2x00_init_response_q_entries(struct rsp_que *);
extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
@@ -833,14 +849,13 @@ extern irqreturn_t qla8044_intr_handler(int, void *);
extern void qla82xx_mbx_completion(scsi_qla_host_t *, uint16_t);
extern int qla8044_abort_isp(scsi_qla_host_t *);
extern int qla8044_check_fw_alive(struct scsi_qla_host *);
-
-extern void qlt_host_reset_handler(struct qla_hw_data *ha);
extern int qla_get_exlogin_status(scsi_qla_host_t *, uint16_t *,
uint16_t *);
extern int qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr);
extern int qla_get_exchoffld_status(scsi_qla_host_t *, uint16_t *, uint16_t *);
-extern int qla_set_exchoffld_mem_cfg(scsi_qla_host_t *, dma_addr_t);
-extern void qlt_handle_abts_recv(struct scsi_qla_host *, response_t *);
+extern int qla_set_exchoffld_mem_cfg(scsi_qla_host_t *);
+extern void qlt_handle_abts_recv(struct scsi_qla_host *, struct rsp_que *,
+ response_t *);
int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
struct imm_ntfy_from_isp *, int);
@@ -856,5 +871,6 @@ void qla24xx_delete_sess_fn(struct work_struct *);
void qlt_unknown_atio_work_fn(struct work_struct *);
void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
void qlt_remove_target_resources(struct qla_hw_data *);
+void qlt_clr_qp_table(struct scsi_qla_host *vha);
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 9bc9aa9e164a..b323a7c71eda 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -124,6 +124,7 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
int rval;
uint16_t comp_status;
struct qla_hw_data *ha = vha->hw;
+ bool lid_is_sns = false;
rval = QLA_FUNCTION_FAILED;
if (ms_pkt->entry_status != 0) {
@@ -155,6 +156,25 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
} else
rval = QLA_SUCCESS;
break;
+ case CS_PORT_LOGGED_OUT:
+ if (IS_FWI2_CAPABLE(ha)) {
+ if (le16_to_cpu(ms_pkt->loop_id.extended) ==
+ NPH_SNS)
+ lid_is_sns = true;
+ } else {
+ if (le16_to_cpu(ms_pkt->loop_id.extended) ==
+ SIMPLE_NAME_SERVER)
+ lid_is_sns = true;
+ }
+ if (lid_is_sns) {
+ ql_dbg(ql_dbg_async, vha, 0x502b,
+ "%s failed, Name server has logged out",
+ routine);
+ rval = QLA_NOT_LOGGED_IN;
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ }
+ break;
default:
ql_dbg(ql_dbg_disc, vha, 0x2033,
"%s failed, completion status (%x) on port_id: "
@@ -530,6 +550,8 @@ qla2x00_rft_id(scsi_qla_host_t *vha)
ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
+ if (vha->flags.nvme_enabled)
+ ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
@@ -555,7 +577,7 @@ qla2x00_rft_id(scsi_qla_host_t *vha)
* Returns 0 on success.
*/
int
-qla2x00_rff_id(scsi_qla_host_t *vha)
+qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
{
int rval;
struct qla_hw_data *ha = vha->hw;
@@ -593,7 +615,7 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
qlt_rff_id(vha, ct_req);
- ct_req->req.rff_id.fc4_type = 0x08; /* SCSI - FCP */
+ ct_req->req.rff_id.fc4_type = type; /* SCSI - FCP */
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
@@ -2004,7 +2026,7 @@ qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- ql_dbg(ql_dbg_disc, vha, 0x20b1,
+ ql_dbg(ql_dbg_disc, vha, 0x201b,
"Vendor Identifier = %s.\n", eiter->a.vendor_identifier);
/* Update MS request size. */
@@ -2144,6 +2166,13 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
eiter->a.fc4_types[2],
eiter->a.fc4_types[1]);
+ if (vha->flags.nvme_enabled) {
+ eiter->a.fc4_types[6] = 1; /* NVMe type 28h */
+ ql_dbg(ql_dbg_disc, vha, 0x211f,
+ "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
+ eiter->a.fc4_types[6]);
+ }
+
/* Supported speed. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
@@ -2216,7 +2245,7 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
}
size += 4 + 4;
- ql_dbg(ql_dbg_disc, vha, 0x20bc,
+ ql_dbg(ql_dbg_disc, vha, 0x2017,
"Current_Speed = %x.\n", eiter->a.cur_speed);
/* Max frame size. */
@@ -2261,7 +2290,7 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- ql_dbg(ql_dbg_disc, vha, 0x203d,
+ ql_dbg(ql_dbg_disc, vha, 0x201a,
"HostName=%s.\n", eiter->a.host_name);
/* Node Name */
@@ -2341,6 +2370,15 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
"Port Active FC4 Type = %02x %02x.\n",
eiter->a.port_fc4_type[2], eiter->a.port_fc4_type[1]);
+ if (vha->flags.nvme_enabled) {
+ eiter->a.port_fc4_type[4] = 0;
+ eiter->a.port_fc4_type[5] = 0;
+ eiter->a.port_fc4_type[6] = 1; /* NVMe type 28h */
+ ql_dbg(ql_dbg_disc, vha, 0x2120,
+ "NVME Port Active FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
+ eiter->a.port_fc4_type[6]);
+ }
+
/* Port State */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_PORT_STATE);
@@ -2368,13 +2406,13 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + 4);
size += 4 + 4;
- ql_dbg(ql_dbg_disc, vha, 0x20c8,
+ ql_dbg(ql_dbg_disc, vha, 0x201c,
"Port Id = %x.\n", eiter->a.port_id);
/* Update MS request size. */
qla2x00_update_ms_fdmi_iocb(vha, size + 16);
- ql_dbg(ql_dbg_disc, vha, 0x203e,
+ ql_dbg(ql_dbg_disc, vha, 0x2018,
"RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size);
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ca,
entries, size);
@@ -2734,6 +2772,10 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
list[i].fc4_type = FC4_TYPE_FCP_SCSI;
else
list[i].fc4_type = FC4_TYPE_OTHER;
+
+ list[i].fc4f_nvme =
+ ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
+ list[i].fc4f_nvme &= 0xf;
}
/* Last device exit. */
@@ -2747,13 +2789,13 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
fc_port_t *fcport = ea->fcport;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %8phC login state %d \n",
- __func__, fcport->port_name, fcport->fw_login_state);
+ ql_dbg(ql_dbg_disc, vha, 0x201d,
+ "%s %8phC login state %d\n",
+ __func__, fcport->port_name, fcport->fw_login_state);
if (ea->sp->gen2 != fcport->login_gen) {
/* PLOGI/PRLI/LOGO came in while cmd was out.*/
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x201e,
"%s %8phC generation changed rscn %d|%d login %d|%d \n",
__func__, fcport->port_name, fcport->last_rscn_gen,
fcport->rscn_gen, fcport->last_login_gen, fcport->login_gen);
@@ -2777,7 +2819,7 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
if (atomic_read(&fcport->state) ==
FCS_ONLINE)
break;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x201f,
"%s %d %8phC post gnl\n",
__func__, __LINE__, fcport->port_name);
qla24xx_post_gnl_work(vha, fcport);
@@ -2786,14 +2828,14 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
} else { /* fcport->d_id.b24 != ea->id.b24 */
fcport->d_id.b24 = ea->id.b24;
if (fcport->deleted == QLA_SESS_DELETED) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x2021,
"%s %d %8phC post del sess\n",
__func__, __LINE__, fcport->port_name);
qlt_schedule_sess_for_deletion_lock(fcport);
}
}
} else { /* ea->sp->gen1 != fcport->rscn_gen */
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x2022,
"%s %d %8phC post gidpn\n",
__func__, __LINE__, fcport->port_name);
/* rscn came in while cmd was out */
@@ -2803,18 +2845,18 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
/* cable pulled */
if (ea->sp->gen1 == fcport->rscn_gen) {
if (ea->sp->gen2 == fcport->login_gen) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x2042,
"%s %d %8phC post del sess\n", __func__,
__LINE__, fcport->port_name);
qlt_schedule_sess_for_deletion_lock(fcport);
} else {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x2045,
"%s %d %8phC login\n", __func__, __LINE__,
fcport->port_name);
qla24xx_fcport_handle_login(vha, fcport);
}
} else {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x2049,
"%s %d %8phC post gidpn\n", __func__, __LINE__,
fcport->port_name);
qla24xx_post_gidpn_work(vha, fcport);
@@ -2841,7 +2883,7 @@ static void qla2x00_async_gidpn_sp_done(void *s, int res)
ea.rc = res;
ea.event = FCME_GIDPN_DONE;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
"Async done-%s res %x, WWPN %8phC ID %3phC \n",
sp->name, res, fcport->port_name, id);
@@ -2897,11 +2939,11 @@ int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
if (rval != QLA_SUCCESS)
goto done_free_sp;
- ql_dbg(ql_dbg_disc, vha, 0x206f,
- "Async-%s - %8phC hdl=%x loopid=%x portid %02x%02x%02x.\n",
- sp->name, fcport->port_name,
- sp->handle, fcport->loop_id, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ ql_dbg(ql_dbg_disc, vha, 0x20a4,
+ "Async-%s - %8phC hdl=%x loopid=%x portid %02x%02x%02x.\n",
+ sp->name, fcport->port_name,
+ sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
return rval;
done_free_sp:
@@ -2952,7 +2994,7 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x2053,
"Async done-%s res %x, WWPN %8phC \n",
sp->name, res, fcport->port_name);
@@ -2965,10 +3007,9 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
if ((ct_rsp->header.reason_code ==
CT_REASON_INVALID_COMMAND_CODE) ||
(ct_rsp->header.reason_code ==
- CT_REASON_COMMAND_UNSUPPORTED)) {
- ql_dbg(ql_dbg_disc, vha, 0x205a,
- "GPSC command unsupported, disabling "
- "query.\n");
+ CT_REASON_COMMAND_UNSUPPORTED)) {
+ ql_dbg(ql_dbg_disc, vha, 0x2019,
+ "GPSC command unsupported, disabling query.\n");
ha->flags.gpsc_supported = 0;
res = QLA_SUCCESS;
}
@@ -2997,12 +3038,11 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
break;
}
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n",
- sp->name,
- fcport->fabric_port_name,
- be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
- be16_to_cpu(ct_rsp->rsp.gpsc.speed));
+ ql_dbg(ql_dbg_disc, vha, 0x2054,
+ "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n",
+ sp->name, fcport->fabric_port_name,
+ be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
+ be16_to_cpu(ct_rsp->rsp.gpsc.speed));
}
done:
memset(&ea, 0, sizeof(ea));
@@ -3058,11 +3098,11 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
if (rval != QLA_SUCCESS)
goto done_free_sp;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
- sp->name, fcport->port_name, sp->handle,
- fcport->loop_id, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ ql_dbg(ql_dbg_disc, vha, 0x205e,
+ "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
+ sp->name, fcport->port_name, sp->handle,
+ fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
return rval;
done_free_sp:
@@ -3118,21 +3158,32 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
if (fcport) {
/* cable moved. just plugged in */
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__, fcport->port_name);
-
fcport->rscn_gen++;
fcport->d_id = ea->id;
fcport->scan_state = QLA_FCPORT_FOUND;
fcport->flags |= FCF_FABRIC_DEVICE;
- qlt_schedule_sess_for_deletion_lock(fcport);
+ switch (fcport->disc_state) {
+ case DSC_DELETED:
+ ql_dbg(ql_dbg_disc, vha, 0x210d,
+ "%s %d %8phC login\n", __func__, __LINE__,
+ fcport->port_name);
+ qla24xx_fcport_handle_login(vha, fcport);
+ break;
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0x2064,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__, fcport->port_name);
+ qlt_schedule_sess_for_deletion_lock(fcport);
+ break;
+ }
} else {
/* create new fcport */
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post new sess\n",
- __func__, __LINE__, ea->port_name);
+ ql_dbg(ql_dbg_disc, vha, 0x2065,
+ "%s %d %8phC post new sess\n",
+ __func__, __LINE__, ea->port_name);
qla24xx_post_newsess_work(vha, &ea->id, ea->port_name, NULL);
}
@@ -3149,10 +3200,10 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
struct event_arg ea;
struct qla_work_evt *e;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async done-%s res %x ID %3phC. %8phC\n",
- sp->name, res, ct_req->req.port_id.port_id,
- ct_rsp->rsp.gpn_id.port_name);
+ ql_dbg(ql_dbg_disc, vha, 0x2066,
+ "Async done-%s res %x ID %3phC. %8phC\n",
+ sp->name, res, ct_req->req.port_id.port_id,
+ ct_rsp->rsp.gpn_id.port_name);
memset(&ea, 0, sizeof(ea));
memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
@@ -3214,8 +3265,8 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
GFP_KERNEL);
if (!sp->u.iocb_cmd.u.ctarg.req) {
- ql_log(ql_log_warn, vha, 0xffff,
- "Failed to allocate ct_sns request.\n");
+ ql_log(ql_log_warn, vha, 0xd041,
+ "Failed to allocate ct_sns request.\n");
goto done_free_sp;
}
@@ -3223,8 +3274,8 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
GFP_KERNEL);
if (!sp->u.iocb_cmd.u.ctarg.rsp) {
- ql_log(ql_log_warn, vha, 0xffff,
- "Failed to allocate ct_sns request.\n");
+ ql_log(ql_log_warn, vha, 0xd042,
+ "Failed to allocate ct_sns request.\n");
goto done_free_sp;
}
@@ -3251,9 +3302,9 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
if (rval != QLA_SUCCESS)
goto done_free_sp;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async-%s hdl=%x ID %3phC.\n", sp->name,
- sp->handle, ct_req->req.port_id.port_id);
+ ql_dbg(ql_dbg_disc, vha, 0x2067,
+ "Async-%s hdl=%x ID %3phC.\n", sp->name,
+ sp->handle, ct_req->req.port_id.port_id);
return rval;
done_free_sp:
@@ -3276,3 +3327,111 @@ done_free_sp:
done:
return rval;
}
+
+void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ fc_port_t *fcport = ea->fcport;
+
+ qla24xx_post_gnl_work(vha, fcport);
+}
+
+void qla24xx_async_gffid_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ fc_port_t *fcport = sp->fcport;
+ struct ct_sns_rsp *ct_rsp;
+ struct event_arg ea;
+
+ ql_dbg(ql_dbg_disc, vha, 0x2133,
+ "Async done-%s res %x ID %x. %8phC\n",
+ sp->name, res, fcport->d_id.b24, fcport->port_name);
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
+ /*
+ * FC-GS-7, 5.2.3.12 FC-4 Features - format
+ * The format of the FC-4 Features object, as defined by the FC-4,
+ * shall be an array of 4-bit values, one for each type code value.
+ */
+ if (!res) {
+ if (ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET] & 0xf) {
+ /* w1 b00:03 */
+ fcport->fc4_type =
+ ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
+ fcport->fc4_type &= 0xf;
+ }
+
+ if (ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET] & 0xf) {
+ /* w5 [00:03]/28h */
+ fcport->fc4f_nvme =
+ ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
+ fcport->fc4f_nvme &= 0xf;
+ }
+ }
+
+ memset(&ea, 0, sizeof(ea));
+ ea.sp = sp;
+ ea.fcport = sp->fcport;
+ ea.rc = res;
+ ea.event = FCME_GFFID_DONE;
+
+ qla2x00_fcport_event_handler(vha, &ea);
+ sp->free(sp);
+}
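/*
 * Editor's sketch -- not part of this patch: per FC-GS-7, the GFF_ID
 * response packs one 4-bit feature field per FC-4 type into big-endian
 * 32-bit words (8 types per word). Under that layout the byte holding
 * the low nibble for type 0x08 (FCP, "w1 b00:03" above) is 7, and for
 * type 0x28 (NVMe, "w5 [00:03]" above) is 23, which is where
 * GFF_FCP_SCSI_OFFSET and GFF_NVME_OFFSET point. Illustrative helper:
 */
static inline unsigned int gff_feature_byte(u8 fc4_type)
{
	unsigned int word = fc4_type / 8;	/* 8 nibbles per 32-bit word */
	unsigned int nib = fc4_type % 8;	/* nibble index within word  */

	return word * 4 + (3 - nib / 2);	/* big-endian byte position  */
}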
+
+/* Get FC4 Feature with Nport ID. */
+int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+
+ if (!vha->flags.online)
+ return rval;
+
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+ return rval;
+
+ fcport->flags |= FCF_ASYNC_SENT;
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gffid";
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
+
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ /* CT_IU preamble */
+ ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFF_ID_CMD,
+ GFF_ID_RSP_SIZE);
+
+ ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain;
+ ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area;
+ ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa;
+
+ sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla24xx_async_gffid_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0x2132,
+ "Async-%s hdl=%x %8phC.\n", sp->name,
+ sp->handle, fcport->port_name);
+
+ return rval;
+done_free_sp:
+ sp->free(sp);
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 0391fc317003..072ad1aa5505 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -37,8 +37,11 @@ static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
static int qla84xx_init_chip(scsi_qla_host_t *);
static int qla25xx_init_queues(struct qla_hw_data *);
static int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
+static int qla24xx_post_prli_work(struct scsi_qla_host *, fc_port_t *);
static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
struct event_arg *);
+static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
+ struct event_arg *);
/* SRB Extensions ---------------------------------------------------------- */
@@ -141,7 +144,7 @@ qla2x00_async_login_sp_done(void *ptr, int res)
struct srb_iocb *lio = &sp->u.iocb_cmd;
struct event_arg ea;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20dd,
"%s %8phC res %d \n", __func__, sp->fcport->port_name, res);
sp->fcport->flags &= ~FCF_ASYNC_SENT;
@@ -191,6 +194,10 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
lio->timeout = qla2x00_async_iocb_timeout;
sp->done = qla2x00_async_login_sp_done;
lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
+
+ if (fcport->fc4f_nvme)
+ lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
+
if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
lio->u.logio.flags |= SRB_LOGIN_RETRIED;
rval = qla2x00_start_sp(sp);
@@ -327,38 +334,38 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
u16 i, n, found = 0, loop_id;
port_id_t id;
u64 wwn;
- u8 opt = 0;
+ u8 opt = 0, current_login_state;
fcport = ea->fcport;
if (ea->rc) { /* rval */
if (fcport->login_retry == 0) {
fcport->login_retry = vha->hw->login_retry_count;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "GNL failed Port login retry %8phN, retry cnt=%d.\n",
- fcport->port_name, fcport->login_retry);
+ ql_dbg(ql_dbg_disc, vha, 0x20de,
+ "GNL failed Port login retry %8phN, retry cnt=%d.\n",
+ fcport->port_name, fcport->login_retry);
}
return;
}
if (fcport->last_rscn_gen != fcport->rscn_gen) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20df,
"%s %8phC rscn gen changed rscn %d|%d \n",
__func__, fcport->port_name,
fcport->last_rscn_gen, fcport->rscn_gen);
qla24xx_post_gidpn_work(vha, fcport);
return;
} else if (fcport->last_login_gen != fcport->login_gen) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %8phC login gen changed login %d|%d \n",
- __func__, fcport->port_name,
- fcport->last_login_gen, fcport->login_gen);
+ ql_dbg(ql_dbg_disc, vha, 0x20e0,
+ "%s %8phC login gen changed login %d|%d\n",
+ __func__, fcport->port_name,
+ fcport->last_login_gen, fcport->login_gen);
return;
}
n = ea->data[0] / sizeof(struct get_name_list_extended);
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20e1,
"%s %d %8phC n %d %02x%02x%02x lid %d \n",
__func__, __LINE__, fcport->port_name, n,
fcport->d_id.b.domain, fcport->d_id.b.area,
@@ -380,20 +387,20 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
loop_id = le16_to_cpu(e->nport_handle);
loop_id = (loop_id & 0x7fff);
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s found %8phC CLS [%d|%d] ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
- __func__, fcport->port_name,
- e->current_login_state, fcport->fw_login_state,
- id.b.domain, id.b.area, id.b.al_pa,
- fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa, loop_id, fcport->loop_id);
+ ql_dbg(ql_dbg_disc, vha, 0x20e2,
+ "%s found %8phC CLS [%d|%d] ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
+ __func__, fcport->port_name,
+ e->current_login_state, fcport->fw_login_state,
+ id.b.domain, id.b.area, id.b.al_pa,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa, loop_id, fcport->loop_id);
if ((id.b24 != fcport->d_id.b24) ||
((fcport->loop_id != FC_NO_LOOP_ID) &&
(fcport->loop_id != loop_id))) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__, fcport->port_name);
+ ql_dbg(ql_dbg_disc, vha, 0x20e3,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__, fcport->port_name);
qlt_schedule_sess_for_deletion(fcport, 1);
return;
}
@@ -414,24 +421,28 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
fcport->login_pause = 1;
}
- switch (e->current_login_state) {
+ if (fcport->fc4f_nvme)
+ current_login_state = e->current_login_state >> 4;
+ else
+ current_login_state = e->current_login_state & 0xf;
+
+ switch (current_login_state) {
case DSC_LS_PRLI_COMP:
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post gpdb\n",
- __func__, __LINE__, fcport->port_name);
+ ql_dbg(ql_dbg_disc, vha, 0x20e4,
+ "%s %d %8phC post gpdb\n",
+ __func__, __LINE__, fcport->port_name);
opt = PDO_FORCE_ADISC;
qla24xx_post_gpdb_work(vha, fcport, opt);
break;
-
case DSC_LS_PORT_UNAVAIL:
default:
if (fcport->loop_id == FC_NO_LOOP_ID) {
qla2x00_find_new_loop_id(vha, fcport);
fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
}
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC \n",
- __func__, __LINE__, fcport->port_name);
+ ql_dbg(ql_dbg_disc, vha, 0x20e5,
+ "%s %d %8phC\n",
+ __func__, __LINE__, fcport->port_name);
qla24xx_fcport_handle_login(vha, fcport);
break;
}
@@ -456,7 +467,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
qla2x00_find_fcport_by_wwpn(vha,
e->port_name, 0);
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20e6,
"%s %d %8phC post del sess\n",
__func__, __LINE__,
conflict_fcport->port_name);
@@ -487,7 +498,7 @@ qla24xx_async_gnl_sp_done(void *s, int res)
u64 wwn;
struct list_head h;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20e7,
"Async done-%s res %x mb[1]=%x mb[2]=%x \n",
sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
sp->u.iocb_cmd.u.mbx.in_mb[2]);
@@ -512,7 +523,7 @@ qla24xx_async_gnl_sp_done(void *s, int res)
set_bit(loop_id, vha->hw->loop_id_map);
wwn = wwn_to_u64(e->port_name);
- ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8,
"%s %8phC %02x:%02x:%02x state %d/%d lid %x \n",
__func__, (void *)&wwn, e->port_id[2], e->port_id[1],
e->port_id[0], e->current_login_state, e->last_login_state,
@@ -551,7 +562,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
if (!vha->flags.online)
goto done;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20d9,
"Async-gnlist WWPN %8phC \n", fcport->port_name);
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
@@ -598,9 +609,9 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
if (rval != QLA_SUCCESS)
goto done_free_sp;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async-%s - OUT WWPN %8phC hndl %x\n",
- sp->name, fcport->port_name, sp->handle);
+ ql_dbg(ql_dbg_disc, vha, 0x20da,
+ "Async-%s - OUT WWPN %8phC hndl %x\n",
+ sp->name, fcport->port_name, sp->handle);
return rval;
@@ -635,7 +646,7 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
int rval = QLA_SUCCESS;
struct event_arg ea;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20db,
"Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
sp->name, res, fcport->port_name, mb[1], mb[2]);
@@ -665,6 +676,104 @@ gpd_error_out:
sp->free(sp);
}
+static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_work_evt *e;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.fcport.fcport = fcport;
+
+ return qla2x00_post_work(vha, e);
+}
+
+static void
+qla2x00_async_prli_sp_done(void *ptr, int res)
+{
+ srb_t *sp = ptr;
+ struct scsi_qla_host *vha = sp->vha;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+ struct event_arg ea;
+
+ ql_dbg(ql_dbg_disc, vha, 0x2129,
+ "%s %8phC res %d \n", __func__,
+ sp->fcport->port_name, res);
+
+ sp->fcport->flags &= ~FCF_ASYNC_SENT;
+
+ if (!test_bit(UNLOADING, &vha->dpc_flags)) {
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_PRLI_DONE;
+ ea.fcport = sp->fcport;
+ ea.data[0] = lio->u.logio.data[0];
+ ea.data[1] = lio->u.logio.data[1];
+ ea.iop[0] = lio->u.logio.iop[0];
+ ea.iop[1] = lio->u.logio.iop[1];
+ ea.sp = sp;
+
+ qla2x00_fcport_event_handler(vha, &ea);
+ }
+
+ sp->free(sp);
+}
+
+int
+qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ srb_t *sp;
+ struct srb_iocb *lio;
+ int rval = QLA_FUNCTION_FAILED;
+
+ if (!vha->flags.online)
+ return rval;
+
+ if (fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
+ fcport->fw_login_state == DSC_LS_PLOGI_COMP ||
+ fcport->fw_login_state == DSC_LS_PRLI_PEND)
+ return rval;
+
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+ return rval;
+
+ fcport->flags |= FCF_ASYNC_SENT;
+ fcport->logout_completed = 0;
+
+ sp->type = SRB_PRLI_CMD;
+ sp->name = "prli";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ lio = &sp->u.iocb_cmd;
+ lio->timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_prli_sp_done;
+ lio->u.logio.flags = 0;
+
+ if (fcport->fc4f_nvme)
+ lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags |= FCF_LOGIN_NEEDED;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ goto done_free_sp;
+ }
+
+ ql_dbg(ql_dbg_disc, vha, 0x211b,
+ "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d.\n",
+ fcport->port_name, sp->handle, fcport->loop_id,
+ fcport->d_id.b24, fcport->login_retry);
+
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ return rval;
+}
+
static int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport,
u8 opt)
{
@@ -701,8 +810,8 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
if (pd == NULL) {
- ql_log(ql_log_warn, vha, 0xffff,
- "Failed to allocate port database structure.\n");
+ ql_log(ql_log_warn, vha, 0xd043,
+ "Failed to allocate port database structure.\n");
goto done_free_sp;
}
memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
@@ -734,9 +843,9 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
if (rval != QLA_SUCCESS)
goto done_free_sp;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async-%s %8phC hndl %x opt %x\n",
- sp->name, fcport->port_name, sp->handle, opt);
+ ql_dbg(ql_dbg_disc, vha, 0x20dc,
+ "Async-%s %8phC hndl %x opt %x\n",
+ sp->name, fcport->port_name, sp->handle, opt);
return rval;
@@ -760,27 +869,27 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
fcport->flags &= ~FCF_ASYNC_SENT;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20d2,
"%s %8phC DS %d LS %d rval %d\n", __func__, fcport->port_name,
fcport->disc_state, fcport->fw_login_state, rval);
if (ea->sp->gen2 != fcport->login_gen) {
/* target side must have changed it. */
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
"%s %8phC generation changed rscn %d|%d login %d|%d \n",
__func__, fcport->port_name, fcport->last_rscn_gen,
fcport->rscn_gen, fcport->last_login_gen,
fcport->login_gen);
return;
} else if (ea->sp->gen1 != fcport->rscn_gen) {
- ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post gidpn\n",
+ ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
__func__, __LINE__, fcport->port_name);
qla24xx_post_gidpn_work(vha, fcport);
return;
}
if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post del sess\n",
+ ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
__func__, __LINE__, fcport->port_name);
qlt_schedule_sess_for_deletion_lock(fcport);
return;
@@ -797,14 +906,14 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
if (!IS_IIDMA_CAPABLE(vha->hw) ||
!vha->hw->flags.gpsc_supported) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20d6,
"%s %d %8phC post upd_fcport fcp_cnt %d\n",
__func__, __LINE__, fcport->port_name,
vha->fcport_count);
qla24xx_post_upd_fcport_work(vha, fcport);
} else {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20d7,
"%s %d %8phC post gpsc fcp_cnt %d\n",
__func__, __LINE__, fcport->port_name,
vha->fcport_count);
@@ -823,7 +932,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
if (fcport->scan_state != QLA_FCPORT_FOUND)
return 0;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20d8,
"%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d|%d retry %d lid %d\n",
__func__, fcport->port_name, fcport->disc_state,
fcport->fw_login_state, fcport->login_pause, fcport->flags,
@@ -854,14 +963,14 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
switch (fcport->disc_state) {
case DSC_DELETED:
if (fcport->loop_id == FC_NO_LOOP_ID) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post gnl\n",
- __func__, __LINE__, fcport->port_name);
+ ql_dbg(ql_dbg_disc, vha, 0x20bd,
+ "%s %d %8phC post gnl\n",
+ __func__, __LINE__, fcport->port_name);
qla24xx_async_gnl(vha, fcport);
} else {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post login\n",
- __func__, __LINE__, fcport->port_name);
+ ql_dbg(ql_dbg_disc, vha, 0x20bf,
+ "%s %d %8phC post login\n",
+ __func__, __LINE__, fcport->port_name);
fcport->disc_state = DSC_LOGIN_PEND;
qla2x00_post_async_login_work(vha, fcport, NULL);
}
@@ -878,16 +987,16 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
if (fcport->flags & FCF_FCP2_DEVICE) {
u8 opt = PDO_FORCE_ADISC;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post gpdb\n",
- __func__, __LINE__, fcport->port_name);
+ ql_dbg(ql_dbg_disc, vha, 0x20c9,
+ "%s %d %8phC post gpdb\n",
+ __func__, __LINE__, fcport->port_name);
fcport->disc_state = DSC_GPDB;
qla24xx_post_gpdb_work(vha, fcport, opt);
} else {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post login \n",
- __func__, __LINE__, fcport->port_name);
+ ql_dbg(ql_dbg_disc, vha, 0x20cf,
+ "%s %d %8phC post login\n",
+ __func__, __LINE__, fcport->port_name);
fcport->disc_state = DSC_LOGIN_PEND;
qla2x00_post_async_login_work(vha, fcport, NULL);
}
@@ -895,18 +1004,18 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
break;
case DSC_LOGIN_FAILED:
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post gidpn \n",
- __func__, __LINE__, fcport->port_name);
+ ql_dbg(ql_dbg_disc, vha, 0x20d0,
+ "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, fcport->port_name);
qla24xx_post_gidpn_work(vha, fcport);
break;
case DSC_LOGIN_COMPLETE:
/* recheck login state */
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post gpdb \n",
- __func__, __LINE__, fcport->port_name);
+ ql_dbg(ql_dbg_disc, vha, 0x20d1,
+ "%s %d %8phC post gpdb\n",
+ __func__, __LINE__, fcport->port_name);
qla24xx_post_gpdb_work(vha, fcport, PDO_FORCE_ADISC);
break;
@@ -923,10 +1032,10 @@ void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
{
fcport->rscn_gen++;
- ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
- "%s %8phC DS %d LS %d\n",
- __func__, fcport->port_name, fcport->disc_state,
- fcport->fw_login_state);
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x210c,
+ "%s %8phC DS %d LS %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state);
if (fcport->flags & FCF_ASYNC_SENT)
return;
@@ -993,14 +1102,14 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
return;
}
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
- __func__, fcport->port_name, fcport->disc_state,
- fcport->fw_login_state, fcport->login_pause,
- fcport->deleted, fcport->conflict,
- fcport->last_rscn_gen, fcport->rscn_gen,
- fcport->last_login_gen, fcport->login_gen,
- fcport->flags);
+ ql_dbg(ql_dbg_disc, vha, 0x2102,
+ "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, fcport->login_pause,
+ fcport->deleted, fcport->conflict,
+ fcport->last_rscn_gen, fcport->rscn_gen,
+ fcport->last_login_gen, fcport->login_gen,
+ fcport->flags);
if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
(fcport->fw_login_state == DSC_LS_PRLI_PEND))
@@ -1023,7 +1132,7 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
}
if (fcport->last_rscn_gen != fcport->rscn_gen) {
- ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post gidpn\n",
+ ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
__func__, __LINE__, fcport->port_name);
qla24xx_async_gidpn(vha, fcport);
@@ -1041,6 +1150,20 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
switch (ea->event) {
case FCME_RELOGIN:
+ case FCME_RSCN:
+ case FCME_GIDPN_DONE:
+ case FCME_GPSC_DONE:
+ case FCME_GPNID_DONE:
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
+ test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
+ return;
+ break;
+ default:
+ break;
+ }
+
+ switch (ea->event) {
+ case FCME_RELOGIN:
if (test_bit(UNLOADING, &vha->dpc_flags))
return;
@@ -1056,10 +1179,10 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
/* cable moved */
rc = qla24xx_post_gpnid_work(vha, &ea->id);
if (rc) {
- ql_log(ql_log_warn, vha, 0xffff,
- "RSCN GPNID work failed %02x%02x%02x\n",
- ea->id.b.domain, ea->id.b.area,
- ea->id.b.al_pa);
+ ql_log(ql_log_warn, vha, 0xd044,
+ "RSCN GPNID work failed %02x%02x%02x\n",
+ ea->id.b.domain, ea->id.b.area,
+ ea->id.b.al_pa);
}
} else {
ea->fcport = fcport;
@@ -1070,14 +1193,14 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
case RSCN_DOM_ADDR:
if (ea->id.b.rsvd_1 == RSCN_AREA_ADDR) {
mask = 0xffff00;
- ql_log(ql_dbg_async, vha, 0xffff,
- "RSCN: Area 0x%06x was affected\n",
- ea->id.b24);
+ ql_dbg(ql_dbg_async, vha, 0x5044,
+ "RSCN: Area 0x%06x was affected\n",
+ ea->id.b24);
} else {
mask = 0xff0000;
- ql_log(ql_dbg_async, vha, 0xffff,
- "RSCN: Domain 0x%06x was affected\n",
- ea->id.b24);
+ ql_dbg(ql_dbg_async, vha, 0x507a,
+ "RSCN: Domain 0x%06x was affected\n",
+ ea->id.b24);
}
rid = ea->id.b24 & mask;
@@ -1092,9 +1215,9 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
break;
case RSCN_FAB_ADDR:
default:
- ql_log(ql_log_warn, vha, 0xffff,
- "RSCN: Fabric was affected. Addr format %d\n",
- ea->id.b.rsvd_1);
+ ql_log(ql_log_warn, vha, 0xd045,
+ "RSCN: Fabric was affected. Addr format %d\n",
+ ea->id.b.rsvd_1);
qla2x00_mark_all_devices_lost(vha, 1);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
@@ -1112,12 +1235,18 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
case FCME_PLOGI_DONE: /* Initiator side sent LLIOCB */
qla24xx_handle_plogi_done_event(vha, ea);
break;
+ case FCME_PRLI_DONE:
+ qla24xx_handle_prli_done_event(vha, ea);
+ break;
case FCME_GPDB_DONE:
qla24xx_handle_gpdb_event(vha, ea);
break;
case FCME_GPNID_DONE:
qla24xx_handle_gpnid_event(vha, ea);
break;
+ case FCME_GFFID_DONE:
+ qla24xx_handle_gffid_event(vha, ea);
+ break;
case FCME_DELETE_DONE:
qla24xx_handle_delete_done_event(vha, ea);
break;
@@ -1294,6 +1423,27 @@ qla24xx_async_abort_command(srb_t *sp)
}
static void
+qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
+{
+ switch (ea->data[0]) {
+ case MBS_COMMAND_COMPLETE:
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post gpdb\n",
+ __func__, __LINE__, ea->fcport->port_name);
+
+ ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
+ ea->fcport->logout_on_delete = 1;
+ qla24xx_post_gpdb_work(vha, ea->fcport, 0);
+ break;
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0x2119,
+ "%s %d %8phC unhandle event of %x\n",
+ __func__, __LINE__, ea->fcport->port_name, ea->data[0]);
+ break;
+ }
+}
+
+static void
qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
port_id_t cid; /* conflict Nport id */
@@ -1305,15 +1455,22 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
* force a relogin attempt via implicit LOGO, PLOGI, and PRLI
* requests.
*/
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post gpdb\n",
- __func__, __LINE__, ea->fcport->port_name);
- ea->fcport->chip_reset = vha->hw->chip_reset;
- ea->fcport->logout_on_delete = 1;
- qla24xx_post_gpdb_work(vha, ea->fcport, 0);
+ if (ea->fcport->fc4f_nvme) {
+ ql_dbg(ql_dbg_disc, vha, 0x2117,
+ "%s %d %8phC post prli\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ qla24xx_post_prli_work(vha, ea->fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20ea,
+ "%s %d %8phC post gpdb\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
+ ea->fcport->logout_on_delete = 1;
+ qla24xx_post_gpdb_work(vha, ea->fcport, 0);
+ }
break;
case MBS_COMMAND_ERROR:
- ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC cmd error %x\n",
+ ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
__func__, __LINE__, ea->fcport->port_name, ea->data[1]);
ea->fcport->flags &= ~FCF_ASYNC_SENT;
@@ -1330,10 +1487,10 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
cid.b.al_pa = ea->iop[1] & 0xff;
cid.b.rsvd_1 = 0;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC LoopID 0x%x in use post gnl\n",
- __func__, __LINE__, ea->fcport->port_name,
- ea->fcport->loop_id);
+ ql_dbg(ql_dbg_disc, vha, 0x20ec,
+ "%s %d %8phC LoopID 0x%x in use post gnl\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ ea->fcport->loop_id);
if (IS_SW_RESV_ADDR(cid)) {
set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
@@ -1344,11 +1501,11 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
qla24xx_post_gnl_work(vha, ea->fcport);
break;
case MBS_PORT_ID_USED:
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n",
- __func__, __LINE__, ea->fcport->port_name,
- ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
- ea->fcport->d_id.b.al_pa);
+ ql_dbg(ql_dbg_disc, vha, 0x20ed,
+ "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
+ ea->fcport->d_id.b.al_pa);
qla2x00_clear_loop_id(ea->fcport);
qla24xx_post_gidpn_work(vha, ea->fcport);
@@ -2524,6 +2681,13 @@ cont_alloc:
ha->chain_offset = dump_size;
dump_size += mq_size + fce_size;
+ if (ha->exchoffld_buf)
+ dump_size += sizeof(struct qla2xxx_offld_chain) +
+ ha->exchoffld_size;
+ if (ha->exlogin_buf)
+ dump_size += sizeof(struct qla2xxx_offld_chain) +
+ ha->exlogin_size;
+
allocate:
ha->fw_dump = vmalloc(dump_size);
if (!ha->fw_dump) {
@@ -2709,7 +2873,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
if (ql2xexlogins)
ha->flags.exlogins_enabled = 1;
- if (ql2xexchoffld)
+ if (qla_is_exch_offld_enabled(vha))
ha->flags.exchoffld_enabled = 1;
rval = qla2x00_execute_fw(vha, srisc_address);
@@ -2946,7 +3110,8 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
}
/* Move PUREX, ABTS RX & RIDA to ATIOQ */
- if (ql2xmvasynctoatio) {
+ if (ql2xmvasynctoatio &&
+ (IS_QLA83XX(ha) || IS_QLA27XX(ha))) {
if (qla_tgt_mode_enabled(vha) ||
qla_dual_mode_enabled(vha))
ha->fw_options[2] |= BIT_11;
@@ -2954,11 +3119,25 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
ha->fw_options[2] &= ~BIT_11;
}
- ql_dbg(ql_dbg_init, vha, 0xffff,
- "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
- __func__, ha->fw_options[1], ha->fw_options[2],
- ha->fw_options[3], vha->host->active_mode);
- qla2x00_set_fw_options(vha, ha->fw_options);
+ if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ /*
+ * Tell FW to track each exchange to prevent the
+ * driver from using a stale exchange.
+ */
+ if (qla_tgt_mode_enabled(vha) ||
+ qla_dual_mode_enabled(vha))
+ ha->fw_options[2] |= BIT_4;
+ else
+ ha->fw_options[2] &= ~BIT_4;
+ }
+
+ ql_dbg(ql_dbg_init, vha, 0x00e8,
+ "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
+ __func__, ha->fw_options[1], ha->fw_options[2],
+ ha->fw_options[3], vha->host->active_mode);
+
+ if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
+ qla2x00_set_fw_options(vha, ha->fw_options);
/* Update Serial Link options. */
if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
@@ -3036,7 +3215,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
icb->rid = cpu_to_le16(rid);
if (ha->flags.msix_enabled) {
msix = &ha->msix_entries[1];
- ql_dbg(ql_dbg_init, vha, 0x00fd,
+ ql_dbg(ql_dbg_init, vha, 0x0019,
"Registering vector 0x%x for base que.\n",
msix->entry);
icb->msix = cpu_to_le16(msix->entry);
@@ -3166,7 +3345,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
/* FA-WWPN Status */
ha->flags.fawwpn_enabled =
(mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0;
- ql_dbg(ql_dbg_init, vha, 0x0141, "FA-WWPN Support: %s.\n",
+ ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
(ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
}
@@ -3178,7 +3357,7 @@ next_check:
} else {
ql_dbg(ql_dbg_init, vha, 0x00d3,
"Init Firmware -- success.\n");
- ha->flags.fw_started = 1;
+ QLA_FW_STARTED(ha);
}
return (rval);
@@ -3840,10 +4019,10 @@ qla2x00_rport_del(void *data)
fcport->drport = NULL;
spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
if (rport) {
- ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
- "%s %8phN. rport %p roles %x \n",
- __func__, fcport->port_name, rport,
- rport->roles);
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x210b,
+ "%s %8phN. rport %p roles %x\n",
+ __func__, fcport->port_name, rport,
+ rport->roles);
fc_remote_port_delete(rport);
}
@@ -3883,7 +4062,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
fcport->logout_on_delete = 1;
if (!fcport->ct_desc.ct_sns) {
- ql_log(ql_log_warn, vha, 0xffff,
+ ql_log(ql_log_warn, vha, 0xd049,
"Failed to allocate ct_sns request.\n");
kfree(fcport);
fcport = NULL;
@@ -3985,7 +4164,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
if (LOOP_TRANSITION(vha)) {
- ql_dbg(ql_dbg_disc, vha, 0x201e,
+ ql_dbg(ql_dbg_disc, vha, 0x2099,
"Needs RSCN update and loop transition.\n");
rval = QLA_FUNCTION_FAILED;
}
@@ -4085,7 +4264,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
if (rval != QLA_SUCCESS)
goto cleanup_allocation;
- ql_dbg(ql_dbg_disc, vha, 0x2017,
+ ql_dbg(ql_dbg_disc, vha, 0x2011,
"Entries in ID list (%d).\n", entries);
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
(uint8_t *)ha->gid_list,
@@ -4094,7 +4273,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
/* Allocate temporary fcport for any new fcports discovered. */
new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (new_fcport == NULL) {
- ql_log(ql_log_warn, vha, 0x2018,
+ ql_log(ql_log_warn, vha, 0x2012,
"Memory allocation failed for fcport.\n");
rval = QLA_MEMORY_ALLOC_FAILED;
goto cleanup_allocation;
@@ -4109,7 +4288,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
fcport->port_type != FCT_BROADCAST &&
(fcport->flags & FCF_FABRIC_DEVICE) == 0) {
- ql_dbg(ql_dbg_disc, vha, 0x2019,
+ ql_dbg(ql_dbg_disc, vha, 0x2096,
"Marking port lost loop_id=0x%04x.\n",
fcport->loop_id);
@@ -4154,11 +4333,11 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
if (rval2 != QLA_SUCCESS) {
- ql_dbg(ql_dbg_disc, vha, 0x201a,
+ ql_dbg(ql_dbg_disc, vha, 0x2097,
"Failed to retrieve fcport information "
"-- get_port_database=%x, loop_id=0x%04x.\n",
rval2, new_fcport->loop_id);
- ql_dbg(ql_dbg_disc, vha, 0x201b,
+ ql_dbg(ql_dbg_disc, vha, 0x2105,
"Scheduling resync.\n");
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
continue;
@@ -4207,7 +4386,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (new_fcport == NULL) {
- ql_log(ql_log_warn, vha, 0x201c,
+ ql_log(ql_log_warn, vha, 0xd031,
"Failed to allocate memory for fcport.\n");
rval = QLA_MEMORY_ALLOC_FAILED;
goto cleanup_allocation;
@@ -4230,7 +4409,7 @@ cleanup_allocation:
kfree(new_fcport);
if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_disc, vha, 0x201d,
+ ql_dbg(ql_dbg_disc, vha, 0x2098,
"Configure local loop error exit: rval=%x.\n", rval);
}
@@ -4300,10 +4479,10 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
if (fcport->port_type == FCT_TARGET)
rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %8phN. rport %p is %s mode \n",
- __func__, fcport->port_name, rport,
- (fcport->port_type == FCT_TARGET) ? "tgt" : "ini");
+ ql_dbg(ql_dbg_disc, vha, 0x20ee,
+ "%s %8phN. rport %p is %s mode\n",
+ __func__, fcport->port_name, rport,
+ (fcport->port_type == FCT_TARGET) ? "tgt" : "ini");
fc_remote_port_rolechg(rport, rport_ids.roles);
}
@@ -4331,7 +4510,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
if (IS_SW_RESV_ADDR(fcport->d_id))
return;
- ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %8phC \n",
+ ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
__func__, fcport->port_name);
if (IS_QLAFX00(vha->hw)) {
@@ -4344,6 +4523,11 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->deleted = 0;
fcport->logout_on_delete = 1;
+ if (fcport->fc4f_nvme) {
+ qla_nvme_register_remote(vha, fcport);
+ return;
+ }
+
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
qla2x00_iidma_fcport(vha, fcport);
qla24xx_update_fcport_fcp_prio(vha, fcport);
@@ -4398,7 +4582,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
loop_id = SNS_FL_PORT;
rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_disc, vha, 0x201f,
+ ql_dbg(ql_dbg_disc, vha, 0x20a0,
"MBX_GET_PORT_NAME failed, No FL Port.\n");
vha->device_flags &= ~SWITCH_FOUND;
@@ -4436,7 +4620,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
return rval;
}
if (mb[0] != MBS_COMMAND_COMPLETE) {
- ql_dbg(ql_dbg_disc, vha, 0x2042,
+ ql_dbg(ql_dbg_disc, vha, 0x20a1,
"Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
"mb[6]=%x mb[7]=%x.\n", loop_id, mb[0], mb[1],
mb[2], mb[6], mb[7]);
@@ -4446,22 +4630,39 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
if (qla2x00_rft_id(vha)) {
/* EMPTY */
- ql_dbg(ql_dbg_disc, vha, 0x2045,
+ ql_dbg(ql_dbg_disc, vha, 0x20a2,
"Register FC-4 TYPE failed.\n");
+ if (test_bit(LOOP_RESYNC_NEEDED,
+ &vha->dpc_flags))
+ break;
}
- if (qla2x00_rff_id(vha)) {
+ if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
/* EMPTY */
- ql_dbg(ql_dbg_disc, vha, 0x2049,
+ ql_dbg(ql_dbg_disc, vha, 0x209a,
"Register FC-4 Features failed.\n");
+ if (test_bit(LOOP_RESYNC_NEEDED,
+ &vha->dpc_flags))
+ break;
+ }
+ if (vha->flags.nvme_enabled) {
+ if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
+ ql_dbg(ql_dbg_disc, vha, 0x2049,
+ "Register NVME FC Type Features failed.\n");
+ }
}
if (qla2x00_rnn_id(vha)) {
/* EMPTY */
- ql_dbg(ql_dbg_disc, vha, 0x204f,
+ ql_dbg(ql_dbg_disc, vha, 0x2104,
"Register Node Name failed.\n");
+ if (test_bit(LOOP_RESYNC_NEEDED,
+ &vha->dpc_flags))
+ break;
} else if (qla2x00_rsnn_nn(vha)) {
/* EMPTY */
- ql_dbg(ql_dbg_disc, vha, 0x2053,
- "Register Symobilic Node Name failed.\n");
+ ql_dbg(ql_dbg_disc, vha, 0x209b,
+ "Register Symbolic Node Name failed.\n");
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
}
}
@@ -4482,6 +4683,9 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
break;
} while (0);
+ if (!vha->nvme_local_port && vha->flags.nvme_enabled)
+ qla_nvme_register_hba(vha);
+
if (rval)
ql_dbg(ql_dbg_disc, vha, 0x2068,
"Configure fabric error exit rval=%d.\n", rval);
@@ -4527,30 +4731,41 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
swl = ha->swl;
if (!swl) {
/*EMPTY*/
- ql_dbg(ql_dbg_disc, vha, 0x2054,
+ ql_dbg(ql_dbg_disc, vha, 0x209c,
"GID_PT allocations failed, fallback on GA_NXT.\n");
} else {
memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
swl = NULL;
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ return rval;
} else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
swl = NULL;
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ return rval;
} else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
swl = NULL;
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ return rval;
} else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
swl = NULL;
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ return rval;
}
/* If other queries succeeded probe for FC-4 type */
- if (swl)
+ if (swl) {
qla2x00_gff_id(vha, swl);
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ return rval;
+ }
}
swl_idx = 0;
/* Allocate temporary fcport for any new fcports discovered. */
new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (new_fcport == NULL) {
- ql_log(ql_log_warn, vha, 0x205e,
+ ql_log(ql_log_warn, vha, 0x209d,
"Failed to allocate memory for fcport.\n");
return (QLA_MEMORY_ALLOC_FAILED);
}
@@ -4588,6 +4803,16 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
new_fcport->fp_speed = swl[swl_idx].fp_speed;
new_fcport->fc4_type = swl[swl_idx].fc4_type;
+ new_fcport->nvme_flag = 0;
+ if (vha->flags.nvme_enabled &&
+ swl[swl_idx].fc4f_nvme) {
+ new_fcport->fc4f_nvme =
+ swl[swl_idx].fc4f_nvme;
+ ql_log(ql_log_info, vha, 0x2131,
+ "FOUND: NVME port %8phC as FC Type 28h\n",
+ new_fcport->port_name);
+ }
+
if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
last_dev = 1;
}
@@ -4597,7 +4822,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
/* Send GA_NXT to the switch */
rval = qla2x00_ga_nxt(vha, new_fcport);
if (rval != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x2064,
+ ql_log(ql_log_warn, vha, 0x209e,
"SNS scan failed -- assuming "
"zero-entry result.\n");
rval = QLA_SUCCESS;
@@ -4610,7 +4835,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
wrap.b24 = new_fcport->d_id.b24;
first_dev = 0;
} else if (new_fcport->d_id.b24 == wrap.b24) {
- ql_dbg(ql_dbg_disc, vha, 0x2065,
+ ql_dbg(ql_dbg_disc, vha, 0x209f,
"Device wrap (%02x%02x%02x).\n",
new_fcport->d_id.b.domain,
new_fcport->d_id.b.area,
@@ -4722,7 +4947,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
nxt_d_id.b24 = new_fcport->d_id.b24;
new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (new_fcport == NULL) {
- ql_log(ql_log_warn, vha, 0x2066,
+ ql_log(ql_log_warn, vha, 0xd032,
"Memory allocation failed for fcport.\n");
return (QLA_MEMORY_ALLOC_FAILED);
}
@@ -4753,7 +4978,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
(fcport->flags & FCF_FCP2_DEVICE) == 0 &&
fcport->port_type != FCT_INITIATOR &&
fcport->port_type != FCT_BROADCAST) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20f0,
"%s %d %8phC post del sess\n",
__func__, __LINE__,
fcport->port_name);
@@ -5473,6 +5698,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
struct scsi_qla_host *vp;
unsigned long flags;
fc_port_t *fcport;
+ u16 i;
/* For ISP82XX, driver waits for completion of the commands.
* online flag should be set.
@@ -5498,7 +5724,12 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
ha->current_topology = 0;
ha->flags.fw_started = 0;
ha->flags.fw_init_done = 0;
- ha->chip_reset++;
+ ha->base_qpair->chip_reset++;
+ for (i = 0; i < ha->max_qpairs; i++) {
+ if (ha->queue_pair_map[i])
+ ha->queue_pair_map[i]->chip_reset =
+ ha->base_qpair->chip_reset;
+ }
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -6353,8 +6584,8 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
"-> template size %x bytes\n", dlen);
if (dlen > risc_size * sizeof(*dcode)) {
ql_log(ql_log_warn, vha, 0x0167,
- "Failed fwdump template exceeds array by %x bytes\n",
- (uint32_t)(dlen - risc_size * sizeof(*dcode)));
+ "Failed fwdump template exceeds array by %zx bytes\n",
+ (size_t)(dlen - risc_size * sizeof(*dcode)));
goto default_template;
}
ha->fw_dump_template_len = dlen;
@@ -6655,8 +6886,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
"-> template size %x bytes\n", dlen);
if (dlen > risc_size * sizeof(*fwcode)) {
ql_log(ql_log_warn, vha, 0x0177,
- "Failed fwdump template exceeds array by %x bytes\n",
- (uint32_t)(dlen - risc_size * sizeof(*fwcode)));
+ "Failed fwdump template exceeds array by %zx bytes\n",
+ (size_t)(dlen - risc_size * sizeof(*fwcode)));
goto default_template;
}
ha->fw_dump_template_len = dlen;
@@ -6790,7 +7021,7 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
ret = qla2x00_stop_firmware(vha);
}
- ha->flags.fw_started = 0;
+ QLA_FW_STOPPED(ha);
ha->flags.fw_init_done = 0;
}
@@ -7322,16 +7553,31 @@ qla81xx_update_fw_options(scsi_qla_host_t *vha)
ha->fw_options[2] &= ~BIT_11;
}
+ if (qla_tgt_mode_enabled(vha) ||
+ qla_dual_mode_enabled(vha)) {
+ /* FW auto send SCSI status during */
+ ha->fw_options[1] |= BIT_8;
+ ha->fw_options[10] |= (u16)SAM_STAT_BUSY << 8;
+
+ /* FW perform Exchange validation */
+ ha->fw_options[2] |= BIT_4;
+ } else {
+ ha->fw_options[1] &= ~BIT_8;
+ ha->fw_options[10] &= 0x00ff;
+
+ ha->fw_options[2] &= ~BIT_4;
+ }
+
if (ql2xetsenable) {
/* Enable ETS Burst. */
memset(ha->fw_options, 0, sizeof(ha->fw_options));
ha->fw_options[2] |= BIT_9;
}
- ql_dbg(ql_dbg_init, vha, 0xffff,
- "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
- __func__, ha->fw_options[1], ha->fw_options[2],
- ha->fw_options[3], vha->host->active_mode);
+ ql_dbg(ql_dbg_init, vha, 0x00e9,
+ "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
+ __func__, ha->fw_options[1], ha->fw_options[2],
+ ha->fw_options[3], vha->host->active_mode);
qla2x00_set_fw_options(vha, ha->fw_options);
}
@@ -7512,7 +7758,8 @@ qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
return ret;
}
-struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int vp_idx)
+struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
+ int vp_idx, bool startqp)
{
int rsp_id = 0;
int req_id = 0;
@@ -7539,6 +7786,9 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v
qpair->hw = vha->hw;
qpair->vha = vha;
+ qpair->qp_lock_ptr = &qpair->qp_lock;
+ spin_lock_init(&qpair->qp_lock);
+ qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
/* Assign available que pair id */
mutex_lock(&ha->mq_lock);
@@ -7554,13 +7804,18 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v
ha->queue_pair_map[qpair_id] = qpair;
qpair->id = qpair_id;
qpair->vp_idx = vp_idx;
+ INIT_LIST_HEAD(&qpair->hints_list);
+ qpair->chip_reset = ha->base_qpair->chip_reset;
+ qpair->enable_class_2 = ha->base_qpair->enable_class_2;
+ qpair->enable_explicit_conf =
+ ha->base_qpair->enable_explicit_conf;
for (i = 0; i < ha->msix_count; i++) {
msix = &ha->msix_entries[i];
if (msix->in_use)
continue;
qpair->msix = msix;
- ql_log(ql_dbg_multiq, vha, 0xc00f,
+ ql_dbg(ql_dbg_multiq, vha, 0xc00f,
"Vector %x selected for qpair\n", msix->vector);
break;
}
@@ -7572,11 +7827,14 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v
qpair->msix->in_use = 1;
list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
+ qpair->pdev = ha->pdev;
+ if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
+ qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
mutex_unlock(&ha->mq_lock);
/* Create response queue first */
- rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair);
+ rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp);
if (!rsp_id) {
ql_log(ql_log_warn, vha, 0x0185,
"Failed to create response queue.\n");
@@ -7586,7 +7844,8 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v
qpair->rsp = ha->rsp_q_map[rsp_id];
/* Create request queue */
- req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos);
+ req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos,
+ startqp);
if (!req_id) {
ql_log(ql_log_warn, vha, 0x0186,
"Failed to create request queue.\n");
@@ -7595,6 +7854,9 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v
qpair->req = ha->req_q_map[req_id];
qpair->rsp->req = qpair->req;
+ qpair->rsp->qpair = qpair;
+ /* init qpair to this cpu. Will adjust at run time. */
+ qla_cpu_update(qpair, smp_processor_id());
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
if (ha->fw_attributes & BIT_4)
@@ -7603,7 +7865,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v
qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
if (!qpair->srb_mempool) {
- ql_log(ql_log_warn, vha, 0x0191,
+ ql_log(ql_log_warn, vha, 0xd036,
"Failed to create srb mempool for qpair %d\n",
qpair->id);
goto fail_mempool;
@@ -7645,9 +7907,12 @@ fail_qid_map:
int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
{
- int ret;
+ int ret = QLA_FUNCTION_FAILED;
struct qla_hw_data *ha = qpair->hw;
+ if (!vha->flags.qpairs_req_created && !vha->flags.qpairs_rsp_created)
+ goto fail;
+
qpair->delete_in_progress = 1;
while (atomic_read(&qpair->ref_count))
msleep(500);
@@ -7664,8 +7929,11 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
clear_bit(qpair->id, ha->qpair_qid_map);
ha->num_qpairs--;
list_del(&qpair->qp_list_elem);
- if (list_empty(&vha->qp_list))
+ if (list_empty(&vha->qp_list)) {
vha->flags.qpairs_available = 0;
+ vha->flags.qpairs_req_created = 0;
+ vha->flags.qpairs_rsp_created = 0;
+ }
mempool_destroy(qpair->srb_mempool);
kfree(qpair);
mutex_unlock(&ha->mq_lock);
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index c61a6a871c8e..9a2c86eacf44 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -250,6 +250,7 @@ qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
memset(sp, 0, sizeof(*sp));
sp->fcport = fcport;
+ sp->cmd_type = TYPE_SRB;
sp->iocbs = 1;
sp->vha = vha;
done:
@@ -307,3 +308,62 @@ qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t retry_delay)
fcport->retry_delay_timestamp = jiffies +
(retry_delay * HZ / 10);
}
+
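+/*
+ * True when the module parameters request more exchanges than the
+ * firmware default for the enabled mode (initiator, target or dual),
+ * i.e. when exchange offload must be configured.
+ */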
+static inline bool
+qla_is_exch_offld_enabled(struct scsi_qla_host *vha)
+{
+ if (qla_ini_mode_enabled(vha) &&
+ (ql2xiniexchg > FW_DEF_EXCHANGES_CNT))
+ return true;
+ else if (qla_tgt_mode_enabled(vha) &&
+ (ql2xexchoffld > FW_DEF_EXCHANGES_CNT))
+ return true;
+ else if (qla_dual_mode_enabled(vha) &&
+ ((ql2xiniexchg + ql2xexchoffld) > FW_DEF_EXCHANGES_CNT))
+ return true;
+ else
+ return false;
+}
+
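+/* Record the CPU servicing this qpair and propagate it to its hints. */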
+static inline void
+qla_cpu_update(struct qla_qpair *qpair, uint16_t cpuid)
+{
+ qpair->cpuid = cpuid;
+
+ if (!list_empty(&qpair->hints_list)) {
+ struct qla_qpair_hint *h;
+
+ list_for_each_entry(h, &qpair->hints_list, hint_elem)
+ h->cpuid = qpair->cpuid;
+ }
+}
+
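+/* Find the hint entry of @tgt that maps to @qpair; NULL if none. */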
+static inline struct qla_qpair_hint *
+qla_qpair_to_hint(struct qla_tgt *tgt, struct qla_qpair *qpair)
+{
+ struct qla_qpair_hint *h;
+ u16 i;
+
+ for (i = 0; i < tgt->ha->max_qpairs + 1; i++) {
+ h = &tgt->qphints[i];
+ if (h->qpair == qpair)
+ return h;
+ }
+
+ return NULL;
+}
+
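+/* Advance the request ring of @qpair and notify the chip (83xx/27xx). */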
+static inline void
+qla_83xx_start_iocbs(struct qla_qpair *qpair)
+{
+ struct req_que *req = qpair->req;
+
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ WRT_REG_DWORD(req->req_q_in, req->ring_index);
+}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index ea027f6a7fd4..a36c485fae50 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -464,7 +464,9 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
req->ring_ptr++;
/* Set chip new ring index. */
- if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ if (ha->mqenable || IS_QLA27XX(ha)) {
+ WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ } else if (IS_QLA83XX(ha)) {
WRT_REG_DWORD(req->req_q_in, req->ring_index);
RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
} else if (IS_QLAFX00(ha)) {
@@ -1768,6 +1770,9 @@ qla2xxx_start_scsi_mq(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct qla_qpair *qpair = sp->qpair;
+ /* Acquire qpair specific lock */
+ spin_lock_irqsave(&qpair->qp_lock, flags);
+
/* Setup qpair pointers */
rsp = qpair->rsp;
req = qpair->req;
@@ -1777,15 +1782,14 @@ qla2xxx_start_scsi_mq(srb_t *sp)
/* Send marker if required */
if (vha->marker_needed != 0) {
- if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
- QLA_SUCCESS)
+ if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+ QLA_SUCCESS) {
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
return QLA_FUNCTION_FAILED;
+ }
vha->marker_needed = 0;
}
- /* Acquire qpair specific lock */
- spin_lock_irqsave(&qpair->qp_lock, flags);
-
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
for (index = 1; index < req->num_outstanding_cmds; index++) {
@@ -1940,6 +1944,8 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
return qla2xxx_start_scsi_mq(sp);
}
+ spin_lock_irqsave(&qpair->qp_lock, flags);
+
/* Setup qpair pointers */
rsp = qpair->rsp;
req = qpair->req;
@@ -1949,15 +1955,14 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
/* Send marker if required */
if (vha->marker_needed != 0) {
- if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
- QLA_SUCCESS)
+ if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+ QLA_SUCCESS) {
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
return QLA_FUNCTION_FAILED;
+ }
vha->marker_needed = 0;
}
- /* Acquire ring specific lock */
- spin_lock_irqsave(&qpair->qp_lock, flags);
-
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
for (index = 1; index < req->num_outstanding_cmds; index++) {
@@ -2107,20 +2112,13 @@ queuing_error:
/* Generic Control-SRB manipulation functions. */
/* hardware_lock assumed to be held. */
-void *
-qla2x00_alloc_iocbs_ready(scsi_qla_host_t *vha, srb_t *sp)
-{
- if (qla2x00_reset_active(vha))
- return NULL;
-
- return qla2x00_alloc_iocbs(vha, sp);
-}
void *
-qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
+__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
{
+ scsi_qla_host_t *vha = qpair->vha;
struct qla_hw_data *ha = vha->hw;
- struct req_que *req = ha->req_q_map[0];
+ struct req_que *req = qpair->req;
device_reg_t *reg = ISP_QUE_REG(ha, req->id);
uint32_t index, handle;
request_t *pkt;
@@ -2194,10 +2192,44 @@ skip_cmd_array:
}
queuing_error:
- vha->tgt_counters.num_alloc_iocb_failed++;
+ qpair->tgt_counters.num_alloc_iocb_failed++;
return pkt;
}
+void *
+qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
+{
+ scsi_qla_host_t *vha = qpair->vha;
+
+ if (qla2x00_reset_active(vha))
+ return NULL;
+
+ return __qla2x00_alloc_iocbs(qpair, sp);
+}
+
+void *
+qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
+{
+ return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
+}
+
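+/*
+ * Build a PRLI IOCB; SRB_LOGIN_NVME_PRLI selects an FC-NVMe PRLI
+ * instead of the default FCP one.
+ */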
+static void
+qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
+{
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+
+ logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+ logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
+ if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI)
+ logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);
+
+ logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ logio->port_id[0] = sp->fcport->d_id.b.al_pa;
+ logio->port_id[1] = sp->fcport->d_id.b.area;
+ logio->port_id[2] = sp->fcport->d_id.b.domain;
+ logio->vp_index = sp->vha->vp_idx;
+}
+
static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
@@ -2205,6 +2237,7 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
+
if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
@@ -3125,6 +3158,39 @@ static void qla2x00_send_notify_ack_iocb(srb_t *sp,
nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
}
+/*
+ * Build NVME LS request
+ */
+static int
+qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
+{
+ struct srb_iocb *nvme;
+ int rval = QLA_SUCCESS;
+
+ nvme = &sp->u.iocb_cmd;
+ cmd_pkt->entry_type = PT_LS4_REQUEST;
+ cmd_pkt->entry_count = 1;
+ cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
+
+ cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+
+ cmd_pkt->tx_dseg_count = 1;
+ cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
+ cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
+ cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
+ cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));
+
+ cmd_pkt->rx_dseg_count = 1;
+ cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
+ cmd_pkt->dseg1_len = nvme->u.nvme.rsp_len;
+ cmd_pkt->dseg1_address[0] = cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
+ cmd_pkt->dseg1_address[1] = cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));
+
+ return rval;
+}
+
int
qla2x00_start_sp(srb_t *sp)
{
@@ -3150,6 +3216,9 @@ qla2x00_start_sp(srb_t *sp)
qla24xx_login_iocb(sp, pkt) :
qla2x00_login_iocb(sp, pkt);
break;
+ case SRB_PRLI_CMD:
+ qla24xx_prli_iocb(sp, pkt);
+ break;
case SRB_LOGOUT_CMD:
IS_FWI2_CAPABLE(ha) ?
qla24xx_logout_iocb(sp, pkt) :
@@ -3178,6 +3247,9 @@ qla2x00_start_sp(srb_t *sp)
case SRB_FXIOCB_BCMD:
qlafx00_fxdisc_iocb(sp, pkt);
break;
+ case SRB_NVME_LS:
+ qla_nvme_ls(sp, pkt);
+ break;
case SRB_ABT_CMD:
IS_QLAFX00(ha) ?
qlafx00_abort_iocb(sp, pkt) :
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 2572121b765b..6c6e624a5aa6 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/cpu.h>
#include <linux/t10-pi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
@@ -17,7 +18,7 @@
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
-static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
+static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
sts_entry_t *);
/**
@@ -431,8 +432,7 @@ qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
"Register: 0x%x%x.\n", mb[7], mb[3]);
if (err_level == ERR_LEVEL_NON_FATAL) {
ql_log(ql_log_warn, vha, 0x5063,
- "Not a fatal error, f/w has recovered "
- "iteself.\n");
+ "Not a fatal error, f/w has recovered itself.\n");
} else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
ql_log(ql_log_fatal, vha, 0x5064,
"Recoverable Fatal error: Chip reset "
@@ -709,7 +709,7 @@ skip_rio:
ha->isp_ops->fw_dump(vha, 1);
ha->flags.fw_init_done = 0;
- ha->flags.fw_started = 0;
+ QLA_FW_STOPPED(ha);
if (IS_FWI2_CAPABLE(ha)) {
if (mb[1] == 0 && mb[2] == 0) {
@@ -829,7 +829,7 @@ skip_rio:
fc_host_port_name(vha->host) =
wwn_to_u64(vha->port_name);
ql_dbg(ql_dbg_init + ql_dbg_verbose,
- vha, 0x0144, "LOOP DOWN detected,"
+ vha, 0x00d8, "LOOP DOWN detected,"
"restore WWPN %016llx\n",
wwn_to_u64(vha->port_name));
}
@@ -973,6 +973,23 @@ skip_rio:
if (mb[1] == 0xffff)
goto global_port_update;
+ if (mb[1] == NPH_SNS_LID(ha)) {
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ break;
+ }
+
+ /* use handle_cnt for loop id/nport handle */
+ if (IS_FWI2_CAPABLE(ha))
+ handle_cnt = NPH_SNS;
+ else
+ handle_cnt = SIMPLE_NAME_SERVER;
+ if (mb[1] == handle_cnt) {
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ break;
+ }
+
/* Port logout */
fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
if (!fcport)
@@ -1701,7 +1718,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
case LSC_SCODE_NOXCB:
vha->hw->exch_starvation++;
if (vha->hw->exch_starvation > 5) {
- ql_log(ql_log_warn, vha, 0xffff,
+ ql_log(ql_log_warn, vha, 0xd046,
"Exchange starvation. Resetting RISC\n");
vha->hw->exch_starvation = 0;
@@ -1781,6 +1798,79 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
sp->done(sp, 0);
}
+static void
+qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
+{
+ const char func[] = "NVME-IOCB";
+ fc_port_t *fcport;
+ srb_t *sp;
+ struct srb_iocb *iocb;
+ struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
+ uint16_t state_flags;
+ struct nvmefc_fcp_req *fd;
+ uint16_t ret = 0;
+ struct srb_iocb *nvme;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
+ if (!sp)
+ return;
+
+ iocb = &sp->u.iocb_cmd;
+ fcport = sp->fcport;
+ iocb->u.nvme.comp_status = le16_to_cpu(sts->comp_status);
+ state_flags = le16_to_cpu(sts->state_flags);
+ fd = iocb->u.nvme.desc;
+ nvme = &sp->u.iocb_cmd;
+
+ if (unlikely(nvme->u.nvme.aen_op))
+ atomic_dec(&sp->vha->nvme_active_aen_cnt);
+
+ /*
+ * State flags: bits 6 and 0.
+ * If bit 0 is set, we don't care about bit 6: in both
+ * cases the response was DMA'd to the host buffer.
+ * If both are clear, that is the good-path case.
+ * If bit 6 is set and bit 0 is clear, we must copy the
+ * response data from the status IOCB to the response buffer.
+ */
+ if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
+ iocb->u.nvme.rsp_pyld_len = 0;
+ } else if ((state_flags & SF_FCP_RSP_DMA)) {
+ iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
+ } else if (state_flags & SF_NVME_ERSP) {
+ uint32_t *inbuf, *outbuf;
+ uint16_t iter;
+
+ inbuf = (uint32_t *)&sts->nvme_ersp_data;
+ outbuf = (uint32_t *)fd->rspaddr;
+ iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
+ iter = iocb->u.nvme.rsp_pyld_len >> 2;
+ for (; iter; iter--)
+ *outbuf++ = swab32(*inbuf++);
+ } else { /* unhandled case */
+ ql_log(ql_log_warn, fcport->vha, 0x503a,
+ "NVME-%s error. Unhandled state_flags of %x\n",
+ sp->name, state_flags);
+ }
+
+ fd->transferred_length = fd->payload_length -
+ le32_to_cpu(sts->residual_len);
+
+ if (sts->entry_status) {
+ ql_log(ql_log_warn, fcport->vha, 0x5038,
+ "NVME-%s error - hdl=%x entry-status(%x).\n",
+ sp->name, sp->handle, sts->entry_status);
+ ret = QLA_FUNCTION_FAILED;
+ } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
+ ql_log(ql_log_warn, fcport->vha, 0x5039,
+ "NVME-%s error - hdl=%x completion status(%x) resid=%x ox_id=%x\n",
+ sp->name, sp->handle, sts->comp_status,
+ le32_to_cpu(sts->residual_len), sts->ox_id);
+ ret = QLA_FUNCTION_FAILED;
+ }
+ sp->done(sp, ret);
+}
+
/**
* qla2x00_process_response_queue() - Process response queue entries.
* @ha: SCSI driver HA context
@@ -2263,6 +2353,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
return;
}
+ if (sp->cmd_type != TYPE_SRB) {
+ req->outstanding_cmds[handle] = NULL;
+ ql_dbg(ql_dbg_io, vha, 0x3015,
+ "Unknown sp->cmd_type %x %p).\n",
+ sp->cmd_type, sp);
+ return;
+ }
+
+ /* NVME completion. */
+ if (sp->type == SRB_NVME_CMD) {
+ qla24xx_nvme_iocb_entry(vha, req, pkt);
+ return;
+ }
+
if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
return;
@@ -2374,8 +2478,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
((unsigned)(scsi_bufflen(cp) - resid) <
cp->underflow)) {
ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
- "Mid-layer underflow "
- "detected (0x%x of 0x%x bytes).\n",
+ "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
resid, scsi_bufflen(cp));
res = DID_ERROR << 16;
@@ -2408,8 +2511,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
if (scsi_status & SS_RESIDUAL_UNDER) {
if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
- "Dropped frame(s) detected "
- "(0x%x of 0x%x bytes).\n",
+ "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
resid, scsi_bufflen(cp));
res = DID_ERROR << 16 | lscsi_status;
@@ -2420,8 +2522,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
((unsigned)(scsi_bufflen(cp) - resid) <
cp->underflow)) {
ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
- "Mid-layer underflow "
- "detected (0x%x of 0x%x bytes).\n",
+ "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
resid, scsi_bufflen(cp));
res = DID_ERROR << 16;
@@ -2435,9 +2536,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
*/
ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
- "Dropped frame(s) detected (0x%x "
- "of 0x%x bytes).\n", resid,
- scsi_bufflen(cp));
+ "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
+ resid, scsi_bufflen(cp));
res = DID_ERROR << 16 | lscsi_status;
goto check_scsi_status;
@@ -2619,8 +2719,9 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
* qla2x00_error_entry() - Process an error entry.
* @ha: SCSI driver HA context
* @pkt: Entry pointer
+ * Returns: 1 = allow further error analysis, 0 = no additional error analysis.
*/
-static void
+static int
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
srb_t *sp;
@@ -2631,7 +2732,8 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
int res = DID_ERROR << 16;
ql_dbg(ql_dbg_async, vha, 0x502a,
- "type of error status in response: 0x%x\n", pkt->entry_status);
+ "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
+ pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);
if (que >= ha->max_req_queues || !ha->req_q_map[que])
goto fatal;
@@ -2641,18 +2743,35 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
if (pkt->entry_status & RF_BUSY)
res = DID_BUS_BUSY << 16;
- if (pkt->entry_type == NOTIFY_ACK_TYPE &&
- pkt->handle == QLA_TGT_SKIP_HANDLE)
- return;
+ if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
+ return 0;
- sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
- if (sp) {
- sp->done(sp, res);
- return;
+ switch (pkt->entry_type) {
+ case NOTIFY_ACK_TYPE:
+ case STATUS_TYPE:
+ case STATUS_CONT_TYPE:
+ case LOGINOUT_PORT_IOCB_TYPE:
+ case CT_IOCB_TYPE:
+ case ELS_IOCB_TYPE:
+ case ABORT_IOCB_TYPE:
+ case MBX_IOCB_TYPE:
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ if (sp) {
+ sp->done(sp, res);
+ return 0;
+ }
+ break;
+
+ case ABTS_RESP_24XX:
+ case CTIO_TYPE7:
+ case CTIO_CRC2:
+ default:
+ return 1;
}
fatal:
ql_log(ql_log_warn, vha, 0x5030,
"Error entry - invalid handle/queue (%04x).\n", que);
+ return 0;
}
/**
@@ -2708,6 +2827,21 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
sp->done(sp, 0);
}
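+/* Completion handler for PT_LS4 (NVMe LS) IOCBs on the response queue. */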
+void qla24xx_nvme_ls4_iocb(scsi_qla_host_t *vha, struct pt_ls4_request *pkt,
+ struct req_que *req)
+{
+ srb_t *sp;
+ const char func[] = "LS4_IOCB";
+ uint16_t comp_status;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ if (!sp)
+ return;
+
+ comp_status = le16_to_cpu(pkt->status);
+ sp->done(sp, comp_status);
+}
+
/**
* qla24xx_process_response_queue() - Process response queue entries.
* @ha: SCSI driver HA context
@@ -2721,6 +2855,9 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
if (!ha->flags.fw_started)
return;
+ if (rsp->qpair->cpuid != smp_processor_id())
+ qla_cpu_update(rsp->qpair, smp_processor_id());
+
while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
@@ -2733,9 +2870,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
}
if (pkt->entry_status != 0) {
- qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
-
- if (qlt_24xx_process_response_error(vha, pkt))
+ if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
goto process_err;
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -2768,7 +2903,8 @@ process_err:
case ABTS_RECV_24XX:
if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
/* ensure that the ATIO queue is empty */
- qlt_handle_abts_recv(vha, (response_t *)pkt);
+ qlt_handle_abts_recv(vha, rsp,
+ (response_t *)pkt);
break;
} else {
/* drop through */
@@ -2777,11 +2913,16 @@ process_err:
case ABTS_RESP_24XX:
case CTIO_TYPE7:
case CTIO_CRC2:
- qlt_response_pkt_all_vps(vha, (response_t *)pkt);
+ qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
+ break;
+ case PT_LS4_REQUEST:
+ qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
+ rsp->req);
break;
case NOTIFY_ACK_TYPE:
if (pkt->handle == QLA_TGT_SKIP_HANDLE)
- qlt_response_pkt_all_vps(vha, (response_t *)pkt);
+ qlt_response_pkt_all_vps(vha, rsp,
+ (response_t *)pkt);
else
qla24xxx_nack_iocb_entry(vha, rsp->req,
(struct nack_to_isp *)pkt);
@@ -3156,10 +3297,10 @@ struct qla_init_msix_entry {
};
static const struct qla_init_msix_entry msix_entries[] = {
- { "qla2xxx (default)", qla24xx_msix_default },
- { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
- { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
- { "qla2xxx (qpair_multiq)", qla2xxx_msix_rsp_q },
+ { "default", qla24xx_msix_default },
+ { "rsp_q", qla24xx_msix_rsp_q },
+ { "atio_q", qla83xx_msix_atio_q },
+ { "qpair_multiq", qla2xxx_msix_rsp_q },
};
static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
@@ -3183,9 +3324,14 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
min_vecs++;
}
- ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
- ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
- &desc);
+ if (USER_CTRL_IRQ(ha)) {
+ /* user wants to control IRQ setting for target mode */
+ ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
+ ha->msix_count, PCI_IRQ_MSIX);
+ } else
+ ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
+ ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
+ &desc);
if (ret < 0) {
ql_log(ql_log_fatal, vha, 0x00c7,
@@ -3239,7 +3385,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
qentry->handle = rsp;
rsp->msix = qentry;
scnprintf(qentry->name, sizeof(qentry->name),
- "%s", msix_entries[i].name);
+ "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
if (IS_P3P_TYPE(ha))
ret = request_irq(qentry->vector,
qla82xx_msix_entries[i].handler,
@@ -3247,7 +3393,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
else
ret = request_irq(qentry->vector,
msix_entries[i].handler,
- 0, msix_entries[i].name, rsp);
+ 0, qentry->name, rsp);
if (ret)
goto msix_register_fail;
qentry->have_irq = 1;
@@ -3263,11 +3409,12 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
rsp->msix = qentry;
qentry->handle = rsp;
scnprintf(qentry->name, sizeof(qentry->name),
- "%s", msix_entries[QLA_ATIO_VECTOR].name);
+ "qla2xxx%lu_%s", vha->host_no,
+ msix_entries[QLA_ATIO_VECTOR].name);
qentry->in_use = 1;
ret = request_irq(qentry->vector,
msix_entries[QLA_ATIO_VECTOR].handler,
- 0, msix_entries[QLA_ATIO_VECTOR].name, rsp);
+ 0, qentry->name, rsp);
qentry->have_irq = 1;
}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index cba1fc5e8be9..7c6d1a404011 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -124,8 +124,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
}
/* if PCI error, then avoid mbx processing.*/
- if (test_bit(PCI_ERR, &base_vha->dpc_flags)) {
- ql_log(ql_log_warn, vha, 0x1191,
+ if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
+ test_bit(UNLOADING, &base_vha->dpc_flags)) {
+ ql_log(ql_log_warn, vha, 0xd04e,
"PCI error, exiting.\n");
return QLA_FUNCTION_TIMEOUT;
}
@@ -169,7 +170,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
*/
if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
/* Timeout occurred. Return error. */
- ql_log(ql_log_warn, vha, 0x1005,
+ ql_log(ql_log_warn, vha, 0xd035,
"Cmd access timeout, cmd=0x%x, Exiting.\n",
mcp->mb[0]);
return QLA_FUNCTION_TIMEOUT;
@@ -317,7 +318,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
ha->mcp = NULL;
rval = QLA_FUNCTION_FAILED;
- ql_log(ql_log_warn, vha, 0x1015,
+ ql_log(ql_log_warn, vha, 0xd048,
"FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
goto premature_exit;
}
@@ -359,7 +360,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
host_status = RD_REG_DWORD(&reg->isp24.host_status);
hccr = RD_REG_DWORD(&reg->isp24.hccr);
- ql_log(ql_log_warn, vha, 0x1119,
+ ql_log(ql_log_warn, vha, 0xd04c,
"MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
"mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
@@ -384,8 +385,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
* then only PCI ERR flag would be set.
* we will do premature exit for above case.
*/
- if (test_bit(UNLOADING, &base_vha->dpc_flags))
- set_bit(PCI_ERR, &base_vha->dpc_flags);
ha->flags.mbox_busy = 0;
rval = QLA_FUNCTION_TIMEOUT;
goto premature_exit;
@@ -561,6 +560,8 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
}
#define EXTENDED_BB_CREDITS BIT_0
+#define NVME_ENABLE_FLAG BIT_3
+
/*
* qla2x00_execute_fw
* Start adapter firmware.
@@ -602,6 +603,9 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
} else
mcp->mb[4] = 0;
+ if (ql2xnvmeenable && IS_QLA27XX(ha))
+ mcp->mb[4] |= NVME_ENABLE_FLAG;
+
if (ha->flags.exlogins_enabled)
mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
@@ -822,7 +826,7 @@ qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
*/
#define CONFIG_XCHOFFLD_MEM 0x3
int
-qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
+qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
{
int rval;
mbx_cmd_t mc;
@@ -835,12 +839,12 @@ qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
- mcp->mb[2] = MSW(phys_addr);
- mcp->mb[3] = LSW(phys_addr);
- mcp->mb[6] = MSW(MSD(phys_addr));
- mcp->mb[7] = LSW(MSD(phys_addr));
- mcp->mb[8] = MSW(ha->exlogin_size);
- mcp->mb[9] = LSW(ha->exlogin_size);
+ mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
+ mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
+ mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
+ mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
+ mcp->mb[8] = MSW(ha->exchoffld_size);
+ mcp->mb[9] = LSW(ha->exchoffld_size);
mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_11|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
@@ -942,6 +946,22 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
"%s: Firmware supports Exchange Offload 0x%x\n",
__func__, ha->fw_attributes_h);
+
+ /* bit 26 of fw_attributes (bit 10 of fw_attributes_h) */
+ if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable) {
+ struct init_cb_24xx *icb;
+
+ icb = (struct init_cb_24xx *)ha->init_cb;
+ /*
+ * Firmware supports NVMe and the driver load
+ * parameter requested NVMe.
+ */
+ vha->flags.nvme_enabled = 1;
+ icb->firmware_options_2 &= cpu_to_le32(~0xf);
+ ha->zio_mode = 0;
+ ha->zio_timer = 0;
+ }
+
}
if (IS_QLA27XX(ha)) {
@@ -1049,6 +1069,8 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
mcp->in_mb = MBX_0;
if (IS_FWI2_CAPABLE(vha->hw)) {
mcp->in_mb |= MBX_1;
+ mcp->mb[10] = fwopts[10];
+ mcp->out_mb |= MBX_10;
} else {
mcp->mb[10] = fwopts[10];
mcp->mb[11] = fwopts[11];
@@ -3213,7 +3235,7 @@ qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
if (!IS_QLA8044(vha->hw))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1186,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
"Entered %s.\n", __func__);
mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
@@ -3229,7 +3251,7 @@ qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x1187,
+ ql_dbg(ql_dbg_mbx, vha, 0x11a1,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
@@ -3713,12 +3735,12 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
} else if (rptid_entry->format == 2) {
- ql_dbg(ql_dbg_async, vha, 0xffff,
+ ql_dbg(ql_dbg_async, vha, 0x505f,
"RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]);
- ql_dbg(ql_dbg_async, vha, 0xffff,
+ ql_dbg(ql_dbg_async, vha, 0x5075,
"N2N: Remote WWPN %8phC.\n",
rptid_entry->u.f2.port_name);
@@ -3871,7 +3893,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
rval = QLA_FUNCTION_FAILED;
} else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
ql_dbg(ql_dbg_mbx, vha, 0x10c5,
- "Failed to complet IOCB -- completion status (%x).\n",
+ "Failed to complete IOCB -- completion status (%x).\n",
le16_to_cpu(vce->comp_status));
rval = QLA_FUNCTION_FAILED;
} else {
@@ -5790,7 +5812,7 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
"Entered %s.\n", __func__);
dd_dma = dma_map_single(&vha->hw->pdev->dev,
@@ -5872,13 +5894,13 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ ql_dbg(ql_dbg_mbx, vha, 0x1018,
"%s: %s Failed submission. %x.\n",
__func__, sp->name, rval);
goto done_free_sp;
}
- ql_dbg(ql_dbg_mbx, vha, 0xffff, "MB:%s hndl %x submitted\n",
+ ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
sp->name, sp->handle);
wait_for_completion(&c->u.mbx.comp);
@@ -5887,16 +5909,16 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
rval = c->u.mbx.rc;
switch (rval) {
case QLA_FUNCTION_TIMEOUT:
- ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Timeout. %x.\n",
+ ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
__func__, sp->name, rval);
break;
case QLA_SUCCESS:
- ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s done.\n",
+ ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
__func__, sp->name);
sp->free(sp);
break;
default:
- ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Failed. %x.\n",
+ ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
__func__, sp->name, rval);
sp->free(sp);
break;
@@ -5927,8 +5949,8 @@ int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
if (pd == NULL) {
- ql_log(ql_log_warn, vha, 0xffff,
- "Failed to allocate port database structure.\n");
+ ql_log(ql_log_warn, vha, 0xd047,
+ "Failed to allocate port database structure.\n");
goto done_free_sp;
}
memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
@@ -5945,14 +5967,14 @@ int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
rval = qla24xx_send_mb_cmd(vha, &mc);
if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ ql_dbg(ql_dbg_mbx, vha, 0x1193,
"%s: %8phC fail\n", __func__, fcport->port_name);
goto done_free_sp;
}
rval = __qla24xx_parse_gpdb(vha, fcport, pd);
- ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %8phC done\n",
+ ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
__func__, fcport->port_name);
done_free_sp:
@@ -5967,14 +5989,22 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
{
int rval = QLA_SUCCESS;
uint64_t zero = 0;
+ u8 current_login_state, last_login_state;
+
+ if (fcport->fc4f_nvme) {
+ current_login_state = pd->current_login_state >> 4;
+ last_login_state = pd->last_login_state >> 4;
+ } else {
+ current_login_state = pd->current_login_state & 0xf;
+ last_login_state = pd->last_login_state & 0xf;
+ }
/* Check for logged in state. */
- if (pd->current_login_state != PDS_PRLI_COMPLETE &&
- pd->last_login_state != PDS_PRLI_COMPLETE) {
- ql_dbg(ql_dbg_mbx, vha, 0xffff,
- "Unable to verify login-state (%x/%x) for "
- "loop_id %x.\n", pd->current_login_state,
- pd->last_login_state, fcport->loop_id);
+ if (current_login_state != PDS_PRLI_COMPLETE &&
+ last_login_state != PDS_PRLI_COMPLETE) {
+ ql_dbg(ql_dbg_mbx, vha, 0x119a,
+ "Unable to verify login-state (%x/%x) for loop_id %x.\n",
+ current_login_state, last_login_state, fcport->loop_id);
rval = QLA_FUNCTION_FAILED;
goto gpd_error_out;
}
@@ -5997,12 +6027,17 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
fcport->d_id.b.al_pa = pd->port_id[2];
fcport->d_id.b.rsvd_1 = 0;
- /* If not target must be initiator or unknown type. */
- if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
- fcport->port_type = FCT_INITIATOR;
- else
- fcport->port_type = FCT_TARGET;
-
+ if (fcport->fc4f_nvme) {
+ fcport->nvme_prli_service_param =
+ pd->prli_nvme_svc_param_word_3;
+ fcport->port_type = FCT_NVME;
+ } else {
+ /* If not target must be initiator or unknown type. */
+ if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
+ fcport->port_type = FCT_INITIATOR;
+ else
+ fcport->port_type = FCT_TARGET;
+ }
/* Passback COS information. */
fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
FC_COS_CLASS2 : FC_COS_CLASS3;
@@ -6040,12 +6075,12 @@ int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
rval = qla24xx_send_mb_cmd(vha, &mc);
if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0xffff,
- "%s: fail\n", __func__);
+ ql_dbg(ql_dbg_mbx, vha, 0x119b,
+ "%s: fail\n", __func__);
} else {
*entries = mc.mb[1];
- ql_dbg(ql_dbg_mbx, vha, 0xffff,
- "%s: done\n", __func__);
+ ql_dbg(ql_dbg_mbx, vha, 0x119c,
+ "%s: done\n", __func__);
}
done:
return rval;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 09a490c98763..f0605cd196fb 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -640,11 +640,12 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
- uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
+ uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp)
{
int ret = 0;
struct req_que *req = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
uint16_t que_id = 0;
device_reg_t *reg;
uint32_t cnt;
@@ -731,14 +732,17 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
req->ring_ptr, req->ring_index, req->cnt,
req->id, req->max_q_depth);
- ret = qla25xx_init_req_que(base_vha, req);
- if (ret != QLA_SUCCESS) {
- ql_log(ql_log_fatal, base_vha, 0x00df,
- "%s failed.\n", __func__);
- mutex_lock(&ha->mq_lock);
- clear_bit(que_id, ha->req_qid_map);
- mutex_unlock(&ha->mq_lock);
- goto que_failed;
+ if (startqp) {
+ ret = qla25xx_init_req_que(base_vha, req);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, base_vha, 0x00df,
+ "%s failed.\n", __func__);
+ mutex_lock(&ha->mq_lock);
+ clear_bit(que_id, ha->req_qid_map);
+ mutex_unlock(&ha->mq_lock);
+ goto que_failed;
+ }
+ vha->flags.qpairs_req_created = 1;
}
return req->id;
@@ -765,11 +769,12 @@ static void qla_do_work(struct work_struct *work)
/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
- uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair)
+ uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp)
{
int ret = 0;
struct rsp_que *rsp = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
uint16_t que_id = 0;
device_reg_t *reg;
@@ -843,14 +848,17 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
if (ret)
goto que_failed;
- ret = qla25xx_init_rsp_que(base_vha, rsp);
- if (ret != QLA_SUCCESS) {
- ql_log(ql_log_fatal, base_vha, 0x00e7,
- "%s failed.\n", __func__);
- mutex_lock(&ha->mq_lock);
- clear_bit(que_id, ha->rsp_qid_map);
- mutex_unlock(&ha->mq_lock);
- goto que_failed;
+ if (startqp) {
+ ret = qla25xx_init_rsp_que(base_vha, rsp);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, base_vha, 0x00e7,
+ "%s failed.\n", __func__);
+ mutex_lock(&ha->mq_lock);
+ clear_bit(que_id, ha->rsp_qid_map);
+ mutex_unlock(&ha->mq_lock);
+ goto que_failed;
+ }
+ vha->flags.qpairs_rsp_created = 1;
}
rsp->req = NULL;
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
new file mode 100644
index 000000000000..f3710a75fe1f
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -0,0 +1,761 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2017 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_nvme.h"
+#include "qla_def.h"
+#include <linux/scatterlist.h>
+#include <linux/delay.h>
+#include <linux/nvme.h>
+#include <linux/nvme-fc.h>
+
+static struct nvme_fc_port_template qla_nvme_fc_transport;
+
+static void qla_nvme_unregister_remote_port(struct work_struct *);
+
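+/*
+ * Register a remote port with the FC-NVMe transport once its PRLI
+ * service parameters advertise an NVMe target or discovery role.
+ */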
+int qla_nvme_register_remote(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ struct nvme_rport *rport;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_NVME_FC))
+ return 0;
+
+ if (fcport->nvme_flag & NVME_FLAG_REGISTERED)
+ return 0;
+
+ if (!vha->flags.nvme_enabled) {
+ ql_log(ql_log_info, vha, 0x2100,
+ "%s: Not registering target since Host NVME is not enabled\n",
+ __func__);
+ return 0;
+ }
+
+ if (!(fcport->nvme_prli_service_param &
+ (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)))
+ return 0;
+
+ INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port);
+ rport = kzalloc(sizeof(*rport), GFP_KERNEL);
+ if (!rport) {
+ ql_log(ql_log_warn, vha, 0x2101,
+ "%s: unable to alloc memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ rport->req.port_name = wwn_to_u64(fcport->port_name);
+ rport->req.node_name = wwn_to_u64(fcport->node_name);
+ rport->req.port_role = 0;
+
+ if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
+ rport->req.port_role = FC_PORT_ROLE_NVME_INITIATOR;
+
+ if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
+ rport->req.port_role |= FC_PORT_ROLE_NVME_TARGET;
+
+ if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
+ rport->req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
+
+ rport->req.port_id = fcport->d_id.b24;
+
+ ql_log(ql_log_info, vha, 0x2102,
+ "%s: traddr=pn-0x%016llx:nn-0x%016llx PortID:%06x\n",
+ __func__, rport->req.port_name, rport->req.node_name,
+ rport->req.port_id);
+
+ ret = nvme_fc_register_remoteport(vha->nvme_local_port, &rport->req,
+ &fcport->nvme_remote_port);
+ if (ret) {
+ ql_log(ql_log_warn, vha, 0x212e,
+ "Failed to register remote port. Transport returned %d\n",
+ ret);
+ return ret;
+ }
+
+ fcport->nvme_remote_port->private = fcport;
+ fcport->nvme_flag |= NVME_FLAG_REGISTERED;
+ atomic_set(&fcport->nvme_ref_count, 1);
+ init_waitqueue_head(&fcport->nvme_waitQ);
+ rport->fcport = fcport;
+ list_add_tail(&rport->list, &vha->nvme_rport_list);
+ return 0;
+}
+
+/* Allocate a queue for NVMe traffic */
+static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
+ unsigned int qidx, u16 qsize, void **handle)
+{
+ struct scsi_qla_host *vha;
+ struct qla_hw_data *ha;
+ struct qla_qpair *qpair;
+
+ if (!qidx)
+ qidx++;
+
+ vha = (struct scsi_qla_host *)lport->private;
+ ha = vha->hw;
+
+ ql_log(ql_log_info, vha, 0x2104,
+ "%s: handle %p, idx =%d, qsize %d\n",
+ __func__, handle, qidx, qsize);
+
+ if (qidx > qla_nvme_fc_transport.max_hw_queues) {
+ ql_log(ql_log_warn, vha, 0x212f,
+ "%s: Illegal qidx=%d. Max=%d\n",
+ __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
+ return -EINVAL;
+ }
+
+ if (ha->queue_pair_map[qidx]) {
+ *handle = ha->queue_pair_map[qidx];
+ ql_log(ql_log_info, vha, 0x2121,
+ "Returning existing qpair of %p for idx=%x\n",
+ *handle, qidx);
+ return 0;
+ }
+
+ ql_log(ql_log_warn, vha, 0xffff,
+ "allocating q for idx=%x w/o cpu mask\n", qidx);
+ qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
+ if (qpair == NULL) {
+ ql_log(ql_log_warn, vha, 0x2122,
+ "Failed to allocate qpair\n");
+ return -EINVAL;
+ }
+ *handle = qpair;
+
+ return 0;
+}
+
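+/*
+ * LS request completion: stash the status and defer fd->done() to a
+ * workqueue via priv->ls_work.
+ */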
+static void qla_nvme_sp_ls_done(void *ptr, int res)
+{
+ srb_t *sp = ptr;
+ struct srb_iocb *nvme;
+ struct nvmefc_ls_req *fd;
+ struct nvme_private *priv;
+
+ if (atomic_read(&sp->ref_count) == 0) {
+ ql_log(ql_log_warn, sp->fcport->vha, 0x2123,
+ "SP reference-count to ZERO on LS_done -- sp=%p.\n", sp);
+ return;
+ }
+
+ if (!atomic_dec_and_test(&sp->ref_count))
+ return;
+
+ if (res)
+ res = -EINVAL;
+
+ nvme = &sp->u.iocb_cmd;
+ fd = nvme->u.nvme.desc;
+ priv = fd->private;
+ priv->comp_status = res;
+ schedule_work(&priv->ls_work);
+ /* work schedule doesn't need the sp */
+ qla2x00_rel_sp(sp);
+}
+
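+/*
+ * NVMe FCP command completion: report status and response length to
+ * the transport, then release the srb back to its qpair.
+ */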
+static void qla_nvme_sp_done(void *ptr, int res)
+{
+ srb_t *sp = ptr;
+ struct srb_iocb *nvme;
+ struct nvmefc_fcp_req *fd;
+
+ nvme = &sp->u.iocb_cmd;
+ fd = nvme->u.nvme.desc;
+
+ if (!atomic_dec_and_test(&sp->ref_count))
+ return;
+
+ if (!(sp->fcport->nvme_flag & NVME_FLAG_REGISTERED))
+ goto rel;
+
+ if (unlikely(nvme->u.nvme.comp_status || res))
+ fd->status = -EINVAL;
+ else
+ fd->status = 0;
+
+ fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
+ fd->done(fd);
+rel:
+ qla2xxx_rel_qpair_sp(sp->qpair, sp);
+}
+
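+/* Ask the firmware to abort an outstanding NVMe LS request. */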
+static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
+ struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
+{
+ struct nvme_private *priv = fd->private;
+ fc_port_t *fcport = rport->private;
+ srb_t *sp = priv->sp;
+ int rval;
+ struct qla_hw_data *ha = fcport->vha->hw;
+
+ rval = ha->isp_ops->abort_command(sp);
+ if (rval != QLA_SUCCESS)
+ ql_log(ql_log_warn, fcport->vha, 0x2125,
+ "%s: failed to abort LS command for SP:%p rval=%x\n",
+ __func__, sp, rval);
+
+ ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
+ "%s: aborted sp:%p on fcport:%p\n", __func__, sp, fcport);
+}
+
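+/* Worker that delivers a deferred LS completion to the transport. */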
+static void qla_nvme_ls_complete(struct work_struct *work)
+{
+ struct nvme_private *priv =
+ container_of(work, struct nvme_private, ls_work);
+ struct nvmefc_ls_req *fd = priv->fd;
+
+ fd->done(fd, priv->comp_status);
+}
+
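+/*
+ * Issue an NVMe LS request: DMA-map the request payload, wrap it in an
+ * SRB_NVME_LS srb and hand it to qla2x00_start_sp().
+ */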
+static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
+ struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
+{
+ fc_port_t *fcport = (fc_port_t *)rport->private;
+ struct srb_iocb *nvme;
+ struct nvme_private *priv = fd->private;
+ struct scsi_qla_host *vha;
+ int rval = QLA_FUNCTION_FAILED;
+ struct qla_hw_data *ha;
+ srb_t *sp;
+
+ if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
+ return rval;
+
+ vha = fcport->vha;
+ ha = vha->hw;
+ /* Alloc SRB structure */
+ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+ if (!sp)
+ return rval;
+
+ sp->type = SRB_NVME_LS;
+ sp->name = "nvme_ls";
+ sp->done = qla_nvme_sp_ls_done;
+ atomic_set(&sp->ref_count, 1);
+ init_waitqueue_head(&sp->nvme_ls_waitQ);
+ nvme = &sp->u.iocb_cmd;
+ priv->sp = sp;
+ priv->fd = fd;
+ INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
+ nvme->u.nvme.desc = fd;
+ nvme->u.nvme.dir = 0;
+ nvme->u.nvme.dl = 0;
+ nvme->u.nvme.cmd_len = fd->rqstlen;
+ nvme->u.nvme.rsp_len = fd->rsplen;
+ nvme->u.nvme.rsp_dma = fd->rspdma;
+ nvme->u.nvme.timeout_sec = fd->timeout;
+ nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
+ fd->rqstlen, DMA_TO_DEVICE);
+ dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
+ fd->rqstlen, DMA_TO_DEVICE);
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x700e,
+ "qla2x00_start_sp failed = %d\n", rval);
+ atomic_dec(&sp->ref_count);
+ wake_up(&sp->nvme_ls_waitQ);
+ return rval;
+ }
+
+ return rval;
+}
+
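+/* Ask the firmware to abort an outstanding NVMe FCP command. */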
+static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
+ struct nvme_fc_remote_port *rport, void *hw_queue_handle,
+ struct nvmefc_fcp_req *fd)
+{
+ struct nvme_private *priv = fd->private;
+ srb_t *sp = priv->sp;
+ int rval;
+ fc_port_t *fcport = rport->private;
+ struct qla_hw_data *ha = fcport->vha->hw;
+
+ rval = ha->isp_ops->abort_command(sp);
+ if (rval != QLA_SUCCESS)
+ ql_log(ql_log_warn, fcport->vha, 0x2127,
+ "%s: failed to abort command for SP:%p rval=%x\n",
+ __func__, sp, rval);
+
+ ql_dbg(ql_dbg_io, fcport->vha, 0x2126,
+ "%s: aborted sp:%p on fcport:%p\n", __func__, sp, fcport);
+}
+
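+/* Transport poll hook: drain the qpair's response queue under its lock. */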
+static void qla_nvme_poll(struct nvme_fc_local_port *lport,
+ void *hw_queue_handle)
+{
+ struct scsi_qla_host *vha = lport->private;
+ unsigned long flags;
+ struct qla_qpair *qpair = (struct qla_qpair *)hw_queue_handle;
+
+ /* Acquire ring specific lock */
+ spin_lock_irqsave(&qpair->qp_lock, flags);
+ qla24xx_process_response_queue(vha, qpair->rsp);
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+}
+
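+/*
+ * Build a Command Type NVME IOCB (plus any Continuation Type 1 IOCBs
+ * for extra data segments) and post it on the qpair's request ring.
+ */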
+static int qla2x00_start_nvme_mq(srb_t *sp)
+{
+ unsigned long flags;
+ uint32_t *clr_ptr;
+ uint32_t index;
+ uint32_t handle;
+ struct cmd_nvme *cmd_pkt;
+ uint16_t cnt, i;
+ uint16_t req_cnt;
+ uint16_t tot_dsds;
+ uint16_t avail_dsds;
+ uint32_t *cur_dsd;
+ struct req_que *req = NULL;
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_qpair *qpair = sp->qpair;
+ struct srb_iocb *nvme = &sp->u.iocb_cmd;
+ struct scatterlist *sgl, *sg;
+ struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
+ uint32_t rval = QLA_SUCCESS;
+
+ /* Setup qpair pointers */
+ req = qpair->req;
+ tot_dsds = fd->sg_cnt;
+
+ /* Acquire qpair specific lock */
+ spin_lock_irqsave(&qpair->qp_lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
+ handle++;
+ if (handle == req->num_outstanding_cmds)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+
+ if (index == req->num_outstanding_cmds) {
+ rval = -1;
+ goto queuing_error;
+ }
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+ RD_REG_DWORD_RELAXED(req->req_q_out);
+
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length - (req->ring_index - cnt);
+
+ if (req->cnt < (req_cnt + 2)) {
+ rval = -1;
+ goto queuing_error;
+ }
+ }
+
+ if (unlikely(!fd->sqid)) {
+ struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
+
+ if (cmd->sqe.common.opcode == nvme_admin_async_event) {
+ nvme->u.nvme.aen_op = 1;
+ atomic_inc(&vha->nvme_active_aen_cnt);
+ }
+ }
+
+ /* Build command packet. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ req->cnt -= req_cnt;
+
+ cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ /* Zero out remaining portion of packet. */
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+
+ cmd_pkt->entry_status = 0;
+
+ /* Update entry type to indicate Command NVME IOCB */
+ cmd_pkt->entry_type = COMMAND_NVME;
+
+ /* No data transfer; how do we check buffer len == 0? */
+ if (fd->io_dir == NVMEFC_FCP_READ) {
+ cmd_pkt->control_flags =
+ cpu_to_le16(CF_READ_DATA | CF_NVME_ENABLE);
+ vha->qla_stats.input_bytes += fd->payload_length;
+ vha->qla_stats.input_requests++;
+ } else if (fd->io_dir == NVMEFC_FCP_WRITE) {
+ cmd_pkt->control_flags =
+ cpu_to_le16(CF_WRITE_DATA | CF_NVME_ENABLE);
+ vha->qla_stats.output_bytes += fd->payload_length;
+ vha->qla_stats.output_requests++;
+ } else if (fd->io_dir == 0) {
+ cmd_pkt->control_flags = cpu_to_le16(CF_NVME_ENABLE);
+ }
+
+ /* Set NPORT-ID */
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+
+ /* NVME RSP IU */
+ cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
+ cmd_pkt->nvme_rsp_dseg_address[0] = cpu_to_le32(LSD(fd->rspdma));
+ cmd_pkt->nvme_rsp_dseg_address[1] = cpu_to_le32(MSD(fd->rspdma));
+
+ /* NVME CNMD IU */
+ cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
+ cmd_pkt->nvme_cmnd_dseg_address[0] = cpu_to_le32(LSD(fd->cmddma));
+ cmd_pkt->nvme_cmnd_dseg_address[1] = cpu_to_le32(MSD(fd->cmddma));
+
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+ cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);
+
+ /* One DSD is available in the Command Type NVME IOCB */
+ avail_dsds = 1;
+ cur_dsd = (uint32_t *)&cmd_pkt->nvme_data_dseg_address[0];
+ sgl = fd->first_sgl;
+
+ /* Load data segments */
+ for_each_sg(sgl, sg, tot_dsds, i) {
+ dma_addr_t sle_dma;
+ cont_a64_entry_t *cont_pkt;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ /*
+ * Five DSDs are available in the Continuation
+ * Type 1 IOCB.
+ */
+
+ /* Adjust ring index */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else {
+ req->ring_ptr++;
+ }
+ cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
+ *((uint32_t *)(&cont_pkt->entry_type)) =
+ cpu_to_le32(CONTINUE_A64_TYPE);
+
+ cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
+ avail_dsds = 5;
+ }
+
+ sle_dma = sg_dma_address(sg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
+ }
+
+ /* Set total entry count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ wmb();
+
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else {
+ req->ring_ptr++;
+ }
+
+ /* Set chip new ring index. */
+ WRT_REG_DWORD(req->req_q_in, req->ring_index);
+
+queuing_error:
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+ return rval;
+}
+
+/* Post an NVMe FCP command on the transport-selected qpair. */
+static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
+ struct nvme_fc_remote_port *rport, void *hw_queue_handle,
+ struct nvmefc_fcp_req *fd)
+{
+ fc_port_t *fcport;
+ struct srb_iocb *nvme;
+ struct scsi_qla_host *vha;
+ int rval = QLA_FUNCTION_FAILED;
+ srb_t *sp;
+ struct qla_qpair *qpair = (struct qla_qpair *)hw_queue_handle;
+ struct nvme_private *priv;
+
+ if (!fd) {
+		ql_log(ql_log_warn, NULL, 0x2134, "No NVMe FCP request\n");
+ return rval;
+ }
+
+ priv = fd->private;
+ fcport = (fc_port_t *)rport->private;
+ if (!fcport) {
+ ql_log(ql_log_warn, NULL, 0x210e, "No fcport ptr\n");
+ return rval;
+ }
+
+ vha = fcport->vha;
+ if ((!qpair) || (!(fcport->nvme_flag & NVME_FLAG_REGISTERED)))
+ return -EBUSY;
+
+ /* Alloc SRB structure */
+ sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
+ if (!sp)
+ return -EIO;
+
+ atomic_set(&sp->ref_count, 1);
+ init_waitqueue_head(&sp->nvme_ls_waitQ);
+ priv->sp = sp;
+ sp->type = SRB_NVME_CMD;
+ sp->name = "nvme_cmd";
+ sp->done = qla_nvme_sp_done;
+ sp->qpair = qpair;
+ nvme = &sp->u.iocb_cmd;
+ nvme->u.nvme.desc = fd;
+
+ rval = qla2x00_start_nvme_mq(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x212d,
+ "qla2x00_start_nvme_mq failed = %d\n", rval);
+ atomic_dec(&sp->ref_count);
+ wake_up(&sp->nvme_ls_waitQ);
+ return -EIO;
+ }
+
+ return rval;
+}
+
+static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
+{
+ struct scsi_qla_host *vha = lport->private;
+
+ atomic_dec(&vha->nvme_ref_count);
+ wake_up_all(&vha->nvme_waitQ);
+
+ ql_log(ql_log_info, vha, 0x210f,
+ "localport delete of %p completed.\n", vha->nvme_local_port);
+ vha->nvme_local_port = NULL;
+}
+
+static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
+{
+ fc_port_t *fcport;
+ struct nvme_rport *r_port, *trport;
+
+ fcport = (fc_port_t *)rport->private;
+ fcport->nvme_remote_port = NULL;
+ fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
+ atomic_dec(&fcport->nvme_ref_count);
+ wake_up_all(&fcport->nvme_waitQ);
+
+ list_for_each_entry_safe(r_port, trport,
+ &fcport->vha->nvme_rport_list, list) {
+		if (r_port->fcport == fcport) {
+			list_del(&r_port->list);
+			kfree(r_port);
+			break;
+		}
+	}
+
+ ql_log(ql_log_info, fcport->vha, 0x2110,
+ "remoteport_delete of %p completed.\n", fcport);
+}
+
+static struct nvme_fc_port_template qla_nvme_fc_transport = {
+ .localport_delete = qla_nvme_localport_delete,
+ .remoteport_delete = qla_nvme_remoteport_delete,
+ .create_queue = qla_nvme_alloc_queue,
+ .delete_queue = NULL,
+ .ls_req = qla_nvme_ls_req,
+ .ls_abort = qla_nvme_ls_abort,
+ .fcp_io = qla_nvme_post_cmd,
+ .fcp_abort = qla_nvme_fcp_abort,
+ .poll_queue = qla_nvme_poll,
+ .max_hw_queues = 8,
+ .max_sgl_segments = 128,
+ .max_dif_sgl_segments = 64,
+ .dma_boundary = 0xFFFFFFFF,
+ .local_priv_sz = 8,
+ .remote_priv_sz = 0,
+ .lsrqst_priv_sz = sizeof(struct nvme_private),
+ .fcprqst_priv_sz = sizeof(struct nvme_private),
+};
+
+#define NVME_ABORT_POLLING_PERIOD 2
+static int qla_nvme_wait_on_command(srb_t *sp)
+{
+ int ret = QLA_SUCCESS;
+
+ wait_event_timeout(sp->nvme_ls_waitQ, (atomic_read(&sp->ref_count) > 1),
+ NVME_ABORT_POLLING_PERIOD*HZ);
+
+ if (atomic_read(&sp->ref_count) > 1)
+ ret = QLA_FUNCTION_FAILED;
+
+ return ret;
+}
+
+static int qla_nvme_wait_on_rport_del(fc_port_t *fcport)
+{
+ int ret = QLA_SUCCESS;
+
+ wait_event_timeout(fcport->nvme_waitQ,
+ atomic_read(&fcport->nvme_ref_count),
+ NVME_ABORT_POLLING_PERIOD*HZ);
+
+ if (atomic_read(&fcport->nvme_ref_count)) {
+ ret = QLA_FUNCTION_FAILED;
+ ql_log(ql_log_info, fcport->vha, 0x2111,
+ "timed out waiting for fcport=%p to delete\n", fcport);
+ }
+
+ return ret;
+}
+
+void qla_nvme_abort(struct qla_hw_data *ha, srb_t *sp)
+{
+ int rval;
+
+ rval = ha->isp_ops->abort_command(sp);
+ if (!rval) {
+ if (!qla_nvme_wait_on_command(sp))
+ ql_log(ql_log_warn, NULL, 0x2112,
+ "nvme_wait_on_command timed out waiting on sp=%p\n",
+ sp);
+ }
+}
+
+static void qla_nvme_abort_all(fc_port_t *fcport)
+{
+ int que, cnt;
+ unsigned long flags;
+ srb_t *sp;
+ struct qla_hw_data *ha = fcport->vha->hw;
+ struct req_que *req;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (que = 0; que < ha->max_req_queues; que++) {
+ req = ha->req_q_map[que];
+ if (!req)
+ continue;
+ if (!req->outstanding_cmds)
+ continue;
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if ((sp) && ((sp->type == SRB_NVME_CMD) ||
+ (sp->type == SRB_NVME_LS)) &&
+ (sp->fcport == fcport)) {
+ atomic_inc(&sp->ref_count);
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
+ qla_nvme_abort(ha, sp);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ req->outstanding_cmds[cnt] = NULL;
+ sp->done(sp, 1);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void qla_nvme_unregister_remote_port(struct work_struct *work)
+{
+ struct fc_port *fcport = container_of(work, struct fc_port,
+ nvme_del_work);
+ struct nvme_rport *rport, *trport;
+
+ if (!IS_ENABLED(CONFIG_NVME_FC))
+ return;
+
+ list_for_each_entry_safe(rport, trport,
+ &fcport->vha->nvme_rport_list, list) {
+ if (rport->fcport == fcport) {
+ ql_log(ql_log_info, fcport->vha, 0x2113,
+ "%s: fcport=%p\n", __func__, fcport);
+ nvme_fc_unregister_remoteport(
+ fcport->nvme_remote_port);
+ }
+ }
+}
+
+void qla_nvme_delete(scsi_qla_host_t *vha)
+{
+ struct nvme_rport *rport, *trport;
+ fc_port_t *fcport;
+ int nv_ret;
+
+ if (!IS_ENABLED(CONFIG_NVME_FC))
+ return;
+
+ list_for_each_entry_safe(rport, trport, &vha->nvme_rport_list, list) {
+ fcport = rport->fcport;
+
+ ql_log(ql_log_info, fcport->vha, 0x2114, "%s: fcport=%p\n",
+ __func__, fcport);
+
+ nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
+ qla_nvme_wait_on_rport_del(fcport);
+ qla_nvme_abort_all(fcport);
+ }
+
+ if (vha->nvme_local_port) {
+ nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
+ if (nv_ret == 0)
+ ql_log(ql_log_info, vha, 0x2116,
+ "unregistered localport=%p\n",
+ vha->nvme_local_port);
+ else
+ ql_log(ql_log_info, vha, 0x2115,
+ "Unregister of localport failed\n");
+ }
+}
+
+void qla_nvme_register_hba(scsi_qla_host_t *vha)
+{
+ struct nvme_fc_port_template *tmpl;
+ struct qla_hw_data *ha;
+ struct nvme_fc_port_info pinfo;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_NVME_FC))
+ return;
+
+ ha = vha->hw;
+ tmpl = &qla_nvme_fc_transport;
+
+ WARN_ON(vha->nvme_local_port);
+ WARN_ON(ha->max_req_queues < 3);
+
+ qla_nvme_fc_transport.max_hw_queues =
+ min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
+ (uint8_t)(ha->max_req_queues - 2));
+
+ pinfo.node_name = wwn_to_u64(vha->node_name);
+ pinfo.port_name = wwn_to_u64(vha->port_name);
+ pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
+ pinfo.port_id = vha->d_id.b24;
+
+ ql_log(ql_log_info, vha, 0xffff,
+ "register_localport: host-traddr=pn-0x%llx:nn-0x%llx on portID:%x\n",
+ pinfo.port_name, pinfo.node_name, pinfo.port_id);
+ qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;
+
+ ret = nvme_fc_register_localport(&pinfo, tmpl,
+ get_device(&ha->pdev->dev), &vha->nvme_local_port);
+ if (ret) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "register_localport failed: ret=%x\n", ret);
+ return;
+ }
+ atomic_set(&vha->nvme_ref_count, 1);
+ vha->nvme_local_port->private = vha;
+ init_waitqueue_head(&vha->nvme_waitQ);
+}
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
new file mode 100644
index 000000000000..dfe56f207b28
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -0,0 +1,132 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2017 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#ifndef __QLA_NVME_H
+#define __QLA_NVME_H
+
+#include <linux/blk-mq.h>
+#include <uapi/scsi/fc/fc_fs.h>
+#include <uapi/scsi/fc/fc_els.h>
+#include <linux/nvme-fc-driver.h>
+
+#define NVME_ATIO_CMD_OFF 32
+#define NVME_FIRST_PACKET_CMDLEN (64 - NVME_ATIO_CMD_OFF)
+#define Q2T_NVME_NUM_TAGS 2048
+#define QLA_MAX_FC_SEGMENTS 64
+
+struct srb;
+struct nvme_private {
+ struct srb *sp;
+ struct nvmefc_ls_req *fd;
+ struct work_struct ls_work;
+ int comp_status;
+};
+
+struct nvme_rport {
+ struct nvme_fc_port_info req;
+ struct list_head list;
+ struct fc_port *fcport;
+};
+
+#define COMMAND_NVME 0x88 /* Command Type FC-NVMe IOCB */
+struct cmd_nvme {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+ uint16_t nport_handle; /* N_PORT handle. */
+ uint16_t timeout; /* Command timeout. */
+
+ uint16_t dseg_count; /* Data segment count. */
+ uint16_t nvme_rsp_dsd_len; /* NVMe RSP DSD length */
+
+ uint64_t rsvd;
+
+ uint16_t control_flags; /* Control Flags */
+#define CF_NVME_ENABLE BIT_9
+#define CF_DIF_SEG_DESCR_ENABLE BIT_3
+#define CF_DATA_SEG_DESCR_ENABLE BIT_2
+#define CF_READ_DATA BIT_1
+#define CF_WRITE_DATA BIT_0
+
+ uint16_t nvme_cmnd_dseg_len; /* Data segment length. */
+ uint32_t nvme_cmnd_dseg_address[2]; /* Data segment address. */
+ uint32_t nvme_rsp_dseg_address[2]; /* Data segment address. */
+
+ uint32_t byte_count; /* Total byte count. */
+
+ uint8_t port_id[3]; /* PortID of destination port. */
+ uint8_t vp_index;
+
+ uint32_t nvme_data_dseg_address[2]; /* Data segment address. */
+ uint32_t nvme_data_dseg_len; /* Data segment length. */
+};
+
+#define PT_LS4_REQUEST 0x89 /* Link Service pass-through IOCB (request) */
+struct pt_ls4_request {
+ uint8_t entry_type;
+ uint8_t entry_count;
+ uint8_t sys_define;
+ uint8_t entry_status;
+ uint32_t handle;
+ uint16_t status;
+ uint16_t nport_handle;
+ uint16_t tx_dseg_count;
+ uint8_t vp_index;
+ uint8_t rsvd;
+ uint16_t timeout;
+ uint16_t control_flags;
+#define CF_LS4_SHIFT 13
+#define CF_LS4_ORIGINATOR 0
+#define CF_LS4_RESPONDER 1
+#define CF_LS4_RESPONDER_TERM 2
+
+ uint16_t rx_dseg_count;
+ uint16_t rsvd2;
+ uint32_t exchange_address;
+ uint32_t rsvd3;
+ uint32_t rx_byte_count;
+ uint32_t tx_byte_count;
+ uint32_t dseg0_address[2];
+ uint32_t dseg0_len;
+ uint32_t dseg1_address[2];
+ uint32_t dseg1_len;
+};
+
+#define PT_LS4_UNSOL 0x56 /* pass-up unsolicited rec FC-NVMe request */
+struct pt_ls4_rx_unsol {
+ uint8_t entry_type;
+ uint8_t entry_count;
+ uint16_t rsvd0;
+ uint16_t rsvd1;
+ uint8_t vp_index;
+ uint8_t rsvd2;
+ uint16_t rsvd3;
+ uint16_t nport_handle;
+ uint16_t frame_size;
+ uint16_t rsvd4;
+ uint32_t exchange_address;
+ uint8_t d_id[3];
+ uint8_t r_ctl;
+ uint8_t s_id[3];
+ uint8_t cs_ctl;
+ uint8_t f_ctl[3];
+ uint8_t type;
+ uint16_t seq_cnt;
+ uint8_t df_ctl;
+ uint8_t seq_id;
+ uint16_t rx_id;
+ uint16_t ox_id;
+ uint32_t param;
+ uint32_t desc0;
+#define PT_LS4_PAYLOAD_OFFSET 0x2c
+#define PT_LS4_FIRST_PACKET_LEN 20
+ uint32_t desc_len;
+ uint32_t payload[3];
+};
+#endif
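
Both IOCB layouts above are expected to fill exactly one 64-byte
request-queue entry, like every other qla2xxx IOCB (note that the command
path earlier zeroes REQUEST_ENTRY_SIZE - 8 bytes of tail). A hypothetical
compile-time check, assuming the usual 64-byte entry size and invoked once
from init code:

	#include <linux/bug.h>	/* BUILD_BUG_ON() */

	/* Hypothetical sanity check: the fields above must add up to one
	 * 64-byte ring entry for each FC-NVMe IOCB layout.
	 */
	static inline void qla_nvme_check_iocb_sizes(void)
	{
		BUILD_BUG_ON(sizeof(struct cmd_nvme) != 64);
		BUILD_BUG_ON(sizeof(struct pt_ls4_request) != 64);
	}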
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 0a1723cc08cf..a77c33987703 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -782,7 +782,7 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
(qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
write_unlock_irqrestore(&ha->hw_lock, flags);
ql_log(ql_log_fatal, vha, 0xb009,
- "%s out of bount memory "
+ "%s out of bound memory "
"access, offset is 0x%llx.\n",
QLA2XXX_DRIVER_NAME, off);
return -1;
@@ -4250,7 +4250,7 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_p3p, vha, 0xb040,
"[%s]: data ptr[%d]: %p, entry_hdr: %p\n"
- "entry_type: 0x%x, captrue_mask: 0x%x\n",
+ "entry_type: 0x%x, capture_mask: 0x%x\n",
__func__, i, data_ptr, entry_hdr,
entry_hdr->entry_type,
entry_hdr->d_ctrl.entry_capture_mask);
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 77624eac95a4..71a41093530e 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -7,6 +7,8 @@
#ifndef __QLA_NX_H
#define __QLA_NX_H
+#include <linux/io-64-nonatomic-lo-hi.h>
+
/*
* Following are the states of the Phantom. Phantom will set them and
* Host will read to check if the fields are correct.
@@ -819,21 +821,6 @@ struct qla82xx_uri_data_desc{
#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x0b0)
#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x0b4)
-#ifndef readq
-static inline u64 readq(void __iomem *addr)
-{
- return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
-}
-#endif
-
-#ifndef writeq
-static inline void writeq(u64 val, void __iomem *addr)
-{
- writel(((u32) (val)), (addr));
- writel(((u32) (val >> 32)), (addr + 4));
-}
-#endif
-
/* Request and response queue size */
#define REQUEST_ENTRY_CNT_82XX 128 /* Number of request entries. */
#define RESPONSE_ENTRY_CNT_82XX 128 /* Number of response entries.*/
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index dc1ec9b61027..0aa9c38bf347 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -1572,7 +1572,7 @@ qla8044_read_reset_template(struct scsi_qla_host *vha)
/* Copy rest of the template */
if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
ql_log(ql_log_fatal, vha, 0xb0bd,
- "%s: Failed to read reset tempelate\n", __func__);
+ "%s: Failed to read reset template\n", __func__);
goto exit_read_template_error;
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 79f050256c55..df57655779ed 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -120,7 +120,11 @@ MODULE_PARM_DESC(ql2xmaxqdepth,
"Maximum queue depth to set for each LUN. "
"Default is 32.");
+#if (IS_ENABLED(CONFIG_NVME_FC))
+int ql2xenabledif;
+#else
int ql2xenabledif = 2;
+#endif
module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif,
" Enable T10-CRC-DIF:\n"
@@ -129,6 +133,16 @@ MODULE_PARM_DESC(ql2xenabledif,
" 1 -- Enable DIF for all types\n"
" 2 -- Enable DIF for all types, except Type 0.\n");
+#if (IS_ENABLED(CONFIG_NVME_FC))
+int ql2xnvmeenable = 1;
+#else
+int ql2xnvmeenable;
+#endif
+module_param(ql2xnvmeenable, int, 0644);
+MODULE_PARM_DESC(ql2xnvmeenable,
+	"Enables NVMe support. "
+	"0 - no NVMe support, 1 - NVMe support (default).");
+
int ql2xenablehba_err_chk = 2;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk,
@@ -224,11 +238,15 @@ MODULE_PARM_DESC(ql2xexlogins,
"Number of extended Logins. "
"0 (Default)- Disabled.");
-int ql2xexchoffld = 0;
-module_param(ql2xexchoffld, uint, S_IRUGO|S_IWUSR);
+int ql2xexchoffld = 1024;
+module_param(ql2xexchoffld, uint, 0644);
MODULE_PARM_DESC(ql2xexchoffld,
- "Number of exchanges to offload. "
- "0 (Default)- Disabled.");
+ "Number of target exchanges.");
+
+int ql2xiniexchg = 1024;
+module_param(ql2xiniexchg, uint, 0644);
+MODULE_PARM_DESC(ql2xiniexchg,
+ "Number of initiator exchanges.");
int ql2xfwholdabts = 0;
module_param(ql2xfwholdabts, int, S_IRUGO);
@@ -263,6 +281,7 @@ static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
static void qla83xx_disable_laser(scsi_qla_host_t *vha);
static int qla2xxx_map_queues(struct Scsi_Host *shost);
+static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
struct scsi_host_template qla2xxx_driver_template = {
.module = THIS_MODULE,
@@ -347,6 +366,28 @@ int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
struct qla_qpair *qpair);
/* -------------------------------------------------------------------------- */
+static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
+ struct rsp_que *rsp)
+{
+ struct qla_hw_data *ha = vha->hw;
+ rsp->qpair = ha->base_qpair;
+ rsp->req = req;
+ ha->base_qpair->req = req;
+ ha->base_qpair->rsp = rsp;
+ ha->base_qpair->vha = vha;
+ ha->base_qpair->qp_lock_ptr = &ha->hardware_lock;
+ ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
+ ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
+ INIT_LIST_HEAD(&ha->base_qpair->hints_list);
+ ha->base_qpair->enable_class_2 = ql2xenableclass2;
+ /* init qpair to this cpu. Will adjust at run time. */
+ qla_cpu_update(rsp->qpair, smp_processor_id());
+ ha->base_qpair->pdev = ha->pdev;
+
+ if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
+ ha->base_qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
+}
+
static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
struct rsp_que *rsp)
{
@@ -367,6 +408,15 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
goto fail_rsp_map;
}
+ ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
+ if (ha->base_qpair == NULL) {
+ ql_log(ql_log_warn, vha, 0x00e0,
+ "Failed to allocate base queue pair memory.\n");
+ goto fail_base_qpair;
+ }
+
+ qla_init_base_qpair(vha, req, rsp);
+
if (ql2xmqsupport && ha->max_qpairs) {
ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
GFP_KERNEL);
@@ -375,14 +425,6 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
"Unable to allocate memory for queue pair ptrs.\n");
goto fail_qpair_map;
}
- ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
- if (ha->base_qpair == NULL) {
- ql_log(ql_log_warn, vha, 0x0182,
- "Failed to allocate base queue pair memory.\n");
- goto fail_base_qpair;
- }
- ha->base_qpair->req = req;
- ha->base_qpair->rsp = rsp;
}
/*
@@ -395,9 +437,10 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
set_bit(0, ha->req_qid_map);
return 1;
-fail_base_qpair:
- kfree(ha->queue_pair_map);
fail_qpair_map:
+ kfree(ha->base_qpair);
+ ha->base_qpair = NULL;
+fail_base_qpair:
kfree(ha->rsp_q_map);
ha->rsp_q_map = NULL;
fail_rsp_map:
@@ -447,6 +490,15 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
int cnt;
unsigned long flags;
+ if (ha->queue_pair_map) {
+ kfree(ha->queue_pair_map);
+ ha->queue_pair_map = NULL;
+ }
+ if (ha->base_qpair) {
+ kfree(ha->base_qpair);
+ ha->base_qpair = NULL;
+ }
+
spin_lock_irqsave(&ha->hardware_lock, flags);
for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
if (!test_bit(cnt, ha->req_qid_map))
@@ -658,8 +710,10 @@ qla2x00_sp_free_dma(void *ptr)
}
end:
- CMD_SP(cmd) = NULL;
- qla2x00_rel_sp(sp);
+ if ((sp->type != SRB_NVME_CMD) && (sp->type != SRB_NVME_LS)) {
+ CMD_SP(cmd) = NULL;
+ qla2x00_rel_sp(sp);
+ }
}
void
@@ -1062,9 +1116,9 @@ static inline int test_fcport_count(scsi_qla_host_t *vha)
int res;
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- ql_dbg(ql_dbg_init, vha, 0xffff,
- "tgt %p, fcport_count=%d\n",
- vha, vha->fcport_count);
+ ql_dbg(ql_dbg_init, vha, 0x00ec,
+ "tgt %p, fcport_count=%d\n",
+ vha, vha->fcport_count);
res = (vha->fcport_count == 0);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
@@ -1645,8 +1699,9 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
srb_t *sp;
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
-
- qlt_host_reset_handler(ha);
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_tgt_cmd *cmd;
+ uint8_t trace = 0;
spin_lock_irqsave(&ha->hardware_lock, flags);
for (que = 0; que < ha->max_req_queues; que++) {
@@ -1658,27 +1713,65 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
- /* Don't abort commands in adapter during EEH
- * recovery as it's not accessible/responding.
- */
- if (GET_CMD_SP(sp) && !ha->flags.eeh_busy &&
- (sp->type == SRB_SCSI_CMD)) {
- /* Get a reference to the sp and drop the lock.
- * The reference ensures this sp->done() call
- * - and not the call in qla2xxx_eh_abort() -
- * ends the SCSI command (with result 'res').
- */
- sp_get(sp);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- status = qla2xxx_eh_abort(GET_CMD_SP(sp));
- spin_lock_irqsave(&ha->hardware_lock, flags);
- /* Get rid of extra reference if immediate exit
- * from ql2xxx_eh_abort */
- if (status == FAILED && (qla2x00_isp_reg_stat(ha)))
- atomic_dec(&sp->ref_count);
- }
req->outstanding_cmds[cnt] = NULL;
- sp->done(sp, res);
+ if (sp->cmd_type == TYPE_SRB) {
+ if ((sp->type == SRB_NVME_CMD) ||
+ (sp->type == SRB_NVME_LS)) {
+ sp_get(sp);
+ spin_unlock_irqrestore(
+ &ha->hardware_lock, flags);
+ qla_nvme_abort(ha, sp);
+ spin_lock_irqsave(
+ &ha->hardware_lock, flags);
+ } else if (GET_CMD_SP(sp) &&
+ !ha->flags.eeh_busy &&
+ (sp->type == SRB_SCSI_CMD)) {
+ /*
+ * Don't abort commands in
+ * adapter during EEH
+ * recovery as it's not
+ * accessible/responding.
+ *
+ * Get a reference to the sp
+ * and drop the lock. The
+ * reference ensures this
+ * sp->done() call and not the
+ * call in qla2xxx_eh_abort()
+ * ends the SCSI command (with
+ * result 'res').
+ */
+ sp_get(sp);
+ spin_unlock_irqrestore(
+ &ha->hardware_lock, flags);
+ status = qla2xxx_eh_abort(
+ GET_CMD_SP(sp));
+ spin_lock_irqsave(
+ &ha->hardware_lock, flags);
+ /*
+ * Get rid of extra reference
+ * if immediate exit from
+ * ql2xxx_eh_abort
+ */
+ if (status == FAILED &&
+ (qla2x00_isp_reg_stat(ha)))
+ atomic_dec(
+ &sp->ref_count);
+ }
+ sp->done(sp, res);
+ } else {
+ if (!vha->hw->tgt.tgt_ops || !tgt ||
+ qla_ini_mode_enabled(vha)) {
+ if (!trace)
+ ql_dbg(ql_dbg_tgt_mgt,
+ vha, 0xf003,
+ "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
+ vha->dpc_flags);
+ continue;
+ }
+ cmd = (struct qla_tgt_cmd *)sp;
+ qlt_abort_cmd_on_host_reset(cmd->vha,
+ cmd);
+ }
}
}
}
@@ -1957,7 +2050,7 @@ qla83xx_iospace_config(struct qla_hw_data *ha)
/* Read MSIX vector size of the board */
pci_read_config_word(ha->pdev,
QLA_83XX_PCI_MSIX_CONTROL, &msix);
- ha->msix_count = msix + 1;
+ ha->msix_count = (msix & PCI_MSIX_FLAGS_QSIZE) + 1;
/*
* By default, driver uses at least two msix vectors
* (default & rspq)
@@ -1975,7 +2068,7 @@ qla83xx_iospace_config(struct qla_hw_data *ha)
/* Queue pairs is the max value minus
* the base queue pair */
ha->max_qpairs = ha->max_req_queues - 1;
- ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x00e3,
"Max no of queues pairs: %d.\n", ha->max_qpairs);
}
ql_log_pci(ql_log_info, ha->pdev, 0x011c,
@@ -2653,7 +2746,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
"Memory allocated for ha=%p.\n", ha);
ha->pdev = pdev;
- ha->tgt.enable_class_2 = ql2xenableclass2;
INIT_LIST_HEAD(&ha->tgt.q_full_list);
spin_lock_init(&ha->tgt.q_full_lock);
spin_lock_init(&ha->tgt.sess_lock);
@@ -3073,12 +3165,26 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->can_queue, base_vha->req,
base_vha->mgmt_svr_loop_id, host->sg_tablesize);
- if (ha->mqenable && qla_ini_mode_enabled(base_vha)) {
+ if (ha->mqenable) {
+ bool mq = false;
+ bool startit = false;
ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
- /* Create start of day qpairs for Block MQ */
- if (shost_use_blk_mq(host)) {
+
+ if (QLA_TGT_MODE_ENABLED()) {
+ mq = true;
+ startit = false;
+ }
+
+ if ((ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED) &&
+ shost_use_blk_mq(host)) {
+ mq = true;
+ startit = true;
+ }
+
+ if (mq) {
+ /* Create start of day qpairs for Block MQ */
for (i = 0; i < ha->max_qpairs; i++)
- qla2xxx_create_qpair(base_vha, 5, 0);
+ qla2xxx_create_qpair(base_vha, 5, 0, startit);
}
}
@@ -3451,6 +3557,9 @@ qla2x00_remove_one(struct pci_dev *pdev)
return;
set_bit(UNLOADING, &base_vha->dpc_flags);
+
+ qla_nvme_delete(base_vha);
+
dma_free_coherent(&ha->pdev->dev,
base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
@@ -3601,10 +3710,10 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
} else {
int now;
if (rport) {
- ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
- "%s %8phN. rport %p roles %x \n",
- __func__, fcport->port_name, rport,
- rport->roles);
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x2109,
+ "%s %8phN. rport %p roles %x\n",
+ __func__, fcport->port_name, rport,
+ rport->roles);
fc_remote_port_delete(rport);
}
qlt_do_generation_tick(vha, &now);
@@ -3649,7 +3758,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
if (fcport->login_retry == 0) {
fcport->login_retry = vha->hw->login_retry_count;
- ql_dbg(ql_dbg_disc, vha, 0x2067,
+ ql_dbg(ql_dbg_disc, vha, 0x20a3,
"Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
fcport->port_name, fcport->loop_id, fcport->login_retry);
}
@@ -3673,8 +3782,8 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
{
fc_port_t *fcport;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Mark all dev lost\n");
+ ql_dbg(ql_dbg_disc, vha, 0x20f1,
+ "Mark all dev lost\n");
list_for_each_entry(fcport, &vha->vp_fcports, list) {
fcport->scan_state = 0;
@@ -3986,6 +4095,9 @@ qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha)
if (!ql2xexlogins)
return QLA_SUCCESS;
+ if (!IS_EXLOGIN_OFFLD_CAPABLE(ha))
+ return QLA_SUCCESS;
+
ql_log(ql_log_info, vha, 0xd021, "EXLOGIN count: %d.\n", ql2xexlogins);
max_cnt = 0;
rval = qla_get_exlogin_status(vha, &size, &max_cnt);
@@ -3996,27 +4108,33 @@ qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha)
}
temp = (ql2xexlogins > max_cnt) ? max_cnt : ql2xexlogins;
- ha->exlogin_size = (size * temp);
- ql_log(ql_log_info, vha, 0xd024,
- "EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n",
- max_cnt, size, temp);
-
- ql_log(ql_log_info, vha, 0xd025, "EXLOGIN: requested size=0x%x\n",
- ha->exlogin_size);
-
- /* Get consistent memory for extended logins */
- ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev,
- ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL);
- if (!ha->exlogin_buf) {
- ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a,
+ temp *= size;
+
+ if (temp != ha->exlogin_size) {
+ qla2x00_free_exlogin_buffer(ha);
+ ha->exlogin_size = temp;
+
+ ql_log(ql_log_info, vha, 0xd024,
+ "EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n",
+ max_cnt, size, temp);
+
+ ql_log(ql_log_info, vha, 0xd025,
+ "EXLOGIN: requested size=0x%x\n", ha->exlogin_size);
+
+ /* Get consistent memory for extended logins */
+ ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev,
+ ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL);
+ if (!ha->exlogin_buf) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a,
"Failed to allocate memory for exlogin_buf_dma.\n");
- return -ENOMEM;
+ return -ENOMEM;
+ }
}
/* Now configure the dma buffer */
rval = qla_set_exlogin_mem_cfg(vha, ha->exlogin_buf_dma);
if (rval) {
- ql_log(ql_log_fatal, vha, 0x00cf,
+ ql_log(ql_log_fatal, vha, 0xd033,
"Setup extended login buffer ****FAILED****.\n");
qla2x00_free_exlogin_buffer(ha);
}
@@ -4041,19 +4159,50 @@ qla2x00_free_exlogin_buffer(struct qla_hw_data *ha)
}
}
+static void
+qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt)
+{
+ u32 temp;
+ *ret_cnt = FW_DEF_EXCHANGES_CNT;
+
+ if (qla_ini_mode_enabled(vha)) {
+ if (ql2xiniexchg > max_cnt)
+ ql2xiniexchg = max_cnt;
+
+ if (ql2xiniexchg > FW_DEF_EXCHANGES_CNT)
+ *ret_cnt = ql2xiniexchg;
+ } else if (qla_tgt_mode_enabled(vha)) {
+ if (ql2xexchoffld > max_cnt)
+ ql2xexchoffld = max_cnt;
+
+ if (ql2xexchoffld > FW_DEF_EXCHANGES_CNT)
+ *ret_cnt = ql2xexchoffld;
+ } else if (qla_dual_mode_enabled(vha)) {
+ temp = ql2xiniexchg + ql2xexchoffld;
+ if (temp > max_cnt) {
+ ql2xiniexchg -= (temp - max_cnt)/2;
+ ql2xexchoffld -= (((temp - max_cnt)/2) + 1);
+ temp = max_cnt;
+ }
+
+ if (temp > FW_DEF_EXCHANGES_CNT)
+ *ret_cnt = temp;
+ }
+}
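
To see how the dual-mode branch splits the firmware's exchange budget, take
ql2xiniexchg = ql2xexchoffld = 2048 with the firmware reporting
max_cnt = 3000 (illustrative numbers only): temp = 4096 overshoots by 1096,
so ql2xiniexchg is trimmed by 548 to 1500 and ql2xexchoffld by 549 to 1499,
giving 1500 + 1499 = 2999 <= 3000. The extra "+ 1" taken from the target
share keeps the sum within max_cnt when the overage is odd; temp is then
clamped to max_cnt and, if it still exceeds FW_DEF_EXCHANGES_CNT, becomes
the returned count.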
+
int
qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
{
int rval;
- uint16_t size, max_cnt, temp;
+ u16 size, max_cnt;
+ u32 temp;
struct qla_hw_data *ha = vha->hw;
- /* Return if we don't need to alloacate any extended logins */
- if (!ql2xexchoffld)
+ if (!ha->flags.exchoffld_enabled)
return QLA_SUCCESS;
- ql_log(ql_log_info, vha, 0xd014,
- "Exchange offload count: %d.\n", ql2xexlogins);
+ if (!IS_EXCHG_OFFLD_CAPABLE(ha))
+ return QLA_SUCCESS;
max_cnt = 0;
rval = qla_get_exchoffld_status(vha, &size, &max_cnt);
@@ -4063,30 +4212,45 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
return rval;
}
- temp = (ql2xexchoffld > max_cnt) ? max_cnt : ql2xexchoffld;
- ha->exchoffld_size = (size * temp);
- ql_log(ql_log_info, vha, 0xd016,
- "Exchange offload: max_count=%d, buffers=0x%x, total=%d.\n",
- max_cnt, size, temp);
-
- ql_log(ql_log_info, vha, 0xd017,
- "Exchange Buffers requested size = 0x%x\n", ha->exchoffld_size);
+ qla2x00_number_of_exch(vha, &temp, max_cnt);
+ temp *= size;
- /* Get consistent memory for extended logins */
- ha->exchoffld_buf = dma_alloc_coherent(&ha->pdev->dev,
- ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL);
- if (!ha->exchoffld_buf) {
- ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
- "Failed to allocate memory for exchoffld_buf_dma.\n");
- return -ENOMEM;
+ if (temp != ha->exchoffld_size) {
+ qla2x00_free_exchoffld_buffer(ha);
+ ha->exchoffld_size = temp;
+
+ ql_log(ql_log_info, vha, 0xd016,
+ "Exchange offload: max_count=%d, buffers=0x%x, total=%d.\n",
+ max_cnt, size, temp);
+
+ ql_log(ql_log_info, vha, 0xd017,
+ "Exchange Buffers requested size = 0x%x\n",
+ ha->exchoffld_size);
+
+ /* Get consistent memory for extended logins */
+ ha->exchoffld_buf = dma_alloc_coherent(&ha->pdev->dev,
+ ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL);
+ if (!ha->exchoffld_buf) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
+ "Failed to allocate memory for exchoffld_buf_dma.\n");
+ return -ENOMEM;
+ }
}
/* Now configure the dma buffer */
- rval = qla_set_exchoffld_mem_cfg(vha, ha->exchoffld_buf_dma);
+ rval = qla_set_exchoffld_mem_cfg(vha);
if (rval) {
ql_log(ql_log_fatal, vha, 0xd02e,
"Setup exchange offload buffer ****FAILED****.\n");
qla2x00_free_exchoffld_buffer(ha);
+ } else {
+ /* re-adjust number of target exchange */
+ struct init_cb_81xx *icb = (struct init_cb_81xx *)ha->init_cb;
+
+ if (qla_ini_mode_enabled(vha))
+ icb->exchange_count = 0;
+ else
+ icb->exchange_count = cpu_to_le16(ql2xexchoffld);
}
return rval;
@@ -4292,6 +4456,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
INIT_LIST_HEAD(&vha->plogi_ack_list);
INIT_LIST_HEAD(&vha->qp_list);
INIT_LIST_HEAD(&vha->gnl.fcports);
+ INIT_LIST_HEAD(&vha->nvme_rport_list);
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);
@@ -4303,7 +4468,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
vha->gnl.l = dma_alloc_coherent(&ha->pdev->dev,
vha->gnl.size, &vha->gnl.ldma, GFP_KERNEL);
if (!vha->gnl.l) {
- ql_log(ql_log_fatal, vha, 0xffff,
+ ql_log(ql_log_fatal, vha, 0xd04a,
"Alloc failed for name list.\n");
scsi_remove_host(vha->host);
return NULL;
@@ -4581,6 +4746,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
qla24xx_async_gpdb(vha, e->u.fcport.fcport,
e->u.fcport.opt);
break;
+ case QLA_EVT_PRLI:
+ qla24xx_async_prli(vha, e->u.fcport.fcport);
+ break;
case QLA_EVT_GPSC:
qla24xx_async_gpsc(vha, e->u.fcport.fcport);
break;
@@ -4620,7 +4788,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) {
fcport->login_retry--;
if (fcport->flags & FCF_FABRIC_DEVICE) {
- ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x2108,
"%s %8phC DS %d LS %d\n", __func__,
fcport->port_name, fcport->disc_state,
fcport->fw_login_state);
@@ -5800,6 +5968,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
sp = req->outstanding_cmds[index];
if (!sp)
continue;
+ if (sp->cmd_type != TYPE_SRB)
+ continue;
if (sp->type != SRB_SCSI_CMD)
continue;
sfcp = sp->fcport;
@@ -5851,6 +6021,18 @@ qla2x00_timer(scsi_qla_host_t *vha)
if (!list_empty(&vha->work_list))
start_dpc++;
+ /*
+ * FC-NVME
+ * see if the active AEN count has changed from what was last reported.
+ */
+ if (atomic_read(&vha->nvme_active_aen_cnt) != vha->nvme_last_rptd_aen) {
+ vha->nvme_last_rptd_aen =
+ atomic_read(&vha->nvme_active_aen_cnt);
+ ql_log(ql_log_info, vha, 0x3002,
+ "reporting new aen count of %d to the fw\n",
+ vha->nvme_last_rptd_aen);
+ }
+
/* Schedule the DPC routine if needed */
if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index e766d8412384..2a0173e5d10e 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -59,13 +59,20 @@ MODULE_PARM_DESC(qlini_mode,
"when ready "
"\"enabled\" (default) - initiator mode will always stay enabled.");
-static int ql_dm_tgt_ex_pct = 50;
+static int ql_dm_tgt_ex_pct = 0;
module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql_dm_tgt_ex_pct,
"For Dual Mode (qlini_mode=dual), this parameter determines "
"the percentage of exchanges/cmds FW will allocate resources "
"for Target mode.");
+int ql2xuctrlirq = 1;
+module_param(ql2xuctrlirq, int, 0644);
+MODULE_PARM_DESC(ql2xuctrlirq,
+	"User can control IRQ placement via smp_affinity. "
+	"Valid with qlini_mode=disabled. "
+	"1 (default): enabled");
+
int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
static int temp_sam_status = SAM_STAT_BUSY;
@@ -110,18 +117,17 @@ enum fcp_resp_rsp_codes {
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
struct atio_from_isp *pkt, uint8_t);
-static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
+static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp,
+ response_t *pkt);
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
int fn, void *iocb, int flags);
-static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
+static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd
*cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
-static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
- struct qla_tgt_cmd *cmd);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
static void qlt_clear_tgt_db(struct qla_tgt *tgt);
-static void qlt_send_notify_ack(struct scsi_qla_host *vha,
+static void qlt_send_notify_ack(struct qla_qpair *qpair,
struct imm_ntfy_from_isp *ntfy,
uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
@@ -132,6 +138,8 @@ static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
void qlt_unreg_sess(struct fc_port *sess);
static void qlt_24xx_handle_abts(struct scsi_qla_host *,
struct abts_recv_from_24xx *);
+static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
+ uint16_t);
/*
* Global Variables
@@ -200,8 +208,8 @@ struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
host = btree_lookup32(&vha->hw->tgt.host_map, key);
if (!host)
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
- "Unable to find host %06x\n", key);
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
+ "Unable to find host %06x\n", key);
return host;
}
@@ -245,26 +253,22 @@ static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
- struct atio_from_isp *atio, uint8_t ha_locked)
+ struct atio_from_isp *atio, uint8_t ha_locked)
{
struct qla_tgt_sess_op *u;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
unsigned long flags;
if (tgt->tgt_stop) {
- ql_dbg(ql_dbg_async, vha, 0xffff,
- "qla_target(%d): dropping unknown ATIO_TYPE7, "
- "because tgt is being stopped", vha->vp_idx);
+ ql_dbg(ql_dbg_async, vha, 0x502c,
+ "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped",
+ vha->vp_idx);
goto out_term;
}
u = kzalloc(sizeof(*u), GFP_ATOMIC);
- if (u == NULL) {
- ql_dbg(ql_dbg_async, vha, 0xffff,
- "Alloc of struct unknown_atio (size %zd) failed", sizeof(*u));
- /* It should be harmless and on the next retry should work well */
+ if (u == NULL)
goto out_term;
- }
u->vha = vha;
memcpy(&u->atio, atio, sizeof(*atio));
@@ -280,7 +284,7 @@ out:
return;
out_term:
- qlt_send_term_exchange(vha, NULL, atio, ha_locked, 0);
+ qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0);
goto out;
}
@@ -295,29 +299,28 @@ static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
if (u->aborted) {
- ql_dbg(ql_dbg_async, vha, 0xffff,
- "Freeing unknown %s %p, because of Abort",
+ ql_dbg(ql_dbg_async, vha, 0x502e,
+ "Freeing unknown %s %p, because of Abort\n",
"ATIO_TYPE7", u);
- qlt_send_term_exchange(vha, NULL, &u->atio,
- ha_locked, 0);
+ qlt_send_term_exchange(vha->hw->base_qpair, NULL,
+ &u->atio, ha_locked, 0);
goto abort;
}
host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
if (host != NULL) {
- ql_dbg(ql_dbg_async, vha, 0xffff,
- "Requeuing unknown ATIO_TYPE7 %p", u);
+ ql_dbg(ql_dbg_async, vha, 0x502f,
+ "Requeuing unknown ATIO_TYPE7 %p\n", u);
qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
} else if (tgt->tgt_stop) {
- ql_dbg(ql_dbg_async, vha, 0xffff,
- "Freeing unknown %s %p, because tgt is being stopped",
- "ATIO_TYPE7", u);
- qlt_send_term_exchange(vha, NULL, &u->atio,
- ha_locked, 0);
+ ql_dbg(ql_dbg_async, vha, 0x503a,
+ "Freeing unknown %s %p, because tgt is being stopped\n",
+ "ATIO_TYPE7", u);
+ qlt_send_term_exchange(vha->hw->base_qpair, NULL,
+ &u->atio, ha_locked, 0);
} else {
- ql_dbg(ql_dbg_async, vha, 0xffff,
- "u %p, vha %p, host %p, sched again..", u,
- vha, host);
+ ql_dbg(ql_dbg_async, vha, 0x503d,
+ "Reschedule u %p, vha %p, host %p\n", u, vha, host);
if (!queued) {
queued = 1;
schedule_delayed_work(&vha->unknown_atio_work,
@@ -380,6 +383,8 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
struct imm_ntfy_from_isp *entry =
(struct imm_ntfy_from_isp *)atio;
+ qlt_issue_marker(vha, ha_locked);
+
if ((entry->u.isp24.vp_index != 0xFF) &&
(entry->u.isp24.nport_handle != 0xFFFF)) {
host = qlt_find_host_by_vp_idx(vha,
@@ -411,7 +416,7 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
unsigned long flags;
if (unlikely(!host)) {
- ql_dbg(ql_dbg_tgt, vha, 0xffff,
+ ql_dbg(ql_dbg_tgt, vha, 0xe00a,
"qla_target(%d): Response pkt (ABTS_RECV_24XX) "
"received, with unknown vp_index %d\n",
vha->vp_idx, entry->vp_index);
@@ -437,7 +442,8 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
return false;
}
-void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
+void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
+ struct rsp_que *rsp, response_t *pkt)
{
switch (pkt->entry_type) {
case CTIO_CRC2:
@@ -456,7 +462,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
vha->vp_idx, entry->vp_index);
break;
}
- qlt_response_pkt(host, pkt);
+ qlt_response_pkt(host, rsp, pkt);
break;
}
@@ -474,7 +480,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
vha->vp_idx, entry->u.isp24.vp_index);
break;
}
- qlt_response_pkt(host, pkt);
+ qlt_response_pkt(host, rsp, pkt);
break;
}
@@ -496,7 +502,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
break;
}
}
- qlt_response_pkt(host, pkt);
+ qlt_response_pkt(host, rsp, pkt);
break;
}
@@ -513,7 +519,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
"vp_index %d\n", vha->vp_idx, entry->vp_index);
break;
}
- qlt_response_pkt(host, pkt);
+ qlt_response_pkt(host, rsp, pkt);
break;
}
@@ -530,12 +536,12 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
"vp_index %d\n", vha->vp_idx, entry->vp_index);
break;
}
- qlt_response_pkt(host, pkt);
+ qlt_response_pkt(host, rsp, pkt);
break;
}
default:
- qlt_response_pkt(vha, pkt);
+ qlt_response_pkt(vha, rsp, pkt);
break;
}
@@ -565,13 +571,13 @@ void qla2x00_async_nack_sp_done(void *s, int res)
struct scsi_qla_host *vha = sp->vha;
unsigned long flags;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async done-%s res %x %8phC type %d\n",
- sp->name, res, sp->fcport->port_name, sp->type);
+ ql_dbg(ql_dbg_disc, vha, 0x20f2,
+ "Async done-%s res %x %8phC type %d\n",
+ sp->name, res, sp->fcport->port_name, sp->type);
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
sp->fcport->flags &= ~FCF_ASYNC_SENT;
- sp->fcport->chip_reset = vha->hw->chip_reset;
+ sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
switch (sp->type) {
case SRB_NACK_PLOGI:
@@ -593,19 +599,19 @@ void qla2x00_async_nack_sp_done(void *s, int res)
if (!IS_IIDMA_CAPABLE(vha->hw) ||
!vha->hw->flags.gpsc_supported) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post upd_fcport fcp_cnt %d\n",
- __func__, __LINE__,
- sp->fcport->port_name,
- vha->fcport_count);
+ ql_dbg(ql_dbg_disc, vha, 0x20f3,
+ "%s %d %8phC post upd_fcport fcp_cnt %d\n",
+ __func__, __LINE__,
+ sp->fcport->port_name,
+ vha->fcport_count);
qla24xx_post_upd_fcport_work(vha, sp->fcport);
} else {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post gpsc fcp_cnt %d\n",
- __func__, __LINE__,
- sp->fcport->port_name,
- vha->fcport_count);
+ ql_dbg(ql_dbg_disc, vha, 0x20f5,
+ "%s %d %8phC post gpsc fcp_cnt %d\n",
+ __func__, __LINE__,
+ sp->fcport->port_name,
+ vha->fcport_count);
qla24xx_post_gpsc_work(vha, sp->fcport);
}
@@ -664,9 +670,9 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
if (rval != QLA_SUCCESS)
goto done_free_sp;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async-%s %8phC hndl %x %s\n",
- sp->name, fcport->port_name, sp->handle, c);
+ ql_dbg(ql_dbg_disc, vha, 0x20f4,
+ "Async-%s %8phC hndl %x %s\n",
+ sp->name, fcport->port_name, sp->handle, c);
return rval;
@@ -688,7 +694,7 @@ void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
t = qlt_create_sess(vha, e->u.nack.fcport, 0);
mutex_unlock(&vha->vha_tgt.tgt_mutex);
if (t) {
- ql_log(ql_log_info, vha, 0xffff,
+ ql_log(ql_log_info, vha, 0xd034,
"%s create sess success %p", __func__, t);
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
/* create sess has an extra kref */
@@ -757,7 +763,7 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
}
if (!kref_get_unless_zero(&sess->sess_kref)) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x2107,
"%s: kref_get fail sess %8phC \n",
__func__, sess->port_name);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
@@ -957,7 +963,6 @@ static void qlt_free_session_done(struct work_struct *work)
sess->logout_on_delete, sess->keep_nport_handle,
sess->send_els_logo);
-
if (!IS_SW_RESV_ADDR(sess->d_id)) {
if (sess->send_els_logo) {
qlt_port_logo_t logo;
@@ -1026,7 +1031,7 @@ static void qlt_free_session_done(struct work_struct *work)
sess->login_succ = 0;
}
- if (sess->chip_reset != sess->vha->hw->chip_reset)
+ if (sess->chip_reset != ha->base_qpair->chip_reset)
qla2x00_clear_loop_id(sess);
if (sess->conflict) {
@@ -1098,7 +1103,7 @@ void qlt_unreg_sess(struct fc_port *sess)
{
struct scsi_qla_host *vha = sess->vha;
- ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
+ ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
"%s sess %p for deletion %8phC\n",
__func__, sess, sess->port_name);
@@ -1112,6 +1117,9 @@ void qlt_unreg_sess(struct fc_port *sess)
sess->last_rscn_gen = sess->rscn_gen;
sess->last_login_gen = sess->login_gen;
+ if (sess->nvme_flag & NVME_FLAG_REGISTERED)
+ schedule_work(&sess->nvme_del_work);
+
INIT_WORK(&sess->free_work, qlt_free_session_done);
schedule_work(&sess->free_work);
}
@@ -1156,7 +1164,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
static void qla24xx_chk_fcp_state(struct fc_port *sess)
{
- if (sess->chip_reset != sess->vha->hw->chip_reset) {
+ if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) {
sess->logout_on_delete = 0;
sess->logo_ack_needed = 0;
sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
@@ -1288,7 +1296,7 @@ static struct fc_port *qlt_create_sess(
if (fcport->se_sess) {
if (!kref_get_unless_zero(&sess->sess_kref)) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20f6,
"%s: kref_get_unless_zero failed for %8phC\n",
__func__, sess->port_name);
return NULL;
@@ -1310,7 +1318,7 @@ static struct fc_port *qlt_create_sess(
if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
&fcport->port_name[0], sess) < 0) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
"(%d) %8phC check_initiator_node_acl failed\n",
vha->vp_idx, fcport->port_name);
return NULL;
@@ -1321,7 +1329,7 @@ static struct fc_port *qlt_create_sess(
* fc_port access across ->tgt.sess_lock reaquire.
*/
if (!kref_get_unless_zero(&sess->sess_kref)) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20f7,
"%s: kref_get_unless_zero failed for %8phC\n",
__func__, sess->port_name);
return NULL;
@@ -1432,6 +1440,8 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
if (npiv_vports) {
mutex_unlock(&qla_tgt_mutex);
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
+		    "NPIV is in use. Cannot stop target\n");
return -EPERM;
}
}
@@ -1442,7 +1452,7 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
return -EPERM;
}
- ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
vha->host_no, vha);
/*
* Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
@@ -1485,9 +1495,7 @@ EXPORT_SYMBOL(qlt_stop_phase1);
/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
- struct qla_hw_data *ha = tgt->ha;
- scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
- unsigned long flags;
+ scsi_qla_host_t *vha = tgt->vha;
if (tgt->tgt_stopped) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
@@ -1495,24 +1503,19 @@ void qlt_stop_phase2(struct qla_tgt *tgt)
dump_stack();
return;
}
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
- "Waiting for %d IRQ commands to complete (tgt %p)",
- tgt->irq_cmd_count, tgt);
+ if (!tgt->tgt_stop) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
+ "%s: phase1 stop is not completed\n", __func__);
+ dump_stack();
+ return;
+ }
mutex_lock(&vha->vha_tgt.tgt_mutex);
- spin_lock_irqsave(&ha->hardware_lock, flags);
- while ((tgt->irq_cmd_count != 0) || (tgt->atio_irq_cmd_count != 0)) {
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- udelay(2);
- spin_lock_irqsave(&ha->hardware_lock, flags);
- }
tgt->tgt_stop = 0;
tgt->tgt_stopped = 1;
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
mutex_unlock(&vha->vha_tgt.tgt_mutex);
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);
@@ -1521,10 +1524,36 @@ EXPORT_SYMBOL(qlt_stop_phase2);
static void qlt_release(struct qla_tgt *tgt)
{
scsi_qla_host_t *vha = tgt->vha;
+ void *node;
+ u64 key = 0;
+ u16 i;
+ struct qla_qpair_hint *h;
+
+ if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stop &&
+ !tgt->tgt_stopped)
+ qlt_stop_phase1(tgt);
if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
qlt_stop_phase2(tgt);
+ for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
+ unsigned long flags;
+
+ h = &tgt->qphints[i];
+ if (h->qpair) {
+ spin_lock_irqsave(h->qpair->qp_lock_ptr, flags);
+ list_del(&h->hint_elem);
+ spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags);
+ h->qpair = NULL;
+ }
+ }
+ kfree(tgt->qphints);
+
+ btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
+ btree_remove64(&tgt->lun_qpair_map, key);
+
+ btree_destroy64(&tgt->lun_qpair_map);
+
vha->vha_tgt.qla_tgt = NULL;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
@@ -1568,11 +1597,12 @@ static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
/*
* ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
*/
-static void qlt_send_notify_ack(struct scsi_qla_host *vha,
+static void qlt_send_notify_ack(struct qla_qpair *qpair,
struct imm_ntfy_from_isp *ntfy,
uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
+ struct scsi_qla_host *vha = qpair->vha;
struct qla_hw_data *ha = vha->hw;
request_t *pkt;
struct nack_to_isp *nack;
@@ -1582,11 +1612,7 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
- /* Send marker if required */
- if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
- return;
-
- pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+ pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
if (!pkt) {
ql_dbg(ql_dbg_tgt, vha, 0xe049,
"qla_target(%d): %s failed: unable to allocate "
@@ -1627,16 +1653,17 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
/* Memory Barrier */
wmb();
- qla2x00_start_iocbs(vha, vha->req);
+ qla2x00_start_iocbs(vha, qpair->req);
}
/*
* ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
*/
-static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
+static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
struct abts_recv_from_24xx *abts, uint32_t status,
bool ids_reversed)
{
+ struct scsi_qla_host *vha = qpair->vha;
struct qla_hw_data *ha = vha->hw;
struct abts_resp_to_24xx *resp;
uint32_t f_ctl;
@@ -1646,11 +1673,8 @@ static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
"Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
ha, abts, status);
- /* Send marker if required */
- if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
- return;
-
- resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
+ resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair,
+ NULL);
if (!resp) {
ql_dbg(ql_dbg_tgt, vha, 0xe04a,
"qla_target(%d): %s failed: unable to allocate "
@@ -1706,7 +1730,10 @@ static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
/* Memory Barrier */
wmb();
- qla2x00_start_iocbs(vha, vha->req);
+ if (qpair->reqq_start_iocbs)
+ qpair->reqq_start_iocbs(qpair);
+ else
+ qla2x00_start_iocbs(vha, qpair->req);
}
/*
@@ -1719,11 +1746,9 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt, vha, 0xe007,
"Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
- /* Send marker if required */
- if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
- return;
- ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
+ ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(
+ vha->hw->base_qpair, NULL);
if (ctio == NULL) {
ql_dbg(ql_dbg_tgt, vha, 0xe04b,
"qla_target(%d): %s failed: unable to allocate "
@@ -1754,7 +1779,8 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
wmb();
qla2x00_start_iocbs(vha, vha->req);
- qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
+ qlt_24xx_send_abts_resp(vha->hw->base_qpair,
+ (struct abts_recv_from_24xx *)entry,
FCP_TMF_CMPL, true);
}
@@ -1762,13 +1788,13 @@ static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
{
struct qla_tgt_sess_op *op;
struct qla_tgt_cmd *cmd;
+ unsigned long flags;
- spin_lock(&vha->cmd_list_lock);
-
+ spin_lock_irqsave(&vha->cmd_list_lock, flags);
list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
if (tag == op->atio.u.isp24.exchange_addr) {
op->aborted = true;
- spin_unlock(&vha->cmd_list_lock);
+ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
return 1;
}
}
@@ -1776,7 +1802,7 @@ static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
if (tag == op->atio.u.isp24.exchange_addr) {
op->aborted = true;
- spin_unlock(&vha->cmd_list_lock);
+ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
return 1;
}
}
@@ -1784,12 +1810,12 @@ static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
if (tag == cmd->atio.u.isp24.exchange_addr) {
cmd->aborted = 1;
- spin_unlock(&vha->cmd_list_lock);
+ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
return 1;
}
}
+ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
- spin_unlock(&vha->cmd_list_lock);
return 0;
}
@@ -1799,17 +1825,18 @@ static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
* for the same lun)
*/
static void abort_cmds_for_lun(struct scsi_qla_host *vha,
- uint32_t lun, uint8_t *s_id)
+ u64 lun, uint8_t *s_id)
{
struct qla_tgt_sess_op *op;
struct qla_tgt_cmd *cmd;
uint32_t key;
+ unsigned long flags;
key = sid_to_key(s_id);
- spin_lock(&vha->cmd_list_lock);
+ spin_lock_irqsave(&vha->cmd_list_lock, flags);
list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
uint32_t op_key;
- uint32_t op_lun;
+ u64 op_lun;
op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
op_lun = scsilun_to_int(
@@ -1831,7 +1858,7 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha,
list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
uint32_t cmd_key;
- uint32_t cmd_lun;
+ u64 cmd_lun;
cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
cmd_lun = scsilun_to_int(
@@ -1839,7 +1866,7 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha,
if (cmd_key == key && cmd_lun == lun)
cmd->aborted = 1;
}
- spin_unlock(&vha->cmd_list_lock);
+ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
}
/* ha->hardware_lock supposed to be held on entry */
@@ -1849,18 +1876,15 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
struct se_session *se_sess = sess->se_sess;
struct qla_tgt_mgmt_cmd *mcmd;
+ struct qla_tgt_cmd *cmd;
struct se_cmd *se_cmd;
- u32 lun = 0;
int rc;
bool found_lun = false;
unsigned long flags;
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
- struct qla_tgt_cmd *cmd =
- container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
if (se_cmd->tag == abts->exchange_addr_to_abort) {
- lun = cmd->unpacked_lun;
found_lun = true;
break;
}
@@ -1871,7 +1895,8 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
if (!found_lun) {
if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
/* send TASK_ABORT response immediately */
- qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false);
+ qlt_24xx_send_abts_resp(ha->base_qpair, abts,
+ FCP_TMF_CMPL, false);
return 0;
} else {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
@@ -1894,12 +1919,14 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
}
memset(mcmd, 0, sizeof(*mcmd));
+ cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
mcmd->sess = sess;
memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
- mcmd->reset_count = vha->hw->chip_reset;
+ mcmd->reset_count = ha->base_qpair->chip_reset;
mcmd->tmr_func = QLA_TGT_ABTS;
+ mcmd->qpair = ha->base_qpair;
- rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, mcmd->tmr_func,
+ rc = ha->tgt.tgt_ops->handle_tmr(mcmd, cmd->unpacked_lun, mcmd->tmr_func,
abts->exchange_addr_to_abort);
if (rc != 0) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
@@ -1929,7 +1956,8 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
"qla_target(%d): ABTS: Abort Sequence not "
"supported\n", vha->vp_idx);
- qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+ qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
+ false);
return;
}
@@ -1937,7 +1965,8 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
"qla_target(%d): ABTS: Unknown Exchange "
"Address received\n", vha->vp_idx);
- qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+ qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
+ false);
return;
}
@@ -1963,8 +1992,8 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
if (rc != 0) {
- qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
- false);
+ qlt_24xx_send_abts_resp(ha->base_qpair, abts,
+ FCP_TMF_REJECTED, false);
}
return;
}
@@ -1972,7 +2001,8 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
if (sess->deleted) {
- qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+ qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
+ false);
return;
}
@@ -1981,7 +2011,8 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
"qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
vha->vp_idx, rc);
- qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+ qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
+ false);
return;
}
}
@@ -1989,9 +2020,10 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
/*
* ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
*/
-static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
+static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
+ struct scsi_qla_host *ha = qpair->vha;
struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
struct ctio7_to_24xx *ctio;
uint16_t temp;
@@ -2000,11 +2032,8 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
"Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
ha, atio, resp_code);
- /* Send marker if required */
- if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
- return;
- ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
+ ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL);
if (ctio == NULL) {
ql_dbg(ql_dbg_tgt, ha, 0xe04c,
"qla_target(%d): %s failed: unable to allocate "
@@ -2022,8 +2051,9 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
ctio->exchange_addr = atio->u.isp24.exchange_addr;
- ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
- cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
+ temp = (atio->u.isp24.attr << 9) |
+ CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
+ ctio->u.status1.flags = cpu_to_le16(temp);
temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
ctio->u.status1.ox_id = cpu_to_le16(temp);
ctio->u.status1.scsi_status =
@@ -2033,7 +2063,10 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
/* Memory Barrier */
wmb();
- qla2x00_start_iocbs(ha, ha->req);
+ if (qpair->reqq_start_iocbs)
+ qpair->reqq_start_iocbs(qpair);
+ else
+ qla2x00_start_iocbs(ha, qpair->req);
}
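
The queueing change above recurs throughout this patch: instead of always kicking the single request queue via qla2x00_start_iocbs(), each submission point now asks the qpair for its own start routine first. A minimal sketch of the pattern, factored into a hypothetical helper (the callback and fallback names are taken from the diff; the helper itself is not part of the patch):

    /* Hypothetical helper, shown only to illustrate the dispatch
     * pattern: MQ-capable qpairs carry their own reqq_start_iocbs()
     * callback; the base qpair falls back to the legacy entry point.
     */
    static inline void qlt_kick_reqq(struct qla_qpair *qpair)
    {
            if (qpair->reqq_start_iocbs)
                    qpair->reqq_start_iocbs(qpair);
            else
                    qla2x00_start_iocbs(qpair->vha, qpair->req);
    }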
void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
@@ -2046,12 +2079,13 @@ EXPORT_SYMBOL(qlt_free_mcmd);
* ha->hardware_lock supposed to be held on entry. Might drop it, then
* reacquire
*/
-void qlt_send_resp_ctio(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
+void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
{
struct atio_from_isp *atio = &cmd->atio;
struct ctio7_to_24xx *ctio;
uint16_t temp;
+ struct scsi_qla_host *vha = cmd->vha;
ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
"Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
@@ -2076,8 +2110,9 @@ void qlt_send_resp_ctio(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
ctio->exchange_addr = atio->u.isp24.exchange_addr;
- ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
- cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
+ temp = (atio->u.isp24.attr << 9) |
+ CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
+ ctio->u.status1.flags = cpu_to_le16(temp);
temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
ctio->u.status1.ox_id = cpu_to_le16(temp);
ctio->u.status1.scsi_status =
@@ -2101,7 +2136,11 @@ void qlt_send_resp_ctio(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
/* Memory Barrier */
wmb();
- qla2x00_start_iocbs(vha, vha->req);
+ if (qpair->reqq_start_iocbs)
+ qpair->reqq_start_iocbs(qpair);
+ else
+ qla2x00_start_iocbs(vha, qpair->req);
+
out:
return;
}
@@ -2112,14 +2151,15 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
struct scsi_qla_host *vha = mcmd->sess->vha;
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
+ struct qla_qpair *qpair = mcmd->qpair;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
"TM response mcmd (%p) status %#x state %#x",
mcmd, mcmd->fc_tm_rsp, mcmd->flags);
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
- if (!vha->flags.online || mcmd->reset_count != ha->chip_reset) {
+ if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) {
/*
* Either the port is not online or this request was from
* previous life, just abort the processing.
@@ -2127,9 +2167,9 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
ql_dbg(ql_dbg_async, vha, 0xe100,
"RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
vha->flags.online, qla2x00_reset_active(vha),
- mcmd->reset_count, ha->chip_reset);
+ mcmd->reset_count, qpair->chip_reset);
ha->tgt.tgt_ops->free_mcmd(mcmd);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return;
}
@@ -2140,21 +2180,21 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
ELS_PRLO ||
mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
ELS_TPRLO) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x2106,
"TM response logo %phC status %#x state %#x",
mcmd->sess->port_name, mcmd->fc_tm_rsp,
mcmd->flags);
qlt_schedule_sess_for_deletion_lock(mcmd->sess);
} else {
- qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
- 0, 0, 0, 0, 0, 0);
+ qlt_send_notify_ack(vha->hw->base_qpair,
+ &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
}
} else {
if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
- qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
+ qlt_24xx_send_abts_resp(qpair, &mcmd->orig_iocb.abts,
mcmd->fc_tm_rsp, false);
else
- qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
+ qlt_24xx_send_task_mgmt_ctio(qpair, mcmd,
mcmd->fc_tm_rsp);
}
/*
@@ -2166,7 +2206,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
* qlt_xmit_tm_rsp() returns here..
*/
ha->tgt.tgt_ops->free_mcmd(mcmd);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);
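
Note the locking switch in qlt_xmit_tm_rsp(): the global ha->hardware_lock is replaced by the qpair's qp_lock_ptr. A sketch of the assumed wiring, under which the base qpair keeps aliasing the legacy lock so single-queue behavior is unchanged (the qp_lock field and this initialization are assumptions, not shown in this hunk):

    /* Assumed setup elsewhere in the driver: the base qpair points
     * at ha->hardware_lock, additional qpairs get a private lock.
     */
    spin_lock_init(&qpair->qp_lock);
    if (qpair == ha->base_qpair)
            qpair->qp_lock_ptr = &ha->hardware_lock;
    else
            qpair->qp_lock_ptr = &qpair->qp_lock;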
@@ -2178,7 +2218,7 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
BUG_ON(cmd->sg_cnt == 0);
prm->sg = (struct scatterlist *)cmd->sg;
- prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
+ prm->seg_cnt = pci_map_sg(cmd->qpair->pdev, cmd->sg,
cmd->sg_cnt, cmd->dma_data_direction);
if (unlikely(prm->seg_cnt == 0))
goto out_err;
@@ -2190,10 +2230,10 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
* If greater than four sg entries then we need to allocate
* the continuation entries
*/
- if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
+ if (prm->seg_cnt > QLA_TGT_DATASEGS_PER_CMD_24XX)
prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
- prm->tgt->datasegs_per_cmd,
- prm->tgt->datasegs_per_cont);
+ QLA_TGT_DATASEGS_PER_CMD_24XX,
+ QLA_TGT_DATASEGS_PER_CONT_24XX);
} else {
/* DIF */
if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
@@ -2205,7 +2245,7 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
if (cmd->prot_sg_cnt) {
prm->prot_sg = cmd->prot_sg;
- prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev,
+ prm->prot_seg_cnt = pci_map_sg(cmd->qpair->pdev,
cmd->prot_sg, cmd->prot_sg_cnt,
cmd->dma_data_direction);
if (unlikely(prm->prot_seg_cnt == 0))
@@ -2225,7 +2265,7 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
return 0;
out_err:
- ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
+ ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d,
"qla_target(%d): PCI mapping failed: sg_cnt=%d",
0, prm->cmd->sg_cnt);
return -1;
@@ -2233,53 +2273,50 @@ out_err:
static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
- struct qla_hw_data *ha = vha->hw;
-
+ struct qla_hw_data *ha;
+ struct qla_qpair *qpair;
if (!cmd->sg_mapped)
return;
- pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
+ qpair = cmd->qpair;
+
+ pci_unmap_sg(qpair->pdev, cmd->sg, cmd->sg_cnt,
+ cmd->dma_data_direction);
cmd->sg_mapped = 0;
if (cmd->prot_sg_cnt)
- pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
+ pci_unmap_sg(qpair->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
cmd->dma_data_direction);
if (!cmd->ctx)
return;
-
+ ha = vha->hw;
if (cmd->ctx_dsd_alloced)
qla2x00_clean_dsd_pool(ha, cmd->ctx);
dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
}
-static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
+static int qlt_check_reserve_free_req(struct qla_qpair *qpair,
uint32_t req_cnt)
{
- uint32_t cnt, cnt_in;
+ uint32_t cnt;
+ struct req_que *req = qpair->req;
- if (vha->req->cnt < (req_cnt + 2)) {
- cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out);
- cnt_in = (uint16_t)RD_REG_DWORD(vha->req->req_q_in);
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr :
+ RD_REG_DWORD_RELAXED(req->req_q_out));
- if (vha->req->ring_index < cnt)
- vha->req->cnt = cnt - vha->req->ring_index;
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
else
- vha->req->cnt = vha->req->length -
- (vha->req->ring_index - cnt);
-
- if (unlikely(vha->req->cnt < (req_cnt + 2))) {
- ql_dbg(ql_dbg_io, vha, 0x305a,
- "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n",
- vha->vp_idx, vha->req->ring_index,
- vha->req->cnt, req_cnt, cnt, cnt_in,
- vha->req->length);
+ req->cnt = req->length - (req->ring_index - cnt);
+
+ if (unlikely(req->cnt < (req_cnt + 2)))
return -EAGAIN;
- }
}
- vha->req->cnt -= req_cnt;
+ req->cnt -= req_cnt;
return 0;
}
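
The rewritten reservation check reads the consumer index from the shadow area when the firmware publishes one, then recomputes the free span from scratch. A self-contained sketch of the arithmetic, with simplified types:

    /* Free entries in a circular ring: 'in' is our producer index
     * (ring_index), 'out' is the firmware's consumer index, 'length'
     * is the ring size in entries. Mirrors the computation above.
     */
    static unsigned int ring_free_entries(unsigned int in,
                    unsigned int out, unsigned int length)
    {
            if (in < out)
                    return out - in;        /* gap up to the consumer */
            return length - (in - out);     /* free span wraps around */
    }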
@@ -2287,67 +2324,73 @@ static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
/*
* ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
*/
-static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
+static inline void *qlt_get_req_pkt(struct req_que *req)
{
/* Adjust ring index. */
- vha->req->ring_index++;
- if (vha->req->ring_index == vha->req->length) {
- vha->req->ring_index = 0;
- vha->req->ring_ptr = vha->req->ring;
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
} else {
- vha->req->ring_ptr++;
+ req->ring_ptr++;
}
- return (cont_entry_t *)vha->req->ring_ptr;
+ return (cont_entry_t *)req->ring_ptr;
}
/* ha->hardware_lock supposed to be held on entry */
-static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
+static inline uint32_t qlt_make_handle(struct qla_qpair *qpair)
{
- struct qla_hw_data *ha = vha->hw;
uint32_t h;
+ int index;
+ uint8_t found = 0;
+ struct req_que *req = qpair->req;
+
+ h = req->current_outstanding_cmd;
+
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
+ h++;
+ if (h == req->num_outstanding_cmds)
+ h = 1;
- h = ha->tgt.current_handle;
- /* always increment cmd handle */
- do {
- ++h;
- if (h > DEFAULT_OUTSTANDING_COMMANDS)
- h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
- if (h == ha->tgt.current_handle) {
- ql_dbg(ql_dbg_io, vha, 0x305b,
- "qla_target(%d): Ran out of "
- "empty cmd slots in ha %p\n", vha->vp_idx, ha);
- h = QLA_TGT_NULL_HANDLE;
+ if (h == QLA_TGT_SKIP_HANDLE)
+ continue;
+
+ if (!req->outstanding_cmds[h]) {
+ found = 1;
break;
}
- } while ((h == QLA_TGT_NULL_HANDLE) ||
- (h == QLA_TGT_SKIP_HANDLE) ||
- (ha->tgt.cmds[h-1] != NULL));
+ }
- if (h != QLA_TGT_NULL_HANDLE)
- ha->tgt.current_handle = h;
+ if (found) {
+ req->current_outstanding_cmd = h;
+ } else {
+ ql_dbg(ql_dbg_io, qpair->vha, 0x305b,
+ "qla_target(%d): Ran out of empty cmd slots\n",
+ qpair->vha->vp_idx);
+ h = QLA_TGT_NULL_HANDLE;
+ }
return h;
}
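
A found handle is immediately paired with a slot in the request queue's outstanding_cmds[] array, which target mode now shares with initiator-mode SRBs instead of keeping its own ha->tgt.cmds[] table. A sketch of the consumer side, mirroring qlt_24xx_build_ctio_pkt() below (fragment only; surrounding declarations assumed):

    h = qlt_make_handle(qpair);
    if (h != QLA_TGT_NULL_HANDLE) {
            /* park the command; the CTIO handle encodes queue id + slot */
            qpair->req->outstanding_cmds[h] = (srb_t *)cmd;
            pkt->handle = MAKE_HANDLE(qpair->req->id, h) |
                          CTIO_COMPLETION_HANDLE_MARK;
    }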
/* ha->hardware_lock supposed to be held on entry */
-static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
- struct scsi_qla_host *vha)
+static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
+ struct qla_tgt_prm *prm)
{
uint32_t h;
struct ctio7_to_24xx *pkt;
- struct qla_hw_data *ha = vha->hw;
struct atio_from_isp *atio = &prm->cmd->atio;
uint16_t temp;
- pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
+ pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr;
prm->pkt = pkt;
memset(pkt, 0, sizeof(*pkt));
pkt->entry_type = CTIO_TYPE7;
pkt->entry_count = (uint8_t)prm->req_cnt;
- pkt->vp_index = vha->vp_idx;
+ pkt->vp_index = prm->cmd->vp_idx;
- h = qlt_make_handle(vha);
+ h = qlt_make_handle(qpair);
if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
/*
* CTIO type 7 from the firmware doesn't provide a way to
@@ -2356,16 +2399,18 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
*/
return -EAGAIN;
} else
- ha->tgt.cmds[h - 1] = prm->cmd;
+ qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
- pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
- pkt->nport_handle = prm->cmd->loop_id;
+ pkt->handle = MAKE_HANDLE(qpair->req->id, h);
+ pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
+ pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
pkt->exchange_addr = atio->u.isp24.exchange_addr;
- pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
+ temp = atio->u.isp24.attr << 9;
+ pkt->u.status0.flags |= cpu_to_le16(temp);
temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
pkt->u.status0.ox_id = cpu_to_le16(temp);
pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
@@ -2377,17 +2422,16 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
* ha->hardware_lock supposed to be held on entry. We have already made sure
* that there is sufficient amount of request entries to not drop it.
*/
-static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
- struct scsi_qla_host *vha)
+static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
{
int cnt;
uint32_t *dword_ptr;
- int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
/* Build continuation packets */
while (prm->seg_cnt > 0) {
cont_a64_entry_t *cont_pkt64 =
- (cont_a64_entry_t *)qlt_get_req_pkt(vha);
+ (cont_a64_entry_t *)qlt_get_req_pkt(
+ prm->cmd->qpair->req);
/*
* Make sure that from cont_pkt64 none of
@@ -2401,30 +2445,18 @@ static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
cont_pkt64->entry_count = 1;
cont_pkt64->sys_define = 0;
- if (enable_64bit_addressing) {
- cont_pkt64->entry_type = CONTINUE_A64_TYPE;
- dword_ptr =
- (uint32_t *)&cont_pkt64->dseg_0_address;
- } else {
- cont_pkt64->entry_type = CONTINUE_TYPE;
- dword_ptr =
- (uint32_t *)&((cont_entry_t *)
- cont_pkt64)->dseg_0_address;
- }
+ cont_pkt64->entry_type = CONTINUE_A64_TYPE;
+ dword_ptr = (uint32_t *)&cont_pkt64->dseg_0_address;
/* Load continuation entry data segments */
for (cnt = 0;
- cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
+ cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt;
cnt++, prm->seg_cnt--) {
*dword_ptr++ =
cpu_to_le32(pci_dma_lo32
(sg_dma_address(prm->sg)));
- if (enable_64bit_addressing) {
- *dword_ptr++ =
- cpu_to_le32(pci_dma_hi32
- (sg_dma_address
- (prm->sg)));
- }
+ *dword_ptr++ = cpu_to_le32(pci_dma_hi32
+ (sg_dma_address(prm->sg)));
*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
prm->sg = sg_next(prm->sg);
@@ -2436,12 +2468,10 @@ static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
* ha->hardware_lock supposed to be held on entry. We have already made sure
* that there is sufficient amount of request entries to not drop it.
*/
-static void qlt_load_data_segments(struct qla_tgt_prm *prm,
- struct scsi_qla_host *vha)
+static void qlt_load_data_segments(struct qla_tgt_prm *prm)
{
int cnt;
uint32_t *dword_ptr;
- int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
@@ -2464,21 +2494,20 @@ static void qlt_load_data_segments(struct qla_tgt_prm *prm,
/* Load command entry data segments */
for (cnt = 0;
- (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
+ (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt;
cnt++, prm->seg_cnt--) {
*dword_ptr++ =
cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
- if (enable_64bit_addressing) {
- *dword_ptr++ =
- cpu_to_le32(pci_dma_hi32(
- sg_dma_address(prm->sg)));
- }
+
+ *dword_ptr++ = cpu_to_le32(pci_dma_hi32(
+ sg_dma_address(prm->sg)));
+
*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
prm->sg = sg_next(prm->sg);
}
- qlt_load_cont_data_segments(prm, vha);
+ qlt_load_cont_data_segments(prm);
}
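
With the 32-bit-only branch gone, every data segment is emitted as a fixed three-dword triplet, which is what both loaders above now assume:

    /* One 64-bit data segment: address low, address high, length,
     * all little-endian. Matches the loop bodies above.
     */
    *dword_ptr++ = cpu_to_le32(pci_dma_lo32(sg_dma_address(sg)));
    *dword_ptr++ = cpu_to_le32(pci_dma_hi32(sg_dma_address(sg)));
    *dword_ptr++ = cpu_to_le32(sg_dma_len(sg));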
static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
@@ -2498,35 +2527,35 @@ static void qlt_print_dif_err(struct qla_tgt_prm *prm)
/* ASCQ */
switch (prm->sense_buffer[13]) {
case 1:
- ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b,
"BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
"se_cmd=%p tag[%x]",
cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
cmd->atio.u.isp24.exchange_addr);
break;
case 2:
- ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c,
"BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
"se_cmd=%p tag[%x]",
cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
cmd->atio.u.isp24.exchange_addr);
break;
case 3:
- ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f,
"BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
"se_cmd=%p tag[%x]",
cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
cmd->atio.u.isp24.exchange_addr);
break;
default:
- ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xe010,
"BE detected Dif ERR: lba[%llx|%lld] len[%x] "
"se_cmd=%p tag[%x]",
cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
cmd->atio.u.isp24.exchange_addr);
break;
}
- ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xffff, cmd->cdb, 16);
+ ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16);
}
}
@@ -2537,24 +2566,23 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
uint32_t *full_req_cnt)
{
- struct qla_tgt *tgt = cmd->tgt;
- struct scsi_qla_host *vha = tgt->vha;
- struct qla_hw_data *ha = vha->hw;
struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct qla_qpair *qpair = cmd->qpair;
prm->cmd = cmd;
- prm->tgt = tgt;
+ prm->tgt = cmd->tgt;
+ prm->pkt = NULL;
prm->rq_result = scsi_status;
prm->sense_buffer = &cmd->sense_buffer[0];
prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
prm->sg = NULL;
prm->seg_cnt = -1;
prm->req_cnt = 1;
+ prm->residual = 0;
prm->add_status_pkt = 0;
-
- /* Send marker if required */
- if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
- return -EFAULT;
+ prm->prot_sg = NULL;
+ prm->prot_seg_cnt = 0;
+ prm->tot_dsds = 0;
if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
if (qlt_pci_map_calc_cnt(prm) != 0)
@@ -2565,7 +2593,7 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
prm->residual = se_cmd->residual_count;
- ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c,
+ ql_dbg_qp(ql_dbg_io + ql_dbg_verbose, qpair, 0x305c,
"Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
prm->residual, se_cmd->tag,
se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
@@ -2573,7 +2601,7 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
prm->rq_result |= SS_RESIDUAL_UNDER;
} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
prm->residual = se_cmd->residual_count;
- ql_dbg(ql_dbg_io, vha, 0x305d,
+ ql_dbg_qp(ql_dbg_io, qpair, 0x305d,
"Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
@@ -2587,7 +2615,7 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
*/
if (qlt_has_data(cmd)) {
if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
- (IS_FWI2_CAPABLE(ha) &&
+ (IS_FWI2_CAPABLE(cmd->vha->hw) &&
(prm->rq_result != 0))) {
prm->add_status_pkt = 1;
(*full_req_cnt)++;
@@ -2598,17 +2626,17 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
return 0;
}
-static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
- struct qla_tgt_cmd *cmd, int sending_sense)
+static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd,
+ int sending_sense)
{
- if (ha->tgt.enable_class_2)
+ if (cmd->qpair->enable_class_2)
return 0;
if (sending_sense)
return cmd->conf_compl_supported;
else
- return ha->tgt.enable_explicit_conf &&
- cmd->conf_compl_supported;
+ return cmd->qpair->enable_explicit_conf &&
+ cmd->conf_compl_supported;
}
static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
@@ -2617,7 +2645,7 @@ static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
(uint32_t)sizeof(ctio->u.status1.sense_data));
ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
- if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
+ if (qlt_need_explicit_conf(prm->cmd, 0)) {
ctio->u.status0.flags |= cpu_to_le16(
CTIO7_FLAGS_EXPLICIT_CONFORM |
CTIO7_FLAGS_CONFORM_REQ);
@@ -2627,9 +2655,9 @@ static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
int i;
- if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
+ if (qlt_need_explicit_conf(prm->cmd, 1)) {
if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) {
- ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
+ ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017,
"Skipping EXPLICIT_CONFORM and "
"CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
"non GOOD status\n");
@@ -2797,7 +2825,7 @@ qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
}
static inline int
-qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
+qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
{
uint32_t *cur_dsd;
uint32_t transfer_length = 0;
@@ -2816,16 +2844,17 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
struct atio_from_isp *atio = &prm->cmd->atio;
struct qla_tc_param tc;
uint16_t t16;
+ scsi_qla_host_t *vha = cmd->vha;
ha = vha->hw;
- pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
+ pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr;
prm->pkt = pkt;
memset(pkt, 0, sizeof(*pkt));
- ql_dbg(ql_dbg_tgt, vha, 0xe071,
+ ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071,
"qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
- vha->vp_idx, __func__, se_cmd, se_cmd->prot_op,
+ cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op,
prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);
if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
@@ -2888,9 +2917,9 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
/* Update entry type to indicate Command Type CRC_2 IOCB */
pkt->entry_type = CTIO_CRC2;
pkt->entry_count = 1;
- pkt->vp_index = vha->vp_idx;
+ pkt->vp_index = cmd->vp_idx;
- h = qlt_make_handle(vha);
+ h = qlt_make_handle(qpair);
if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
/*
* CTIO type 7 from the firmware doesn't provide a way to
@@ -2899,9 +2928,10 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
*/
return -EAGAIN;
} else
- ha->tgt.cmds[h-1] = prm->cmd;
+ qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
- pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
+ pkt->handle = MAKE_HANDLE(qpair->req->id, h);
+ pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
@@ -3005,7 +3035,7 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
crc_queuing_error:
/* Cleanup will be performed by the caller */
- vha->hw->tgt.cmds[h - 1] = NULL;
+ qpair->req->outstanding_cmds[h] = NULL;
return QLA_FUNCTION_FAILED;
}
@@ -3018,33 +3048,28 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
uint8_t scsi_status)
{
struct scsi_qla_host *vha = cmd->vha;
- struct qla_hw_data *ha = vha->hw;
+ struct qla_qpair *qpair = cmd->qpair;
struct ctio7_to_24xx *pkt;
struct qla_tgt_prm prm;
uint32_t full_req_cnt = 0;
unsigned long flags = 0;
int res;
- spin_lock_irqsave(&ha->hardware_lock, flags);
if (cmd->sess && cmd->sess->deleted) {
cmd->state = QLA_TGT_STATE_PROCESSED;
if (cmd->sess->logout_completed)
/* no need to terminate. FW already freed exchange. */
qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
else
- qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ qlt_send_term_exchange(qpair, cmd, &cmd->atio, 0, 0);
return 0;
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
- memset(&prm, 0, sizeof(prm));
- ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
- "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n",
+ ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
+ "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n",
(xmit_type & QLA_TGT_XMIT_STATUS) ?
1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
- &cmd->se_cmd);
+ &cmd->se_cmd, qpair->id);
res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
&full_req_cnt);
@@ -3052,39 +3077,39 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
return res;
}
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
if (xmit_type == QLA_TGT_XMIT_STATUS)
- vha->tgt_counters.core_qla_snd_status++;
+ qpair->tgt_counters.core_qla_snd_status++;
else
- vha->tgt_counters.core_qla_que_buf++;
+ qpair->tgt_counters.core_qla_que_buf++;
- if (!ha->flags.fw_started || cmd->reset_count != ha->chip_reset) {
+ if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) {
/*
* Either the port is not online or this request was from
* previous life, just abort the processing.
*/
cmd->state = QLA_TGT_STATE_PROCESSED;
qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
- ql_dbg(ql_dbg_async, vha, 0xe101,
+ ql_dbg_qp(ql_dbg_async, qpair, 0xe101,
"RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
vha->flags.online, qla2x00_reset_active(vha),
- cmd->reset_count, ha->chip_reset);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ cmd->reset_count, qpair->chip_reset);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return 0;
}
/* Does F/W have an IOCBs for this request */
- res = qlt_check_reserve_free_req(vha, full_req_cnt);
+ res = qlt_check_reserve_free_req(qpair, full_req_cnt);
if (unlikely(res))
goto out_unmap_unlock;
if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
- res = qlt_build_ctio_crc2_pkt(&prm, vha);
+ res = qlt_build_ctio_crc2_pkt(qpair, &prm);
else
- res = qlt_24xx_build_ctio_pkt(&prm, vha);
+ res = qlt_24xx_build_ctio_pkt(qpair, &prm);
if (unlikely(res != 0)) {
- vha->req->cnt += full_req_cnt;
+ qpair->req->cnt += full_req_cnt;
goto out_unmap_unlock;
}
@@ -3096,7 +3121,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
CTIO7_FLAGS_STATUS_MODE_0);
if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
- qlt_load_data_segments(&prm, vha);
+ qlt_load_data_segments(&prm);
if (prm.add_status_pkt == 0) {
if (xmit_type & QLA_TGT_XMIT_STATUS) {
@@ -3106,7 +3131,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
cpu_to_le32(prm.residual);
pkt->u.status0.flags |= cpu_to_le16(
CTIO7_FLAGS_SEND_STATUS);
- if (qlt_need_explicit_conf(ha, cmd, 0)) {
+ if (qlt_need_explicit_conf(cmd, 0)) {
pkt->u.status0.flags |=
cpu_to_le16(
CTIO7_FLAGS_EXPLICIT_CONFORM |
@@ -3121,9 +3146,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
* req_pkt().
*/
struct ctio7_to_24xx *ctio =
- (struct ctio7_to_24xx *)qlt_get_req_pkt(vha);
+ (struct ctio7_to_24xx *)qlt_get_req_pkt(
+ qpair->req);
- ql_dbg(ql_dbg_io, vha, 0x305e,
+ ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e,
"Building additional status packet 0x%p.\n",
ctio);
@@ -3150,7 +3176,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
*/
qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
&prm);
- pr_debug("Status CTIO7: %p\n", ctio);
}
} else
qlt_24xx_init_ctio_to_isp(pkt, &prm);
@@ -3161,14 +3186,17 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
/* Memory Barrier */
wmb();
- qla2x00_start_iocbs(vha, vha->req);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (qpair->reqq_start_iocbs)
+ qpair->reqq_start_iocbs(qpair);
+ else
+ qla2x00_start_iocbs(vha, qpair->req);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return 0;
out_unmap_unlock:
qlt_unmap_sg(vha, cmd);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return res;
}
@@ -3178,11 +3206,11 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
{
struct ctio7_to_24xx *pkt;
struct scsi_qla_host *vha = cmd->vha;
- struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt = cmd->tgt;
struct qla_tgt_prm prm;
- unsigned long flags;
+ unsigned long flags = 0;
int res = 0;
+ struct qla_qpair *qpair = cmd->qpair;
memset(&prm, 0, sizeof(prm));
prm.cmd = cmd;
@@ -3190,17 +3218,11 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
prm.sg = NULL;
prm.req_cnt = 1;
- /* Send marker if required */
- if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
- return -EIO;
-
/* Calculate number of entries and segments required */
if (qlt_pci_map_calc_cnt(&prm) != 0)
return -EAGAIN;
- spin_lock_irqsave(&ha->hardware_lock, flags);
-
- if (!ha->flags.fw_started || (cmd->reset_count != ha->chip_reset) ||
+ if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
(cmd->sess && cmd->sess->deleted)) {
/*
* Either the port is not online or this request was from
@@ -3208,25 +3230,25 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
*/
cmd->state = QLA_TGT_STATE_NEED_DATA;
qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
- ql_dbg(ql_dbg_async, vha, 0xe102,
+ ql_dbg_qp(ql_dbg_async, qpair, 0xe102,
"RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
vha->flags.online, qla2x00_reset_active(vha),
- cmd->reset_count, ha->chip_reset);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ cmd->reset_count, qpair->chip_reset);
return 0;
}
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
/* Does F/W have an IOCBs for this request */
- res = qlt_check_reserve_free_req(vha, prm.req_cnt);
+ res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
if (res != 0)
goto out_unlock_free_unmap;
if (cmd->se_cmd.prot_op)
- res = qlt_build_ctio_crc2_pkt(&prm, vha);
+ res = qlt_build_ctio_crc2_pkt(qpair, &prm);
else
- res = qlt_24xx_build_ctio_pkt(&prm, vha);
+ res = qlt_24xx_build_ctio_pkt(qpair, &prm);
if (unlikely(res != 0)) {
- vha->req->cnt += prm.req_cnt;
+ qpair->req->cnt += prm.req_cnt;
goto out_unlock_free_unmap;
}
@@ -3235,21 +3257,24 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
CTIO7_FLAGS_STATUS_MODE_0);
if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
- qlt_load_data_segments(&prm, vha);
+ qlt_load_data_segments(&prm);
cmd->state = QLA_TGT_STATE_NEED_DATA;
cmd->cmd_sent_to_fw = 1;
/* Memory Barrier */
wmb();
- qla2x00_start_iocbs(vha, vha->req);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (qpair->reqq_start_iocbs)
+ qpair->reqq_start_iocbs(qpair);
+ else
+ qla2x00_start_iocbs(vha, qpair->req);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return res;
out_unlock_free_unmap:
qlt_unmap_sg(vha, cmd);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return res;
}
@@ -3260,7 +3285,7 @@ EXPORT_SYMBOL(qlt_rdy_to_xfer);
* it is assumed either hardware_lock or qpair lock is held.
*/
static void
-qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
+qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
struct ctio_crc_from_fw *sts)
{
uint8_t *ap = &sts->actual_dif[0];
@@ -3268,6 +3293,7 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
uint64_t lba = cmd->se_cmd.t_task_lba;
uint8_t scsi_status, sense_key, asc, ascq;
unsigned long flags;
+ struct scsi_qla_host *vha = cmd->vha;
cmd->trc_flags |= TRC_DIF_ERR;
@@ -3286,15 +3312,12 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
/* check appl tag */
if (cmd->e_app_tag != cmd->a_app_tag) {
- ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
- "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
- "Ref[%x|%x], App[%x|%x], "
- "Guard [%x|%x] cmd=%p ox_id[%04x]",
- cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
- cmd->a_ref_tag, cmd->e_ref_tag,
- cmd->a_app_tag, cmd->e_app_tag,
- cmd->a_guard, cmd->e_guard,
- cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d,
+ "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
+ cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+ cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
+ cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
+ cmd->atio.u.isp24.fcp_hdr.ox_id);
cmd->dif_err_code = DIF_ERR_APP;
scsi_status = SAM_STAT_CHECK_CONDITION;
@@ -3305,15 +3328,12 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
/* check ref tag */
if (cmd->e_ref_tag != cmd->a_ref_tag) {
- ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
- "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
- "Ref[%x|%x], App[%x|%x], "
- "Guard[%x|%x] cmd=%p ox_id[%04x] ",
- cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
- cmd->a_ref_tag, cmd->e_ref_tag,
- cmd->a_app_tag, cmd->e_app_tag,
- cmd->a_guard, cmd->e_guard,
- cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e,
+ "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ",
+ cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+ cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
+ cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
+ cmd->atio.u.isp24.fcp_hdr.ox_id);
cmd->dif_err_code = DIF_ERR_REF;
scsi_status = SAM_STAT_CHECK_CONDITION;
@@ -3325,15 +3345,13 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
/* check guard */
if (cmd->e_guard != cmd->a_guard) {
- ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
- "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
- "Ref[%x|%x], App[%x|%x], "
- "Guard [%x|%x] cmd=%p ox_id[%04x]",
- cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
- cmd->a_ref_tag, cmd->e_ref_tag,
- cmd->a_app_tag, cmd->e_app_tag,
- cmd->a_guard, cmd->e_guard,
- cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xe012,
+ "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
+ cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+ cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
+ cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
+ cmd->atio.u.isp24.fcp_hdr.ox_id);
+
cmd->dif_err_code = DIF_ERR_GRD;
scsi_status = SAM_STAT_CHECK_CONDITION;
sense_key = ABORTED_COMMAND;
@@ -3356,7 +3374,8 @@ out:
}
spin_unlock_irqrestore(&cmd->cmd_lock, flags);
- qlt_send_resp_ctio(vha, cmd, scsi_status, sense_key, asc, ascq);
+ qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc,
+ ascq);
/* assume scsi status gets out on the wire.
* Will not wait for completion.
*/
@@ -3422,9 +3441,6 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
unsigned long flags = 0;
int rc;
- if (qlt_issue_marker(vha, ha_locked) < 0)
- return;
-
if (ha_locked) {
rc = __qlt_send_term_imm_notif(vha, imm);
@@ -3451,21 +3467,24 @@ done:
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}
-/* If hardware_lock held on entry, might drop it, then reaquire */
-/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
-static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
+/*
+ * If hardware_lock held on entry, might drop it, then reacquire
+ * This function sends the appropriate CTIO to ISP 2xxx or 24xx
+ */
+static int __qlt_send_term_exchange(struct qla_qpair *qpair,
struct qla_tgt_cmd *cmd,
struct atio_from_isp *atio)
{
+ struct scsi_qla_host *vha = qpair->vha;
struct ctio7_to_24xx *ctio24;
struct qla_hw_data *ha = vha->hw;
request_t *pkt;
int ret = 0;
uint16_t temp;
- ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
+ ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
- pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
+ pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL);
if (pkt == NULL) {
ql_dbg(ql_dbg_tgt, vha, 0xe050,
"qla_target(%d): %s failed: unable to allocate "
@@ -3483,7 +3502,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
ret = 1;
}
- vha->tgt_counters.num_term_xchg_sent++;
+ qpair->tgt_counters.num_term_xchg_sent++;
pkt->entry_count = 1;
pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
@@ -3496,9 +3515,9 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
ctio24->exchange_addr = atio->u.isp24.exchange_addr;
- ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
- cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
- CTIO7_FLAGS_TERMINATE);
+ temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 |
+ CTIO7_FLAGS_TERMINATE;
+ ctio24->u.status1.flags = cpu_to_le16(temp);
temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
ctio24->u.status1.ox_id = cpu_to_le16(temp);
@@ -3511,28 +3530,35 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
/* Memory Barrier */
wmb();
- qla2x00_start_iocbs(vha, vha->req);
+ if (qpair->reqq_start_iocbs)
+ qpair->reqq_start_iocbs(qpair);
+ else
+ qla2x00_start_iocbs(vha, qpair->req);
return ret;
}
-static void qlt_send_term_exchange(struct scsi_qla_host *vha,
+static void qlt_send_term_exchange(struct qla_qpair *qpair,
struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
int ul_abort)
{
+ struct scsi_qla_host *vha;
unsigned long flags = 0;
int rc;
- if (qlt_issue_marker(vha, ha_locked) < 0)
- return;
+ /* NPIV: the command's vha may differ from the qpair's */
+ if (cmd)
+ vha = cmd->vha;
+ else
+ vha = qpair->vha;
if (ha_locked) {
- rc = __qlt_send_term_exchange(vha, cmd, atio);
+ rc = __qlt_send_term_exchange(qpair, cmd, atio);
if (rc == -ENOMEM)
qlt_alloc_qfull_cmd(vha, atio, 0, 0);
goto done;
}
- spin_lock_irqsave(&vha->hw->hardware_lock, flags);
- rc = __qlt_send_term_exchange(vha, cmd, atio);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ rc = __qlt_send_term_exchange(qpair, cmd, atio);
if (rc == -ENOMEM)
qlt_alloc_qfull_cmd(vha, atio, 0, 0);
@@ -3544,7 +3570,7 @@ done:
}
if (!ha_locked)
- spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return;
}
@@ -3616,17 +3642,17 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
* 1) XFER Rdy completion + CMD_T_ABORT
* 2) TCM TMR - drain_state_list
*/
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
- "multiple abort. %p transport_state %x, t_state %x,"
- " se_cmd_flags %x \n", cmd, cmd->se_cmd.transport_state,
- cmd->se_cmd.t_state,cmd->se_cmd.se_cmd_flags);
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016,
+ "multiple abort. %p transport_state %x, t_state %x, "
+ "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
+ cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
return EIO;
}
cmd->aborted = 1;
cmd->trc_flags |= TRC_ABORT;
spin_unlock_irqrestore(&cmd->cmd_lock, flags);
- qlt_send_term_exchange(vha, cmd, &cmd->atio, 0, 1);
+ qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1);
return 0;
}
EXPORT_SYMBOL(qlt_abort_cmd);
@@ -3665,13 +3691,14 @@ EXPORT_SYMBOL(qlt_free_cmd);
/*
* ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
*/
-static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
+static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
struct qla_tgt_cmd *cmd, uint32_t status)
{
int term = 0;
+ struct scsi_qla_host *vha = qpair->vha;
if (cmd->se_cmd.prot_op)
- ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
"Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
"se_cmd=%p tag[%x] op %#x/%s",
cmd->lba, cmd->lba,
@@ -3688,55 +3715,53 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
term = 1;
if (term)
- qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
+ qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);
return term;
}
-/* ha->hardware_lock supposed to be held on entry */
-static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
- uint32_t handle)
-{
- struct qla_hw_data *ha = vha->hw;
-
- handle--;
- if (ha->tgt.cmds[handle] != NULL) {
- struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
- ha->tgt.cmds[handle] = NULL;
- return cmd;
- } else
- return NULL;
-}
/* ha->hardware_lock supposed to be held on entry */
static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
- uint32_t handle, void *ctio)
+ struct rsp_que *rsp, uint32_t handle, void *ctio)
{
struct qla_tgt_cmd *cmd = NULL;
+ struct req_que *req;
+ int qid = GET_QID(handle);
+ uint32_t h = handle & ~QLA_TGT_HANDLE_MASK;
- /* Clear out internal marks */
- handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
- CTIO_INTERMEDIATE_HANDLE_MARK);
+ if (unlikely(h == QLA_TGT_SKIP_HANDLE))
+ return NULL;
- if (handle != QLA_TGT_NULL_HANDLE) {
- if (unlikely(handle == QLA_TGT_SKIP_HANDLE))
- return NULL;
+ if (qid == rsp->req->id) {
+ req = rsp->req;
+ } else if (vha->hw->req_q_map[qid]) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a,
+ "qla_target(%d): CTIO completion with different QID %d handle %x\n",
+ vha->vp_idx, rsp->id, handle);
+ req = vha->hw->req_q_map[qid];
+ } else {
+ return NULL;
+ }
+
+ h &= QLA_CMD_HANDLE_MASK;
- /* handle-1 is actually used */
- if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
+ if (h != QLA_TGT_NULL_HANDLE) {
+ if (unlikely(h > req->num_outstanding_cmds)) {
ql_dbg(ql_dbg_tgt, vha, 0xe052,
"qla_target(%d): Wrong handle %x received\n",
vha->vp_idx, handle);
return NULL;
}
- cmd = qlt_get_cmd(vha, handle);
+
+ cmd = (struct qla_tgt_cmd *)req->outstanding_cmds[h];
if (unlikely(cmd == NULL)) {
- ql_dbg(ql_dbg_tgt, vha, 0xe053,
- "qla_target(%d): Suspicious: unable to "
- "find the command with handle %x\n", vha->vp_idx,
- handle);
+ ql_dbg(ql_dbg_async, vha, 0xe053,
+ "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
+ vha->vp_idx, handle, req->id, rsp->id);
return NULL;
}
+ req->outstanding_cmds[h] = NULL;
} else if (ctio != NULL) {
/* We can't get loop ID from CTIO7 */
ql_dbg(ql_dbg_tgt, vha, 0xe054,
@@ -3749,33 +3774,30 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
}
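
qlt_ctio_to_cmd() is the decode side of the handle scheme set up in qlt_make_handle() and qlt_24xx_build_ctio_pkt(): GET_QID() recovers the request-queue id that MAKE_HANDLE() packed in, and the remaining bits index outstanding_cmds[]. A sketch of the round trip (mask and macro names come from the diff; the exact bit positions live in the driver headers):

    /* encode at submission time */
    u32 handle = MAKE_HANDLE(req->id, h) | CTIO_COMPLETION_HANDLE_MARK;

    /* decode at completion time, as in qlt_ctio_to_cmd() above */
    int qid  = GET_QID(handle);
    u32 slot = (handle & ~QLA_TGT_HANDLE_MASK) & QLA_CMD_HANDLE_MASK;
    cmd = (struct qla_tgt_cmd *)req->outstanding_cmds[slot];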
/* hardware_lock should be held by caller. */
-static void
+void
qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
struct qla_hw_data *ha = vha->hw;
- uint32_t handle;
if (cmd->sg_mapped)
qlt_unmap_sg(vha, cmd);
- handle = qlt_make_handle(vha);
-
/* TODO: fix debug message type and ids. */
if (cmd->state == QLA_TGT_STATE_PROCESSED) {
ql_dbg(ql_dbg_io, vha, 0xff00,
- "HOST-ABORT: handle=%d, state=PROCESSED.\n", handle);
+ "HOST-ABORT: state=PROCESSED.\n");
} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
cmd->write_data_transferred = 0;
cmd->state = QLA_TGT_STATE_DATA_IN;
ql_dbg(ql_dbg_io, vha, 0xff01,
- "HOST-ABORT: handle=%d, state=DATA_IN.\n", handle);
+ "HOST-ABORT: state=DATA_IN.\n");
ha->tgt.tgt_ops->handle_data(cmd);
return;
} else {
ql_dbg(ql_dbg_io, vha, 0xff03,
- "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle,
+ "HOST-ABORT: state=BAD(%d).\n",
cmd->state);
dump_stack();
}
@@ -3784,51 +3806,16 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
ha->tgt.tgt_ops->free_cmd(cmd);
}
-void
-qlt_host_reset_handler(struct qla_hw_data *ha)
-{
- struct qla_tgt_cmd *cmd;
- unsigned long flags;
- scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
- scsi_qla_host_t *vha = NULL;
- struct qla_tgt *tgt = base_vha->vha_tgt.qla_tgt;
- uint32_t i;
-
- if (!base_vha->hw->tgt.tgt_ops)
- return;
-
- if (!tgt || qla_ini_mode_enabled(base_vha)) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
- "Target mode disabled\n");
- return;
- }
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xff10,
- "HOST-ABORT-HNDLR: base_vha->dpc_flags=%lx.\n",
- base_vha->dpc_flags);
-
- spin_lock_irqsave(&ha->hardware_lock, flags);
- for (i = 1; i < DEFAULT_OUTSTANDING_COMMANDS + 1; i++) {
- cmd = qlt_get_cmd(base_vha, i);
- if (!cmd)
- continue;
- /* ha->tgt.cmds entry is cleared by qlt_get_cmd. */
- vha = cmd->vha;
- qlt_abort_cmd_on_host_reset(vha, cmd);
- }
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-}
-
-
/*
* ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
*/
-static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
- uint32_t status, void *ctio)
+static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
+ struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio)
{
struct qla_hw_data *ha = vha->hw;
struct se_cmd *se_cmd;
struct qla_tgt_cmd *cmd;
+ struct qla_qpair *qpair = rsp->qpair;
if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
/* That could happen only in case of an error/reset/abort */
@@ -3840,7 +3827,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
return;
}
- cmd = qlt_ctio_to_cmd(vha, handle, ctio);
+ cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
if (cmd == NULL)
return;
@@ -3885,7 +3872,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
*/
cmd->sess->logout_on_delete = 0;
cmd->sess->send_els_logo = 1;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20f8,
"%s %d %8phC post del sess\n",
__func__, __LINE__, cmd->sess->port_name);
@@ -3904,7 +3891,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
*((u64 *)&crc->actual_dif[0]),
*((u64 *)&crc->expected_dif[0]));
- qlt_handle_dif_error(vha, cmd, ctio);
+ qlt_handle_dif_error(qpair, cmd, ctio);
return;
}
default:
@@ -3924,7 +3911,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
(!cmd->aborted)) {
cmd->trc_flags |= TRC_CTIO_ERR;
- if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
+ if (qlt_term_ctio_exchange(qpair, ctio, cmd, status))
return;
}
}
@@ -4000,18 +3987,16 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
{
scsi_qla_host_t *vha = cmd->vha;
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct fc_port *sess = cmd->sess;
struct atio_from_isp *atio = &cmd->atio;
unsigned char *cdb;
unsigned long flags;
uint32_t data_length;
int ret, fcp_task_attr, data_dir, bidi = 0;
+ struct qla_qpair *qpair = cmd->qpair;
cmd->cmd_in_wq = 0;
cmd->trc_flags |= TRC_DO_WORK;
- if (tgt->tgt_stop)
- goto out_term;
if (cmd->aborted) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
@@ -4023,8 +4008,6 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
spin_lock_init(&cmd->cmd_lock);
cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
- cmd->unpacked_lun = scsilun_to_int(
- (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
if (atio->u.isp24.fcp_cmnd.rddata &&
atio->u.isp24.fcp_cmnd.wrdata) {
@@ -4062,12 +4045,12 @@ out_term:
* argument to qlt_send_term_exchange() and free the memory here.
*/
cmd->trc_flags |= TRC_DO_WORK_ERR;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_send_term_exchange(vha, NULL, &cmd->atio, 1, 0);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);
qlt_decr_num_pend_cmds(vha);
percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
ha->tgt.tgt_ops->put_sess(sess);
@@ -4087,6 +4070,110 @@ static void qlt_do_work(struct work_struct *work)
__qlt_do_work(cmd);
}
+void qlt_clr_qp_table(struct scsi_qla_host *vha)
+{
+ unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ void *node;
+ u64 key = 0;
+
+ ql_log(ql_log_info, vha, 0x706c,
+ "User update Number of Active Qpairs %d\n",
+ ha->tgt.num_act_qpairs);
+
+ spin_lock_irqsave(&ha->tgt.atio_lock, flags);
+
+ btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
+ btree_remove64(&tgt->lun_qpair_map, key);
+
+ ha->base_qpair->lun_cnt = 0;
+ for (key = 0; key < ha->max_qpairs; key++)
+ if (ha->queue_pair_map[key])
+ ha->queue_pair_map[key]->lun_cnt = 0;
+
+ spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
+}
+
+static void qlt_assign_qpair(struct scsi_qla_host *vha,
+ struct qla_tgt_cmd *cmd)
+{
+ struct qla_qpair *qpair, *qp;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_qpair_hint *h;
+
+ if (vha->flags.qpairs_available) {
+ h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun);
+ if (unlikely(!h)) {
+ /* spread the lun-to-qpair mapping evenly */
+ int lcnt = 0, rc;
+ struct scsi_qla_host *base_vha =
+ pci_get_drvdata(vha->hw->pdev);
+
+ qpair = vha->hw->base_qpair;
+ if (qpair->lun_cnt == 0) {
+ qpair->lun_cnt++;
+ h = qla_qpair_to_hint(tgt, qpair);
+ BUG_ON(!h);
+ rc = btree_insert64(&tgt->lun_qpair_map,
+ cmd->unpacked_lun, h, GFP_ATOMIC);
+ if (rc) {
+ qpair->lun_cnt--;
+ ql_log(ql_log_info, vha, 0xd037,
+ "Unable to insert lun %llx into lun_qpair_map\n",
+ cmd->unpacked_lun);
+ }
+ goto out;
+ } else {
+ lcnt = qpair->lun_cnt;
+ }
+
+ h = NULL;
+ list_for_each_entry(qp, &base_vha->qp_list,
+ qp_list_elem) {
+ if (qp->lun_cnt == 0) {
+ qp->lun_cnt++;
+ h = qla_qpair_to_hint(tgt, qp);
+ BUG_ON(!h);
+ rc = btree_insert64(&tgt->lun_qpair_map,
+ cmd->unpacked_lun, h, GFP_ATOMIC);
+ if (rc) {
+ qp->lun_cnt--;
+ ql_log(ql_log_info, vha, 0xd038,
+ "Unable to insert lun %llx into lun_qpair_map\n",
+ cmd->unpacked_lun);
+ }
+ qpair = qp;
+ goto out;
+ } else {
+ if (qp->lun_cnt < lcnt) {
+ lcnt = qp->lun_cnt;
+ qpair = qp;
+ continue;
+ }
+ }
+ }
+ BUG_ON(!qpair);
+ qpair->lun_cnt++;
+ h = qla_qpair_to_hint(tgt, qpair);
+ BUG_ON(!h);
+ rc = btree_insert64(&tgt->lun_qpair_map,
+ cmd->unpacked_lun, h, GFP_ATOMIC);
+ if (rc) {
+ qpair->lun_cnt--;
+ ql_log(ql_log_info, vha, 0xd039,
+ "Unable to insert lun %llx into lun_qpair_map\n",
+ cmd->unpacked_lun);
+ }
+ }
+ } else {
+ h = &tgt->qphints[0];
+ }
+out:
+ cmd->qpair = h->qpair;
+ cmd->se_cmd.cpuid = h->cpuid;
+}
+
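
qlt_assign_qpair() keeps the LUN-to-qpair choice sticky by caching a hint in a 64-bit btree keyed on the unpacked LUN; only a miss walks the qpair list for the least-loaded queue. A sketch of the fast path, using the same lib/btree API as the function above:

    /* Fast path: reuse the cached hint for an already-mapped LUN. */
    struct qla_qpair_hint *h;

    h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun);
    if (h) {
            cmd->qpair = h->qpair;
            cmd->se_cmd.cpuid = h->cpuid;
    }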
static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
struct fc_port *sess,
struct atio_from_isp *atio)
@@ -4101,7 +4188,7 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
memset(cmd, 0, sizeof(struct qla_tgt_cmd));
-
+ cmd->cmd_type = TYPE_TGT_CMD;
memcpy(&cmd->atio, atio, sizeof(*atio));
cmd->state = QLA_TGT_STATE_NEW;
cmd->tgt = vha->vha_tgt.qla_tgt;
@@ -4115,14 +4202,15 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
cmd->trc_flags = 0;
cmd->jiffies_at_alloc = get_jiffies_64();
- cmd->reset_count = vha->hw->chip_reset;
+ cmd->unpacked_lun = scsilun_to_int(
+ (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
+ qlt_assign_qpair(vha, cmd);
+ cmd->reset_count = vha->hw->base_qpair->chip_reset;
+ cmd->vp_idx = vha->vp_idx;
return cmd;
}
-static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *,
- uint16_t);
-
static void qlt_create_sess_from_atio(struct work_struct *work)
{
struct qla_tgt_sess_op *op = container_of(work,
@@ -4168,10 +4256,15 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
*/
cmd = qlt_get_tag(vha, sess, &op->atio);
if (!cmd) {
- spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
+ struct qla_qpair *qpair = ha->base_qpair;
+
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ qlt_send_busy(qpair, &op->atio, SAM_STAT_BUSY);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
kfree(op);
return;
}
@@ -4184,9 +4277,7 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
kfree(op);
return;
out_term:
- spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_send_term_exchange(vha, NULL, &op->atio, 1, 0);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ qlt_send_term_exchange(vha->hw->base_qpair, NULL, &op->atio, 0, 0);
kfree(op);
}
@@ -4216,9 +4307,9 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
memcpy(&op->atio, atio, sizeof(*atio));
op->vha = vha;
- spin_lock(&vha->cmd_list_lock);
+ spin_lock_irqsave(&vha->cmd_list_lock, flags);
list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
- spin_unlock(&vha->cmd_list_lock);
+ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
INIT_WORK(&op->work, qlt_create_sess_from_atio);
queue_work(qla_tgt_wq, &op->work);
@@ -4228,7 +4319,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
/* Another WWN used to have our s_id. Our PLOGI scheduled its
* session deletion, but it's still in sess_del_work wq */
if (sess->deleted) {
- ql_dbg(ql_dbg_io, vha, 0x3061,
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
"New command while old session %p is being deleted\n",
sess);
return -EFAULT;
@@ -4238,7 +4329,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
* Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
*/
if (!kref_get_unless_zero(&sess->sess_kref)) {
- ql_dbg(ql_dbg_tgt, vha, 0xffff,
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
"%s: kref_get fail, %8phC oxid %x \n",
__func__, sess->port_name,
be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
@@ -4257,15 +4348,15 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
cmd->cmd_in_wq = 1;
cmd->trc_flags |= TRC_NEW_CMD;
- cmd->se_cmd.cpuid = ha->msix_count ?
- ha->tgt.rspq_vector_cpuid : WORK_CPU_UNBOUND;
spin_lock_irqsave(&vha->cmd_list_lock, flags);
list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
INIT_WORK(&cmd->work, qlt_do_work);
- if (ha->msix_count) {
+ if (vha->flags.qpairs_available) {
+ queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
+ } else if (ha->msix_count) {
if (cmd->atio.u.isp24.fcp_cmnd.rddata)
queue_work_on(smp_processor_id(), qla_tgt_wq,
&cmd->work);
@@ -4275,8 +4366,8 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
} else {
queue_work(qla_tgt_wq, &cmd->work);
}
- return 0;
+ return 0;
}
/* ha->hardware_lock supposed to be held on entry */
@@ -4306,7 +4397,8 @@ static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
}
mcmd->tmr_func = fn;
mcmd->flags = flags;
- mcmd->reset_count = vha->hw->chip_reset;
+ mcmd->reset_count = ha->base_qpair->chip_reset;
+ mcmd->qpair = ha->base_qpair;
switch (fn) {
case QLA_TGT_LUN_RESET:
@@ -4333,13 +4425,12 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt;
struct fc_port *sess;
- uint32_t lun, unpacked_lun;
+ u64 unpacked_lun;
int fn;
unsigned long flags;
tgt = vha->vha_tgt.qla_tgt;
- lun = a->u.isp24.fcp_cmnd.lun;
fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
@@ -4347,7 +4438,8 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
a->u.isp24.fcp_hdr.s_id);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+ unpacked_lun =
+ scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
if (!sess) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
@@ -4370,7 +4462,7 @@ static int __qlt_abort_task(struct scsi_qla_host *vha,
struct atio_from_isp *a = (struct atio_from_isp *)iocb;
struct qla_hw_data *ha = vha->hw;
struct qla_tgt_mgmt_cmd *mcmd;
- uint32_t lun, unpacked_lun;
+ u64 unpacked_lun;
int rc;
mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
@@ -4386,10 +4478,11 @@ static int __qlt_abort_task(struct scsi_qla_host *vha,
memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
sizeof(mcmd->orig_iocb.imm_ntfy));
- lun = a->u.isp24.fcp_cmnd.lun;
- unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
- mcmd->reset_count = vha->hw->chip_reset;
+ unpacked_lun =
+ scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
+ mcmd->reset_count = ha->base_qpair->chip_reset;
mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK;
+ mcmd->qpair = ha->base_qpair;
rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func,
le16_to_cpu(iocb->u.isp2x.seq_id));
@@ -4493,7 +4586,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
* Another wwn used to have our s_id/loop_id
* kill the session, but don't free the loop_id
*/
- ql_dbg(ql_dbg_tgt_tmr, vha, 0xffff,
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01b,
"Invalidating sess %p loop_id %d wwn %llx.\n",
other_sess, other_sess->loop_id, other_wwn);
@@ -4529,12 +4622,13 @@ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
struct qla_tgt_cmd *cmd;
uint32_t key;
int count = 0;
+ unsigned long flags;
key = (((u32)s_id->b.domain << 16) |
((u32)s_id->b.area << 8) |
((u32)s_id->b.al_pa));
- spin_lock(&vha->cmd_list_lock);
+ spin_lock_irqsave(&vha->cmd_list_lock, flags);
list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
@@ -4559,7 +4653,7 @@ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
count++;
}
}
- spin_unlock(&vha->cmd_list_lock);
+ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
return count;
}
@@ -4672,9 +4766,9 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
(sess->d_id.b24 == port_id.b24));
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__, sess->port_name);
+ ql_dbg(ql_dbg_disc, vha, 0x20f9,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__, sess->port_name);
qlt_schedule_sess_for_deletion_lock(sess);
@@ -4744,7 +4838,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
/* Make session global (not used in fabric mode) */
if (ha->current_topology != ISP_CFG_F) {
if (sess) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20fa,
"%s %d %8phC post nack\n",
__func__, __LINE__, sess->port_name);
qla24xx_post_nack_work(vha, sess, iocb,
@@ -4757,7 +4851,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
}
} else {
if (sess) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20fb,
"%s %d %8phC post nack\n",
__func__, __LINE__, sess->port_name);
qla24xx_post_nack_work(vha, sess, iocb,
@@ -4791,7 +4885,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20fc,
"%s: logo %llx res %d sess %p ",
__func__, wwn, res, sess);
if (res == 0) {
@@ -4815,15 +4909,15 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
{
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
if (tgt->link_reinit_iocb_pending) {
- qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
- 0, 0, 0, 0, 0, 0);
+ qlt_send_notify_ack(ha->base_qpair,
+ &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
tgt->link_reinit_iocb_pending = 0;
}
sess = qla2x00_find_fcport_by_wwpn(vha,
iocb->u.isp24.port_name, 1);
if (sess) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20fd,
"sess %p lid %d|%d DS %d LS %d\n",
sess, sess->loop_id, loop_id,
sess->disc_state, sess->fw_login_state);
@@ -4879,8 +4973,8 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
le16_to_cpu(iocb->u.isp24.nport_handle),
iocb->u.isp24.status_subcode);
if (tgt->link_reinit_iocb_pending) {
- qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
- 0, 0, 0, 0, 0, 0);
+ qlt_send_notify_ack(ha->base_qpair,
+ &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
}
memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
tgt->link_reinit_iocb_pending = 1;
@@ -4974,33 +5068,36 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
}
if (send_notify_ack)
- qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
+ qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0,
+ 0, 0);
}
/*
* ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
* This function sends busy to ISP 2xxx or 24xx.
*/
-static int __qlt_send_busy(struct scsi_qla_host *vha,
+static int __qlt_send_busy(struct qla_qpair *qpair,
struct atio_from_isp *atio, uint16_t status)
{
+ struct scsi_qla_host *vha = qpair->vha;
struct ctio7_to_24xx *ctio24;
struct qla_hw_data *ha = vha->hw;
request_t *pkt;
struct fc_port *sess = NULL;
unsigned long flags;
+ u16 temp;
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
atio->u.isp24.fcp_hdr.s_id);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
if (!sess) {
- qlt_send_term_exchange(vha, NULL, atio, 1, 0);
+ qlt_send_term_exchange(qpair, NULL, atio, 1, 0);
return 0;
}
/* Sending marker isn't necessary, since we called from ISR */
- pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+ pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
if (!pkt) {
ql_dbg(ql_dbg_io, vha, 0x3063,
"qla_target(%d): %s failed: unable to allocate "
@@ -5008,7 +5105,7 @@ static int __qlt_send_busy(struct scsi_qla_host *vha,
return -ENOMEM;
}
- vha->tgt_counters.num_q_full_sent++;
+ qpair->tgt_counters.num_q_full_sent++;
pkt->entry_count = 1;
pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
@@ -5021,10 +5118,10 @@ static int __qlt_send_busy(struct scsi_qla_host *vha,
ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
ctio24->exchange_addr = atio->u.isp24.exchange_addr;
- ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
- cpu_to_le16(
+ temp = (atio->u.isp24.attr << 9) |
CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
- CTIO7_FLAGS_DONT_RET_CTIO);
+ CTIO7_FLAGS_DONT_RET_CTIO;
+ ctio24->u.status1.flags = cpu_to_le16(temp);
/*
* CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
* if the explicit confirmation is used.
@@ -5033,7 +5130,10 @@ static int __qlt_send_busy(struct scsi_qla_host *vha,
ctio24->u.status1.scsi_status = cpu_to_le16(status);
/* Memory Barrier */
wmb();
- qla2x00_start_iocbs(vha, vha->req);
+ if (qpair->reqq_start_iocbs)
+ qpair->reqq_start_iocbs(qpair);
+ else
+ qla2x00_start_iocbs(vha, qpair->req);
return 0;
}
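Besides retargeting the IOCB path at the queue pair, this hunk untangles an endianness mix-up: the attribute bits were previously OR-ed into a value that had already been through cpu_to_le16(). The fix composes the flags in CPU byte order and converts once. A self-contained sketch of that pattern (the flag values are illustrative, not the real CTIO7 bit definitions):

#include <stdint.h>

/* Stand-in for the kernel's cpu_to_le16(): identity on little-endian
 * hosts, a byte swap on big-endian ones. */
static uint16_t cpu_to_le16_sketch(uint16_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return (uint16_t)((v >> 8) | (v << 8));
#else
	return v;
#endif
}

#define FLAGS_STATUS_MODE_1	0x0002	/* illustrative values */
#define FLAGS_SEND_STATUS	0x8000
#define FLAGS_DONT_RET_CTIO	0x0100

static uint16_t build_ctio_flags(uint8_t attr)
{
	uint16_t temp;

	/* Compose everything in native byte order first... */
	temp = (uint16_t)(attr << 9) | FLAGS_STATUS_MODE_1 |
	       FLAGS_SEND_STATUS | FLAGS_DONT_RET_CTIO;
	/* ...then convert exactly once at the wire boundary. */
	return cpu_to_le16_sketch(temp);
}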
@@ -5052,6 +5152,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
struct se_session *se_sess;
struct qla_tgt_cmd *cmd;
int tag;
+ unsigned long flags;
if (unlikely(tgt->tgt_stop)) {
ql_dbg(ql_dbg_io, vha, 0x300a,
@@ -5110,8 +5211,9 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
cmd->tgt = vha->vha_tgt.qla_tgt;
cmd->vha = vha;
- cmd->reset_count = vha->hw->chip_reset;
+ cmd->reset_count = ha->base_qpair->chip_reset;
cmd->q_full = 1;
+ cmd->qpair = ha->base_qpair;
if (qfull) {
cmd->q_full = 1;
@@ -5120,6 +5222,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
} else
cmd->term_exchg = 1;
+ spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
vha->hw->tgt.num_qfull_cmds_alloc++;
@@ -5127,35 +5230,41 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
vha->qla_stats.stat_max_qfull_cmds_alloc)
vha->qla_stats.stat_max_qfull_cmds_alloc =
vha->hw->tgt.num_qfull_cmds_alloc;
+ spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
int
-qlt_free_qfull_cmds(struct scsi_qla_host *vha)
+qlt_free_qfull_cmds(struct qla_qpair *qpair)
{
+ struct scsi_qla_host *vha = qpair->vha;
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
struct qla_tgt_cmd *cmd, *tcmd;
- struct list_head free_list;
+ struct list_head free_list, q_full_list;
int rc = 0;
if (list_empty(&ha->tgt.q_full_list))
return 0;
INIT_LIST_HEAD(&free_list);
+ INIT_LIST_HEAD(&q_full_list);
- spin_lock_irqsave(&vha->hw->hardware_lock, flags);
-
+ spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
if (list_empty(&ha->tgt.q_full_list)) {
- spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+ spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
return 0;
}
- list_for_each_entry_safe(cmd, tcmd, &ha->tgt.q_full_list, cmd_list) {
+ list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list);
+ spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
+
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) {
if (cmd->q_full)
/* cmd->state is a borrowed field to hold status */
- rc = __qlt_send_busy(vha, &cmd->atio, cmd->state);
+ rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state);
else if (cmd->term_exchg)
- rc = __qlt_send_term_exchange(vha, NULL, &cmd->atio);
+ rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio);
if (rc == -ENOMEM)
break;
@@ -5179,7 +5288,7 @@ qlt_free_qfull_cmds(struct scsi_qla_host *vha)
/* piggyback on the queue-pair lock for protection */
vha->hw->tgt.num_qfull_cmds_alloc--;
}
- spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
cmd = NULL;
@@ -5190,23 +5299,31 @@ qlt_free_qfull_cmds(struct scsi_qla_host *vha)
*/
qlt_free_cmd(cmd);
}
+
+ if (!list_empty(&q_full_list)) {
+ spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
+ list_splice(&q_full_list, &vha->hw->tgt.q_full_list);
+ spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
+ }
+
return rc;
}
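The drain loop above shows the splice pattern this rework adopts: steal the whole q_full list under its dedicated lock, send busy/term responses under the queue-pair lock, and put anything unsent back afterwards. A userspace analogue with a toy singly linked list (all names hypothetical):

#include <pthread.h>
#include <stddef.h>

struct node { struct node *next; };

static pthread_mutex_t q_full_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *q_full_head;	/* shared, lock-protected */

static int send_busy(struct node *n)	/* may fail, like -ENOMEM */
{
	(void)n;
	return 0;
}

static void drain_q_full(void)
{
	struct node *local, *n, *tail;

	pthread_mutex_lock(&q_full_lock);
	local = q_full_head;		/* "splice": steal the list */
	q_full_head = NULL;
	pthread_mutex_unlock(&q_full_lock);

	while ((n = local) != NULL) {
		if (send_busy(n))	/* stop early on failure */
			break;
		local = n->next;	/* sent entries get freed here */
	}

	if (local) {			/* re-splice the leftovers */
		for (tail = local; tail->next; tail = tail->next)
			;
		pthread_mutex_lock(&q_full_lock);
		tail->next = q_full_head;
		q_full_head = local;
		pthread_mutex_unlock(&q_full_lock);
	}
}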
static void
-qlt_send_busy(struct scsi_qla_host *vha,
- struct atio_from_isp *atio, uint16_t status)
+qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio,
+ uint16_t status)
{
int rc = 0;
+ struct scsi_qla_host *vha = qpair->vha;
- rc = __qlt_send_busy(vha, atio, status);
+ rc = __qlt_send_busy(qpair, atio, status);
if (rc == -ENOMEM)
qlt_alloc_qfull_cmd(vha, atio, status, 1);
}
static int
-qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
- struct atio_from_isp *atio, bool ha_locked)
+qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
+ struct atio_from_isp *atio, uint8_t ha_locked)
{
struct qla_hw_data *ha = vha->hw;
uint16_t status;
@@ -5218,7 +5335,7 @@ qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
if (!ha_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
status = temp_sam_status;
- qlt_send_busy(vha, atio, status);
+ qlt_send_busy(qpair, atio, status);
if (!ha_locked)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -5257,16 +5374,17 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
"sending QUEUE_FULL\n", vha->vp_idx);
if (!ha_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
+ qlt_send_busy(ha->base_qpair, atio,
+ SAM_STAT_TASK_SET_FULL);
if (!ha_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
break;
}
-
-
if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
- rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked);
+ rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair,
+ atio, ha_locked);
if (rc != 0) {
tgt->atio_irq_cmd_count--;
return;
@@ -5278,19 +5396,19 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
if (unlikely(rc != 0)) {
if (rc == -ESRCH) {
if (!ha_locked)
- spin_lock_irqsave
- (&ha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->hardware_lock,
+ flags);
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
- qlt_send_busy(vha, atio, SAM_STAT_BUSY);
+ qlt_send_busy(ha->base_qpair, atio,
+ SAM_STAT_BUSY);
#else
- qlt_send_term_exchange(vha, NULL, atio, 1, 0);
+ qlt_send_term_exchange(ha->base_qpair, NULL,
+ atio, 1, 0);
#endif
-
if (!ha_locked)
- spin_unlock_irqrestore
- (&ha->hardware_lock, flags);
-
+ spin_unlock_irqrestore(
+ &ha->hardware_lock, flags);
} else {
if (tgt->tgt_stop) {
ql_dbg(ql_dbg_tgt, vha, 0xe059,
@@ -5305,7 +5423,8 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
if (!ha_locked)
spin_lock_irqsave(
&ha->hardware_lock, flags);
- qlt_send_busy(vha, atio, SAM_STAT_BUSY);
+ qlt_send_busy(ha->base_qpair,
+ atio, SAM_STAT_BUSY);
if (!ha_locked)
spin_unlock_irqrestore(
&ha->hardware_lock, flags);
@@ -5346,15 +5465,15 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
-static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
+static void qlt_response_pkt(struct scsi_qla_host *vha,
+ struct rsp_que *rsp, response_t *pkt)
{
- struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
if (unlikely(tgt == NULL)) {
ql_dbg(ql_dbg_tgt, vha, 0xe05d,
- "qla_target(%d): Response pkt %x received, but no "
- "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
+ "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n",
+ vha->vp_idx, pkt->entry_type, vha->hw);
return;
}
@@ -5363,14 +5482,12 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
* Otherwise, some commands can get stuck.
*/
- tgt->irq_cmd_count++;
-
switch (pkt->entry_type) {
case CTIO_CRC2:
case CTIO_TYPE7:
{
struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
- qlt_do_ctio_completion(vha, entry->handle,
+ qlt_do_ctio_completion(vha, rsp, entry->handle,
le16_to_cpu(entry->status)|(pkt->entry_status << 16),
entry);
break;
@@ -5389,19 +5506,17 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
break;
}
- rc = qlt_chk_qfull_thresh_hold(vha, atio, true);
- if (rc != 0) {
- tgt->irq_cmd_count--;
+ rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1);
+ if (rc != 0)
return;
- }
rc = qlt_handle_cmd_for_atio(vha, atio);
if (unlikely(rc != 0)) {
if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
- qlt_send_busy(vha, atio, 0);
+ qlt_send_busy(rsp->qpair, atio, 0);
#else
- qlt_send_term_exchange(vha, NULL, atio, 1, 0);
+ qlt_send_term_exchange(rsp->qpair, NULL, atio, 1, 0);
#endif
} else {
if (tgt->tgt_stop) {
@@ -5409,14 +5524,14 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
"qla_target: Unable to send "
"command to target, sending TERM "
"EXCHANGE for rsp\n");
- qlt_send_term_exchange(vha, NULL,
+ qlt_send_term_exchange(rsp->qpair, NULL,
atio, 1, 0);
} else {
ql_dbg(ql_dbg_tgt, vha, 0xe060,
"qla_target(%d): Unable to send "
"command to target, sending BUSY "
"status\n", vha->vp_idx);
- qlt_send_busy(vha, atio, 0);
+ qlt_send_busy(rsp->qpair, atio, 0);
}
}
}
@@ -5426,7 +5541,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
case CONTINUE_TGT_IO_TYPE:
{
struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
- qlt_do_ctio_completion(vha, entry->handle,
+ qlt_do_ctio_completion(vha, rsp, entry->handle,
le16_to_cpu(entry->status)|(pkt->entry_status << 16),
entry);
break;
@@ -5435,7 +5550,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
case CTIO_A64_TYPE:
{
struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
- qlt_do_ctio_completion(vha, entry->handle,
+ qlt_do_ctio_completion(vha, rsp, entry->handle,
le16_to_cpu(entry->status)|(pkt->entry_status << 16),
entry);
break;
@@ -5525,7 +5640,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
break;
}
- tgt->irq_cmd_count--;
}
/*
@@ -5538,15 +5652,9 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
int login_code;
- if (!ha->tgt.tgt_ops)
+ if (!tgt || tgt->tgt_stop || tgt->tgt_stopped)
return;
- if (unlikely(tgt == NULL)) {
- ql_dbg(ql_dbg_tgt, vha, 0xe03a,
- "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
- return;
- }
-
if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
IS_QLA2100(ha))
return;
@@ -5555,7 +5663,6 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
* Otherwise, some commands can get stuck.
*/
- tgt->irq_cmd_count++;
switch (code) {
case MBA_RESET: /* Reset */
@@ -5578,7 +5685,8 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
if (tgt->link_reinit_iocb_pending) {
- qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
+ qlt_send_notify_ack(ha->base_qpair,
+ (void *)&tgt->link_reinit_iocb,
0, 0, 0, 0, 0, 0);
tgt->link_reinit_iocb_pending = 0;
}
@@ -5597,17 +5705,17 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
break;
case MBA_REJECTED_FCP_CMD:
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
- "qla_target(%d): Async event LS_REJECT occurred "
- "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
- le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
- le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
+ "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)",
+ vha->vp_idx,
+ le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+ le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
if (le16_to_cpu(mailbox[3]) == 1) {
/* exchange starvation. */
vha->hw->exch_starvation++;
if (vha->hw->exch_starvation > 5) {
- ql_log(ql_log_warn, vha, 0xffff,
+ ql_log(ql_log_warn, vha, 0xd03a,
"Exchange starvation-. Resetting RISC\n");
vha->hw->exch_starvation = 0;
@@ -5643,7 +5751,6 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
break;
}
- tgt->irq_cmd_count--;
}
static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
@@ -5707,12 +5814,12 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
case MODE_DUAL:
if (newfcport) {
if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20fe,
"%s %d %8phC post upd_fcport fcp_cnt %d\n",
__func__, __LINE__, fcport->port_name, vha->fcport_count);
qla24xx_post_upd_fcport_work(vha, fcport);
} else {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20ff,
"%s %d %8phC post gpsc fcp_cnt %d\n",
__func__, __LINE__, fcport->port_name, vha->fcport_count);
qla24xx_post_gpsc_work(vha, fcport);
@@ -5838,7 +5945,7 @@ static void qlt_abort_work(struct qla_tgt *tgt,
}
if (!kref_get_unless_zero(&sess->sess_kref)) {
- ql_dbg(ql_dbg_tgt_tmr, vha, 0xffff,
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c,
"%s: kref_get fail %8phC \n",
__func__, sess->port_name);
sess = NULL;
@@ -5861,7 +5968,8 @@ out_term2:
out_term:
spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
+ qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts,
+ FCP_TMF_REJECTED, false);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -5875,7 +5983,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
unsigned long flags;
uint8_t *s_id = NULL; /* to hide compiler warnings */
int rc;
- uint32_t lun, unpacked_lun;
+ u64 unpacked_lun;
int fn;
void *iocb;
@@ -5902,7 +6010,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
}
if (!kref_get_unless_zero(&sess->sess_kref)) {
- ql_dbg(ql_dbg_tgt_tmr, vha, 0xffff,
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020,
"%s: kref_get fail %8phC\n",
__func__, sess->port_name);
sess = NULL;
@@ -5911,9 +6019,9 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
}
iocb = a;
- lun = a->u.isp24.fcp_cmnd.lun;
fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
- unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+ unpacked_lun =
+ scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
ha->tgt.tgt_ops->put_sess(sess);
@@ -5928,7 +6036,7 @@ out_term2:
ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
out_term:
- qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
+ qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0);
}
static void qlt_sess_work_fn(struct work_struct *work)
@@ -5976,6 +6084,8 @@ static void qlt_sess_work_fn(struct work_struct *work)
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
struct qla_tgt *tgt;
+ int rc, i;
+ struct qla_qpair_hint *h;
if (!QLA_TGT_MODE_ENABLED())
return 0;
@@ -5998,9 +6108,47 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
return -ENOMEM;
}
+ tgt->qphints = kzalloc((ha->max_qpairs + 1) *
+ sizeof(struct qla_qpair_hint), GFP_KERNEL);
+ if (!tgt->qphints) {
+ kfree(tgt);
+ ql_log(ql_log_warn, base_vha, 0x0197,
+ "Unable to allocate qpair hints.\n");
+ return -ENOMEM;
+ }
+
if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
base_vha->host->hostt->supported_mode |= MODE_TARGET;
+ rc = btree_init64(&tgt->lun_qpair_map);
+ if (rc) {
+ kfree(tgt->qphints);
+ kfree(tgt);
+ ql_log(ql_log_info, base_vha, 0x0198,
+ "Unable to initialize lun_qpair_map btree\n");
+ return -EIO;
+ }
+ h = &tgt->qphints[0];
+ h->qpair = ha->base_qpair;
+ INIT_LIST_HEAD(&h->hint_elem);
+ h->cpuid = ha->base_qpair->cpuid;
+ list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list);
+
+ for (i = 0; i < ha->max_qpairs; i++) {
+ unsigned long flags;
+
+ struct qla_qpair *qpair = ha->queue_pair_map[i];
+ h = &tgt->qphints[i + 1];
+ INIT_LIST_HEAD(&h->hint_elem);
+ if (qpair) {
+ h->qpair = qpair;
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ list_add_tail(&h->hint_elem, &qpair->hints_list);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+ h->cpuid = qpair->cpuid;
+ }
+ }
+
tgt->ha = ha;
tgt->vha = base_vha;
init_waitqueue_head(&tgt->waitQ);
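qlt_add_target() now allocates one qla_qpair_hint per hardware queue pair plus the base pair, and a 64-bit btree keyed by LUN so commands for the same LUN can stick to one queue. The selection policy itself lives elsewhere in qla_target.c; the sketch below (hypothetical names, a plain list instead of the kernel btree, round-robin as an assumed policy) only illustrates the pin-on-first-use idea:

#include <stdint.h>
#include <stdlib.h>

#define MAX_QPAIRS 8

struct qpair { int id; };

struct map_ent {			/* toy stand-in for btree_*64() */
	uint64_t lun;
	struct qpair *qp;
	struct map_ent *next;
};

static struct qpair qpairs[MAX_QPAIRS];
static struct map_ent *lun_map;
static unsigned int next_qp;

static struct qpair *lun_to_qpair(uint64_t unpacked_lun)
{
	struct map_ent *e;

	for (e = lun_map; e; e = e->next)
		if (e->lun == unpacked_lun)
			return e->qp;	/* already pinned to a queue */

	e = malloc(sizeof(*e));
	if (!e)
		return &qpairs[0];	/* fall back to the base pair */
	e->lun = unpacked_lun;
	e->qp = &qpairs[next_qp++ % MAX_QPAIRS];	/* assumed policy */
	e->next = lun_map;
	lun_map = e;
	return e->qp;
}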
@@ -6015,11 +6163,8 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
"qla_target(%d): using 64 Bit PCI addressing",
base_vha->vp_idx);
- tgt->tgt_enable_64bit_addr = 1;
/* 3 is reserved */
tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
- tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
- tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
mutex_lock(&qla_tgt_mutex);
list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
@@ -6231,7 +6376,6 @@ qlt_enable_vha(struct scsi_qla_host *vha)
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
unsigned long flags;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
- int rspq_ent = QLA83XX_RSPQ_MSIX_ENTRY_NUMBER;
if (!tgt) {
ql_dbg(ql_dbg_tgt, vha, 0xe069,
@@ -6250,17 +6394,6 @@ qlt_enable_vha(struct scsi_qla_host *vha)
qla24xx_disable_vp(vha);
qla24xx_enable_vp(vha);
} else {
- if (ha->msix_entries) {
- ql_dbg(ql_dbg_tgt, vha, 0xffff,
- "%s: host%ld : vector %d cpu %d\n",
- __func__, vha->host_no,
- ha->msix_entries[rspq_ent].vector,
- ha->msix_entries[rspq_ent].cpuid);
-
- ha->tgt.rspq_vector_cpuid =
- ha->msix_entries[rspq_ent].cpuid;
- }
-
set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
qla2xxx_wake_dpc(base_vha);
qla2x00_wait_for_hba_online(base_vha);
@@ -6387,14 +6520,15 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
* cannot be trusted. There is no point in passing
* it further up.
*/
- ql_log(ql_log_warn, vha, 0xffff,
+ ql_log(ql_log_warn, vha, 0xd03c,
"corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
pkt->u.isp24.fcp_hdr.s_id,
be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);
adjust_corrupted_atio(pkt);
- qlt_send_term_exchange(vha, NULL, pkt, ha_locked, 0);
+ qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
+ ha_locked, 0);
} else {
qlt_24xx_atio_pkt_all_vps(vha,
(struct atio_from_isp *)pkt, ha_locked);
@@ -6445,8 +6579,9 @@ void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
struct qla_hw_data *ha = vha->hw;
- u32 tmp;
- u16 t;
+
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
if (!ha->tgt.saved_set) {
@@ -6461,24 +6596,10 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
ha->tgt.saved_set = 1;
}
- if (qla_tgt_mode_enabled(vha)) {
+ if (qla_tgt_mode_enabled(vha))
nv->exchange_count = cpu_to_le16(0xFFFF);
- } else { /* dual */
- if (ql_dm_tgt_ex_pct > 100) {
- ql_dm_tgt_ex_pct = 50;
- } else if (ql_dm_tgt_ex_pct == 100) {
- /* leave some for FW */
- ql_dm_tgt_ex_pct = 95;
- }
-
- tmp = ha->orig_fw_xcb_count * ql_dm_tgt_ex_pct;
- tmp = tmp/100;
- if (tmp > 0xffff)
- tmp = 0xffff;
-
- t = tmp & 0xffff;
- nv->exchange_count = cpu_to_le16(t);
- }
+ else /* dual */
+ nv->exchange_count = cpu_to_le16(ql2xexchoffld);
/* Enable target mode */
nv->firmware_options_1 |= cpu_to_le32(BIT_4);
@@ -6522,7 +6643,7 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
return;
}
- if (ha->tgt.enable_class_2) {
+ if (ha->base_qpair->enable_class_2) {
if (vha->flags.init_done)
fc_host_supported_classes(vha->host) =
FC_COS_CLASS2 | FC_COS_CLASS3;
@@ -6563,8 +6684,6 @@ void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
struct qla_hw_data *ha = vha->hw;
- u32 tmp;
- u16 t;
if (!QLA_TGT_MODE_ENABLED())
return;
@@ -6582,23 +6701,10 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
ha->tgt.saved_set = 1;
}
- if (qla_tgt_mode_enabled(vha)) {
+ if (qla_tgt_mode_enabled(vha))
nv->exchange_count = cpu_to_le16(0xFFFF);
- } else { /* dual */
- if (ql_dm_tgt_ex_pct > 100) {
- ql_dm_tgt_ex_pct = 50;
- } else if (ql_dm_tgt_ex_pct == 100) {
- /* leave some for FW */
- ql_dm_tgt_ex_pct = 95;
- }
-
- tmp = ha->orig_fw_xcb_count * ql_dm_tgt_ex_pct;
- tmp = tmp/100;
- if (tmp > 0xffff)
- tmp = 0xffff;
- t = tmp & 0xffff;
- nv->exchange_count = cpu_to_le16(t);
- }
+ else /* dual */
+ nv->exchange_count = cpu_to_le16(ql2xexchoffld);
/* Enable target mode */
nv->firmware_options_1 |= cpu_to_le32(BIT_4);
@@ -6641,7 +6747,7 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
return;
}
- if (ha->tgt.enable_class_2) {
+ if (ha->base_qpair->enable_class_2) {
if (vha->flags.init_done)
fc_host_supported_classes(vha->host) =
FC_COS_CLASS2 | FC_COS_CLASS3;
@@ -6688,21 +6794,6 @@ qlt_83xx_iospace_config(struct qla_hw_data *ha)
ha->msix_count += 1; /* For ATIO Q */
}
-int
-qlt_24xx_process_response_error(struct scsi_qla_host *vha,
- struct sts_entry_24xx *pkt)
-{
- switch (pkt->entry_type) {
- case ABTS_RECV_24XX:
- case ABTS_RESP_24XX:
- case CTIO_TYPE7:
- case NOTIFY_ACK_TYPE:
- case CTIO_CRC2:
- return 1;
- default:
- return 0;
- }
-}
void
qlt_modify_vp_config(struct scsi_qla_host *vha,
@@ -6744,7 +6835,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
rc = btree_init32(&ha->tgt.host_map);
if (rc)
- ql_log(ql_log_info, base_vha, 0xffff,
+ ql_log(ql_log_info, base_vha, 0xd03d,
"Unable to initialize ha->host_map btree\n");
qlt_update_vp_map(base_vha, SET_VP_IDX);
@@ -6780,7 +6871,8 @@ qlt_handle_abts_recv_work(struct work_struct *work)
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
- if (qla2x00_reset_active(vha) || (op->chip_reset != ha->chip_reset))
+ if (qla2x00_reset_active(vha) ||
+ (op->chip_reset != ha->base_qpair->chip_reset))
return;
spin_lock_irqsave(&ha->tgt.atio_lock, flags);
@@ -6788,14 +6880,15 @@ qlt_handle_abts_recv_work(struct work_struct *work)
spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_response_pkt_all_vps(vha, (response_t *)&op->atio);
+ qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
kfree(op);
}
void
-qlt_handle_abts_recv(struct scsi_qla_host *vha, response_t *pkt)
+qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp,
+ response_t *pkt)
{
struct qla_tgt_sess_op *op;
@@ -6805,13 +6898,14 @@ qlt_handle_abts_recv(struct scsi_qla_host *vha, response_t *pkt)
/* do not reach for the ATIO queue here. This is best-effort error
* recovery at this point.
*/
- qlt_response_pkt_all_vps(vha, pkt);
+ qlt_response_pkt_all_vps(vha, rsp, pkt);
return;
}
memcpy(&op->atio, pkt, sizeof(*pkt));
op->vha = vha;
- op->chip_reset = vha->hw->chip_reset;
+ op->chip_reset = vha->hw->base_qpair->chip_reset;
+ op->rsp = rsp;
INIT_WORK(&op->work, qlt_handle_abts_recv_work);
queue_work(qla_tgt_wq, &op->work);
return;
@@ -6872,25 +6966,25 @@ qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
case SET_AL_PA:
slot = btree_lookup32(&vha->hw->tgt.host_map, key);
if (!slot) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
"Save vha in host_map %p %06x\n", vha, key);
rc = btree_insert32(&vha->hw->tgt.host_map,
key, vha, GFP_ATOMIC);
if (rc)
- ql_log(ql_log_info, vha, 0xffff,
+ ql_log(ql_log_info, vha, 0xd03e,
"Unable to insert s_id into host_map: %06x\n",
key);
return;
}
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
- "replace existing vha in host_map %p %06x\n", vha, key);
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
+ "replace existing vha in host_map %p %06x\n", vha, key);
btree_update32(&vha->hw->tgt.host_map, key, vha);
break;
case RESET_VP_IDX:
vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
break;
case RESET_AL_PA:
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
"clear vha in host_map %p %06x\n", vha, key);
slot = btree_lookup32(&vha->hw->tgt.host_map, key);
if (slot)
@@ -6952,7 +7046,7 @@ int __init qlt_init(void)
sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
qla_tgt_mgmt_cmd), 0, NULL);
if (!qla_tgt_mgmt_cmd_cachep) {
- ql_log(ql_log_fatal, NULL, 0xe06d,
+ ql_log(ql_log_fatal, NULL, 0xd04b,
"kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
return -ENOMEM;
}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index d64420251194..7fe02d036bdf 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -70,6 +70,16 @@
/* Used to mark CTIO as intermediate */
#define CTIO_INTERMEDIATE_HANDLE_MARK BIT_30
+#define QLA_TGT_NULL_HANDLE 0
+
+#define QLA_TGT_HANDLE_MASK 0xF0000000
+#define QLA_QPID_HANDLE_MASK 0x00FF0000 /* qpair id mask */
+#define QLA_CMD_HANDLE_MASK 0x0000FFFF
+#define QLA_TGT_SKIP_HANDLE (0xFFFFFFFF & ~QLA_TGT_HANDLE_MASK)
+
+#define QLA_QPID_HANDLE_SHIFT 16
+#define GET_QID(_h) ((_h & QLA_QPID_HANDLE_MASK) >> QLA_QPID_HANDLE_SHIFT)
+
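With multiqueue, a CTIO completion has to be routed back to the queue pair that issued it, so the command handle now carries the qpair id in bits 16-23 while the top bits stay reserved for completion marks. A small self-checking sketch of the pack/unpack (mask values copied from the defines above):

#include <assert.h>
#include <stdint.h>

#define QPID_MASK	0x00FF0000u	/* QLA_QPID_HANDLE_MASK */
#define QPID_SHIFT	16		/* QLA_QPID_HANDLE_SHIFT */
#define CMD_MASK	0x0000FFFFu	/* QLA_CMD_HANDLE_MASK */

static uint32_t make_handle(uint32_t qpair_id, uint32_t cmd_index)
{
	return ((qpair_id << QPID_SHIFT) & QPID_MASK) |
	       (cmd_index & CMD_MASK);
}

static uint32_t get_qid(uint32_t handle)	/* mirrors GET_QID() */
{
	return (handle & QPID_MASK) >> QPID_SHIFT;
}

int main(void)
{
	uint32_t h = make_handle(5, 0x1234);

	assert(get_qid(h) == 5);
	assert((h & CMD_MASK) == 0x1234);
	return 0;
}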
#ifndef OF_SS_MODE_0
/*
@@ -426,7 +436,7 @@ struct ctio7_to_24xx {
} status0;
struct {
uint16_t sense_length;
- uint16_t flags;
+ __le16 flags;
uint32_t residual;
__le16 ox_id;
uint16_t scsi_status;
@@ -664,6 +674,7 @@ struct abts_resp_from_24xx_fw {
struct qla_tgt_mgmt_cmd;
struct fc_port;
+struct qla_tgt_cmd;
/*
* This structure provides a template of function calls that the
@@ -675,7 +686,7 @@ struct qla_tgt_func_tmpl {
int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
unsigned char *, uint32_t, int, int, int);
void (*handle_data)(struct qla_tgt_cmd *);
- int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint16_t,
+ int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, u64, uint16_t,
uint32_t);
void (*free_cmd)(struct qla_tgt_cmd *);
void (*free_mcmd)(struct qla_tgt_mgmt_cmd *);
@@ -744,11 +755,6 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
#define QLA_TGT_STATE_DATA_IN 2 /* Data arrived + target processing */
#define QLA_TGT_STATE_PROCESSED 3 /* target done processing */
-
-/* Special handles */
-#define QLA_TGT_NULL_HANDLE 0
-#define QLA_TGT_SKIP_HANDLE (0xFFFFFFFF & ~CTIO_COMPLETION_HANDLE_MARK)
-
/* ATIO task_codes field */
#define ATIO_SIMPLE_QUEUE 0
#define ATIO_HEAD_OF_QUEUE 1
@@ -781,22 +787,28 @@ struct qla_port_24xx_data {
uint16_t reserved;
};
+struct qla_qpair_hint {
+ struct list_head hint_elem;
+ struct qla_qpair *qpair;
+ u16 cpuid;
+ uint8_t cmd_cnt;
+};
+
struct qla_tgt {
struct scsi_qla_host *vha;
struct qla_hw_data *ha;
-
+ struct btree_head64 lun_qpair_map;
+ struct qla_qpair_hint *qphints;
/*
* To sync between IRQ handlers and qlt_target_release(). Needed,
* because req_pkt() can drop/reacquire the HW lock inside. Protected by
* HW lock.
*/
- int irq_cmd_count;
int atio_irq_cmd_count;
- int datasegs_per_cmd, datasegs_per_cont, sg_tablesize;
+ int sg_tablesize;
/* Target's flags, serialized by pha->hardware_lock */
- unsigned int tgt_enable_64bit_addr:1; /* 64-bits PCI addr enabled */
unsigned int link_reinit_iocb_pending:1;
/*
@@ -832,6 +844,7 @@ struct qla_tgt_sess_op {
struct work_struct work;
struct list_head cmd_list;
bool aborted;
+ struct rsp_que *rsp;
};
enum trace_flags {
@@ -859,10 +872,16 @@ enum trace_flags {
};
struct qla_tgt_cmd {
+ /*
+ * Do not move the cmd_type field. It needs to line up with srb->cmd_type
+ */
+ uint8_t cmd_type;
+ uint8_t pad[7];
struct se_cmd se_cmd;
struct fc_port *sess;
+ struct qla_qpair *qpair;
+ uint32_t reset_count;
int state;
- struct work_struct free_work;
struct work_struct work;
/* Sense buffer that will be mapped into outgoing status */
unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
@@ -885,10 +904,10 @@ struct qla_tgt_cmd {
int sg_cnt; /* SG segments count */
int bufflen; /* cmd buffer length */
int offset;
- uint32_t unpacked_lun;
+ u64 unpacked_lun;
enum dma_data_direction dma_data_direction;
- uint32_t reset_count;
+ uint16_t vp_idx;
uint16_t loop_id; /* to save extra sess dereferences */
struct qla_tgt *tgt; /* to save extra sess dereferences */
struct scsi_qla_host *vha;
@@ -939,6 +958,7 @@ struct qla_tgt_mgmt_cmd {
uint16_t tmr_func;
uint8_t fc_tm_rsp;
struct fc_port *sess;
+ struct qla_qpair *qpair;
struct se_cmd se_cmd;
struct work_struct free_work;
unsigned int flags;
@@ -960,7 +980,6 @@ struct qla_tgt_prm {
int seg_cnt;
int req_cnt;
uint16_t rq_result;
- uint16_t scsi_status;
int sense_buffer_len;
int residual;
int add_status_pkt;
@@ -1040,7 +1059,8 @@ static inline void sid_to_portid(const uint8_t *s_id, port_id_t *p)
/*
* Exported symbols from qla_target.c LLD logic used by qla2xxx code.
*/
-extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
+extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, struct rsp_que *,
+ response_t *);
extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
extern int qlt_abort_cmd(struct qla_tgt_cmd *);
@@ -1073,11 +1093,13 @@ extern int qlt_stop_phase1(struct qla_tgt *);
extern void qlt_stop_phase2(struct qla_tgt *);
extern irqreturn_t qla83xx_msix_atio_q(int, void *);
extern void qlt_83xx_iospace_config(struct qla_hw_data *);
-extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
+extern int qlt_free_qfull_cmds(struct qla_qpair *);
extern void qlt_logo_completion_handler(fc_port_t *, int);
extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
-void qlt_send_resp_ctio(scsi_qla_host_t *, struct qla_tgt_cmd *, uint8_t,
+void qlt_send_resp_ctio(struct qla_qpair *, struct qla_tgt_cmd *, uint8_t,
uint8_t, uint8_t, uint8_t);
+extern void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *,
+ struct qla_tgt_cmd *);
#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index c197972a3e2d..33142610882f 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -219,8 +219,6 @@ qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
{
if (buf)
ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
- ql_dbg(ql_dbg_misc + ql_dbg_verbose, NULL, 0xd011,
- "Skipping entry %d\n", ent->hdr.entry_type);
}
static int
@@ -818,6 +816,8 @@ qla27xx_walk_template(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_misc, vha, 0xd01a,
"%s: entry count %lx\n", __func__, count);
while (count--) {
+ if (buf && *len >= vha->hw->fw_dump_len)
+ break;
if (qla27xx_find_entry(ent->hdr.entry_type)(vha, ent, buf, len))
break;
ent = qla27xx_next_entry(ent);
@@ -825,18 +825,20 @@ qla27xx_walk_template(struct scsi_qla_host *vha,
if (count)
ql_dbg(ql_dbg_misc, vha, 0xd018,
- "%s: residual count (%lx)\n", __func__, count);
+ "%s: entry residual count (%lx)\n", __func__, count);
if (ent->hdr.entry_type != ENTRY_TYPE_TMP_END)
ql_dbg(ql_dbg_misc, vha, 0xd019,
- "%s: missing end (%lx)\n", __func__, count);
+ "%s: missing end entry (%lx)\n", __func__, count);
- ql_dbg(ql_dbg_misc, vha, 0xd01b,
- "%s: len=%lx\n", __func__, *len);
+ if (buf && *len != vha->hw->fw_dump_len)
+ ql_dbg(ql_dbg_misc, vha, 0xd01b,
+ "%s: length=%#lx residual=%+ld\n",
+ __func__, *len, vha->hw->fw_dump_len - *len);
if (buf) {
ql_log(ql_log_warn, vha, 0xd015,
- "Firmware dump saved to temp buffer (%ld/%p)\n",
+ "Firmware dump saved to temp buffer (%lu/%p)\n",
vha->host_no, vha->hw->fw_dump);
qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
}
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 45bc84e8e3bf..005a378f7fab 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "9.00.00.00-k"
+#define QLA2XXX_VERSION "10.00.00.00-k"
-#define QLA_DRIVER_MAJOR_VER 9
+#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 0
#define QLA_DRIVER_PATCH_VER 0
#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 7443e4efa3ae..c4b414833b86 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -25,7 +25,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
@@ -284,7 +283,7 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work)
WARN_ON(cmd->trc_flags & TRC_CMD_FREE);
- cmd->vha->tgt_counters.qla_core_ret_sta_ctio++;
+ cmd->qpair->tgt_counters.qla_core_ret_sta_ctio++;
cmd->trc_flags |= TRC_CMD_FREE;
transport_generic_free_cmd(&cmd->se_cmd, 0);
}
@@ -296,7 +295,7 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work)
*/
static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
{
- cmd->vha->tgt_counters.core_qla_free_cmd++;
+ cmd->qpair->tgt_counters.core_qla_free_cmd++;
cmd->cmd_in_wq = 1;
WARN_ON(cmd->trc_flags & TRC_CMD_DONE);
@@ -492,7 +491,7 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
}
#endif
- cmd->vha->tgt_counters.qla_core_sbt_cmd++;
+ cmd->qpair->tgt_counters.qla_core_sbt_cmd++;
return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
cmd->unpacked_lun, data_length, fcp_task_attr,
data_dir, flags);
@@ -520,7 +519,7 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
}
spin_unlock_irqrestore(&cmd->cmd_lock, flags);
- cmd->vha->tgt_counters.qla_core_ret_ctio++;
+ cmd->qpair->tgt_counters.qla_core_ret_ctio++;
if (!cmd->write_data_transferred) {
/*
* Check if se_cmd has already been aborted via LUN_RESET, and
@@ -595,7 +594,7 @@ static int tcm_qla2xxx_dif_tags(struct qla_tgt_cmd *cmd,
/*
* Called from qla_target.c:qlt_issue_task_mgmt()
*/
-static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
+static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, u64 lun,
uint16_t tmr_func, uint32_t tag)
{
struct fc_port *sess = mcmd->sess;
@@ -686,6 +685,19 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
struct qla_tgt_cmd, se_cmd);
int xmit_type = QLA_TGT_XMIT_STATUS;
+ if (cmd->aborted) {
+ /*
+ * Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
+ * can get ahead of this cmd. tcm_qla2xxx_aborted_task
+ * has already kick-started the free.
+ */
+ pr_debug(
+ "queue_data_in aborted cmd[%p] refcount %d transport_state %x, t_state %x, se_cmd_flags %x\n",
+ cmd, kref_read(&cmd->se_cmd.cmd_kref),
+ cmd->se_cmd.transport_state, cmd->se_cmd.t_state,
+ cmd->se_cmd.se_cmd_flags);
+ return 0;
+ }
cmd->bufflen = se_cmd->data_length;
cmd->sg = NULL;
cmd->sg_cnt = 0;
@@ -1870,9 +1882,9 @@ static ssize_t tcm_qla2xxx_wwn_version_show(struct config_item *item,
char *page)
{
return sprintf(page,
- "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
- UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname,
- utsname()->machine);
+ "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on %s\n",
+ QLA2XXX_VERSION, utsname()->sysname,
+ utsname()->machine, utsname()->release);
}
CONFIGFS_ATTR_RO(tcm_qla2xxx_wwn_, version);
@@ -1976,9 +1988,9 @@ static int tcm_qla2xxx_register_configfs(void)
{
int ret;
- pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
- UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname,
- utsname()->machine);
+ pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on %s\n",
+ QLA2XXX_VERSION, utsname()->sysname,
+ utsname()->machine, utsname()->release);
ret = target_register_template(&tcm_qla2xxx_ops);
if (ret)
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 61cdd99ae41e..3d38c6d463b8 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -108,14 +108,7 @@ EXPORT_SYMBOL(scsi_sd_pm_domain);
*/
void scsi_put_command(struct scsi_cmnd *cmd)
{
- unsigned long flags;
-
- /* serious error if the command hasn't come from a device list */
- spin_lock_irqsave(&cmd->device->list_lock, flags);
- BUG_ON(list_empty(&cmd->list));
- list_del_init(&cmd->list);
- spin_unlock_irqrestore(&cmd->device->list_lock, flags);
-
+ scsi_del_cmd_from_list(cmd);
BUG_ON(delayed_work_pending(&cmd->abort_work));
}
@@ -807,11 +800,7 @@ MODULE_LICENSE("GPL");
module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
-#ifdef CONFIG_SCSI_MQ_DEFAULT
bool scsi_use_blk_mq = true;
-#else
-bool scsi_use_blk_mq = false;
-#endif
module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
static int __init init_scsi(void)
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index dc095a292c61..3be980d47268 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -245,7 +245,7 @@ struct sdebug_dev_info {
unsigned int channel;
unsigned int target;
u64 lun;
- uuid_be lu_name;
+ uuid_t lu_name;
struct sdebug_host_info *sdbg_host;
unsigned long uas_bm[1];
atomic_t num_in_q;
@@ -965,7 +965,7 @@ static const u64 naa3_comp_c = 0x3111111000000000ULL;
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
int target_dev_id, int dev_id_num,
const char *dev_id_str, int dev_id_str_len,
- const uuid_be *lu_name)
+ const uuid_t *lu_name)
{
int num, port_a;
char b[32];
@@ -3568,7 +3568,7 @@ static void sdebug_q_cmd_wq_complete(struct work_struct *work)
}
static bool got_shared_uuid;
-static uuid_be shared_uuid;
+static uuid_t shared_uuid;
static struct sdebug_dev_info *sdebug_device_create(
struct sdebug_host_info *sdbg_host, gfp_t flags)
@@ -3578,12 +3578,12 @@ static struct sdebug_dev_info *sdebug_device_create(
devip = kzalloc(sizeof(*devip), flags);
if (devip) {
if (sdebug_uuid_ctl == 1)
- uuid_be_gen(&devip->lu_name);
+ uuid_gen(&devip->lu_name);
else if (sdebug_uuid_ctl == 2) {
if (got_shared_uuid)
devip->lu_name = shared_uuid;
else {
- uuid_be_gen(&shared_uuid);
+ uuid_gen(&shared_uuid);
got_shared_uuid = true;
devip->lu_name = shared_uuid;
}
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index ecc07dab893d..ea9f40e51f68 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1628,11 +1628,17 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q,
struct list_head *done_q)
{
struct scsi_cmnd *scmd, *next;
+ struct scsi_device *sdev;
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
sdev_printk(KERN_INFO, scmd->device, "Device offlined - "
"not ready after error recovery\n");
- scsi_device_set_state(scmd->device, SDEV_OFFLINE);
+ sdev = scmd->device;
+
+ mutex_lock(&sdev->state_mutex);
+ scsi_device_set_state(sdev, SDEV_OFFLINE);
+ mutex_unlock(&sdev->state_mutex);
+
scsi_eh_finish_cmd(scmd, done_q);
}
return;
@@ -1874,7 +1880,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
}
}
-static void eh_lock_door_done(struct request *req, int uptodate)
+static void eh_lock_door_done(struct request *req, blk_status_t status)
{
__blk_put_request(req->q, req);
}
@@ -1903,7 +1909,6 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
if (IS_ERR(req))
return;
rq = scsi_req(req);
- scsi_req_init(req);
rq->cmd[0] = ALLOW_MEDIUM_REMOVAL;
rq->cmd[1] = 0;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 99e16ac479e3..f6097b89d5d3 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -45,23 +45,23 @@ static struct kmem_cache *scsi_sense_isadma_cache;
static DEFINE_MUTEX(scsi_sense_cache_mutex);
static inline struct kmem_cache *
-scsi_select_sense_cache(struct Scsi_Host *shost)
+scsi_select_sense_cache(bool unchecked_isa_dma)
{
- return shost->unchecked_isa_dma ?
- scsi_sense_isadma_cache : scsi_sense_cache;
+ return unchecked_isa_dma ? scsi_sense_isadma_cache : scsi_sense_cache;
}
-static void scsi_free_sense_buffer(struct Scsi_Host *shost,
- unsigned char *sense_buffer)
+static void scsi_free_sense_buffer(bool unchecked_isa_dma,
+ unsigned char *sense_buffer)
{
- kmem_cache_free(scsi_select_sense_cache(shost), sense_buffer);
+ kmem_cache_free(scsi_select_sense_cache(unchecked_isa_dma),
+ sense_buffer);
}
-static unsigned char *scsi_alloc_sense_buffer(struct Scsi_Host *shost,
+static unsigned char *scsi_alloc_sense_buffer(bool unchecked_isa_dma,
gfp_t gfp_mask, int numa_node)
{
- return kmem_cache_alloc_node(scsi_select_sense_cache(shost), gfp_mask,
- numa_node);
+ return kmem_cache_alloc_node(scsi_select_sense_cache(unchecked_isa_dma),
+ gfp_mask, numa_node);
}
int scsi_init_sense_cache(struct Scsi_Host *shost)
@@ -69,7 +69,7 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
struct kmem_cache *cache;
int ret = 0;
- cache = scsi_select_sense_cache(shost);
+ cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
if (cache)
return 0;
@@ -250,7 +250,6 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
if (IS_ERR(req))
return ret;
rq = scsi_req(req);
- scsi_req_init(req);
if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
buffer, bufflen, __GFP_RECLAIM))
@@ -584,19 +583,9 @@ static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
- struct scsi_device *sdev = cmd->device;
- struct Scsi_Host *shost = sdev->host;
- unsigned long flags;
-
scsi_mq_free_sgtables(cmd);
scsi_uninit_cmd(cmd);
-
- if (shost->use_cmd_list) {
- BUG_ON(list_empty(&cmd->list));
- spin_lock_irqsave(&sdev->list_lock, flags);
- list_del_init(&cmd->list);
- spin_unlock_irqrestore(&sdev->list_lock, flags);
- }
+ scsi_del_cmd_from_list(cmd);
}
/*
@@ -635,7 +624,7 @@ static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
cmd->request->next_rq->special = NULL;
}
-static bool scsi_end_request(struct request *req, int error,
+static bool scsi_end_request(struct request *req, blk_status_t error,
unsigned int bytes, unsigned int bidi_bytes)
{
struct scsi_cmnd *cmd = req->special;
@@ -694,45 +683,28 @@ static bool scsi_end_request(struct request *req, int error,
* @cmd: SCSI command (unused)
* @result: scsi error code
*
- * Translate SCSI error code into standard UNIX errno.
- * Return values:
- * -ENOLINK temporary transport failure
- * -EREMOTEIO permanent target failure, do not retry
- * -EBADE permanent nexus failure, retry on other path
- * -ENOSPC No write space available
- * -ENODATA Medium error
- * -EIO unspecified I/O error
+ * Translate SCSI error code into block errors.
*/
-static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
+static blk_status_t __scsi_error_from_host_byte(struct scsi_cmnd *cmd,
+ int result)
{
- int error = 0;
-
- switch(host_byte(result)) {
+ switch (host_byte(result)) {
case DID_TRANSPORT_FAILFAST:
- error = -ENOLINK;
- break;
+ return BLK_STS_TRANSPORT;
case DID_TARGET_FAILURE:
set_host_byte(cmd, DID_OK);
- error = -EREMOTEIO;
- break;
+ return BLK_STS_TARGET;
case DID_NEXUS_FAILURE:
- set_host_byte(cmd, DID_OK);
- error = -EBADE;
- break;
+ return BLK_STS_NEXUS;
case DID_ALLOC_FAILURE:
set_host_byte(cmd, DID_OK);
- error = -ENOSPC;
- break;
+ return BLK_STS_NOSPC;
case DID_MEDIUM_ERROR:
set_host_byte(cmd, DID_OK);
- error = -ENODATA;
- break;
+ return BLK_STS_MEDIUM;
default:
- error = -EIO;
- break;
+ return BLK_STS_IOERR;
}
-
- return error;
}
/*
@@ -769,7 +741,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
int result = cmd->result;
struct request_queue *q = cmd->device->request_queue;
struct request *req = cmd->request;
- int error = 0;
+ blk_status_t error = BLK_STS_OK;
struct scsi_sense_hdr sshdr;
bool sense_valid = false;
int sense_deferred = 0, level = 0;
@@ -808,7 +780,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* both sides at once.
*/
scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid;
- if (scsi_end_request(req, 0, blk_rq_bytes(req),
+ if (scsi_end_request(req, BLK_STS_OK, blk_rq_bytes(req),
blk_rq_bytes(req->next_rq)))
BUG();
return;
@@ -850,7 +822,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
scsi_print_sense(cmd);
result = 0;
/* for passthrough error may be set */
- error = 0;
+ error = BLK_STS_OK;
}
/*
@@ -922,18 +894,18 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
action = ACTION_REPREP;
} else if (sshdr.asc == 0x10) /* DIX */ {
action = ACTION_FAIL;
- error = -EILSEQ;
+ error = BLK_STS_PROTECTION;
/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
action = ACTION_FAIL;
- error = -EREMOTEIO;
+ error = BLK_STS_TARGET;
} else
action = ACTION_FAIL;
break;
case ABORTED_COMMAND:
action = ACTION_FAIL;
if (sshdr.asc == 0x10) /* DIF */
- error = -EILSEQ;
+ error = BLK_STS_PROTECTION;
break;
case NOT_READY:
/* If the device is in the process of becoming
@@ -1134,11 +1106,54 @@ err_exit:
}
EXPORT_SYMBOL(scsi_init_io);
+/**
+ * scsi_initialize_rq - initialize struct scsi_cmnd.req
+ *
+ * Called from inside blk_get_request().
+ */
+void scsi_initialize_rq(struct request *rq)
+{
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+
+ scsi_req_init(&cmd->req);
+}
+EXPORT_SYMBOL(scsi_initialize_rq);
+
+/* Add a command to the list used by the aacraid and dpt_i2o drivers */
+void scsi_add_cmd_to_list(struct scsi_cmnd *cmd)
+{
+ struct scsi_device *sdev = cmd->device;
+ struct Scsi_Host *shost = sdev->host;
+ unsigned long flags;
+
+ if (shost->use_cmd_list) {
+ spin_lock_irqsave(&sdev->list_lock, flags);
+ list_add_tail(&cmd->list, &sdev->cmd_list);
+ spin_unlock_irqrestore(&sdev->list_lock, flags);
+ }
+}
+
+/* Remove a command from the list used by the aacraid and dpt_i2o drivers */
+void scsi_del_cmd_from_list(struct scsi_cmnd *cmd)
+{
+ struct scsi_device *sdev = cmd->device;
+ struct Scsi_Host *shost = sdev->host;
+ unsigned long flags;
+
+ if (shost->use_cmd_list) {
+ spin_lock_irqsave(&sdev->list_lock, flags);
+ BUG_ON(list_empty(&cmd->list));
+ list_del_init(&cmd->list);
+ spin_unlock_irqrestore(&sdev->list_lock, flags);
+ }
+}
+
+/* Called after a request has been started. */
void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
{
void *buf = cmd->sense_buffer;
void *prot = cmd->prot_sdb;
- unsigned long flags;
+ unsigned int unchecked_isa_dma = cmd->flags & SCMD_UNCHECKED_ISA_DMA;
/* zero out the cmd, except for the embedded scsi_request */
memset((char *)cmd + sizeof(cmd->req), 0,
@@ -1147,12 +1162,11 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
cmd->device = dev;
cmd->sense_buffer = buf;
cmd->prot_sdb = prot;
+ cmd->flags = unchecked_isa_dma;
INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
cmd->jiffies_at_alloc = jiffies;
- spin_lock_irqsave(&dev->list_lock, flags);
- list_add_tail(&cmd->list, &dev->cmd_list);
- spin_unlock_irqrestore(&dev->list_lock, flags);
+ scsi_add_cmd_to_list(cmd);
}
static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req)
@@ -1829,58 +1843,45 @@ out_delay:
blk_delay_queue(q, SCSI_QUEUE_DELAY);
}
-static inline int prep_to_mq(int ret)
+static inline blk_status_t prep_to_mq(int ret)
{
switch (ret) {
case BLKPREP_OK:
- return BLK_MQ_RQ_QUEUE_OK;
+ return BLK_STS_OK;
case BLKPREP_DEFER:
- return BLK_MQ_RQ_QUEUE_BUSY;
+ return BLK_STS_RESOURCE;
default:
- return BLK_MQ_RQ_QUEUE_ERROR;
+ return BLK_STS_IOERR;
}
}
+/* Size in bytes of the sg-list stored in the scsi-mq command-private data. */
+static unsigned int scsi_mq_sgl_size(struct Scsi_Host *shost)
+{
+ return min_t(unsigned int, shost->sg_tablesize, SG_CHUNK_SIZE) *
+ sizeof(struct scatterlist);
+}
+
static int scsi_mq_prep_fn(struct request *req)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
struct scsi_device *sdev = req->q->queuedata;
struct Scsi_Host *shost = sdev->host;
- unsigned char *sense_buf = cmd->sense_buffer;
struct scatterlist *sg;
- /* zero out the cmd, except for the embedded scsi_request */
- memset((char *)cmd + sizeof(cmd->req), 0,
- sizeof(*cmd) - sizeof(cmd->req) + shost->hostt->cmd_size);
+ scsi_init_command(sdev, cmd);
req->special = cmd;
cmd->request = req;
- cmd->device = sdev;
- cmd->sense_buffer = sense_buf;
cmd->tag = req->tag;
-
cmd->prot_op = SCSI_PROT_NORMAL;
- INIT_LIST_HEAD(&cmd->list);
- INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
- cmd->jiffies_at_alloc = jiffies;
-
- if (shost->use_cmd_list) {
- spin_lock_irq(&sdev->list_lock);
- list_add_tail(&cmd->list, &sdev->cmd_list);
- spin_unlock_irq(&sdev->list_lock);
- }
-
sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
cmd->sdb.table.sgl = sg;
if (scsi_host_get_prot(shost)) {
- cmd->prot_sdb = (void *)sg +
- min_t(unsigned int,
- shost->sg_tablesize, SG_CHUNK_SIZE) *
- sizeof(struct scatterlist);
memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));
cmd->prot_sdb->table.sgl =
@@ -1909,7 +1910,7 @@ static void scsi_mq_done(struct scsi_cmnd *cmd)
blk_mq_complete_request(cmd->request);
}
-static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request *req = bd->rq;
@@ -1917,14 +1918,14 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
struct scsi_device *sdev = q->queuedata;
struct Scsi_Host *shost = sdev->host;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
- int ret;
+ blk_status_t ret;
int reason;
ret = prep_to_mq(scsi_prep_state_check(sdev, req));
- if (ret != BLK_MQ_RQ_QUEUE_OK)
+ if (ret != BLK_STS_OK)
goto out;
- ret = BLK_MQ_RQ_QUEUE_BUSY;
+ ret = BLK_STS_RESOURCE;
if (!get_device(&sdev->sdev_gendev))
goto out;
@@ -1937,7 +1938,7 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
if (!(req->rq_flags & RQF_DONTPREP)) {
ret = prep_to_mq(scsi_mq_prep_fn(req));
- if (ret != BLK_MQ_RQ_QUEUE_OK)
+ if (ret != BLK_STS_OK)
goto out_dec_host_busy;
req->rq_flags |= RQF_DONTPREP;
} else {
@@ -1955,11 +1956,11 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
reason = scsi_dispatch_cmd(cmd);
if (reason) {
scsi_set_blocked(cmd, reason);
- ret = BLK_MQ_RQ_QUEUE_BUSY;
+ ret = BLK_STS_RESOURCE;
goto out_dec_host_busy;
}
- return BLK_MQ_RQ_QUEUE_OK;
+ return BLK_STS_OK;
out_dec_host_busy:
atomic_dec(&shost->host_busy);
@@ -1972,12 +1973,14 @@ out_put_device:
put_device(&sdev->sdev_gendev);
out:
switch (ret) {
- case BLK_MQ_RQ_QUEUE_BUSY:
+ case BLK_STS_OK:
+ break;
+ case BLK_STS_RESOURCE:
if (atomic_read(&sdev->device_busy) == 0 &&
!scsi_device_blocked(sdev))
blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
break;
- case BLK_MQ_RQ_QUEUE_ERROR:
+ default:
/*
* Make sure to release all allocated resources when
* we hit an error, as we will never see this command
@@ -1986,8 +1989,6 @@ out:
if (req->rq_flags & RQF_DONTPREP)
scsi_mq_uninit_cmd(cmd);
break;
- default:
- break;
}
return ret;
}
@@ -2004,23 +2005,34 @@ static int scsi_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
{
struct Scsi_Host *shost = set->driver_data;
+ const bool unchecked_isa_dma = shost->unchecked_isa_dma;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+ struct scatterlist *sg;
- cmd->sense_buffer =
- scsi_alloc_sense_buffer(shost, GFP_KERNEL, numa_node);
+ if (unchecked_isa_dma)
+ cmd->flags |= SCMD_UNCHECKED_ISA_DMA;
+ cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma,
+ GFP_KERNEL, numa_node);
if (!cmd->sense_buffer)
return -ENOMEM;
cmd->req.sense = cmd->sense_buffer;
+
+ if (scsi_host_get_prot(shost)) {
+ sg = (void *)cmd + sizeof(struct scsi_cmnd) +
+ shost->hostt->cmd_size;
+ cmd->prot_sdb = (void *)sg + scsi_mq_sgl_size(shost);
+ }
+
return 0;
}
static void scsi_exit_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx)
{
- struct Scsi_Host *shost = set->driver_data;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
- scsi_free_sense_buffer(shost, cmd->sense_buffer);
+ scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
+ cmd->sense_buffer);
}
static int scsi_map_queues(struct blk_mq_tag_set *set)
@@ -2057,6 +2069,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
{
struct device *dev = shost->dma_dev;
+ queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
+
/*
* this limit is imposed by hardware restrictions
*/
@@ -2093,11 +2107,15 @@ EXPORT_SYMBOL_GPL(__scsi_init_queue);
static int scsi_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
{
struct Scsi_Host *shost = q->rq_alloc_data;
+ const bool unchecked_isa_dma = shost->unchecked_isa_dma;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
memset(cmd, 0, sizeof(*cmd));
- cmd->sense_buffer = scsi_alloc_sense_buffer(shost, gfp, NUMA_NO_NODE);
+ if (unchecked_isa_dma)
+ cmd->flags |= SCMD_UNCHECKED_ISA_DMA;
+ cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma, gfp,
+ NUMA_NO_NODE);
if (!cmd->sense_buffer)
goto fail;
cmd->req.sense = cmd->sense_buffer;
@@ -2111,19 +2129,19 @@ static int scsi_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
return 0;
fail_free_sense:
- scsi_free_sense_buffer(shost, cmd->sense_buffer);
+ scsi_free_sense_buffer(unchecked_isa_dma, cmd->sense_buffer);
fail:
return -ENOMEM;
}
static void scsi_exit_rq(struct request_queue *q, struct request *rq)
{
- struct Scsi_Host *shost = q->rq_alloc_data;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
if (cmd->prot_sdb)
kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
- scsi_free_sense_buffer(shost, cmd->sense_buffer);
+ scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
+ cmd->sense_buffer);
}
struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
@@ -2139,6 +2157,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
q->request_fn = scsi_request_fn;
q->init_rq_fn = scsi_init_rq;
q->exit_rq_fn = scsi_exit_rq;
+ q->initialize_rq_fn = scsi_initialize_rq;
if (blk_init_allocated_queue(q) < 0) {
blk_cleanup_queue(q);
@@ -2163,6 +2182,7 @@ static const struct blk_mq_ops scsi_mq_ops = {
#endif
.init_request = scsi_init_request,
.exit_request = scsi_exit_request,
+ .initialize_rq_fn = scsi_initialize_rq,
.map_queues = scsi_map_queues,
};
@@ -2179,12 +2199,9 @@ struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
int scsi_mq_setup_tags(struct Scsi_Host *shost)
{
- unsigned int cmd_size, sgl_size, tbl_size;
+ unsigned int cmd_size, sgl_size;
- tbl_size = shost->sg_tablesize;
- if (tbl_size > SG_CHUNK_SIZE)
- tbl_size = SG_CHUNK_SIZE;
- sgl_size = tbl_size * sizeof(struct scatterlist);
+ sgl_size = scsi_mq_sgl_size(shost);
cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
if (scsi_host_get_prot(shost))
cmd_size += sizeof(struct scsi_data_buffer) + sgl_size;
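scsi_mq_sgl_size() now centralizes the sg-list sizing, and together with the init_request change above it fixes where the protection descriptor is carved out of the per-request PDU. A sketch of the resulting layout arithmetic (all sizes are stand-ins, not the real struct sizes):

#include <stdio.h>

enum {
	SCSI_CMND_SZ = 456,	/* stand-in for sizeof(struct scsi_cmnd) */
	LLD_PRIV_SZ  = 64,	/* stand-in for shost->hostt->cmd_size */
	SGL_SZ	     = 128 * 16, /* stand-in for scsi_mq_sgl_size() */
	DATA_BUF_SZ  = 32,	/* stand-in for struct scsi_data_buffer */
};

int main(void)
{
	/* One blk-mq PDU per command:
	 * [scsi_cmnd][LLD private][data sgl][prot sdb][prot sgl]
	 */
	unsigned int sg_off   = SCSI_CMND_SZ + LLD_PRIV_SZ;
	unsigned int prot_off = sg_off + SGL_SZ;
	unsigned int total    = prot_off + DATA_BUF_SZ + SGL_SZ;

	printf("data sgl @%u, prot sdb @%u, pdu size %u\n",
	       sg_off, prot_off, total);
	return 0;
}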
@@ -2614,7 +2631,6 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
case SDEV_QUIESCE:
case SDEV_OFFLINE:
case SDEV_TRANSPORT_OFFLINE:
- case SDEV_BLOCK:
break;
default:
goto illegal;
@@ -2628,6 +2644,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
case SDEV_OFFLINE:
case SDEV_TRANSPORT_OFFLINE:
case SDEV_CANCEL:
+ case SDEV_BLOCK:
case SDEV_CREATED_BLOCK:
break;
default:
@@ -2871,7 +2888,12 @@ static void scsi_wait_for_queuecommand(struct scsi_device *sdev)
int
scsi_device_quiesce(struct scsi_device *sdev)
{
- int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
+ int err;
+
+ mutex_lock(&sdev->state_mutex);
+ err = scsi_device_set_state(sdev, SDEV_QUIESCE);
+ mutex_unlock(&sdev->state_mutex);
+
if (err)
return err;
@@ -2899,10 +2921,11 @@ void scsi_device_resume(struct scsi_device *sdev)
* so assume the state is being managed elsewhere (for example
* device deleted during suspend)
*/
- if (sdev->sdev_state != SDEV_QUIESCE ||
- scsi_device_set_state(sdev, SDEV_RUNNING))
- return;
- scsi_run_queue(sdev->request_queue);
+ mutex_lock(&sdev->state_mutex);
+ if (sdev->sdev_state == SDEV_QUIESCE &&
+ scsi_device_set_state(sdev, SDEV_RUNNING) == 0)
+ scsi_run_queue(sdev->request_queue);
+ mutex_unlock(&sdev->state_mutex);
}
EXPORT_SYMBOL(scsi_device_resume);
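With the transitions now done under state_mutex, quiesce/resume form a simple bracket around management I/O. A hedged caller sketch using the two exported entry points (error handling trimmed):

    #include <scsi/scsi_device.h>

    /* Drain user I/O, perform a management step, then resume. */
    static int ex_quiesced_operation(struct scsi_device *sdev)
    {
    	int ret = scsi_device_quiesce(sdev);	/* holds off new requests */

    	if (ret)
    		return ret;
    	/* ... issue internal commands while the device is quiesced ... */
    	scsi_device_resume(sdev);		/* re-runs the queue */
    	return 0;
    }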
@@ -2933,28 +2956,20 @@ scsi_target_resume(struct scsi_target *starget)
EXPORT_SYMBOL(scsi_target_resume);
/**
- * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
- * @sdev: device to block
- * @wait: Whether or not to wait until ongoing .queuecommand() /
- * .queue_rq() calls have finished.
+ * scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state
+ * @sdev: device to block
*
- * Block request made by scsi lld's to temporarily stop all
- * scsi commands on the specified device. May sleep.
+ * Pause SCSI command processing on the specified device. Does not sleep.
*
- * Returns zero if successful or error if not
+ * Returns zero if successful or a negative error code upon failure.
*
- * Notes:
- * This routine transitions the device to the SDEV_BLOCK state
- * (which must be a legal transition). When the device is in this
- * state, all commands are deferred until the scsi lld reenables
- * the device with scsi_device_unblock or device_block_tmo fires.
- *
- * To do: avoid that scsi_send_eh_cmnd() calls queuecommand() after
- * scsi_internal_device_block() has blocked a SCSI device and also
- * remove the rport mutex lock and unlock calls from srp_queuecommand().
+ * Notes:
+ * This routine transitions the device to the SDEV_BLOCK state (which must be
+ * a legal transition). When the device is in this state, command processing
+ * is paused until the device leaves the SDEV_BLOCK state. See also
+ * scsi_internal_device_unblock_nowait().
*/
-int
-scsi_internal_device_block(struct scsi_device *sdev, bool wait)
+int scsi_internal_device_block_nowait(struct scsi_device *sdev)
{
struct request_queue *q = sdev->request_queue;
unsigned long flags;
@@ -2974,45 +2989,86 @@ scsi_internal_device_block(struct scsi_device *sdev, bool wait)
* request queue.
*/
if (q->mq_ops) {
- if (wait)
- blk_mq_quiesce_queue(q);
- else
- blk_mq_stop_hw_queues(q);
+ blk_mq_quiesce_queue_nowait(q);
} else {
spin_lock_irqsave(q->queue_lock, flags);
blk_stop_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
- if (wait)
- scsi_wait_for_queuecommand(sdev);
}
return 0;
}
-EXPORT_SYMBOL_GPL(scsi_internal_device_block);
-
+EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);
+
/**
- * scsi_internal_device_unblock - resume a device after a block request
- * @sdev: device to resume
- * @new_state: state to set devices to after unblocking
+ * scsi_internal_device_block - try to transition to the SDEV_BLOCK state
+ * @sdev: device to block
+ *
+ * Pause SCSI command processing on the specified device and wait until all
+ * ongoing scsi_request_fn() / scsi_queue_rq() calls have finished. May sleep.
*
- * Called by scsi lld's or the midlayer to restart the device queue
- * for the previously suspended scsi device. Called from interrupt or
- * normal process context.
+ * Returns zero if successful or a negative error code upon failure.
*
- * Returns zero if successful or error if not.
+ * Note:
+ * This routine transitions the device to the SDEV_BLOCK state (which must be
+ * a legal transition). When the device is in this state, command processing
+ * is paused until the device leaves the SDEV_BLOCK state. See also
+ * scsi_internal_device_unblock().
*
- * Notes:
- * This routine transitions the device to the SDEV_RUNNING state
- * or to one of the offline states (which must be a legal transition)
- * allowing the midlayer to goose the queue for this device.
+ * To do: avoid that scsi_send_eh_cmnd() calls queuecommand() after
+ * scsi_internal_device_block() has blocked a SCSI device and also
+ * remove the rport mutex lock and unlock calls from srp_queuecommand().
*/
-int
-scsi_internal_device_unblock(struct scsi_device *sdev,
- enum scsi_device_state new_state)
+static int scsi_internal_device_block(struct scsi_device *sdev)
+{
+ struct request_queue *q = sdev->request_queue;
+ int err;
+
+ mutex_lock(&sdev->state_mutex);
+ err = scsi_internal_device_block_nowait(sdev);
+ if (err == 0) {
+ if (q->mq_ops)
+ blk_mq_quiesce_queue(q);
+ else
+ scsi_wait_for_queuecommand(sdev);
+ }
+ mutex_unlock(&sdev->state_mutex);
+
+ return err;
+}
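This is the usual _nowait split: a core that only flips state (safe where sleeping is not allowed) and a sleeping wrapper that serializes on the mutex and then drains. A generic sketch of the idiom, with assumed names:

    #include <linux/mutex.h>
    #include <linux/types.h>

    struct ex_dev {
    	struct mutex lock;
    	bool blocked;
    };

    static void ex_wait_for_inflight(struct ex_dev *d)
    {
    	/* wait for in-flight submissions to finish; elided */
    }

    static int ex_block_nowait(struct ex_dev *d)	/* never sleeps */
    {
    	d->blocked = true;
    	return 0;
    }

    static int ex_block(struct ex_dev *d)		/* process context */
    {
    	int err;

    	mutex_lock(&d->lock);
    	err = ex_block_nowait(d);
    	if (!err)
    		ex_wait_for_inflight(d);
    	mutex_unlock(&d->lock);
    	return err;
    }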
+
+void scsi_start_queue(struct scsi_device *sdev)
{
- struct request_queue *q = sdev->request_queue;
+ struct request_queue *q = sdev->request_queue;
unsigned long flags;
+ if (q->mq_ops) {
+ blk_mq_unquiesce_queue(q);
+ } else {
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_start_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
+}
+
+/**
+ * scsi_internal_device_unblock_nowait - resume a device after a block request
+ * @sdev: device to resume
+ * @new_state: state to set the device to after unblocking
+ *
+ * Restart the device queue for a previously suspended SCSI device. Does not
+ * sleep.
+ *
+ * Returns zero if successful or a negative error code upon failure.
+ *
+ * Notes:
+ * This routine transitions the device to the SDEV_RUNNING state or to one of
+ * the offline states (which must be a legal transition) allowing the midlayer
+ * to goose the queue for this device.
+ */
+int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
+ enum scsi_device_state new_state)
+{
/*
* Try to transition the scsi device to SDEV_RUNNING or one of the
* offlined states and goose the device queue if successful.
@@ -3030,22 +3086,42 @@ scsi_internal_device_unblock(struct scsi_device *sdev,
sdev->sdev_state != SDEV_OFFLINE)
return -EINVAL;
- if (q->mq_ops) {
- blk_mq_start_stopped_hw_queues(q, false);
- } else {
- spin_lock_irqsave(q->queue_lock, flags);
- blk_start_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
- }
+ scsi_start_queue(sdev);
return 0;
}
-EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
+EXPORT_SYMBOL_GPL(scsi_internal_device_unblock_nowait);
+
+/**
+ * scsi_internal_device_unblock - resume a device after a block request
+ * @sdev: device to resume
+ * @new_state: state to set the device to after unblocking
+ *
+ * Restart the device queue for a previously suspended SCSI device. May sleep.
+ *
+ * Returns zero if successful or a negative error code upon failure.
+ *
+ * Notes:
+ * This routine transitions the device to the SDEV_RUNNING state or to one of
+ * the offline states (which must be a legal transition) allowing the midlayer
+ * to goose the queue for this device.
+ */
+static int scsi_internal_device_unblock(struct scsi_device *sdev,
+ enum scsi_device_state new_state)
+{
+ int ret;
+
+ mutex_lock(&sdev->state_mutex);
+ ret = scsi_internal_device_unblock_nowait(sdev, new_state);
+ mutex_unlock(&sdev->state_mutex);
+
+ return ret;
+}
static void
device_block(struct scsi_device *sdev, void *data)
{
- scsi_internal_device_block(sdev, true);
+ scsi_internal_device_block(sdev);
}
static int
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 59ebc1795bb3..c11c1f9c912c 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -80,6 +80,8 @@ int scsi_eh_get_sense(struct list_head *work_q,
int scsi_noretry_cmd(struct scsi_cmnd *scmd);
/* scsi_lib.c */
+extern void scsi_add_cmd_to_list(struct scsi_cmnd *cmd);
+extern void scsi_del_cmd_from_list(struct scsi_cmnd *cmd);
extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
extern void scsi_device_unbusy(struct scsi_device *sdev);
extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
@@ -88,6 +90,7 @@ extern void scsi_run_host_queues(struct Scsi_Host *shost);
extern void scsi_requeue_run_queue(struct work_struct *work);
extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev);
extern struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev);
+extern void scsi_start_queue(struct scsi_device *sdev);
extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
extern void scsi_mq_destroy_tags(struct Scsi_Host *shost);
extern int scsi_init_queue(void);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 6f7128f49c30..fd88dabd599d 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -231,6 +231,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
sdev->id = starget->id;
sdev->lun = lun;
sdev->channel = starget->channel;
+ mutex_init(&sdev->state_mutex);
sdev->sdev_state = SDEV_CREATED;
INIT_LIST_HEAD(&sdev->siblings);
INIT_LIST_HEAD(&sdev->same_target_siblings);
@@ -384,11 +385,12 @@ static void scsi_target_reap_ref_release(struct kref *kref)
= container_of(kref, struct scsi_target, reap_ref);
/*
- * if we get here and the target is still in the CREATED state that
+ * if we get here and the target is still in a CREATED state that
* means it was allocated but never made visible (because a scan
* turned up no LUNs), so don't call device_del() on it.
*/
- if (starget->state != STARGET_CREATED) {
+ if ((starget->state != STARGET_CREATED) &&
+ (starget->state != STARGET_CREATED_REMOVE)) {
transport_remove_device(&starget->dev);
device_del(&starget->dev);
}
@@ -655,8 +657,6 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
if (pass == 1) {
if (BLIST_INQUIRY_36 & *bflags)
next_inquiry_len = 36;
- else if (BLIST_INQUIRY_58 & *bflags)
- next_inquiry_len = 58;
else if (sdev->inquiry_len)
next_inquiry_len = sdev->inquiry_len;
else
@@ -926,15 +926,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
sdev->use_10_for_rw = 1;
- if (*bflags & BLIST_MS_SKIP_PAGE_08)
- sdev->skip_ms_page_8 = 1;
-
- if (*bflags & BLIST_MS_SKIP_PAGE_3F)
- sdev->skip_ms_page_3f = 1;
-
- if (*bflags & BLIST_USE_10_BYTE_MS)
- sdev->use_10_for_ms = 1;
-
/* some devices don't like REPORT SUPPORTED OPERATION CODES
* and will simply timeout causing sd_mod init to take a very
* very long time */
@@ -943,21 +934,19 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
/* set the device running here so that slave configure
* may do I/O */
+ mutex_lock(&sdev->state_mutex);
ret = scsi_device_set_state(sdev, SDEV_RUNNING);
- if (ret) {
+ if (ret)
ret = scsi_device_set_state(sdev, SDEV_BLOCK);
+ mutex_unlock(&sdev->state_mutex);
- if (ret) {
- sdev_printk(KERN_ERR, sdev,
- "in wrong state %s to complete scan\n",
- scsi_device_state_name(sdev->sdev_state));
- return SCSI_SCAN_NO_RESPONSE;
- }
+ if (ret) {
+ sdev_printk(KERN_ERR, sdev,
+ "in wrong state %s to complete scan\n",
+ scsi_device_state_name(sdev->sdev_state));
+ return SCSI_SCAN_NO_RESPONSE;
}
- if (*bflags & BLIST_MS_192_BYTES_FOR_3F)
- sdev->use_192_bytes_for_3f = 1;
-
if (*bflags & BLIST_NOT_LOCKABLE)
sdev->lockable = 0;
@@ -967,9 +956,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
if (*bflags & BLIST_NO_DIF)
sdev->no_dif = 1;
- if (*bflags & BLIST_SYNC_ALUA)
- sdev->synchronous_alua = 1;
-
sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
if (*bflags & BLIST_TRY_VPD_PAGES)
@@ -1051,10 +1037,11 @@ static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
* allocate and set it up by calling scsi_add_lun.
*
* Return:
- * SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
- * SCSI_SCAN_TARGET_PRESENT: target responded, but no device is
+ *
+ * - SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
+ * - SCSI_SCAN_TARGET_PRESENT: target responded, but no device is
* attached at the LUN
- * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
+ * - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
**/
static int scsi_probe_and_add_lun(struct scsi_target *starget,
u64 lun, int *bflagsp,
@@ -1107,7 +1094,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
/*
* result contains valid SCSI INQUIRY data.
*/
- if (((result[0] >> 5) == 3) && !(bflags & BLIST_ATTACH_PQ3)) {
+ if ((result[0] >> 5) == 3) {
/*
* For a Peripheral qualifier 3 (011b), the SCSI
* spec says: The device server is not capable of
@@ -1265,11 +1252,7 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
*/
if (scsi_level < SCSI_3 && !(bflags & BLIST_LARGELUN))
max_dev_lun = min(8U, max_dev_lun);
-
- /*
- * Stop scanning at 255 unless BLIST_SCSI3LUN
- */
- if (!(bflags & BLIST_SCSI3LUN))
+ else
max_dev_lun = min(256U, max_dev_lun);
/*
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 82dfe07b1d47..d6984df71f1c 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -719,7 +719,7 @@ static ssize_t
store_state_field(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- int i;
+ int i, ret;
struct scsi_device *sdev = to_scsi_device(dev);
enum scsi_device_state state = 0;
@@ -734,9 +734,11 @@ store_state_field(struct device *dev, struct device_attribute *attr,
if (!state)
return -EINVAL;
- if (scsi_device_set_state(sdev, state))
- return -EINVAL;
- return count;
+ mutex_lock(&sdev->state_mutex);
+ ret = scsi_device_set_state(sdev, state);
+ mutex_unlock(&sdev->state_mutex);
+
+ return ret == 0 ? count : -EINVAL;
}
static ssize_t
@@ -1272,6 +1274,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
void __scsi_remove_device(struct scsi_device *sdev)
{
struct device *dev = &sdev->sdev_gendev;
+ int res;
/*
* This cleanup path is not reentrant and while it is impossible
@@ -1282,7 +1285,25 @@ void __scsi_remove_device(struct scsi_device *sdev)
return;
if (sdev->is_visible) {
- if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0)
+ /*
+ * If scsi_internal_device_block() is running concurrently,
+ * wait until it has finished before changing the device state.
+ */
+ mutex_lock(&sdev->state_mutex);
+ /*
+ * If blocked, we go straight to DEL and restart the queue so
+ * any commands issued during driver shutdown (like sync
+ * cache) are errored immediately.
+ */
+ res = scsi_device_set_state(sdev, SDEV_CANCEL);
+ if (res != 0) {
+ res = scsi_device_set_state(sdev, SDEV_DEL);
+ if (res == 0)
+ scsi_start_queue(sdev);
+ }
+ mutex_unlock(&sdev->state_mutex);
+
+ if (res != 0)
return;
bsg_unregister_queue(sdev->request_queue);
@@ -1298,7 +1319,10 @@ void __scsi_remove_device(struct scsi_device *sdev)
* scsi_run_queue() invocations have finished before tearing down the
* device.
*/
+ mutex_lock(&sdev->state_mutex);
scsi_device_set_state(sdev, SDEV_DEL);
+ mutex_unlock(&sdev->state_mutex);
+
blk_cleanup_queue(sdev->request_queue);
cancel_work_sync(&sdev->requeue_work);
@@ -1370,11 +1394,15 @@ restart:
spin_lock_irqsave(shost->host_lock, flags);
list_for_each_entry(starget, &shost->__targets, siblings) {
if (starget->state == STARGET_DEL ||
- starget->state == STARGET_REMOVE)
+ starget->state == STARGET_REMOVE ||
+ starget->state == STARGET_CREATED_REMOVE)
continue;
if (starget->dev.parent == dev || &starget->dev == dev) {
kref_get(&starget->reap_ref);
- starget->state = STARGET_REMOVE;
+ if (starget->state == STARGET_CREATED)
+ starget->state = STARGET_CREATED_REMOVE;
+ else
+ starget->state = STARGET_REMOVE;
spin_unlock_irqrestore(shost->host_lock, flags);
__scsi_remove_target(starget);
scsi_target_reap(starget);
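STARGET_CREATED_REMOVE carries the "never made visible" property across the transition to removal, so the reap release path can keep skipping device_del() for such targets. The resulting predicate, sketched against the enum in scsi_device.h:

    #include <scsi/scsi_device.h>

    /* Only targets that were actually made visible need device_del(). */
    static bool ex_needs_device_del(enum scsi_target_state state)
    {
    	return state != STARGET_CREATED &&
    	       state != STARGET_CREATED_REMOVE;
    }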
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index d4cf32d55546..7e24aa30c3b0 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -1692,7 +1692,7 @@ fc_private_host_rd_attr(npiv_vports_inuse, "%u\n", 20);
* Host Statistics Management
*/
-/* Show a given an attribute in the statistics group */
+/* Show a given attribute in the statistics group */
static ssize_t
fc_stat_show(const struct device *dev, char *buf, unsigned long offset)
{
@@ -2914,16 +2914,18 @@ EXPORT_SYMBOL(fc_remote_port_add);
* port is no longer part of the topology. Note: Although a port
* may no longer be part of the topology, it may persist in the remote
* ports displayed by the fc_host. We do this under 2 conditions:
+ *
* 1) If the port was a scsi target, we delay its deletion by "blocking" it.
- * This allows the port to temporarily disappear, then reappear without
- * disrupting the SCSI device tree attached to it. During the "blocked"
- * period the port will still exist.
+ * This allows the port to temporarily disappear, then reappear without
+ * disrupting the SCSI device tree attached to it. During the "blocked"
+ * period the port will still exist.
+ *
* 2) If the port was a scsi target and disappears for longer than we
- * expect, we'll delete the port and the tear down the SCSI device tree
- * attached to it. However, we want to semi-persist the target id assigned
- * to that port if it eventually does exist. The port structure will
- * remain (although with minimal information) so that the target id
- * bindings remails.
+ * expect, we'll delete the port and tear down the SCSI device tree
+ * attached to it. However, we want to semi-persist the target id assigned
+ * to that port if it eventually reappears. The port structure will
+ * remain (although with minimal information) so that the target id
+ * bindings also remain.
*
* If the remote port is not an FCP Target, it will be fully torn down
* and deallocated, including the fc_remote_port class device.
@@ -2937,7 +2939,7 @@ EXPORT_SYMBOL(fc_remote_port_add);
* If the remote port does not return (signaled by a LLDD call to
* fc_remote_port_add()) within the dev_loss_tmo timeout, then the
* scsi target is removed - killing all outstanding i/o and removing the
- * scsi devices attached ot it. The port structure will be marked Not
+ * scsi devices attached to it. The port structure will be marked Not
* Present and be partially cleared, leaving only enough information to
* recognize the remote port relative to the scsi target id binding if
* it later appears. The port will remain as long as there is a valid
@@ -3056,7 +3058,7 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
* There may have been a delete timer running on the
* port. Ensure that it is cancelled as we now know
* the port is an FCP Target.
- * Note: we know the rport is exists and in an online
+ * Note: we know the rport exists and is in an online
* state as the LLDD would not have had an rport
* reference to pass us.
*
@@ -3317,7 +3319,7 @@ EXPORT_SYMBOL(fc_block_scsi_eh);
* @ret_vport: The pointer to the created vport.
*
* Allocates and creates the vport structure, calls the parent host
+ * to instantiate the vport, then completes w/ class and sysfs creation.
+ * to instantiate the vport, this completes w/ class and sysfs creation.
*
* Notes:
* This routine assumes no locks are held on entry.
@@ -3397,7 +3399,7 @@ fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev,
/*
* if the parent isn't the physical adapter's Scsi_Host, ensure
- * the Scsi_Host at least contains ia symlink to the vport.
+ * the Scsi_Host at least contains a symlink to the vport.
*/
if (pdev != &shost->shost_gendev) {
error = sysfs_create_link(&shost->shost_gendev.kobj,
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 0ebe2f1bb908..5006a656e16a 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -33,6 +33,7 @@
#include <linux/bsg.h>
#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_request.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
@@ -172,7 +173,7 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
struct sas_rphy *rphy)
{
struct request *req;
- int ret;
+ blk_status_t ret;
int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
while ((req = blk_fetch_request(q)) != NULL) {
@@ -230,6 +231,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
q = blk_alloc_queue(GFP_KERNEL);
if (!q)
return -ENOMEM;
+ q->initialize_rq_fn = scsi_initialize_rq;
q->cmd_size = sizeof(struct scsi_request);
if (rphy) {
@@ -249,6 +251,11 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
if (error)
goto out_cleanup_queue;
+ /*
+ * by default assume old behaviour and bounce for any highmem page
+ */
+ blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
+
error = bsg_register_queue(q, dev, name, release);
if (error)
goto out_cleanup_queue;
@@ -264,6 +271,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
q->queuedata = shost;
queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
+ queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
return 0;
out_cleanup_queue:
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index 3c5d89852e9f..f617021c94f7 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -554,11 +554,12 @@ int srp_reconnect_rport(struct srp_rport *rport)
* invoking scsi_target_unblock() won't change the state of
* these devices into running so do that explicitly.
*/
- spin_lock_irq(shost->host_lock);
- __shost_for_each_device(sdev, shost)
+ shost_for_each_device(sdev, shost) {
+ mutex_lock(&sdev->state_mutex);
if (sdev->sdev_state == SDEV_OFFLINE)
sdev->sdev_state = SDEV_RUNNING;
- spin_unlock_irq(shost->host_lock);
+ mutex_unlock(&sdev->state_mutex);
+ }
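The switch from __shost_for_each_device() under host_lock to shost_for_each_device() matters because the loop body now takes a mutex: the reference-counting iterator may sleep, while the spinlock-protected one may not. A sketch of the sleeping iteration pattern:

    #include <scsi/scsi_device.h>
    #include <scsi/scsi_host.h>

    /*
     * shost_for_each_device() holds a device reference across each pass,
     * so sleeping locks are allowed inside the loop body.
     */
    static void ex_walk_devices(struct Scsi_Host *shost)
    {
    	struct scsi_device *sdev;

    	shost_for_each_device(sdev, shost) {
    		mutex_lock(&sdev->state_mutex);
    		/* ... inspect or adjust sdev->sdev_state ... */
    		mutex_unlock(&sdev->state_mutex);
    	}
    }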
} else if (rport->state == SRP_RPORT_RUNNING) {
/*
* srp_reconnect_rport() has been invoked with fast_io_fail
diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c
index 910f4a7a3924..31273468589c 100644
--- a/drivers/scsi/scsicam.c
+++ b/drivers/scsi/scsicam.c
@@ -116,8 +116,8 @@ EXPORT_SYMBOL(scsicam_bios_param);
* @hds: put heads here
* @secs: put sectors here
*
- * Description: determine the BIOS mapping/geometry used to create the partition
- * table, storing the results in *cyls, *hds, and *secs
+ * Determine the BIOS mapping/geometry used to create the partition
+ * table, storing the results in @cyls, @hds, and @secs
*
* Returns: -1 on failure, 0 on success.
*/
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b6bb4e0ce0e3..bea36adeee17 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -50,6 +50,7 @@
#include <linux/string_helpers.h>
#include <linux/async.h>
#include <linux/slab.h>
+#include <linux/sed-opal.h>
#include <linux/pm_runtime.h>
#include <linux/pr.h>
#include <linux/t10-pi.h>
@@ -155,7 +156,7 @@ static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- int i, ct = -1, rcd, wce, sp;
+ int ct, rcd, wce, sp;
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
char buffer[64];
@@ -178,16 +179,10 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
sdkp->cache_override = 0;
}
- for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) {
- len = strlen(sd_cache_types[i]);
- if (strncmp(sd_cache_types[i], buf, len) == 0 &&
- buf[len] == '\n') {
- ct = i;
- break;
- }
- }
+ ct = sysfs_match_string(sd_cache_types, buf);
if (ct < 0)
return -EINVAL;
+
rcd = ct & 0x01 ? 1 : 0;
wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0;
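sysfs_match_string() compares a newline-terminated sysfs input against a dense, NULL-free string array (hence the "dense arrays" comments added near lbp_mode[] and zeroing_mode[] below) and returns the matching index or -EINVAL. A small usage sketch:

    #include <linux/kernel.h>
    #include <linux/string.h>

    static const char *const ex_modes[] = { "off", "on", "auto" };

    static int ex_parse_mode(const char *buf)
    {
    	int idx = sysfs_match_string(ex_modes, buf); /* tolerates '\n' */

    	if (idx < 0)
    		return idx;	/* -EINVAL: no entry matched */
    	return idx;		/* index into ex_modes[] */
    }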
@@ -227,7 +222,7 @@ manage_start_stop_show(struct device *dev, struct device_attribute *attr,
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
- return snprintf(buf, 20, "%u\n", sdp->manage_start_stop);
+ return sprintf(buf, "%u\n", sdp->manage_start_stop);
}
static ssize_t
@@ -251,7 +246,7 @@ allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
- return snprintf(buf, 40, "%d\n", sdkp->device->allow_restart);
+ return sprintf(buf, "%u\n", sdkp->device->allow_restart);
}
static ssize_t
@@ -279,7 +274,7 @@ cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
struct scsi_disk *sdkp = to_scsi_disk(dev);
int ct = sdkp->RCD + 2*sdkp->WCE;
- return snprintf(buf, 40, "%s\n", sd_cache_types[ct]);
+ return sprintf(buf, "%s\n", sd_cache_types[ct]);
}
static DEVICE_ATTR_RW(cache_type);
@@ -288,7 +283,7 @@ FUA_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
- return snprintf(buf, 20, "%u\n", sdkp->DPOFUA);
+ return sprintf(buf, "%u\n", sdkp->DPOFUA);
}
static DEVICE_ATTR_RO(FUA);
@@ -298,7 +293,7 @@ protection_type_show(struct device *dev, struct device_attribute *attr,
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
- return snprintf(buf, 20, "%u\n", sdkp->protection_type);
+ return sprintf(buf, "%u\n", sdkp->protection_type);
}
static ssize_t
@@ -341,9 +336,9 @@ protection_mode_show(struct device *dev, struct device_attribute *attr,
}
if (!dif && !dix)
- return snprintf(buf, 20, "none\n");
+ return sprintf(buf, "none\n");
- return snprintf(buf, 20, "%s%u\n", dix ? "dix" : "dif", dif);
+ return sprintf(buf, "%s%u\n", dix ? "dix" : "dif", dif);
}
static DEVICE_ATTR_RO(protection_mode);
@@ -352,7 +347,7 @@ app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
- return snprintf(buf, 20, "%u\n", sdkp->ATO);
+ return sprintf(buf, "%u\n", sdkp->ATO);
}
static DEVICE_ATTR_RO(app_tag_own);
@@ -362,10 +357,11 @@ thin_provisioning_show(struct device *dev, struct device_attribute *attr,
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
- return snprintf(buf, 20, "%u\n", sdkp->lbpme);
+ return sprintf(buf, "%u\n", sdkp->lbpme);
}
static DEVICE_ATTR_RO(thin_provisioning);
+/* sysfs_match_string() requires dense arrays */
static const char *lbp_mode[] = {
[SD_LBP_FULL] = "full",
[SD_LBP_UNMAP] = "unmap",
@@ -381,7 +377,7 @@ provisioning_mode_show(struct device *dev, struct device_attribute *attr,
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
- return snprintf(buf, 20, "%s\n", lbp_mode[sdkp->provisioning_mode]);
+ return sprintf(buf, "%s\n", lbp_mode[sdkp->provisioning_mode]);
}
static ssize_t
@@ -390,6 +386,7 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr,
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
+ int mode;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -402,23 +399,17 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr,
if (sdp->type != TYPE_DISK)
return -EINVAL;
- if (!strncmp(buf, lbp_mode[SD_LBP_UNMAP], 20))
- sd_config_discard(sdkp, SD_LBP_UNMAP);
- else if (!strncmp(buf, lbp_mode[SD_LBP_WS16], 20))
- sd_config_discard(sdkp, SD_LBP_WS16);
- else if (!strncmp(buf, lbp_mode[SD_LBP_WS10], 20))
- sd_config_discard(sdkp, SD_LBP_WS10);
- else if (!strncmp(buf, lbp_mode[SD_LBP_ZERO], 20))
- sd_config_discard(sdkp, SD_LBP_ZERO);
- else if (!strncmp(buf, lbp_mode[SD_LBP_DISABLE], 20))
- sd_config_discard(sdkp, SD_LBP_DISABLE);
- else
+ mode = sysfs_match_string(lbp_mode, buf);
+ if (mode < 0)
return -EINVAL;
+ sd_config_discard(sdkp, mode);
+
return count;
}
static DEVICE_ATTR_RW(provisioning_mode);
+/* sysfs_match_string() requires dense arrays */
static const char *zeroing_mode[] = {
[SD_ZERO_WRITE] = "write",
[SD_ZERO_WS] = "writesame",
@@ -432,7 +423,7 @@ zeroing_mode_show(struct device *dev, struct device_attribute *attr,
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
- return snprintf(buf, 20, "%s\n", zeroing_mode[sdkp->zeroing_mode]);
+ return sprintf(buf, "%s\n", zeroing_mode[sdkp->zeroing_mode]);
}
static ssize_t
@@ -440,21 +431,17 @@ zeroing_mode_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
+ int mode;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- if (!strncmp(buf, zeroing_mode[SD_ZERO_WRITE], 20))
- sdkp->zeroing_mode = SD_ZERO_WRITE;
- else if (!strncmp(buf, zeroing_mode[SD_ZERO_WS], 20))
- sdkp->zeroing_mode = SD_ZERO_WS;
- else if (!strncmp(buf, zeroing_mode[SD_ZERO_WS16_UNMAP], 20))
- sdkp->zeroing_mode = SD_ZERO_WS16_UNMAP;
- else if (!strncmp(buf, zeroing_mode[SD_ZERO_WS10_UNMAP], 20))
- sdkp->zeroing_mode = SD_ZERO_WS10_UNMAP;
- else
+ mode = sysfs_match_string(zeroing_mode, buf);
+ if (mode < 0)
return -EINVAL;
+ sdkp->zeroing_mode = mode;
+
return count;
}
static DEVICE_ATTR_RW(zeroing_mode);
@@ -465,7 +452,7 @@ max_medium_access_timeouts_show(struct device *dev,
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
- return snprintf(buf, 20, "%u\n", sdkp->max_medium_access_timeouts);
+ return sprintf(buf, "%u\n", sdkp->max_medium_access_timeouts);
}
static ssize_t
@@ -491,7 +478,7 @@ max_write_same_blocks_show(struct device *dev, struct device_attribute *attr,
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
- return snprintf(buf, 20, "%u\n", sdkp->max_ws_blocks);
+ return sprintf(buf, "%u\n", sdkp->max_ws_blocks);
}
static ssize_t
@@ -643,6 +630,26 @@ static void scsi_disk_put(struct scsi_disk *sdkp)
mutex_unlock(&sd_ref_mutex);
}
+#ifdef CONFIG_BLK_SED_OPAL
+static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
+ size_t len, bool send)
+{
+ struct scsi_device *sdev = data;
+ u8 cdb[12] = { 0, };
+ int ret;
+
+ cdb[0] = send ? SECURITY_PROTOCOL_OUT : SECURITY_PROTOCOL_IN;
+ cdb[1] = secp;
+ put_unaligned_be16(spsp, &cdb[2]);
+ put_unaligned_be32(len, &cdb[6]);
+
+ ret = scsi_execute_req(sdev, cdb,
+ send ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
+ buffer, len, NULL, SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+ return ret <= 0 ? ret : -EIO;
+}
+#endif /* CONFIG_BLK_SED_OPAL */
+
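For reference, the 12-byte SECURITY PROTOCOL IN/OUT CDB built in sd_sec_submit() follows SPC: byte 1 carries the security protocol, bytes 2-3 the protocol-specific field (big-endian), and bytes 6-9 the transfer length (big-endian). A decoding sketch of the same layout (helper names are illustrative):

    #include <asm/unaligned.h>
    #include <linux/types.h>

    struct ex_secproto {
    	u8  opcode;	/* 0xa2 = SECURITY PROTOCOL IN, 0xb5 = OUT */
    	u8  secp;	/* security protocol */
    	u16 spsp;	/* security protocol specific */
    	u32 length;	/* allocation / transfer length */
    };

    static void ex_decode_secproto(const u8 cdb[12], struct ex_secproto *p)
    {
    	p->opcode = cdb[0];
    	p->secp   = cdb[1];
    	p->spsp   = get_unaligned_be16(&cdb[2]);
    	p->length = get_unaligned_be32(&cdb[6]);
    }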
static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
unsigned int dix, unsigned int dif)
{
@@ -696,6 +703,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
switch (mode) {
+ case SD_LBP_FULL:
case SD_LBP_DISABLE:
blk_queue_max_discard_sectors(q, 0);
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
@@ -1454,6 +1462,9 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
if (error)
goto out;
+ if (is_sed_ioctl(cmd))
+ return sed_ioctl(sdkp->opal_dev, cmd, p);
+
/*
* Send SCSI addressing ioctls directly to mid level, send other
* ioctls to block level and then onto mid level if they can't be
@@ -1818,8 +1829,9 @@ static void sd_eh_reset(struct scsi_cmnd *scmd)
static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
{
struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
+ struct scsi_device *sdev = scmd->device;
- if (!scsi_device_online(scmd->device) ||
+ if (!scsi_device_online(sdev) ||
!scsi_medium_access_command(scmd) ||
host_byte(scmd->result) != DID_TIME_OUT ||
eh_disp != SUCCESS)
@@ -1845,7 +1857,9 @@ static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) {
scmd_printk(KERN_ERR, scmd,
"Medium access timeout failure. Offlining disk!\n");
- scsi_device_set_state(scmd->device, SDEV_OFFLINE);
+ mutex_lock(&sdev->state_mutex);
+ scsi_device_set_state(sdev, SDEV_OFFLINE);
+ mutex_unlock(&sdev->state_mutex);
return SUCCESS;
}
@@ -3014,6 +3028,20 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
sdkp->ws10 = 1;
}
+static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer)
+{
+ struct scsi_device *sdev = sdkp->device;
+
+ if (!sdev->security_supported)
+ return;
+
+ if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
+ SECURITY_PROTOCOL_IN) == 1 &&
+ scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
+ SECURITY_PROTOCOL_OUT) == 1)
+ sdkp->security = 1;
+}
+
/**
* sd_revalidate_disk - called the first time a new disk is seen,
* performs disk spin up, read_capacity, etc.
@@ -3067,6 +3095,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_cache_type(sdkp, buffer);
sd_read_app_tag_own(sdkp, buffer);
sd_read_write_same(sdkp, buffer);
+ sd_read_security(sdkp, buffer);
}
sdkp->first_scan = 0;
@@ -3227,6 +3256,12 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
sd_revalidate_disk(gd);
+ if (sdkp->security) {
+ sdkp->opal_dev = init_opal_dev(sdp, &sd_sec_submit);
+ if (sdkp->opal_dev)
+ sd_printk(KERN_NOTICE, sdkp, "supports TCG Opal\n");
+ }
+
sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
sdp->removable ? "removable " : "");
scsi_autopm_put_device(sdp);
@@ -3376,6 +3411,8 @@ static int sd_remove(struct device *dev)
sd_zbc_remove(sdkp);
+ free_opal_dev(sdkp->opal_dev);
+
blk_register_region(devt, SD_MINORS, NULL,
sd_default_probe, NULL, NULL);
@@ -3528,6 +3565,7 @@ static int sd_suspend_runtime(struct device *dev)
static int sd_resume(struct device *dev)
{
struct scsi_disk *sdkp = dev_get_drvdata(dev);
+ int ret;
if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
return 0;
@@ -3536,7 +3574,10 @@ static int sd_resume(struct device *dev)
return 0;
sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
- return sd_start_stop_device(sdkp, 1);
+ ret = sd_start_stop_device(sdkp, 1);
+ if (!ret)
+ opal_unlock_from_suspend(sdkp->opal_dev);
+ return ret;
}
/**
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 61d02efd366c..99c4dde9b6bf 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -71,6 +71,7 @@ struct scsi_disk {
struct scsi_device *device;
struct device dev;
struct gendisk *disk;
+ struct opal_dev *opal_dev;
#ifdef CONFIG_BLK_DEV_ZONED
unsigned int nr_zones;
unsigned int zone_blocks;
@@ -114,6 +115,7 @@ struct scsi_disk {
unsigned rc_basis: 2;
unsigned zoned: 2;
unsigned urswrz : 1;
+ unsigned security : 1;
unsigned ignore_medium_access_errors : 1;
};
#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 82c33a6edbea..21225d62b0c1 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -177,7 +177,7 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
} Sg_device;
/* tasklet or soft irq callback */
-static void sg_rq_end_io(struct request *rq, int uptodate);
+static void sg_rq_end_io(struct request *rq, blk_status_t status);
static int sg_start_req(Sg_request *srp, unsigned char *cmd);
static int sg_finish_rem_req(Sg_request * srp);
static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
@@ -808,7 +808,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
if (atomic_read(&sdp->detaching)) {
if (srp->bio) {
scsi_req_free_cmd(scsi_req(srp->rq));
- blk_end_request_all(srp->rq, -EIO);
+ blk_end_request_all(srp->rq, BLK_STS_IOERR);
srp->rq = NULL;
}
@@ -1300,7 +1300,7 @@ sg_rq_end_io_usercontext(struct work_struct *work)
* level when a command is completed (or has failed).
*/
static void
-sg_rq_end_io(struct request *rq, int uptodate)
+sg_rq_end_io(struct request *rq, blk_status_t status)
{
struct sg_request *srp = rq->end_io_data;
struct scsi_request *req = scsi_req(rq);
@@ -1732,8 +1732,6 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
}
req = scsi_req(rq);
- scsi_req_init(rq);
-
if (hp->cmd_len > BLK_MAX_CDB)
req->cmd = long_cmdp;
memcpy(req->cmd, cmd, hp->cmd_len);
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 71b4b91d2215..80cfa93e407c 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -249,8 +249,8 @@ static int sgiwd93_probe(struct platform_device *pdev)
hdata = host_to_hostdata(host);
hdata->dev = &pdev->dev;
- hdata->cpu = dma_alloc_noncoherent(&pdev->dev, HPC_DMA_SIZE,
- &hdata->dma, GFP_KERNEL);
+ hdata->cpu = dma_alloc_attrs(&pdev->dev, HPC_DMA_SIZE, &hdata->dma,
+ GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
if (!hdata->cpu) {
printk(KERN_WARNING "sgiwd93: Could not allocate memory for "
"host %d buffer.\n", unit);
@@ -289,7 +289,8 @@ static int sgiwd93_probe(struct platform_device *pdev)
out_irq:
free_irq(irq, host);
out_free:
- dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma);
+ dma_free_attrs(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma,
+ DMA_ATTR_NON_CONSISTENT);
out_put:
scsi_host_put(host);
out:
@@ -305,7 +306,8 @@ static int sgiwd93_remove(struct platform_device *pdev)
scsi_remove_host(host);
free_irq(pd->irq, host);
- dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma);
+ dma_free_attrs(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma,
+ DMA_ATTR_NON_CONSISTENT);
scsi_host_put(host);
return 0;
}
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index b673825f46b5..07ec8a8877de 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -1,6 +1,6 @@
/*
* driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2016 Microsemi Corporation
+ * Copyright (c) 2016-2017 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
* This program is free software; you can redistribute it and/or modify
@@ -16,6 +16,8 @@
*
*/
+#include <linux/io-64-nonatomic-lo-hi.h>
+
#if !defined(_SMARTPQI_H)
#define _SMARTPQI_H
@@ -61,7 +63,7 @@ struct pqi_device_registers {
/*
* controller registers
*
- * These are defined by the PMC implementation.
+ * These are defined by the Microsemi implementation.
*
* Some registers (those named sis_*) are only used when in
* legacy SIS mode before we transition the controller into
@@ -102,6 +104,12 @@ enum pqi_io_path {
AIO_PATH = 1
};
+enum pqi_irq_mode {
+ IRQ_MODE_NONE,
+ IRQ_MODE_INTX,
+ IRQ_MODE_MSIX
+};
+
struct pqi_sg_descriptor {
__le64 address;
__le32 length;
@@ -484,7 +492,6 @@ struct pqi_raid_error_info {
#define PQI_EVENT_TYPE_LOGICAL_DEVICE 0x5
#define PQI_EVENT_TYPE_AIO_STATE_CHANGE 0xfd
#define PQI_EVENT_TYPE_AIO_CONFIG_CHANGE 0xfe
-#define PQI_EVENT_TYPE_HEARTBEAT 0xff
#pragma pack()
@@ -629,17 +636,70 @@ struct pqi_encryption_info {
u32 encrypt_tweak_upper;
};
-#define PQI_MAX_OUTSTANDING_REQUESTS ((u32)~0)
-#define PQI_MAX_TRANSFER_SIZE (4 * 1024U * 1024U)
+#pragma pack(1)
+
+#define PQI_CONFIG_TABLE_SIGNATURE "CFGTABLE"
+#define PQI_CONFIG_TABLE_MAX_LENGTH ((u16)~0)
+
+/* configuration table section IDs */
+#define PQI_CONFIG_TABLE_SECTION_GENERAL_INFO 0
+#define PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES 1
+#define PQI_CONFIG_TABLE_SECTION_FIRMWARE_ERRATA 2
+#define PQI_CONFIG_TABLE_SECTION_DEBUG 3
+#define PQI_CONFIG_TABLE_SECTION_HEARTBEAT 4
+
+struct pqi_config_table {
+ u8 signature[8]; /* "CFGTABLE" */
+ __le32 first_section_offset; /* offset in bytes from the base */
+ /* address of this table to the */
+ /* first section */
+};
+
+struct pqi_config_table_section_header {
+ __le16 section_id; /* as defined by the */
+ /* PQI_CONFIG_TABLE_SECTION_* */
+ /* manifest constants above */
+ __le16 next_section_offset; /* offset in bytes from base */
+ /* address of the table of the */
+ /* next section or 0 if last entry */
+};
+
+struct pqi_config_table_general_info {
+ struct pqi_config_table_section_header header;
+ __le32 section_length; /* size of this section in bytes */
+ /* including the section header */
+ __le32 max_outstanding_requests; /* max. outstanding */
+ /* commands supported by */
+ /* the controller */
+ __le32 max_sg_size; /* max. transfer size of a single */
+ /* command */
+ __le32 max_sg_per_request; /* max. number of scatter-gather */
+ /* entries supported in a single */
+ /* command */
+};
+
+struct pqi_config_table_debug {
+ struct pqi_config_table_section_header header;
+ __le32 scratchpad;
+};
+
+struct pqi_config_table_heartbeat {
+ struct pqi_config_table_section_header header;
+ __le32 heartbeat_counter;
+};
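Sections are located by offsets relative to the table base: first_section_offset points at the first header, and each header's next_section_offset (0 terminates the chain) points at the next. A hedged walker, assuming a CPU-addressable copy of the table:

    /* Sketch: find a config-table section by ID. */
    static struct pqi_config_table_section_header *
    ex_find_section(struct pqi_config_table *table, u16 wanted_id)
    {
    	struct pqi_config_table_section_header *section;
    	u32 offset = le32_to_cpu(table->first_section_offset);

    	while (offset) {
    		section = (void *)table + offset;
    		if (le16_to_cpu(section->section_id) == wanted_id)
    			return section;
    		offset = le16_to_cpu(section->next_section_offset);
    	}
    	return NULL;	/* section not present */
    }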
+
+#define PQI_MAX_OUTSTANDING_REQUESTS ((u32)~0)
+#define PQI_MAX_OUTSTANDING_REQUESTS_KDUMP 32
+#define PQI_MAX_TRANSFER_SIZE (4 * 1024U * 1024U)
+#define PQI_MAX_TRANSFER_SIZE_KDUMP (512 * 1024U)
#define RAID_MAP_MAX_ENTRIES 1024
#define PQI_PHYSICAL_DEVICE_BUS 0
#define PQI_RAID_VOLUME_BUS 1
#define PQI_HBA_BUS 2
-#define PQI_MAX_BUS PQI_HBA_BUS
-
-#pragma pack(1)
+#define PQI_EXTERNAL_RAID_VOLUME_BUS 3
+#define PQI_MAX_BUS PQI_EXTERNAL_RAID_VOLUME_BUS
struct report_lun_header {
__be32 list_length;
@@ -668,7 +728,6 @@ struct report_phys_lun_extended_entry {
};
/* for device_flags field of struct report_phys_lun_extended_entry */
-#define REPORT_PHYS_LUN_DEV_FLAG_NON_DISK 0x1
#define REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED 0x8
struct report_phys_lun_extended {
@@ -726,14 +785,15 @@ struct pqi_scsi_dev {
__be64 wwid;
u8 volume_id[16];
u8 is_physical_device : 1;
+ u8 is_external_raid_device : 1;
u8 target_lun_valid : 1;
- u8 expose_device : 1;
- u8 no_uld_attach : 1;
- u8 aio_enabled : 1; /* only valid for physical disks */
u8 device_gone : 1;
u8 new_device : 1;
u8 keep_device : 1;
u8 volume_offline : 1;
+ bool aio_enabled; /* only valid for physical disks */
+ bool in_reset;
+ bool device_offline;
u8 vendor[8]; /* bytes 8-15 of inquiry data */
u8 model[16]; /* bytes 16-31 of inquiry data */
u64 sas_address;
@@ -747,12 +807,11 @@ struct pqi_scsi_dev {
u8 bay;
u8 box[8];
u16 phys_connector[8];
- int offload_configured; /* I/O accel RAID offload configured */
- int offload_enabled; /* I/O accel RAID offload enabled */
- int offload_enabled_pending;
- int offload_to_mirror; /* Send next I/O accelerator RAID */
- /* offload request to mirror drive. */
- struct raid_map *raid_map; /* I/O accelerator RAID map */
+ bool raid_bypass_configured; /* RAID bypass configured */
+ bool raid_bypass_enabled; /* RAID bypass enabled */
+ int offload_to_mirror; /* Send next RAID bypass request */
+ /* to mirror drive. */
+ struct raid_map *raid_map; /* RAID bypass map */
struct pqi_sas_port *sas_port;
struct scsi_device *sdev;
@@ -761,13 +820,15 @@ struct pqi_scsi_dev {
struct list_head new_device_list_entry;
struct list_head add_list_entry;
struct list_head delete_list_entry;
+
+ atomic_t scsi_cmds_outstanding;
};
/* VPD inquiry pages */
#define SCSI_VPD_SUPPORTED_PAGES 0x0 /* standard page */
#define SCSI_VPD_DEVICE_ID 0x83 /* standard page */
#define CISS_VPD_LV_DEVICE_GEOMETRY 0xc1 /* vendor-specific page */
-#define CISS_VPD_LV_OFFLOAD_STATUS 0xc2 /* vendor-specific page */
+#define CISS_VPD_LV_BYPASS_STATUS 0xc2 /* vendor-specific page */
#define CISS_VPD_LV_STATUS 0xc3 /* vendor-specific page */
#define VPD_PAGE (1 << 8)
@@ -851,7 +912,9 @@ struct pqi_io_request {
void (*io_complete_callback)(struct pqi_io_request *io_request,
void *context);
void *context;
+ u8 raid_bypass : 1;
int status;
+ struct pqi_queue_group *queue_group;
struct scsi_cmnd *scmd;
void *error_info;
struct pqi_sg_descriptor *sg_chain_buffer;
@@ -860,15 +923,7 @@ struct pqi_io_request {
struct list_head request_list_entry;
};
-/* for indexing into the pending_events[] field of struct pqi_ctrl_info */
-#define PQI_EVENT_HEARTBEAT 0
-#define PQI_EVENT_HOTPLUG 1
-#define PQI_EVENT_HARDWARE 2
-#define PQI_EVENT_PHYSICAL_DEVICE 3
-#define PQI_EVENT_LOGICAL_DEVICE 4
-#define PQI_EVENT_AIO_STATE_CHANGE 5
-#define PQI_EVENT_AIO_CONFIG_CHANGE 6
-#define PQI_NUM_SUPPORTED_EVENTS 7
+#define PQI_NUM_SUPPORTED_EVENTS 6
struct pqi_event {
bool pending;
@@ -911,7 +966,7 @@ struct pqi_ctrl_info {
dma_addr_t error_buffer_dma_handle;
size_t sg_chain_buffer_length;
unsigned int num_queue_groups;
- unsigned int num_active_queue_groups;
+ u16 max_hw_queue_index;
u16 num_elements_per_iq;
u16 num_elements_per_oq;
u16 max_inbound_iu_length_per_firmware;
@@ -926,6 +981,7 @@ struct pqi_ctrl_info {
struct pqi_admin_queues admin_queues;
struct pqi_queue_group queue_groups[PQI_MAX_QUEUE_GROUPS];
struct pqi_event_queue event_queue;
+ enum pqi_irq_mode irq_mode;
int max_msix_vectors;
int num_msix_vectors_enabled;
int num_msix_vectors_initialized;
@@ -933,11 +989,12 @@ struct pqi_ctrl_info {
struct Scsi_Host *scsi_host;
struct mutex scan_mutex;
+ struct mutex lun_reset_mutex;
+ bool controller_online;
+ bool block_requests;
u8 inbound_spanning_supported : 1;
u8 outbound_spanning_supported : 1;
u8 pqi_mode_enabled : 1;
- u8 controller_online : 1;
- u8 heartbeat_timer_started : 1;
struct list_head scsi_device_list;
spinlock_t scsi_device_list_lock;
@@ -951,20 +1008,28 @@ struct pqi_ctrl_info {
struct pqi_io_request *io_request_pool;
u16 next_io_request_slot;
- struct pqi_event pending_events[PQI_NUM_SUPPORTED_EVENTS];
+ struct pqi_event events[PQI_NUM_SUPPORTED_EVENTS];
struct work_struct event_work;
atomic_t num_interrupts;
int previous_num_interrupts;
- unsigned int num_heartbeats_requested;
+ u32 previous_heartbeat_count;
+ __le32 __iomem *heartbeat_counter;
struct timer_list heartbeat_timer;
+ struct work_struct ctrl_offline_work;
struct semaphore sync_request_sem;
- struct semaphore lun_reset_sem;
+ atomic_t num_busy_threads;
+ atomic_t num_blocked_threads;
+ wait_queue_head_t block_requests_wait;
+
+ struct list_head raid_bypass_retry_list;
+ spinlock_t raid_bypass_retry_list_lock;
+ struct work_struct raid_bypass_retry_work;
};
enum pqi_ctrl_mode {
- UNKNOWN,
+ SIS_MODE = 0,
PQI_MODE
};
@@ -973,9 +1038,6 @@ enum pqi_ctrl_mode {
*/
#define PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH 27
-/* 0 = no limit */
-#define PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH 0
-
/* CISS commands */
#define CISS_READ 0xc0
#define CISS_REPORT_LOG 0xc2 /* Report Logical LUNs */
@@ -996,13 +1058,13 @@ enum pqi_ctrl_mode {
#define BMIC_WRITE_HOST_WELLNESS 0xa5
#define BMIC_CACHE_FLUSH 0xc2
-#define SA_CACHE_FLUSH 0x01
+#define SA_CACHE_FLUSH 0x1
#define MASKED_DEVICE(lunid) ((lunid)[3] & 0xc0)
-#define CISS_GET_BUS(lunid) ((lunid)[7] & 0x3f)
+#define CISS_GET_LEVEL_2_BUS(lunid) ((lunid)[7] & 0x3f)
#define CISS_GET_LEVEL_2_TARGET(lunid) ((lunid)[6])
#define CISS_GET_DRIVE_NUMBER(lunid) \
- (((CISS_GET_BUS((lunid)) - 1) << 8) + \
+ (((CISS_GET_LEVEL_2_BUS((lunid)) - 1) << 8) + \
CISS_GET_LEVEL_2_TARGET((lunid)))
#define NO_TIMEOUT ((unsigned long) -1)
@@ -1069,9 +1131,9 @@ struct bmic_identify_physical_device {
u8 multi_lun_device_lun_count;
u8 minimum_good_fw_revision[8];
u8 unique_inquiry_bytes[20];
- u8 current_temperature_degreesC;
- u8 temperature_threshold_degreesC;
- u8 max_temperature_degreesC;
+ u8 current_temperature_degrees;
+ u8 temperature_threshold_degrees;
+ u8 max_temperature_degrees;
u8 logical_blocks_per_phys_block_exp;
__le16 current_queue_depth_limit;
u8 switch_name[10];
@@ -1084,10 +1146,22 @@ struct bmic_identify_physical_device {
u8 smart_carrier_authentication;
u8 smart_carrier_app_fw_version;
u8 smart_carrier_bootloader_fw_version;
+ u8 sanitize_flags;
+ u8 encryption_key_flags;
u8 encryption_key_name[64];
__le32 misc_drive_flags;
__le16 dek_index;
- u8 padding[112];
+ __le16 hba_drive_encryption_flags;
+ __le16 max_overwrite_time;
+ __le16 max_block_erase_time;
+ __le16 max_crypto_erase_time;
+ u8 connector_info[5];
+ u8 connector_name[8][8];
+ u8 page_83_identifier[16];
+ u8 maximum_link_rate[256];
+ u8 negotiated_physical_link_rate[256];
+ u8 box_connector_name[8];
+ u8 padding_to_multiple_of_512[9];
};
#pragma pack()
@@ -1099,36 +1173,8 @@ int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node,
void pqi_remove_sas_device(struct pqi_scsi_dev *device);
struct pqi_scsi_dev *pqi_find_device_by_sas_rphy(
struct pqi_ctrl_info *ctrl_info, struct sas_rphy *rphy);
+void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd);
extern struct sas_function_template pqi_sas_transport_functions;
-#if !defined(readq)
-#define readq readq
-static inline u64 readq(const volatile void __iomem *addr)
-{
- u32 lower32;
- u32 upper32;
-
- lower32 = readl(addr);
- upper32 = readl(addr + 4);
-
- return ((u64)upper32 << 32) | lower32;
-}
-#endif
-
-#if !defined(writeq)
-#define writeq writeq
-static inline void writeq(u64 value, volatile void __iomem *addr)
-{
- u32 lower32;
- u32 upper32;
-
- lower32 = lower_32_bits(value);
- upper32 = upper_32_bits(value);
-
- writel(lower32, addr);
- writel(upper32, addr + 4);
-}
-#endif
-
#endif /* _SMARTPQI_H */
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 657ad15682a3..cb8f886e705c 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -1,6 +1,6 @@
/*
* driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2016 Microsemi Corporation
+ * Copyright (c) 2016-2017 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
* This program is free software; you can redistribute it and/or modify
@@ -24,6 +24,7 @@
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
+#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
@@ -39,15 +40,18 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "0.9.13-370"
-#define DRIVER_MAJOR 0
-#define DRIVER_MINOR 9
-#define DRIVER_RELEASE 13
-#define DRIVER_REVISION 370
+#define DRIVER_VERSION "1.0.4-100"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_RELEASE 4
+#define DRIVER_REVISION 100
-#define DRIVER_NAME "Microsemi PQI Driver (v" DRIVER_VERSION ")"
+#define DRIVER_NAME "Microsemi PQI Driver (v" \
+ DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT "smartpqi"
+#define PQI_EXTRA_SGL_MEMORY (12 * sizeof(struct pqi_sg_descriptor))
+
MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
DRIVER_VERSION);
@@ -55,12 +59,9 @@ MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
-#define PQI_ENABLE_MULTI_QUEUE_SUPPORT 0
-
-static char *hpe_branded_controller = "HPE Smart Array Controller";
-static char *microsemi_branded_controller = "Microsemi Smart Family Controller";
-
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
+static void pqi_ctrl_offline_worker(struct work_struct *work);
+static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
@@ -72,7 +73,7 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
unsigned int cdb_length, struct pqi_queue_group *queue_group,
- struct pqi_encryption_info *encryption_info);
+ struct pqi_encryption_info *encryption_info, bool raid_bypass);
/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
@@ -81,12 +82,66 @@ static struct scsi_transport_template *pqi_sas_transport_template;
static atomic_t pqi_controller_count = ATOMIC_INIT(0);
+enum pqi_lockup_action {
+ NONE,
+ REBOOT,
+ PANIC
+};
+
+static enum pqi_lockup_action pqi_lockup_action = NONE;
+
+static struct {
+ enum pqi_lockup_action action;
+ char *name;
+} pqi_lockup_actions[] = {
+ {
+ .action = NONE,
+ .name = "none",
+ },
+ {
+ .action = REBOOT,
+ .name = "reboot",
+ },
+ {
+ .action = PANIC,
+ .name = "panic",
+ },
+};
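The table above pairs each accepted lockup_action string with its enum value, so parsing the module parameter is a linear scan. A sketch of such a parser (not necessarily the driver's exact code), falling back to NONE for unrecognized input:

    #include <linux/kernel.h>
    #include <linux/string.h>

    static enum pqi_lockup_action ex_parse_lockup_action(const char *param)
    {
    	unsigned int i;

    	if (!param)
    		return NONE;
    	for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++)
    		if (strcmp(param, pqi_lockup_actions[i].name) == 0)
    			return pqi_lockup_actions[i].action;
    	return NONE;	/* unrecognized: take no action */
    }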
+
+static unsigned int pqi_supported_event_types[] = {
+ PQI_EVENT_TYPE_HOTPLUG,
+ PQI_EVENT_TYPE_HARDWARE,
+ PQI_EVENT_TYPE_PHYSICAL_DEVICE,
+ PQI_EVENT_TYPE_LOGICAL_DEVICE,
+ PQI_EVENT_TYPE_AIO_STATE_CHANGE,
+ PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
+};
+
static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
- pqi_disable_device_id_wildcards, int, S_IRUGO | S_IWUSR);
+ pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
"Disable device ID wildcards.");
+static int pqi_disable_heartbeat;
+module_param_named(disable_heartbeat,
+ pqi_disable_heartbeat, int, 0644);
+MODULE_PARM_DESC(disable_heartbeat,
+ "Disable heartbeat.");
+
+static int pqi_disable_ctrl_shutdown;
+module_param_named(disable_ctrl_shutdown,
+ pqi_disable_ctrl_shutdown, int, 0644);
+MODULE_PARM_DESC(disable_ctrl_shutdown,
+ "Disable controller shutdown when controller locked up.");
+
+static char *pqi_lockup_action_param;
+module_param_named(lockup_action,
+ pqi_lockup_action_param, charp, 0644);
+MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
+ "\t\tSupported: none, reboot, panic\n"
+ "\t\tDefault: none");
+
static char *raid_levels[] = {
"RAID-0",
"RAID-4",
@@ -102,7 +157,7 @@ static char *pqi_raid_level_to_string(u8 raid_level)
if (raid_level < ARRAY_SIZE(raid_levels))
return raid_levels[raid_level];
- return "";
+ return "RAID UNKNOWN";
}
#define SA_RAID_0 0
@@ -117,6 +172,7 @@ static char *pqi_raid_level_to_string(u8 raid_level)
static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
+ pqi_prep_for_scsi_done(scmd);
scmd->scsi_done(scmd);
}
@@ -137,6 +193,11 @@ static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
return !device->is_physical_device;
}
+static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
+{
+ return scsi3addr[2] != 0;
+}
+
static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
return !ctrl_info->controller_online;
@@ -166,12 +227,124 @@ static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
sis_write_driver_scratch(ctrl_info, mode);
}
-#define PQI_RESCAN_WORK_INTERVAL (10 * HZ)
+static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
+{
+ ctrl_info->block_requests = true;
+ scsi_block_requests(ctrl_info->scsi_host);
+}
+
+static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
+{
+ ctrl_info->block_requests = false;
+ wake_up_all(&ctrl_info->block_requests_wait);
+ pqi_retry_raid_bypass_requests(ctrl_info);
+ scsi_unblock_requests(ctrl_info->scsi_host);
+}
+
+static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
+{
+ return ctrl_info->block_requests;
+}
+
+static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
+ unsigned long timeout_msecs)
+{
+ unsigned long remaining_msecs;
+
+ if (!pqi_ctrl_blocked(ctrl_info))
+ return timeout_msecs;
+
+ atomic_inc(&ctrl_info->num_blocked_threads);
+
+ if (timeout_msecs == NO_TIMEOUT) {
+ wait_event(ctrl_info->block_requests_wait,
+ !pqi_ctrl_blocked(ctrl_info));
+ remaining_msecs = timeout_msecs;
+ } else {
+ unsigned long remaining_jiffies;
+
+ remaining_jiffies =
+ wait_event_timeout(ctrl_info->block_requests_wait,
+ !pqi_ctrl_blocked(ctrl_info),
+ msecs_to_jiffies(timeout_msecs));
+ remaining_msecs = jiffies_to_msecs(remaining_jiffies);
+ }
+
+ atomic_dec(&ctrl_info->num_blocked_threads);
+
+ return remaining_msecs;
+}
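pqi_wait_if_ctrl_blocked() returns the unspent part of the caller's timeout, so a submission path can budget its wait. A hedged caller sketch:

    #include <linux/errno.h>

    /* Sketch: spend part of the timeout budget waiting to be unblocked. */
    static int ex_submit_with_budget(struct pqi_ctrl_info *ctrl_info,
    				     unsigned long timeout_msecs)
    {
    	timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
    	if (timeout_msecs == 0)
    		return -ETIMEDOUT;	/* budget exhausted while blocked */
    	/* ... issue the request with the remaining budget ... */
    	return 0;
    }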
+
+static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
+{
+ atomic_inc(&ctrl_info->num_busy_threads);
+}
+
+static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
+{
+ atomic_dec(&ctrl_info->num_busy_threads);
+}
+
+static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
+{
+ while (atomic_read(&ctrl_info->num_busy_threads) >
+ atomic_read(&ctrl_info->num_blocked_threads))
+ usleep_range(1000, 2000);
+}
+
+static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
+{
+ return device->device_offline;
+}
+
+static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
+{
+ device->in_reset = true;
+}
+
+static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
+{
+ device->in_reset = false;
+}
+
+static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
+{
+ return device->in_reset;
+}
+
+static inline void pqi_schedule_rescan_worker_with_delay(
+ struct pqi_ctrl_info *ctrl_info, unsigned long delay)
+{
+ if (pqi_ctrl_offline(ctrl_info))
+ return;
+
+ schedule_delayed_work(&ctrl_info->rescan_work, delay);
+}
static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
- schedule_delayed_work(&ctrl_info->rescan_work,
- PQI_RESCAN_WORK_INTERVAL);
+ pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
+}
+
+#define PQI_RESCAN_WORK_DELAY (10 * HZ)
+
+static inline void pqi_schedule_rescan_worker_delayed(
+ struct pqi_ctrl_info *ctrl_info)
+{
+ pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
+}
+
+static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
+{
+ cancel_delayed_work_sync(&ctrl_info->rescan_work);
+}
+
+static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
+{
+ if (!ctrl_info->heartbeat_counter)
+ return 0;
+
+ return readl(ctrl_info->heartbeat_counter);
}
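With the counter exposed through the config table, detecting a firmware lockup reduces to checking that the value advances between timer ticks. Roughly, the heartbeat timer does the equivalent of this sketch:

    /* Sketch: take the controller offline if the heartbeat stalls. */
    static void ex_check_heartbeat(struct pqi_ctrl_info *ctrl_info)
    {
    	u32 count = pqi_read_heartbeat_counter(ctrl_info);

    	if (count == ctrl_info->previous_heartbeat_count)
    		pqi_take_ctrl_offline(ctrl_info);	/* no progress */
    	else
    		ctrl_info->previous_heartbeat_count = count;
    }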
static int pqi_map_single(struct pci_dev *pci_dev,
@@ -280,7 +453,6 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
default:
dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
cmd);
- WARN_ON(cmd);
break;
}
@@ -305,6 +477,14 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
buffer, buffer_length, pci_dir);
}
+static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
+{
+ io_request->scmd = NULL;
+ io_request->status = 0;
+ io_request->error_info = NULL;
+ io_request->raid_bypass = false;
+}
+
static struct pqi_io_request *pqi_alloc_io_request(
struct pqi_ctrl_info *ctrl_info)
{
@@ -322,9 +502,7 @@ static struct pqi_io_request *pqi_alloc_io_request(
/* benignly racy */
ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
- io_request->scmd = NULL;
- io_request->status = 0;
- io_request->error_info = NULL;
+ pqi_reinit_io_request(io_request);
return io_request;
}
@@ -500,7 +678,7 @@ static int pqi_write_driver_version_to_host_wellness(
buffer->driver_version_tag[1] = 'V';
put_unaligned_le16(sizeof(buffer->driver_version),
&buffer->driver_version_length);
- strncpy(buffer->driver_version, DRIVER_VERSION,
+ strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
sizeof(buffer->driver_version) - 1);
buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
buffer->end_tag[0] = 'Z';
@@ -586,6 +764,9 @@ static void pqi_update_time_worker(struct work_struct *work)
ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
update_time_work);
+ if (pqi_ctrl_offline(ctrl_info))
+ return;
+
rc = pqi_write_current_time_to_host_wellness(ctrl_info);
if (rc)
dev_warn(&ctrl_info->pci_dev->dev,
@@ -601,6 +782,12 @@ static inline void pqi_schedule_update_time_worker(
schedule_delayed_work(&ctrl_info->update_time_work, 0);
}
+static inline void pqi_cancel_update_time_worker(
+ struct pqi_ctrl_info *ctrl_info)
+{
+ cancel_delayed_work_sync(&ctrl_info->update_time_work);
+}
+
static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
void *buffer, size_t buffer_length)
{
@@ -771,6 +958,9 @@ static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
u8 *scsi3addr;
u32 lunid;
+ int bus;
+ int target;
+ int lun;
scsi3addr = device->scsi3addr;
lunid = get_unaligned_le32(scsi3addr);
@@ -783,8 +973,16 @@ static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
}
if (pqi_is_logical_device(device)) {
- pqi_set_bus_target_lun(device, PQI_RAID_VOLUME_BUS, 0,
- lunid & 0x3fff);
+ if (device->is_external_raid_device) {
+ bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
+ target = (lunid >> 16) & 0x3fff;
+ lun = lunid & 0xff;
+ } else {
+ bus = PQI_RAID_VOLUME_BUS;
+ target = 0;
+ lun = lunid & 0x3fff;
+ }
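+ /*
+ * Worked example (hypothetical lunid): an external RAID lunid of
+ * 0x00120034 decodes to target 0x12, lun 0x34; the same lunid on an
+ * internal volume yields target 0, lun 0x34.
+ */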
+ pqi_set_bus_target_lun(device, bus, target, lun);
device->target_lun_valid = true;
return;
}
@@ -878,7 +1076,10 @@ static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
return 0;
bad_raid_map:
- dev_warn(&ctrl_info->pci_dev->dev, "%s\n", err_msg);
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "scsi %d:%d:%d:%d %s\n",
+ ctrl_info->scsi_host->host_no,
+ device->bus, device->target, device->lun, err_msg);
return -EINVAL;
}
@@ -924,35 +1125,33 @@ error:
return rc;
}
-static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info,
+static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device)
{
int rc;
u8 *buffer;
- u8 offload_status;
+ u8 bypass_status;
buffer = kmalloc(64, GFP_KERNEL);
if (!buffer)
return;
rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
- VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64);
+ VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
if (rc)
goto out;
-#define OFFLOAD_STATUS_BYTE 4
-#define OFFLOAD_CONFIGURED_BIT 0x1
-#define OFFLOAD_ENABLED_BIT 0x2
+#define RAID_BYPASS_STATUS 4
+#define RAID_BYPASS_CONFIGURED 0x1
+#define RAID_BYPASS_ENABLED 0x2
- offload_status = buffer[OFFLOAD_STATUS_BYTE];
- device->offload_configured =
- !!(offload_status & OFFLOAD_CONFIGURED_BIT);
- if (device->offload_configured) {
- device->offload_enabled_pending =
- !!(offload_status & OFFLOAD_ENABLED_BIT);
- if (pqi_get_raid_map(ctrl_info, device))
- device->offload_enabled_pending = false;
- }
+ bypass_status = buffer[RAID_BYPASS_STATUS];
+ device->raid_bypass_configured =
+ (bypass_status & RAID_BYPASS_CONFIGURED) != 0;
+ if (device->raid_bypass_configured &&
+ (bypass_status & RAID_BYPASS_ENABLED) &&
+ pqi_get_raid_map(ctrl_info, device) == 0)
+ device->raid_bypass_enabled = true;
out:
kfree(buffer);
@@ -1016,15 +1215,19 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
scsi_sanitize_inquiry_string(&buffer[16], 16);
device->devtype = buffer[0] & 0x1f;
- memcpy(device->vendor, &buffer[8],
- sizeof(device->vendor));
- memcpy(device->model, &buffer[16],
- sizeof(device->model));
+ memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
+ memcpy(device->model, &buffer[16], sizeof(device->model));
if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
- pqi_get_raid_level(ctrl_info, device);
- pqi_get_offload_status(ctrl_info, device);
- pqi_get_volume_status(ctrl_info, device);
+ if (device->is_external_raid_device) {
+ device->raid_level = SA_RAID_UNKNOWN;
+ device->volume_status = CISS_LV_OK;
+ device->volume_offline = false;
+ } else {
+ pqi_get_raid_level(ctrl_info, device);
+ pqi_get_raid_bypass_status(ctrl_info, device);
+ pqi_get_volume_status(ctrl_info, device);
+ }
}
out:
@@ -1138,8 +1341,7 @@ static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
status = "Volume undergoing encryption re-keying process";
break;
case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
- status =
- "Encrypted volume inaccessible - disabled on ctrl";
+ status = "Volume encrypted but encryption is disabled";
break;
case CISS_LV_PENDING_ENCRYPTION:
status = "Volume pending migration to encrypted state";
@@ -1166,85 +1368,6 @@ static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
device->bus, device->target, device->lun, status);
}
-static struct pqi_scsi_dev *pqi_find_disk_by_aio_handle(
- struct pqi_ctrl_info *ctrl_info, u32 aio_handle)
-{
- struct pqi_scsi_dev *device;
-
- list_for_each_entry(device, &ctrl_info->scsi_device_list,
- scsi_device_list_entry) {
- if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
- continue;
- if (pqi_is_logical_device(device))
- continue;
- if (device->aio_handle == aio_handle)
- return device;
- }
-
- return NULL;
-}
-
-static void pqi_update_logical_drive_queue_depth(
- struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *logical_drive)
-{
- unsigned int i;
- struct raid_map *raid_map;
- struct raid_map_disk_data *disk_data;
- struct pqi_scsi_dev *phys_disk;
- unsigned int num_phys_disks;
- unsigned int num_raid_map_entries;
- unsigned int queue_depth;
-
- logical_drive->queue_depth = PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH;
-
- raid_map = logical_drive->raid_map;
- if (!raid_map)
- return;
-
- disk_data = raid_map->disk_data;
- num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
- (get_unaligned_le16(&raid_map->data_disks_per_row) +
- get_unaligned_le16(&raid_map->metadata_disks_per_row));
- num_raid_map_entries = num_phys_disks *
- get_unaligned_le16(&raid_map->row_cnt);
-
- queue_depth = 0;
- for (i = 0; i < num_raid_map_entries; i++) {
- phys_disk = pqi_find_disk_by_aio_handle(ctrl_info,
- disk_data[i].aio_handle);
-
- if (!phys_disk) {
- dev_warn(&ctrl_info->pci_dev->dev,
- "failed to find physical disk for logical drive %016llx\n",
- get_unaligned_be64(logical_drive->scsi3addr));
- logical_drive->offload_enabled = false;
- logical_drive->offload_enabled_pending = false;
- kfree(raid_map);
- logical_drive->raid_map = NULL;
- return;
- }
-
- queue_depth += phys_disk->queue_depth;
- }
-
- logical_drive->queue_depth = queue_depth;
-}
-
-static void pqi_update_all_logical_drive_queue_depths(
- struct pqi_ctrl_info *ctrl_info)
-{
- struct pqi_scsi_dev *device;
-
- list_for_each_entry(device, &ctrl_info->scsi_device_list,
- scsi_device_list_entry) {
- if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
- continue;
- if (!pqi_is_logical_device(device))
- continue;
- pqi_update_logical_drive_queue_depth(ctrl_info, device);
- }
-}
-
static void pqi_rescan_worker(struct work_struct *work)
{
struct pqi_ctrl_info *ctrl_info;
@@ -1336,24 +1459,65 @@ static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
return DEVICE_NOT_FOUND;
}
+#define PQI_DEV_INFO_BUFFER_LENGTH 128
+
static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
char *action, struct pqi_scsi_dev *device)
{
- dev_info(&ctrl_info->pci_dev->dev,
- "%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
- action,
- ctrl_info->scsi_host->host_no,
- device->bus,
- device->target,
- device->lun,
+ ssize_t count;
+ char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
+
+ count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
+ "%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
+
+ if (device->target_lun_valid)
+ count += snprintf(buffer + count,
+ PQI_DEV_INFO_BUFFER_LENGTH - count,
+ "%d:%d",
+ device->target,
+ device->lun);
+ else
+ count += snprintf(buffer + count,
+ PQI_DEV_INFO_BUFFER_LENGTH - count,
+ "-:-");
+
+ if (pqi_is_logical_device(device))
+ count += snprintf(buffer + count,
+ PQI_DEV_INFO_BUFFER_LENGTH - count,
+ " %08x%08x",
+ *((u32 *)&device->scsi3addr),
+ *((u32 *)&device->scsi3addr[4]));
+ else
+ count += snprintf(buffer + count,
+ PQI_DEV_INFO_BUFFER_LENGTH - count,
+ " %016llx", device->sas_address);
+
+ count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
+ " %s %.8s %.16s ",
scsi_device_type(device->devtype),
device->vendor,
- device->model,
- pqi_raid_level_to_string(device->raid_level),
- device->offload_configured ? '+' : '-',
- device->offload_enabled_pending ? '+' : '-',
- device->expose_device ? '+' : '-',
- device->queue_depth);
+ device->model);
+
+ if (pqi_is_logical_device(device)) {
+ if (device->devtype == TYPE_DISK)
+ count += snprintf(buffer + count,
+ PQI_DEV_INFO_BUFFER_LENGTH - count,
+ "SSDSmartPathCap%c En%c %-12s",
+ device->raid_bypass_configured ? '+' : '-',
+ device->raid_bypass_enabled ? '+' : '-',
+ pqi_raid_level_to_string(device->raid_level));
+ } else {
+ count += snprintf(buffer + count,
+ PQI_DEV_INFO_BUFFER_LENGTH - count,
+ "AIO%c", device->aio_enabled ? '+' : '-');
+ if (device->devtype == TYPE_DISK ||
+ device->devtype == TYPE_ZBC)
+ count += snprintf(buffer + count,
+ PQI_DEV_INFO_BUFFER_LENGTH - count,
+ " qd=%-6d", device->queue_depth);
+ }
+
+ dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}
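+
+/*
+ * Hypothetical example of the resulting log line for a RAID volume:
+ *
+ * added 2:1:0:0 0000004000000001 Direct-Access MSCC LOGICAL VOLUME SSDSmartPathCap+ En+ RAID-5
+ */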
/* Assumes the SCSI device list lock is held. */
@@ -1373,8 +1537,8 @@ static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
/* By definition, the scsi3addr and wwid fields are already the same. */
existing_device->is_physical_device = new_device->is_physical_device;
- existing_device->expose_device = new_device->expose_device;
- existing_device->no_uld_attach = new_device->no_uld_attach;
+ existing_device->is_external_raid_device =
+ new_device->is_external_raid_device;
existing_device->aio_enabled = new_device->aio_enabled;
memcpy(existing_device->vendor, new_device->vendor,
sizeof(existing_device->vendor));
@@ -1392,13 +1556,13 @@ static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
sizeof(existing_device->box));
memcpy(existing_device->phys_connector, new_device->phys_connector,
sizeof(existing_device->phys_connector));
- existing_device->offload_configured = new_device->offload_configured;
- existing_device->offload_enabled = false;
- existing_device->offload_enabled_pending =
- new_device->offload_enabled_pending;
existing_device->offload_to_mirror = 0;
kfree(existing_device->raid_map);
existing_device->raid_map = new_device->raid_map;
+ existing_device->raid_bypass_configured =
+ new_device->raid_bypass_configured;
+ existing_device->raid_bypass_enabled =
+ new_device->raid_bypass_enabled;
/* To prevent this from being freed later. */
new_device->raid_map = NULL;
@@ -1440,11 +1604,8 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device;
struct pqi_scsi_dev *next;
struct pqi_scsi_dev *matching_device;
- struct list_head add_list;
- struct list_head delete_list;
-
- INIT_LIST_HEAD(&add_list);
- INIT_LIST_HEAD(&delete_list);
+ LIST_HEAD(add_list);
+ LIST_HEAD(delete_list);
/*
* The idea here is to do as little work as possible while holding the
@@ -1490,9 +1651,6 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
*/
device->new_device = true;
break;
- default:
- WARN_ON(find_result);
- break;
}
}
@@ -1519,26 +1677,19 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
device->keep_device = true;
}
- pqi_update_all_logical_drive_queue_depths(ctrl_info);
-
- list_for_each_entry(device, &ctrl_info->scsi_device_list,
- scsi_device_list_entry)
- device->offload_enabled =
- device->offload_enabled_pending;
-
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
/* Remove all devices that have gone away. */
list_for_each_entry_safe(device, next, &delete_list,
delete_list_entry) {
- if (device->sdev)
- pqi_remove_device(ctrl_info, device);
if (device->volume_offline) {
pqi_dev_info(ctrl_info, "offline", device);
pqi_show_volume_status(ctrl_info, device);
} else {
pqi_dev_info(ctrl_info, "removed", device);
}
+ if (device->sdev)
+ pqi_remove_device(ctrl_info, device);
list_del(&device->delete_list_entry);
pqi_free_device(device);
}
@@ -1559,7 +1710,8 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
/* Expose any new devices. */
list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
- if (device->expose_device && !device->sdev) {
+ if (!device->sdev) {
+ pqi_dev_info(ctrl_info, "added", device);
rc = pqi_add_device(ctrl_info, device);
if (rc) {
dev_warn(&ctrl_info->pci_dev->dev,
@@ -1568,10 +1720,8 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
device->bus, device->target,
device->lun);
pqi_fixup_botched_add(ctrl_info, device);
- continue;
}
}
- pqi_dev_info(ctrl_info, "added", device);
}
}
@@ -1591,8 +1741,8 @@ static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
/*
* Only support the HBA controller itself as a RAID
* controller. If it's a RAID controller other than
- * the HBA itself (an external RAID controller, MSA500
- * or similar), we don't support it.
+ * the HBA itself (an external RAID controller, for
+ * example), we don't support it.
*/
if (pqi_is_hba_lunid(device->scsi3addr))
is_supported = true;
@@ -1602,43 +1752,20 @@ static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
return is_supported;
}
-static inline bool pqi_skip_device(u8 *scsi3addr,
- struct report_phys_lun_extended_entry *phys_lun_ext_entry)
+static inline bool pqi_skip_device(u8 *scsi3addr)
{
- u8 device_flags;
-
- if (!MASKED_DEVICE(scsi3addr))
- return false;
-
- /* The device is masked. */
-
- device_flags = phys_lun_ext_entry->device_flags;
-
- if (device_flags & REPORT_PHYS_LUN_DEV_FLAG_NON_DISK) {
- /*
- * It's a non-disk device. We ignore all devices of this type
- * when they're masked.
- */
+ /* Ignore all masked devices. */
+ if (MASKED_DEVICE(scsi3addr))
return true;
- }
return false;
}
-static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
-{
- /* Expose all devices except for physical devices that are masked. */
- if (device->is_physical_device && MASKED_DEVICE(device->scsi3addr))
- return false;
-
- return true;
-}
-
static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
int i;
int rc;
- struct list_head new_device_list_head;
+ LIST_HEAD(new_device_list_head);
struct report_phys_lun_extended *physdev_list = NULL;
struct report_log_lun_extended *logdev_list = NULL;
struct report_phys_lun_extended_entry *phys_lun_ext_entry;
@@ -1654,9 +1781,7 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
bool is_physical_device;
u8 *scsi3addr;
static char *out_of_memory_msg =
- "out of memory, device discovery stopped";
-
- INIT_LIST_HEAD(&new_device_list_head);
+ "failed to allocate memory, device discovery stopped";
rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
if (rc)
@@ -1732,8 +1857,7 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
scsi3addr = log_lun_ext_entry->lunid;
}
- if (is_physical_device &&
- pqi_skip_device(scsi3addr, phys_lun_ext_entry))
+ if (is_physical_device && pqi_skip_device(scsi3addr))
continue;
if (device)
@@ -1744,7 +1868,9 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
device->is_physical_device = is_physical_device;
- device->raid_level = SA_RAID_UNKNOWN;
+ if (!is_physical_device)
+ device->is_external_raid_device =
+ pqi_is_external_raid_addr(scsi3addr);
/* Gather information about the device. */
rc = pqi_get_device_info(ctrl_info, device);
@@ -1754,9 +1880,16 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
goto out;
}
if (rc) {
- dev_warn(&ctrl_info->pci_dev->dev,
- "obtaining device info failed, skipping device %016llx\n",
- get_unaligned_be64(device->scsi3addr));
+ if (device->is_physical_device)
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "obtaining device info failed, skipping physical device %016llx\n",
+ get_unaligned_be64(
+ &phys_lun_ext_entry->wwid));
+ else
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "obtaining device info failed, skipping logical device %08x%08x\n",
+ *((u32 *)&device->scsi3addr),
+ *((u32 *)&device->scsi3addr[4]));
rc = 0;
continue;
}
@@ -1766,8 +1899,6 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
pqi_assign_bus_target_lun(device);
- device->expose_device = pqi_expose_device(device);
-
if (device->is_physical_device) {
device->wwid = phys_lun_ext_entry->wwid;
if ((phys_lun_ext_entry->device_flags &
@@ -1823,19 +1954,25 @@ static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
unsigned long flags;
struct pqi_scsi_dev *device;
- struct pqi_scsi_dev *next;
- spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ while (1) {
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
+ struct pqi_scsi_dev, scsi_device_list_entry);
+ if (device)
+ list_del(&device->scsi_device_list_entry);
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
+ flags);
+
+ if (!device)
+ break;
- list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
- scsi_device_list_entry) {
if (device->sdev)
pqi_remove_device(ctrl_info, device);
- list_del(&device->scsi_device_list_entry);
pqi_free_device(device);
}
-
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
}
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
@@ -1849,7 +1986,7 @@ static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
rc = pqi_update_scsi_devices(ctrl_info);
if (rc)
- pqi_schedule_rescan_worker(ctrl_info);
+ pqi_schedule_rescan_worker_delayed(ctrl_info);
mutex_unlock(&ctrl_info->scan_mutex);
@@ -1873,6 +2010,18 @@ static int pqi_scan_finished(struct Scsi_Host *shost,
return !mutex_is_locked(&ctrl_info->scan_mutex);
}
+static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
+{
+ mutex_lock(&ctrl_info->scan_mutex);
+ mutex_unlock(&ctrl_info->scan_mutex);
+}
+
+static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
+{
+ mutex_lock(&ctrl_info->lun_reset_mutex);
+ mutex_unlock(&ctrl_info->lun_reset_mutex);
+}
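+
+/*
+ * The two helpers above use an empty mutex lock/unlock pair as a barrier:
+ * the lock cannot be taken until any scan or LUN reset currently holding
+ * it completes, so they return only once that work has finished.
+ */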
+
static inline void pqi_set_encryption_info(
struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
u64 first_block)
@@ -1895,7 +2044,7 @@ static inline void pqi_set_encryption_info(
}
/*
- * Attempt to perform offload RAID mapping for a logical volume I/O.
+ * Attempt to perform RAID bypass mapping for a logical volume I/O.
*/
#define PQI_RAID_BYPASS_INELIGIBLE 1
@@ -2227,7 +2376,7 @@ static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
}
return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
- cdb, cdb_length, queue_group, encryption_info_ptr);
+ cdb, cdb_length, queue_group, encryption_info_ptr, true);
}
#define PQI_STATUS_IDLE 0x0
@@ -2299,23 +2448,26 @@ static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
struct pqi_scsi_dev *device;
device = io_request->scmd->device->hostdata;
- device->offload_enabled = false;
+ device->raid_bypass_enabled = false;
+ device->aio_enabled = false;
}
-static inline void pqi_take_device_offline(struct scsi_device *sdev)
+static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
{
struct pqi_ctrl_info *ctrl_info;
struct pqi_scsi_dev *device;
- if (scsi_device_online(sdev)) {
- scsi_device_set_state(sdev, SDEV_OFFLINE);
- ctrl_info = shost_to_hba(sdev->host);
- schedule_delayed_work(&ctrl_info->rescan_work, 0);
- device = sdev->hostdata;
- dev_err(&ctrl_info->pci_dev->dev, "offlined scsi %d:%d:%d:%d\n",
- ctrl_info->scsi_host->host_no, device->bus,
- device->target, device->lun);
- }
+ device = sdev->hostdata;
+ if (device->device_offline)
+ return;
+
+ device->device_offline = true;
+ scsi_device_set_state(sdev, SDEV_OFFLINE);
+ ctrl_info = shost_to_hba(sdev->host);
+ pqi_schedule_rescan_worker(ctrl_info);
+ dev_err(&ctrl_info->pci_dev->dev, "offlined %s scsi %d:%d:%d:%d\n",
+ path, ctrl_info->scsi_host->host_no, device->bus,
+ device->target, device->lun);
}
static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
@@ -2337,13 +2489,43 @@ static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
scsi_status = error_info->status;
host_byte = DID_OK;
- if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) {
+ switch (error_info->data_out_result) {
+ case PQI_DATA_IN_OUT_GOOD:
+ break;
+ case PQI_DATA_IN_OUT_UNDERFLOW:
xfer_count =
get_unaligned_le32(&error_info->data_out_transferred);
residual_count = scsi_bufflen(scmd) - xfer_count;
scsi_set_resid(scmd, residual_count);
if (xfer_count < scmd->underflow)
host_byte = DID_SOFT_ERROR;
+ break;
+ case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
+ case PQI_DATA_IN_OUT_ABORTED:
+ host_byte = DID_ABORT;
+ break;
+ case PQI_DATA_IN_OUT_TIMEOUT:
+ host_byte = DID_TIME_OUT;
+ break;
+ case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
+ case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
+ case PQI_DATA_IN_OUT_BUFFER_ERROR:
+ case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
+ case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
+ case PQI_DATA_IN_OUT_ERROR:
+ case PQI_DATA_IN_OUT_HARDWARE_ERROR:
+ case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
+ case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
+ case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
+ case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
+ case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
+ case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
+ case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
+ case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
+ case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
+ default:
+ host_byte = DID_ERROR;
+ break;
}
sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
@@ -2360,7 +2542,7 @@ static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
sshdr.sense_key == HARDWARE_ERROR &&
sshdr.asc == 0x3e &&
sshdr.ascq == 0x1) {
- pqi_take_device_offline(scmd->device);
+ pqi_take_device_offline(scmd->device, "RAID");
host_byte = DID_NO_CONNECT;
}
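+ /*
+ * The check above matches sense key HARDWARE ERROR with ASC/ASCQ
+ * 0x3e/0x01 (T10 "logical unit failure"): the device itself has
+ * failed, so it is taken offline and the command completed with
+ * DID_NO_CONNECT.
+ */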
@@ -2419,9 +2601,11 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
break;
case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
case PQI_AIO_STATUS_INVALID_DEVICE:
- device_offline = true;
- pqi_take_device_offline(scmd->device);
- host_byte = DID_NO_CONNECT;
+ if (!io_request->raid_bypass) {
+ device_offline = true;
+ pqi_take_device_offline(scmd->device, "AIO");
+ host_byte = DID_NO_CONNECT;
+ }
scsi_status = SAM_STAT_CHECK_CONDITION;
break;
case PQI_AIO_STATUS_IO_ERROR:
@@ -2547,7 +2731,6 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
dev_err(&ctrl_info->pci_dev->dev,
"unexpected IU type: 0x%x\n",
response->header.iu_type);
- WARN_ON(response->header.iu_type);
break;
}
@@ -2583,23 +2766,18 @@ static inline unsigned int pqi_num_elements_free(unsigned int pi,
return elements_in_queue - num_elements_used - 1;
}
-#define PQI_EVENT_ACK_TIMEOUT 30
-
-static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info,
+static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
struct pqi_event_acknowledge_request *iu, size_t iu_length)
{
pqi_index_t iq_pi;
pqi_index_t iq_ci;
unsigned long flags;
void *next_element;
- unsigned long timeout;
struct pqi_queue_group *queue_group;
queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
- timeout = (PQI_EVENT_ACK_TIMEOUT * HZ) + jiffies;
-
while (1) {
spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
@@ -2613,11 +2791,8 @@ static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info,
spin_unlock_irqrestore(
&queue_group->submit_lock[RAID_PATH], flags);
- if (time_after(jiffies, timeout)) {
- dev_err(&ctrl_info->pci_dev->dev,
- "sending event acknowledge timed out\n");
+ if (pqi_ctrl_offline(ctrl_info))
return;
- }
}
next_element = queue_group->iq_element_array[RAID_PATH] +
@@ -2626,7 +2801,6 @@ static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info,
memcpy(next_element, iu, iu_length);
iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
-
queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
/*
@@ -2652,152 +2826,105 @@ static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
request.event_id = event->event_id;
request.additional_event_id = event->additional_event_id;
- pqi_start_event_ack(ctrl_info, &request, sizeof(request));
+ pqi_send_event_ack(ctrl_info, &request, sizeof(request));
}
static void pqi_event_worker(struct work_struct *work)
{
unsigned int i;
struct pqi_ctrl_info *ctrl_info;
- struct pqi_event *pending_event;
- bool got_non_heartbeat_event = false;
+ struct pqi_event *event;
ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
- pending_event = ctrl_info->pending_events;
- for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
- if (pending_event->pending) {
- pending_event->pending = false;
- pqi_acknowledge_event(ctrl_info, pending_event);
- if (i != PQI_EVENT_HEARTBEAT)
- got_non_heartbeat_event = true;
- }
- pending_event++;
- }
-
- if (got_non_heartbeat_event)
- pqi_schedule_rescan_worker(ctrl_info);
-}
-
-static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
-{
- unsigned int i;
- unsigned int path;
- struct pqi_queue_group *queue_group;
- unsigned long flags;
- struct pqi_io_request *io_request;
- struct pqi_io_request *next;
- struct scsi_cmnd *scmd;
-
- ctrl_info->controller_online = false;
- dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
-
- for (i = 0; i < ctrl_info->num_queue_groups; i++) {
- queue_group = &ctrl_info->queue_groups[i];
-
- for (path = 0; path < 2; path++) {
- spin_lock_irqsave(
- &queue_group->submit_lock[path], flags);
-
- list_for_each_entry_safe(io_request, next,
- &queue_group->request_list[path],
- request_list_entry) {
-
- scmd = io_request->scmd;
- if (scmd) {
- set_host_byte(scmd, DID_NO_CONNECT);
- pqi_scsi_done(scmd);
- }
+ pqi_ctrl_busy(ctrl_info);
+ pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
+ if (pqi_ctrl_offline(ctrl_info))
+ goto out;
- list_del(&io_request->request_list_entry);
- }
+ pqi_schedule_rescan_worker_delayed(ctrl_info);
- spin_unlock_irqrestore(
- &queue_group->submit_lock[path], flags);
+ event = ctrl_info->events;
+ for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
+ if (event->pending) {
+ event->pending = false;
+ pqi_acknowledge_event(ctrl_info, event);
}
+ event++;
}
+
+out:
+ pqi_ctrl_unbusy(ctrl_info);
}
-#define PQI_HEARTBEAT_TIMER_INTERVAL (5 * HZ)
-#define PQI_MAX_HEARTBEAT_REQUESTS 5
+#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
static void pqi_heartbeat_timer_handler(unsigned long data)
{
int num_interrupts;
+ u32 heartbeat_count;
struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info))
+ return;
+
num_interrupts = atomic_read(&ctrl_info->num_interrupts);
+ heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
if (num_interrupts == ctrl_info->previous_num_interrupts) {
- ctrl_info->num_heartbeats_requested++;
- if (ctrl_info->num_heartbeats_requested >
- PQI_MAX_HEARTBEAT_REQUESTS) {
+ if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "no heartbeat detected - last heartbeat count: %u\n",
+ heartbeat_count);
pqi_take_ctrl_offline(ctrl_info);
return;
}
- ctrl_info->pending_events[PQI_EVENT_HEARTBEAT].pending = true;
- schedule_work(&ctrl_info->event_work);
} else {
- ctrl_info->num_heartbeats_requested = 0;
+ ctrl_info->previous_num_interrupts = num_interrupts;
}
- ctrl_info->previous_num_interrupts = num_interrupts;
+ ctrl_info->previous_heartbeat_count = heartbeat_count;
mod_timer(&ctrl_info->heartbeat_timer,
jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
}
static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
{
+ if (!ctrl_info->heartbeat_counter)
+ return;
+
ctrl_info->previous_num_interrupts =
atomic_read(&ctrl_info->num_interrupts);
+ ctrl_info->previous_heartbeat_count =
+ pqi_read_heartbeat_counter(ctrl_info);
- init_timer(&ctrl_info->heartbeat_timer);
ctrl_info->heartbeat_timer.expires =
jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
add_timer(&ctrl_info->heartbeat_timer);
- ctrl_info->heartbeat_timer_started = true;
}
static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
{
- if (ctrl_info->heartbeat_timer_started)
- del_timer_sync(&ctrl_info->heartbeat_timer);
+ del_timer_sync(&ctrl_info->heartbeat_timer);
}
-static int pqi_event_type_to_event_index(unsigned int event_type)
+static inline int pqi_event_type_to_event_index(unsigned int event_type)
{
int index;
- switch (event_type) {
- case PQI_EVENT_TYPE_HEARTBEAT:
- index = PQI_EVENT_HEARTBEAT;
- break;
- case PQI_EVENT_TYPE_HOTPLUG:
- index = PQI_EVENT_HOTPLUG;
- break;
- case PQI_EVENT_TYPE_HARDWARE:
- index = PQI_EVENT_HARDWARE;
- break;
- case PQI_EVENT_TYPE_PHYSICAL_DEVICE:
- index = PQI_EVENT_PHYSICAL_DEVICE;
- break;
- case PQI_EVENT_TYPE_LOGICAL_DEVICE:
- index = PQI_EVENT_LOGICAL_DEVICE;
- break;
- case PQI_EVENT_TYPE_AIO_STATE_CHANGE:
- index = PQI_EVENT_AIO_STATE_CHANGE;
- break;
- case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE:
- index = PQI_EVENT_AIO_CONFIG_CHANGE;
- break;
- default:
- index = -1;
- break;
- }
+ for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
+ if (event_type == pqi_supported_event_types[index])
+ return index;
- return index;
+ return -1;
+}
+
+static inline bool pqi_is_supported_event(unsigned int event_type)
+{
+ return pqi_event_type_to_event_index(event_type) != -1;
}
static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
@@ -2807,13 +2934,11 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
pqi_index_t oq_ci;
struct pqi_event_queue *event_queue;
struct pqi_event_response *response;
- struct pqi_event *pending_event;
- bool need_delayed_work;
+ struct pqi_event *event;
int event_index;
event_queue = &ctrl_info->event_queue;
num_events = 0;
- need_delayed_work = false;
oq_ci = event_queue->oq_ci_copy;
while (1) {
@@ -2830,17 +2955,12 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
if (event_index >= 0) {
if (response->request_acknowlege) {
- pending_event =
- &ctrl_info->pending_events[event_index];
- pending_event->event_type =
- response->event_type;
- pending_event->event_id = response->event_id;
- pending_event->additional_event_id =
+ event = &ctrl_info->events[event_index];
+ event->pending = true;
+ event->event_type = response->event_type;
+ event->event_id = response->event_id;
+ event->additional_event_id =
response->additional_event_id;
- if (event_index != PQI_EVENT_HEARTBEAT) {
- pending_event->pending = true;
- need_delayed_work = true;
- }
}
}
@@ -2850,14 +2970,112 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
if (num_events) {
event_queue->oq_ci_copy = oq_ci;
writel(oq_ci, event_queue->oq_ci);
-
- if (need_delayed_work)
- schedule_work(&ctrl_info->event_work);
+ schedule_work(&ctrl_info->event_work);
}
return num_events;
}
+#define PQI_LEGACY_INTX_MASK 0x1
+
+static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
+ bool enable_intx)
+{
+ u32 intx_mask;
+ struct pqi_device_registers __iomem *pqi_registers;
+ volatile void __iomem *register_addr;
+
+ pqi_registers = ctrl_info->pqi_registers;
+
+ if (enable_intx)
+ register_addr = &pqi_registers->legacy_intx_mask_clear;
+ else
+ register_addr = &pqi_registers->legacy_intx_mask_set;
+
+ intx_mask = readl(register_addr);
+ intx_mask |= PQI_LEGACY_INTX_MASK;
+ writel(intx_mask, register_addr);
+}
+
+static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
+ enum pqi_irq_mode new_mode)
+{
+ switch (ctrl_info->irq_mode) {
+ case IRQ_MODE_MSIX:
+ switch (new_mode) {
+ case IRQ_MODE_MSIX:
+ break;
+ case IRQ_MODE_INTX:
+ pqi_configure_legacy_intx(ctrl_info, true);
+ sis_disable_msix(ctrl_info);
+ sis_enable_intx(ctrl_info);
+ break;
+ case IRQ_MODE_NONE:
+ sis_disable_msix(ctrl_info);
+ break;
+ }
+ break;
+ case IRQ_MODE_INTX:
+ switch (new_mode) {
+ case IRQ_MODE_MSIX:
+ pqi_configure_legacy_intx(ctrl_info, false);
+ sis_disable_intx(ctrl_info);
+ sis_enable_msix(ctrl_info);
+ break;
+ case IRQ_MODE_INTX:
+ break;
+ case IRQ_MODE_NONE:
+ pqi_configure_legacy_intx(ctrl_info, false);
+ sis_disable_intx(ctrl_info);
+ break;
+ }
+ break;
+ case IRQ_MODE_NONE:
+ switch (new_mode) {
+ case IRQ_MODE_MSIX:
+ sis_enable_msix(ctrl_info);
+ break;
+ case IRQ_MODE_INTX:
+ pqi_configure_legacy_intx(ctrl_info, true);
+ sis_enable_intx(ctrl_info);
+ break;
+ case IRQ_MODE_NONE:
+ break;
+ }
+ break;
+ }
+
+ ctrl_info->irq_mode = new_mode;
+}
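+
+/*
+ * Transition summary for pqi_change_irq_mode() (current mode -> new mode):
+ *
+ * MSIX -> INTX: unmask legacy INTx, disable MSI-X, enable INTx
+ * MSIX -> NONE: disable MSI-X
+ * INTX -> MSIX: mask legacy INTx, disable INTx, enable MSI-X
+ * INTX -> NONE: mask legacy INTx, disable INTx
+ * NONE -> MSIX: enable MSI-X
+ * NONE -> INTX: unmask legacy INTx, enable INTx
+ * (same mode to same mode is a no-op)
+ */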
+
+#define PQI_LEGACY_INTX_PENDING 0x1
+
+static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
+{
+ bool valid_irq;
+ u32 intx_status;
+
+ switch (ctrl_info->irq_mode) {
+ case IRQ_MODE_MSIX:
+ valid_irq = true;
+ break;
+ case IRQ_MODE_INTX:
+ intx_status =
+ readl(&ctrl_info->pqi_registers->legacy_intx_status);
+ if (intx_status & PQI_LEGACY_INTX_PENDING)
+ valid_irq = true;
+ else
+ valid_irq = false;
+ break;
+ case IRQ_MODE_NONE:
+ default:
+ valid_irq = false;
+ break;
+ }
+
+ return valid_irq;
+}
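+
+/*
+ * Legacy INTx lines can be shared with other devices, so in INTx mode the
+ * pending bit in the legacy status register decides whether this interrupt
+ * is ours; MSI-X vectors are never shared and are always treated as valid.
+ */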
+
static irqreturn_t pqi_irq_handler(int irq, void *data)
{
struct pqi_ctrl_info *ctrl_info;
@@ -2867,7 +3085,7 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
queue_group = data;
ctrl_info = queue_group->ctrl_info;
- if (!ctrl_info || !queue_group->oq_ci)
+ if (!pqi_is_valid_irq(ctrl_info))
return IRQ_NONE;
num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
@@ -2886,19 +3104,19 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
{
- struct pci_dev *pdev = ctrl_info->pci_dev;
+ struct pci_dev *pci_dev = ctrl_info->pci_dev;
int i;
int rc;
- ctrl_info->event_irq = pci_irq_vector(pdev, 0);
+ ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
- rc = request_irq(pci_irq_vector(pdev, i), pqi_irq_handler, 0,
+ rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
if (rc) {
- dev_err(&pdev->dev,
+ dev_err(&pci_dev->dev,
"irq %u init failed with error %d\n",
- pci_irq_vector(pdev, i), rc);
+ pci_irq_vector(pci_dev, i), rc);
return rc;
}
ctrl_info->num_msix_vectors_initialized++;
@@ -2907,23 +3125,44 @@ static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
return 0;
}
+static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
+{
+ int i;
+
+ for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
+ free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
+ &ctrl_info->queue_groups[i]);
+
+ ctrl_info->num_msix_vectors_initialized = 0;
+}
+
static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
{
- int ret;
+ int num_vectors_enabled;
- ret = pci_alloc_irq_vectors(ctrl_info->pci_dev,
+ num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
- if (ret < 0) {
+ if (num_vectors_enabled < 0) {
dev_err(&ctrl_info->pci_dev->dev,
- "MSI-X init failed with error %d\n", ret);
- return ret;
+ "MSI-X init failed with error %d\n",
+ num_vectors_enabled);
+ return num_vectors_enabled;
}
- ctrl_info->num_msix_vectors_enabled = ret;
+ ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
+ ctrl_info->irq_mode = IRQ_MODE_MSIX;
return 0;
}
+static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
+{
+ if (ctrl_info->num_msix_vectors_enabled) {
+ pci_free_irq_vectors(ctrl_info->pci_dev);
+ ctrl_info->num_msix_vectors_enabled = 0;
+ }
+}
+
static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
{
unsigned int i;
@@ -2976,16 +3215,15 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
alloc_length = (size_t)aligned_pointer +
PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
+ alloc_length += PQI_EXTRA_SGL_MEMORY;
+
ctrl_info->queue_memory_base =
dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
alloc_length,
&ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
- if (!ctrl_info->queue_memory_base) {
- dev_err(&ctrl_info->pci_dev->dev,
- "failed to allocate memory for PQI admin queues\n");
+ if (!ctrl_info->queue_memory_base)
return -ENOMEM;
- }
ctrl_info->queue_memory_length = alloc_length;
@@ -3235,6 +3473,8 @@ static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
writel(iq_pi, admin_queues->iq_pi);
}
+#define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
+
static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
struct pqi_general_admin_response *response)
{
@@ -3246,7 +3486,7 @@ static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
admin_queues = &ctrl_info->admin_queues;
oq_ci = admin_queues->oq_ci_copy;
- timeout = (3 * HZ) + jiffies;
+ timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
while (1) {
oq_pi = *admin_queues->oq_pi;
@@ -3257,6 +3497,8 @@ static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
"timed out waiting for admin response\n");
return -ETIMEDOUT;
}
+ if (!sis_is_firmware_running(ctrl_info))
+ return -ENXIO;
usleep_range(1000, 2000);
}
@@ -3287,9 +3529,11 @@ static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
spin_lock_irqsave(&queue_group->submit_lock[path], flags);
- if (io_request)
+ if (io_request) {
+ io_request->queue_group = queue_group;
list_add_tail(&io_request->request_list_entry,
&queue_group->request_list[path]);
+ }
iq_pi = queue_group->iq_pi_copy[path];
@@ -3348,6 +3592,30 @@ static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
}
+#define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
+
+static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
+ struct completion *wait)
+{
+ int rc;
+
+ while (1) {
+ if (wait_for_completion_io_timeout(wait,
+ PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
+ rc = 0;
+ break;
+ }
+
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info)) {
+ rc = -ENXIO;
+ break;
+ }
+ }
+
+ return rc;
+}
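+
+/*
+ * Unlike a bare wait_for_completion_io(), this helper wakes every
+ * PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS seconds to re-check controller
+ * health, so a wait against a controller that dies mid-command fails with
+ * -ENXIO instead of blocking forever.
+ */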
+
static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
void *context)
{
@@ -3371,7 +3639,7 @@ static int pqi_submit_raid_request_synchronous_with_io_request(
io_request);
if (timeout_msecs == NO_TIMEOUT) {
- wait_for_completion_io(&wait);
+ pqi_wait_for_completion_io(ctrl_info, &wait);
} else {
if (!wait_for_completion_io_timeout(&wait,
msecs_to_jiffies(timeout_msecs))) {
@@ -3418,6 +3686,18 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
}
}
+ pqi_ctrl_busy(ctrl_info);
+ timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
+ if (timeout_msecs == 0) {
+ rc = -ETIMEDOUT;
+ goto out;
+ }
+
+ if (pqi_ctrl_offline(ctrl_info)) {
+ rc = -ENXIO;
+ goto out;
+ }
+
io_request = pqi_alloc_io_request(ctrl_info);
put_unaligned_le16(io_request->index,
@@ -3458,6 +3738,8 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
pqi_free_io_request(io_request);
+out:
+ pqi_ctrl_unbusy(ctrl_info);
up(&ctrl_info->sync_request_sem);
return rc;
@@ -3688,16 +3970,15 @@ static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
return 0;
}
-static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info)
+static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
+ unsigned int group_number)
{
- unsigned int i;
int rc;
struct pqi_queue_group *queue_group;
struct pqi_general_admin_request request;
struct pqi_general_admin_response response;
- i = ctrl_info->num_active_queue_groups;
- queue_group = &ctrl_info->queue_groups[i];
+ queue_group = &ctrl_info->queue_groups[group_number];
/*
* Create IQ (Inbound Queue - host to device queue) for
@@ -3827,8 +4108,6 @@ static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info)
get_unaligned_le64(
&response.data.create_operational_oq.oq_ci_offset);
- ctrl_info->num_active_queue_groups++;
-
return 0;
delete_inbound_queue_aio:
@@ -3855,7 +4134,7 @@ static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
}
for (i = 0; i < ctrl_info->num_queue_groups; i++) {
- rc = pqi_create_queue_group(ctrl_info);
+ rc = pqi_create_queue_group(ctrl_info, i);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
"error creating queue group number %u/%u\n",
@@ -3871,11 +4150,13 @@ static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
(offsetof(struct pqi_event_config, descriptors) + \
(PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
-static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info)
+static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
+ bool enable_events)
{
int rc;
unsigned int i;
struct pqi_event_config *event_config;
+ struct pqi_event_descriptor *event_descriptor;
struct pqi_general_management_request request;
event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
@@ -3909,9 +4190,15 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info)
if (rc)
goto out;
- for (i = 0; i < event_config->num_event_descriptors; i++)
- put_unaligned_le16(ctrl_info->event_queue.oq_id,
- &event_config->descriptors[i].oq_id);
+ for (i = 0; i < event_config->num_event_descriptors; i++) {
+ event_descriptor = &event_config->descriptors[i];
+ if (enable_events &&
+ pqi_is_supported_event(event_descriptor->event_type))
+ put_unaligned_le16(ctrl_info->event_queue.oq_id,
+ &event_descriptor->oq_id);
+ else
+ put_unaligned_le16(0, &event_descriptor->oq_id);
+ }
memset(&request, 0, sizeof(request));
@@ -3942,6 +4229,16 @@ out:
return rc;
}
+static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
+{
+ return pqi_configure_events(ctrl_info, true);
+}
+
+static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
+{
+ return pqi_configure_events(ctrl_info, false);
+}
+
static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
{
unsigned int i;
@@ -4056,8 +4353,12 @@ static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
ctrl_info->error_buffer_length =
ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
- max_transfer_size =
- min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE);
+ if (reset_devices)
+ max_transfer_size = min(ctrl_info->max_transfer_size,
+ PQI_MAX_TRANSFER_SIZE_KDUMP);
+ else
+ max_transfer_size = min(ctrl_info->max_transfer_size,
+ PQI_MAX_TRANSFER_SIZE);
max_sg_entries = max_transfer_size / PAGE_SIZE;
@@ -4069,28 +4370,35 @@ static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
ctrl_info->sg_chain_buffer_length =
- max_sg_entries * sizeof(struct pqi_sg_descriptor);
+ (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
+ PQI_EXTRA_SGL_MEMORY;
ctrl_info->sg_tablesize = max_sg_entries;
ctrl_info->max_sectors = max_transfer_size / 512;
}
static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
{
- int num_cpus;
- int max_queue_groups;
int num_queue_groups;
u16 num_elements_per_iq;
u16 num_elements_per_oq;
- max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
- ctrl_info->max_outbound_queues - 1);
- max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
+ if (reset_devices) {
+ num_queue_groups = 1;
+ } else {
+ int num_cpus;
+ int max_queue_groups;
+
+ max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
+ ctrl_info->max_outbound_queues - 1);
+ max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
- num_cpus = num_online_cpus();
- num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
- num_queue_groups = min(num_queue_groups, max_queue_groups);
+ num_cpus = num_online_cpus();
+ num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
+ num_queue_groups = min(num_queue_groups, max_queue_groups);
+ }
ctrl_info->num_queue_groups = num_queue_groups;
+ ctrl_info->max_hw_queue_index = num_queue_groups - 1;
/*
* Make sure that the max. inbound IU length is an even multiple
@@ -4276,21 +4584,18 @@ static void pqi_raid_io_complete(struct pqi_io_request *io_request,
pqi_scsi_done(scmd);
}
-static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
+static int pqi_raid_submit_scsi_cmd_with_io_request(
+ struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
struct pqi_queue_group *queue_group)
{
int rc;
size_t cdb_length;
- struct pqi_io_request *io_request;
struct pqi_raid_path_request *request;
- io_request = pqi_alloc_io_request(ctrl_info);
io_request->io_complete_callback = pqi_raid_io_complete;
io_request->scmd = scmd;
- scmd->host_scribble = (unsigned char *)io_request;
-
request = io_request->iu;
memset(request, 0,
offsetof(struct pqi_raid_path_request, sg_descriptors));
@@ -4355,7 +4660,6 @@ static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
dev_err(&ctrl_info->pci_dev->dev,
"unknown data direction: %d\n",
scmd->sc_data_direction);
- WARN_ON(scmd->sc_data_direction);
break;
}
@@ -4370,6 +4674,176 @@ static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
return 0;
}
+static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
+ struct pqi_queue_group *queue_group)
+{
+ struct pqi_io_request *io_request;
+
+ io_request = pqi_alloc_io_request(ctrl_info);
+
+ return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
+ device, scmd, queue_group);
+}
+
+static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info)
+{
+ if (!pqi_ctrl_blocked(ctrl_info))
+ schedule_work(&ctrl_info->raid_bypass_retry_work);
+}
+
+static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
+{
+ struct scsi_cmnd *scmd;
+ struct pqi_scsi_dev *device;
+ struct pqi_ctrl_info *ctrl_info;
+
+ if (!io_request->raid_bypass)
+ return false;
+
+ scmd = io_request->scmd;
+ if ((scmd->result & 0xff) == SAM_STAT_GOOD)
+ return false;
+ if (host_byte(scmd->result) == DID_NO_CONNECT)
+ return false;
+
+ device = scmd->device->hostdata;
+ if (pqi_device_offline(device))
+ return false;
+
+ ctrl_info = shost_to_hba(scmd->device->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return false;
+
+ return true;
+}
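+
+/*
+ * Summary: a bypass request that completes with anything other than GOOD
+ * status is eligible for a retry down the normal RAID path, unless the
+ * command, device, or controller has already gone away (DID_NO_CONNECT,
+ * device offline, or controller offline).
+ */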
+
+static inline void pqi_add_to_raid_bypass_retry_list(
+ struct pqi_ctrl_info *ctrl_info,
+ struct pqi_io_request *io_request, bool at_head)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
+ if (at_head)
+ list_add(&io_request->request_list_entry,
+ &ctrl_info->raid_bypass_retry_list);
+ else
+ list_add_tail(&io_request->request_list_entry,
+ &ctrl_info->raid_bypass_retry_list);
+ spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
+}
+
+static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request,
+ void *context)
+{
+ struct scsi_cmnd *scmd;
+
+ scmd = io_request->scmd;
+ pqi_free_io_request(io_request);
+ pqi_scsi_done(scmd);
+}
+
+static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request)
+{
+ struct scsi_cmnd *scmd;
+ struct pqi_ctrl_info *ctrl_info;
+
+ io_request->io_complete_callback = pqi_queued_raid_bypass_complete;
+ scmd = io_request->scmd;
+ scmd->result = 0;
+ ctrl_info = shost_to_hba(scmd->device->host);
+
+ pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false);
+ pqi_schedule_bypass_retry(ctrl_info);
+}
+
+static int pqi_retry_raid_bypass(struct pqi_io_request *io_request)
+{
+ struct scsi_cmnd *scmd;
+ struct pqi_scsi_dev *device;
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_queue_group *queue_group;
+
+ scmd = io_request->scmd;
+ device = scmd->device->hostdata;
+ if (pqi_device_in_reset(device)) {
+ pqi_free_io_request(io_request);
+ set_host_byte(scmd, DID_RESET);
+ pqi_scsi_done(scmd);
+ return 0;
+ }
+
+ ctrl_info = shost_to_hba(scmd->device->host);
+ queue_group = io_request->queue_group;
+
+ pqi_reinit_io_request(io_request);
+
+ return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
+ device, scmd, queue_group);
+}
+
+static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request(
+ struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned long flags;
+ struct pqi_io_request *io_request;
+
+ spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
+ io_request = list_first_entry_or_null(
+ &ctrl_info->raid_bypass_retry_list,
+ struct pqi_io_request, request_list_entry);
+ if (io_request)
+ list_del(&io_request->request_list_entry);
+ spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
+
+ return io_request;
+}
+
+static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ struct pqi_io_request *io_request;
+
+ pqi_ctrl_busy(ctrl_info);
+
+ while (1) {
+ if (pqi_ctrl_blocked(ctrl_info))
+ break;
+ io_request = pqi_next_queued_raid_bypass_request(ctrl_info);
+ if (!io_request)
+ break;
+ rc = pqi_retry_raid_bypass(io_request);
+ if (rc) {
+ pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request,
+ true);
+ pqi_schedule_bypass_retry(ctrl_info);
+ break;
+ }
+ }
+
+ pqi_ctrl_unbusy(ctrl_info);
+}
+
+static void pqi_raid_bypass_retry_worker(struct work_struct *work)
+{
+ struct pqi_ctrl_info *ctrl_info;
+
+ ctrl_info = container_of(work, struct pqi_ctrl_info,
+ raid_bypass_retry_work);
+ pqi_retry_raid_bypass_requests(ctrl_info);
+}
+
+static void pqi_clear_all_queued_raid_bypass_retries(
+ struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
+ INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
+ spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
+}
+
static void pqi_aio_io_complete(struct pqi_io_request *io_request,
void *context)
{
@@ -4379,6 +4853,10 @@ static void pqi_aio_io_complete(struct pqi_io_request *io_request,
scsi_dma_unmap(scmd);
if (io_request->status == -EAGAIN)
set_host_byte(scmd, DID_IMM_RETRY);
+ else if (pqi_raid_bypass_retry_needed(io_request)) {
+ pqi_queue_raid_bypass_retry(io_request);
+ return;
+ }
pqi_free_io_request(io_request);
pqi_scsi_done(scmd);
}
@@ -4388,13 +4866,13 @@ static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
struct pqi_queue_group *queue_group)
{
return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
- scmd->cmnd, scmd->cmd_len, queue_group, NULL);
+ scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
}
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
unsigned int cdb_length, struct pqi_queue_group *queue_group,
- struct pqi_encryption_info *encryption_info)
+ struct pqi_encryption_info *encryption_info, bool raid_bypass)
{
int rc;
struct pqi_io_request *io_request;
@@ -4403,8 +4881,7 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
io_request = pqi_alloc_io_request(ctrl_info);
io_request->io_complete_callback = pqi_aio_io_complete;
io_request->scmd = scmd;
-
- scmd->host_scribble = (unsigned char *)io_request;
+ io_request->raid_bypass = raid_bypass;
request = io_request->iu;
memset(request, 0,
@@ -4438,7 +4915,6 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
dev_err(&ctrl_info->pci_dev->dev,
"unknown data direction: %d\n",
scmd->sc_data_direction);
- WARN_ON(scmd->sc_data_direction);
break;
}
@@ -4463,47 +4939,74 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
return 0;
}
+static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
+ struct scsi_cmnd *scmd)
+{
+ u16 hw_queue;
+
+ hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
+ if (hw_queue > ctrl_info->max_hw_queue_index)
+ hw_queue = 0;
+
+ return hw_queue;
+}
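+
+/*
+ * Example: with four queue groups, block-mq hardware queue tags 0-3 map
+ * one-to-one onto queue groups; any out-of-range tag defensively falls
+ * back to group 0.
+ */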
+
+/*
+ * This function gets called just before we hand the completed SCSI request
+ * back to the SML.
+ */
+
+void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
+{
+ struct pqi_scsi_dev *device;
+
+ device = scmd->device->hostdata;
+ atomic_dec(&device->scsi_cmds_outstanding);
+}
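+
+/*
+ * Pairs with the atomic_inc() in pqi_scsi_queue_command(); the per-device
+ * scsi_cmds_outstanding count maintained by these two calls is what
+ * pqi_device_wait_for_pending_io() polls while draining a device.
+ */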
+
static int pqi_scsi_queue_command(struct Scsi_Host *shost,
struct scsi_cmnd *scmd)
{
int rc;
struct pqi_ctrl_info *ctrl_info;
struct pqi_scsi_dev *device;
- u16 hwq;
+ u16 hw_queue;
struct pqi_queue_group *queue_group;
bool raid_bypassed;
device = scmd->device->hostdata;
ctrl_info = shost_to_hba(shost);
+ atomic_inc(&device->scsi_cmds_outstanding);
+
if (pqi_ctrl_offline(ctrl_info)) {
set_host_byte(scmd, DID_NO_CONNECT);
pqi_scsi_done(scmd);
return 0;
}
+ pqi_ctrl_busy(ctrl_info);
+ if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) {
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
/*
* This is necessary because the SML doesn't zero out this field during
* error recovery.
*/
scmd->result = 0;
- hwq = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
- if (hwq >= ctrl_info->num_queue_groups)
- hwq = 0;
-
- queue_group = &ctrl_info->queue_groups[hwq];
+ hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
+ queue_group = &ctrl_info->queue_groups[hw_queue];
if (pqi_is_logical_device(device)) {
raid_bypassed = false;
- if (device->offload_enabled &&
+ if (device->raid_bypass_enabled &&
!blk_rq_is_passthrough(scmd->request)) {
rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
scmd, queue_group);
- if (rc == 0 ||
- rc == SCSI_MLQUEUE_HOST_BUSY ||
- rc == SAM_STAT_CHECK_CONDITION ||
- rc == SAM_STAT_RESERVATION_CONFLICT)
+ if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY)
raid_bypassed = true;
}
if (!raid_bypassed)
@@ -4518,9 +5021,162 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
queue_group);
}
+out:
+ pqi_ctrl_unbusy(ctrl_info);
+ if (rc)
+ atomic_dec(&device->scsi_cmds_outstanding);
+
return rc;
}
+static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_queue_group *queue_group)
+{
+ unsigned int path;
+ unsigned long flags;
+ bool list_is_empty;
+
+ for (path = 0; path < 2; path++) {
+ while (1) {
+ spin_lock_irqsave(
+ &queue_group->submit_lock[path], flags);
+ list_is_empty =
+ list_empty(&queue_group->request_list[path]);
+ spin_unlock_irqrestore(
+ &queue_group->submit_lock[path], flags);
+ if (list_is_empty)
+ break;
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENXIO;
+ usleep_range(1000, 2000);
+ }
+ }
+
+ return 0;
+}
+
+static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ unsigned int i;
+ unsigned int path;
+ struct pqi_queue_group *queue_group;
+ pqi_index_t iq_pi;
+ pqi_index_t iq_ci;
+
+ for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+ queue_group = &ctrl_info->queue_groups[i];
+
+ rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
+ if (rc)
+ return rc;
+
+ for (path = 0; path < 2; path++) {
+ iq_pi = queue_group->iq_pi_copy[path];
+
+ while (1) {
+ iq_ci = *queue_group->iq_ci[path];
+ if (iq_ci == iq_pi)
+ break;
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENXIO;
+ usleep_range(1000, 2000);
+ }
+ }
+ }
+
+ return 0;
+}
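+
+/*
+ * The drain above is two-stage per queue group: first the driver-side
+ * request_list must empty (everything handed to the submission ring), then
+ * the firmware consumer index must catch up to the producer index
+ * (everything consumed by the controller).
+ */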
+
+static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device)
+{
+ unsigned int i;
+ unsigned int path;
+ struct pqi_queue_group *queue_group;
+ unsigned long flags;
+ struct pqi_io_request *io_request;
+ struct pqi_io_request *next;
+ struct scsi_cmnd *scmd;
+ struct pqi_scsi_dev *scsi_device;
+
+ for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+ queue_group = &ctrl_info->queue_groups[i];
+
+ for (path = 0; path < 2; path++) {
+ spin_lock_irqsave(
+ &queue_group->submit_lock[path], flags);
+
+ list_for_each_entry_safe(io_request, next,
+ &queue_group->request_list[path],
+ request_list_entry) {
+ scmd = io_request->scmd;
+ if (!scmd)
+ continue;
+
+ scsi_device = scmd->device->hostdata;
+ if (scsi_device != device)
+ continue;
+
+ list_del(&io_request->request_list_entry);
+ set_host_byte(scmd, DID_RESET);
+ pqi_scsi_done(scmd);
+ }
+
+ spin_unlock_irqrestore(
+ &queue_group->submit_lock[path], flags);
+ }
+ }
+}
+
+static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device)
+{
+ while (atomic_read(&device->scsi_cmds_outstanding)) {
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENXIO;
+ usleep_range(1000, 2000);
+ }
+
+ return 0;
+}
+
+static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info)
+{
+ bool io_pending;
+ unsigned long flags;
+ struct pqi_scsi_dev *device;
+
+ while (1) {
+ io_pending = false;
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ list_for_each_entry(device, &ctrl_info->scsi_device_list,
+ scsi_device_list_entry) {
+ if (atomic_read(&device->scsi_cmds_outstanding)) {
+ io_pending = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
+ flags);
+
+ if (!io_pending)
+ break;
+
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENXIO;
+
+ usleep_range(1000, 2000);
+ }
+
+ return 0;
+}
+
static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
void *context)
{
@@ -4535,7 +5191,6 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct completion *wait)
{
int rc;
- unsigned int wait_secs = 0;
while (1) {
if (wait_for_completion_io_timeout(wait,
@@ -4546,16 +5201,9 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
pqi_check_ctrl_health(ctrl_info);
if (pqi_ctrl_offline(ctrl_info)) {
- rc = -ETIMEDOUT;
+ rc = -ENXIO;
break;
}
-
- wait_secs += PQI_LUN_RESET_TIMEOUT_SECS;
-
- dev_err(&ctrl_info->pci_dev->dev,
- "resetting scsi %d:%d:%d:%d - waiting %u seconds\n",
- ctrl_info->scsi_host->host_no, device->bus,
- device->target, device->lun, wait_secs);
}
return rc;
@@ -4569,8 +5217,6 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
DECLARE_COMPLETION_ONSTACK(wait);
struct pqi_task_management_request *request;
- down(&ctrl_info->lun_reset_sem);
-
io_request = pqi_alloc_io_request(ctrl_info);
io_request->io_complete_callback = pqi_lun_reset_complete;
io_request->context = &wait;
@@ -4595,7 +5241,6 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
rc = io_request->status;
pqi_free_io_request(io_request);
- up(&ctrl_info->lun_reset_sem);
return rc;
}
@@ -4607,11 +5252,9 @@ static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
{
int rc;
- pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info))
- return FAILED;
-
rc = pqi_lun_reset(ctrl_info, device);
+ if (rc == 0)
+ rc = pqi_device_wait_for_pending_io(ctrl_info, device);
return rc == 0 ? SUCCESS : FAILED;
}
@@ -4619,23 +5262,46 @@ static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
{
int rc;
+ struct Scsi_Host *shost;
struct pqi_ctrl_info *ctrl_info;
struct pqi_scsi_dev *device;
- ctrl_info = shost_to_hba(scmd->device->host);
+ shost = scmd->device->host;
+ ctrl_info = shost_to_hba(shost);
device = scmd->device->hostdata;
dev_err(&ctrl_info->pci_dev->dev,
"resetting scsi %d:%d:%d:%d\n",
- ctrl_info->scsi_host->host_no,
- device->bus, device->target, device->lun);
+ shost->host_no, device->bus, device->target, device->lun);
+
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info)) {
+ rc = FAILED;
+ goto out;
+ }
+
+ mutex_lock(&ctrl_info->lun_reset_mutex);
+
+ pqi_ctrl_block_requests(ctrl_info);
+ pqi_ctrl_wait_until_quiesced(ctrl_info);
+ pqi_fail_io_queued_for_device(ctrl_info, device);
+ rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
+ pqi_device_reset_start(device);
+ pqi_ctrl_unblock_requests(ctrl_info);
+
+ if (rc)
+ rc = FAILED;
+ else
+ rc = pqi_device_reset(ctrl_info, device);
+
+ pqi_device_reset_done(device);
- rc = pqi_device_reset(ctrl_info, device);
+ mutex_unlock(&ctrl_info->lun_reset_mutex);
+out:
dev_err(&ctrl_info->pci_dev->dev,
"reset of scsi %d:%d:%d:%d: %s\n",
- ctrl_info->scsi_host->host_no,
- device->bus, device->target, device->lun,
+ shost->host_no, device->bus, device->target, device->lun,
rc == SUCCESS ? "SUCCESS" : "FAILED");
return rc;
@@ -4667,7 +5333,7 @@ static int pqi_slave_alloc(struct scsi_device *sdev)
sdev_id(sdev), sdev->lun);
}
- if (device && device->expose_device) {
+ if (device) {
sdev->hostdata = device;
device->sdev = sdev;
if (device->queue_depth) {
@@ -4682,17 +5348,6 @@ static int pqi_slave_alloc(struct scsi_device *sdev)
return 0;
}
-static int pqi_slave_configure(struct scsi_device *sdev)
-{
- struct pqi_scsi_dev *device;
-
- device = sdev->hostdata;
- if (!device->expose_device)
- sdev->no_uld_attach = true;
-
- return 0;
-}
-
static int pqi_map_queues(struct Scsi_Host *shost)
{
struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
@@ -5005,12 +5660,55 @@ static ssize_t pqi_host_rescan_store(struct device *dev,
return count;
}
-static DEVICE_ATTR(version, S_IRUGO, pqi_version_show, NULL);
-static DEVICE_ATTR(rescan, S_IWUSR, NULL, pqi_host_rescan_store);
+static ssize_t pqi_lockup_action_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ int count = 0;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
+ if (pqi_lockup_actions[i].action == pqi_lockup_action)
+ count += snprintf(buffer + count, PAGE_SIZE - count,
+ "[%s] ", pqi_lockup_actions[i].name);
+ else
+ count += snprintf(buffer + count, PAGE_SIZE - count,
+ "%s ", pqi_lockup_actions[i].name);
+ }
+
+ count += snprintf(buffer + count, PAGE_SIZE - count, "\n");
+
+ return count;
+}
+
+static ssize_t pqi_lockup_action_store(struct device *dev,
+ struct device_attribute *attr, const char *buffer, size_t count)
+{
+ unsigned int i;
+ char *action_name;
+ char action_name_buffer[32];
+
+ strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
+ action_name = strstrip(action_name_buffer);
+
+ for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
+ if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
+ pqi_lockup_action = pqi_lockup_actions[i].action;
+ return count;
+ }
+ }
+
+ return -EINVAL;
+}
+
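The show handler brackets the active entry so the attribute is self-documenting, and the store handler strips whitespace so a trailing newline from echo is harmless. A hypothetical user-space round-trip — the host number in the sysfs path varies per system:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[64];
		ssize_t n;
		int fd = open("/sys/class/scsi_host/host0/lockup_action", O_RDWR);

		if (fd < 0)
			return 1;
		write(fd, "reboot\n", 7);	/* store handler strips the newline */
		lseek(fd, 0, SEEK_SET);
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("%s", buf);	/* e.g. "none [reboot] panic" */
		}
		close(fd);
		return 0;
	}
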
+static DEVICE_ATTR(version, 0444, pqi_version_show, NULL);
+static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
+static DEVICE_ATTR(lockup_action, 0644,
+ pqi_lockup_action_show, pqi_lockup_action_store);
static struct device_attribute *pqi_shost_attrs[] = {
&dev_attr_version,
&dev_attr_rescan,
+ &dev_attr_lockup_action,
NULL
};
@@ -5055,7 +5753,7 @@ static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
- buffer[0] = device->offload_enabled ? '1' : '0';
+ buffer[0] = device->raid_bypass_enabled ? '1' : '0';
buffer[1] = '\n';
buffer[2] = '\0';
@@ -5064,13 +5762,41 @@ static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
return 2;
}
-static DEVICE_ATTR(sas_address, S_IRUGO, pqi_sas_address_show, NULL);
-static DEVICE_ATTR(ssd_smart_path_enabled, S_IRUGO,
+static ssize_t pqi_raid_level_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct scsi_device *sdev;
+ struct pqi_scsi_dev *device;
+ unsigned long flags;
+ char *raid_level;
+
+ sdev = to_scsi_device(dev);
+ ctrl_info = shost_to_hba(sdev->host);
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ device = sdev->hostdata;
+
+ if (pqi_is_logical_device(device))
+ raid_level = pqi_raid_level_to_string(device->raid_level);
+ else
+ raid_level = "N/A";
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
+}
+
+static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
+static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
pqi_ssd_smart_path_enabled_show, NULL);
+static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
static struct device_attribute *pqi_sdev_attrs[] = {
&dev_attr_sas_address,
&dev_attr_ssd_smart_path_enabled,
+ &dev_attr_raid_level,
NULL
};
@@ -5086,7 +5812,6 @@ static struct scsi_host_template pqi_driver_template = {
.eh_device_reset_handler = pqi_eh_device_reset_handler,
.ioctl = pqi_ioctl,
.slave_alloc = pqi_slave_alloc,
- .slave_configure = pqi_slave_configure,
.map_queues = pqi_map_queues,
.sdev_attrs = pqi_sdev_attrs,
.shost_attrs = pqi_shost_attrs,
@@ -5217,49 +5942,113 @@ out:
return rc;
}
-static int pqi_kdump_init(struct pqi_ctrl_info *ctrl_info)
+static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
{
- if (!sis_is_firmware_running(ctrl_info))
- return -ENXIO;
+ u32 table_length;
+ u32 section_offset;
+ void __iomem *table_iomem_addr;
+ struct pqi_config_table *config_table;
+ struct pqi_config_table_section_header *section;
+
+ table_length = ctrl_info->config_table_length;
+
+ config_table = kmalloc(table_length, GFP_KERNEL);
+ if (!config_table) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "failed to allocate memory for PQI configuration table\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Copy the config table contents from I/O memory space into the
+ * temporary buffer.
+ */
+ table_iomem_addr = ctrl_info->iomem_base +
+ ctrl_info->config_table_offset;
+ memcpy_fromio(config_table, table_iomem_addr, table_length);
+
+ section_offset =
+ get_unaligned_le32(&config_table->first_section_offset);
+
+ while (section_offset) {
+ section = (void *)config_table + section_offset;
- if (pqi_get_ctrl_mode(ctrl_info) == PQI_MODE) {
- sis_disable_msix(ctrl_info);
- if (pqi_reset(ctrl_info) == 0)
- sis_reenable_sis_mode(ctrl_info);
+ switch (get_unaligned_le16(&section->section_id)) {
+ case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
+ if (pqi_disable_heartbeat)
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "heartbeat disabled by module parameter\n");
+ else
+ ctrl_info->heartbeat_counter =
+ table_iomem_addr +
+ section_offset +
+ offsetof(
+ struct pqi_config_table_heartbeat,
+ heartbeat_counter);
+ break;
+ }
+
+ section_offset =
+ get_unaligned_le16(&section->next_section_offset);
}
+ kfree(config_table);
+
return 0;
}
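
The walk above assumes only two things about the table layout: a 32-bit offset to the first section, and per-section headers carrying a 16-bit id plus a 16-bit offset to the next section, with zero terminating the chain. Note also that heartbeat_counter is left pointing at the live register in I/O memory (table_iomem_addr plus offsets), not into the freed temporary copy. A sketch of that shape, with field widths inferred from the get_unaligned_le*() accessors rather than copied from smartpqi.h:

	#include <linux/types.h>

	struct pqi_config_table {
		u8	signature[8];		/* assumed identification marker */
		__le32	first_section_offset;	/* byte offset of first section, 0 = none */
	};

	struct pqi_config_table_section_header {
		__le16	section_id;		/* e.g. PQI_CONFIG_TABLE_SECTION_HEARTBEAT */
		__le16	next_section_offset;	/* offset of next section, 0 terminates */
	};
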
-static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
+/* Switches the controller from PQI mode back into SIS mode. */
+
+static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
{
int rc;
- if (reset_devices) {
- rc = pqi_kdump_init(ctrl_info);
- if (rc)
- return rc;
+ pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
+ rc = pqi_reset(ctrl_info);
+ if (rc)
+ return rc;
+ sis_reenable_sis_mode(ctrl_info);
+ pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
+
+ return 0;
+}
+
+/*
+ * If the controller isn't already in SIS mode, this function forces it into
+ * SIS mode.
+ */
+
+static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
+{
+ if (!sis_is_firmware_running(ctrl_info))
+ return -ENXIO;
+
+ if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
+ return 0;
+
+ if (sis_is_kernel_up(ctrl_info)) {
+ pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
+ return 0;
}
- /*
- * When the controller comes out of reset, it is always running
- * in legacy SIS mode. This is so that it can be compatible
- * with legacy drivers shipped with OSes. So we have to talk
- * to it using SIS commands at first. Once we are satisfied
- * that the controller supports PQI, we transition it into PQI
- * mode.
- */
+ return pqi_revert_to_sis_mode(ctrl_info);
+}
+
+static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+
+ rc = pqi_force_sis_mode(ctrl_info);
+ if (rc)
+ return rc;
/*
* Wait until the controller is ready to start accepting SIS
* commands.
*/
rc = sis_wait_for_ctrl_ready(ctrl_info);
- if (rc) {
- dev_err(&ctrl_info->pci_dev->dev,
- "error initializing SIS interface\n");
+ if (rc)
return rc;
- }
/*
* Get the controller properties. This allows us to determine
@@ -5279,9 +6068,17 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
return rc;
}
- if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS)
- ctrl_info->max_outstanding_requests =
- PQI_MAX_OUTSTANDING_REQUESTS;
+ if (reset_devices) {
+ if (ctrl_info->max_outstanding_requests >
+ PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
+ ctrl_info->max_outstanding_requests =
+ PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
+ } else {
+ if (ctrl_info->max_outstanding_requests >
+ PQI_MAX_OUTSTANDING_REQUESTS)
+ ctrl_info->max_outstanding_requests =
+ PQI_MAX_OUTSTANDING_REQUESTS;
+ }
pqi_calculate_io_resources(ctrl_info);
@@ -5316,10 +6113,14 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
ctrl_info->pqi_mode_enabled = true;
pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
+ rc = pqi_process_config_table(ctrl_info);
+ if (rc)
+ return rc;
+
rc = pqi_alloc_admin_queues(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
- "error allocating admin queues\n");
+ "failed to allocate admin queues\n");
return rc;
}
@@ -5358,8 +6159,11 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
return rc;
rc = pqi_alloc_operational_queues(ctrl_info);
- if (rc)
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "failed to allocate operational queues\n");
return rc;
+ }
pqi_init_operational_queues(ctrl_info);
@@ -5371,19 +6175,18 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
if (rc)
return rc;
- sis_enable_msix(ctrl_info);
+ pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
+
+ ctrl_info->controller_online = true;
+ pqi_start_heartbeat_timer(ctrl_info);
- rc = pqi_configure_events(ctrl_info);
+ rc = pqi_enable_events(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
- "error configuring events\n");
+ "error enabling events\n");
return rc;
}
- pqi_start_heartbeat_timer(ctrl_info);
-
- ctrl_info->controller_online = true;
-
/* Register with the SCSI subsystem. */
rc = pqi_register_scsi(ctrl_info);
if (rc)
@@ -5410,6 +6213,119 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
return 0;
}
+static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned int i;
+ struct pqi_admin_queues *admin_queues;
+ struct pqi_event_queue *event_queue;
+
+ admin_queues = &ctrl_info->admin_queues;
+ admin_queues->iq_pi_copy = 0;
+ admin_queues->oq_ci_copy = 0;
+ *admin_queues->oq_pi = 0;
+
+ for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+ ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
+ ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
+ ctrl_info->queue_groups[i].oq_ci_copy = 0;
+
+ *ctrl_info->queue_groups[i].iq_ci[RAID_PATH] = 0;
+ *ctrl_info->queue_groups[i].iq_ci[AIO_PATH] = 0;
+ *ctrl_info->queue_groups[i].oq_pi = 0;
+ }
+
+ event_queue = &ctrl_info->event_queue;
+ *event_queue->oq_pi = 0;
+ event_queue->oq_ci_copy = 0;
+}
+
+static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+
+ rc = pqi_force_sis_mode(ctrl_info);
+ if (rc)
+ return rc;
+
+ /*
+ * Wait until the controller is ready to start accepting SIS
+ * commands.
+ */
+ rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
+ if (rc)
+ return rc;
+
+ /*
+ * If the function we are about to call succeeds, the
+ * controller will transition from legacy SIS mode
+ * into PQI mode.
+ */
+ rc = sis_init_base_struct_addr(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error initializing PQI mode\n");
+ return rc;
+ }
+
+ /* Wait for the controller to complete the SIS -> PQI transition. */
+ rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "transition to PQI mode failed\n");
+ return rc;
+ }
+
+ /* From here on, we are running in PQI mode. */
+ ctrl_info->pqi_mode_enabled = true;
+ pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
+
+ pqi_reinit_queues(ctrl_info);
+
+ rc = pqi_create_admin_queues(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error creating admin queues\n");
+ return rc;
+ }
+
+ rc = pqi_create_queues(ctrl_info);
+ if (rc)
+ return rc;
+
+ pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
+
+ ctrl_info->controller_online = true;
+ pqi_start_heartbeat_timer(ctrl_info);
+ pqi_ctrl_unblock_requests(ctrl_info);
+
+ rc = pqi_enable_events(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error enabling events\n");
+ return rc;
+ }
+
+ rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error updating host wellness\n");
+ return rc;
+ }
+
+ pqi_schedule_update_time_worker(ctrl_info);
+
+ pqi_scan_scsi_devices(ctrl_info);
+
+ return 0;
+}
+
+static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
+ u16 timeout)
+{
+ return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
+}
+
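pqi_set_pcie_completion_timeout() is a thin wrapper over the PCIe capability read-modify-write accessor. The equivalent open-coded form, shown only to make the masking explicit (set_comp_timeout_open_coded() is illustrative, not driver code):

	#include <linux/pci.h>

	static int set_comp_timeout_open_coded(struct pci_dev *pci_dev, u16 timeout)
	{
		u16 devctl2;
		int rc;

		rc = pcie_capability_read_word(pci_dev, PCI_EXP_DEVCTL2, &devctl2);
		if (rc)
			return rc;

		devctl2 &= ~PCI_EXP_DEVCTL2_COMP_TIMEOUT;	/* clear timeout range bits 3:0 */
		devctl2 |= timeout & PCI_EXP_DEVCTL2_COMP_TIMEOUT;

		return pcie_capability_write_word(pci_dev, PCI_EXP_DEVCTL2, devctl2);
	}
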
static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
{
int rc;
@@ -5450,12 +6366,23 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
goto release_regions;
}
- ctrl_info->registers = ctrl_info->iomem_base;
- ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
+#define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
+
+ /* Increase the PCIe completion timeout. */
+ rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
+ PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "failed to set PCIe completion timeout\n");
+ goto release_regions;
+ }
/* Enable bus mastering. */
pci_set_master(ctrl_info->pci_dev);
+ ctrl_info->registers = ctrl_info->iomem_base;
+ ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
+
pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
return 0;
@@ -5472,7 +6399,8 @@ static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
{
iounmap(ctrl_info->iomem_base);
pci_release_regions(ctrl_info->pci_dev);
- pci_disable_device(ctrl_info->pci_dev);
+ if (pci_is_enabled(ctrl_info->pci_dev))
+ pci_disable_device(ctrl_info->pci_dev);
pci_set_drvdata(ctrl_info->pci_dev, NULL);
}
@@ -5486,6 +6414,7 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
return NULL;
mutex_init(&ctrl_info->scan_mutex);
+ mutex_init(&ctrl_info->lun_reset_mutex);
INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
spin_lock_init(&ctrl_info->scsi_device_list_lock);
@@ -5496,11 +6425,20 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
+ init_timer(&ctrl_info->heartbeat_timer);
+ INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
+
sema_init(&ctrl_info->sync_request_sem,
PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
- sema_init(&ctrl_info->lun_reset_sem, PQI_RESERVED_IO_SLOTS_LUN_RESET);
+ init_waitqueue_head(&ctrl_info->block_requests_wait);
+
+ INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
+ spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock);
+ INIT_WORK(&ctrl_info->raid_bypass_retry_work,
+ pqi_raid_bypass_retry_worker);
ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
+ ctrl_info->irq_mode = IRQ_MODE_NONE;
ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
return ctrl_info;
@@ -5513,14 +6451,8 @@ static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
{
- int i;
-
- for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
- free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
- &ctrl_info->queue_groups[i]);
- }
-
- pci_free_irq_vectors(ctrl_info->pci_dev);
+ pqi_free_irqs(ctrl_info);
+ pqi_disable_msix_interrupts(ctrl_info);
}
static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
@@ -5550,73 +6482,142 @@ static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
{
- cancel_delayed_work_sync(&ctrl_info->rescan_work);
- cancel_delayed_work_sync(&ctrl_info->update_time_work);
+ pqi_cancel_rescan_worker(ctrl_info);
+ pqi_cancel_update_time_worker(ctrl_info);
pqi_remove_all_scsi_devices(ctrl_info);
pqi_unregister_scsi(ctrl_info);
+ if (ctrl_info->pqi_mode_enabled)
+ pqi_revert_to_sis_mode(ctrl_info);
+ pqi_free_ctrl_resources(ctrl_info);
+}
- if (ctrl_info->pqi_mode_enabled) {
- sis_disable_msix(ctrl_info);
- if (pqi_reset(ctrl_info) == 0)
- sis_reenable_sis_mode(ctrl_info);
+static void pqi_perform_lockup_action(void)
+{
+ switch (pqi_lockup_action) {
+ case PANIC:
+ panic("FATAL: Smart Family Controller lockup detected");
+ break;
+ case REBOOT:
+ emergency_restart();
+ break;
+ case NONE:
+ default:
+ break;
+ }
+}
+
+static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
+ .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
+ .status = SAM_STAT_CHECK_CONDITION,
+};
+
+static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned int i;
+ struct pqi_io_request *io_request;
+ struct scsi_cmnd *scmd;
+
+ for (i = 0; i < ctrl_info->max_io_slots; i++) {
+ io_request = &ctrl_info->io_request_pool[i];
+ if (atomic_read(&io_request->refcount) == 0)
+ continue;
+
+ scmd = io_request->scmd;
+ if (scmd) {
+ set_host_byte(scmd, DID_NO_CONNECT);
+ } else {
+ io_request->status = -ENXIO;
+ io_request->error_info =
+ &pqi_ctrl_offline_raid_error_info;
+ }
+
+ io_request->io_complete_callback(io_request,
+ io_request->context);
}
- pqi_free_ctrl_resources(ctrl_info);
}
-static void pqi_print_ctrl_info(struct pci_dev *pdev,
+static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
+{
+ pqi_perform_lockup_action();
+ pqi_stop_heartbeat_timer(ctrl_info);
+ pqi_free_interrupts(ctrl_info);
+ pqi_cancel_rescan_worker(ctrl_info);
+ pqi_cancel_update_time_worker(ctrl_info);
+ pqi_ctrl_wait_until_quiesced(ctrl_info);
+ pqi_fail_all_outstanding_requests(ctrl_info);
+ pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
+ pqi_ctrl_unblock_requests(ctrl_info);
+}
+
+static void pqi_ctrl_offline_worker(struct work_struct *work)
+{
+ struct pqi_ctrl_info *ctrl_info;
+
+ ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
+ pqi_take_ctrl_offline_deferred(ctrl_info);
+}
+
+static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
+{
+ if (!ctrl_info->controller_online)
+ return;
+
+ ctrl_info->controller_online = false;
+ ctrl_info->pqi_mode_enabled = false;
+ pqi_ctrl_block_requests(ctrl_info);
+ if (!pqi_disable_ctrl_shutdown)
+ sis_shutdown_ctrl(ctrl_info);
+ pci_disable_device(ctrl_info->pci_dev);
+ dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
+ schedule_work(&ctrl_info->ctrl_offline_work);
+}
+
+static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
char *ctrl_description;
- if (id->driver_data) {
+ if (id->driver_data)
ctrl_description = (char *)id->driver_data;
- } else {
- switch (id->subvendor) {
- case PCI_VENDOR_ID_HP:
- ctrl_description = hpe_branded_controller;
- break;
- case PCI_VENDOR_ID_ADAPTEC2:
- default:
- ctrl_description = microsemi_branded_controller;
- break;
- }
- }
+ else
+ ctrl_description = "Microsemi Smart Family Controller";
- dev_info(&pdev->dev, "%s found\n", ctrl_description);
+ dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
}
-static int pqi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int pqi_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *id)
{
int rc;
int node;
struct pqi_ctrl_info *ctrl_info;
- pqi_print_ctrl_info(pdev, id);
+ pqi_print_ctrl_info(pci_dev, id);
if (pqi_disable_device_id_wildcards &&
id->subvendor == PCI_ANY_ID &&
id->subdevice == PCI_ANY_ID) {
- dev_warn(&pdev->dev,
+ dev_warn(&pci_dev->dev,
"controller not probed because device ID wildcards are disabled\n");
return -ENODEV;
}
if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
- dev_warn(&pdev->dev,
+ dev_warn(&pci_dev->dev,
"controller device ID matched using wildcards\n");
- node = dev_to_node(&pdev->dev);
+ node = dev_to_node(&pci_dev->dev);
if (node == NUMA_NO_NODE)
- set_dev_node(&pdev->dev, 0);
+ set_dev_node(&pci_dev->dev, 0);
ctrl_info = pqi_alloc_ctrl_info(node);
if (!ctrl_info) {
- dev_err(&pdev->dev,
+ dev_err(&pci_dev->dev,
"failed to allocate controller info block\n");
return -ENOMEM;
}
- ctrl_info->pci_dev = pdev;
+ ctrl_info->pci_dev = pci_dev;
rc = pqi_pci_init(ctrl_info);
if (rc)
@@ -5634,23 +6635,23 @@ error:
return rc;
}
-static void pqi_pci_remove(struct pci_dev *pdev)
+static void pqi_pci_remove(struct pci_dev *pci_dev)
{
struct pqi_ctrl_info *ctrl_info;
- ctrl_info = pci_get_drvdata(pdev);
+ ctrl_info = pci_get_drvdata(pci_dev);
if (!ctrl_info)
return;
pqi_remove_ctrl(ctrl_info);
}
-static void pqi_shutdown(struct pci_dev *pdev)
+static void pqi_shutdown(struct pci_dev *pci_dev)
{
int rc;
struct pqi_ctrl_info *ctrl_info;
- ctrl_info = pci_get_drvdata(pdev);
+ ctrl_info = pci_get_drvdata(pci_dev);
if (!ctrl_info)
goto error;
@@ -5663,115 +6664,284 @@ static void pqi_shutdown(struct pci_dev *pdev)
return;
error:
- dev_warn(&pdev->dev,
+ dev_warn(&pci_dev->dev,
"unable to flush controller cache\n");
}
+static void pqi_process_lockup_action_param(void)
+{
+ unsigned int i;
+
+ if (!pqi_lockup_action_param)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
+ if (strcmp(pqi_lockup_action_param,
+ pqi_lockup_actions[i].name) == 0) {
+ pqi_lockup_action = pqi_lockup_actions[i].action;
+ return;
+ }
+ }
+
+ pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
+ DRIVER_NAME_SHORT, pqi_lockup_action_param);
+}
+
+static void pqi_process_module_params(void)
+{
+ pqi_process_lockup_action_param();
+}
+
+static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev,
+	pm_message_t state)
+{
+ struct pqi_ctrl_info *ctrl_info;
+
+ ctrl_info = pci_get_drvdata(pci_dev);
+
+ pqi_disable_events(ctrl_info);
+ pqi_cancel_update_time_worker(ctrl_info);
+ pqi_cancel_rescan_worker(ctrl_info);
+ pqi_wait_until_scan_finished(ctrl_info);
+ pqi_wait_until_lun_reset_finished(ctrl_info);
+ pqi_flush_cache(ctrl_info);
+ pqi_ctrl_block_requests(ctrl_info);
+ pqi_ctrl_wait_until_quiesced(ctrl_info);
+ pqi_wait_until_inbound_queues_empty(ctrl_info);
+ pqi_ctrl_wait_for_pending_io(ctrl_info);
+ pqi_stop_heartbeat_timer(ctrl_info);
+
+ if (state.event == PM_EVENT_FREEZE)
+ return 0;
+
+ pci_save_state(pci_dev);
+ pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
+
+ ctrl_info->controller_online = false;
+ ctrl_info->pqi_mode_enabled = false;
+
+ return 0;
+}
+
+static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
+{
+ int rc;
+ struct pqi_ctrl_info *ctrl_info;
+
+ ctrl_info = pci_get_drvdata(pci_dev);
+
+ if (pci_dev->current_state != PCI_D0) {
+ ctrl_info->max_hw_queue_index = 0;
+ pqi_free_interrupts(ctrl_info);
+ pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
+ rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
+ IRQF_SHARED, DRIVER_NAME_SHORT,
+ &ctrl_info->queue_groups[0]);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "irq %u init failed with error %d\n",
+ pci_dev->irq, rc);
+ return rc;
+ }
+ pqi_start_heartbeat_timer(ctrl_info);
+ pqi_ctrl_unblock_requests(ctrl_info);
+ return 0;
+ }
+
+ pci_set_power_state(pci_dev, PCI_D0);
+ pci_restore_state(pci_dev);
+
+ return pqi_ctrl_init_resume(ctrl_info);
+}
+
/* Define the PCI IDs for the controllers that we support. */
static const struct pci_device_id pqi_pci_id_table[] = {
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x152d, 0x8a22)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x152d, 0x8a23)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x152d, 0x8a24)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x152d, 0x8a36)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x152d, 0x8a37)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x0110)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_HP, 0x0600)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0605)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_HP, 0x0601)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0800)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_HP, 0x0602)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0801)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_HP, 0x0603)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0802)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_HP, 0x0650)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0803)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_HP, 0x0651)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0804)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_HP, 0x0652)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0805)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_HP, 0x0653)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0806)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_HP, 0x0654)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0900)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_HP, 0x0655)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0901)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_HP, 0x0700)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0902)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_HP, 0x0701)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0903)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_ADAPTEC2, 0x0800)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0904)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_ADAPTEC2, 0x0801)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0905)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_ADAPTEC2, 0x0802)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0906)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_ADAPTEC2, 0x0803)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0907)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_ADAPTEC2, 0x0804)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0908)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_ADAPTEC2, 0x0805)
+ PCI_VENDOR_ID_ADAPTEC2, 0x1200)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_ADAPTEC2, 0x0900)
+ PCI_VENDOR_ID_ADAPTEC2, 0x1201)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_ADAPTEC2, 0x0901)
+ PCI_VENDOR_ID_ADAPTEC2, 0x1202)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_ADAPTEC2, 0x0902)
+ PCI_VENDOR_ID_ADAPTEC2, 0x1280)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_ADAPTEC2, 0x0903)
+ PCI_VENDOR_ID_ADAPTEC2, 0x1281)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_ADAPTEC2, 0x0904)
+ PCI_VENDOR_ID_ADAPTEC2, 0x1300)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_ADAPTEC2, 0x0905)
+ PCI_VENDOR_ID_ADAPTEC2, 0x1301)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_ADAPTEC2, 0x0906)
+ PCI_VENDOR_ID_ADAPTEC2, 0x1380)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0600)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0601)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0602)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0603)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0604)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0606)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0650)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0651)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0652)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0653)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0654)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0655)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0656)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0657)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0700)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0701)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
@@ -5808,6 +6978,10 @@ static struct pci_driver pqi_pci_driver = {
.probe = pqi_pci_probe,
.remove = pqi_pci_remove,
.shutdown = pqi_shutdown,
+#if defined(CONFIG_PM)
+ .suspend = pqi_suspend,
+ .resume = pqi_resume,
+#endif
};
static int __init pqi_init(void)
@@ -5821,6 +6995,8 @@ static int __init pqi_init(void)
if (!pqi_sas_transport_template)
return -ENODEV;
+ pqi_process_module_params();
+
rc = pci_register_driver(&pqi_pci_driver);
if (rc)
sas_release_transport(pqi_sas_transport_template);
@@ -6173,6 +7349,9 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_event_config,
descriptors) != 4);
+ BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
+ ARRAY_SIZE(pqi_supported_event_types));
+
BUILD_BUG_ON(offsetof(struct pqi_event_response,
header.iu_type) != 0);
BUILD_BUG_ON(offsetof(struct pqi_event_response,
@@ -6246,6 +7425,22 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
controller_mode) != 292);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
+ phys_bay_in_box) != 115);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
+ device_type) != 120);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
+ redundant_path_present_map) != 1736);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
+ active_path_number) != 1738);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
+ alternate_paths_phys_connector) != 1739);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
+ alternate_paths_phys_box_on_port) != 1755);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
+ current_queue_depth_limit) != 1796);
+ BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
+
BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
@@ -6260,4 +7455,6 @@ static void __attribute__((unused)) verify_structures(void)
PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
+ BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
+ PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
}
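
None of these checks emit run-time code: BUILD_BUG_ON() fails the compile when its condition is true, so every wire-format offset asserted here is pinned at build time rather than discovered in the field. A standalone illustration with a hypothetical packed struct:

	#include <linux/bug.h>
	#include <linux/stddef.h>
	#include <linux/types.h>

	struct demo {
		u8	a;
		u8	pad[3];
		__le32	b;
	} __packed;

	static void __attribute__((unused)) demo_verify(void)
	{
		/* both conditions are false, so the build succeeds */
		BUILD_BUG_ON(offsetof(struct demo, b) != 4);
		BUILD_BUG_ON(sizeof(struct demo) != 8);
	}
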
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index 52ca4f93f1b2..0d89d3728b43 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -1,6 +1,6 @@
/*
* driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2016 Microsemi Corporation
+ * Copyright (c) 2016-2017 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index 71408f9e8f75..e55dfcf200e5 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -1,6 +1,6 @@
/*
* driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2016 Microsemi Corporation
+ * Copyright (c) 2016-2017 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
* This program is free software; you can redistribute it and/or modify
@@ -33,7 +33,9 @@
/* for submission of legacy SIS commands */
#define SIS_REENABLE_SIS_MODE 0x1
#define SIS_ENABLE_MSIX 0x40
+#define SIS_ENABLE_INTX 0x80
#define SIS_SOFT_RESET 0x100
+#define SIS_TRIGGER_SHUTDOWN 0x800000
#define SIS_CMD_READY 0x200
#define SIS_CMD_COMPLETE 0x1000
#define SIS_CLEAR_CTRL_TO_HOST_DOORBELL 0x1000
@@ -55,6 +57,7 @@
#define SIS_CTRL_KERNEL_UP 0x80
#define SIS_CTRL_KERNEL_PANIC 0x100
#define SIS_CTRL_READY_TIMEOUT_SECS 30
+#define SIS_CTRL_READY_RESUME_TIMEOUT_SECS 90
#define SIS_CTRL_READY_POLL_INTERVAL_MSECS 10
#pragma pack(1)
@@ -78,12 +81,13 @@ struct sis_base_struct {
#pragma pack()
-int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info)
+static int sis_wait_for_ctrl_ready_with_timeout(struct pqi_ctrl_info *ctrl_info,
+ unsigned int timeout_secs)
{
unsigned long timeout;
u32 status;
- timeout = (SIS_CTRL_READY_TIMEOUT_SECS * HZ) + jiffies;
+ timeout = (timeout_secs * HZ) + jiffies;
while (1) {
status = readl(&ctrl_info->registers->sis_firmware_status);
@@ -98,14 +102,30 @@ int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info)
if (status & SIS_CTRL_KERNEL_UP)
break;
}
- if (time_after(jiffies, timeout))
+ if (time_after(jiffies, timeout)) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "controller not ready after %u seconds\n",
+ timeout_secs);
return -ETIMEDOUT;
+ }
msleep(SIS_CTRL_READY_POLL_INTERVAL_MSECS);
}
return 0;
}
+int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info)
+{
+ return sis_wait_for_ctrl_ready_with_timeout(ctrl_info,
+ SIS_CTRL_READY_TIMEOUT_SECS);
+}
+
+int sis_wait_for_ctrl_ready_resume(struct pqi_ctrl_info *ctrl_info)
+{
+ return sis_wait_for_ctrl_ready_with_timeout(ctrl_info,
+ SIS_CTRL_READY_RESUME_TIMEOUT_SECS);
+}
+
bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info)
{
bool running;
@@ -126,6 +146,12 @@ bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info)
return running;
}
+bool sis_is_kernel_up(struct pqi_ctrl_info *ctrl_info)
+{
+ return readl(&ctrl_info->registers->sis_firmware_status) &
+ SIS_CTRL_KERNEL_UP;
+}
+
/* used for passing command parameters/results when issuing SIS commands */
struct sis_sync_cmd_params {
u32 mailbox[6]; /* mailboxes 0-5 */
@@ -308,6 +334,34 @@ out:
return rc;
}
+#define SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS 30
+
+static void sis_wait_for_doorbell_bit_to_clear(
+ struct pqi_ctrl_info *ctrl_info, u32 bit)
+{
+ u32 doorbell_register;
+ unsigned long timeout;
+
+ timeout = (SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS * HZ) + jiffies;
+
+ while (1) {
+ doorbell_register =
+ readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell);
+ if ((doorbell_register & bit) == 0)
+ break;
+ if (readl(&ctrl_info->registers->sis_firmware_status) &
+ SIS_CTRL_KERNEL_PANIC)
+ break;
+ if (time_after(jiffies, timeout)) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "doorbell register bit 0x%x not cleared\n",
+ bit);
+ break;
+ }
+ usleep_range(1000, 2000);
+ }
+}
+
/* Enable MSI-X interrupts on the controller. */
void sis_enable_msix(struct pqi_ctrl_info *ctrl_info)
@@ -320,6 +374,8 @@ void sis_enable_msix(struct pqi_ctrl_info *ctrl_info)
writel(doorbell_register,
&ctrl_info->registers->sis_host_to_ctrl_doorbell);
+
+ sis_wait_for_doorbell_bit_to_clear(ctrl_info, SIS_ENABLE_MSIX);
}
/* Disable MSI-X interrupts on the controller. */
@@ -336,12 +392,48 @@ void sis_disable_msix(struct pqi_ctrl_info *ctrl_info)
&ctrl_info->registers->sis_host_to_ctrl_doorbell);
}
+void sis_enable_intx(struct pqi_ctrl_info *ctrl_info)
+{
+ u32 doorbell_register;
+
+ doorbell_register =
+ readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell);
+ doorbell_register |= SIS_ENABLE_INTX;
+
+ writel(doorbell_register,
+ &ctrl_info->registers->sis_host_to_ctrl_doorbell);
+
+ sis_wait_for_doorbell_bit_to_clear(ctrl_info, SIS_ENABLE_INTX);
+}
+
+void sis_disable_intx(struct pqi_ctrl_info *ctrl_info)
+{
+ u32 doorbell_register;
+
+ doorbell_register =
+ readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell);
+ doorbell_register &= ~SIS_ENABLE_INTX;
+
+ writel(doorbell_register,
+ &ctrl_info->registers->sis_host_to_ctrl_doorbell);
+}
+
void sis_soft_reset(struct pqi_ctrl_info *ctrl_info)
{
writel(SIS_SOFT_RESET,
&ctrl_info->registers->sis_host_to_ctrl_doorbell);
}
+void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info)
+{
+ if (readl(&ctrl_info->registers->sis_firmware_status) &
+ SIS_CTRL_KERNEL_PANIC)
+ return;
+
+ writel(SIS_TRIGGER_SHUTDOWN,
+ &ctrl_info->registers->sis_host_to_ctrl_doorbell);
+}
+
#define SIS_MODE_READY_TIMEOUT_SECS 30
int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info)
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h
index bd6e7b08338e..983184b69373 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.h
+++ b/drivers/scsi/smartpqi/smartpqi_sis.h
@@ -1,6 +1,6 @@
/*
* driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2016 Microsemi Corporation
+ * Copyright (c) 2016-2017 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
* This program is free software; you can redistribute it and/or modify
@@ -20,13 +20,18 @@
#define _SMARTPQI_SIS_H
int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info);
+int sis_wait_for_ctrl_ready_resume(struct pqi_ctrl_info *ctrl_info);
bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info);
+bool sis_is_kernel_up(struct pqi_ctrl_info *ctrl_info);
int sis_get_ctrl_properties(struct pqi_ctrl_info *ctrl_info);
int sis_get_pqi_capabilities(struct pqi_ctrl_info *ctrl_info);
int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info);
void sis_enable_msix(struct pqi_ctrl_info *ctrl_info);
void sis_disable_msix(struct pqi_ctrl_info *ctrl_info);
+void sis_enable_intx(struct pqi_ctrl_info *ctrl_info);
+void sis_disable_intx(struct pqi_ctrl_info *ctrl_info);
void sis_soft_reset(struct pqi_ctrl_info *ctrl_info);
+void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info);
int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info);
void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value);
u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info);
diff --git a/drivers/scsi/snic/snic_isr.c b/drivers/scsi/snic/snic_isr.c
index d859501e4ccd..c4da3673f2ae 100644
--- a/drivers/scsi/snic/snic_isr.c
+++ b/drivers/scsi/snic/snic_isr.c
@@ -141,7 +141,7 @@ snic_request_intr(struct snic *snic)
snic->msix[i].devid);
if (ret) {
SNIC_HOST_ERR(snic->shost,
- "MSI-X: requrest_irq(%d) failed %d\n",
+ "MSI-X: request_irq(%d) failed %d\n",
i,
ret);
snic_free_intr(snic);
@@ -151,7 +151,7 @@ snic_request_intr(struct snic *snic)
}
return ret;
-} /* end of snic_requrest_intr */
+} /* end of snic_request_intr */
int
snic_set_intr_mode(struct snic *snic)
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
index da979a73baa0..d8a376b7882d 100644
--- a/drivers/scsi/snic/snic_scsi.c
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -359,8 +359,6 @@ snic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
SNIC_SCSI_DBG(shost, "sc %p Tag %d (sc %0x) lun %lld in snic_qcmd\n",
sc, snic_cmd_tag(sc), sc->cmnd[0], sc->device->lun);
- memset(scsi_cmd_priv(sc), 0, sizeof(struct snic_internal_io_state));
-
ret = snic_issue_scsi_req(snic, tgt, sc);
if (ret) {
SNIC_HOST_ERR(shost, "Failed to Q, Scsi Req w/ err %d.\n", ret);
@@ -1262,7 +1260,7 @@ snic_io_cmpl_handler(struct vnic_dev *vdev,
default:
SNIC_BUG_ON(1);
SNIC_SCSI_DBG(snic->shost,
- "Unknown Firmwqre completion request type %d\n",
+ "Unknown Firmware completion request type %d\n",
fwreq->hdr.type);
break;
}
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 1ea34d6f5437..8e5013d9cad4 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -511,7 +511,7 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
atomic64_dec(&STp->stats->in_flight);
}
-static void st_scsi_execute_end(struct request *req, int uptodate)
+static void st_scsi_execute_end(struct request *req, blk_status_t status)
{
struct st_request *SRpnt = req->end_io_data;
struct scsi_request *rq = scsi_req(req);
@@ -549,7 +549,6 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
if (IS_ERR(req))
return DRIVER_ERROR << 24;
rq = scsi_req(req);
- scsi_req_init(req);
req->rq_flags |= RQF_QUIET;
mdata->null_mapped = 1;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index ae966dc3bbc5..3cc8d67783a1 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1149,13 +1149,9 @@ static void storvsc_on_receive(struct storvsc_device *stor_device,
static void storvsc_on_channel_callback(void *context)
{
struct vmbus_channel *channel = (struct vmbus_channel *)context;
+ const struct vmpacket_descriptor *desc;
struct hv_device *device;
struct storvsc_device *stor_device;
- u32 bytes_recvd;
- u64 request_id;
- unsigned char packet[ALIGN(sizeof(struct vstor_packet), 8)];
- struct storvsc_cmd_request *request;
- int ret;
if (channel->primary_channel != NULL)
device = channel->primary_channel->device_obj;
@@ -1166,32 +1162,22 @@ static void storvsc_on_channel_callback(void *context)
if (!stor_device)
return;
- do {
- ret = vmbus_recvpacket(channel, packet,
- ALIGN((sizeof(struct vstor_packet) -
- vmscsi_size_delta), 8),
- &bytes_recvd, &request_id);
- if (ret == 0 && bytes_recvd > 0) {
-
- request = (struct storvsc_cmd_request *)
- (unsigned long)request_id;
-
- if ((request == &stor_device->init_request) ||
- (request == &stor_device->reset_request)) {
-
- memcpy(&request->vstor_packet, packet,
- (sizeof(struct vstor_packet) -
- vmscsi_size_delta));
- complete(&request->wait_event);
- } else {
- storvsc_on_receive(stor_device,
- (struct vstor_packet *)packet,
- request);
- }
+ foreach_vmbus_pkt(desc, channel) {
+ void *packet = hv_pkt_data(desc);
+ struct storvsc_cmd_request *request;
+
+ request = (struct storvsc_cmd_request *)
+ ((unsigned long)desc->trans_id);
+
+ if (request == &stor_device->init_request ||
+ request == &stor_device->reset_request) {
+ memcpy(&request->vstor_packet, packet,
+ (sizeof(struct vstor_packet) - vmscsi_size_delta));
+ complete(&request->wait_event);
} else {
- break;
+ storvsc_on_receive(stor_device, packet, request);
}
- } while (1);
+ }
}
static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size,
@@ -1220,13 +1206,13 @@ static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size,
static int storvsc_dev_remove(struct hv_device *device)
{
struct storvsc_device *stor_device;
- unsigned long flags;
stor_device = hv_get_drvdata(device);
- spin_lock_irqsave(&device->channel->inbound_lock, flags);
stor_device->destroy = true;
- spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
+
+ /* Make sure flag is set before waiting */
+ wmb();
/*
 * At this point, all outbound traffic should be disabled. We
@@ -1243,9 +1229,7 @@ static int storvsc_dev_remove(struct hv_device *device)
* we have drained - to drain the outgoing packets, we need to
* allow incoming packets.
*/
- spin_lock_irqsave(&device->channel->inbound_lock, flags);
hv_set_drvdata(device, NULL);
- spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
/* Close the channel */
vmbus_close(device->channel);
@@ -1511,6 +1495,10 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
*/
static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
{
+#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
+ if (scmnd->device->host->transportt == fc_transport_template)
+ return fc_eh_timed_out(scmnd);
+#endif
return BLK_EH_RESET_TIMER;
}
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index 7b6d4c2087d7..747ee64a78e1 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -566,6 +566,7 @@ static int esp_sbus_probe(struct platform_device *op)
struct device_node *dp = op->dev.of_node;
struct platform_device *dma_of = NULL;
int hme = 0;
+ int ret;
if (dp->parent &&
(!strcmp(dp->parent->name, "espdma") ||
@@ -580,7 +581,11 @@ static int esp_sbus_probe(struct platform_device *op)
if (!dma_of)
return -ENODEV;
- return esp_sbus_probe_one(op, dma_of, hme);
+ ret = esp_sbus_probe_one(op, dma_of, hme);
+ if (ret)
+ put_device(&dma_of->dev);
+
+ return ret;
}
static int esp_sbus_remove(struct platform_device *op)
@@ -613,6 +618,8 @@ static int esp_sbus_remove(struct platform_device *op)
dev_set_drvdata(&op->dev, NULL);
+ put_device(&dma_of->dev);
+
return 0;
}
diff --git a/drivers/scsi/ufs/tc-dwc-g210-pci.c b/drivers/scsi/ufs/tc-dwc-g210-pci.c
index c09a0fef0fe6..325d5e14fc0d 100644
--- a/drivers/scsi/ufs/tc-dwc-g210-pci.c
+++ b/drivers/scsi/ufs/tc-dwc-g210-pci.c
@@ -130,8 +130,6 @@ tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- INIT_LIST_HEAD(&hba->clk_list_head);
-
hba->vops = &tc_dwc_g210_pci_hba_vops;
err = ufshcd_init(hba, mmio_base, pdev->irq);
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index 52b546fb509b..925b0ec7ec54 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -37,7 +37,42 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
-#ifdef CONFIG_PM
+static int ufs_intel_disable_lcc(struct ufs_hba *hba)
+{
+ u32 attr = UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE);
+ u32 lcc_enable = 0;
+
+ ufshcd_dme_get(hba, attr, &lcc_enable);
+ if (lcc_enable)
+ ufshcd_dme_set(hba, attr, 0);
+
+ return 0;
+}
+
+static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ int err = 0;
+
+ switch (status) {
+ case PRE_CHANGE:
+ err = ufs_intel_disable_lcc(hba);
+ break;
+ case POST_CHANGE:
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
+ .name = "intel-pci",
+ .link_startup_notify = ufs_intel_link_startup_notify,
+};
+
+#ifdef CONFIG_PM_SLEEP
/**
* ufshcd_pci_suspend - suspend power management function
* @pdev: pointer to PCI device handle
@@ -62,7 +97,9 @@ static int ufshcd_pci_resume(struct device *dev)
{
return ufshcd_system_resume(dev_get_drvdata(dev));
}
+#endif /* !CONFIG_PM_SLEEP */
+#ifdef CONFIG_PM
static int ufshcd_pci_runtime_suspend(struct device *dev)
{
return ufshcd_runtime_suspend(dev_get_drvdata(dev));
@@ -75,13 +112,7 @@ static int ufshcd_pci_runtime_idle(struct device *dev)
{
return ufshcd_runtime_idle(dev_get_drvdata(dev));
}
-#else /* !CONFIG_PM */
-#define ufshcd_pci_suspend NULL
-#define ufshcd_pci_resume NULL
-#define ufshcd_pci_runtime_suspend NULL
-#define ufshcd_pci_runtime_resume NULL
-#define ufshcd_pci_runtime_idle NULL
-#endif /* CONFIG_PM */
+#endif /* !CONFIG_PM */
/**
* ufshcd_pci_shutdown - main function to put the controller in reset state
@@ -143,7 +174,7 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- INIT_LIST_HEAD(&hba->clk_list_head);
+ hba->vops = (struct ufs_hba_variant_ops *)id->driver_data;
err = ufshcd_init(hba, mmio_base, pdev->irq);
if (err) {
@@ -160,15 +191,16 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
static const struct dev_pm_ops ufshcd_pci_pm_ops = {
- .suspend = ufshcd_pci_suspend,
- .resume = ufshcd_pci_resume,
- .runtime_suspend = ufshcd_pci_runtime_suspend,
- .runtime_resume = ufshcd_pci_runtime_resume,
- .runtime_idle = ufshcd_pci_runtime_idle,
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_pci_suspend,
+ ufshcd_pci_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend,
+ ufshcd_pci_runtime_resume,
+ ufshcd_pci_runtime_idle)
};
static const struct pci_device_id ufshcd_pci_tbl[] = {
{ PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
{ } /* terminate list */
};
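
The Intel entry above carries its variant ops in .driver_data; the PCI core hands the matched pci_device_id back to probe, where one cast recovers the pointer (the earlier hunk assigns it to hba->vops). A condensed sketch of that round-trip — example_probe() is illustrative, and the Samsung entry's driver_data of 0 simply yields a NULL vops:

	#include <linux/pci.h>
	#include "ufshcd.h"	/* for struct ufs_hba_variant_ops */

	static int example_probe(struct pci_dev *pdev,
				 const struct pci_device_id *id)
	{
		/* recover the per-device ops table stashed at match time */
		struct ufs_hba_variant_ops *vops =
			(struct ufs_hba_variant_ops *)id->driver_data;

		dev_info(&pdev->dev, "variant ops: %s\n",
			 vops ? vops->name : "none");
		return 0;
	}
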
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 8e5e6c04c035..e82bde077296 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -58,8 +58,6 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
if (!np)
goto out;
- INIT_LIST_HEAD(&hba->clk_list_head);
-
cnt = of_property_count_strings(np, "clock-names");
if (!cnt || (cnt == -EINVAL)) {
dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index ffe8d8608818..5bc9dc14e075 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -314,7 +314,7 @@ static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
struct ufs_clk_info *clki;
struct list_head *head = &hba->clk_list_head;
- if (!head || list_empty(head))
+ if (list_empty(head))
return;
list_for_each_entry(clki, head, list) {
@@ -869,7 +869,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
ktime_t start = ktime_get();
bool clk_state_changed = false;
- if (!head || list_empty(head))
+ if (list_empty(head))
goto out;
ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
@@ -943,7 +943,7 @@ static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
struct ufs_clk_info *clki;
struct list_head *head = &hba->clk_list_head;
- if (!head || list_empty(head))
+ if (list_empty(head))
return false;
list_for_each_entry(clki, head, list) {
@@ -5809,7 +5809,8 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
do {
spin_lock_irqsave(hba->host->host_lock, flags);
if (!(work_pending(&hba->eh_work) ||
- hba->ufshcd_state == UFSHCD_STATE_RESET))
+ hba->ufshcd_state == UFSHCD_STATE_RESET ||
+ hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
break;
spin_unlock_irqrestore(hba->host->host_lock, flags);
dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
@@ -6752,7 +6753,7 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
ktime_t start = ktime_get();
bool clk_state_changed = false;
- if (!head || list_empty(head))
+ if (list_empty(head))
goto out;
ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
@@ -6818,7 +6819,7 @@ static int ufshcd_init_clocks(struct ufs_hba *hba)
struct device *dev = hba->dev;
struct list_head *head = &hba->clk_list_head;
- if (!head || list_empty(head))
+ if (list_empty(head))
goto out;
list_for_each_entry(clki, head, list) {
@@ -7811,6 +7812,8 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
hba->dev = dev;
*hba_handle = hba;
+ INIT_LIST_HEAD(&hba->clk_list_head);
+
out_error:
return err;
}
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index f8dbfeee6c63..8b93197daefe 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -547,7 +547,6 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
dev_dbg(&sc->device->sdev_gendev,
"cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);
- memset(cmd, 0, sizeof(*cmd));
cmd->sc = sc;
BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
@@ -796,6 +795,16 @@ static int virtscsi_map_queues(struct Scsi_Host *shost)
return blk_mq_virtio_map_queues(&shost->tag_set, vscsi->vdev, 2);
}
+/*
+ * The host guarantees to respond to each command, although I/O
+ * latencies might be higher than on bare metal. Reset the timer
+ * unconditionally to give the host a chance to perform EH.
+ */
+static enum blk_eh_timer_return virtscsi_eh_timed_out(struct scsi_cmnd *scmnd)
+{
+ return BLK_EH_RESET_TIMER;
+}
+
static struct scsi_host_template virtscsi_host_template_single = {
.module = THIS_MODULE,
.name = "Virtio SCSI HBA",
@@ -806,6 +815,7 @@ static struct scsi_host_template virtscsi_host_template_single = {
.change_queue_depth = virtscsi_change_queue_depth,
.eh_abort_handler = virtscsi_abort,
.eh_device_reset_handler = virtscsi_device_reset,
+ .eh_timed_out = virtscsi_eh_timed_out,
.slave_alloc = virtscsi_device_alloc,
.can_queue = 1024,
@@ -826,6 +836,7 @@ static struct scsi_host_template virtscsi_host_template_multi = {
.change_queue_depth = virtscsi_change_queue_depth,
.eh_abort_handler = virtscsi_abort,
.eh_device_reset_handler = virtscsi_device_reset,
+ .eh_timed_out = virtscsi_eh_timed_out,
.can_queue = 1024,
.dma_boundary = UINT_MAX,
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index a6a8b60d4902..36f59a1be7e9 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -534,7 +534,6 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
int err;
sc->result = 0;
- memset(shadow, 0, sizeof(*shadow));
shadow->sc = sc;
shadow->act = VSCSIIF_ACT_SCSI_CDB;