Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/3w-9xxx.c | 9
-rw-r--r--  drivers/scsi/3w-sas.c | 21
-rw-r--r--  drivers/scsi/3w-xxxx.c | 10
-rw-r--r--  drivers/scsi/53c700.c | 19
-rw-r--r--  drivers/scsi/BusLogic.c | 9
-rw-r--r--  drivers/scsi/BusLogic.h | 3
-rw-r--r--  drivers/scsi/Kconfig | 3
-rw-r--r--  drivers/scsi/Makefile | 1
-rw-r--r--  drivers/scsi/a100u2w.c | 2
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 4
-rw-r--r--  drivers/scsi/aacraid/linit.c | 10
-rw-r--r--  drivers/scsi/advansys.c | 25
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_core.c | 4
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c | 8
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.c | 8
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm_gram.y | 1
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y | 1
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm_scan.l | 3
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_hwi.c | 2
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c | 2
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_tmf.c | 6
-rw-r--r--  drivers/scsi/am53c974.c | 2
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_attr.c | 12
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c | 32
-rw-r--r--  drivers/scsi/arm/acornscsi.c | 2
-rw-r--r--  drivers/scsi/arm/fas216.c | 6
-rw-r--r--  drivers/scsi/atp870u.c | 2
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 6
-rw-r--r--  drivers/scsi/bfa/bfad.c | 12
-rw-r--r--  drivers/scsi/bfa/bfad_im.c | 26
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 18
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_tgt.c | 4
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_init.c | 7
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_iscsi.c | 8
-rw-r--r--  drivers/scsi/csiostor/csio_hw.c | 4
-rw-r--r--  drivers/scsi/csiostor/csio_init.c | 2
-rw-r--r--  drivers/scsi/csiostor/csio_mb.c | 4
-rw-r--r--  drivers/scsi/csiostor/csio_scsi.c | 20
-rw-r--r--  drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 2
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 2
-rw-r--r--  drivers/scsi/cxlflash/Kconfig | 13
-rw-r--r--  drivers/scsi/cxlflash/Makefile | 5
-rw-r--r--  drivers/scsi/cxlflash/backend.h | 48
-rw-r--r--  drivers/scsi/cxlflash/common.h | 340
-rw-r--r--  drivers/scsi/cxlflash/cxl_hw.c | 177
-rw-r--r--  drivers/scsi/cxlflash/lunmgt.c | 278
-rw-r--r--  drivers/scsi/cxlflash/main.c | 3968
-rw-r--r--  drivers/scsi/cxlflash/main.h | 129
-rw-r--r--  drivers/scsi/cxlflash/ocxl_hw.c | 1399
-rw-r--r--  drivers/scsi/cxlflash/ocxl_hw.h | 72
-rw-r--r--  drivers/scsi/cxlflash/sislite.h | 560
-rw-r--r--  drivers/scsi/cxlflash/superpipe.c | 2218
-rw-r--r--  drivers/scsi/cxlflash/superpipe.h | 150
-rw-r--r--  drivers/scsi/cxlflash/vlun.c | 1336
-rw-r--r--  drivers/scsi/cxlflash/vlun.h | 82
-rw-r--r--  drivers/scsi/dc395x.c | 26
-rw-r--r--  drivers/scsi/dmx3191d.c | 2
-rw-r--r--  drivers/scsi/elx/efct/efct_driver.c | 6
-rw-r--r--  drivers/scsi/elx/efct/efct_hw.c | 5
-rw-r--r--  drivers/scsi/elx/efct/efct_xport.c | 2
-rw-r--r--  drivers/scsi/elx/libefc/efc_fabric.c | 2
-rw-r--r--  drivers/scsi/elx/libefc/efc_node.c | 2
-rw-r--r--  drivers/scsi/esas2r/esas2r.h | 12
-rw-r--r--  drivers/scsi/esas2r/esas2r_init.c | 2
-rw-r--r--  drivers/scsi/esas2r/esas2r_main.c | 32
-rw-r--r--  drivers/scsi/esp_scsi.c | 14
-rw-r--r--  drivers/scsi/esp_scsi.h | 2
-rw-r--r--  drivers/scsi/fcoe/fcoe.c | 4
-rw-r--r--  drivers/scsi/fcoe/fcoe_ctlr.c | 4
-rw-r--r--  drivers/scsi/fdomain_pci.c | 2
-rw-r--r--  drivers/scsi/fnic/Makefile | 5
-rw-r--r--  drivers/scsi/fnic/fdls_disc.c | 5040
-rw-r--r--  drivers/scsi/fnic/fdls_fc.h | 253
-rw-r--r--  drivers/scsi/fnic/fip.c | 1005
-rw-r--r--  drivers/scsi/fnic/fip.h | 159
-rw-r--r--  drivers/scsi/fnic/fnic.h | 288
-rw-r--r--  drivers/scsi/fnic/fnic_attrs.c | 12
-rw-r--r--  drivers/scsi/fnic/fnic_debugfs.c | 11
-rw-r--r--  drivers/scsi/fnic/fnic_fcs.c | 1744
-rw-r--r--  drivers/scsi/fnic/fnic_fdls.h | 435
-rw-r--r--  drivers/scsi/fnic/fnic_fip.h | 48
-rw-r--r--  drivers/scsi/fnic/fnic_io.h | 14
-rw-r--r--  drivers/scsi/fnic/fnic_isr.c | 28
-rw-r--r--  drivers/scsi/fnic/fnic_main.c | 758
-rw-r--r--  drivers/scsi/fnic/fnic_pci_subsys_devid.c | 131
-rw-r--r--  drivers/scsi/fnic/fnic_res.c | 77
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c | 1161
-rw-r--r--  drivers/scsi/fnic/fnic_stats.h | 49
-rw-r--r--  drivers/scsi/fnic/fnic_trace.c | 97
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas.h | 9
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 96
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 6
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 33
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 38
-rw-r--r--  drivers/scsi/hosts.c | 18
-rw-r--r--  drivers/scsi/hpsa.c | 39
-rw-r--r--  drivers/scsi/hptiop.c | 8
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 36
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 14
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 4
-rw-r--r--  drivers/scsi/initio.c | 2
-rw-r--r--  drivers/scsi/ipr.c | 60
-rw-r--r--  drivers/scsi/ips.c | 14
-rw-r--r--  drivers/scsi/ips.h | 3
-rw-r--r--  drivers/scsi/isci/host.c | 12
-rw-r--r--  drivers/scsi/isci/init.c | 14
-rw-r--r--  drivers/scsi/isci/isci.h | 15
-rw-r--r--  drivers/scsi/isci/remote_device.c | 29
-rw-r--r--  drivers/scsi/isci/remote_device.h | 19
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 66
-rw-r--r--  drivers/scsi/iscsi_tcp.h | 4
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c | 10
-rw-r--r--  drivers/scsi/libiscsi.c | 6
-rw-r--r--  drivers/scsi/libiscsi_tcp.c | 91
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 19
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 26
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 22
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 210
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.h | 19
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 11
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 82
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 90
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 85
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 72
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 61
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 78
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 61
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_vmid.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 24
-rw-r--r--  drivers/scsi/megaraid.c | 12
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 16
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.c | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 4
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 54
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 7
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h | 4
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_image.h | 8
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_init.h | 11
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_ioc.h | 21
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_tool.h | 1
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_transport.h | 20
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr.h | 35
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_app.c | 129
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_fw.c | 167
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_os.c | 123
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_transport.c | 8
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2.h | 9
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h | 7
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_ioc.h | 54
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 25
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h | 10
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_config.c | 79
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c | 280
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.h | 49
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 31
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_transport.c | 8
-rw-r--r--  drivers/scsi/mvsas/mv_init.c | 2
-rw-r--r--  drivers/scsi/mvsas/mv_sas.c | 12
-rw-r--r--  drivers/scsi/mvsas/mv_sas.h | 1
-rw-r--r--  drivers/scsi/mvumi.c | 5
-rw-r--r--  drivers/scsi/myrb.c | 25
-rw-r--r--  drivers/scsi/myrs.c | 13
-rw-r--r--  drivers/scsi/ncr53c8xx.c | 9
-rw-r--r--  drivers/scsi/nsp32.c | 2
-rw-r--r--  drivers/scsi/pm8001/pm8001_defs.h | 2
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.c | 5
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c | 6
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.c | 79
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.h | 3
-rw-r--r--  drivers/scsi/pm8001/pm80xx_hwi.c | 59
-rw-r--r--  drivers/scsi/pmcraid.c | 30
-rw-r--r--  drivers/scsi/ps3rom.c | 5
-rw-r--r--  drivers/scsi/qedf/qedf_attr.c | 10
-rw-r--r--  drivers/scsi/qedf/qedf_dbg.h | 2
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 7
-rw-r--r--  drivers/scsi/qedi/qedi_dbg.h | 2
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 10
-rw-r--r--  drivers/scsi/qla1280.c | 12
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 80
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_mr.h | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.c | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 20
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 4
-rw-r--r--  drivers/scsi/qla4xxx/ql4_attr.c | 12
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 15
-rw-r--r--  drivers/scsi/qlogicpti.c | 5
-rw-r--r--  drivers/scsi/scsi.c | 64
-rw-r--r--  drivers/scsi/scsi_debug.c | 960
-rw-r--r--  drivers/scsi/scsi_error.c | 45
-rw-r--r--  drivers/scsi/scsi_ioctl.c | 35
-rw-r--r--  drivers/scsi/scsi_lib.c | 39
-rw-r--r--  drivers/scsi/scsi_lib_test.c | 7
-rw-r--r--  drivers/scsi/scsi_scan.c | 54
-rw-r--r--  drivers/scsi/scsi_sysctl.c | 6
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 20
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 55
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 10
-rw-r--r--  drivers/scsi/scsi_transport_spi.c | 3
-rw-r--r--  drivers/scsi/scsi_transport_srp.c | 2
-rw-r--r--  drivers/scsi/sd.c | 7
-rw-r--r--  drivers/scsi/sd_zbc.c | 6
-rw-r--r--  drivers/scsi/sg.c | 2
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c | 123
-rw-r--r--  drivers/scsi/snic/snic_main.c | 14
-rw-r--r--  drivers/scsi/st.c | 80
-rw-r--r--  drivers/scsi/st.h | 6
-rw-r--r--  drivers/scsi/stex.c | 6
-rw-r--r--  drivers/scsi/storvsc_drv.c | 35
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.c | 19
-rw-r--r--  drivers/scsi/virtio_scsi.c | 5
-rw-r--r--  drivers/scsi/xen-scsifront.c | 11
222 files changed, 12893 insertions, 14413 deletions
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 6fb61c88ea11..883d4a12a172 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1968,13 +1968,14 @@ static char *twa_string_lookup(twa_message_type *table, unsigned int code)
} /* End twa_string_lookup() */
/* This function gets called when a disk is coming on-line */
-static int twa_slave_configure(struct scsi_device *sdev)
+static int twa_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
/* Force 60 second timeout */
blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
return 0;
-} /* End twa_slave_configure() */
+} /* End twa_sdev_configure() */
static const struct scsi_host_template driver_template = {
.module = THIS_MODULE,
@@ -1984,7 +1985,7 @@ static const struct scsi_host_template driver_template = {
.bios_param = twa_scsi_biosparam,
.change_queue_depth = scsi_change_queue_depth,
.can_queue = TW_Q_LENGTH-2,
- .slave_configure = twa_slave_configure,
+ .sdev_configure = twa_sdev_configure,
.this_id = -1,
.sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
@@ -2260,7 +2261,7 @@ out_disable_device:
} /* End twa_resume() */
/* PCI Devices supported by this driver */
-static struct pci_device_id twa_pci_tbl[] = {
+static const struct pci_device_id twa_pci_tbl[] = {
{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
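The hunks above show the core conversion of this series: the ->slave_configure() host template method becomes ->sdev_configure() and gains a struct queue_limits argument, so a driver can adjust per-device queue limits through the passed-in structure rather than poking the request queue afterwards. A minimal sketch of the new hook shape (the "foo" driver name is hypothetical, not part of this patch):

	static int foo_sdev_configure(struct scsi_device *sdev,
				      struct queue_limits *lim)
	{
		/* force a 60 second command timeout, as the 3ware drivers do */
		blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
		return 0;
	}

	static const struct scsi_host_template foo_template = {
		/* ... other methods unchanged ... */
		.sdev_configure	= foo_sdev_configure,
	};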
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index caa6713a62a4..e057ab9c7b90 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -96,7 +96,7 @@ static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_res
/* This function returns AENs through sysfs */
static ssize_t twl_sysfs_aen_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *outbuf, loff_t offset, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -116,18 +116,18 @@ static ssize_t twl_sysfs_aen_read(struct file *filp, struct kobject *kobj,
} /* End twl_sysfs_aen_read() */
/* aen_read sysfs attribute initializer */
-static struct bin_attribute twl_sysfs_aen_read_attr = {
+static const struct bin_attribute twl_sysfs_aen_read_attr = {
.attr = {
.name = "3ware_aen_read",
.mode = S_IRUSR,
},
.size = 0,
- .read = twl_sysfs_aen_read
+ .read_new = twl_sysfs_aen_read
};
/* This function returns driver compatibility info through sysfs */
static ssize_t twl_sysfs_compat_info(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *outbuf, loff_t offset, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -147,13 +147,13 @@ static ssize_t twl_sysfs_compat_info(struct file *filp, struct kobject *kobj,
} /* End twl_sysfs_compat_info() */
/* compat_info sysfs attribute initializer */
-static struct bin_attribute twl_sysfs_compat_info_attr = {
+static const struct bin_attribute twl_sysfs_compat_info_attr = {
.attr = {
.name = "3ware_compat_info",
.mode = S_IRUSR,
},
.size = 0,
- .read = twl_sysfs_compat_info
+ .read_new = twl_sysfs_compat_info
};
/* Show some statistics about the card */
@@ -1523,13 +1523,14 @@ static void twl_shutdown(struct pci_dev *pdev)
} /* End twl_shutdown() */
/* This function configures unit settings when a unit is coming on-line */
-static int twl_slave_configure(struct scsi_device *sdev)
+static int twl_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
/* Force 60 second timeout */
blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
return 0;
-} /* End twl_slave_configure() */
+} /* End twl_sdev_configure() */
static const struct scsi_host_template driver_template = {
.module = THIS_MODULE,
@@ -1539,7 +1540,7 @@ static const struct scsi_host_template driver_template = {
.bios_param = twl_scsi_biosparam,
.change_queue_depth = scsi_change_queue_depth,
.can_queue = TW_Q_LENGTH-2,
- .slave_configure = twl_slave_configure,
+ .sdev_configure = twl_sdev_configure,
.this_id = -1,
.sg_tablesize = TW_LIBERATOR_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
@@ -1821,7 +1822,7 @@ out_disable_device:
} /* End twl_resume() */
/* PCI Devices supported by this driver */
-static struct pci_device_id twl_pci_tbl[] = {
+static const struct pci_device_id twl_pci_tbl[] = {
{ PCI_VDEVICE(3WARE, PCI_DEVICE_ID_3WARE_9750) },
{ }
};
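The sysfs pieces of this patch make the bin_attribute definitions const and move their handlers from .read/.write to .read_new/.write_new, whose prototypes take a const struct bin_attribute pointer. A minimal sketch of the converted pattern (attribute and handler names are hypothetical):

	static ssize_t foo_aen_read(struct file *filp, struct kobject *kobj,
				    const struct bin_attribute *bin_attr,
				    char *outbuf, loff_t offset, size_t count)
	{
		/* copy pending adapter event data into outbuf here */
		return 0;
	}

	static const struct bin_attribute foo_aen_read_attr = {
		.attr = {
			.name = "foo_aen_read",
			.mode = S_IRUSR,
		},
		.size = 0,
		.read_new = foo_aen_read,
	};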
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 2c0fb6da0e60..89bd56f78ef9 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -172,7 +172,7 @@
Initialize queues correctly when loading with no valid units.
1.02.00.034 - Fix tw_decode_bits() to handle multiple errors.
Add support for user configurable cmd_per_lun.
- Add support for sht->slave_configure().
+ Add support for sht->sdev_configure().
1.02.00.035 - Improve tw_allocate_memory() memory allocation.
Fix tw_chrdev_ioctl() to sleep correctly.
1.02.00.036 - Increase character ioctl timeout to 60 seconds.
@@ -2221,13 +2221,13 @@ static void tw_shutdown(struct pci_dev *pdev)
} /* End tw_shutdown() */
/* This function gets called when a disk is coming online */
-static int tw_slave_configure(struct scsi_device *sdev)
+static int tw_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
/* Force 60 second timeout */
blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
return 0;
-} /* End tw_slave_configure() */
+} /* End tw_sdev_configure() */
static const struct scsi_host_template driver_template = {
.module = THIS_MODULE,
@@ -2237,7 +2237,7 @@ static const struct scsi_host_template driver_template = {
.bios_param = tw_scsi_biosparam,
.change_queue_depth = scsi_change_queue_depth,
.can_queue = TW_Q_LENGTH-2,
- .slave_configure = tw_slave_configure,
+ .sdev_configure = tw_sdev_configure,
.this_id = -1,
.sg_tablesize = TW_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
@@ -2393,7 +2393,7 @@ static void tw_remove(struct pci_dev *pdev)
} /* End tw_remove() */
/* PCI Devices supported by this driver */
-static struct pci_device_id tw_pci_tbl[] = {
+static const struct pci_device_id tw_pci_tbl[] = {
{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_1000,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_7000,
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 85439e976143..71b7ac027f48 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -158,9 +158,10 @@ STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
-STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
-STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
-STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
+STATIC int NCR_700_sdev_init(struct scsi_device *SDpnt);
+STATIC int NCR_700_sdev_configure(struct scsi_device *SDpnt,
+ struct queue_limits *lim);
+STATIC void NCR_700_sdev_destroy(struct scsi_device *SDpnt);
static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
STATIC const struct attribute_group *NCR_700_dev_groups[];
@@ -330,9 +331,9 @@ NCR_700_detect(struct scsi_host_template *tpnt,
tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
- tpnt->slave_configure = NCR_700_slave_configure;
- tpnt->slave_destroy = NCR_700_slave_destroy;
- tpnt->slave_alloc = NCR_700_slave_alloc;
+ tpnt->sdev_configure = NCR_700_sdev_configure;
+ tpnt->sdev_destroy = NCR_700_sdev_destroy;
+ tpnt->sdev_init = NCR_700_sdev_init;
tpnt->change_queue_depth = NCR_700_change_queue_depth;
if(tpnt->name == NULL)
@@ -2017,7 +2018,7 @@ NCR_700_set_offset(struct scsi_target *STp, int offset)
}
STATIC int
-NCR_700_slave_alloc(struct scsi_device *SDp)
+NCR_700_sdev_init(struct scsi_device *SDp)
{
SDp->hostdata = kzalloc(sizeof(struct NCR_700_Device_Parameters),
GFP_KERNEL);
@@ -2029,7 +2030,7 @@ NCR_700_slave_alloc(struct scsi_device *SDp)
}
STATIC int
-NCR_700_slave_configure(struct scsi_device *SDp)
+NCR_700_sdev_configure(struct scsi_device *SDp, struct queue_limits *lim)
{
struct NCR_700_Host_Parameters *hostdata =
(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
@@ -2052,7 +2053,7 @@ NCR_700_slave_configure(struct scsi_device *SDp)
}
STATIC void
-NCR_700_slave_destroy(struct scsi_device *SDp)
+NCR_700_sdev_destroy(struct scsi_device *SDp)
{
kfree(SDp->hostdata);
SDp->hostdata = NULL;
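Alongside ->sdev_configure(), the series renames ->slave_alloc() to ->sdev_init() and ->slave_destroy() to ->sdev_destroy(); the 53c700 conversion above is typical. A minimal sketch of a per-device private data pair under the new names (the structure and driver names are hypothetical):

	struct foo_device_parameters {
		unsigned char depth;
	};

	static int foo_sdev_init(struct scsi_device *sdev)
	{
		sdev->hostdata = kzalloc(sizeof(struct foo_device_parameters),
					 GFP_KERNEL);
		if (!sdev->hostdata)
			return -ENOMEM;
		return 0;
	}

	static void foo_sdev_destroy(struct scsi_device *sdev)
	{
		kfree(sdev->hostdata);
		sdev->hostdata = NULL;
	}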
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 2135a2b3e2d0..1f100270cd38 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -2153,14 +2153,15 @@ static void __init blogic_inithoststruct(struct blogic_adapter *adapter,
}
/*
- blogic_slaveconfig will actually set the queue depth on individual
+ blogic_sdev_configure will actually set the queue depth on individual
scsi devices as they are permanently added to the device chain. We
shamelessly rip off the SelectQueueDepths code to make this work mostly
like it used to. Since we don't get called once at the end of the scan
but instead get called for each device, we have to do things a bit
differently.
*/
-static int blogic_slaveconfig(struct scsi_device *dev)
+static int blogic_sdev_configure(struct scsi_device *dev,
+ struct queue_limits *lim)
{
struct blogic_adapter *adapter =
(struct blogic_adapter *) dev->host->hostdata;
@@ -3672,7 +3673,7 @@ static const struct scsi_host_template blogic_template = {
.name = "BusLogic",
.info = blogic_drvr_info,
.queuecommand = blogic_qcmd,
- .slave_configure = blogic_slaveconfig,
+ .sdev_configure = blogic_sdev_configure,
.bios_param = blogic_diskparam,
.eh_host_reset_handler = blogic_hostreset,
#if 0
@@ -3715,7 +3716,7 @@ static void __exit blogic_exit(void)
__setup("BusLogic=", blogic_setup);
#ifdef MODULE
-/*static struct pci_device_id blogic_pci_tbl[] = {
+/*static const struct pci_device_id blogic_pci_tbl[] = {
{ PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC,
diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h
index 7d1ec10f2430..61bf26d4fc10 100644
--- a/drivers/scsi/BusLogic.h
+++ b/drivers/scsi/BusLogic.h
@@ -1274,7 +1274,8 @@ static inline void blogic_incszbucket(unsigned int *cmdsz_buckets,
static const char *blogic_drvr_info(struct Scsi_Host *);
static int blogic_qcmd(struct Scsi_Host *h, struct scsi_cmnd *);
static int blogic_diskparam(struct scsi_device *, struct block_device *, sector_t, int *);
-static int blogic_slaveconfig(struct scsi_device *);
+static int blogic_sdev_configure(struct scsi_device *,
+ struct queue_limits *lim);
static void blogic_qcompleted_ccb(struct blogic_ccb *);
static irqreturn_t blogic_inthandler(int, void *);
static int blogic_resetadapter(struct blogic_adapter *, bool hard_reset);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 37c24ffea65c..5a3c670aec27 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -303,9 +303,9 @@ if SCSI_LOWLEVEL && SCSI
config ISCSI_TCP
tristate "iSCSI Initiator over TCP/IP"
depends on SCSI && INET
+ select CRC32
select CRYPTO
select CRYPTO_MD5
- select CRYPTO_CRC32C
select SCSI_ISCSI_ATTRS
help
The iSCSI Driver provides a host with the ability to access storage
@@ -336,7 +336,6 @@ source "drivers/scsi/cxgbi/Kconfig"
source "drivers/scsi/bnx2i/Kconfig"
source "drivers/scsi/bnx2fc/Kconfig"
source "drivers/scsi/be2iscsi/Kconfig"
-source "drivers/scsi/cxlflash/Kconfig"
config SGIWD93_SCSI
tristate "SGI WD93C93 SCSI Driver"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 1313ddf2fd1a..16de3e41f94c 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -96,7 +96,6 @@ obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/
obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o
obj-$(CONFIG_SCSI_DC395x) += dc395x.o
obj-$(CONFIG_SCSI_AM53C974) += esp_scsi.o am53c974.o
-obj-$(CONFIG_CXLFLASH) += cxlflash/
obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
obj-$(CONFIG_MEGARAID_SAS) += megaraid/
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index b95147fb18b0..a8979f9e30ff 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -1206,7 +1206,7 @@ static void inia100_remove_one(struct pci_dev *pdev)
scsi_host_put(shost);
}
-static struct pci_device_id inia100_pci_tbl[] = {
+static const struct pci_device_id inia100_pci_tbl[] = {
{PCI_VENDOR_ID_INIT, 0x1060, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0,}
};
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index abf6a82b74af..0be719f38377 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -3221,8 +3221,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
break;
}
fallthrough;
- case RESERVE:
- case RELEASE:
+ case RESERVE_6:
+ case RELEASE_6:
case REZERO_UNIT:
case REASSIGN_BLOCKS:
case SEEK_10:
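The aachba.c hunk above also picks up the renamed SCSI opcode macros: RESERVE and RELEASE become RESERVE_6 and RELEASE_6, making the 6-byte CDB forms (opcodes 0x16 and 0x17) explicit and distinguishing them from RESERVE_10/RELEASE_10. Call sites keep the same structure, only the names change; a minimal sketch mirroring the switch above:

	switch (scsicmd->cmnd[0]) {
	case RESERVE_6:		/* formerly RESERVE, opcode 0x16 */
	case RELEASE_6:		/* formerly RELEASE, opcode 0x17 */
		/* reservation handling itself is unchanged */
		break;
	}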
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 68f4dbcfff49..4b12e6dd8f07 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -377,15 +377,17 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
}
/**
- * aac_slave_configure - compute queue depths
+ * aac_sdev_configure - compute queue depths
* @sdev: SCSI device we are considering
+ * @lim: Request queue limits
*
* Selects queue depths for each target device based on the host adapter's
* total capacity and the queue depth supported by the target device.
* A queue depth of one automatically disables tagged queueing.
*/
-static int aac_slave_configure(struct scsi_device *sdev)
+static int aac_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
int chn, tid;
@@ -1487,7 +1489,7 @@ static const struct scsi_host_template aac_driver_template = {
.queuecommand = aac_queuecommand,
.bios_param = aac_biosparm,
.shost_groups = aac_host_groups,
- .slave_configure = aac_slave_configure,
+ .sdev_configure = aac_sdev_configure,
.change_queue_depth = aac_change_queue_depth,
.sdev_groups = aac_dev_groups,
.eh_abort_handler = aac_eh_abort,
@@ -2027,7 +2029,7 @@ static void aac_pci_resume(struct pci_dev *pdev)
dev_err(&pdev->dev, "aacraid: PCI error - resume\n");
}
-static struct pci_error_handlers aac_pci_err_handler = {
+static const struct pci_error_handlers aac_pci_err_handler = {
.error_detected = aac_pci_error_detected,
.mmio_enabled = aac_pci_mmio_enabled,
.slot_reset = aac_pci_slot_reset,
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index fd4fcb37863d..3a2c336307c0 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -4496,7 +4496,7 @@ static int AdvInitAsc3550Driver(ADV_DVC_VAR *asc_dvc)
/*
* Microcode operating variables for WDTR, SDTR, and command tag
- * queuing will be set in slave_configure() based on what a
+ * queuing will be set in sdev_configure() based on what a
* device reports it is capable of in Inquiry byte 7.
*
* If SCSI Bus Resets have been disabled, then directly set
@@ -5013,7 +5013,7 @@ static int AdvInitAsc38C0800Driver(ADV_DVC_VAR *asc_dvc)
/*
* Microcode operating variables for WDTR, SDTR, and command tag
- * queuing will be set in slave_configure() based on what a
+ * queuing will be set in sdev_configure() based on what a
* device reports it is capable of in Inquiry byte 7.
*
* If SCSI Bus Resets have been disabled, then directly set
@@ -5508,7 +5508,7 @@ static int AdvInitAsc38C1600Driver(ADV_DVC_VAR *asc_dvc)
/*
* Microcode operating variables for WDTR, SDTR, and command tag
- * queuing will be set in slave_configure() based on what a
+ * queuing will be set in sdev_configure() based on what a
* device reports it is capable of in Inquiry byte 7.
*
* If SCSI Bus Resets have been disabled, then directly set
@@ -7219,7 +7219,7 @@ static void AscAsyncFix(ASC_DVC_VAR *asc_dvc, struct scsi_device *sdev)
}
static void
-advansys_narrow_slave_configure(struct scsi_device *sdev, ASC_DVC_VAR *asc_dvc)
+advansys_narrow_sdev_configure(struct scsi_device *sdev, ASC_DVC_VAR *asc_dvc)
{
ASC_SCSI_BIT_ID_TYPE tid_bit = 1 << sdev->id;
ASC_SCSI_BIT_ID_TYPE orig_use_tagged_qng = asc_dvc->use_tagged_qng;
@@ -7345,7 +7345,7 @@ static void advansys_wide_enable_ppr(ADV_DVC_VAR *adv_dvc,
}
static void
-advansys_wide_slave_configure(struct scsi_device *sdev, ADV_DVC_VAR *adv_dvc)
+advansys_wide_sdev_configure(struct scsi_device *sdev, ADV_DVC_VAR *adv_dvc)
{
AdvPortAddr iop_base = adv_dvc->iop_base;
unsigned short tidmask = 1 << sdev->id;
@@ -7391,16 +7391,17 @@ advansys_wide_slave_configure(struct scsi_device *sdev, ADV_DVC_VAR *adv_dvc)
* Set the number of commands to queue per device for the
* specified host adapter.
*/
-static int advansys_slave_configure(struct scsi_device *sdev)
+static int advansys_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct asc_board *boardp = shost_priv(sdev->host);
if (ASC_NARROW_BOARD(boardp))
- advansys_narrow_slave_configure(sdev,
- &boardp->dvc_var.asc_dvc_var);
+ advansys_narrow_sdev_configure(sdev,
+ &boardp->dvc_var.asc_dvc_var);
else
- advansys_wide_slave_configure(sdev,
- &boardp->dvc_var.adv_dvc_var);
+ advansys_wide_sdev_configure(sdev,
+ &boardp->dvc_var.adv_dvc_var);
return 0;
}
@@ -10612,7 +10613,7 @@ static const struct scsi_host_template advansys_template = {
.queuecommand = advansys_queuecommand,
.eh_host_reset_handler = advansys_reset,
.bios_param = advansys_biosparam,
- .slave_configure = advansys_slave_configure,
+ .sdev_configure = advansys_sdev_configure,
.cmd_size = sizeof(struct advansys_cmd),
};
@@ -11408,7 +11409,7 @@ static struct eisa_driver advansys_eisa_driver = {
};
/* PCI Devices supported by this driver */
-static struct pci_device_id advansys_pci_tbl[] = {
+static const struct pci_device_id advansys_pci_tbl[] = {
{PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_1200A,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940,
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 3e3100dbfda3..f9372a81cd4e 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -6181,7 +6181,7 @@ ahd_shutdown(void *arg)
/*
* Stop periodic timer callbacks.
*/
- del_timer_sync(&ahd->stat_timer);
+ timer_delete_sync(&ahd->stat_timer);
/* This will reset most registers to 0, but not all */
ahd_reset(ahd, /*reinit*/FALSE);
@@ -6975,7 +6975,7 @@ static const char *termstat_strings[] = {
static void
ahd_timer_reset(struct timer_list *timer, int usec)
{
- del_timer(timer);
+ timer_delete(timer);
timer->expires = jiffies + (usec * HZ)/1000000;
add_timer(timer);
}
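The aic79xx core change is part of a tree-wide rename of the timer API: del_timer() becomes timer_delete() and del_timer_sync() becomes timer_delete_sync(), with unchanged semantics. A minimal sketch of the two call sites in the new spelling (the struct and function names are hypothetical):

	struct foo_softc {
		struct timer_list stat_timer;
	};

	/* tear-down path: wait for a running handler before freeing anything */
	static void foo_shutdown(struct foo_softc *sc)
	{
		timer_delete_sync(&sc->stat_timer);
	}

	/* re-arm path: the non-syncing variant, as in ahd_timer_reset() above */
	static void foo_timer_reset(struct timer_list *timer, int usec)
	{
		timer_delete(timer);
		timer->expires = jiffies + (usec * HZ) / 1000000;
		add_timer(timer);
	}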
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 4202059815a0..17dfc3c72110 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -672,7 +672,7 @@ ahd_linux_target_destroy(struct scsi_target *starget)
}
static int
-ahd_linux_slave_alloc(struct scsi_device *sdev)
+ahd_linux_sdev_init(struct scsi_device *sdev)
{
struct ahd_softc *ahd =
*((struct ahd_softc **)sdev->host->hostdata);
@@ -701,7 +701,7 @@ ahd_linux_slave_alloc(struct scsi_device *sdev)
}
static int
-ahd_linux_slave_configure(struct scsi_device *sdev)
+ahd_linux_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
if (bootverbose)
sdev_printk(KERN_INFO, sdev, "Slave Configure\n");
@@ -906,8 +906,8 @@ struct scsi_host_template aic79xx_driver_template = {
.this_id = -1,
.max_sectors = 8192,
.cmd_per_lun = 2,
- .slave_alloc = ahd_linux_slave_alloc,
- .slave_configure = ahd_linux_slave_configure,
+ .sdev_init = ahd_linux_sdev_init,
+ .sdev_configure = ahd_linux_sdev_configure,
.target_alloc = ahd_linux_target_alloc,
.target_destroy = ahd_linux_target_destroy,
};
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index b0c4f2345321..cebf8c5d0caf 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -632,7 +632,7 @@ ahc_linux_target_destroy(struct scsi_target *starget)
}
static int
-ahc_linux_slave_alloc(struct scsi_device *sdev)
+ahc_linux_sdev_init(struct scsi_device *sdev)
{
struct ahc_softc *ahc =
*((struct ahc_softc **)sdev->host->hostdata);
@@ -664,7 +664,7 @@ ahc_linux_slave_alloc(struct scsi_device *sdev)
}
static int
-ahc_linux_slave_configure(struct scsi_device *sdev)
+ahc_linux_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
if (bootverbose)
sdev_printk(KERN_INFO, sdev, "Slave Configure\n");
@@ -791,8 +791,8 @@ struct scsi_host_template aic7xxx_driver_template = {
.this_id = -1,
.max_sectors = 8192,
.cmd_per_lun = 2,
- .slave_alloc = ahc_linux_slave_alloc,
- .slave_configure = ahc_linux_slave_configure,
+ .sdev_init = ahc_linux_sdev_init,
+ .sdev_configure = ahc_linux_sdev_configure,
.target_alloc = ahc_linux_target_alloc,
.target_destroy = ahc_linux_target_destroy,
};
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
index 65182ad9cdf8..b1c9ce477cbd 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
@@ -102,6 +102,7 @@ static void add_conditional(symbol_t *symbol);
static void add_version(const char *verstring);
static int is_download_const(expression_t *immed);
static int is_location_address(symbol_t *symbol);
+int yylex();
void yyerror(const char *string);
#define SRAM_SYMNAME "SRAM_BASE"
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
index 8c0479865f04..5c7350eb5b5c 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
@@ -61,6 +61,7 @@
static symbol_t *macro_symbol;
static void add_macro_arg(const char *argtext, int position);
+int mmlex();
void mmerror(const char *string);
%}
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
index c78d4f68eea5..fc7e6c58148d 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
@@ -64,6 +64,9 @@ static char *string_buf_ptr;
static int parren_count;
static int quote_count;
static char buf[255];
+void mm_switch_to_buffer(YY_BUFFER_STATE);
+void mmparse();
+void mm_delete_buffer(YY_BUFFER_STATE);
%}
PATH ([/]*[-A-Za-z0-9_.])+
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 9dda296c0152..e74393357025 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -731,7 +731,7 @@ static void asd_dl_tasklet_handler(unsigned long data)
goto next_1;
} else if (ascb->scb->header.opcode == EMPTY_SCB) {
goto out;
- } else if (!ascb->uldd_timer && !del_timer(&ascb->timer)) {
+ } else if (!ascb->uldd_timer && !timer_delete(&ascb->timer)) {
goto next_1;
}
spin_lock_irqsave(&seq->pend_q_lock, flags);
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 538a5867e8ab..adf3d9145606 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -851,7 +851,7 @@ static void asd_free_queues(struct asd_ha_struct *asd_ha)
* times out. Apparently we don't wait for the CONTROL PHY
* to complete, so it doesn't matter if we kill the timer.
*/
- del_timer_sync(&ascb->timer);
+ timer_delete_sync(&ascb->timer);
WARN_ON(ascb->scb->header.opcode != CONTROL_PHY);
list_del_init(pos);
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index 27d32b8c2987..d45dbf98f25e 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -31,7 +31,7 @@ static int asd_enqueue_internal(struct asd_ascb *ascb,
res = asd_post_ascb_list(ascb->ha, ascb, 1);
if (unlikely(res))
- del_timer(&ascb->timer);
+ timer_delete(&ascb->timer);
return res;
}
@@ -58,7 +58,7 @@ static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
{
struct tasklet_completion_status *tcs = ascb->uldd_task;
ASD_DPRINTK("%s: here\n", __func__);
- if (!del_timer(&ascb->timer)) {
+ if (!timer_delete(&ascb->timer)) {
ASD_DPRINTK("%s: couldn't delete timer\n", __func__);
return;
}
@@ -303,7 +303,7 @@ static void asd_tmf_tasklet_complete(struct asd_ascb *ascb,
{
struct tasklet_completion_status *tcs;
- if (!del_timer(&ascb->timer))
+ if (!timer_delete(&ascb->timer))
return;
tcs = ascb->uldd_task;
diff --git a/drivers/scsi/am53c974.c b/drivers/scsi/am53c974.c
index fbb29dbb1e50..003e61831e33 100644
--- a/drivers/scsi/am53c974.c
+++ b/drivers/scsi/am53c974.c
@@ -513,7 +513,7 @@ static void pci_esp_remove_one(struct pci_dev *pdev)
scsi_host_put(esp->host);
}
-static struct pci_device_id am53c974_pci_tbl[] = {
+static const struct pci_device_id am53c974_pci_tbl[] = {
{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SCSI,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ }
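Most drivers in this series also constify their PCI ID tables; the table is only ever read by the PCI core, so it can live in read-only data. A minimal sketch of the constified form (the driver name and the 0x2020 device ID are hypothetical):

	static const struct pci_device_id foo_pci_tbl[] = {
		{ PCI_VDEVICE(AMD, 0x2020) },	/* hypothetical device */
		{ }				/* terminating entry */
	};
	MODULE_DEVICE_TABLE(pci, foo_pci_tbl);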
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index baeb5e795690..8e3d4799ce93 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -60,7 +60,7 @@
static ssize_t arcmsr_sysfs_iop_message_read(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin,
+ const struct bin_attribute *bin,
char *buf, loff_t off,
size_t count)
{
@@ -107,7 +107,7 @@ static ssize_t arcmsr_sysfs_iop_message_read(struct file *filp,
static ssize_t arcmsr_sysfs_iop_message_write(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin,
+ const struct bin_attribute *bin,
char *buf, loff_t off,
size_t count)
{
@@ -155,7 +155,7 @@ static ssize_t arcmsr_sysfs_iop_message_write(struct file *filp,
static ssize_t arcmsr_sysfs_iop_message_clear(struct file *filp,
struct kobject *kobj,
- struct bin_attribute *bin,
+ const struct bin_attribute *bin,
char *buf, loff_t off,
size_t count)
{
@@ -194,7 +194,7 @@ static const struct bin_attribute arcmsr_sysfs_message_read_attr = {
.mode = S_IRUSR ,
},
.size = ARCMSR_API_DATA_BUFLEN,
- .read = arcmsr_sysfs_iop_message_read,
+ .read_new = arcmsr_sysfs_iop_message_read,
};
static const struct bin_attribute arcmsr_sysfs_message_write_attr = {
@@ -203,7 +203,7 @@ static const struct bin_attribute arcmsr_sysfs_message_write_attr = {
.mode = S_IWUSR,
},
.size = ARCMSR_API_DATA_BUFLEN,
- .write = arcmsr_sysfs_iop_message_write,
+ .write_new = arcmsr_sysfs_iop_message_write,
};
static const struct bin_attribute arcmsr_sysfs_message_clear_attr = {
@@ -212,7 +212,7 @@ static const struct bin_attribute arcmsr_sysfs_message_clear_attr = {
.mode = S_IWUSR,
},
.size = 1,
- .write = arcmsr_sysfs_iop_message_clear,
+ .write_new = arcmsr_sysfs_iop_message_clear,
};
int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *acb)
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 35860c61468b..b450b1fc6bbb 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -143,7 +143,8 @@ static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb);
static void arcmsr_set_iop_datetime(struct timer_list *);
-static int arcmsr_slave_config(struct scsi_device *sdev);
+static int arcmsr_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim);
static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
{
if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
@@ -160,7 +161,7 @@ static const struct scsi_host_template arcmsr_scsi_host_template = {
.eh_abort_handler = arcmsr_abort,
.eh_bus_reset_handler = arcmsr_bus_reset,
.bios_param = arcmsr_bios_param,
- .slave_configure = arcmsr_slave_config,
+ .sdev_configure = arcmsr_sdev_configure,
.change_queue_depth = arcmsr_adjust_disk_queue_depth,
.can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD,
.this_id = ARCMSR_SCSI_INITIATOR_ID,
@@ -171,7 +172,7 @@ static const struct scsi_host_template arcmsr_scsi_host_template = {
.no_write_same = 1,
};
-static struct pci_device_id arcmsr_device_id_table[] = {
+static const struct pci_device_id arcmsr_device_id_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110),
.driver_data = ACB_ADAPTER_TYPE_A},
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120),
@@ -1044,7 +1045,7 @@ static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb)
static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb)
{
timer_setup(&pacb->refresh_timer, arcmsr_set_iop_datetime, 0);
- pacb->refresh_timer.expires = jiffies + msecs_to_jiffies(60 * 1000);
+ pacb->refresh_timer.expires = jiffies + secs_to_jiffies(60);
add_timer(&pacb->refresh_timer);
}
@@ -1160,8 +1161,8 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
out_free_sysfs:
if (set_date_time)
- del_timer_sync(&acb->refresh_timer);
- del_timer_sync(&acb->eternal_timer);
+ timer_delete_sync(&acb->refresh_timer);
+ timer_delete_sync(&acb->eternal_timer);
flush_work(&acb->arcmsr_do_message_isr_bh);
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
@@ -1203,9 +1204,9 @@ static int __maybe_unused arcmsr_suspend(struct device *dev)
arcmsr_disable_outbound_ints(acb);
arcmsr_free_irq(pdev, acb);
- del_timer_sync(&acb->eternal_timer);
+ timer_delete_sync(&acb->eternal_timer);
if (set_date_time)
- del_timer_sync(&acb->refresh_timer);
+ timer_delete_sync(&acb->refresh_timer);
flush_work(&acb->arcmsr_do_message_isr_bh);
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
@@ -1684,9 +1685,9 @@ static void arcmsr_free_pcidev(struct AdapterControlBlock *acb)
arcmsr_free_sysfs_attr(acb);
scsi_remove_host(host);
flush_work(&acb->arcmsr_do_message_isr_bh);
- del_timer_sync(&acb->eternal_timer);
+ timer_delete_sync(&acb->eternal_timer);
if (set_date_time)
- del_timer_sync(&acb->refresh_timer);
+ timer_delete_sync(&acb->refresh_timer);
pdev = acb->pdev;
arcmsr_free_irq(pdev, acb);
arcmsr_free_ccb_pool(acb);
@@ -1717,9 +1718,9 @@ static void arcmsr_remove(struct pci_dev *pdev)
arcmsr_free_sysfs_attr(acb);
scsi_remove_host(host);
flush_work(&acb->arcmsr_do_message_isr_bh);
- del_timer_sync(&acb->eternal_timer);
+ timer_delete_sync(&acb->eternal_timer);
if (set_date_time)
- del_timer_sync(&acb->refresh_timer);
+ timer_delete_sync(&acb->refresh_timer);
arcmsr_disable_outbound_ints(acb);
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
@@ -1764,9 +1765,9 @@ static void arcmsr_shutdown(struct pci_dev *pdev)
(struct AdapterControlBlock *)host->hostdata;
if (acb->acb_flags & ACB_F_ADAPTER_REMOVED)
return;
- del_timer_sync(&acb->eternal_timer);
+ timer_delete_sync(&acb->eternal_timer);
if (set_date_time)
- del_timer_sync(&acb->refresh_timer);
+ timer_delete_sync(&acb->refresh_timer);
arcmsr_disable_outbound_ints(acb);
arcmsr_free_irq(pdev, acb);
flush_work(&acb->arcmsr_do_message_isr_bh);
@@ -3344,7 +3345,8 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd)
static DEF_SCSI_QCMD(arcmsr_queue_command)
-static int arcmsr_slave_config(struct scsi_device *sdev)
+static int arcmsr_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
unsigned int dev_timeout;
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index e50a3dbf9de3..ef21b85cf014 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -591,7 +591,7 @@ datadir_t acornscsi_datadirection(int command)
case CHANGE_DEFINITION: case COMPARE: case COPY:
case COPY_VERIFY: case LOG_SELECT: case MODE_SELECT:
case MODE_SELECT_10: case SEND_DIAGNOSTIC: case WRITE_BUFFER:
- case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE:
+ case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE_6:
case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW:
case WRITE_6: case WRITE_10: case WRITE_VERIFY:
case UPDATE_BLOCK: case WRITE_LONG: case WRITE_SAME:
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 4ce0b2d73614..e0b55d869a35 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2331,7 +2331,7 @@ static void fas216_eh_timer(struct timer_list *t)
fas216_log(info, LOG_ERROR, "error handling timed out\n");
- del_timer(&info->eh_timer);
+ timer_delete(&info->eh_timer);
if (info->rst_bus_status == 0)
info->rst_bus_status = -1;
@@ -2532,7 +2532,7 @@ int fas216_eh_device_reset(struct scsi_cmnd *SCpnt)
*/
wait_event(info->eh_wait, info->rst_dev_status);
- del_timer_sync(&info->eh_timer);
+ timer_delete_sync(&info->eh_timer);
spin_lock_irqsave(&info->host_lock, flags);
info->rstSCpnt = NULL;
@@ -2622,7 +2622,7 @@ int fas216_eh_bus_reset(struct scsi_cmnd *SCpnt)
* Wait one second for the interrupt.
*/
wait_event(info->eh_wait, info->rst_bus_status);
- del_timer_sync(&info->eh_timer);
+ timer_delete_sync(&info->eh_timer);
fas216_log(info, LOG_ERROR, "bus reset complete: %s\n",
info->rst_bus_status == 1 ? "success" : "failed");
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 928151ec927a..401242912855 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -1743,7 +1743,7 @@ static const struct scsi_host_template atp870u_template = {
.max_sectors = ATP870U_MAX_SECTORS,
};
-static struct pci_device_id atp870u_id_table[] = {
+static const struct pci_device_id atp870u_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP885_DEVID) },
{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP880_DEVID1) },
{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP880_DEVID2) },
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 76a1e373386e..7d1b767d87fb 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -5448,7 +5448,7 @@ static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev,
"BM_%d : EEH error detected\n");
/* first stop UE detection when PCI error detected */
- del_timer_sync(&phba->hw_check);
+ timer_delete_sync(&phba->hw_check);
cancel_delayed_work_sync(&phba->recover_port);
/* sessions are no longer valid, so first fail the sessions */
@@ -5746,7 +5746,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
}
/* first stop UE detection before unloading */
- del_timer_sync(&phba->hw_check);
+ timer_delete_sync(&phba->hw_check);
cancel_delayed_work_sync(&phba->recover_port);
cancel_work_sync(&phba->sess_work);
@@ -5776,7 +5776,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
}
-static struct pci_error_handlers beiscsi_eeh_handlers = {
+static const struct pci_error_handlers beiscsi_eeh_handlers = {
.error_detected = beiscsi_eeh_err_detected,
.slot_reset = beiscsi_eeh_reset,
.resume = beiscsi_eeh_resume,
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 6aa1d3a7e24b..598f2fc93ef2 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -327,7 +327,7 @@ bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event)
case BFAD_E_EXIT_COMP:
bfa_sm_set_state(bfad, bfad_sm_uninit);
bfad_remove_intr(bfad);
- del_timer_sync(&bfad->hal_tmo);
+ timer_delete_sync(&bfad->hal_tmo);
break;
default:
@@ -376,7 +376,7 @@ bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event)
case BFAD_E_EXIT_COMP:
bfa_sm_set_state(bfad, bfad_sm_uninit);
bfad_remove_intr(bfad);
- del_timer_sync(&bfad->hal_tmo);
+ timer_delete_sync(&bfad->hal_tmo);
bfad_im_probe_undo(bfad);
bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
bfad_uncfg_pport(bfad);
@@ -1421,7 +1421,7 @@ bfad_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
/* Suspend/fail all bfa operations */
bfa_ioc_suspend(&bfad->bfa.ioc);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
- del_timer_sync(&bfad->hal_tmo);
+ timer_delete_sync(&bfad->hal_tmo);
ret = PCI_ERS_RESULT_CAN_RECOVER;
break;
case pci_channel_io_frozen: /* fatal error */
@@ -1435,7 +1435,7 @@ bfad_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
wait_for_completion(&bfad->comp);
bfad_remove_intr(bfad);
- del_timer_sync(&bfad->hal_tmo);
+ timer_delete_sync(&bfad->hal_tmo);
pci_disable_device(pdev);
ret = PCI_ERS_RESULT_NEED_RESET;
break;
@@ -1566,7 +1566,7 @@ bfad_pci_mmio_enabled(struct pci_dev *pdev)
wait_for_completion(&bfad->comp);
bfad_remove_intr(bfad);
- del_timer_sync(&bfad->hal_tmo);
+ timer_delete_sync(&bfad->hal_tmo);
pci_disable_device(pdev);
return PCI_ERS_RESULT_NEED_RESET;
@@ -1642,7 +1642,7 @@ MODULE_DEVICE_TABLE(pci, bfad_id_table);
/*
* PCI error recovery handlers.
*/
-static struct pci_error_handlers bfad_err_handler = {
+static const struct pci_error_handlers bfad_err_handler = {
.error_detected = bfad_pci_error_detected,
.slot_reset = bfad_pci_slot_reset,
.mmio_enabled = bfad_pci_mmio_enabled,
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 66fb701401de..a719a18f0fbc 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -25,7 +25,7 @@ struct scsi_transport_template *bfad_im_scsi_transport_template;
struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
static void bfad_im_itnim_work_handler(struct work_struct *work);
static int bfad_im_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmnd);
-static int bfad_im_slave_alloc(struct scsi_device *sdev);
+static int bfad_im_sdev_init(struct scsi_device *sdev);
static void bfad_im_fc_rport_add(struct bfad_im_port_s *im_port,
struct bfad_itnim_s *itnim);
@@ -404,10 +404,10 @@ bfad_im_reset_target_handler(struct scsi_cmnd *cmnd)
}
/*
- * Scsi_Host template entry slave_destroy.
+ * Scsi_Host template entry sdev_destroy.
*/
static void
-bfad_im_slave_destroy(struct scsi_device *sdev)
+bfad_im_sdev_destroy(struct scsi_device *sdev)
{
sdev->hostdata = NULL;
return;
@@ -783,7 +783,7 @@ bfad_thread_workq(struct bfad_s *bfad)
* Return non-zero if fails.
*/
static int
-bfad_im_slave_configure(struct scsi_device *sdev)
+bfad_im_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
scsi_change_queue_depth(sdev, bfa_lun_queue_depth);
return 0;
@@ -800,9 +800,9 @@ struct scsi_host_template bfad_im_scsi_host_template = {
.eh_device_reset_handler = bfad_im_reset_lun_handler,
.eh_target_reset_handler = bfad_im_reset_target_handler,
- .slave_alloc = bfad_im_slave_alloc,
- .slave_configure = bfad_im_slave_configure,
- .slave_destroy = bfad_im_slave_destroy,
+ .sdev_init = bfad_im_sdev_init,
+ .sdev_configure = bfad_im_sdev_configure,
+ .sdev_destroy = bfad_im_sdev_destroy,
.this_id = -1,
.sg_tablesize = BFAD_IO_MAX_SGE,
@@ -823,9 +823,9 @@ struct scsi_host_template bfad_im_vport_template = {
.eh_device_reset_handler = bfad_im_reset_lun_handler,
.eh_target_reset_handler = bfad_im_reset_target_handler,
- .slave_alloc = bfad_im_slave_alloc,
- .slave_configure = bfad_im_slave_configure,
- .slave_destroy = bfad_im_slave_destroy,
+ .sdev_init = bfad_im_sdev_init,
+ .sdev_configure = bfad_im_sdev_configure,
+ .sdev_destroy = bfad_im_sdev_destroy,
.this_id = -1,
.sg_tablesize = BFAD_IO_MAX_SGE,
@@ -915,7 +915,7 @@ bfad_get_itnim(struct bfad_im_port_s *im_port, int id)
}
/*
- * Function is invoked from the SCSI Host Template slave_alloc() entry point.
+ * Function is invoked from the SCSI Host Template sdev_init() entry point.
* Has the logic to query the LUN Mask database to check if this LUN needs to
* be made visible to the SCSI mid-layer or not.
*
@@ -946,10 +946,10 @@ bfad_im_check_if_make_lun_visible(struct scsi_device *sdev,
}
/*
- * Scsi_Host template entry slave_alloc
+ * Scsi_Host template entry sdev_init
*/
static int
-bfad_im_slave_alloc(struct scsi_device *sdev)
+bfad_im_sdev_init(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
struct bfad_itnim_data_s *itnim_data;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index f49783b89d04..de6574cccf58 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -1599,7 +1599,7 @@ static void bnx2fc_interface_cleanup(struct bnx2fc_interface *interface)
struct bnx2fc_hba *hba = interface->hba;
/* Stop the transmit retry timer */
- del_timer_sync(&port->timer);
+ timer_delete_sync(&port->timer);
/* Free existing transmit skbs */
fcoe_clean_pending_queue(lport);
@@ -1938,7 +1938,7 @@ static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
if (signal_pending(current))
flush_signals(current);
- del_timer_sync(&hba->destroy_timer);
+ timer_delete_sync(&hba->destroy_timer);
}
bnx2fc_unbind_adapter_devices(hba);
}
@@ -2610,14 +2610,11 @@ static int bnx2fc_cpu_online(unsigned int cpu)
p = &per_cpu(bnx2fc_percpu, cpu);
- thread = kthread_create_on_node(bnx2fc_percpu_io_thread,
- (void *)p, cpu_to_node(cpu),
- "bnx2fc_thread/%d", cpu);
+ thread = kthread_create_on_cpu(bnx2fc_percpu_io_thread,
+ (void *)p, cpu, "bnx2fc_thread/%d");
if (IS_ERR(thread))
return PTR_ERR(thread);
- /* bind thread to the cpu */
- kthread_bind(thread, cpu);
p->iothread = thread;
wake_up_process(thread);
return 0;
@@ -2652,7 +2649,8 @@ static int bnx2fc_cpu_offline(unsigned int cpu)
return 0;
}
-static int bnx2fc_slave_configure(struct scsi_device *sdev)
+static int bnx2fc_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
if (!bnx2fc_queue_depth)
return 0;
@@ -2951,7 +2949,7 @@ static struct scsi_host_template bnx2fc_shost_template = {
.eh_device_reset_handler = bnx2fc_eh_device_reset, /* lun reset */
.eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */
.eh_host_reset_handler = fc_eh_host_reset,
- .slave_alloc = fc_slave_alloc,
+ .sdev_init = fc_sdev_init,
.change_queue_depth = scsi_change_queue_depth,
.this_id = -1,
.cmd_per_lun = 3,
@@ -2959,7 +2957,7 @@ static struct scsi_host_template bnx2fc_shost_template = {
.dma_boundary = 0x7fff,
.max_sectors = 0x3fbf,
.track_queue_depth = 1,
- .slave_configure = bnx2fc_slave_configure,
+ .sdev_configure = bnx2fc_sdev_configure,
.shost_groups = bnx2fc_host_groups,
.cmd_size = sizeof(struct bnx2fc_priv),
};
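bnx2fc (and bnx2i below) drop the kthread_create_on_node() + kthread_bind() pair in favour of kthread_create_on_cpu(), which creates the per-CPU I/O thread already bound to the requested CPU and substitutes the CPU number into the thread name. A minimal sketch of the resulting hotplug-online path, mirroring the hunk above:

	struct task_struct *thread;

	thread = kthread_create_on_cpu(bnx2fc_percpu_io_thread, (void *)p,
				       cpu, "bnx2fc_thread/%d");
	if (IS_ERR(thread))
		return PTR_ERR(thread);

	p->iothread = thread;
	wake_up_process(thread);	/* runs already bound to @cpu */
	return 0;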
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index eb3209103312..b8227cfef64f 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -74,7 +74,7 @@ static void bnx2fc_ofld_wait(struct bnx2fc_rport *tgt)
&tgt->flags)));
if (signal_pending(current))
flush_signals(current);
- del_timer_sync(&tgt->ofld_timer);
+ timer_delete_sync(&tgt->ofld_timer);
}
static void bnx2fc_offload_session(struct fcoe_port *port,
@@ -283,7 +283,7 @@ static void bnx2fc_upld_wait(struct bnx2fc_rport *tgt)
&tgt->flags)));
if (signal_pending(current))
flush_signals(current);
- del_timer_sync(&tgt->upld_timer);
+ timer_delete_sync(&tgt->upld_timer);
}
static void bnx2fc_upload_session(struct fcoe_port *port,
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 872ad37e2a6e..cecc3a026762 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -415,14 +415,11 @@ static int bnx2i_cpu_online(unsigned int cpu)
p = &per_cpu(bnx2i_percpu, cpu);
- thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p,
- cpu_to_node(cpu),
- "bnx2i_thread/%d", cpu);
+ thread = kthread_create_on_cpu(bnx2i_percpu_io_thread, (void *)p,
+ cpu, "bnx2i_thread/%d");
if (IS_ERR(thread))
return PTR_ERR(thread);
- /* bind thread to the cpu */
- kthread_bind(thread, cpu);
p->iothread = thread;
wake_up_process(thread);
return 0;
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 9971f32a663c..6c80e5b514fd 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1626,7 +1626,7 @@ static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn)
if (signal_pending(current))
flush_signals(current);
- del_timer_sync(&bnx2i_conn->ep->ofld_timer);
+ timer_delete_sync(&bnx2i_conn->ep->ofld_timer);
iscsi_conn_start(cls_conn);
return 0;
@@ -1749,7 +1749,7 @@ static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
if (signal_pending(current))
flush_signals(current);
- del_timer_sync(&ep->ofld_timer);
+ timer_delete_sync(&ep->ofld_timer);
bnx2i_ep_destroy_list_del(hba, ep);
@@ -1861,7 +1861,7 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
if (signal_pending(current))
flush_signals(current);
- del_timer_sync(&bnx2i_ep->ofld_timer);
+ timer_delete_sync(&bnx2i_ep->ofld_timer);
bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
@@ -2100,7 +2100,7 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
if (signal_pending(current))
flush_signals(current);
- del_timer_sync(&bnx2i_ep->ofld_timer);
+ timer_delete_sync(&bnx2i_ep->ofld_timer);
destroy_conn:
bnx2i_ep_active_list_del(hba, bnx2i_ep);
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index e43c5413ce29..beded091dff1 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -3701,7 +3701,7 @@ csio_mberr_worker(void *data)
struct csio_mb *mbp_next;
int rv;
- del_timer_sync(&mbm->timer);
+ timer_delete_sync(&mbm->timer);
spin_lock_irq(&hw->lock);
if (list_empty(&mbm->cbfn_q)) {
@@ -4210,7 +4210,7 @@ csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw)
static void
csio_mgmtm_exit(struct csio_mgmtm *mgmtm)
{
- del_timer_sync(&mgmtm->mgmt_timer);
+ timer_delete_sync(&mgmtm->mgmt_timer);
}
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 9a3f2ed050bd..79c8dafdd49e 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -1162,7 +1162,7 @@ err_resume_exit:
dev_err(&pdev->dev, "resume of device failed: %d\n", rv);
}
-static struct pci_error_handlers csio_err_handler = {
+static const struct pci_error_handlers csio_err_handler = {
.error_detected = csio_pci_error_detected,
.slot_reset = csio_pci_slot_reset,
.resume = csio_pci_resume,
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c
index 94810b19e747..c7b4c464f6b8 100644
--- a/drivers/scsi/csiostor/csio_mb.c
+++ b/drivers/scsi/csiostor/csio_mb.c
@@ -1619,7 +1619,7 @@ csio_mb_cancel_all(struct csio_hw *hw, struct list_head *cbfn_q)
mbp = mbm->mcurrent;
/* Stop mailbox completion timer */
- del_timer_sync(&mbm->timer);
+ timer_delete_sync(&mbm->timer);
/* Add completion to tail of cbfn queue */
list_add_tail(&mbp->list, cbfn_q);
@@ -1682,7 +1682,7 @@ csio_mbm_init(struct csio_mbm *mbm, struct csio_hw *hw,
void
csio_mbm_exit(struct csio_mbm *mbm)
{
- del_timer_sync(&mbm->timer);
+ timer_delete_sync(&mbm->timer);
CSIO_DB_ASSERT(mbm->mcurrent == NULL);
CSIO_DB_ASSERT(list_empty(&mbm->req_q));
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 8329f0cab4e7..34bde6650fae 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -800,7 +800,7 @@ csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
rn = req->rnode;
/*
* FW says remote device is lost, but rnode
- * doesnt reflect it.
+ * doesn't reflect it.
*/
if (csio_scsi_itnexus_loss_error(req->wr_status) &&
csio_is_rnode_ready(rn)) {
@@ -2224,7 +2224,7 @@ fail:
}
static int
-csio_slave_alloc(struct scsi_device *sdev)
+csio_sdev_init(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
@@ -2237,14 +2237,14 @@ csio_slave_alloc(struct scsi_device *sdev)
}
static int
-csio_slave_configure(struct scsi_device *sdev)
+csio_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
scsi_change_queue_depth(sdev, csio_lun_qdepth);
return 0;
}
static void
-csio_slave_destroy(struct scsi_device *sdev)
+csio_sdev_destroy(struct scsi_device *sdev)
{
sdev->hostdata = NULL;
}
@@ -2276,9 +2276,9 @@ struct scsi_host_template csio_fcoe_shost_template = {
.eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = csio_eh_abort_handler,
.eh_device_reset_handler = csio_eh_lun_reset_handler,
- .slave_alloc = csio_slave_alloc,
- .slave_configure = csio_slave_configure,
- .slave_destroy = csio_slave_destroy,
+ .sdev_init = csio_sdev_init,
+ .sdev_configure = csio_sdev_configure,
+ .sdev_destroy = csio_sdev_destroy,
.scan_finished = csio_scan_finished,
.this_id = -1,
.sg_tablesize = CSIO_SCSI_MAX_SGE,
@@ -2295,9 +2295,9 @@ struct scsi_host_template csio_fcoe_shost_vport_template = {
.eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = csio_eh_abort_handler,
.eh_device_reset_handler = csio_eh_lun_reset_handler,
- .slave_alloc = csio_slave_alloc,
- .slave_configure = csio_slave_configure,
- .slave_destroy = csio_slave_destroy,
+ .sdev_init = csio_sdev_init,
+ .sdev_configure = csio_sdev_configure,
+ .sdev_destroy = csio_sdev_destroy,
.scan_finished = csio_scan_finished,
.this_id = -1,
.sg_tablesize = CSIO_SCSI_MAX_SGE,
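
The slave_alloc/slave_configure/slave_destroy host-template methods were renamed to sdev_init/sdev_configure/sdev_destroy, and sdev_configure now also receives the queue limits being built for the device so a driver can adjust them at configure time. A minimal sketch of the converted callbacks, with illustrative names (only the csio_* conversion above is from this patch):

#include <linux/module.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static int my_sdev_init(struct scsi_device *sdev)
{
	/* per-device allocation/lookup would go here */
	return 0;
}

static int my_sdev_configure(struct scsi_device *sdev,
			     struct queue_limits *lim)
{
	scsi_change_queue_depth(sdev, 32);	/* example queue depth */
	return 0;
}

static void my_sdev_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
}

static struct scsi_host_template my_sht = {
	.module		= THIS_MODULE,
	.name		= "my_driver",
	.sdev_init	= my_sdev_init,
	.sdev_configure	= my_sdev_configure,
	.sdev_destroy	= my_sdev_destroy,
	.this_id	= -1,
};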
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index ec6530240707..461d38e2fb19 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -495,7 +495,7 @@ static int do_act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
spin_lock_bh(&csk->lock);
if (csk->retry_timer.function) {
- del_timer(&csk->retry_timer);
+ timer_delete(&csk->retry_timer);
csk->retry_timer.function = NULL;
}
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index c07d2e3b4bcf..aaba294ecb58 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -930,7 +930,7 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
csk, csk->state, csk->flags, csk->tid);
if (csk->retry_timer.function) {
- del_timer(&csk->retry_timer);
+ timer_delete(&csk->retry_timer);
csk->retry_timer.function = NULL;
}
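
The two cxgbi hunks above use the non-waiting variant: timer_delete() is the rename of del_timer() and only deactivates a pending timer, without waiting for a running handler the way timer_delete_sync() does. A sketch of that pattern, assuming a hypothetical socket-like object whose lock is also taken by the timer handler (as the function-pointer check under csk->lock in the hunks suggests):

#include <linux/spinlock.h>
#include <linux/timer.h>

struct my_sock {			/* hypothetical stand-in for cxgbi_sock */
	spinlock_t lock;
	struct timer_list retry_timer;
};

static void my_cancel_retry_timer(struct my_sock *csk)
{
	spin_lock_bh(&csk->lock);
	if (csk->retry_timer.function) {
		/* old del_timer(): deactivate if pending, do not wait */
		timer_delete(&csk->retry_timer);
		csk->retry_timer.function = NULL;
	}
	spin_unlock_bh(&csk->lock);
}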
diff --git a/drivers/scsi/cxlflash/Kconfig b/drivers/scsi/cxlflash/Kconfig
deleted file mode 100644
index 5533bdcb0458..000000000000
--- a/drivers/scsi/cxlflash/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# IBM CXL-attached Flash Accelerator SCSI Driver
-#
-
-config CXLFLASH
- tristate "Support for IBM CAPI Flash"
- depends on PCI && SCSI && (CXL || OCXL) && EEH
- select IRQ_POLL
- default m
- help
- Allows CAPI Accelerated IO to Flash
- If unsure, say N.
diff --git a/drivers/scsi/cxlflash/Makefile b/drivers/scsi/cxlflash/Makefile
deleted file mode 100644
index fd2f0dd9daf9..000000000000
--- a/drivers/scsi/cxlflash/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_CXLFLASH) += cxlflash.o
-cxlflash-y += main.o superpipe.o lunmgt.o vlun.o
-cxlflash-$(CONFIG_CXL) += cxl_hw.o
-cxlflash-$(CONFIG_OCXL) += ocxl_hw.o
diff --git a/drivers/scsi/cxlflash/backend.h b/drivers/scsi/cxlflash/backend.h
deleted file mode 100644
index 181e0445ed42..000000000000
--- a/drivers/scsi/cxlflash/backend.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2018 IBM Corporation
- */
-
-#ifndef _CXLFLASH_BACKEND_H
-#define _CXLFLASH_BACKEND_H
-
-extern const struct cxlflash_backend_ops cxlflash_cxl_ops;
-extern const struct cxlflash_backend_ops cxlflash_ocxl_ops;
-
-struct cxlflash_backend_ops {
- struct module *module;
- void __iomem * (*psa_map)(void *ctx_cookie);
- void (*psa_unmap)(void __iomem *addr);
- int (*process_element)(void *ctx_cookie);
- int (*map_afu_irq)(void *ctx_cookie, int num, irq_handler_t handler,
- void *cookie, char *name);
- void (*unmap_afu_irq)(void *ctx_cookie, int num, void *cookie);
- u64 (*get_irq_objhndl)(void *ctx_cookie, int irq);
- int (*start_context)(void *ctx_cookie);
- int (*stop_context)(void *ctx_cookie);
- int (*afu_reset)(void *ctx_cookie);
- void (*set_master)(void *ctx_cookie);
- void * (*get_context)(struct pci_dev *dev, void *afu_cookie);
- void * (*dev_context_init)(struct pci_dev *dev, void *afu_cookie);
- int (*release_context)(void *ctx_cookie);
- void (*perst_reloads_same_image)(void *afu_cookie, bool image);
- ssize_t (*read_adapter_vpd)(struct pci_dev *dev, void *buf,
- size_t count);
- int (*allocate_afu_irqs)(void *ctx_cookie, int num);
- void (*free_afu_irqs)(void *ctx_cookie);
- void * (*create_afu)(struct pci_dev *dev);
- void (*destroy_afu)(void *afu_cookie);
- struct file * (*get_fd)(void *ctx_cookie, struct file_operations *fops,
- int *fd);
- void * (*fops_get_context)(struct file *file);
- int (*start_work)(void *ctx_cookie, u64 irqs);
- int (*fd_mmap)(struct file *file, struct vm_area_struct *vm);
- int (*fd_release)(struct inode *inode, struct file *file);
-};
-
-#endif /* _CXLFLASH_BACKEND_H */
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
deleted file mode 100644
index de6229e27b48..000000000000
--- a/drivers/scsi/cxlflash/common.h
+++ /dev/null
@@ -1,340 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#ifndef _CXLFLASH_COMMON_H
-#define _CXLFLASH_COMMON_H
-
-#include <linux/async.h>
-#include <linux/cdev.h>
-#include <linux/irq_poll.h>
-#include <linux/list.h>
-#include <linux/rwsem.h>
-#include <linux/types.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-
-#include "backend.h"
-
-extern const struct file_operations cxlflash_cxl_fops;
-
-#define MAX_CONTEXT CXLFLASH_MAX_CONTEXT /* num contexts per afu */
-#define MAX_FC_PORTS CXLFLASH_MAX_FC_PORTS /* max ports per AFU */
-#define LEGACY_FC_PORTS 2 /* legacy ports per AFU */
-
-#define CHAN2PORTBANK(_x) ((_x) >> ilog2(CXLFLASH_NUM_FC_PORTS_PER_BANK))
-#define CHAN2BANKPORT(_x) ((_x) & (CXLFLASH_NUM_FC_PORTS_PER_BANK - 1))
-
-#define CHAN2PORTMASK(_x) (1 << (_x)) /* channel to port mask */
-#define PORTMASK2CHAN(_x) (ilog2((_x))) /* port mask to channel */
-#define PORTNUM2CHAN(_x) ((_x) - 1) /* port number to channel */
-
-#define CXLFLASH_BLOCK_SIZE 4096 /* 4K blocks */
-#define CXLFLASH_MAX_XFER_SIZE 16777216 /* 16MB transfer */
-#define CXLFLASH_MAX_SECTORS (CXLFLASH_MAX_XFER_SIZE/512) /* SCSI wants
- * max_sectors
- * in units of
- * 512 byte
- * sectors
- */
-
-#define MAX_RHT_PER_CONTEXT (PAGE_SIZE / sizeof(struct sisl_rht_entry))
-
-/* AFU command retry limit */
-#define MC_RETRY_CNT 5 /* Sufficient for SCSI and certain AFU errors */
-
-/* Command management definitions */
-#define CXLFLASH_MAX_CMDS 256
-#define CXLFLASH_MAX_CMDS_PER_LUN CXLFLASH_MAX_CMDS
-
-/* RRQ for master issued cmds */
-#define NUM_RRQ_ENTRY CXLFLASH_MAX_CMDS
-
-/* SQ for master issued cmds */
-#define NUM_SQ_ENTRY CXLFLASH_MAX_CMDS
-
-/* Hardware queue definitions */
-#define CXLFLASH_DEF_HWQS 1
-#define CXLFLASH_MAX_HWQS 8
-#define PRIMARY_HWQ 0
-
-
-static inline void check_sizes(void)
-{
- BUILD_BUG_ON_NOT_POWER_OF_2(CXLFLASH_NUM_FC_PORTS_PER_BANK);
- BUILD_BUG_ON_NOT_POWER_OF_2(CXLFLASH_MAX_CMDS);
-}
-
-/* AFU defines a fixed size of 4K for command buffers (borrow 4K page define) */
-#define CMD_BUFSIZE SIZE_4K
-
-enum cxlflash_lr_state {
- LINK_RESET_INVALID,
- LINK_RESET_REQUIRED,
- LINK_RESET_COMPLETE
-};
-
-enum cxlflash_init_state {
- INIT_STATE_NONE,
- INIT_STATE_PCI,
- INIT_STATE_AFU,
- INIT_STATE_SCSI,
- INIT_STATE_CDEV
-};
-
-enum cxlflash_state {
- STATE_PROBING, /* Initial state during probe */
- STATE_PROBED, /* Temporary state, probe completed but EEH occurred */
- STATE_NORMAL, /* Normal running state, everything good */
- STATE_RESET, /* Reset state, trying to reset/recover */
- STATE_FAILTERM /* Failed/terminating state, error out users/threads */
-};
-
-enum cxlflash_hwq_mode {
- HWQ_MODE_RR, /* Roundrobin (default) */
- HWQ_MODE_TAG, /* Distribute based on block MQ tag */
- HWQ_MODE_CPU, /* CPU affinity */
- MAX_HWQ_MODE
-};
-
-/*
- * Each context has its own set of resource handles that is visible
- * only from that context.
- */
-
-struct cxlflash_cfg {
- struct afu *afu;
-
- const struct cxlflash_backend_ops *ops;
- struct pci_dev *dev;
- struct pci_device_id *dev_id;
- struct Scsi_Host *host;
- int num_fc_ports;
- struct cdev cdev;
- struct device *chardev;
-
- ulong cxlflash_regs_pci;
-
- struct work_struct work_q;
- enum cxlflash_init_state init_state;
- enum cxlflash_lr_state lr_state;
- int lr_port;
- atomic_t scan_host_needed;
-
- void *afu_cookie;
-
- atomic_t recovery_threads;
- struct mutex ctx_recovery_mutex;
- struct mutex ctx_tbl_list_mutex;
- struct rw_semaphore ioctl_rwsem;
- struct ctx_info *ctx_tbl[MAX_CONTEXT];
- struct list_head ctx_err_recovery; /* contexts w/ recovery pending */
- struct file_operations cxl_fops;
-
- /* Parameters that are LUN table related */
- int last_lun_index[MAX_FC_PORTS];
- int promote_lun_index;
- struct list_head lluns; /* list of llun_info structs */
-
- wait_queue_head_t tmf_waitq;
- spinlock_t tmf_slock;
- bool tmf_active;
- bool ws_unmap; /* Write-same unmap supported */
- wait_queue_head_t reset_waitq;
- enum cxlflash_state state;
- async_cookie_t async_reset_cookie;
-};
-
-struct afu_cmd {
- struct sisl_ioarcb rcb; /* IOARCB (cache line aligned) */
- struct sisl_ioasa sa; /* IOASA must follow IOARCB */
- struct afu *parent;
- struct scsi_cmnd *scp;
- struct completion cevent;
- struct list_head queue;
- u32 hwq_index;
-
- u8 cmd_tmf:1,
- cmd_aborted:1;
-
- struct list_head list; /* Pending commands link */
-
- /* As per the SISLITE spec the IOARCB EA has to be 16-byte aligned.
- * However for performance reasons the IOARCB/IOASA should be
- * cache line aligned.
- */
-} __aligned(cache_line_size());
-
-static inline struct afu_cmd *sc_to_afuc(struct scsi_cmnd *sc)
-{
- return PTR_ALIGN(scsi_cmd_priv(sc), __alignof__(struct afu_cmd));
-}
-
-static inline struct afu_cmd *sc_to_afuci(struct scsi_cmnd *sc)
-{
- struct afu_cmd *afuc = sc_to_afuc(sc);
-
- INIT_LIST_HEAD(&afuc->queue);
- return afuc;
-}
-
-static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc)
-{
- struct afu_cmd *afuc = sc_to_afuc(sc);
-
- memset(afuc, 0, sizeof(*afuc));
- return sc_to_afuci(sc);
-}
-
-struct hwq {
- /* Stuff requiring alignment go first. */
- struct sisl_ioarcb sq[NUM_SQ_ENTRY]; /* 16K SQ */
- u64 rrq_entry[NUM_RRQ_ENTRY]; /* 2K RRQ */
-
- /* Beware of alignment till here. Preferably introduce new
- * fields after this point
- */
- struct afu *afu;
- void *ctx_cookie;
- struct sisl_host_map __iomem *host_map; /* MC host map */
- struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */
- ctx_hndl_t ctx_hndl; /* master's context handle */
- u32 index; /* Index of this hwq */
- int num_irqs; /* Number of interrupts requested for context */
- struct list_head pending_cmds; /* Commands pending completion */
-
- atomic_t hsq_credits;
- spinlock_t hsq_slock; /* Hardware send queue lock */
- struct sisl_ioarcb *hsq_start;
- struct sisl_ioarcb *hsq_end;
- struct sisl_ioarcb *hsq_curr;
- spinlock_t hrrq_slock;
- u64 *hrrq_start;
- u64 *hrrq_end;
- u64 *hrrq_curr;
- bool toggle;
- bool hrrq_online;
-
- s64 room;
-
- struct irq_poll irqpoll;
-} __aligned(cache_line_size());
-
-struct afu {
- struct hwq hwqs[CXLFLASH_MAX_HWQS];
- int (*send_cmd)(struct afu *afu, struct afu_cmd *cmd);
- int (*context_reset)(struct hwq *hwq);
-
- /* AFU HW */
- struct cxlflash_afu_map __iomem *afu_map; /* entire MMIO map */
-
- atomic_t cmds_active; /* Number of currently active AFU commands */
- struct mutex sync_active; /* Mutex to serialize AFU commands */
- u64 hb;
- u32 internal_lun; /* User-desired LUN mode for this AFU */
-
- u32 num_hwqs; /* Number of hardware queues */
- u32 desired_hwqs; /* Desired h/w queues, effective on AFU reset */
- enum cxlflash_hwq_mode hwq_mode; /* Steering mode for h/w queues */
- u32 hwq_rr_count; /* Count to distribute traffic for roundrobin */
-
- char version[16];
- u64 interface_version;
-
- u32 irqpoll_weight;
- struct cxlflash_cfg *parent; /* Pointer back to parent cxlflash_cfg */
-};
-
-static inline struct hwq *get_hwq(struct afu *afu, u32 index)
-{
- WARN_ON(index >= CXLFLASH_MAX_HWQS);
-
- return &afu->hwqs[index];
-}
-
-static inline bool afu_is_irqpoll_enabled(struct afu *afu)
-{
- return !!afu->irqpoll_weight;
-}
-
-static inline bool afu_has_cap(struct afu *afu, u64 cap)
-{
- u64 afu_cap = afu->interface_version >> SISL_INTVER_CAP_SHIFT;
-
- return afu_cap & cap;
-}
-
-static inline bool afu_is_ocxl_lisn(struct afu *afu)
-{
- return afu_has_cap(afu, SISL_INTVER_CAP_OCXL_LISN);
-}
-
-static inline bool afu_is_afu_debug(struct afu *afu)
-{
- return afu_has_cap(afu, SISL_INTVER_CAP_AFU_DEBUG);
-}
-
-static inline bool afu_is_lun_provision(struct afu *afu)
-{
- return afu_has_cap(afu, SISL_INTVER_CAP_LUN_PROVISION);
-}
-
-static inline bool afu_is_sq_cmd_mode(struct afu *afu)
-{
- return afu_has_cap(afu, SISL_INTVER_CAP_SQ_CMD_MODE);
-}
-
-static inline bool afu_is_ioarrin_cmd_mode(struct afu *afu)
-{
- return afu_has_cap(afu, SISL_INTVER_CAP_IOARRIN_CMD_MODE);
-}
-
-static inline u64 lun_to_lunid(u64 lun)
-{
- __be64 lun_id;
-
- int_to_scsilun(lun, (struct scsi_lun *)&lun_id);
- return be64_to_cpu(lun_id);
-}
-
-static inline struct fc_port_bank __iomem *get_fc_port_bank(
- struct cxlflash_cfg *cfg, int i)
-{
- struct afu *afu = cfg->afu;
-
- return &afu->afu_map->global.bank[CHAN2PORTBANK(i)];
-}
-
-static inline __be64 __iomem *get_fc_port_regs(struct cxlflash_cfg *cfg, int i)
-{
- struct fc_port_bank __iomem *fcpb = get_fc_port_bank(cfg, i);
-
- return &fcpb->fc_port_regs[CHAN2BANKPORT(i)][0];
-}
-
-static inline __be64 __iomem *get_fc_port_luns(struct cxlflash_cfg *cfg, int i)
-{
- struct fc_port_bank __iomem *fcpb = get_fc_port_bank(cfg, i);
-
- return &fcpb->fc_port_luns[CHAN2BANKPORT(i)][0];
-}
-
-int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t c, res_hndl_t r, u8 mode);
-void cxlflash_list_init(void);
-void cxlflash_term_global_luns(void);
-void cxlflash_free_errpage(void);
-int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd,
- void __user *arg);
-void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg);
-int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg);
-void cxlflash_term_local_luns(struct cxlflash_cfg *cfg);
-void cxlflash_restore_luntable(struct cxlflash_cfg *cfg);
-
-#endif /* ifndef _CXLFLASH_COMMON_H */
diff --git a/drivers/scsi/cxlflash/cxl_hw.c b/drivers/scsi/cxlflash/cxl_hw.c
deleted file mode 100644
index b814130f3f5c..000000000000
--- a/drivers/scsi/cxlflash/cxl_hw.c
+++ /dev/null
@@ -1,177 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CXL Flash Device Driver
- *
- * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2018 IBM Corporation
- */
-
-#include <misc/cxl.h>
-
-#include "backend.h"
-
-/*
- * The following routines map the cxlflash backend operations to existing CXL
- * kernel API function and are largely simple shims that provide an abstraction
- * for converting generic context and AFU cookies into cxl_context or cxl_afu
- * pointers.
- */
-
-static void __iomem *cxlflash_psa_map(void *ctx_cookie)
-{
- return cxl_psa_map(ctx_cookie);
-}
-
-static void cxlflash_psa_unmap(void __iomem *addr)
-{
- cxl_psa_unmap(addr);
-}
-
-static int cxlflash_process_element(void *ctx_cookie)
-{
- return cxl_process_element(ctx_cookie);
-}
-
-static int cxlflash_map_afu_irq(void *ctx_cookie, int num,
- irq_handler_t handler, void *cookie, char *name)
-{
- return cxl_map_afu_irq(ctx_cookie, num, handler, cookie, name);
-}
-
-static void cxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
-{
- cxl_unmap_afu_irq(ctx_cookie, num, cookie);
-}
-
-static u64 cxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
-{
- /* Dummy fop for cxl */
- return 0;
-}
-
-static int cxlflash_start_context(void *ctx_cookie)
-{
- return cxl_start_context(ctx_cookie, 0, NULL);
-}
-
-static int cxlflash_stop_context(void *ctx_cookie)
-{
- return cxl_stop_context(ctx_cookie);
-}
-
-static int cxlflash_afu_reset(void *ctx_cookie)
-{
- return cxl_afu_reset(ctx_cookie);
-}
-
-static void cxlflash_set_master(void *ctx_cookie)
-{
- cxl_set_master(ctx_cookie);
-}
-
-static void *cxlflash_get_context(struct pci_dev *dev, void *afu_cookie)
-{
- return cxl_get_context(dev);
-}
-
-static void *cxlflash_dev_context_init(struct pci_dev *dev, void *afu_cookie)
-{
- return cxl_dev_context_init(dev);
-}
-
-static int cxlflash_release_context(void *ctx_cookie)
-{
- return cxl_release_context(ctx_cookie);
-}
-
-static void cxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
-{
- cxl_perst_reloads_same_image(afu_cookie, image);
-}
-
-static ssize_t cxlflash_read_adapter_vpd(struct pci_dev *dev,
- void *buf, size_t count)
-{
- return cxl_read_adapter_vpd(dev, buf, count);
-}
-
-static int cxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
-{
- return cxl_allocate_afu_irqs(ctx_cookie, num);
-}
-
-static void cxlflash_free_afu_irqs(void *ctx_cookie)
-{
- cxl_free_afu_irqs(ctx_cookie);
-}
-
-static void *cxlflash_create_afu(struct pci_dev *dev)
-{
- return cxl_pci_to_afu(dev);
-}
-
-static void cxlflash_destroy_afu(void *afu)
-{
- /* Dummy fop for cxl */
-}
-
-static struct file *cxlflash_get_fd(void *ctx_cookie,
- struct file_operations *fops, int *fd)
-{
- return cxl_get_fd(ctx_cookie, fops, fd);
-}
-
-static void *cxlflash_fops_get_context(struct file *file)
-{
- return cxl_fops_get_context(file);
-}
-
-static int cxlflash_start_work(void *ctx_cookie, u64 irqs)
-{
- struct cxl_ioctl_start_work work = { 0 };
-
- work.num_interrupts = irqs;
- work.flags = CXL_START_WORK_NUM_IRQS;
-
- return cxl_start_work(ctx_cookie, &work);
-}
-
-static int cxlflash_fd_mmap(struct file *file, struct vm_area_struct *vm)
-{
- return cxl_fd_mmap(file, vm);
-}
-
-static int cxlflash_fd_release(struct inode *inode, struct file *file)
-{
- return cxl_fd_release(inode, file);
-}
-
-const struct cxlflash_backend_ops cxlflash_cxl_ops = {
- .module = THIS_MODULE,
- .psa_map = cxlflash_psa_map,
- .psa_unmap = cxlflash_psa_unmap,
- .process_element = cxlflash_process_element,
- .map_afu_irq = cxlflash_map_afu_irq,
- .unmap_afu_irq = cxlflash_unmap_afu_irq,
- .get_irq_objhndl = cxlflash_get_irq_objhndl,
- .start_context = cxlflash_start_context,
- .stop_context = cxlflash_stop_context,
- .afu_reset = cxlflash_afu_reset,
- .set_master = cxlflash_set_master,
- .get_context = cxlflash_get_context,
- .dev_context_init = cxlflash_dev_context_init,
- .release_context = cxlflash_release_context,
- .perst_reloads_same_image = cxlflash_perst_reloads_same_image,
- .read_adapter_vpd = cxlflash_read_adapter_vpd,
- .allocate_afu_irqs = cxlflash_allocate_afu_irqs,
- .free_afu_irqs = cxlflash_free_afu_irqs,
- .create_afu = cxlflash_create_afu,
- .destroy_afu = cxlflash_destroy_afu,
- .get_fd = cxlflash_get_fd,
- .fops_get_context = cxlflash_fops_get_context,
- .start_work = cxlflash_start_work,
- .fd_mmap = cxlflash_fd_mmap,
- .fd_release = cxlflash_fd_release,
-};
diff --git a/drivers/scsi/cxlflash/lunmgt.c b/drivers/scsi/cxlflash/lunmgt.c
deleted file mode 100644
index 962c797fda07..000000000000
--- a/drivers/scsi/cxlflash/lunmgt.c
+++ /dev/null
@@ -1,278 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#include <linux/unaligned.h>
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-
-#include <scsi/scsi_host.h>
-#include <uapi/scsi/cxlflash_ioctl.h>
-
-#include "sislite.h"
-#include "common.h"
-#include "vlun.h"
-#include "superpipe.h"
-
-/**
- * create_local() - allocate and initialize a local LUN information structure
- * @sdev: SCSI device associated with LUN.
- * @wwid: World Wide Node Name for LUN.
- *
- * Return: Allocated local llun_info structure on success, NULL on failure
- */
-static struct llun_info *create_local(struct scsi_device *sdev, u8 *wwid)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = NULL;
-
- lli = kzalloc(sizeof(*lli), GFP_KERNEL);
- if (unlikely(!lli)) {
- dev_err(dev, "%s: could not allocate lli\n", __func__);
- goto out;
- }
-
- lli->sdev = sdev;
- lli->host_no = sdev->host->host_no;
- lli->in_table = false;
-
- memcpy(lli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN);
-out:
- return lli;
-}
-
-/**
- * create_global() - allocate and initialize a global LUN information structure
- * @sdev: SCSI device associated with LUN.
- * @wwid: World Wide Node Name for LUN.
- *
- * Return: Allocated global glun_info structure on success, NULL on failure
- */
-static struct glun_info *create_global(struct scsi_device *sdev, u8 *wwid)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct glun_info *gli = NULL;
-
- gli = kzalloc(sizeof(*gli), GFP_KERNEL);
- if (unlikely(!gli)) {
- dev_err(dev, "%s: could not allocate gli\n", __func__);
- goto out;
- }
-
- mutex_init(&gli->mutex);
- memcpy(gli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN);
-out:
- return gli;
-}
-
-/**
- * lookup_local() - find a local LUN information structure by WWID
- * @cfg: Internal structure associated with the host.
- * @wwid: WWID associated with LUN.
- *
- * Return: Found local lun_info structure on success, NULL on failure
- */
-static struct llun_info *lookup_local(struct cxlflash_cfg *cfg, u8 *wwid)
-{
- struct llun_info *lli, *temp;
-
- list_for_each_entry_safe(lli, temp, &cfg->lluns, list)
- if (!memcmp(lli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN))
- return lli;
-
- return NULL;
-}
-
-/**
- * lookup_global() - find a global LUN information structure by WWID
- * @wwid: WWID associated with LUN.
- *
- * Return: Found global lun_info structure on success, NULL on failure
- */
-static struct glun_info *lookup_global(u8 *wwid)
-{
- struct glun_info *gli, *temp;
-
- list_for_each_entry_safe(gli, temp, &global.gluns, list)
- if (!memcmp(gli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN))
- return gli;
-
- return NULL;
-}
-
-/**
- * find_and_create_lun() - find or create a local LUN information structure
- * @sdev: SCSI device associated with LUN.
- * @wwid: WWID associated with LUN.
- *
- * The LUN is kept both in a local list (per adapter) and in a global list
- * (across all adapters). Certain attributes of the LUN are local to the
- * adapter (such as index, port selection mask, etc.).
- *
- * The block allocation map is shared across all adapters (i.e. associated
- * with the global list). Since different attributes are associated with
- * the per adapter and global entries, allocate two separate structures for each
- * LUN (one local, one global).
- *
- * Keep a pointer back from the local to the global entry.
- *
- * This routine assumes the caller holds the global mutex.
- *
- * Return: Found/Allocated local lun_info structure on success, NULL on failure
- */
-static struct llun_info *find_and_create_lun(struct scsi_device *sdev, u8 *wwid)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = NULL;
- struct glun_info *gli = NULL;
-
- if (unlikely(!wwid))
- goto out;
-
- lli = lookup_local(cfg, wwid);
- if (lli)
- goto out;
-
- lli = create_local(sdev, wwid);
- if (unlikely(!lli))
- goto out;
-
- gli = lookup_global(wwid);
- if (gli) {
- lli->parent = gli;
- list_add(&lli->list, &cfg->lluns);
- goto out;
- }
-
- gli = create_global(sdev, wwid);
- if (unlikely(!gli)) {
- kfree(lli);
- lli = NULL;
- goto out;
- }
-
- lli->parent = gli;
- list_add(&lli->list, &cfg->lluns);
-
- list_add(&gli->list, &global.gluns);
-
-out:
- dev_dbg(dev, "%s: returning lli=%p, gli=%p\n", __func__, lli, gli);
- return lli;
-}
-
-/**
- * cxlflash_term_local_luns() - Delete all entries from local LUN list, free.
- * @cfg: Internal structure associated with the host.
- */
-void cxlflash_term_local_luns(struct cxlflash_cfg *cfg)
-{
- struct llun_info *lli, *temp;
-
- mutex_lock(&global.mutex);
- list_for_each_entry_safe(lli, temp, &cfg->lluns, list) {
- list_del(&lli->list);
- kfree(lli);
- }
- mutex_unlock(&global.mutex);
-}
-
-/**
- * cxlflash_list_init() - initializes the global LUN list
- */
-void cxlflash_list_init(void)
-{
- INIT_LIST_HEAD(&global.gluns);
- mutex_init(&global.mutex);
- global.err_page = NULL;
-}
-
-/**
- * cxlflash_term_global_luns() - frees resources associated with global LUN list
- */
-void cxlflash_term_global_luns(void)
-{
- struct glun_info *gli, *temp;
-
- mutex_lock(&global.mutex);
- list_for_each_entry_safe(gli, temp, &global.gluns, list) {
- list_del(&gli->list);
- cxlflash_ba_terminate(&gli->blka.ba_lun);
- kfree(gli);
- }
- mutex_unlock(&global.mutex);
-}
-
-/**
- * cxlflash_manage_lun() - handles LUN management activities
- * @sdev: SCSI device associated with LUN.
- * @arg: Manage ioctl data structure.
- *
- * This routine is used to notify the driver about a LUN's WWID and associate
- * SCSI devices (sdev) with a global LUN instance. Additionally it serves to
- * change a LUN's operating mode: legacy or superpipe.
- *
- * Return: 0 on success, -errno on failure
- */
-int cxlflash_manage_lun(struct scsi_device *sdev, void *arg)
-{
- struct dk_cxlflash_manage_lun *manage = arg;
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = NULL;
- int rc = 0;
- u64 flags = manage->hdr.flags;
- u32 chan = sdev->channel;
-
- mutex_lock(&global.mutex);
- lli = find_and_create_lun(sdev, manage->wwid);
- dev_dbg(dev, "%s: WWID=%016llx%016llx, flags=%016llx lli=%p\n",
- __func__, get_unaligned_be64(&manage->wwid[0]),
- get_unaligned_be64(&manage->wwid[8]), manage->hdr.flags, lli);
- if (unlikely(!lli)) {
- rc = -ENOMEM;
- goto out;
- }
-
- if (flags & DK_CXLFLASH_MANAGE_LUN_ENABLE_SUPERPIPE) {
- /*
- * Update port selection mask based upon channel, store off LUN
- * in unpacked, AFU-friendly format, and hang LUN reference in
- * the sdev.
- */
- lli->port_sel |= CHAN2PORTMASK(chan);
- lli->lun_id[chan] = lun_to_lunid(sdev->lun);
- sdev->hostdata = lli;
- } else if (flags & DK_CXLFLASH_MANAGE_LUN_DISABLE_SUPERPIPE) {
- if (lli->parent->mode != MODE_NONE)
- rc = -EBUSY;
- else {
- /*
- * Clean up local LUN for this port and reset table
- * tracking when no more references exist.
- */
- sdev->hostdata = NULL;
- lli->port_sel &= ~CHAN2PORTMASK(chan);
- if (lli->port_sel == 0U)
- lli->in_table = false;
- }
- }
-
- dev_dbg(dev, "%s: port_sel=%08x chan=%u lun_id=%016llx\n",
- __func__, lli->port_sel, chan, lli->lun_id[chan]);
-
-out:
- mutex_unlock(&global.mutex);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
deleted file mode 100644
index 60d62b93d624..000000000000
--- a/drivers/scsi/cxlflash/main.c
+++ /dev/null
@@ -1,3968 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#include <linux/delay.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include <linux/unaligned.h>
-
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_host.h>
-#include <uapi/scsi/cxlflash_ioctl.h>
-
-#include "main.h"
-#include "sislite.h"
-#include "common.h"
-
-MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
-MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
-MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
-MODULE_LICENSE("GPL");
-
-static char *cxlflash_devnode(const struct device *dev, umode_t *mode);
-static const struct class cxlflash_class = {
- .name = "cxlflash",
- .devnode = cxlflash_devnode,
-};
-
-static u32 cxlflash_major;
-static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
-
-/**
- * process_cmd_err() - command error handler
- * @cmd: AFU command that experienced the error.
- * @scp: SCSI command associated with the AFU command in error.
- *
- * Translates error bits from AFU command to SCSI command results.
- */
-static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
-{
- struct afu *afu = cmd->parent;
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct sisl_ioasa *ioasa;
- u32 resid;
-
- ioasa = &(cmd->sa);
-
- if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
- resid = ioasa->resid;
- scsi_set_resid(scp, resid);
- dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
- __func__, cmd, scp, resid);
- }
-
- if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
- dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
- __func__, cmd, scp);
- scp->result = (DID_ERROR << 16);
- }
-
- dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
- "afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
- ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
- ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);
-
- if (ioasa->rc.scsi_rc) {
- /* We have a SCSI status */
- if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
- memcpy(scp->sense_buffer, ioasa->sense_data,
- SISL_SENSE_DATA_LEN);
- scp->result = ioasa->rc.scsi_rc;
- } else
- scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
- }
-
- /*
- * We encountered an error. Set scp->result based on nature
- * of error.
- */
- if (ioasa->rc.fc_rc) {
- /* We have an FC status */
- switch (ioasa->rc.fc_rc) {
- case SISL_FC_RC_LINKDOWN:
- scp->result = (DID_REQUEUE << 16);
- break;
- case SISL_FC_RC_RESID:
- /* This indicates an FCP resid underrun */
- if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
- /* If the SISL_RC_FLAGS_OVERRUN flag was set,
- * then we will handle this error elsewhere.
- * If not, then we must handle it here.
- * This is probably an AFU bug.
- */
- scp->result = (DID_ERROR << 16);
- }
- break;
- case SISL_FC_RC_RESIDERR:
- /* Resid mismatch between adapter and device */
- case SISL_FC_RC_TGTABORT:
- case SISL_FC_RC_ABORTOK:
- case SISL_FC_RC_ABORTFAIL:
- case SISL_FC_RC_NOLOGI:
- case SISL_FC_RC_ABORTPEND:
- case SISL_FC_RC_WRABORTPEND:
- case SISL_FC_RC_NOEXP:
- case SISL_FC_RC_INUSE:
- scp->result = (DID_ERROR << 16);
- break;
- }
- }
-
- if (ioasa->rc.afu_rc) {
- /* We have an AFU error */
- switch (ioasa->rc.afu_rc) {
- case SISL_AFU_RC_NO_CHANNELS:
- scp->result = (DID_NO_CONNECT << 16);
- break;
- case SISL_AFU_RC_DATA_DMA_ERR:
- switch (ioasa->afu_extra) {
- case SISL_AFU_DMA_ERR_PAGE_IN:
- /* Retry */
- scp->result = (DID_IMM_RETRY << 16);
- break;
- case SISL_AFU_DMA_ERR_INVALID_EA:
- default:
- scp->result = (DID_ERROR << 16);
- }
- break;
- case SISL_AFU_RC_OUT_OF_DATA_BUFS:
- /* Retry */
- scp->result = (DID_ERROR << 16);
- break;
- default:
- scp->result = (DID_ERROR << 16);
- }
- }
-}
-
-/**
- * cmd_complete() - command completion handler
- * @cmd: AFU command that has completed.
- *
- * For SCSI commands this routine prepares and submits commands that have
- * either completed or timed out to the SCSI stack. For internal commands
- * (TMF or AFU), this routine simply notifies the originator that the
- * command has completed.
- */
-static void cmd_complete(struct afu_cmd *cmd)
-{
- struct scsi_cmnd *scp;
- ulong lock_flags;
- struct afu *afu = cmd->parent;
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
-
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
- list_del(&cmd->list);
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
-
- if (cmd->scp) {
- scp = cmd->scp;
- if (unlikely(cmd->sa.ioasc))
- process_cmd_err(cmd, scp);
- else
- scp->result = (DID_OK << 16);
-
- dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
- __func__, scp, scp->result, cmd->sa.ioasc);
- scsi_done(scp);
- } else if (cmd->cmd_tmf) {
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- cfg->tmf_active = false;
- wake_up_all_locked(&cfg->tmf_waitq);
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
- } else
- complete(&cmd->cevent);
-}
-
-/**
- * flush_pending_cmds() - flush all pending commands on this hardware queue
- * @hwq: Hardware queue to flush.
- *
- * The hardware send queue lock associated with this hardware queue must be
- * held when calling this routine.
- */
-static void flush_pending_cmds(struct hwq *hwq)
-{
- struct cxlflash_cfg *cfg = hwq->afu->parent;
- struct afu_cmd *cmd, *tmp;
- struct scsi_cmnd *scp;
- ulong lock_flags;
-
- list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
- /* Bypass command when on a doneq, cmd_complete() will handle */
- if (!list_empty(&cmd->queue))
- continue;
-
- list_del(&cmd->list);
-
- if (cmd->scp) {
- scp = cmd->scp;
- scp->result = (DID_IMM_RETRY << 16);
- scsi_done(scp);
- } else {
- cmd->cmd_aborted = true;
-
- if (cmd->cmd_tmf) {
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- cfg->tmf_active = false;
- wake_up_all_locked(&cfg->tmf_waitq);
- spin_unlock_irqrestore(&cfg->tmf_slock,
- lock_flags);
- } else
- complete(&cmd->cevent);
- }
- }
-}
-
-/**
- * context_reset() - reset context via specified register
- * @hwq: Hardware queue owning the context to be reset.
- * @reset_reg: MMIO register to perform reset.
- *
- * When the reset is successful, the SISLite specification guarantees that
- * the AFU has aborted all currently pending I/O. Accordingly, these commands
- * must be flushed.
- *
- * Return: 0 on success, -errno on failure
- */
-static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
-{
- struct cxlflash_cfg *cfg = hwq->afu->parent;
- struct device *dev = &cfg->dev->dev;
- int rc = -ETIMEDOUT;
- int nretry = 0;
- u64 val = 0x1;
- ulong lock_flags;
-
- dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);
-
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
-
- writeq_be(val, reset_reg);
- do {
- val = readq_be(reset_reg);
- if ((val & 0x1) == 0x0) {
- rc = 0;
- break;
- }
-
- /* Double delay each time */
- udelay(1 << nretry);
- } while (nretry++ < MC_ROOM_RETRY_CNT);
-
- if (!rc)
- flush_pending_cmds(hwq);
-
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
-
- dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
- __func__, rc, val, nretry);
- return rc;
-}
-
-/**
- * context_reset_ioarrin() - reset context via IOARRIN register
- * @hwq: Hardware queue owning the context to be reset.
- *
- * Return: 0 on success, -errno on failure
- */
-static int context_reset_ioarrin(struct hwq *hwq)
-{
- return context_reset(hwq, &hwq->host_map->ioarrin);
-}
-
-/**
- * context_reset_sq() - reset context via SQ_CONTEXT_RESET register
- * @hwq: Hardware queue owning the context to be reset.
- *
- * Return: 0 on success, -errno on failure
- */
-static int context_reset_sq(struct hwq *hwq)
-{
- return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
-}
-
-/**
- * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
- * @afu: AFU associated with the host.
- * @cmd: AFU command to send.
- *
- * Return:
- * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
- */
-static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
- int rc = 0;
- s64 room;
- ulong lock_flags;
-
- /*
- * To avoid the performance penalty of MMIO, spread the update of
- * 'room' over multiple commands.
- */
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
- if (--hwq->room < 0) {
- room = readq_be(&hwq->host_map->cmd_room);
- if (room <= 0) {
- dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
- "0x%02X, room=0x%016llX\n",
- __func__, cmd->rcb.cdb[0], room);
- hwq->room = 0;
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
- hwq->room = room - 1;
- }
-
- list_add(&cmd->list, &hwq->pending_cmds);
- writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
-out:
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
- dev_dbg_ratelimited(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n",
- __func__, cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
- return rc;
-}
-
-/**
- * send_cmd_sq() - sends an AFU command via SQ ring
- * @afu: AFU associated with the host.
- * @cmd: AFU command to send.
- *
- * Return:
- * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
- */
-static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
- int rc = 0;
- int newval;
- ulong lock_flags;
-
- newval = atomic_dec_if_positive(&hwq->hsq_credits);
- if (newval <= 0) {
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
-
- cmd->rcb.ioasa = &cmd->sa;
-
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
-
- *hwq->hsq_curr = cmd->rcb;
- if (hwq->hsq_curr < hwq->hsq_end)
- hwq->hsq_curr++;
- else
- hwq->hsq_curr = hwq->hsq_start;
-
- list_add(&cmd->list, &hwq->pending_cmds);
- writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);
-
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
-out:
- dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
- "head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
- cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
- readq_be(&hwq->host_map->sq_head),
- readq_be(&hwq->host_map->sq_tail));
- return rc;
-}
-
-/**
- * wait_resp() - polls for a response or timeout to a sent AFU command
- * @afu: AFU associated with the host.
- * @cmd: AFU command that was sent.
- *
- * Return: 0 on success, -errno on failure
- */
-static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
- ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
-
- timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
- if (!timeout)
- rc = -ETIMEDOUT;
-
- if (cmd->cmd_aborted)
- rc = -EAGAIN;
-
- if (unlikely(cmd->sa.ioasc != 0)) {
- dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
- __func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
- rc = -EIO;
- }
-
- return rc;
-}
-
-/**
- * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
- * @host: SCSI host associated with device.
- * @scp: SCSI command to send.
- * @afu: AFU associated with the host.
- *
- * Hashes a command based upon the hardware queue mode.
- *
- * Return: Trusted index of target hardware queue
- */
-static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
- struct afu *afu)
-{
- u32 tag;
- u32 hwq = 0;
-
- if (afu->num_hwqs == 1)
- return 0;
-
- switch (afu->hwq_mode) {
- case HWQ_MODE_RR:
- hwq = afu->hwq_rr_count++ % afu->num_hwqs;
- break;
- case HWQ_MODE_TAG:
- tag = blk_mq_unique_tag(scsi_cmd_to_rq(scp));
- hwq = blk_mq_unique_tag_to_hwq(tag);
- break;
- case HWQ_MODE_CPU:
- hwq = smp_processor_id() % afu->num_hwqs;
- break;
- default:
- WARN_ON_ONCE(1);
- }
-
- return hwq;
-}
-
-/**
- * send_tmf() - sends a Task Management Function (TMF)
- * @cfg: Internal structure associated with the host.
- * @sdev: SCSI device destined for TMF.
- * @tmfcmd: TMF command to send.
- *
- * Return:
- * 0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure
- */
-static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
- u64 tmfcmd)
-{
- struct afu *afu = cfg->afu;
- struct afu_cmd *cmd = NULL;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
- bool needs_deletion = false;
- char *buf = NULL;
- ulong lock_flags;
- int rc = 0;
- ulong to;
-
- buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
- if (unlikely(!buf)) {
- dev_err(dev, "%s: no memory for command\n", __func__);
- rc = -ENOMEM;
- goto out;
- }
-
- cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
- INIT_LIST_HEAD(&cmd->queue);
-
- /* When Task Management Function is active do not send another */
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- if (cfg->tmf_active)
- wait_event_interruptible_lock_irq(cfg->tmf_waitq,
- !cfg->tmf_active,
- cfg->tmf_slock);
- cfg->tmf_active = true;
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
-
- cmd->parent = afu;
- cmd->cmd_tmf = true;
- cmd->hwq_index = hwq->index;
-
- cmd->rcb.ctx_id = hwq->ctx_hndl;
- cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
- cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel);
- cmd->rcb.lun_id = lun_to_lunid(sdev->lun);
- cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
- SISL_REQ_FLAGS_SUP_UNDERRUN |
- SISL_REQ_FLAGS_TMF_CMD);
- memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));
-
- rc = afu->send_cmd(afu, cmd);
- if (unlikely(rc)) {
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- cfg->tmf_active = false;
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
- goto out;
- }
-
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- to = msecs_to_jiffies(5000);
- to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
- !cfg->tmf_active,
- cfg->tmf_slock,
- to);
- if (!to) {
- dev_err(dev, "%s: TMF timed out\n", __func__);
- rc = -ETIMEDOUT;
- needs_deletion = true;
- } else if (cmd->cmd_aborted) {
- dev_err(dev, "%s: TMF aborted\n", __func__);
- rc = -EAGAIN;
- } else if (cmd->sa.ioasc) {
- dev_err(dev, "%s: TMF failed ioasc=%08x\n",
- __func__, cmd->sa.ioasc);
- rc = -EIO;
- }
- cfg->tmf_active = false;
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
-
- if (needs_deletion) {
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
- list_del(&cmd->list);
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
- }
-out:
- kfree(buf);
- return rc;
-}
-
-/**
- * cxlflash_driver_info() - information handler for this host driver
- * @host: SCSI host associated with device.
- *
- * Return: A string describing the device.
- */
-static const char *cxlflash_driver_info(struct Scsi_Host *host)
-{
- return CXLFLASH_ADAPTER_NAME;
-}
-
-/**
- * cxlflash_queuecommand() - sends a mid-layer request
- * @host: SCSI host associated with device.
- * @scp: SCSI command to send.
- *
- * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
- */
-static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
-{
- struct cxlflash_cfg *cfg = shost_priv(host);
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct afu_cmd *cmd = sc_to_afuci(scp);
- struct scatterlist *sg = scsi_sglist(scp);
- int hwq_index = cmd_to_target_hwq(host, scp, afu);
- struct hwq *hwq = get_hwq(afu, hwq_index);
- u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
- ulong lock_flags;
- int rc = 0;
-
- dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
- "cdb=(%08x-%08x-%08x-%08x)\n",
- __func__, scp, host->host_no, scp->device->channel,
- scp->device->id, scp->device->lun,
- get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
-
- /*
- * If a Task Management Function is active, wait for it to complete
- * before continuing with regular commands.
- */
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- if (cfg->tmf_active) {
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
-
- switch (cfg->state) {
- case STATE_PROBING:
- case STATE_PROBED:
- case STATE_RESET:
- dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- case STATE_FAILTERM:
- dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
- scp->result = (DID_NO_CONNECT << 16);
- scsi_done(scp);
- rc = 0;
- goto out;
- default:
- atomic_inc(&afu->cmds_active);
- break;
- }
-
- if (likely(sg)) {
- cmd->rcb.data_len = sg->length;
- cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
- }
-
- cmd->scp = scp;
- cmd->parent = afu;
- cmd->hwq_index = hwq_index;
-
- cmd->sa.ioasc = 0;
- cmd->rcb.ctx_id = hwq->ctx_hndl;
- cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
- cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
- cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
-
- if (scp->sc_data_direction == DMA_TO_DEVICE)
- req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
-
- cmd->rcb.req_flags = req_flags;
- memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
-
- rc = afu->send_cmd(afu, cmd);
- atomic_dec(&afu->cmds_active);
-out:
- return rc;
-}
-
-/**
- * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
- * @cfg: Internal structure associated with the host.
- */
-static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
-{
- struct pci_dev *pdev = cfg->dev;
-
- if (pci_channel_offline(pdev))
- wait_event_timeout(cfg->reset_waitq,
- !pci_channel_offline(pdev),
- CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
-}
-
-/**
- * free_mem() - free memory associated with the AFU
- * @cfg: Internal structure associated with the host.
- */
-static void free_mem(struct cxlflash_cfg *cfg)
-{
- struct afu *afu = cfg->afu;
-
- if (cfg->afu) {
- free_pages((ulong)afu, get_order(sizeof(struct afu)));
- cfg->afu = NULL;
- }
-}
-
-/**
- * cxlflash_reset_sync() - synchronizing point for asynchronous resets
- * @cfg: Internal structure associated with the host.
- */
-static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
-{
- if (cfg->async_reset_cookie == 0)
- return;
-
- /* Wait until all async calls prior to this cookie have completed */
- async_synchronize_cookie(cfg->async_reset_cookie + 1);
- cfg->async_reset_cookie = 0;
-}
-
-/**
- * stop_afu() - stops the AFU command timers and unmaps the MMIO space
- * @cfg: Internal structure associated with the host.
- *
- * Safe to call with AFU in a partially allocated/initialized state.
- *
- * Cancels scheduled worker threads, waits for any active internal AFU
- * commands to time out, disables IRQ polling and then unmaps the MMIO space.
- */
-static void stop_afu(struct cxlflash_cfg *cfg)
-{
- struct afu *afu = cfg->afu;
- struct hwq *hwq;
- int i;
-
- cancel_work_sync(&cfg->work_q);
- if (!current_is_async())
- cxlflash_reset_sync(cfg);
-
- if (likely(afu)) {
- while (atomic_read(&afu->cmds_active))
- ssleep(1);
-
- if (afu_is_irqpoll_enabled(afu)) {
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- irq_poll_disable(&hwq->irqpoll);
- }
- }
-
- if (likely(afu->afu_map)) {
- cfg->ops->psa_unmap(afu->afu_map);
- afu->afu_map = NULL;
- }
- }
-}
-
-/**
- * term_intr() - disables all AFU interrupts
- * @cfg: Internal structure associated with the host.
- * @level: Depth of allocation, where to begin waterfall tear down.
- * @index: Index of the hardware queue.
- *
- * Safe to call with AFU/MC in partially allocated/initialized state.
- */
-static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
- u32 index)
-{
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq;
-
- if (!afu) {
- dev_err(dev, "%s: returning with NULL afu\n", __func__);
- return;
- }
-
- hwq = get_hwq(afu, index);
-
- if (!hwq->ctx_cookie) {
- dev_err(dev, "%s: returning with NULL MC\n", __func__);
- return;
- }
-
- switch (level) {
- case UNMAP_THREE:
- /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
- if (index == PRIMARY_HWQ)
- cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
- fallthrough;
- case UNMAP_TWO:
- cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
- fallthrough;
- case UNMAP_ONE:
- cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
- fallthrough;
- case FREE_IRQ:
- cfg->ops->free_afu_irqs(hwq->ctx_cookie);
- fallthrough;
- case UNDO_NOOP:
- /* No action required */
- break;
- }
-}
-
-/**
- * term_mc() - terminates the master context
- * @cfg: Internal structure associated with the host.
- * @index: Index of the hardware queue.
- *
- * Safe to call with AFU/MC in partially allocated/initialized state.
- */
-static void term_mc(struct cxlflash_cfg *cfg, u32 index)
-{
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq;
- ulong lock_flags;
-
- if (!afu) {
- dev_err(dev, "%s: returning with NULL afu\n", __func__);
- return;
- }
-
- hwq = get_hwq(afu, index);
-
- if (!hwq->ctx_cookie) {
- dev_err(dev, "%s: returning with NULL MC\n", __func__);
- return;
- }
-
- WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
- if (index != PRIMARY_HWQ)
- WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
- hwq->ctx_cookie = NULL;
-
- spin_lock_irqsave(&hwq->hrrq_slock, lock_flags);
- hwq->hrrq_online = false;
- spin_unlock_irqrestore(&hwq->hrrq_slock, lock_flags);
-
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
- flush_pending_cmds(hwq);
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
-}
-
-/**
- * term_afu() - terminates the AFU
- * @cfg: Internal structure associated with the host.
- *
- * Safe to call with AFU/MC in partially allocated/initialized state.
- */
-static void term_afu(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
- int k;
-
- /*
- * Tear down is carefully orchestrated to ensure
- * no interrupts can come in when the problem state
- * area is unmapped.
- *
- * 1) Disable all AFU interrupts for each master
- * 2) Unmap the problem state area
- * 3) Stop each master context
- */
- for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
- term_intr(cfg, UNMAP_THREE, k);
-
- stop_afu(cfg);
-
- for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
- term_mc(cfg, k);
-
- dev_dbg(dev, "%s: returning\n", __func__);
-}
-
-/**
- * notify_shutdown() - notifies device of pending shutdown
- * @cfg: Internal structure associated with the host.
- * @wait: Whether to wait for shutdown processing to complete.
- *
- * This function will notify the AFU that the adapter is being shutdown
- * and will wait for shutdown processing to complete if wait is true.
- * This notification should flush pending I/Os to the device and halt
- * further I/Os until the next AFU reset is issued and device restarted.
- */
-static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
-{
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct dev_dependent_vals *ddv;
- __be64 __iomem *fc_port_regs;
- u64 reg, status;
- int i, retry_cnt = 0;
-
- ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
- if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
- return;
-
- if (!afu || !afu->afu_map) {
- dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
- return;
- }
-
- /* Notify AFU */
- for (i = 0; i < cfg->num_fc_ports; i++) {
- fc_port_regs = get_fc_port_regs(cfg, i);
-
- reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
- reg |= SISL_FC_SHUTDOWN_NORMAL;
- writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
- }
-
- if (!wait)
- return;
-
- /* Wait up to 1.5 seconds for shutdown processing to complete */
- for (i = 0; i < cfg->num_fc_ports; i++) {
- fc_port_regs = get_fc_port_regs(cfg, i);
- retry_cnt = 0;
-
- while (true) {
- status = readq_be(&fc_port_regs[FC_STATUS / 8]);
- if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
- break;
- if (++retry_cnt >= MC_RETRY_CNT) {
- dev_dbg(dev, "%s: port %d shutdown processing "
- "not yet completed\n", __func__, i);
- break;
- }
- msleep(100 * retry_cnt);
- }
- }
-}
-
-/**
- * cxlflash_get_minor() - gets the first available minor number
- *
- * Return: Unique minor number that can be used to create the character device.
- */
-static int cxlflash_get_minor(void)
-{
- int minor;
- long bit;
-
- bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
- if (bit >= CXLFLASH_MAX_ADAPTERS)
- return -1;
-
- minor = bit & MINORMASK;
- set_bit(minor, cxlflash_minor);
- return minor;
-}
-
-/**
- * cxlflash_put_minor() - releases the minor number
- * @minor: Minor number that is no longer needed.
- */
-static void cxlflash_put_minor(int minor)
-{
- clear_bit(minor, cxlflash_minor);
-}
-
-/**
- * cxlflash_release_chrdev() - release the character device for the host
- * @cfg: Internal structure associated with the host.
- */
-static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg)
-{
- device_unregister(cfg->chardev);
- cfg->chardev = NULL;
- cdev_del(&cfg->cdev);
- cxlflash_put_minor(MINOR(cfg->cdev.dev));
-}
-
-/**
- * cxlflash_remove() - PCI entry point to tear down host
- * @pdev: PCI device associated with the host.
- *
- * Safe to use as a cleanup in partially allocated/initialized state. Note that
- * the reset_waitq is flushed as part of the stop/termination of user contexts.
- */
-static void cxlflash_remove(struct pci_dev *pdev)
-{
- struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
- struct device *dev = &pdev->dev;
- ulong lock_flags;
-
- if (!pci_is_enabled(pdev)) {
- dev_dbg(dev, "%s: Device is disabled\n", __func__);
- return;
- }
-
- /* Yield to running recovery threads before continuing with remove */
- wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
- cfg->state != STATE_PROBING);
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- if (cfg->tmf_active)
- wait_event_interruptible_lock_irq(cfg->tmf_waitq,
- !cfg->tmf_active,
- cfg->tmf_slock);
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
-
- /* Notify AFU and wait for shutdown processing to complete */
- notify_shutdown(cfg, true);
-
- cfg->state = STATE_FAILTERM;
- cxlflash_stop_term_user_contexts(cfg);
-
- switch (cfg->init_state) {
- case INIT_STATE_CDEV:
- cxlflash_release_chrdev(cfg);
- fallthrough;
- case INIT_STATE_SCSI:
- cxlflash_term_local_luns(cfg);
- scsi_remove_host(cfg->host);
- fallthrough;
- case INIT_STATE_AFU:
- term_afu(cfg);
- fallthrough;
- case INIT_STATE_PCI:
- cfg->ops->destroy_afu(cfg->afu_cookie);
- pci_disable_device(pdev);
- fallthrough;
- case INIT_STATE_NONE:
- free_mem(cfg);
- scsi_host_put(cfg->host);
- break;
- }
-
- dev_dbg(dev, "%s: returning\n", __func__);
-}
-
-/**
- * alloc_mem() - allocates the AFU and its command pool
- * @cfg: Internal structure associated with the host.
- *
- * A partially allocated state remains on failure.
- *
- * Return:
- * 0 on success
- * -ENOMEM on failure to allocate memory
- */
-static int alloc_mem(struct cxlflash_cfg *cfg)
-{
- int rc = 0;
- struct device *dev = &cfg->dev->dev;
-
- /* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
- cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(sizeof(struct afu)));
- if (unlikely(!cfg->afu)) {
- dev_err(dev, "%s: cannot get %d free pages\n",
- __func__, get_order(sizeof(struct afu)));
- rc = -ENOMEM;
- goto out;
- }
- cfg->afu->parent = cfg;
- cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
- cfg->afu->afu_map = NULL;
-out:
- return rc;
-}
-
-/**
- * init_pci() - initializes the host as a PCI device
- * @cfg: Internal structure associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_pci(struct cxlflash_cfg *cfg)
-{
- struct pci_dev *pdev = cfg->dev;
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
-
- rc = pci_enable_device(pdev);
- if (rc || pci_channel_offline(pdev)) {
- if (pci_channel_offline(pdev)) {
- cxlflash_wait_for_pci_err_recovery(cfg);
- rc = pci_enable_device(pdev);
- }
-
- if (rc) {
- dev_err(dev, "%s: Cannot enable adapter\n", __func__);
- cxlflash_wait_for_pci_err_recovery(cfg);
- goto out;
- }
- }
-
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * init_scsi() - adds the host to the SCSI stack and kicks off host scan
- * @cfg: Internal structure associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_scsi(struct cxlflash_cfg *cfg)
-{
- struct pci_dev *pdev = cfg->dev;
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
-
- rc = scsi_add_host(cfg->host, &pdev->dev);
- if (rc) {
- dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
- goto out;
- }
-
- scsi_scan_host(cfg->host);
-
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * set_port_online() - transitions the specified host FC port to online state
- * @fc_regs: Top of MMIO region defined for specified port.
- *
- * The provided MMIO region must be mapped prior to call. Online state means
- * that the FC link layer has synced, completed the handshaking process, and
- * is ready for login to start.
- */
-static void set_port_online(__be64 __iomem *fc_regs)
-{
- u64 cmdcfg;
-
- cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
- cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE); /* clear OFF_LINE */
- cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE); /* set ON_LINE */
- writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
-}
-
-/**
- * set_port_offline() - transitions the specified host FC port to offline state
- * @fc_regs: Top of MMIO region defined for specified port.
- *
- * The provided MMIO region must be mapped prior to call.
- */
-static void set_port_offline(__be64 __iomem *fc_regs)
-{
- u64 cmdcfg;
-
- cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
- cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE); /* clear ON_LINE */
- cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE); /* set OFF_LINE */
- writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
-}
-
-/**
- * wait_port_online() - waits for the specified host FC port to come online
- * @fc_regs: Top of MMIO region defined for specified port.
- * @delay_us: Number of microseconds to delay between reading port status.
- * @nretry: Number of cycles to retry reading port status.
- *
- * The provided MMIO region must be mapped prior to call. This will time out
- * when the cable is not plugged in.
- *
- * Return:
- * TRUE (1) when the specified port is online
- * FALSE (0) when the specified port fails to come online after timeout
- */
-static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
-{
- u64 status;
-
- WARN_ON(delay_us < 1000);
-
- do {
- msleep(delay_us / 1000);
- status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
- if (status == U64_MAX)
- nretry /= 2;
- } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
- nretry--);
-
- return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
-}
-
-/**
- * wait_port_offline() - waits for the specified host FC port to go offline
- * @fc_regs: Top of MMIO region defined for specified port.
- * @delay_us: Number of microseconds to delay between reading port status.
- * @nretry: Number of cycles to retry reading port status.
- *
- * The provided MMIO region must be mapped prior to call.
- *
- * Return:
- * TRUE (1) when the specified port is offline
- * FALSE (0) when the specified port fails to go offline after timeout
- */
-static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
-{
- u64 status;
-
- WARN_ON(delay_us < 1000);
-
- do {
- msleep(delay_us / 1000);
- status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
- if (status == U64_MAX)
- nretry /= 2;
- } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
- nretry--);
-
- return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
-}
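-
-/*
- * Editorial sketch (not part of the original driver): an upper bound on
- * the time the two polling loops above can spend waiting. The do/while
- * construct runs the body up to nretry + 1 times, and MMIO error reads
- * (U64_MAX) only shorten the wait by halving the remaining retries.
- */
-static inline u64 port_wait_worst_case_us(u32 delay_us, u32 nretry)
-{
-	/* Each iteration sleeps ~delay_us; the body runs nretry + 1 times */
-	return (u64)delay_us * (nretry + 1);
-}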
-
-/**
- * afu_set_wwpn() - configures the WWPN for the specified host FC port
- * @afu: AFU associated with the host that owns the specified FC port.
- * @port: Port number being configured.
- * @fc_regs: Top of MMIO region defined for specified port.
- * @wwpn: The world-wide-port-number previously discovered for port.
- *
- * The provided MMIO region must be mapped prior to call. As part of the
- * sequence to configure the WWPN, the port is toggled offline and then back
- * online. This toggling action can cause this routine to delay up to a few
- * seconds. When configured to use the internal LUN feature of the AFU, a
- * failure to come online is overridden.
- */
-static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
- u64 wwpn)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
-
- set_port_offline(fc_regs);
- if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
- FC_PORT_STATUS_RETRY_CNT)) {
- dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
- __func__, port);
- }
-
- writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
-
- set_port_online(fc_regs);
- if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
- FC_PORT_STATUS_RETRY_CNT)) {
- dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
- __func__, port);
- }
-}
-
-/**
- * afu_link_reset() - resets the specified host FC port
- * @afu: AFU associated with the host that owns the specified FC port.
- * @port: Port number being configured.
- * @fc_regs: Top of MMIO region defined for specified port.
- *
- * The provided MMIO region must be mapped prior to call. The sequence to
- * reset the port involves toggling it offline and then back online. This
- * action can cause this routine to delay up to a few seconds. An effort
- * is made to maintain link with the device by switching the host to use
- * the alternate port(s) exclusively while the reset takes place. A failure
- * of the port to come back online is logged but otherwise ignored.
- */
-static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- u64 port_sel;
-
- /* first switch the AFU to the other links, if any */
- port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
- port_sel &= ~(1ULL << port);
- writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
- cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
-
- set_port_offline(fc_regs);
- if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
- FC_PORT_STATUS_RETRY_CNT))
- dev_err(dev, "%s: wait on port %d to go offline timed out\n",
- __func__, port);
-
- set_port_online(fc_regs);
- if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
- FC_PORT_STATUS_RETRY_CNT))
- dev_err(dev, "%s: wait on port %d to go online timed out\n",
- __func__, port);
-
- /* switch back to include this port */
- port_sel |= (1ULL << port);
- writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
- cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
-
- dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
-}
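-
-/*
- * Editorial sketch (hypothetical helpers, illustration only): the
- * port-select arithmetic used above. Each FC port owns one bit of
- * afu_port_sel; clearing the bit steers I/O to the remaining ports for
- * the duration of the reset, and setting it restores the port.
- */
-static inline u64 port_sel_drop(u64 port_sel, int port)
-{
-	return port_sel & ~(1ULL << port);
-}
-
-static inline u64 port_sel_restore(u64 port_sel, int port)
-{
-	return port_sel | (1ULL << port);
-}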
-
-/**
- * afu_err_intr_init() - clears and initializes the AFU for error interrupts
- * @afu: AFU associated with the host.
- */
-static void afu_err_intr_init(struct afu *afu)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- __be64 __iomem *fc_port_regs;
- int i;
- struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
- u64 reg;
-
-	/*
-	 * Global async interrupts: AFU clears afu_ctrl on context exit
-	 * if async interrupts were sent to that context. This prevents
-	 * the AFU from sending further async interrupts when there is
-	 * nobody to receive them.
-	 */
-
- /* mask all */
- writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
- /* set LISN# to send and point to primary master context */
- reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);
-
- if (afu->internal_lun)
- reg |= 1; /* Bit 63 indicates local lun */
- writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
- /* clear all */
- writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
- /* unmask bits that are of interest */
- /* note: afu can send an interrupt after this step */
- writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
- /* clear again in case a bit came on after previous clear but before */
- /* unmask */
- writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
-
- /* Clear/Set internal lun bits */
- fc_port_regs = get_fc_port_regs(cfg, 0);
- reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
- reg &= SISL_FC_INTERNAL_MASK;
- if (afu->internal_lun)
- reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
- writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
-
- /* now clear FC errors */
- for (i = 0; i < cfg->num_fc_ports; i++) {
- fc_port_regs = get_fc_port_regs(cfg, i);
-
- writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
- writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
- }
-
-	/*
-	 * Sync interrupts for master's IOARRIN write. Note that unlike
-	 * asyncs, there can be no pending sync interrupts at this time
-	 * (this is a fresh context and the master has not written IOARRIN
-	 * yet), so there is nothing to clear.
-	 */
-
- /* set LISN#, it is always sent to the context that wrote IOARRIN */
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- reg = readq_be(&hwq->host_map->ctx_ctrl);
- WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0);
- reg |= SISL_MSI_SYNC_ERROR;
- writeq_be(reg, &hwq->host_map->ctx_ctrl);
- writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
- }
-}
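-
-/*
- * Editorial sketch (hypothetical helper, illustration only): the afu_ctrl
- * encoding programmed above. The context handle and LISN share a field
- * shifted into the high-order bits, while IBM bit 63 (the least
- * significant bit in SISLite's big-endian numbering) indicates local LUN.
- */
-static inline u64 sisl_afu_ctrl_encode(u16 ctx_hndl, u8 lisn, bool internal_lun)
-{
-	u64 reg = ((u64)((ctx_hndl << 8) | lisn)) << 40;
-
-	if (internal_lun)
-		reg |= 1ULL;	/* IBM bit 63 indicates local LUN */
-	return reg;
-}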
-
-/**
- * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
- * @irq: Interrupt number.
- * @data: Private data provided at interrupt registration, the AFU.
- *
- * Return: Always return IRQ_HANDLED.
- */
-static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
-{
- struct hwq *hwq = (struct hwq *)data;
- struct cxlflash_cfg *cfg = hwq->afu->parent;
- struct device *dev = &cfg->dev->dev;
- u64 reg;
- u64 reg_unmasked;
-
- reg = readq_be(&hwq->host_map->intr_status);
- reg_unmasked = (reg & SISL_ISTATUS_UNMASK);
-
- if (reg_unmasked == 0UL) {
- dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
- __func__, reg);
- goto cxlflash_sync_err_irq_exit;
- }
-
- dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
- __func__, reg);
-
- writeq_be(reg_unmasked, &hwq->host_map->intr_clear);
-
-cxlflash_sync_err_irq_exit:
- return IRQ_HANDLED;
-}
-
-/**
- * process_hrrq() - process the read-response queue
- * @hwq: HWQ associated with the host.
- * @doneq: Queue of commands harvested from the RRQ.
- * @budget: Threshold of RRQ entries to process.
- *
- * This routine must be called with the RRQ spin lock held and interrupts
- * disabled.
- *
- * Return: The number of entries processed.
- */
-static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
-{
- struct afu *afu = hwq->afu;
- struct afu_cmd *cmd;
- struct sisl_ioasa *ioasa;
- struct sisl_ioarcb *ioarcb;
- bool toggle = hwq->toggle;
- int num_hrrq = 0;
- u64 entry,
- *hrrq_start = hwq->hrrq_start,
- *hrrq_end = hwq->hrrq_end,
- *hrrq_curr = hwq->hrrq_curr;
-
- /* Process ready RRQ entries up to the specified budget (if any) */
- while (true) {
- entry = *hrrq_curr;
-
- if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
- break;
-
- entry &= ~SISL_RESP_HANDLE_T_BIT;
-
- if (afu_is_sq_cmd_mode(afu)) {
- ioasa = (struct sisl_ioasa *)entry;
- cmd = container_of(ioasa, struct afu_cmd, sa);
- } else {
- ioarcb = (struct sisl_ioarcb *)entry;
- cmd = container_of(ioarcb, struct afu_cmd, rcb);
- }
-
- list_add_tail(&cmd->queue, doneq);
-
- /* Advance to next entry or wrap and flip the toggle bit */
-		if (hrrq_curr < hrrq_end) {
-			hrrq_curr++;
-		} else {
- hrrq_curr = hrrq_start;
- toggle ^= SISL_RESP_HANDLE_T_BIT;
- }
-
- atomic_inc(&hwq->hsq_credits);
- num_hrrq++;
-
- if (budget > 0 && num_hrrq >= budget)
- break;
- }
-
- hwq->hrrq_curr = hrrq_curr;
- hwq->toggle = toggle;
-
- return num_hrrq;
-}
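-
-/*
- * Editorial sketch (not driver code): the toggle-bit handshake consumed
- * by process_hrrq(). The AFU stamps each RRQ entry with the current
- * generation's T bit; an entry is ready when its stored bit matches the
- * host's copy, and the host flips its copy on every ring wrap.
- */
-static inline bool hrrq_entry_ready(u64 entry, bool toggle)
-{
-	return (entry & SISL_RESP_HANDLE_T_BIT) == toggle;
-}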
-
-/**
- * process_cmd_doneq() - process a queue of harvested RRQ commands
- * @doneq: Queue of completed commands.
- *
- * Note that upon return the queue can no longer be trusted.
- */
-static void process_cmd_doneq(struct list_head *doneq)
-{
- struct afu_cmd *cmd, *tmp;
-
- WARN_ON(list_empty(doneq));
-
- list_for_each_entry_safe(cmd, tmp, doneq, queue)
- cmd_complete(cmd);
-}
-
-/**
- * cxlflash_irqpoll() - process a queue of harvested RRQ commands
- * @irqpoll: IRQ poll structure associated with queue to poll.
- * @budget: Threshold of RRQ entries to process per poll.
- *
- * Return: The number of entries processed.
- */
-static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
-{
- struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
- unsigned long hrrq_flags;
- LIST_HEAD(doneq);
- int num_entries = 0;
-
- spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
-
- num_entries = process_hrrq(hwq, &doneq, budget);
- if (num_entries < budget)
- irq_poll_complete(irqpoll);
-
- spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
-
- process_cmd_doneq(&doneq);
- return num_entries;
-}
-
-/**
- * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
- * @irq: Interrupt number.
- * @data: Private data provided at interrupt registration, the AFU.
- *
- * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
- */
-static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
-{
- struct hwq *hwq = (struct hwq *)data;
- struct afu *afu = hwq->afu;
- unsigned long hrrq_flags;
- LIST_HEAD(doneq);
- int num_entries = 0;
-
- spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
-
- /* Silently drop spurious interrupts when queue is not online */
- if (!hwq->hrrq_online) {
- spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
- return IRQ_HANDLED;
- }
-
- if (afu_is_irqpoll_enabled(afu)) {
- irq_poll_sched(&hwq->irqpoll);
- spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
- return IRQ_HANDLED;
- }
-
- num_entries = process_hrrq(hwq, &doneq, -1);
- spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
-
- if (num_entries == 0)
- return IRQ_NONE;
-
- process_cmd_doneq(&doneq);
- return IRQ_HANDLED;
-}
-
-/*
- * Asynchronous interrupt information table
- *
- * NOTE:
- * - Order matters here as this array is indexed by bit position.
- *
- * - The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro
- * as complex and complains due to a lack of parentheses/braces.
- */
-#define ASTATUS_FC(_a, _b, _c, _d) \
- { SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }
-
-#define BUILD_SISL_ASTATUS_FC_PORT(_a) \
- ASTATUS_FC(_a, LINK_UP, "link up", 0), \
- ASTATUS_FC(_a, LINK_DN, "link down", 0), \
- ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST), \
- ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR), \
- ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
- ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET), \
- ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0), \
- ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)
-
-static const struct asyc_intr_info ainfo[] = {
- BUILD_SISL_ASTATUS_FC_PORT(1),
- BUILD_SISL_ASTATUS_FC_PORT(0),
- BUILD_SISL_ASTATUS_FC_PORT(3),
- BUILD_SISL_ASTATUS_FC_PORT(2)
-};
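-
-/*
- * Editorial sketch (hypothetical self-check, illustration only): the
- * handler below indexes ainfo[] directly by bit position, which is why
- * the ports appear in 1, 0, 3, 2 order - that is simply the layout of
- * the SISL_ASTATUS_FCn_* bits. The invariant it relies upon:
- */
-static inline bool ainfo_table_consistent(void)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(ainfo); i++)
-		if (ainfo[i].status != 1ULL << i)
-			return false;	/* table order no longer matches bits */
-	return true;
-}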
-
-/**
- * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
- * @irq: Interrupt number.
- * @data: Private data provided at interrupt registration, the AFU.
- *
- * Return: Always return IRQ_HANDLED.
- */
-static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
-{
- struct hwq *hwq = (struct hwq *)data;
- struct afu *afu = hwq->afu;
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- const struct asyc_intr_info *info;
- struct sisl_global_map __iomem *global = &afu->afu_map->global;
- __be64 __iomem *fc_port_regs;
- u64 reg_unmasked;
- u64 reg;
- u64 bit;
- u8 port;
-
- reg = readq_be(&global->regs.aintr_status);
- reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
-
- if (unlikely(reg_unmasked == 0)) {
- dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
- __func__, reg);
- goto out;
- }
-
- /* FYI, it is 'okay' to clear AFU status before FC_ERROR */
- writeq_be(reg_unmasked, &global->regs.aintr_clear);
-
- /* Check each bit that is on */
- for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) {
- if (unlikely(bit >= ARRAY_SIZE(ainfo))) {
- WARN_ON_ONCE(1);
- continue;
- }
-
- info = &ainfo[bit];
- if (unlikely(info->status != 1ULL << bit)) {
- WARN_ON_ONCE(1);
- continue;
- }
-
- port = info->port;
- fc_port_regs = get_fc_port_regs(cfg, port);
-
- dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
- __func__, port, info->desc,
- readq_be(&fc_port_regs[FC_STATUS / 8]));
-
- /*
- * Do link reset first, some OTHER errors will set FC_ERROR
- * again if cleared before or w/o a reset
- */
- if (info->action & LINK_RESET) {
- dev_err(dev, "%s: FC Port %d: resetting link\n",
- __func__, port);
- cfg->lr_state = LINK_RESET_REQUIRED;
- cfg->lr_port = port;
- schedule_work(&cfg->work_q);
- }
-
- if (info->action & CLR_FC_ERROR) {
- reg = readq_be(&fc_port_regs[FC_ERROR / 8]);
-
- /*
- * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
- * should be the same and tracing one is sufficient.
- */
-
- dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
- __func__, port, reg);
-
- writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
- writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
- }
-
- if (info->action & SCAN_HOST) {
- atomic_inc(&cfg->scan_host_needed);
- schedule_work(&cfg->work_q);
- }
- }
-
-out:
- return IRQ_HANDLED;
-}
-
-/**
- * read_vpd() - obtains the WWPNs from VPD
- * @cfg: Internal structure associated with the host.
- * @wwpn: Array of size MAX_FC_PORTS to pass back WWPNs
- *
- * Return: 0 on success, -errno on failure
- */
-static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
-{
- struct device *dev = &cfg->dev->dev;
- struct pci_dev *pdev = cfg->dev;
- int i, k, rc = 0;
- unsigned int kw_size;
- ssize_t vpd_size;
- char vpd_data[CXLFLASH_VPD_LEN];
- char tmp_buf[WWPN_BUF_LEN] = { 0 };
- const struct dev_dependent_vals *ddv = (struct dev_dependent_vals *)
- cfg->dev_id->driver_data;
- const bool wwpn_vpd_required = ddv->flags & CXLFLASH_WWPN_VPD_REQUIRED;
- const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
-
- /* Get the VPD data from the device */
- vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
- if (unlikely(vpd_size <= 0)) {
- dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
- __func__, vpd_size);
- rc = -ENODEV;
- goto out;
- }
-
- /*
- * Find the offset of the WWPN tag within the read only
- * VPD data and validate the found field (partials are
- * no good to us). Convert the ASCII data to an integer
- * value. Note that we must copy to a temporary buffer
- * because the conversion service requires that the ASCII
- * string be terminated.
- *
- * Allow for WWPN not being found for all devices, setting
- * the returned WWPN to zero when not found. Notify with a
- * log error for cards that should have had WWPN keywords
- * in the VPD - cards requiring WWPN will not have their
- * ports programmed and operate in an undefined state.
- */
- for (k = 0; k < cfg->num_fc_ports; k++) {
- i = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
- wwpn_vpd_tags[k], &kw_size);
- if (i == -ENOENT) {
- if (wwpn_vpd_required)
- dev_err(dev, "%s: Port %d WWPN not found\n",
- __func__, k);
- wwpn[k] = 0ULL;
- continue;
- }
-
- if (i < 0 || kw_size != WWPN_LEN) {
- dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
- __func__, k);
- rc = -ENODEV;
- goto out;
- }
-
- memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
- rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
- if (unlikely(rc)) {
- dev_err(dev, "%s: WWPN conversion failed for port %d\n",
- __func__, k);
- rc = -ENODEV;
- goto out;
- }
-
- dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
- }
-
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
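-
-/*
- * Editorial sketch (hypothetical standalone equivalent): the conversion
- * above leans on WWPN_LEN (16) doing double duty as both the copy length
- * and the numeric base passed to kstrtoul(), i.e. the 16-character VPD
- * keyword is parsed as hexadecimal.
- */
-static int wwpn_from_ascii(const char *kw, u64 *wwpn)
-{
-	char buf[WWPN_BUF_LEN] = { 0 };	/* guarantee NUL termination */
-
-	memcpy(buf, kw, WWPN_LEN);
-	return kstrtou64(buf, 16, wwpn);
-}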
-
-/**
- * init_pcr() - initialize the provisioning and control registers
- * @cfg: Internal structure associated with the host.
- *
- * Also sets up fast access to the mapped registers and initializes AFU
- * command fields that never change.
- */
-static void init_pcr(struct cxlflash_cfg *cfg)
-{
- struct afu *afu = cfg->afu;
- struct sisl_ctrl_map __iomem *ctrl_map;
- struct hwq *hwq;
- void *cookie;
- int i;
-
- for (i = 0; i < MAX_CONTEXT; i++) {
- ctrl_map = &afu->afu_map->ctrls[i].ctrl;
-		/*
-		 * Disrupt any clients that could be running, e.g. clients
-		 * that survived a master restart.
-		 */
- writeq_be(0, &ctrl_map->rht_start);
- writeq_be(0, &ctrl_map->rht_cnt_id);
- writeq_be(0, &ctrl_map->ctx_cap);
- }
-
- /* Copy frequently used fields into hwq */
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
- cookie = hwq->ctx_cookie;
-
- hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie);
- hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
- hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;
-
- /* Program the Endian Control for the master context */
- writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl);
- }
-}
-
-/**
- * init_global() - initialize AFU global registers
- * @cfg: Internal structure associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_global(struct cxlflash_cfg *cfg)
-{
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq;
- struct sisl_host_map __iomem *hmap;
- __be64 __iomem *fc_port_regs;
- u64 wwpn[MAX_FC_PORTS]; /* wwpn of AFU ports */
- int i = 0, num_ports = 0;
- int rc = 0;
- int j;
- void *ctx;
- u64 reg;
-
- rc = read_vpd(cfg, &wwpn[0]);
- if (rc) {
- dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
- goto out;
- }
-
- /* Set up RRQ and SQ in HWQ for master issued cmds */
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
- hmap = hwq->host_map;
-
- writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
- writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);
- hwq->hrrq_online = true;
-
- if (afu_is_sq_cmd_mode(afu)) {
- writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
- writeq_be((u64)hwq->hsq_end, &hmap->sq_end);
- }
- }
-
- /* AFU configuration */
- reg = readq_be(&afu->afu_map->global.regs.afu_config);
-	/*
-	 * Enable all auto retry options and control endianness. Leave
-	 * others at default: CTX_CAP write protected, mbox_r does not
-	 * clear on read, and checker on if dual AFU.
-	 */
-	reg |= SISL_AFUCONF_AR_ALL | SISL_AFUCONF_ENDIAN;
- writeq_be(reg, &afu->afu_map->global.regs.afu_config);
-
- /* Global port select: select either port */
- if (afu->internal_lun) {
-		/* Only use port 0; num_ports = 0 skips the FC setup below */
- writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
- num_ports = 0;
- } else {
- writeq_be(PORT_MASK(cfg->num_fc_ports),
- &afu->afu_map->global.regs.afu_port_sel);
- num_ports = cfg->num_fc_ports;
- }
-
- for (i = 0; i < num_ports; i++) {
- fc_port_regs = get_fc_port_regs(cfg, i);
-
- /* Unmask all errors (but they are still masked at AFU) */
- writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]);
- /* Clear CRC error cnt & set a threshold */
- (void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]);
- writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]);
-
- /* Set WWPNs. If already programmed, wwpn[i] is 0 */
- if (wwpn[i] != 0)
- afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
-		/*
-		 * Programming WWPNs back-to-back causes additional
-		 * offline/online transitions and a PLOGI.
-		 */
- msleep(100);
- }
-
- if (afu_is_ocxl_lisn(afu)) {
- /* Set up the LISN effective address for each master */
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
- ctx = hwq->ctx_cookie;
-
- for (j = 0; j < hwq->num_irqs; j++) {
- reg = cfg->ops->get_irq_objhndl(ctx, j);
- writeq_be(reg, &hwq->ctrl_map->lisn_ea[j]);
- }
-
- reg = hwq->ctx_hndl;
- writeq_be(SISL_LISN_PASID(reg, reg),
- &hwq->ctrl_map->lisn_pasid[0]);
- writeq_be(SISL_LISN_PASID(0UL, reg),
- &hwq->ctrl_map->lisn_pasid[1]);
- }
- }
-
-	/*
-	 * Set up master's own CTX_CAP to allow real mode, host translation
-	 * tables, AFU cmds, and read/write GSCSI cmds. First, unlock
-	 * ctx_cap writes by reading the mbox.
-	 */
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- (void)readq_be(&hwq->ctrl_map->mbox_r); /* unlock ctx_cap */
- writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
- SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
- SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
- &hwq->ctrl_map->ctx_cap);
- }
-
- /*
- * Determine write-same unmap support for host by evaluating the unmap
- * sector support bit of the context control register associated with
- * the primary hardware queue. Note that while this status is reflected
- * in a context register, the outcome can be assumed to be host-wide.
- */
- hwq = get_hwq(afu, PRIMARY_HWQ);
- reg = readq_be(&hwq->host_map->ctx_ctrl);
- if (reg & SISL_CTX_CTRL_UNMAP_SECTOR)
- cfg->ws_unmap = true;
-
- /* Initialize heartbeat */
- afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
-out:
- return rc;
-}
-
-/**
- * start_afu() - initializes and starts the AFU
- * @cfg: Internal structure associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int start_afu(struct cxlflash_cfg *cfg)
-{
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq;
- int rc = 0;
- int i;
-
- init_pcr(cfg);
-
- /* Initialize each HWQ */
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- /* After an AFU reset, RRQ entries are stale, clear them */
- memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry));
-
- /* Initialize RRQ pointers */
- hwq->hrrq_start = &hwq->rrq_entry[0];
- hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
- hwq->hrrq_curr = hwq->hrrq_start;
- hwq->toggle = 1;
-
- /* Initialize spin locks */
- spin_lock_init(&hwq->hrrq_slock);
- spin_lock_init(&hwq->hsq_slock);
-
- /* Initialize SQ */
- if (afu_is_sq_cmd_mode(afu)) {
- memset(&hwq->sq, 0, sizeof(hwq->sq));
- hwq->hsq_start = &hwq->sq[0];
- hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
- hwq->hsq_curr = hwq->hsq_start;
-
- atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
- }
-
- /* Initialize IRQ poll */
- if (afu_is_irqpoll_enabled(afu))
- irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight,
- cxlflash_irqpoll);
-	}
-
- rc = init_global(cfg);
-
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * init_intr() - setup interrupt handlers for the master context
- * @cfg: Internal structure associated with the host.
- * @hwq: Hardware queue to initialize.
- *
- * Return: UNDO_NOOP on success, otherwise the undo level describing how
- * much of the partially completed interrupt setup must be unwound
- */
-static enum undo_level init_intr(struct cxlflash_cfg *cfg,
- struct hwq *hwq)
-{
- struct device *dev = &cfg->dev->dev;
- void *ctx = hwq->ctx_cookie;
- int rc = 0;
- enum undo_level level = UNDO_NOOP;
- bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
- int num_irqs = hwq->num_irqs;
-
- rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
- if (unlikely(rc)) {
- dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
- __func__, rc);
- level = UNDO_NOOP;
- goto out;
- }
-
- rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
- "SISL_MSI_SYNC_ERROR");
- if (unlikely(rc <= 0)) {
- dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
- level = FREE_IRQ;
- goto out;
- }
-
- rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
- "SISL_MSI_RRQ_UPDATED");
- if (unlikely(rc <= 0)) {
- dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
- level = UNMAP_ONE;
- goto out;
- }
-
- /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
- if (!is_primary_hwq)
- goto out;
-
- rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
- "SISL_MSI_ASYNC_ERROR");
- if (unlikely(rc <= 0)) {
- dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
- level = UNMAP_TWO;
- goto out;
- }
-out:
- return level;
-}
-
-/**
- * init_mc() - create and register as the master context
- * @cfg: Internal structure associated with the host.
- * @index: HWQ Index of the master context.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_mc(struct cxlflash_cfg *cfg, u32 index)
-{
- void *ctx;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq = get_hwq(cfg->afu, index);
- int rc = 0;
- int num_irqs;
- enum undo_level level;
-
- hwq->afu = cfg->afu;
- hwq->index = index;
- INIT_LIST_HEAD(&hwq->pending_cmds);
-
- if (index == PRIMARY_HWQ) {
- ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
- num_irqs = 3;
- } else {
- ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
- num_irqs = 2;
- }
- if (IS_ERR_OR_NULL(ctx)) {
- rc = -ENOMEM;
- goto err1;
- }
-
- WARN_ON(hwq->ctx_cookie);
- hwq->ctx_cookie = ctx;
- hwq->num_irqs = num_irqs;
-
- /* Set it up as a master with the CXL */
- cfg->ops->set_master(ctx);
-
- /* Reset AFU when initializing primary context */
- if (index == PRIMARY_HWQ) {
- rc = cfg->ops->afu_reset(ctx);
- if (unlikely(rc)) {
- dev_err(dev, "%s: AFU reset failed rc=%d\n",
- __func__, rc);
- goto err1;
- }
- }
-
-	level = init_intr(cfg, hwq);
-	if (unlikely(level)) {
-		rc = -EIO;
-		dev_err(dev, "%s: interrupt init failed level=%d\n",
-			__func__, level);
-		goto err2;
-	}
-
- /* Finally, activate the context by starting it */
- rc = cfg->ops->start_context(hwq->ctx_cookie);
- if (unlikely(rc)) {
- dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
- level = UNMAP_THREE;
- goto err2;
- }
-
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-err2:
- term_intr(cfg, level, index);
- if (index != PRIMARY_HWQ)
- cfg->ops->release_context(ctx);
-err1:
- hwq->ctx_cookie = NULL;
- goto out;
-}
-
-/**
- * get_num_afu_ports() - determines and configures the number of AFU ports
- * @cfg: Internal structure associated with the host.
- *
- * This routine determines the number of AFU ports by converting the global
- * port selection mask. The converted value is only valid following an AFU
- * reset (explicit or power-on). This routine must be invoked shortly after
- * mapping as other routines are dependent on the number of ports during the
- * initialization sequence.
- *
- * To support legacy AFUs that might not have reflected an initial global
- * port mask (value read is 0), default to the number of ports originally
- * supported by the cxlflash driver (2) before hardware with other port
- * offerings was introduced.
- */
-static void get_num_afu_ports(struct cxlflash_cfg *cfg)
-{
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- u64 port_mask;
- int num_fc_ports = LEGACY_FC_PORTS;
-
- port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel);
- if (port_mask != 0ULL)
- num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS);
-
- dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
- __func__, port_mask, num_fc_ports);
-
- cfg->num_fc_ports = num_fc_ports;
- cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports);
-}
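-
-/*
- * Editorial sketch (illustration only): the mask-to-count conversion used
- * above. A contiguous select mask of 0x3 yields 2 ports and 0xf yields 4;
- * a mask of 0 falls back to the legacy two-port default.
- */
-static inline int port_mask_to_count(u64 port_mask)
-{
-	if (!port_mask)
-		return LEGACY_FC_PORTS;
-	return min(ilog2(port_mask) + 1, MAX_FC_PORTS);
-}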
-
-/**
- * init_afu() - setup as master context and start AFU
- * @cfg: Internal structure associated with the host.
- *
- * This routine is a higher level of control for configuring the
- * AFU on probe and reset paths.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_afu(struct cxlflash_cfg *cfg)
-{
- u64 reg;
- int rc = 0;
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq;
- int i;
-
- cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);
-
- mutex_init(&afu->sync_active);
- afu->num_hwqs = afu->desired_hwqs;
- for (i = 0; i < afu->num_hwqs; i++) {
- rc = init_mc(cfg, i);
- if (rc) {
- dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
- __func__, rc, i);
- goto err1;
- }
- }
-
- /* Map the entire MMIO space of the AFU using the first context */
- hwq = get_hwq(afu, PRIMARY_HWQ);
- afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
- if (!afu->afu_map) {
- dev_err(dev, "%s: psa_map failed\n", __func__);
- rc = -ENOMEM;
- goto err1;
- }
-
- /* No byte reverse on reading afu_version or string will be backwards */
- reg = readq(&afu->afu_map->global.regs.afu_version);
- memcpy(afu->version, &reg, sizeof(reg));
- afu->interface_version =
- readq_be(&afu->afu_map->global.regs.interface_version);
- if ((afu->interface_version + 1) == 0) {
- dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
- "interface version %016llx\n", afu->version,
- afu->interface_version);
- rc = -EINVAL;
- goto err1;
- }
-
- if (afu_is_sq_cmd_mode(afu)) {
- afu->send_cmd = send_cmd_sq;
- afu->context_reset = context_reset_sq;
- } else {
- afu->send_cmd = send_cmd_ioarrin;
- afu->context_reset = context_reset_ioarrin;
- }
-
- dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
- afu->version, afu->interface_version);
-
- get_num_afu_ports(cfg);
-
- rc = start_afu(cfg);
- if (rc) {
- dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
- goto err1;
- }
-
- afu_err_intr_init(cfg->afu);
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- hwq->room = readq_be(&hwq->host_map->cmd_room);
- }
-
- /* Restore the LUN mappings */
- cxlflash_restore_luntable(cfg);
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-
-err1:
- for (i = afu->num_hwqs - 1; i >= 0; i--) {
- term_intr(cfg, UNMAP_THREE, i);
- term_mc(cfg, i);
- }
- goto out;
-}
-
-/**
- * afu_reset() - resets the AFU
- * @cfg: Internal structure associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int afu_reset(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
-
-	/*
-	 * Stop the context before the reset. Since the context is no
-	 * longer available, restart it after the reset completes.
-	 */
- term_afu(cfg);
-
- rc = init_afu(cfg);
-
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * drain_ioctls() - wait until all currently executing ioctls have completed
- * @cfg: Internal structure associated with the host.
- *
- * Obtain write access to read/write semaphore that wraps ioctl
- * handling to 'drain' ioctls currently executing.
- */
-static void drain_ioctls(struct cxlflash_cfg *cfg)
-{
- down_write(&cfg->ioctl_rwsem);
- up_write(&cfg->ioctl_rwsem);
-}
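-
-/*
- * Editorial sketch (hypothetical caller shape): the read side that
- * drain_ioctls() pairs with. Each ioctl holds the semaphore for reading,
- * so the write acquisition above only succeeds once every in-flight
- * ioctl has released it.
- */
-static long example_ioctl_body(struct cxlflash_cfg *cfg)
-{
-	long rc = 0;
-
-	down_read(&cfg->ioctl_rwsem);
-	/* ... ioctl serviced here, concurrently with other readers ... */
-	up_read(&cfg->ioctl_rwsem);
-	return rc;
-}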
-
-/**
- * cxlflash_async_reset_host() - asynchronous host reset handler
- * @data: Private data provided while scheduling reset.
- * @cookie: Cookie that can be used for checkpointing.
- */
-static void cxlflash_async_reset_host(void *data, async_cookie_t cookie)
-{
- struct cxlflash_cfg *cfg = data;
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
-
- if (cfg->state != STATE_RESET) {
- dev_dbg(dev, "%s: Not performing a reset, state=%d\n",
- __func__, cfg->state);
- goto out;
- }
-
- drain_ioctls(cfg);
- cxlflash_mark_contexts_error(cfg);
- rc = afu_reset(cfg);
- if (rc)
- cfg->state = STATE_FAILTERM;
- else
- cfg->state = STATE_NORMAL;
- wake_up_all(&cfg->reset_waitq);
-
-out:
- scsi_unblock_requests(cfg->host);
-}
-
-/**
- * cxlflash_schedule_async_reset() - schedule an asynchronous host reset
- * @cfg: Internal structure associated with the host.
- */
-static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
-
- if (cfg->state != STATE_NORMAL) {
- dev_dbg(dev, "%s: Not performing reset state=%d\n",
- __func__, cfg->state);
- return;
- }
-
- cfg->state = STATE_RESET;
- scsi_block_requests(cfg->host);
- cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host,
- cfg);
-}
-
-/**
- * send_afu_cmd() - builds and sends an internal AFU command
- * @afu: AFU associated with the host.
- * @rcb: Pre-populated IOARCB describing command to send.
- *
- * The AFU can only take one internal AFU command at a time. This limitation is
- * enforced by using a mutex to provide exclusive access to the AFU during the
- * operation. This design point requires that callers not be in interrupt
- * context, as they may sleep while waiting out concurrent AFU operations.
- *
- * The command status is optionally passed back to the caller when the caller
- * populates the IOASA field of the IOARCB with a pointer to an IOASA structure.
- *
- * Return:
- * 0 on success, -errno on failure
- */
-static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct afu_cmd *cmd = NULL;
- struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
- ulong lock_flags;
- char *buf = NULL;
- int rc = 0;
- int nretry = 0;
-
- if (cfg->state != STATE_NORMAL) {
- dev_dbg(dev, "%s: Sync not required state=%u\n",
- __func__, cfg->state);
- return 0;
- }
-
- mutex_lock(&afu->sync_active);
- atomic_inc(&afu->cmds_active);
- buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
- if (unlikely(!buf)) {
- dev_err(dev, "%s: no memory for command\n", __func__);
- rc = -ENOMEM;
- goto out;
- }
-
- cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
-
-retry:
- memset(cmd, 0, sizeof(*cmd));
- memcpy(&cmd->rcb, rcb, sizeof(*rcb));
- INIT_LIST_HEAD(&cmd->queue);
- init_completion(&cmd->cevent);
- cmd->parent = afu;
- cmd->hwq_index = hwq->index;
- cmd->rcb.ctx_id = hwq->ctx_hndl;
-
- dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n",
- __func__, afu, cmd, cmd->rcb.cdb[0], nretry);
-
- rc = afu->send_cmd(afu, cmd);
- if (unlikely(rc)) {
- rc = -ENOBUFS;
- goto out;
- }
-
- rc = wait_resp(afu, cmd);
- switch (rc) {
- case -ETIMEDOUT:
- rc = afu->context_reset(hwq);
- if (rc) {
- /* Delete the command from pending_cmds list */
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
- list_del(&cmd->list);
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
-
- cxlflash_schedule_async_reset(cfg);
- break;
- }
- fallthrough; /* to retry */
- case -EAGAIN:
- if (++nretry < 2)
- goto retry;
- fallthrough; /* to exit */
- default:
- break;
- }
-
- if (rcb->ioasa)
- *rcb->ioasa = cmd->sa;
-out:
- atomic_dec(&afu->cmds_active);
- mutex_unlock(&afu->sync_active);
- kfree(buf);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * cxlflash_afu_sync() - builds and sends an AFU sync command
- * @afu: AFU associated with the host.
- * @ctx: Identifies context requesting sync.
- * @res: Identifies resource requesting sync.
- * @mode: Type of sync to issue (lightweight, heavyweight, global).
- *
- * AFU sync operations are only necessary and allowed when the device is
- * operating normally. When not operating normally, sync requests can occur as
- * part of cleaning up resources associated with an adapter prior to removal.
- * In this scenario, these requests are simply ignored (safe due to the AFU
- * going away).
- *
- * Return:
- * 0 on success, -errno on failure
- */
-int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct sisl_ioarcb rcb = { 0 };
-
- dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n",
- __func__, afu, ctx, res, mode);
-
- rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
- rcb.msi = SISL_MSI_RRQ_UPDATED;
- rcb.timeout = MC_AFU_SYNC_TIMEOUT;
-
- rcb.cdb[0] = SISL_AFU_CMD_SYNC;
- rcb.cdb[1] = mode;
- put_unaligned_be16(ctx, &rcb.cdb[2]);
- put_unaligned_be32(res, &rcb.cdb[4]);
-
- return send_afu_cmd(afu, &rcb);
-}
-
-/**
- * cxlflash_eh_abort_handler() - abort a SCSI command
- * @scp: SCSI command to abort.
- *
- * CXL Flash devices do not support a single command abort. Reset the context
- * as per SISLite specification. Flush any pending commands in the hardware
- * queue before the reset.
- *
- * Return: SUCCESS/FAILED as defined in scsi/scsi.h
- */
-static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp)
-{
- int rc = FAILED;
- struct Scsi_Host *host = scp->device->host;
- struct cxlflash_cfg *cfg = shost_priv(host);
- struct afu_cmd *cmd = sc_to_afuc(scp);
- struct device *dev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
-
- dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
- "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
- scp->device->channel, scp->device->id, scp->device->lun,
- get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
-
-	/*
-	 * When the state is not normal, another reset/reload is in progress.
-	 * Return failed and the mid-layer will invoke the host reset handler.
-	 */
- if (cfg->state != STATE_NORMAL) {
- dev_dbg(dev, "%s: Invalid state for abort, state=%d\n",
- __func__, cfg->state);
- goto out;
- }
-
- rc = afu->context_reset(hwq);
- if (unlikely(rc))
- goto out;
-
- rc = SUCCESS;
-
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * cxlflash_eh_device_reset_handler() - reset a single LUN
- * @scp: SCSI command to send.
- *
- * Return:
- * SUCCESS as defined in scsi/scsi.h
- * FAILED as defined in scsi/scsi.h
- */
-static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
-{
- int rc = SUCCESS;
- struct scsi_device *sdev = scp->device;
- struct Scsi_Host *host = sdev->host;
- struct cxlflash_cfg *cfg = shost_priv(host);
- struct device *dev = &cfg->dev->dev;
- int rcr = 0;
-
- dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__,
- host->host_no, sdev->channel, sdev->id, sdev->lun);
-retry:
- switch (cfg->state) {
- case STATE_NORMAL:
- rcr = send_tmf(cfg, sdev, TMF_LUN_RESET);
- if (unlikely(rcr))
- rc = FAILED;
- break;
- case STATE_RESET:
- wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
- goto retry;
- default:
- rc = FAILED;
- break;
- }
-
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * cxlflash_eh_host_reset_handler() - reset the host adapter
- * @scp: SCSI command from stack identifying host.
- *
- * Following a reset, the state is evaluated again in case an EEH occurred
- * during the reset. In such a scenario, the host reset will either yield
- * until the EEH recovery is complete or return success or failure based
- * upon the current device state.
- *
- * Return:
- * SUCCESS as defined in scsi/scsi.h
- * FAILED as defined in scsi/scsi.h
- */
-static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
-{
- int rc = SUCCESS;
- int rcr = 0;
- struct Scsi_Host *host = scp->device->host;
- struct cxlflash_cfg *cfg = shost_priv(host);
- struct device *dev = &cfg->dev->dev;
-
- dev_dbg(dev, "%s: %d\n", __func__, host->host_no);
-
- switch (cfg->state) {
- case STATE_NORMAL:
- cfg->state = STATE_RESET;
- drain_ioctls(cfg);
- cxlflash_mark_contexts_error(cfg);
- rcr = afu_reset(cfg);
-		if (rcr) {
-			rc = FAILED;
-			cfg->state = STATE_FAILTERM;
-		} else {
-			cfg->state = STATE_NORMAL;
-		}
- wake_up_all(&cfg->reset_waitq);
- ssleep(1);
- fallthrough;
- case STATE_RESET:
- wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
- if (cfg->state == STATE_NORMAL)
- break;
- fallthrough;
- default:
- rc = FAILED;
- break;
- }
-
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * cxlflash_change_queue_depth() - change the queue depth for the device
- * @sdev: SCSI device destined for queue depth change.
- * @qdepth: Requested queue depth value to set.
- *
- * The requested queue depth is capped to the maximum supported value.
- *
- * Return: The actual queue depth set.
- */
-static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
-{
- if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
- qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
-
- scsi_change_queue_depth(sdev, qdepth);
- return sdev->queue_depth;
-}
-
-/**
- * cxlflash_show_port_status() - queries and presents the current port status
- * @port: Desired port for status reporting.
- * @cfg: Internal structure associated with the host.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf or -EINVAL.
- */
-static ssize_t cxlflash_show_port_status(u32 port,
- struct cxlflash_cfg *cfg,
- char *buf)
-{
- struct device *dev = &cfg->dev->dev;
- char *disp_status;
- u64 status;
- __be64 __iomem *fc_port_regs;
-
- WARN_ON(port >= MAX_FC_PORTS);
-
- if (port >= cfg->num_fc_ports) {
- dev_info(dev, "%s: Port %d not supported on this card.\n",
- __func__, port);
- return -EINVAL;
- }
-
- fc_port_regs = get_fc_port_regs(cfg, port);
- status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
- status &= FC_MTIP_STATUS_MASK;
-
- if (status == FC_MTIP_STATUS_ONLINE)
- disp_status = "online";
- else if (status == FC_MTIP_STATUS_OFFLINE)
- disp_status = "offline";
- else
- disp_status = "unknown";
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
-}
-
-/**
- * port0_show() - queries and presents the current status of port 0
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port0_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_status(0, cfg, buf);
-}
-
-/**
- * port1_show() - queries and presents the current status of port 1
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port1_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_status(1, cfg, buf);
-}
-
-/**
- * port2_show() - queries and presents the current status of port 2
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port2_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_status(2, cfg, buf);
-}
-
-/**
- * port3_show() - queries and presents the current status of port 3
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port3_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_status(3, cfg, buf);
-}
-
-/**
- * lun_mode_show() - presents the current LUN mode of the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the LUN mode.
- * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t lun_mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct afu *afu = cfg->afu;
-
- return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
-}
-
-/**
- * lun_mode_store() - sets the LUN mode of the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the LUN mode.
- * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
- * @count: Length of data residing in @buf.
- *
- * The CXL Flash AFU supports a dummy LUN mode where the external
- * links and storage are not required. Space on the FPGA is used
- * to create 1 or 2 small LUNs which are presented to the system
- * as if they were a normal storage device. This feature is useful
- * during development and also provides manufacturing with a way
- * to test the AFU without an actual device.
- *
- * 0 = external LUN[s] (default)
- * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
- * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
- * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
- * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
- *
- * Return: @count (invalid or unchanged values are silently ignored).
- */
-static ssize_t lun_mode_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct cxlflash_cfg *cfg = shost_priv(shost);
- struct afu *afu = cfg->afu;
- int rc;
- u32 lun_mode;
-
- rc = kstrtouint(buf, 10, &lun_mode);
- if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
- afu->internal_lun = lun_mode;
-
- /*
-		 * When configured for internal LUN, there is only one channel,
-		 * channel number 0; otherwise max_channel is one less than the
-		 * number of FC ports on this card.
- */
- if (afu->internal_lun)
- shost->max_channel = 0;
- else
- shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);
-
- afu_reset(cfg);
- scsi_scan_host(cfg->host);
- }
-
- return count;
-}
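-
-/*
- * Editorial sketch (user-space illustration; the host number in the path
- * is hypothetical and varies per system): driving the lun_mode attribute
- * defined above from a user program.
- */
-#include <fcntl.h>
-#include <string.h>
-#include <unistd.h>
-
-static int set_lun_mode(const char *mode)	/* e.g. "1" */
-{
-	int fd = open("/sys/class/scsi_host/host0/lun_mode", O_WRONLY);
-	ssize_t len = (ssize_t)strlen(mode);
-	int rc = -1;
-
-	if (fd >= 0) {
-		rc = (write(fd, mode, len) == len) ? 0 : -1;
-		close(fd);
-	}
-	return rc;
-}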
-
-/**
- * ioctl_version_show() - presents the current ioctl version of the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the ioctl version.
- * @buf: Buffer of length PAGE_SIZE to report back the ioctl version.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t ioctl_version_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- ssize_t bytes = 0;
-
- bytes = scnprintf(buf, PAGE_SIZE,
- "disk: %u\n", DK_CXLFLASH_VERSION_0);
- bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
- "host: %u\n", HT_CXLFLASH_VERSION_0);
-
- return bytes;
-}
-
-/**
- * cxlflash_show_port_lun_table() - queries and presents the port LUN table
- * @port: Desired port for status reporting.
- * @cfg: Internal structure associated with the host.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf or -EINVAL.
- */
-static ssize_t cxlflash_show_port_lun_table(u32 port,
- struct cxlflash_cfg *cfg,
- char *buf)
-{
- struct device *dev = &cfg->dev->dev;
- __be64 __iomem *fc_port_luns;
- int i;
- ssize_t bytes = 0;
-
- WARN_ON(port >= MAX_FC_PORTS);
-
- if (port >= cfg->num_fc_ports) {
- dev_info(dev, "%s: Port %d not supported on this card.\n",
- __func__, port);
- return -EINVAL;
- }
-
- fc_port_luns = get_fc_port_luns(cfg, port);
-
- for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
- bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
- "%03d: %016llx\n",
- i, readq_be(&fc_port_luns[i]));
- return bytes;
-}
-
-/**
- * port0_lun_table_show() - presents the current LUN table of port 0
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port0_lun_table_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_lun_table(0, cfg, buf);
-}
-
-/**
- * port1_lun_table_show() - presents the current LUN table of port 1
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port1_lun_table_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_lun_table(1, cfg, buf);
-}
-
-/**
- * port2_lun_table_show() - presents the current LUN table of port 2
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port2_lun_table_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_lun_table(2, cfg, buf);
-}
-
-/**
- * port3_lun_table_show() - presents the current LUN table of port 3
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port3_lun_table_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_lun_table(3, cfg, buf);
-}
-
-/**
- * irqpoll_weight_show() - presents the current IRQ poll weight for the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the IRQ poll weight.
- * @buf: Buffer of length PAGE_SIZE to report back the current IRQ poll
- * weight in ASCII.
- *
- * An IRQ poll weight of 0 indicates polling is disabled.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t irqpoll_weight_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct afu *afu = cfg->afu;
-
- return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
-}
-
-/**
- * irqpoll_weight_store() - sets the current IRQ poll weight for the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the IRQ poll weight.
- * @buf: Buffer of length PAGE_SIZE containing the desired IRQ poll
- * weight in ASCII.
- * @count: Length of data residing in @buf.
- *
- * An IRQ poll weight of 0 indicates polling is disabled.
- *
- * Return: @count on success, -EINVAL on an invalid or unchanged weight.
- */
-static ssize_t irqpoll_weight_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct device *cfgdev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- struct hwq *hwq;
- u32 weight;
- int rc, i;
-
- rc = kstrtouint(buf, 10, &weight);
- if (rc)
- return -EINVAL;
-
- if (weight > 256) {
- dev_info(cfgdev,
- "Invalid IRQ poll weight. It must be 256 or less.\n");
- return -EINVAL;
- }
-
- if (weight == afu->irqpoll_weight) {
-		dev_info(cfgdev,
-			 "Specified IRQ poll weight matches the current value.\n");
- return -EINVAL;
- }
-
- if (afu_is_irqpoll_enabled(afu)) {
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- irq_poll_disable(&hwq->irqpoll);
- }
- }
-
- afu->irqpoll_weight = weight;
-
- if (weight > 0) {
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
- }
- }
-
- return count;
-}
-
-/**
- * num_hwqs_show() - presents the number of hardware queues for the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the number of hardware queues.
- * @buf: Buffer of length PAGE_SIZE to report back the number of hardware
- * queues in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t num_hwqs_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct afu *afu = cfg->afu;
-
- return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
-}
-
-/**
- * num_hwqs_store() - sets the number of hardware queues for the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the number of hardware queues.
- * @buf: Buffer of length PAGE_SIZE containing the number of hardware
- * queues in ASCII.
- * @count: Length of data residing in @buf.
- *
- * n > 0: num_hwqs = n
- * n = 0: num_hwqs = num_online_cpus()
- * n < 0: num_hwqs = num_online_cpus() / abs(n)
- *
- * Return: @count on success, -EINVAL when @buf does not parse as an integer.
- */
-static ssize_t num_hwqs_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct afu *afu = cfg->afu;
- int rc;
- int nhwqs, num_hwqs;
-
- rc = kstrtoint(buf, 10, &nhwqs);
- if (rc)
- return -EINVAL;
-
- if (nhwqs >= 1)
- num_hwqs = nhwqs;
- else if (nhwqs == 0)
- num_hwqs = num_online_cpus();
- else
- num_hwqs = num_online_cpus() / abs(nhwqs);
-
- afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS);
- WARN_ON_ONCE(afu->desired_hwqs == 0);
-
-retry:
- switch (cfg->state) {
- case STATE_NORMAL:
- cfg->state = STATE_RESET;
- drain_ioctls(cfg);
- cxlflash_mark_contexts_error(cfg);
- rc = afu_reset(cfg);
- if (rc)
- cfg->state = STATE_FAILTERM;
- else
- cfg->state = STATE_NORMAL;
- wake_up_all(&cfg->reset_waitq);
- break;
- case STATE_RESET:
- wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
- if (cfg->state == STATE_NORMAL)
- goto retry;
- fallthrough;
- default:
- /* Ideally should not happen */
- dev_err(dev, "%s: Device is not ready, state=%d\n",
- __func__, cfg->state);
- break;
- }
-
- return count;
-}
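-
-/*
- * Editorial sketch (illustration only): the n -> num_hwqs mapping
- * documented above, capped at CXLFLASH_MAX_HWQS.
- */
-static int nhwqs_to_count(int n)
-{
-	int num;
-
-	if (n > 0)
-		num = n;
-	else if (n == 0)
-		num = num_online_cpus();
-	else
-		num = num_online_cpus() / abs(n);
-
-	return min(num, CXLFLASH_MAX_HWQS);
-}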
-
-static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
-
-/**
- * hwq_mode_show() - presents the HWQ steering mode for the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the HWQ steering mode.
- * @buf: Buffer of length PAGE_SIZE to report back the HWQ steering mode
- * as a character string.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t hwq_mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct afu *afu = cfg->afu;
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
-}
-
-/**
- * hwq_mode_store() - sets the HWQ steering mode for the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the HWQ steering mode.
- * @buf: Buffer of length PAGE_SIZE containing the HWQ steering mode
- * as a character string.
- * @count: Length of data residing in @buf.
- *
- * rr = Round-Robin
- * tag = Block MQ Tagging
- * cpu = CPU Affinity
- *
- * Return: @count on success, -EINVAL on an unrecognized mode.
- */
-static ssize_t hwq_mode_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct cxlflash_cfg *cfg = shost_priv(shost);
- struct device *cfgdev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- int i;
- u32 mode = MAX_HWQ_MODE;
-
- for (i = 0; i < MAX_HWQ_MODE; i++) {
- if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) {
- mode = i;
- break;
- }
- }
-
- if (mode >= MAX_HWQ_MODE) {
- dev_info(cfgdev, "Invalid HWQ steering mode.\n");
- return -EINVAL;
- }
-
- afu->hwq_mode = mode;
-
- return count;
-}
-
-/**
- * mode_show() - presents the current mode of the device
- * @dev: Generic device associated with the device.
- * @attr: Device attribute representing the device mode.
- * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
-
- return scnprintf(buf, PAGE_SIZE, "%s\n",
- sdev->hostdata ? "superpipe" : "legacy");
-}
-
-/*
- * Host attributes
- */
-static DEVICE_ATTR_RO(port0);
-static DEVICE_ATTR_RO(port1);
-static DEVICE_ATTR_RO(port2);
-static DEVICE_ATTR_RO(port3);
-static DEVICE_ATTR_RW(lun_mode);
-static DEVICE_ATTR_RO(ioctl_version);
-static DEVICE_ATTR_RO(port0_lun_table);
-static DEVICE_ATTR_RO(port1_lun_table);
-static DEVICE_ATTR_RO(port2_lun_table);
-static DEVICE_ATTR_RO(port3_lun_table);
-static DEVICE_ATTR_RW(irqpoll_weight);
-static DEVICE_ATTR_RW(num_hwqs);
-static DEVICE_ATTR_RW(hwq_mode);
-
-static struct attribute *cxlflash_host_attrs[] = {
- &dev_attr_port0.attr,
- &dev_attr_port1.attr,
- &dev_attr_port2.attr,
- &dev_attr_port3.attr,
- &dev_attr_lun_mode.attr,
- &dev_attr_ioctl_version.attr,
- &dev_attr_port0_lun_table.attr,
- &dev_attr_port1_lun_table.attr,
- &dev_attr_port2_lun_table.attr,
- &dev_attr_port3_lun_table.attr,
- &dev_attr_irqpoll_weight.attr,
- &dev_attr_num_hwqs.attr,
- &dev_attr_hwq_mode.attr,
- NULL
-};
-
-ATTRIBUTE_GROUPS(cxlflash_host);
-
-/*
- * Device attributes
- */
-static DEVICE_ATTR_RO(mode);
-
-static struct attribute *cxlflash_dev_attrs[] = {
- &dev_attr_mode.attr,
- NULL
-};
-
-ATTRIBUTE_GROUPS(cxlflash_dev);
-
-/*
- * Host template
- */
-static struct scsi_host_template driver_template = {
- .module = THIS_MODULE,
- .name = CXLFLASH_ADAPTER_NAME,
- .info = cxlflash_driver_info,
- .ioctl = cxlflash_ioctl,
- .proc_name = CXLFLASH_NAME,
- .queuecommand = cxlflash_queuecommand,
- .eh_abort_handler = cxlflash_eh_abort_handler,
- .eh_device_reset_handler = cxlflash_eh_device_reset_handler,
- .eh_host_reset_handler = cxlflash_eh_host_reset_handler,
- .change_queue_depth = cxlflash_change_queue_depth,
- .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
- .can_queue = CXLFLASH_MAX_CMDS,
- .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
- .this_id = -1,
- .sg_tablesize = 1, /* No scatter gather support */
- .max_sectors = CXLFLASH_MAX_SECTORS,
- .shost_groups = cxlflash_host_groups,
- .sdev_groups = cxlflash_dev_groups,
-};
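A note on the .cmd_size entry above: it reserves a per-command private area large enough to hold a struct afu_cmd at its natural alignment. Below is a minimal sketch of how such an area is recovered from a scsi_cmnd; the driver keeps an equivalent helper in its common header, and the function name used here is illustrative only.

/* Sketch: locate the aligned AFU command behind the scsi_cmnd private data. */
static inline struct afu_cmd *example_sc_to_afuc(struct scsi_cmnd *sc)
{
	return PTR_ALIGN(scsi_cmd_priv(sc), __alignof__(struct afu_cmd));
}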
-
-/*
- * Device dependent values
- */
-static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
- CXLFLASH_WWPN_VPD_REQUIRED };
-static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
- CXLFLASH_NOTIFY_SHUTDOWN };
-static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
- (CXLFLASH_NOTIFY_SHUTDOWN |
- CXLFLASH_OCXL_DEV) };
-
-/*
- * PCI device binding table
- */
-static struct pci_device_id cxlflash_pci_table[] = {
- {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
- {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
- {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
- {}
-};
-
-MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
-
-/**
- * cxlflash_worker_thread() - work thread handler for the AFU
- * @work: Work structure contained within cxlflash associated with host.
- *
- * Handles the following events:
- * - Link reset which cannot be performed on interrupt context due to
- * blocking up to a few seconds
- * - Rescan the host
- */
-static void cxlflash_worker_thread(struct work_struct *work)
-{
- struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
- work_q);
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- __be64 __iomem *fc_port_regs;
- int port;
- ulong lock_flags;
-
- /* Avoid MMIO if the device has failed */
-
- if (cfg->state != STATE_NORMAL)
- return;
-
- spin_lock_irqsave(cfg->host->host_lock, lock_flags);
-
- if (cfg->lr_state == LINK_RESET_REQUIRED) {
- port = cfg->lr_port;
- if (port < 0)
- dev_err(dev, "%s: invalid port index %d\n",
- __func__, port);
- else {
- spin_unlock_irqrestore(cfg->host->host_lock,
- lock_flags);
-
- /* The reset can block... */
- fc_port_regs = get_fc_port_regs(cfg, port);
- afu_link_reset(afu, port, fc_port_regs);
- spin_lock_irqsave(cfg->host->host_lock, lock_flags);
- }
-
- cfg->lr_state = LINK_RESET_COMPLETE;
- }
-
- spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
-
- if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
- scsi_scan_host(cfg->host);
-}
-
-/**
- * cxlflash_chr_open() - character device open handler
- * @inode: Device inode associated with this character device.
- * @file: File pointer for this device.
- *
- * Only users with admin privileges are allowed to open the character device.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_chr_open(struct inode *inode, struct file *file)
-{
- struct cxlflash_cfg *cfg;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev);
- file->private_data = cfg;
-
- return 0;
-}
-
-/**
- * decode_hioctl() - translates encoded host ioctl to easily identifiable string
- * @cmd: The host ioctl command to decode.
- *
- * Return: A string identifying the decoded host ioctl.
- */
-static char *decode_hioctl(unsigned int cmd)
-{
- switch (cmd) {
- case HT_CXLFLASH_LUN_PROVISION:
- return __stringify_1(HT_CXLFLASH_LUN_PROVISION);
- }
-
- return "UNKNOWN";
-}
-
-/**
- * cxlflash_lun_provision() - host LUN provisioning handler
- * @cfg: Internal structure associated with the host.
- * @arg: Kernel copy of userspace ioctl data structure.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_lun_provision(struct cxlflash_cfg *cfg, void *arg)
-{
- struct ht_cxlflash_lun_provision *lunprov = arg;
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct sisl_ioarcb rcb;
- struct sisl_ioasa asa;
- __be64 __iomem *fc_port_regs;
- u16 port = lunprov->port;
- u16 scmd = lunprov->hdr.subcmd;
- u16 type;
- u64 reg;
- u64 size;
- u64 lun_id;
- int rc = 0;
-
- if (!afu_is_lun_provision(afu)) {
- rc = -ENOTSUPP;
- goto out;
- }
-
- if (port >= cfg->num_fc_ports) {
- rc = -EINVAL;
- goto out;
- }
-
- switch (scmd) {
- case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN:
- type = SISL_AFU_LUN_PROVISION_CREATE;
- size = lunprov->size;
- lun_id = 0;
- break;
- case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN:
- type = SISL_AFU_LUN_PROVISION_DELETE;
- size = 0;
- lun_id = lunprov->lun_id;
- break;
- case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT:
- fc_port_regs = get_fc_port_regs(cfg, port);
-
- reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]);
- lunprov->max_num_luns = reg;
- reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]);
- lunprov->cur_num_luns = reg;
- reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]);
- lunprov->max_cap_port = reg;
- reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]);
- lunprov->cur_cap_port = reg;
-
- goto out;
- default:
- rc = -EINVAL;
- goto out;
- }
-
- memset(&rcb, 0, sizeof(rcb));
- memset(&asa, 0, sizeof(asa));
- rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
- rcb.lun_id = lun_id;
- rcb.msi = SISL_MSI_RRQ_UPDATED;
- rcb.timeout = MC_LUN_PROV_TIMEOUT;
- rcb.ioasa = &asa;
-
- rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION;
- rcb.cdb[1] = type;
- rcb.cdb[2] = port;
- put_unaligned_be64(size, &rcb.cdb[8]);
-
- rc = send_afu_cmd(afu, &rcb);
- if (rc) {
- dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
- __func__, rc, asa.ioasc, asa.afu_extra);
- goto out;
- }
-
- if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) {
- lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo;
- memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid));
- }
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * cxlflash_afu_debug() - host AFU debug handler
- * @cfg: Internal structure associated with the host.
- * @arg: Kernel copy of userspace ioctl data structure.
- *
- * For debug requests requiring a data buffer, always provide an aligned
- * (cache line) buffer to the AFU to appease any alignment requirements.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_afu_debug(struct cxlflash_cfg *cfg, void *arg)
-{
- struct ht_cxlflash_afu_debug *afu_dbg = arg;
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct sisl_ioarcb rcb;
- struct sisl_ioasa asa;
- char *buf = NULL;
- char *kbuf = NULL;
- void __user *ubuf = (__force void __user *)afu_dbg->data_ea;
- u16 req_flags = SISL_REQ_FLAGS_AFU_CMD;
- u32 ulen = afu_dbg->data_len;
- bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE;
- int rc = 0;
-
- if (!afu_is_afu_debug(afu)) {
- rc = -ENOTSUPP;
- goto out;
- }
-
- if (ulen) {
- req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN;
-
- if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) {
- rc = -EINVAL;
- goto out;
- }
-
- buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL);
- if (unlikely(!buf)) {
- rc = -ENOMEM;
- goto out;
- }
-
- kbuf = PTR_ALIGN(buf, cache_line_size());
-
- if (is_write) {
- req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
-
- if (copy_from_user(kbuf, ubuf, ulen)) {
- rc = -EFAULT;
- goto out;
- }
- }
- }
-
- memset(&rcb, 0, sizeof(rcb));
- memset(&asa, 0, sizeof(asa));
-
- rcb.req_flags = req_flags;
- rcb.msi = SISL_MSI_RRQ_UPDATED;
- rcb.timeout = MC_AFU_DEBUG_TIMEOUT;
- rcb.ioasa = &asa;
-
- if (ulen) {
- rcb.data_len = ulen;
- rcb.data_ea = (uintptr_t)kbuf;
- }
-
- rcb.cdb[0] = SISL_AFU_CMD_DEBUG;
- memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd,
- HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN);
-
- rc = send_afu_cmd(afu, &rcb);
- if (rc) {
- dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
- __func__, rc, asa.ioasc, asa.afu_extra);
- goto out;
- }
-
- if (ulen && !is_write) {
- if (copy_to_user(ubuf, kbuf, ulen))
- rc = -EFAULT;
- }
-out:
- kfree(buf);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * cxlflash_chr_ioctl() - character device IOCTL handler
- * @file: File pointer for this device.
- * @cmd: IOCTL command.
- * @arg: Userspace ioctl data structure.
- *
- * A read/write semaphore is used to implement a 'drain' of currently
- * running ioctls. The read semaphore is taken at the beginning of each
- * ioctl thread and released upon concluding execution. Additionally the
- * semaphore should be released and then reacquired in any ioctl execution
- * path which will wait for an event to occur that is outside the scope of
- * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
- * a thread simply needs to acquire the write semaphore.
- *
- * Return: 0 on success, -errno on failure
- */
-static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- typedef int (*hioctl) (struct cxlflash_cfg *, void *);
-
- struct cxlflash_cfg *cfg = file->private_data;
- struct device *dev = &cfg->dev->dev;
- char buf[sizeof(union cxlflash_ht_ioctls)];
- void __user *uarg = (void __user *)arg;
- struct ht_cxlflash_hdr *hdr;
- size_t size = 0;
- bool known_ioctl = false;
- int idx = 0;
- int rc = 0;
- hioctl do_ioctl = NULL;
-
- static const struct {
- size_t size;
- hioctl ioctl;
- } ioctl_tbl[] = { /* NOTE: order matters here */
- { sizeof(struct ht_cxlflash_lun_provision), cxlflash_lun_provision },
- { sizeof(struct ht_cxlflash_afu_debug), cxlflash_afu_debug },
- };
-
- /* Hold read semaphore so we can drain if needed */
- down_read(&cfg->ioctl_rwsem);
-
- dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%lu\n",
- __func__, cmd, idx, sizeof(ioctl_tbl));
-
- switch (cmd) {
- case HT_CXLFLASH_LUN_PROVISION:
- case HT_CXLFLASH_AFU_DEBUG:
- known_ioctl = true;
- idx = _IOC_NR(cmd) - _IOC_NR(HT_CXLFLASH_LUN_PROVISION);
- size = ioctl_tbl[idx].size;
- do_ioctl = ioctl_tbl[idx].ioctl;
-
- if (likely(do_ioctl))
- break;
-
- fallthrough;
- default:
- rc = -EINVAL;
- goto out;
- }
-
- if (unlikely(copy_from_user(&buf, uarg, size))) {
- dev_err(dev, "%s: copy_from_user() fail "
- "size=%lu cmd=%d (%s) uarg=%p\n",
- __func__, size, cmd, decode_hioctl(cmd), uarg);
- rc = -EFAULT;
- goto out;
- }
-
- hdr = (struct ht_cxlflash_hdr *)&buf;
- if (hdr->version != HT_CXLFLASH_VERSION_0) {
- dev_dbg(dev, "%s: Version %u not supported for %s\n",
- __func__, hdr->version, decode_hioctl(cmd));
- rc = -EINVAL;
- goto out;
- }
-
- if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) {
- dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
- rc = -EINVAL;
- goto out;
- }
-
- rc = do_ioctl(cfg, (void *)&buf);
- if (likely(!rc))
- if (unlikely(copy_to_user(uarg, &buf, size))) {
- dev_err(dev, "%s: copy_to_user() fail "
- "size=%lu cmd=%d (%s) uarg=%p\n",
- __func__, size, cmd, decode_hioctl(cmd), uarg);
- rc = -EFAULT;
- }
-
- /* fall through to exit */
-
-out:
- up_read(&cfg->ioctl_rwsem);
- if (unlikely(rc && known_ioctl))
- dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
- __func__, decode_hioctl(cmd), cmd, rc);
- else
- dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
- __func__, decode_hioctl(cmd), cmd, rc);
- return rc;
-}
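As the comment block above describes, draining in-flight ioctls only requires taking the write side of the same semaphore once every reader has finished. A minimal sketch of that drain follows; the driver's drain_ioctls() helper, defined earlier in this file, is essentially this.

/* Sketch: block until all in-flight ioctls (readers) have released the rwsem. */
static void example_drain_ioctls(struct cxlflash_cfg *cfg)
{
	down_write(&cfg->ioctl_rwsem);	/* waits for every reader to drop the lock */
	up_write(&cfg->ioctl_rwsem);	/* new ioctls may now proceed */
}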
-
-/*
- * Character device file operations
- */
-static const struct file_operations cxlflash_chr_fops = {
- .owner = THIS_MODULE,
- .open = cxlflash_chr_open,
- .unlocked_ioctl = cxlflash_chr_ioctl,
- .compat_ioctl = compat_ptr_ioctl,
-};
-
-/**
- * init_chrdev() - initialize the character device for the host
- * @cfg: Internal structure associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_chrdev(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
- struct device *char_dev;
- dev_t devno;
- int minor;
- int rc = 0;
-
- minor = cxlflash_get_minor();
- if (unlikely(minor < 0)) {
- dev_err(dev, "%s: Exhausted allowed adapters\n", __func__);
- rc = -ENOSPC;
- goto out;
- }
-
- devno = MKDEV(cxlflash_major, minor);
- cdev_init(&cfg->cdev, &cxlflash_chr_fops);
-
- rc = cdev_add(&cfg->cdev, devno, 1);
- if (rc) {
- dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc);
- goto err1;
- }
-
- char_dev = device_create(&cxlflash_class, NULL, devno,
- NULL, "cxlflash%d", minor);
- if (IS_ERR(char_dev)) {
- rc = PTR_ERR(char_dev);
- dev_err(dev, "%s: device_create failed rc=%d\n",
- __func__, rc);
- goto err2;
- }
-
- cfg->chardev = char_dev;
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-err2:
- cdev_del(&cfg->cdev);
-err1:
- cxlflash_put_minor(minor);
- goto out;
-}
-
-/**
- * cxlflash_probe() - PCI entry point to add host
- * @pdev: PCI device associated with the host.
- * @dev_id: PCI device id associated with device.
- *
- * The device will initially start out in a 'probing' state and
- * transition to the 'normal' state at the end of a successful
- * probe. Should an EEH event occur during probe, the notification
- * thread (error_detected()) will wait until the probe handler
- * is nearly complete. At that time, the device will be moved to
- * a 'probed' state and the EEH thread woken up to drive the slot
- * reset and recovery (device moves to 'normal' state). Meanwhile,
- * the probe will be allowed to exit successfully.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_probe(struct pci_dev *pdev,
- const struct pci_device_id *dev_id)
-{
- struct Scsi_Host *host;
- struct cxlflash_cfg *cfg = NULL;
- struct device *dev = &pdev->dev;
- struct dev_dependent_vals *ddv;
- int rc = 0;
- int k;
-
- dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
- __func__, pdev->irq);
-
- ddv = (struct dev_dependent_vals *)dev_id->driver_data;
- driver_template.max_sectors = ddv->max_sectors;
-
- host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
- if (!host) {
- dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
- rc = -ENOMEM;
- goto out;
- }
-
- host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
- host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
- host->unique_id = host->host_no;
- host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
-
- cfg = shost_priv(host);
- cfg->state = STATE_PROBING;
- cfg->host = host;
- rc = alloc_mem(cfg);
- if (rc) {
- dev_err(dev, "%s: alloc_mem failed\n", __func__);
- rc = -ENOMEM;
- scsi_host_put(cfg->host);
- goto out;
- }
-
- cfg->init_state = INIT_STATE_NONE;
- cfg->dev = pdev;
- cfg->cxl_fops = cxlflash_cxl_fops;
- cfg->ops = cxlflash_assign_ops(ddv);
- WARN_ON_ONCE(!cfg->ops);
-
- /*
- * Promoted LUNs move to the top of the LUN table. The rest stay on
- * the bottom half. The bottom half grows from the end (index = 255),
- * whereas the top half grows from the beginning (index = 0).
- *
- * Initialize the last LUN index for all possible ports.
- */
- cfg->promote_lun_index = 0;
-
- for (k = 0; k < MAX_FC_PORTS; k++)
- cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;
-
- cfg->dev_id = (struct pci_device_id *)dev_id;
-
- init_waitqueue_head(&cfg->tmf_waitq);
- init_waitqueue_head(&cfg->reset_waitq);
-
- INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
- cfg->lr_state = LINK_RESET_INVALID;
- cfg->lr_port = -1;
- spin_lock_init(&cfg->tmf_slock);
- mutex_init(&cfg->ctx_tbl_list_mutex);
- mutex_init(&cfg->ctx_recovery_mutex);
- init_rwsem(&cfg->ioctl_rwsem);
- INIT_LIST_HEAD(&cfg->ctx_err_recovery);
- INIT_LIST_HEAD(&cfg->lluns);
-
- pci_set_drvdata(pdev, cfg);
-
- rc = init_pci(cfg);
- if (rc) {
- dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
- goto out_remove;
- }
- cfg->init_state = INIT_STATE_PCI;
-
- cfg->afu_cookie = cfg->ops->create_afu(pdev);
- if (unlikely(!cfg->afu_cookie)) {
- dev_err(dev, "%s: create_afu failed\n", __func__);
- rc = -ENOMEM;
- goto out_remove;
- }
-
- rc = init_afu(cfg);
- if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
- dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
- goto out_remove;
- }
- cfg->init_state = INIT_STATE_AFU;
-
- rc = init_scsi(cfg);
- if (rc) {
- dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
- goto out_remove;
- }
- cfg->init_state = INIT_STATE_SCSI;
-
- rc = init_chrdev(cfg);
- if (rc) {
- dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc);
- goto out_remove;
- }
- cfg->init_state = INIT_STATE_CDEV;
-
- if (wq_has_sleeper(&cfg->reset_waitq)) {
- cfg->state = STATE_PROBED;
- wake_up_all(&cfg->reset_waitq);
- } else
- cfg->state = STATE_NORMAL;
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-
-out_remove:
- cfg->state = STATE_PROBED;
- cxlflash_remove(pdev);
- goto out;
-}
-
-/**
- * cxlflash_pci_error_detected() - called when a PCI error is detected
- * @pdev: PCI device struct.
- * @state: PCI channel state.
- *
- * When an EEH occurs during an active reset, wait until the reset is
- * complete and then take action based upon the device state.
- *
- * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
- */
-static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- int rc = 0;
- struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
- struct device *dev = &cfg->dev->dev;
-
- dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
-
- switch (state) {
- case pci_channel_io_frozen:
- wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
- cfg->state != STATE_PROBING);
- if (cfg->state == STATE_FAILTERM)
- return PCI_ERS_RESULT_DISCONNECT;
-
- cfg->state = STATE_RESET;
- scsi_block_requests(cfg->host);
- drain_ioctls(cfg);
- rc = cxlflash_mark_contexts_error(cfg);
- if (unlikely(rc))
- dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
- __func__, rc);
- term_afu(cfg);
- return PCI_ERS_RESULT_NEED_RESET;
- case pci_channel_io_perm_failure:
- cfg->state = STATE_FAILTERM;
- wake_up_all(&cfg->reset_waitq);
- scsi_unblock_requests(cfg->host);
- return PCI_ERS_RESULT_DISCONNECT;
- default:
- break;
- }
- return PCI_ERS_RESULT_NEED_RESET;
-}
-
-/**
- * cxlflash_pci_slot_reset() - called when PCI slot has been reset
- * @pdev: PCI device struct.
- *
- * This routine is called by the pci error recovery code after the PCI
- * slot has been reset, just before we should resume normal operations.
- *
- * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
- */
-static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
-{
- int rc = 0;
- struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
- struct device *dev = &cfg->dev->dev;
-
- dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
-
- rc = init_afu(cfg);
- if (unlikely(rc)) {
- dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-/**
- * cxlflash_pci_resume() - called when normal operation can resume
- * @pdev: PCI device struct
- */
-static void cxlflash_pci_resume(struct pci_dev *pdev)
-{
- struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
- struct device *dev = &cfg->dev->dev;
-
- dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
-
- cfg->state = STATE_NORMAL;
- wake_up_all(&cfg->reset_waitq);
- scsi_unblock_requests(cfg->host);
-}
-
-/**
- * cxlflash_devnode() - provides devtmpfs for devices in the cxlflash class
- * @dev: Character device.
- * @mode: Mode that can be used to verify access.
- *
- * Return: Allocated string describing the devtmpfs structure.
- */
-static char *cxlflash_devnode(const struct device *dev, umode_t *mode)
-{
- return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev));
-}
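Taken together with cxlflash_chr_open() and cxlflash_chr_ioctl() above, a privileged user-space caller reaches these handlers roughly as sketched below. The include path is an assumption about where the cxlflash ioctl uapi header is installed; the structure and field names shown match those consumed by cxlflash_lun_provision().

/* Illustrative user-space sketch: query LUN provisioning limits on port 0. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/cxlflash_ioctl.h>	/* assumed install location of the uapi header */

static int example_query_port0(void)
{
	struct ht_cxlflash_lun_provision lunprov;
	int rc, fd;

	/* Node created by cxlflash_devnode(); open requires CAP_SYS_ADMIN. */
	fd = open("/dev/cxlflash/cxlflash0", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&lunprov, 0, sizeof(lunprov));
	lunprov.hdr.version = HT_CXLFLASH_VERSION_0;
	lunprov.hdr.subcmd = HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT;
	lunprov.port = 0;

	/* On success the kernel copies back max/cur LUN counts and capacities. */
	rc = ioctl(fd, HT_CXLFLASH_LUN_PROVISION, &lunprov);
	close(fd);
	return rc;
}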
-
-/**
- * cxlflash_class_init() - create character device class
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_class_init(void)
-{
- dev_t devno;
- int rc = 0;
-
- rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash");
- if (unlikely(rc)) {
- pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc);
- goto out;
- }
-
- cxlflash_major = MAJOR(devno);
-
- rc = class_register(&cxlflash_class);
- if (rc) {
- pr_err("%s: class_create failed rc=%d\n", __func__, rc);
- goto err;
- }
-
-out:
- pr_debug("%s: returning rc=%d\n", __func__, rc);
- return rc;
-err:
- unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
- goto out;
-}
-
-/**
- * cxlflash_class_exit() - destroy character device class
- */
-static void cxlflash_class_exit(void)
-{
- dev_t devno = MKDEV(cxlflash_major, 0);
-
- class_unregister(&cxlflash_class);
- unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
-}
-
-static const struct pci_error_handlers cxlflash_err_handler = {
- .error_detected = cxlflash_pci_error_detected,
- .slot_reset = cxlflash_pci_slot_reset,
- .resume = cxlflash_pci_resume,
-};
-
-/*
- * PCI device structure
- */
-static struct pci_driver cxlflash_driver = {
- .name = CXLFLASH_NAME,
- .id_table = cxlflash_pci_table,
- .probe = cxlflash_probe,
- .remove = cxlflash_remove,
- .shutdown = cxlflash_remove,
- .err_handler = &cxlflash_err_handler,
-};
-
-/**
- * init_cxlflash() - module entry point
- *
- * Return: 0 on success, -errno on failure
- */
-static int __init init_cxlflash(void)
-{
- int rc;
-
- check_sizes();
- cxlflash_list_init();
- rc = cxlflash_class_init();
- if (unlikely(rc))
- goto out;
-
- rc = pci_register_driver(&cxlflash_driver);
- if (unlikely(rc))
- goto err;
-out:
- pr_debug("%s: returning rc=%d\n", __func__, rc);
- return rc;
-err:
- cxlflash_class_exit();
- goto out;
-}
-
-/**
- * exit_cxlflash() - module exit point
- */
-static void __exit exit_cxlflash(void)
-{
- cxlflash_term_global_luns();
- cxlflash_free_errpage();
-
- pci_unregister_driver(&cxlflash_driver);
- cxlflash_class_exit();
-}
-
-module_init(init_cxlflash);
-module_exit(exit_cxlflash);
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
deleted file mode 100644
index 0bfb98effce0..000000000000
--- a/drivers/scsi/cxlflash/main.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#ifndef _CXLFLASH_MAIN_H
-#define _CXLFLASH_MAIN_H
-
-#include <linux/list.h>
-#include <linux/types.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_device.h>
-
-#include "backend.h"
-
-#define CXLFLASH_NAME "cxlflash"
-#define CXLFLASH_ADAPTER_NAME "IBM POWER CXL Flash Adapter"
-#define CXLFLASH_MAX_ADAPTERS 32
-
-#define PCI_DEVICE_ID_IBM_CORSA 0x04F0
-#define PCI_DEVICE_ID_IBM_FLASH_GT 0x0600
-#define PCI_DEVICE_ID_IBM_BRIARD 0x0624
-
-/* Since there is only one target, make it 0 */
-#define CXLFLASH_TARGET 0
-#define CXLFLASH_MAX_CDB_LEN 16
-
-/* Really only one target per bus since the Texan is directly attached */
-#define CXLFLASH_MAX_NUM_TARGETS_PER_BUS 1
-#define CXLFLASH_MAX_NUM_LUNS_PER_TARGET 65536
-
-#define CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT (120 * HZ)
-
-/* FC defines */
-#define FC_MTIP_CMDCONFIG 0x010
-#define FC_MTIP_STATUS 0x018
-#define FC_MAX_NUM_LUNS 0x080 /* Max LUNs host can provision for port */
-#define FC_CUR_NUM_LUNS 0x088 /* Cur number LUNs provisioned for port */
-#define FC_MAX_CAP_PORT 0x090 /* Max capacity all LUNs for port (4K blocks) */
-#define FC_CUR_CAP_PORT 0x098 /* Cur capacity all LUNs for port (4K blocks) */
-
-#define FC_PNAME 0x300
-#define FC_CONFIG 0x320
-#define FC_CONFIG2 0x328
-#define FC_STATUS 0x330
-#define FC_ERROR 0x380
-#define FC_ERRCAP 0x388
-#define FC_ERRMSK 0x390
-#define FC_CNT_CRCERR 0x538
-#define FC_CRC_THRESH 0x580
-
-#define FC_MTIP_CMDCONFIG_ONLINE 0x20ULL
-#define FC_MTIP_CMDCONFIG_OFFLINE 0x40ULL
-
-#define FC_MTIP_STATUS_MASK 0x30ULL
-#define FC_MTIP_STATUS_ONLINE 0x20ULL
-#define FC_MTIP_STATUS_OFFLINE 0x10ULL
-
-/* TIMEOUT and RETRY definitions */
-
-/* AFU command timeout values */
-#define MC_AFU_SYNC_TIMEOUT 5 /* 5 secs */
-#define MC_LUN_PROV_TIMEOUT 5 /* 5 secs */
-#define MC_AFU_DEBUG_TIMEOUT 5 /* 5 secs */
-
-/* AFU command room retry limit */
-#define MC_ROOM_RETRY_CNT 10
-
-/* FC CRC clear periodic timer */
-#define MC_CRC_THRESH 100 /* threshold in 5 mins */
-
-#define FC_PORT_STATUS_RETRY_CNT 100 /* 100 100ms retries = 10 seconds */
-#define FC_PORT_STATUS_RETRY_INTERVAL_US 100000 /* microseconds */
-
-/* VPD defines */
-#define CXLFLASH_VPD_LEN 256
-#define WWPN_LEN 16
-#define WWPN_BUF_LEN (WWPN_LEN + 1)
-
-enum undo_level {
- UNDO_NOOP = 0,
- FREE_IRQ,
- UNMAP_ONE,
- UNMAP_TWO,
- UNMAP_THREE
-};
-
-struct dev_dependent_vals {
- u64 max_sectors;
- u64 flags;
-#define CXLFLASH_NOTIFY_SHUTDOWN 0x0000000000000001ULL
-#define CXLFLASH_WWPN_VPD_REQUIRED 0x0000000000000002ULL
-#define CXLFLASH_OCXL_DEV 0x0000000000000004ULL
-};
-
-static inline const struct cxlflash_backend_ops *
-cxlflash_assign_ops(struct dev_dependent_vals *ddv)
-{
- const struct cxlflash_backend_ops *ops = NULL;
-
-#ifdef CONFIG_OCXL_BASE
- if (ddv->flags & CXLFLASH_OCXL_DEV)
- ops = &cxlflash_ocxl_ops;
-#endif
-
-#ifdef CONFIG_CXL_BASE
- if (!(ddv->flags & CXLFLASH_OCXL_DEV))
- ops = &cxlflash_cxl_ops;
-#endif
-
- return ops;
-}
-
-struct asyc_intr_info {
- u64 status;
- char *desc;
- u8 port;
- u8 action;
-#define CLR_FC_ERROR 0x01
-#define LINK_RESET 0x02
-#define SCAN_HOST 0x04
-};
-
-#endif /* _CXLFLASH_MAIN_H */
diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c
deleted file mode 100644
index 6542818e595a..000000000000
--- a/drivers/scsi/cxlflash/ocxl_hw.c
+++ /dev/null
@@ -1,1399 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CXL Flash Device Driver
- *
- * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2018 IBM Corporation
- */
-
-#include <linux/file.h>
-#include <linux/idr.h>
-#include <linux/module.h>
-#include <linux/mount.h>
-#include <linux/pseudo_fs.h>
-#include <linux/poll.h>
-#include <linux/sched/signal.h>
-#include <linux/interrupt.h>
-#include <linux/irqdomain.h>
-#include <asm/xive.h>
-#include <misc/ocxl.h>
-
-#include <uapi/misc/cxl.h>
-
-#include "backend.h"
-#include "ocxl_hw.h"
-
-/*
- * Pseudo-filesystem to allocate inodes.
- */
-
-#define OCXLFLASH_FS_MAGIC 0x1697698f
-
-static int ocxlflash_fs_cnt;
-static struct vfsmount *ocxlflash_vfs_mount;
-
-static int ocxlflash_fs_init_fs_context(struct fs_context *fc)
-{
- return init_pseudo(fc, OCXLFLASH_FS_MAGIC) ? 0 : -ENOMEM;
-}
-
-static struct file_system_type ocxlflash_fs_type = {
- .name = "ocxlflash",
- .owner = THIS_MODULE,
- .init_fs_context = ocxlflash_fs_init_fs_context,
- .kill_sb = kill_anon_super,
-};
-
-/*
- * ocxlflash_release_mapping() - release the memory mapping
- * @ctx: Context whose mapping is to be released.
- */
-static void ocxlflash_release_mapping(struct ocxlflash_context *ctx)
-{
- if (ctx->mapping)
- simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
- ctx->mapping = NULL;
-}
-
-/*
- * ocxlflash_getfile() - allocate pseudo filesystem, inode, and the file
- * @dev: Generic device of the host.
- * @name: Name of the pseudo filesystem.
- * @fops: File operations.
- * @priv: Private data.
- * @flags: Flags for the file.
- *
- * Return: pointer to the file on success, ERR_PTR on failure
- */
-static struct file *ocxlflash_getfile(struct device *dev, const char *name,
- const struct file_operations *fops,
- void *priv, int flags)
-{
- struct file *file;
- struct inode *inode;
- int rc;
-
- if (fops->owner && !try_module_get(fops->owner)) {
- dev_err(dev, "%s: Owner does not exist\n", __func__);
- rc = -ENOENT;
- goto err1;
- }
-
- rc = simple_pin_fs(&ocxlflash_fs_type, &ocxlflash_vfs_mount,
- &ocxlflash_fs_cnt);
- if (unlikely(rc < 0)) {
- dev_err(dev, "%s: Cannot mount ocxlflash pseudofs rc=%d\n",
- __func__, rc);
- goto err2;
- }
-
- inode = alloc_anon_inode(ocxlflash_vfs_mount->mnt_sb);
- if (IS_ERR(inode)) {
- rc = PTR_ERR(inode);
- dev_err(dev, "%s: alloc_anon_inode failed rc=%d\n",
- __func__, rc);
- goto err3;
- }
-
- file = alloc_file_pseudo(inode, ocxlflash_vfs_mount, name,
- flags & (O_ACCMODE | O_NONBLOCK), fops);
- if (IS_ERR(file)) {
- rc = PTR_ERR(file);
- dev_err(dev, "%s: alloc_file failed rc=%d\n",
- __func__, rc);
- goto err4;
- }
-
- file->private_data = priv;
-out:
- return file;
-err4:
- iput(inode);
-err3:
- simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
-err2:
- module_put(fops->owner);
-err1:
- file = ERR_PTR(rc);
- goto out;
-}
-
-/**
- * ocxlflash_psa_map() - map the process specific MMIO space
- * @ctx_cookie: Adapter context for which the mapping needs to be done.
- *
- * Return: MMIO pointer of the mapped region
- */
-static void __iomem *ocxlflash_psa_map(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
- struct device *dev = ctx->hw_afu->dev;
-
- mutex_lock(&ctx->state_mutex);
- if (ctx->state != STARTED) {
- dev_err(dev, "%s: Context not started, state=%d\n", __func__,
- ctx->state);
- mutex_unlock(&ctx->state_mutex);
- return NULL;
- }
- mutex_unlock(&ctx->state_mutex);
-
- return ioremap(ctx->psn_phys, ctx->psn_size);
-}
-
-/**
- * ocxlflash_psa_unmap() - unmap the process specific MMIO space
- * @addr: MMIO pointer to unmap.
- */
-static void ocxlflash_psa_unmap(void __iomem *addr)
-{
- iounmap(addr);
-}
-
-/**
- * ocxlflash_process_element() - get process element of the adapter context
- * @ctx_cookie: Adapter context associated with the process element.
- *
- * Return: process element of the adapter context
- */
-static int ocxlflash_process_element(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
-
- return ctx->pe;
-}
-
-/**
- * afu_map_irq() - map the interrupt of the adapter context
- * @flags: Flags.
- * @ctx: Adapter context.
- * @num: Per-context AFU interrupt number.
- * @handler: Interrupt handler to register.
- * @cookie: Interrupt handler private data.
- * @name: Name of the interrupt.
- *
- * Return: 0 on success, -errno on failure
- */
-static int afu_map_irq(u64 flags, struct ocxlflash_context *ctx, int num,
- irq_handler_t handler, void *cookie, char *name)
-{
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct device *dev = afu->dev;
- struct ocxlflash_irqs *irq;
- struct xive_irq_data *xd;
- u32 virq;
- int rc = 0;
-
- if (num < 0 || num >= ctx->num_irqs) {
- dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
- rc = -ENOENT;
- goto out;
- }
-
- irq = &ctx->irqs[num];
- virq = irq_create_mapping(NULL, irq->hwirq);
- if (unlikely(!virq)) {
- dev_err(dev, "%s: irq_create_mapping failed\n", __func__);
- rc = -ENOMEM;
- goto out;
- }
-
- rc = request_irq(virq, handler, 0, name, cookie);
- if (unlikely(rc)) {
- dev_err(dev, "%s: request_irq failed rc=%d\n", __func__, rc);
- goto err1;
- }
-
- xd = irq_get_handler_data(virq);
- if (unlikely(!xd)) {
- dev_err(dev, "%s: Can't get interrupt data\n", __func__);
- rc = -ENXIO;
- goto err2;
- }
-
- irq->virq = virq;
- irq->vtrig = xd->trig_mmio;
-out:
- return rc;
-err2:
- free_irq(virq, cookie);
-err1:
- irq_dispose_mapping(virq);
- goto out;
-}
-
-/**
- * ocxlflash_map_afu_irq() - map the interrupt of the adapter context
- * @ctx_cookie: Adapter context.
- * @num: Per-context AFU interrupt number.
- * @handler: Interrupt handler to register.
- * @cookie: Interrupt handler private data.
- * @name: Name of the interrupt.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_map_afu_irq(void *ctx_cookie, int num,
- irq_handler_t handler, void *cookie,
- char *name)
-{
- return afu_map_irq(0, ctx_cookie, num, handler, cookie, name);
-}
-
-/**
- * afu_unmap_irq() - unmap the interrupt
- * @flags: Flags.
- * @ctx: Adapter context.
- * @num: Per-context AFU interrupt number.
- * @cookie: Interrupt handler private data.
- */
-static void afu_unmap_irq(u64 flags, struct ocxlflash_context *ctx, int num,
- void *cookie)
-{
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct device *dev = afu->dev;
- struct ocxlflash_irqs *irq;
-
- if (num < 0 || num >= ctx->num_irqs) {
- dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
- return;
- }
-
- irq = &ctx->irqs[num];
-
- if (irq_find_mapping(NULL, irq->hwirq)) {
- free_irq(irq->virq, cookie);
- irq_dispose_mapping(irq->virq);
- }
-
- memset(irq, 0, sizeof(*irq));
-}
-
-/**
- * ocxlflash_unmap_afu_irq() - unmap the interrupt
- * @ctx_cookie: Adapter context.
- * @num: Per-context AFU interrupt number.
- * @cookie: Interrupt handler private data.
- */
-static void ocxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
-{
- return afu_unmap_irq(0, ctx_cookie, num, cookie);
-}
-
-/**
- * ocxlflash_get_irq_objhndl() - get the object handle for an interrupt
- * @ctx_cookie: Context associated with the interrupt.
- * @irq: Interrupt number.
- *
- * Return: effective address of the mapped region
- */
-static u64 ocxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
-
- if (irq < 0 || irq >= ctx->num_irqs)
- return 0;
-
- return (__force u64)ctx->irqs[irq].vtrig;
-}
-
-/**
- * ocxlflash_xsl_fault() - callback when translation error is triggered
- * @data: Private data provided at callback registration, the context.
- * @addr: Address that triggered the error.
- * @dsisr: Value of dsisr register.
- */
-static void ocxlflash_xsl_fault(void *data, u64 addr, u64 dsisr)
-{
- struct ocxlflash_context *ctx = data;
-
- spin_lock(&ctx->slock);
- ctx->fault_addr = addr;
- ctx->fault_dsisr = dsisr;
- ctx->pending_fault = true;
- spin_unlock(&ctx->slock);
-
- wake_up_all(&ctx->wq);
-}
-
-/**
- * start_context() - local routine to start a context
- * @ctx: Adapter context to be started.
- *
- * Assign the context specific MMIO space, add and enable the PE.
- *
- * Return: 0 on success, -errno on failure
- */
-static int start_context(struct ocxlflash_context *ctx)
-{
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct ocxl_afu_config *acfg = &afu->acfg;
- void *link_token = afu->link_token;
- struct pci_dev *pdev = afu->pdev;
- struct device *dev = afu->dev;
- bool master = ctx->master;
- struct mm_struct *mm;
- int rc = 0;
- u32 pid;
-
- mutex_lock(&ctx->state_mutex);
- if (ctx->state != OPENED) {
- dev_err(dev, "%s: Context state invalid, state=%d\n",
- __func__, ctx->state);
- rc = -EINVAL;
- goto out;
- }
-
- if (master) {
- ctx->psn_size = acfg->global_mmio_size;
- ctx->psn_phys = afu->gmmio_phys;
- } else {
- ctx->psn_size = acfg->pp_mmio_stride;
- ctx->psn_phys = afu->ppmmio_phys + (ctx->pe * ctx->psn_size);
- }
-
- /* pid and mm not set for master contexts */
- if (master) {
- pid = 0;
- mm = NULL;
- } else {
- pid = current->mm->context.id;
- mm = current->mm;
- }
-
- rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0,
- pci_dev_id(pdev), mm, ocxlflash_xsl_fault,
- ctx);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_link_add_pe failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-
- ctx->state = STARTED;
-out:
- mutex_unlock(&ctx->state_mutex);
- return rc;
-}
-
-/**
- * ocxlflash_start_context() - start a kernel context
- * @ctx_cookie: Adapter context to be started.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_start_context(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
-
- return start_context(ctx);
-}
-
-/**
- * ocxlflash_stop_context() - stop a context
- * @ctx_cookie: Adapter context to be stopped.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_stop_context(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct ocxl_afu_config *acfg = &afu->acfg;
- struct pci_dev *pdev = afu->pdev;
- struct device *dev = afu->dev;
- enum ocxlflash_ctx_state state;
- int rc = 0;
-
- mutex_lock(&ctx->state_mutex);
- state = ctx->state;
- ctx->state = CLOSED;
- mutex_unlock(&ctx->state_mutex);
- if (state != STARTED)
- goto out;
-
- rc = ocxl_config_terminate_pasid(pdev, acfg->dvsec_afu_control_pos,
- ctx->pe);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_config_terminate_pasid failed rc=%d\n",
- __func__, rc);
- /* If EBUSY, PE could be referenced in future by the AFU */
- if (rc == -EBUSY)
- goto out;
- }
-
- rc = ocxl_link_remove_pe(afu->link_token, ctx->pe);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_link_remove_pe failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-out:
- return rc;
-}
-
-/**
- * ocxlflash_afu_reset() - reset the AFU
- * @ctx_cookie: Adapter context.
- */
-static int ocxlflash_afu_reset(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
- struct device *dev = ctx->hw_afu->dev;
-
- /* Pending implementation from OCXL transport services */
- dev_err_once(dev, "%s: afu_reset() fop not supported\n", __func__);
-
- /* Silently return success until it is implemented */
- return 0;
-}
-
-/**
- * ocxlflash_set_master() - sets the context as master
- * @ctx_cookie: Adapter context to set as master.
- */
-static void ocxlflash_set_master(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
-
- ctx->master = true;
-}
-
-/**
- * ocxlflash_get_context() - obtains the context associated with the host
- * @pdev: PCI device associated with the host.
- * @afu_cookie: Hardware AFU associated with the host.
- *
- * Return: pointer to the host adapter context
- */
-static void *ocxlflash_get_context(struct pci_dev *pdev, void *afu_cookie)
-{
- struct ocxl_hw_afu *afu = afu_cookie;
-
- return afu->ocxl_ctx;
-}
-
-/**
- * ocxlflash_dev_context_init() - allocate and initialize an adapter context
- * @pdev: PCI device associated with the host.
- * @afu_cookie: Hardware AFU associated with the host.
- *
- * Return: the adapter context on success, ERR_PTR on failure
- */
-static void *ocxlflash_dev_context_init(struct pci_dev *pdev, void *afu_cookie)
-{
- struct ocxl_hw_afu *afu = afu_cookie;
- struct device *dev = afu->dev;
- struct ocxlflash_context *ctx;
- int rc;
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (unlikely(!ctx)) {
- dev_err(dev, "%s: Context allocation failed\n", __func__);
- rc = -ENOMEM;
- goto err1;
- }
-
- idr_preload(GFP_KERNEL);
- rc = idr_alloc(&afu->idr, ctx, 0, afu->max_pasid, GFP_NOWAIT);
- idr_preload_end();
- if (unlikely(rc < 0)) {
- dev_err(dev, "%s: idr_alloc failed rc=%d\n", __func__, rc);
- goto err2;
- }
-
- spin_lock_init(&ctx->slock);
- init_waitqueue_head(&ctx->wq);
- mutex_init(&ctx->state_mutex);
-
- ctx->state = OPENED;
- ctx->pe = rc;
- ctx->master = false;
- ctx->mapping = NULL;
- ctx->hw_afu = afu;
- ctx->irq_bitmap = 0;
- ctx->pending_irq = false;
- ctx->pending_fault = false;
-out:
- return ctx;
-err2:
- kfree(ctx);
-err1:
- ctx = ERR_PTR(rc);
- goto out;
-}
-
-/**
- * ocxlflash_release_context() - releases an adapter context
- * @ctx_cookie: Adapter context to be released.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_release_context(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
- struct device *dev;
- int rc = 0;
-
- if (!ctx)
- goto out;
-
- dev = ctx->hw_afu->dev;
- mutex_lock(&ctx->state_mutex);
- if (ctx->state >= STARTED) {
- dev_err(dev, "%s: Context in use, state=%d\n", __func__,
- ctx->state);
- mutex_unlock(&ctx->state_mutex);
- rc = -EBUSY;
- goto out;
- }
- mutex_unlock(&ctx->state_mutex);
-
- idr_remove(&ctx->hw_afu->idr, ctx->pe);
- ocxlflash_release_mapping(ctx);
- kfree(ctx);
-out:
- return rc;
-}
-
-/**
- * ocxlflash_perst_reloads_same_image() - sets the image reload policy
- * @afu_cookie: Hardware AFU associated with the host.
- * @image: Whether to load the same image on PERST.
- */
-static void ocxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
-{
- struct ocxl_hw_afu *afu = afu_cookie;
-
- afu->perst_same_image = image;
-}
-
-/**
- * ocxlflash_read_adapter_vpd() - reads the adapter VPD
- * @pdev: PCI device associated with the host.
- * @buf: Buffer to get the VPD data.
- * @count: Size of buffer (maximum bytes that can be read).
- *
- * Return: size of VPD on success, -errno on failure
- */
-static ssize_t ocxlflash_read_adapter_vpd(struct pci_dev *pdev, void *buf,
- size_t count)
-{
- return pci_read_vpd(pdev, 0, count, buf);
-}
-
-/**
- * free_afu_irqs() - internal service to free interrupts
- * @ctx: Adapter context.
- */
-static void free_afu_irqs(struct ocxlflash_context *ctx)
-{
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct device *dev = afu->dev;
- int i;
-
- if (!ctx->irqs) {
- dev_err(dev, "%s: Interrupts not allocated\n", __func__);
- return;
- }
-
- for (i = ctx->num_irqs; i >= 0; i--)
- ocxl_link_free_irq(afu->link_token, ctx->irqs[i].hwirq);
-
- kfree(ctx->irqs);
- ctx->irqs = NULL;
-}
-
-/**
- * alloc_afu_irqs() - internal service to allocate interrupts
- * @ctx: Context associated with the request.
- * @num: Number of interrupts requested.
- *
- * Return: 0 on success, -errno on failure
- */
-static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num)
-{
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct device *dev = afu->dev;
- struct ocxlflash_irqs *irqs;
- int rc = 0;
- int hwirq;
- int i;
-
- if (ctx->irqs) {
- dev_err(dev, "%s: Interrupts already allocated\n", __func__);
- rc = -EEXIST;
- goto out;
- }
-
- if (num > OCXL_MAX_IRQS) {
- dev_err(dev, "%s: Too many interrupts num=%d\n", __func__, num);
- rc = -EINVAL;
- goto out;
- }
-
- irqs = kcalloc(num, sizeof(*irqs), GFP_KERNEL);
- if (unlikely(!irqs)) {
- dev_err(dev, "%s: Context irqs allocation failed\n", __func__);
- rc = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < num; i++) {
- rc = ocxl_link_irq_alloc(afu->link_token, &hwirq);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_link_irq_alloc failed rc=%d\n",
- __func__, rc);
- goto err;
- }
-
- irqs[i].hwirq = hwirq;
- }
-
- ctx->irqs = irqs;
- ctx->num_irqs = num;
-out:
- return rc;
-err:
- for (i = i-1; i >= 0; i--)
- ocxl_link_free_irq(afu->link_token, irqs[i].hwirq);
- kfree(irqs);
- goto out;
-}
-
-/**
- * ocxlflash_allocate_afu_irqs() - allocates the requested number of interrupts
- * @ctx_cookie: Context associated with the request.
- * @num: Number of interrupts requested.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
-{
- return alloc_afu_irqs(ctx_cookie, num);
-}
-
-/**
- * ocxlflash_free_afu_irqs() - frees the interrupts of an adapter context
- * @ctx_cookie: Adapter context.
- */
-static void ocxlflash_free_afu_irqs(void *ctx_cookie)
-{
- free_afu_irqs(ctx_cookie);
-}
-
-/**
- * ocxlflash_unconfig_afu() - unconfigure the AFU
- * @afu: AFU associated with the host.
- */
-static void ocxlflash_unconfig_afu(struct ocxl_hw_afu *afu)
-{
- if (afu->gmmio_virt) {
- iounmap(afu->gmmio_virt);
- afu->gmmio_virt = NULL;
- }
-}
-
-/**
- * ocxlflash_destroy_afu() - destroy the AFU structure
- * @afu_cookie: AFU to be freed.
- */
-static void ocxlflash_destroy_afu(void *afu_cookie)
-{
- struct ocxl_hw_afu *afu = afu_cookie;
- int pos;
-
- if (!afu)
- return;
-
- ocxlflash_release_context(afu->ocxl_ctx);
- idr_destroy(&afu->idr);
-
- /* Disable the AFU */
- pos = afu->acfg.dvsec_afu_control_pos;
- ocxl_config_set_afu_state(afu->pdev, pos, 0);
-
- ocxlflash_unconfig_afu(afu);
- kfree(afu);
-}
-
-/**
- * ocxlflash_config_fn() - configure the host function
- * @pdev: PCI device associated with the host.
- * @afu: AFU associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_config_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
-{
- struct ocxl_fn_config *fcfg = &afu->fcfg;
- struct device *dev = &pdev->dev;
- u16 base, enabled, supported;
- int rc = 0;
-
- /* Read DVSEC config of the function */
- rc = ocxl_config_read_function(pdev, fcfg);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_config_read_function failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-
- /* Check if function has AFUs defined, only 1 per function supported */
- if (fcfg->max_afu_index >= 0) {
- afu->is_present = true;
- if (fcfg->max_afu_index != 0)
- dev_warn(dev, "%s: Unexpected AFU index value %d\n",
- __func__, fcfg->max_afu_index);
- }
-
- rc = ocxl_config_get_actag_info(pdev, &base, &enabled, &supported);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_config_get_actag_info failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-
- afu->fn_actag_base = base;
- afu->fn_actag_enabled = enabled;
-
- ocxl_config_set_actag(pdev, fcfg->dvsec_function_pos, base, enabled);
- dev_dbg(dev, "%s: Function acTag range base=%u enabled=%u\n",
- __func__, base, enabled);
-
- rc = ocxl_link_setup(pdev, 0, &afu->link_token);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_link_setup failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-
- rc = ocxl_config_set_TL(pdev, fcfg->dvsec_tl_pos);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_config_set_TL failed rc=%d\n",
- __func__, rc);
- goto err;
- }
-out:
- return rc;
-err:
- ocxl_link_release(pdev, afu->link_token);
- goto out;
-}
-
-/**
- * ocxlflash_unconfig_fn() - unconfigure the host function
- * @pdev: PCI device associated with the host.
- * @afu: AFU associated with the host.
- */
-static void ocxlflash_unconfig_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
-{
- ocxl_link_release(pdev, afu->link_token);
-}
-
-/**
- * ocxlflash_map_mmio() - map the AFU MMIO space
- * @afu: AFU associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_map_mmio(struct ocxl_hw_afu *afu)
-{
- struct ocxl_afu_config *acfg = &afu->acfg;
- struct pci_dev *pdev = afu->pdev;
- struct device *dev = afu->dev;
- phys_addr_t gmmio, ppmmio;
- int rc = 0;
-
- rc = pci_request_region(pdev, acfg->global_mmio_bar, "ocxlflash");
- if (unlikely(rc)) {
- dev_err(dev, "%s: pci_request_region for global failed rc=%d\n",
- __func__, rc);
- goto out;
- }
- gmmio = pci_resource_start(pdev, acfg->global_mmio_bar);
- gmmio += acfg->global_mmio_offset;
-
- rc = pci_request_region(pdev, acfg->pp_mmio_bar, "ocxlflash");
- if (unlikely(rc)) {
- dev_err(dev, "%s: pci_request_region for pp bar failed rc=%d\n",
- __func__, rc);
- goto err1;
- }
- ppmmio = pci_resource_start(pdev, acfg->pp_mmio_bar);
- ppmmio += acfg->pp_mmio_offset;
-
- afu->gmmio_virt = ioremap(gmmio, acfg->global_mmio_size);
- if (unlikely(!afu->gmmio_virt)) {
- dev_err(dev, "%s: MMIO mapping failed\n", __func__);
- rc = -ENOMEM;
- goto err2;
- }
-
- afu->gmmio_phys = gmmio;
- afu->ppmmio_phys = ppmmio;
-out:
- return rc;
-err2:
- pci_release_region(pdev, acfg->pp_mmio_bar);
-err1:
- pci_release_region(pdev, acfg->global_mmio_bar);
- goto out;
-}
-
-/**
- * ocxlflash_config_afu() - configure the host AFU
- * @pdev: PCI device associated with the host.
- * @afu: AFU associated with the host.
- *
- * Must be called _after_ host function configuration.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_config_afu(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
-{
- struct ocxl_afu_config *acfg = &afu->acfg;
- struct ocxl_fn_config *fcfg = &afu->fcfg;
- struct device *dev = &pdev->dev;
- int count;
- int base;
- int pos;
- int rc = 0;
-
- /* This HW AFU function does not have any AFUs defined */
- if (!afu->is_present)
- goto out;
-
- /* Read AFU config at index 0 */
- rc = ocxl_config_read_afu(pdev, fcfg, acfg, 0);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_config_read_afu failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-
- /* Only one AFU per function is supported, so actag_base is same */
- base = afu->fn_actag_base;
- count = min_t(int, acfg->actag_supported, afu->fn_actag_enabled);
- pos = acfg->dvsec_afu_control_pos;
-
- ocxl_config_set_afu_actag(pdev, pos, base, count);
- dev_dbg(dev, "%s: acTag base=%d enabled=%d\n", __func__, base, count);
- afu->afu_actag_base = base;
- afu->afu_actag_enabled = count;
- afu->max_pasid = 1 << acfg->pasid_supported_log;
-
- ocxl_config_set_afu_pasid(pdev, pos, 0, acfg->pasid_supported_log);
-
- rc = ocxlflash_map_mmio(afu);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxlflash_map_mmio failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-
- /* Enable the AFU */
- ocxl_config_set_afu_state(pdev, acfg->dvsec_afu_control_pos, 1);
-out:
- return rc;
-}
-
-/**
- * ocxlflash_create_afu() - create the AFU for OCXL
- * @pdev: PCI device associated with the host.
- *
- * Return: AFU on success, NULL on failure
- */
-static void *ocxlflash_create_afu(struct pci_dev *pdev)
-{
- struct device *dev = &pdev->dev;
- struct ocxlflash_context *ctx;
- struct ocxl_hw_afu *afu;
- int rc;
-
- afu = kzalloc(sizeof(*afu), GFP_KERNEL);
- if (unlikely(!afu)) {
- dev_err(dev, "%s: HW AFU allocation failed\n", __func__);
- goto out;
- }
-
- afu->pdev = pdev;
- afu->dev = dev;
- idr_init(&afu->idr);
-
- rc = ocxlflash_config_fn(pdev, afu);
- if (unlikely(rc)) {
- dev_err(dev, "%s: Function configuration failed rc=%d\n",
- __func__, rc);
- goto err1;
- }
-
- rc = ocxlflash_config_afu(pdev, afu);
- if (unlikely(rc)) {
- dev_err(dev, "%s: AFU configuration failed rc=%d\n",
- __func__, rc);
- goto err2;
- }
-
- ctx = ocxlflash_dev_context_init(pdev, afu);
- if (IS_ERR(ctx)) {
- rc = PTR_ERR(ctx);
- dev_err(dev, "%s: ocxlflash_dev_context_init failed rc=%d\n",
- __func__, rc);
- goto err3;
- }
-
- afu->ocxl_ctx = ctx;
-out:
- return afu;
-err3:
- ocxlflash_unconfig_afu(afu);
-err2:
- ocxlflash_unconfig_fn(pdev, afu);
-err1:
- idr_destroy(&afu->idr);
- kfree(afu);
- afu = NULL;
- goto out;
-}
-
-/**
- * ctx_event_pending() - check for any event pending on the context
- * @ctx: Context to be checked.
- *
- * Return: true if there is an event pending, false if none pending
- */
-static inline bool ctx_event_pending(struct ocxlflash_context *ctx)
-{
- if (ctx->pending_irq || ctx->pending_fault)
- return true;
-
- return false;
-}
-
-/**
- * afu_poll() - poll the AFU for events on the context
- * @file: File associated with the adapter context.
- * @poll: Poll structure from the user.
- *
- * Return: poll mask
- */
-static unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
-{
- struct ocxlflash_context *ctx = file->private_data;
- struct device *dev = ctx->hw_afu->dev;
- ulong lock_flags;
- int mask = 0;
-
- poll_wait(file, &ctx->wq, poll);
-
- spin_lock_irqsave(&ctx->slock, lock_flags);
- if (ctx_event_pending(ctx))
- mask |= POLLIN | POLLRDNORM;
- else if (ctx->state == CLOSED)
- mask |= POLLERR;
- spin_unlock_irqrestore(&ctx->slock, lock_flags);
-
- dev_dbg(dev, "%s: Poll wait completed for pe %i mask %i\n",
- __func__, ctx->pe, mask);
-
- return mask;
-}
-
-/**
- * afu_read() - perform a read on the context for any event
- * @file: File associated with the adapter context.
- * @buf: Buffer to receive the data.
- * @count: Size of buffer (maximum bytes that can be read).
- * @off: Offset.
- *
- * Return: size of the data read on success, -errno on failure
- */
-static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
- loff_t *off)
-{
- struct ocxlflash_context *ctx = file->private_data;
- struct device *dev = ctx->hw_afu->dev;
- struct cxl_event event;
- ulong lock_flags;
- ssize_t esize;
- ssize_t rc;
- int bit;
- DEFINE_WAIT(event_wait);
-
- if (*off != 0) {
- dev_err(dev, "%s: Non-zero offset not supported, off=%lld\n",
- __func__, *off);
- rc = -EINVAL;
- goto out;
- }
-
- spin_lock_irqsave(&ctx->slock, lock_flags);
-
- for (;;) {
- prepare_to_wait(&ctx->wq, &event_wait, TASK_INTERRUPTIBLE);
-
- if (ctx_event_pending(ctx) || (ctx->state == CLOSED))
- break;
-
- if (file->f_flags & O_NONBLOCK) {
- dev_err(dev, "%s: File cannot be blocked on I/O\n",
- __func__);
- rc = -EAGAIN;
- goto err;
- }
-
- if (signal_pending(current)) {
- dev_err(dev, "%s: Signal pending on the process\n",
- __func__);
- rc = -ERESTARTSYS;
- goto err;
- }
-
- spin_unlock_irqrestore(&ctx->slock, lock_flags);
- schedule();
- spin_lock_irqsave(&ctx->slock, lock_flags);
- }
-
- finish_wait(&ctx->wq, &event_wait);
-
- memset(&event, 0, sizeof(event));
- event.header.process_element = ctx->pe;
- event.header.size = sizeof(struct cxl_event_header);
- if (ctx->pending_irq) {
- esize = sizeof(struct cxl_event_afu_interrupt);
- event.header.size += esize;
- event.header.type = CXL_EVENT_AFU_INTERRUPT;
-
- bit = find_first_bit(&ctx->irq_bitmap, ctx->num_irqs);
- clear_bit(bit, &ctx->irq_bitmap);
- event.irq.irq = bit + 1;
- if (bitmap_empty(&ctx->irq_bitmap, ctx->num_irqs))
- ctx->pending_irq = false;
- } else if (ctx->pending_fault) {
- event.header.size += sizeof(struct cxl_event_data_storage);
- event.header.type = CXL_EVENT_DATA_STORAGE;
- event.fault.addr = ctx->fault_addr;
- event.fault.dsisr = ctx->fault_dsisr;
- ctx->pending_fault = false;
- }
-
- spin_unlock_irqrestore(&ctx->slock, lock_flags);
-
- if (copy_to_user(buf, &event, event.header.size)) {
- dev_err(dev, "%s: copy_to_user failed\n", __func__);
- rc = -EFAULT;
- goto out;
- }
-
- rc = event.header.size;
-out:
- return rc;
-err:
- finish_wait(&ctx->wq, &event_wait);
- spin_unlock_irqrestore(&ctx->slock, lock_flags);
- goto out;
-}
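From user space, the poll/read pair above is consumed roughly as sketched below. struct cxl_event and the event type constants come from the cxl uapi header that this file already pulls in; the install path assumed here is illustrative.

/* Illustrative user-space sketch: wait for and consume one context event. */
#include <poll.h>
#include <unistd.h>
#include <misc/cxl.h>	/* assumed install location of the cxl uapi header */

static int example_wait_event(int ctx_fd)
{
	struct cxl_event event;
	struct pollfd pfd = { .fd = ctx_fd, .events = POLLIN };

	if (poll(&pfd, 1, -1) <= 0)
		return -1;

	/* afu_read() returns a header plus an IRQ or data-storage payload. */
	if (read(ctx_fd, &event, sizeof(event)) < (ssize_t)sizeof(event.header))
		return -1;

	return event.header.type;	/* e.g. CXL_EVENT_AFU_INTERRUPT */
}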
-
-/**
- * afu_release() - release and free the context
- * @inode: File inode pointer.
- * @file: File associated with the context.
- *
- * Return: 0 on success, -errno on failure
- */
-static int afu_release(struct inode *inode, struct file *file)
-{
- struct ocxlflash_context *ctx = file->private_data;
- int i;
-
- /* Unmap and free the interrupts associated with the context */
- for (i = ctx->num_irqs; i >= 0; i--)
- afu_unmap_irq(0, ctx, i, ctx);
- free_afu_irqs(ctx);
-
- return ocxlflash_release_context(ctx);
-}
-
-/**
- * ocxlflash_mmap_fault() - mmap fault handler
- * @vmf: VM fault associated with current fault.
- *
- * Return: 0 on success, -errno on failure
- */
-static vm_fault_t ocxlflash_mmap_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct ocxlflash_context *ctx = vma->vm_file->private_data;
- struct device *dev = ctx->hw_afu->dev;
- u64 mmio_area, offset;
-
- offset = vmf->pgoff << PAGE_SHIFT;
- if (offset >= ctx->psn_size)
- return VM_FAULT_SIGBUS;
-
- mutex_lock(&ctx->state_mutex);
- if (ctx->state != STARTED) {
- dev_err(dev, "%s: Context not started, state=%d\n",
- __func__, ctx->state);
- mutex_unlock(&ctx->state_mutex);
- return VM_FAULT_SIGBUS;
- }
- mutex_unlock(&ctx->state_mutex);
-
- mmio_area = ctx->psn_phys;
- mmio_area += offset;
-
- return vmf_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
-}
-
-static const struct vm_operations_struct ocxlflash_vmops = {
- .fault = ocxlflash_mmap_fault,
-};
-
-/**
- * afu_mmap() - map the fault handler operations
- * @file: File associated with the context.
- * @vma: VM area associated with mapping.
- *
- * Return: 0 on success, -errno on failure
- */
-static int afu_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct ocxlflash_context *ctx = file->private_data;
-
- if ((vma_pages(vma) + vma->vm_pgoff) >
- (ctx->psn_size >> PAGE_SHIFT))
- return -EINVAL;
-
- vm_flags_set(vma, VM_IO | VM_PFNMAP);
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_ops = &ocxlflash_vmops;
- return 0;
-}
-
-static const struct file_operations ocxl_afu_fops = {
- .owner = THIS_MODULE,
- .poll = afu_poll,
- .read = afu_read,
- .release = afu_release,
- .mmap = afu_mmap,
-};
-
-#define PATCH_FOPS(NAME) \
- do { if (!fops->NAME) fops->NAME = ocxl_afu_fops.NAME; } while (0)
-
-/**
- * ocxlflash_get_fd() - get file descriptor for an adapter context
- * @ctx_cookie: Adapter context.
- * @fops: File operations to be associated.
- * @fd: File descriptor to be returned back.
- *
- * Return: pointer to the file on success, ERR_PTR on failure
- */
-static struct file *ocxlflash_get_fd(void *ctx_cookie,
- struct file_operations *fops, int *fd)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
- struct device *dev = ctx->hw_afu->dev;
- struct file *file;
- int flags, fdtmp;
- int rc = 0;
- char *name = NULL;
-
- /* Only allow one fd per context */
- if (ctx->mapping) {
- dev_err(dev, "%s: Context is already mapped to an fd\n",
- __func__);
- rc = -EEXIST;
- goto err1;
- }
-
- flags = O_RDWR | O_CLOEXEC;
-
- /* This code is similar to anon_inode_getfd() */
- rc = get_unused_fd_flags(flags);
- if (unlikely(rc < 0)) {
- dev_err(dev, "%s: get_unused_fd_flags failed rc=%d\n",
- __func__, rc);
- goto err1;
- }
- fdtmp = rc;
-
- /* Patch the file ops that are not defined */
- if (fops) {
- PATCH_FOPS(poll);
- PATCH_FOPS(read);
- PATCH_FOPS(release);
- PATCH_FOPS(mmap);
- } else /* Use default ops */
- fops = (struct file_operations *)&ocxl_afu_fops;
-
- name = kasprintf(GFP_KERNEL, "ocxlflash:%d", ctx->pe);
- file = ocxlflash_getfile(dev, name, fops, ctx, flags);
- kfree(name);
- if (IS_ERR(file)) {
- rc = PTR_ERR(file);
- dev_err(dev, "%s: ocxlflash_getfile failed rc=%d\n",
- __func__, rc);
- goto err2;
- }
-
- ctx->mapping = file->f_mapping;
- *fd = fdtmp;
-out:
- return file;
-err2:
- put_unused_fd(fdtmp);
-err1:
- file = ERR_PTR(rc);
- goto out;
-}
-
-/**
- * ocxlflash_fops_get_context() - get the context associated with the file
- * @file: File associated with the adapter context.
- *
- * Return: pointer to the context
- */
-static void *ocxlflash_fops_get_context(struct file *file)
-{
- return file->private_data;
-}
-
-/**
- * ocxlflash_afu_irq() - interrupt handler for user contexts
- * @irq: Interrupt number.
- * @data: Private data provided at interrupt registration, the context.
- *
- * Return: Always returns IRQ_HANDLED.
- */
-static irqreturn_t ocxlflash_afu_irq(int irq, void *data)
-{
- struct ocxlflash_context *ctx = data;
- struct device *dev = ctx->hw_afu->dev;
- int i;
-
- dev_dbg(dev, "%s: Interrupt raised for pe %i virq %i\n",
- __func__, ctx->pe, irq);
-
- for (i = 0; i < ctx->num_irqs; i++) {
- if (ctx->irqs[i].virq == irq)
- break;
- }
- if (unlikely(i >= ctx->num_irqs)) {
- dev_err(dev, "%s: Received AFU IRQ out of range\n", __func__);
- goto out;
- }
-
- spin_lock(&ctx->slock);
- set_bit(i - 1, &ctx->irq_bitmap);
- ctx->pending_irq = true;
- spin_unlock(&ctx->slock);
-
- wake_up_all(&ctx->wq);
-out:
- return IRQ_HANDLED;
-}
-
-/**
- * ocxlflash_start_work() - start a user context
- * @ctx_cookie: Context to be started.
- * @num_irqs: Number of interrupts requested.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_start_work(void *ctx_cookie, u64 num_irqs)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct device *dev = afu->dev;
- char *name;
- int rc = 0;
- int i;
-
- rc = alloc_afu_irqs(ctx, num_irqs);
- if (unlikely(rc < 0)) {
- dev_err(dev, "%s: alloc_afu_irqs failed rc=%d\n", __func__, rc);
- goto out;
- }
-
- for (i = 0; i < num_irqs; i++) {
- name = kasprintf(GFP_KERNEL, "ocxlflash-%s-pe%i-%i",
- dev_name(dev), ctx->pe, i);
- rc = afu_map_irq(0, ctx, i, ocxlflash_afu_irq, ctx, name);
- kfree(name);
- if (unlikely(rc < 0)) {
- dev_err(dev, "%s: afu_map_irq failed rc=%d\n",
- __func__, rc);
- goto err;
- }
- }
-
- rc = start_context(ctx);
- if (unlikely(rc)) {
- dev_err(dev, "%s: start_context failed rc=%d\n", __func__, rc);
- goto err;
- }
-out:
- return rc;
-err:
- for (i = i-1; i >= 0; i--)
- afu_unmap_irq(0, ctx, i, ctx);
- free_afu_irqs(ctx);
- goto out;
-};
-
-/**
- * ocxlflash_fd_mmap() - mmap handler for adapter file descriptor
- * @file: File installed with adapter file descriptor.
- * @vma: VM area associated with mapping.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_fd_mmap(struct file *file, struct vm_area_struct *vma)
-{
- return afu_mmap(file, vma);
-}
-
-/**
- * ocxlflash_fd_release() - release the context associated with the file
- * @inode: File inode pointer.
- * @file: File associated with the adapter context.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_fd_release(struct inode *inode, struct file *file)
-{
- return afu_release(inode, file);
-}
-
-/* Backend ops to ocxlflash services */
-const struct cxlflash_backend_ops cxlflash_ocxl_ops = {
- .module = THIS_MODULE,
- .psa_map = ocxlflash_psa_map,
- .psa_unmap = ocxlflash_psa_unmap,
- .process_element = ocxlflash_process_element,
- .map_afu_irq = ocxlflash_map_afu_irq,
- .unmap_afu_irq = ocxlflash_unmap_afu_irq,
- .get_irq_objhndl = ocxlflash_get_irq_objhndl,
- .start_context = ocxlflash_start_context,
- .stop_context = ocxlflash_stop_context,
- .afu_reset = ocxlflash_afu_reset,
- .set_master = ocxlflash_set_master,
- .get_context = ocxlflash_get_context,
- .dev_context_init = ocxlflash_dev_context_init,
- .release_context = ocxlflash_release_context,
- .perst_reloads_same_image = ocxlflash_perst_reloads_same_image,
- .read_adapter_vpd = ocxlflash_read_adapter_vpd,
- .allocate_afu_irqs = ocxlflash_allocate_afu_irqs,
- .free_afu_irqs = ocxlflash_free_afu_irqs,
- .create_afu = ocxlflash_create_afu,
- .destroy_afu = ocxlflash_destroy_afu,
- .get_fd = ocxlflash_get_fd,
- .fops_get_context = ocxlflash_fops_get_context,
- .start_work = ocxlflash_start_work,
- .fd_mmap = ocxlflash_fd_mmap,
- .fd_release = ocxlflash_fd_release,
-};
diff --git a/drivers/scsi/cxlflash/ocxl_hw.h b/drivers/scsi/cxlflash/ocxl_hw.h
deleted file mode 100644
index f2fe88816bea..000000000000
--- a/drivers/scsi/cxlflash/ocxl_hw.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2018 IBM Corporation
- */
-
-#define OCXL_MAX_IRQS 4 /* Max interrupts per process */
-
-struct ocxlflash_irqs {
- int hwirq;
- u32 virq;
- void __iomem *vtrig;
-};
-
-/* OCXL hardware AFU associated with the host */
-struct ocxl_hw_afu {
- struct ocxlflash_context *ocxl_ctx; /* Host context */
- struct pci_dev *pdev; /* PCI device */
- struct device *dev; /* Generic device */
- bool perst_same_image; /* Same image loaded on perst */
-
- struct ocxl_fn_config fcfg; /* DVSEC config of the function */
- struct ocxl_afu_config acfg; /* AFU configuration data */
-
- int fn_actag_base; /* Function acTag base */
- int fn_actag_enabled; /* Function acTag number enabled */
- int afu_actag_base; /* AFU acTag base */
- int afu_actag_enabled; /* AFU acTag number enabled */
-
- phys_addr_t ppmmio_phys; /* Per process MMIO space */
- phys_addr_t gmmio_phys; /* Global AFU MMIO space */
- void __iomem *gmmio_virt; /* Global MMIO map */
-
- void *link_token; /* Link token for the SPA */
- struct idr idr; /* IDR to manage contexts */
- int max_pasid; /* Maximum number of contexts */
- bool is_present; /* Function has AFUs defined */
-};
-
-enum ocxlflash_ctx_state {
- CLOSED,
- OPENED,
- STARTED
-};
-
-struct ocxlflash_context {
- struct ocxl_hw_afu *hw_afu; /* HW AFU back pointer */
- struct address_space *mapping; /* Mapping for pseudo filesystem */
- bool master; /* Whether this is a master context */
- int pe; /* Process element */
-
- phys_addr_t psn_phys; /* Process mapping */
- u64 psn_size; /* Process mapping size */
-
- spinlock_t slock; /* Protects irq/fault/event updates */
- wait_queue_head_t wq; /* Wait queue for poll and interrupts */
- struct mutex state_mutex; /* Mutex to update context state */
- enum ocxlflash_ctx_state state; /* Context state */
-
- struct ocxlflash_irqs *irqs; /* Pointer to array of structures */
- int num_irqs; /* Number of interrupts */
- bool pending_irq; /* Pending interrupt on the context */
- ulong irq_bitmap; /* Bits indicating pending irq num */
-
- u64 fault_addr; /* Address that triggered the fault */
- u64 fault_dsisr; /* Value of dsisr register at fault */
- bool pending_fault; /* Pending translation fault */
-};
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
deleted file mode 100644
index ab315c59505b..000000000000
--- a/drivers/scsi/cxlflash/sislite.h
+++ /dev/null
@@ -1,560 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#ifndef _SISLITE_H
-#define _SISLITE_H
-
-#include <linux/types.h>
-
-typedef u16 ctx_hndl_t;
-typedef u32 res_hndl_t;
-
-#define SIZE_4K 4096
-#define SIZE_64K 65536
-
-/*
- * IOARCB: 64 bytes, min 16 byte alignment required, host native endianness
- * except for SCSI CDB which remains big endian per SCSI standards.
- */
-struct sisl_ioarcb {
- u16 ctx_id; /* ctx_hndl_t */
- u16 req_flags;
-#define SISL_REQ_FLAGS_RES_HNDL 0x8000U /* bit 0 (MSB) */
-#define SISL_REQ_FLAGS_PORT_LUN_ID 0x0000U
-
-#define SISL_REQ_FLAGS_SUP_UNDERRUN 0x4000U /* bit 1 */
-
-#define SISL_REQ_FLAGS_TIMEOUT_SECS 0x0000U /* bits 8,9 */
-#define SISL_REQ_FLAGS_TIMEOUT_MSECS 0x0040U
-#define SISL_REQ_FLAGS_TIMEOUT_USECS 0x0080U
-#define SISL_REQ_FLAGS_TIMEOUT_CYCLES 0x00C0U
-
-#define SISL_REQ_FLAGS_TMF_CMD 0x0004u /* bit 13 */
-
-#define SISL_REQ_FLAGS_AFU_CMD 0x0002U /* bit 14 */
-
-#define SISL_REQ_FLAGS_HOST_WRITE 0x0001U /* bit 15 (LSB) */
-#define SISL_REQ_FLAGS_HOST_READ 0x0000U
-
- union {
- u32 res_hndl; /* res_hndl_t */
- u32 port_sel; /* this is a selection mask:
- * 0x1 -> port#0 can be selected,
- * 0x2 -> port#1 can be selected.
- * Can be bitwise ORed.
- */
- };
- u64 lun_id;
- u32 data_len; /* 4K for read/write */
- u32 ioadl_len;
- union {
- u64 data_ea; /* min 16 byte aligned */
- u64 ioadl_ea;
- };
- u8 msi; /* LISN to send on RRQ write */
-#define SISL_MSI_CXL_PFAULT 0 /* reserved for CXL page faults */
-#define SISL_MSI_SYNC_ERROR 1 /* recommended for AFU sync error */
-#define SISL_MSI_RRQ_UPDATED 2 /* recommended for IO completion */
-#define SISL_MSI_ASYNC_ERROR 3 /* master only - for AFU async error */
-
- u8 rrq; /* 0 for a single RRQ */
- u16 timeout; /* in units specified by req_flags */
- u32 rsvd1;
- u8 cdb[16]; /* must be in big endian */
-#define SISL_AFU_CMD_SYNC 0xC0 /* AFU sync command */
-#define SISL_AFU_CMD_LUN_PROVISION 0xD0 /* AFU LUN provision command */
-#define SISL_AFU_CMD_DEBUG 0xE0 /* AFU debug command */
-
-#define SISL_AFU_LUN_PROVISION_CREATE 0x00 /* LUN provision create type */
-#define SISL_AFU_LUN_PROVISION_DELETE 0x01 /* LUN provision delete type */
-
- union {
- u64 reserved; /* Reserved for IOARRIN mode */
- struct sisl_ioasa *ioasa; /* IOASA EA for SQ Mode */
- };
-} __packed;
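
Taken together, the flag and addressing fields above are enough to describe a simple port/LUN addressed command. The fragment below sketches how an IOARCB might be filled for a 4K host read; the variable names and the eventual submission step are placeholders, not the driver's code.

	/* Sketch only: 'rcb' is a struct sisl_ioarcb; 'ctx_hndl', 'lun_id',
	 * 'buf_dma' and 'cdb' are assumed to be prepared by the caller.
	 */
	memset(&rcb, 0, sizeof(rcb));
	rcb.ctx_id = ctx_hndl;
	rcb.req_flags = SISL_REQ_FLAGS_PORT_LUN_ID |
			SISL_REQ_FLAGS_SUP_UNDERRUN |
			SISL_REQ_FLAGS_TIMEOUT_SECS |
			SISL_REQ_FLAGS_HOST_READ;
	rcb.port_sel = 0x1;			/* port #0 only */
	rcb.lun_id = lun_id;
	rcb.data_len = SIZE_4K;
	rcb.data_ea = buf_dma;			/* min 16 byte aligned */
	rcb.msi = SISL_MSI_RRQ_UPDATED;
	rcb.rrq = 0;				/* single RRQ */
	rcb.timeout = 30;			/* seconds, per req_flags above */
	memcpy(rcb.cdb, cdb, sizeof(rcb.cdb));	/* CDB stays big endian */
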
-
-struct sisl_rc {
- u8 flags;
-#define SISL_RC_FLAGS_SENSE_VALID 0x80U
-#define SISL_RC_FLAGS_FCP_RSP_CODE_VALID 0x40U
-#define SISL_RC_FLAGS_OVERRUN 0x20U
-#define SISL_RC_FLAGS_UNDERRUN 0x10U
-
- u8 afu_rc;
-#define SISL_AFU_RC_RHT_INVALID 0x01U /* user error */
-#define SISL_AFU_RC_RHT_UNALIGNED 0x02U /* should never happen */
-#define SISL_AFU_RC_RHT_OUT_OF_BOUNDS 0x03u /* user error */
-#define SISL_AFU_RC_RHT_DMA_ERR 0x04u /* see afu_extra
- * may retry if afu_retry is off
- * possible on master exit
- */
-#define SISL_AFU_RC_RHT_RW_PERM 0x05u /* no RW perms, user error */
-#define SISL_AFU_RC_LXT_UNALIGNED 0x12U /* should never happen */
-#define SISL_AFU_RC_LXT_OUT_OF_BOUNDS 0x13u /* user error */
-#define SISL_AFU_RC_LXT_DMA_ERR 0x14u /* see afu_extra
- * may retry if afu_retry is off
- * possible on master exit
- */
-#define SISL_AFU_RC_LXT_RW_PERM 0x15u /* no RW perms, user error */
-
-#define SISL_AFU_RC_NOT_XLATE_HOST 0x1au /* possible if master exited */
-
- /* NO_CHANNELS means the FC ports selected by dest_port in
- * IOARCB or in the LXT entry are down when the AFU tried to select
- * a FC port. If the port went down on an active IO, it will set
- * fc_rc to 0x54 (NOLOGI) or 0x57 (LINKDOWN) instead.
- */
-#define SISL_AFU_RC_NO_CHANNELS 0x20U /* see afu_extra, may retry */
-#define SISL_AFU_RC_CAP_VIOLATION 0x21U /* either user error or
- * afu reset/master restart
- */
-#define SISL_AFU_RC_OUT_OF_DATA_BUFS 0x30U /* always retry */
-#define SISL_AFU_RC_DATA_DMA_ERR 0x31U /* see afu_extra
- * may retry if afu_retry is off
- */
-
- u8 scsi_rc; /* SCSI status byte, retry as appropriate */
-#define SISL_SCSI_RC_CHECK 0x02U
-#define SISL_SCSI_RC_BUSY 0x08u
-
- u8 fc_rc; /* retry */
- /*
- * We should only see fc_rc=0x57 (LINKDOWN) or 0x54(NOLOGI) for
- * commands that are in flight when a link goes down or is logged out.
- * If the link is down or logged out before AFU selects the port, either
- * it will choose the other port or we will get afu_rc=0x20 (no_channel)
- * if there is no valid port to use.
- *
- * ABORTPEND/ABORTOK/ABORTFAIL/TGTABORT can be retried, typically these
- * would happen if a frame is dropped and something times out.
- * NOLOGI or LINKDOWN can be retried if the other port is up.
- * RESIDERR can be retried as well.
- *
- * ABORTFAIL might indicate that lots of frames are getting CRC errors.
- * So it may be retried once and the link reset if it happens again.
- * The link can also be reset on the CRC error threshold interrupt.
- */
-#define SISL_FC_RC_ABORTPEND 0x52 /* exchange timeout or abort request */
-#define SISL_FC_RC_WRABORTPEND 0x53 /* due to write XFER_RDY invalid */
-#define SISL_FC_RC_NOLOGI 0x54 /* port not logged in, in-flight cmds */
-#define SISL_FC_RC_NOEXP 0x55 /* FC protocol error or HW bug */
-#define SISL_FC_RC_INUSE 0x56 /* tag already in use, HW bug */
-#define SISL_FC_RC_LINKDOWN 0x57 /* link down, in-flight cmds */
-#define SISL_FC_RC_ABORTOK 0x58 /* pending abort completed w/success */
-#define SISL_FC_RC_ABORTFAIL 0x59 /* pending abort completed w/fail */
-#define SISL_FC_RC_RESID 0x5A /* ioasa underrun/overrun flags set */
-#define SISL_FC_RC_RESIDERR 0x5B /* actual data len does not match SCSI
- * reported len, possibly due to dropped
- * frames
- */
-#define SISL_FC_RC_TGTABORT 0x5C /* command aborted by target */
-};
-
-#define SISL_SENSE_DATA_LEN 20 /* Sense data length */
-#define SISL_WWID_DATA_LEN 16 /* WWID data length */
-
-/*
- * IOASA: 64 bytes & must follow IOARCB, min 16 byte alignment required,
- * host native endianness
- */
-struct sisl_ioasa {
- union {
- struct sisl_rc rc;
- u32 ioasc;
-#define SISL_IOASC_GOOD_COMPLETION 0x00000000U
- };
-
- union {
- u32 resid;
- u32 lunid_hi;
- };
-
- u8 port;
- u8 afu_extra;
- /* when afu_rc=0x04, 0x14, 0x31 (_xxx_DMA_ERR):
- * afu_extra contains the PSL response code. Useful codes are:
- */
-#define SISL_AFU_DMA_ERR_PAGE_IN 0x0A /* AFU_retry_on_pagein Action
- * Enabled N/A
- * Disabled retry
- */
-#define SISL_AFU_DMA_ERR_INVALID_EA 0x0B /* this is a hard error
- * afu_rc Implies
- * 0x04, 0x14 master exit.
- * 0x31 user error.
- */
- /* when afu rc=0x20 (no channels):
- * afu_extra bits [4:5]: available portmask, [6:7]: requested portmask.
- */
-#define SISL_AFU_NO_CLANNELS_AMASK(afu_extra) (((afu_extra) & 0x0C) >> 2)
-#define SISL_AFU_NO_CLANNELS_RMASK(afu_extra) ((afu_extra) & 0x03)
-
- u8 scsi_extra;
- u8 fc_extra;
-
- union {
- u8 sense_data[SISL_SENSE_DATA_LEN];
- struct {
- u32 lunid_lo;
- u8 wwid[SISL_WWID_DATA_LEN];
- };
- };
-
- /* These fields are defined by the SISlite architecture for the
- * host to use as they see fit for their implementation.
- */
- union {
- u64 host_use[4];
- u8 host_use_b[32];
- };
-} __packed;
-
-#define SISL_RESP_HANDLE_T_BIT 0x1ULL /* Toggle bit */
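
The toggle bit is how the host distinguishes freshly written RRQ entries from stale ones as the queue wraps. A minimal consumption loop could look like the following; this is only a sketch of the usual toggle-bit ring technique, with placeholder bookkeeping names rather than the driver's actual completion path.

	/* Sketch only: 'hrrq_start', 'hrrq_end', 'hrrq_curr' and 'toggle' are
	 * assumed host-side state; complete_command() is a placeholder.
	 */
	while ((*hrrq_curr & SISL_RESP_HANDLE_T_BIT) == toggle) {
		u64 handle = *hrrq_curr & ~SISL_RESP_HANDLE_T_BIT;

		complete_command(handle);

		if (hrrq_curr < hrrq_end) {
			hrrq_curr++;
		} else {
			hrrq_curr = hrrq_start;			/* wrap ... */
			toggle ^= SISL_RESP_HANDLE_T_BIT;	/* ... and flip */
		}
	}
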
-
-/* MMIO space is required to support only 64-bit access */
-
-/*
- * This AFU has two mechanisms to deal with endian-ness.
- * One is a global configuration (in the afu_config) register
- * below that specifies the endian-ness of the host.
- * The other is a per context (i.e. application) specification
- * controlled by the endian_ctrl field here. Since the master
- * context is one such application, the master context's
- * endian-ness is set to be the same as the host's.
- *
- * As per the SISlite spec, the MMIO registers are always
- * big endian.
- */
-#define SISL_ENDIAN_CTRL_BE 0x8000000000000080ULL
-#define SISL_ENDIAN_CTRL_LE 0x0000000000000000ULL
-
-#ifdef __BIG_ENDIAN
-#define SISL_ENDIAN_CTRL SISL_ENDIAN_CTRL_BE
-#else
-#define SISL_ENDIAN_CTRL SISL_ENDIAN_CTRL_LE
-#endif
-
-/* per context host transport MMIO */
-struct sisl_host_map {
- __be64 endian_ctrl; /* Per context Endian Control. The AFU will
- * operate in whatever endianness the host
- * application's context specifies.
- */
-
- __be64 intr_status; /* this sends LISN# programmed in ctx_ctrl.
- * Only recovery in a PERM_ERR is a context
- * exit since there is no way to tell which
- * command caused the error.
- */
-#define SISL_ISTATUS_PERM_ERR_LISN_3_EA 0x0400ULL /* b53, user error */
-#define SISL_ISTATUS_PERM_ERR_LISN_2_EA 0x0200ULL /* b54, user error */
-#define SISL_ISTATUS_PERM_ERR_LISN_1_EA 0x0100ULL /* b55, user error */
-#define SISL_ISTATUS_PERM_ERR_LISN_3_PASID 0x0080ULL /* b56, user error */
-#define SISL_ISTATUS_PERM_ERR_LISN_2_PASID 0x0040ULL /* b57, user error */
-#define SISL_ISTATUS_PERM_ERR_LISN_1_PASID 0x0020ULL /* b58, user error */
-#define SISL_ISTATUS_PERM_ERR_CMDROOM 0x0010ULL /* b59, user error */
-#define SISL_ISTATUS_PERM_ERR_RCB_READ 0x0008ULL /* b60, user error */
-#define SISL_ISTATUS_PERM_ERR_SA_WRITE 0x0004ULL /* b61, user error */
-#define SISL_ISTATUS_PERM_ERR_RRQ_WRITE 0x0002ULL /* b62, user error */
- /* Page in wait accessing RCB/IOASA/RRQ is reported in b63.
- * Same error in data/LXT/RHT access is reported via IOASA.
- */
-#define SISL_ISTATUS_TEMP_ERR_PAGEIN 0x0001ULL /* b63, can only be
- * generated when AFU
- * auto retry is
- * disabled. If user
- * can determine the
- * command that caused
- * the error, it can
- * be retried.
- */
-#define SISL_ISTATUS_UNMASK (0x07FFULL) /* 1 means unmasked */
-#define SISL_ISTATUS_MASK ~(SISL_ISTATUS_UNMASK) /* 1 means masked */
-
- __be64 intr_clear;
- __be64 intr_mask;
- __be64 ioarrin; /* only write what cmd_room permits */
- __be64 rrq_start; /* start & end are both inclusive */
- __be64 rrq_end; /* write sequence: start followed by end */
- __be64 cmd_room;
- __be64 ctx_ctrl; /* least significant byte or b56:63 is LISN# */
-#define SISL_CTX_CTRL_UNMAP_SECTOR 0x8000000000000000ULL /* b0 */
-#define SISL_CTX_CTRL_LISN_MASK (0xFFULL)
- __be64 mbox_w; /* restricted use */
- __be64 sq_start; /* Submission Queue (R/W): write sequence and */
- __be64 sq_end; /* inclusion semantics are the same as RRQ */
- __be64 sq_head; /* Submission Queue Head (R): for debugging */
- __be64 sq_tail; /* Submission Queue TAIL (R/W): next IOARCB */
- __be64 sq_ctx_reset; /* Submission Queue Context Reset (R/W) */
-};
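
The comments above amount to a small programming protocol: the RRQ bounds are written start-then-end, and the IOARRIN doorbell must not be rung more times than cmd_room allows. A hedged sketch of that sequence, using the same readq_be()/writeq_be() accessors seen elsewhere in the driver but with placeholder variables:

	/* Sketch only: 'hmap' is a mapped struct sisl_host_map; 'rrq_start_ea',
	 * 'rrq_end_ea' and 'rcb_ea' are effective addresses owned by the
	 * caller; 'room' caches the last cmd_room read.
	 */
	writeq_be(rrq_start_ea, &hmap->rrq_start);	/* start first ... */
	writeq_be(rrq_end_ea, &hmap->rrq_end);		/* ... then end */

	if (room == 0)
		room = readq_be(&hmap->cmd_room);	/* refresh credit */
	if (room > 0) {
		writeq_be(rcb_ea, &hmap->ioarrin);	/* post one IOARCB */
		room--;
	}
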
-
-/* per context provisioning & control MMIO */
-struct sisl_ctrl_map {
- __be64 rht_start;
- __be64 rht_cnt_id;
- /* both cnt & ctx_id args must be ULL */
-#define SISL_RHT_CNT_ID(cnt, ctx_id) (((cnt) << 48) | ((ctx_id) << 32))
-
- __be64 ctx_cap; /* afu_rc below is when the capability is violated */
-#define SISL_CTX_CAP_PROXY_ISSUE 0x8000000000000000ULL /* afu_rc 0x21 */
-#define SISL_CTX_CAP_REAL_MODE 0x4000000000000000ULL /* afu_rc 0x21 */
-#define SISL_CTX_CAP_HOST_XLATE 0x2000000000000000ULL /* afu_rc 0x1a */
-#define SISL_CTX_CAP_PROXY_TARGET 0x1000000000000000ULL /* afu_rc 0x21 */
-#define SISL_CTX_CAP_AFU_CMD 0x0000000000000008ULL /* afu_rc 0x21 */
-#define SISL_CTX_CAP_GSCSI_CMD 0x0000000000000004ULL /* afu_rc 0x21 */
-#define SISL_CTX_CAP_WRITE_CMD 0x0000000000000002ULL /* afu_rc 0x21 */
-#define SISL_CTX_CAP_READ_CMD 0x0000000000000001ULL /* afu_rc 0x21 */
- __be64 mbox_r;
- __be64 lisn_pasid[2];
- /* pasid _a arg must be ULL */
-#define SISL_LISN_PASID(_a, _b) (((_a) << 32) | (_b))
- __be64 lisn_ea[3];
-};
-
-/* single copy global regs */
-struct sisl_global_regs {
- __be64 aintr_status;
- /*
- * In cxlflash, FC port/link are arranged in port pairs, each
- * gets a byte of status:
- *
- * *_OTHER: other err, FC_ERRCAP[31:20]
- * *_LOGO: target sent FLOGI/PLOGI/LOGO while logged in
- * *_CRC_T: CRC threshold exceeded
- * *_LOGI_R: login state machine timed out and retrying
- * *_LOGI_F: login failed, FC_ERROR[19:0]
- * *_LOGI_S: login succeeded
- * *_LINK_DN: link online to offline
- * *_LINK_UP: link offline to online
- */
-#define SISL_ASTATUS_FC2_OTHER 0x80000000ULL /* b32 */
-#define SISL_ASTATUS_FC2_LOGO 0x40000000ULL /* b33 */
-#define SISL_ASTATUS_FC2_CRC_T 0x20000000ULL /* b34 */
-#define SISL_ASTATUS_FC2_LOGI_R 0x10000000ULL /* b35 */
-#define SISL_ASTATUS_FC2_LOGI_F 0x08000000ULL /* b36 */
-#define SISL_ASTATUS_FC2_LOGI_S 0x04000000ULL /* b37 */
-#define SISL_ASTATUS_FC2_LINK_DN 0x02000000ULL /* b38 */
-#define SISL_ASTATUS_FC2_LINK_UP 0x01000000ULL /* b39 */
-
-#define SISL_ASTATUS_FC3_OTHER 0x00800000ULL /* b40 */
-#define SISL_ASTATUS_FC3_LOGO 0x00400000ULL /* b41 */
-#define SISL_ASTATUS_FC3_CRC_T 0x00200000ULL /* b42 */
-#define SISL_ASTATUS_FC3_LOGI_R 0x00100000ULL /* b43 */
-#define SISL_ASTATUS_FC3_LOGI_F 0x00080000ULL /* b44 */
-#define SISL_ASTATUS_FC3_LOGI_S 0x00040000ULL /* b45 */
-#define SISL_ASTATUS_FC3_LINK_DN 0x00020000ULL /* b46 */
-#define SISL_ASTATUS_FC3_LINK_UP 0x00010000ULL /* b47 */
-
-#define SISL_ASTATUS_FC0_OTHER 0x00008000ULL /* b48 */
-#define SISL_ASTATUS_FC0_LOGO 0x00004000ULL /* b49 */
-#define SISL_ASTATUS_FC0_CRC_T 0x00002000ULL /* b50 */
-#define SISL_ASTATUS_FC0_LOGI_R 0x00001000ULL /* b51 */
-#define SISL_ASTATUS_FC0_LOGI_F 0x00000800ULL /* b52 */
-#define SISL_ASTATUS_FC0_LOGI_S 0x00000400ULL /* b53 */
-#define SISL_ASTATUS_FC0_LINK_DN 0x00000200ULL /* b54 */
-#define SISL_ASTATUS_FC0_LINK_UP 0x00000100ULL /* b55 */
-
-#define SISL_ASTATUS_FC1_OTHER 0x00000080ULL /* b56 */
-#define SISL_ASTATUS_FC1_LOGO 0x00000040ULL /* b57 */
-#define SISL_ASTATUS_FC1_CRC_T 0x00000020ULL /* b58 */
-#define SISL_ASTATUS_FC1_LOGI_R 0x00000010ULL /* b59 */
-#define SISL_ASTATUS_FC1_LOGI_F 0x00000008ULL /* b60 */
-#define SISL_ASTATUS_FC1_LOGI_S 0x00000004ULL /* b61 */
-#define SISL_ASTATUS_FC1_LINK_DN 0x00000002ULL /* b62 */
-#define SISL_ASTATUS_FC1_LINK_UP 0x00000001ULL /* b63 */
-
-#define SISL_FC_INTERNAL_UNMASK 0x0000000300000000ULL /* 1 means unmasked */
-#define SISL_FC_INTERNAL_MASK ~(SISL_FC_INTERNAL_UNMASK)
-#define SISL_FC_INTERNAL_SHIFT 32
-
-#define SISL_FC_SHUTDOWN_NORMAL 0x0000000000000010ULL
-#define SISL_FC_SHUTDOWN_ABRUPT 0x0000000000000020ULL
-
-#define SISL_STATUS_SHUTDOWN_ACTIVE 0x0000000000000010ULL
-#define SISL_STATUS_SHUTDOWN_COMPLETE 0x0000000000000020ULL
-
-#define SISL_ASTATUS_UNMASK 0xFFFFFFFFULL /* 1 means unmasked */
-#define SISL_ASTATUS_MASK ~(SISL_ASTATUS_UNMASK) /* 1 means masked */
-
- __be64 aintr_clear;
- __be64 aintr_mask;
- __be64 afu_ctrl;
- __be64 afu_hb;
- __be64 afu_scratch_pad;
- __be64 afu_port_sel;
-#define SISL_AFUCONF_AR_IOARCB 0x4000ULL
-#define SISL_AFUCONF_AR_LXT 0x2000ULL
-#define SISL_AFUCONF_AR_RHT 0x1000ULL
-#define SISL_AFUCONF_AR_DATA 0x0800ULL
-#define SISL_AFUCONF_AR_RSRC 0x0400ULL
-#define SISL_AFUCONF_AR_IOASA 0x0200ULL
-#define SISL_AFUCONF_AR_RRQ 0x0100ULL
-/* Aggregate all Auto Retry Bits */
-#define SISL_AFUCONF_AR_ALL (SISL_AFUCONF_AR_IOARCB|SISL_AFUCONF_AR_LXT| \
- SISL_AFUCONF_AR_RHT|SISL_AFUCONF_AR_DATA| \
- SISL_AFUCONF_AR_RSRC|SISL_AFUCONF_AR_IOASA| \
- SISL_AFUCONF_AR_RRQ)
-#ifdef __BIG_ENDIAN
-#define SISL_AFUCONF_ENDIAN 0x0000ULL
-#else
-#define SISL_AFUCONF_ENDIAN 0x0020ULL
-#endif
-#define SISL_AFUCONF_MBOX_CLR_READ 0x0010ULL
- __be64 afu_config;
- __be64 rsvd[0xf8];
- __le64 afu_version;
- __be64 interface_version;
-#define SISL_INTVER_CAP_SHIFT 16
-#define SISL_INTVER_MAJ_SHIFT 8
-#define SISL_INTVER_CAP_MASK 0xFFFFFFFF00000000ULL
-#define SISL_INTVER_MAJ_MASK 0x00000000FFFF0000ULL
-#define SISL_INTVER_MIN_MASK 0x000000000000FFFFULL
-#define SISL_INTVER_CAP_IOARRIN_CMD_MODE 0x800000000000ULL
-#define SISL_INTVER_CAP_SQ_CMD_MODE 0x400000000000ULL
-#define SISL_INTVER_CAP_RESERVED_CMD_MODE_A 0x200000000000ULL
-#define SISL_INTVER_CAP_RESERVED_CMD_MODE_B 0x100000000000ULL
-#define SISL_INTVER_CAP_LUN_PROVISION 0x080000000000ULL
-#define SISL_INTVER_CAP_AFU_DEBUG 0x040000000000ULL
-#define SISL_INTVER_CAP_OCXL_LISN 0x020000000000ULL
-};
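
Feature discovery keys off interface_version: the capability bits above are defined pre-shifted so they can be tested once the register has been shifted right by SISL_INTVER_CAP_SHIFT, mirroring the driver's afu_has_cap() helper in common.h (not part of this hunk). A minimal sketch under that assumption:

	/* Sketch only: 'intver' holds a copy of the interface_version
	 * register, e.g. obtained with readq_be().
	 */
	static bool afu_supports_sq(u64 intver)
	{
		u64 caps = intver >> SISL_INTVER_CAP_SHIFT;

		return (caps & SISL_INTVER_CAP_SQ_CMD_MODE) != 0;
	}
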
-
-#define CXLFLASH_NUM_FC_PORTS_PER_BANK 2 /* fixed # of ports per bank */
-#define CXLFLASH_MAX_FC_BANKS 2 /* max # of banks supported */
-#define CXLFLASH_MAX_FC_PORTS (CXLFLASH_NUM_FC_PORTS_PER_BANK * \
- CXLFLASH_MAX_FC_BANKS)
-#define CXLFLASH_MAX_CONTEXT 512 /* number of contexts per AFU */
-#define CXLFLASH_NUM_VLUNS 512 /* number of vluns per AFU/port */
-#define CXLFLASH_NUM_REGS 512 /* number of registers per port */
-
-struct fc_port_bank {
- __be64 fc_port_regs[CXLFLASH_NUM_FC_PORTS_PER_BANK][CXLFLASH_NUM_REGS];
- __be64 fc_port_luns[CXLFLASH_NUM_FC_PORTS_PER_BANK][CXLFLASH_NUM_VLUNS];
-};
-
-struct sisl_global_map {
- union {
- struct sisl_global_regs regs;
- char page0[SIZE_4K]; /* page 0 */
- };
-
- char page1[SIZE_4K]; /* page 1 */
-
- struct fc_port_bank bank[CXLFLASH_MAX_FC_BANKS]; /* pages 2 - 9 */
-
- /* pages 10 - 15 are reserved */
-
-};
-
-/*
- * CXL Flash Memory Map
- *
- * +-------------------------------+
- * | 512 * 64 KB User MMIO |
- * | (per context) |
- * | User Accessible |
- * +-------------------------------+
- * | 512 * 128 B per context |
- * | Provisioning and Control |
- * | Trusted Process accessible |
- * +-------------------------------+
- * | 64 KB Global |
- * | Trusted Process accessible |
- * +-------------------------------+
- */
-struct cxlflash_afu_map {
- union {
- struct sisl_host_map host;
- char harea[SIZE_64K]; /* 64KB each */
- } hosts[CXLFLASH_MAX_CONTEXT];
-
- union {
- struct sisl_ctrl_map ctrl;
- char carea[cache_line_size()]; /* 128B each */
- } ctrls[CXLFLASH_MAX_CONTEXT];
-
- union {
- struct sisl_global_map global;
- char garea[SIZE_64K]; /* 64KB single block */
- };
-};
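
Because cxlflash_afu_map mirrors the memory map above field for field, the per-context and global MMIO areas can be reached by plain member access once the problem-state area has been mapped. Illustrative only; the pointer names are placeholders:

	/* Sketch only: 'afu_map' is assumed to be the ioremap'd base of the
	 * AFU problem-state area and 'ctx_hndl' a valid context handle.
	 */
	struct sisl_host_map __iomem *hmap = &afu_map->hosts[ctx_hndl].host;
	struct sisl_ctrl_map __iomem *cmap = &afu_map->ctrls[ctx_hndl].ctrl;
	struct sisl_global_map __iomem *gmap = &afu_map->global;

	u64 intver = readq_be(&gmap->regs.interface_version);
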
-
-/*
- * LXT - LBA Translation Table
- * LXT control blocks
- */
-struct sisl_lxt_entry {
- u64 rlba_base; /* bits 0:47 is base
- * b48:55 is lun index
- * b58:59 is write & read perms
- * (if no perm, afu_rc=0x15)
- * b60:63 is port_sel mask
- */
-};
-
-/*
- * RHT - Resource Handle Table
- * Per the SISlite spec, RHT entries are to be 16-byte aligned
- */
-struct sisl_rht_entry {
- struct sisl_lxt_entry *lxt_start;
- u32 lxt_cnt;
- u16 rsvd;
- u8 fp; /* format & perm nibbles.
- * (if no perm, afu_rc=0x05)
- */
- u8 nmask;
-} __packed __aligned(16);
-
-struct sisl_rht_entry_f1 {
- u64 lun_id;
- union {
- struct {
- u8 valid;
- u8 rsvd[5];
- u8 fp;
- u8 port_sel;
- };
-
- u64 dw;
- };
-} __packed __aligned(16);
-
-/* make the fp byte */
-#define SISL_RHT_FP(fmt, perm) (((fmt) << 4) | (perm))
-
-/* make the fp byte for a clone from a source fp and clone flags
- * flags must be only 2 LSB bits.
- */
-#define SISL_RHT_FP_CLONE(src_fp, cln_flags) ((src_fp) & (0xFC | (cln_flags)))
-
-#define RHT_PERM_READ 0x01U
-#define RHT_PERM_WRITE 0x02U
-#define RHT_PERM_RW (RHT_PERM_READ | RHT_PERM_WRITE)
-
-/* extract the perm bits from a fp */
-#define SISL_RHT_PERM(fp) ((fp) & RHT_PERM_RW)
-
-#define PORT0 0x01U
-#define PORT1 0x02U
-#define PORT2 0x04U
-#define PORT3 0x08U
-#define PORT_MASK(_n) ((1 << (_n)) - 1)
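
A few worked values make the fp-byte and port-mask helpers above concrete; these are simply arithmetic consequences of the macros, shown for illustration.

	u8 fp = SISL_RHT_FP(1U, RHT_PERM_RW);			/* format 1, RW: 0x13 */
	u8 perm = SISL_RHT_PERM(fp);				/* RHT_PERM_RW (0x03) */
	u8 fp_ro = SISL_RHT_FP_CLONE(fp, RHT_PERM_READ);	/* read-only clone: 0x11 */
	u8 mask = PORT_MASK(2);					/* PORT0 | PORT1 == 0x03 */
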
-
-/* AFU Sync Mode byte */
-#define AFU_LW_SYNC 0x0U
-#define AFU_HW_SYNC 0x1U
-#define AFU_GSYNC 0x2U
-
-/* Special Task Management Function CDB */
-#define TMF_LUN_RESET 0x1U
-#define TMF_CLEAR_ACA 0x2U
-
-#endif /* _SISLITE_H */
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
deleted file mode 100644
index b375509d1470..000000000000
--- a/drivers/scsi/cxlflash/superpipe.c
+++ /dev/null
@@ -1,2218 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#include <linux/delay.h>
-#include <linux/file.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/syscalls.h>
-#include <linux/unaligned.h>
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_eh.h>
-#include <uapi/scsi/cxlflash_ioctl.h>
-
-#include "sislite.h"
-#include "common.h"
-#include "vlun.h"
-#include "superpipe.h"
-
-struct cxlflash_global global;
-
-/**
- * marshal_rele_to_resize() - translate release to resize structure
- * @release: Source structure from which to translate/copy.
- * @resize: Destination structure for the translate/copy.
- */
-static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
- struct dk_cxlflash_resize *resize)
-{
- resize->hdr = release->hdr;
- resize->context_id = release->context_id;
- resize->rsrc_handle = release->rsrc_handle;
-}
-
-/**
- * marshal_det_to_rele() - translate detach to release structure
- * @detach: Destination structure for the translate/copy.
- * @release: Source structure from which to translate/copy.
- */
-static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
- struct dk_cxlflash_release *release)
-{
- release->hdr = detach->hdr;
- release->context_id = detach->context_id;
-}
-
-/**
- * marshal_udir_to_rele() - translate udirect to release structure
- * @udirect: Source structure from which to translate/copy.
- * @release: Destination structure for the translate/copy.
- */
-static void marshal_udir_to_rele(struct dk_cxlflash_udirect *udirect,
- struct dk_cxlflash_release *release)
-{
- release->hdr = udirect->hdr;
- release->context_id = udirect->context_id;
- release->rsrc_handle = udirect->rsrc_handle;
-}
-
-/**
- * cxlflash_free_errpage() - frees resources associated with global error page
- */
-void cxlflash_free_errpage(void)
-{
-
- mutex_lock(&global.mutex);
- if (global.err_page) {
- __free_page(global.err_page);
- global.err_page = NULL;
- }
- mutex_unlock(&global.mutex);
-}
-
-/**
- * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts
- * @cfg: Internal structure associated with the host.
- *
- * When the host needs to go down, all users must be quiesced and their
- * memory freed. This is accomplished by putting the contexts in error
- * state which will notify the user and let them 'drive' the tear down.
- * Meanwhile, this routine camps until all user contexts have been removed.
- *
- * Note that the main loop in this routine will always execute at least once
- * to flush the reset_waitq.
- */
-void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
- int i, found = true;
-
- cxlflash_mark_contexts_error(cfg);
-
- while (true) {
- for (i = 0; i < MAX_CONTEXT; i++)
- if (cfg->ctx_tbl[i]) {
- found = true;
- break;
- }
-
- if (!found && list_empty(&cfg->ctx_err_recovery))
- return;
-
- dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
- __func__);
- wake_up_all(&cfg->reset_waitq);
- ssleep(1);
- found = false;
- }
-}
-
-/**
- * find_error_context() - locates a context by cookie on the error recovery list
- * @cfg: Internal structure associated with the host.
- * @rctxid: Desired context by id.
- * @file: Desired context by file.
- *
- * Return: Found context on success, NULL on failure
- */
-static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid,
- struct file *file)
-{
- struct ctx_info *ctxi;
-
- list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list)
- if ((ctxi->ctxid == rctxid) || (ctxi->file == file))
- return ctxi;
-
- return NULL;
-}
-
-/**
- * get_context() - obtains a validated and locked context reference
- * @cfg: Internal structure associated with the host.
- * @rctxid: Desired context (raw, un-decoded format).
- * @arg: LUN information or file associated with request.
- * @ctx_ctrl: Control information to 'steer' desired lookup.
- *
- * NOTE: despite the name pid, in linux, current->pid actually refers
- * to the lightweight process id (tid) and can change if the process is
- * multi threaded. The tgid remains constant for the process and only changes
- * when the process of fork. For all intents and purposes, think of tgid
- * as a pid in the traditional sense.
- *
- * Return: Validated context on success, NULL on failure
- */
-struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
- void *arg, enum ctx_ctrl ctx_ctrl)
-{
- struct device *dev = &cfg->dev->dev;
- struct ctx_info *ctxi = NULL;
- struct lun_access *lun_access = NULL;
- struct file *file = NULL;
- struct llun_info *lli = arg;
- u64 ctxid = DECODE_CTXID(rctxid);
- int rc;
- pid_t pid = task_tgid_nr(current), ctxpid = 0;
-
- if (ctx_ctrl & CTX_CTRL_FILE) {
- lli = NULL;
- file = (struct file *)arg;
- }
-
- if (ctx_ctrl & CTX_CTRL_CLONE)
- pid = task_ppid_nr(current);
-
- if (likely(ctxid < MAX_CONTEXT)) {
- while (true) {
- mutex_lock(&cfg->ctx_tbl_list_mutex);
- ctxi = cfg->ctx_tbl[ctxid];
- if (ctxi)
- if ((file && (ctxi->file != file)) ||
- (!file && (ctxi->ctxid != rctxid)))
- ctxi = NULL;
-
- if ((ctx_ctrl & CTX_CTRL_ERR) ||
- (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK)))
- ctxi = find_error_context(cfg, rctxid, file);
- if (!ctxi) {
- mutex_unlock(&cfg->ctx_tbl_list_mutex);
- goto out;
- }
-
- /*
- * Need to acquire ownership of the context while still
- * under the table/list lock to serialize with a remove
- * thread. Use the 'try' to avoid stalling the
- * table/list lock for a single context.
- *
- * Note that the lock order is:
- *
- * cfg->ctx_tbl_list_mutex -> ctxi->mutex
- *
- * Therefore release ctx_tbl_list_mutex before retrying.
- */
- rc = mutex_trylock(&ctxi->mutex);
- mutex_unlock(&cfg->ctx_tbl_list_mutex);
- if (rc)
- break; /* got the context's lock! */
- }
-
- if (ctxi->unavail)
- goto denied;
-
- ctxpid = ctxi->pid;
- if (likely(!(ctx_ctrl & CTX_CTRL_NOPID)))
- if (pid != ctxpid)
- goto denied;
-
- if (lli) {
- list_for_each_entry(lun_access, &ctxi->luns, list)
- if (lun_access->lli == lli)
- goto out;
- goto denied;
- }
- }
-
-out:
- dev_dbg(dev, "%s: rctxid=%016llx ctxinfo=%p ctxpid=%u pid=%u "
- "ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
- ctx_ctrl);
-
- return ctxi;
-
-denied:
- mutex_unlock(&ctxi->mutex);
- ctxi = NULL;
- goto out;
-}
-
-/**
- * put_context() - release a context that was retrieved from get_context()
- * @ctxi: Context to release.
- *
- * For now, releasing the context equates to unlocking its mutex.
- */
-void put_context(struct ctx_info *ctxi)
-{
- mutex_unlock(&ctxi->mutex);
-}
-
-/**
- * afu_attach() - attach a context to the AFU
- * @cfg: Internal structure associated with the host.
- * @ctxi: Context to attach.
- *
- * Upon setting the context capabilities, they must be confirmed with
- * a read back operation as the context might have been closed since
- * the mailbox was unlocked. When this occurs, registration is failed.
- *
- * Return: 0 on success, -errno on failure
- */
-static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
-{
- struct device *dev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map;
- int rc = 0;
- struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
- u64 val;
- int i;
-
- /* Unlock cap and restrict user to read/write cmds in translated mode */
- readq_be(&ctrl_map->mbox_r);
- val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD);
- writeq_be(val, &ctrl_map->ctx_cap);
- val = readq_be(&ctrl_map->ctx_cap);
- if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
- dev_err(dev, "%s: ctx may be closed val=%016llx\n",
- __func__, val);
- rc = -EAGAIN;
- goto out;
- }
-
- if (afu_is_ocxl_lisn(afu)) {
- /* Set up the LISN effective address for each interrupt */
- for (i = 0; i < ctxi->irqs; i++) {
- val = cfg->ops->get_irq_objhndl(ctxi->ctx, i);
- writeq_be(val, &ctrl_map->lisn_ea[i]);
- }
-
- /* Use primary HWQ PASID as identifier for all interrupts */
- val = hwq->ctx_hndl;
- writeq_be(SISL_LISN_PASID(val, val), &ctrl_map->lisn_pasid[0]);
- writeq_be(SISL_LISN_PASID(0UL, val), &ctrl_map->lisn_pasid[1]);
- }
-
- /* Set up MMIO registers pointing to the RHT */
- writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
- val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(hwq->ctx_hndl));
- writeq_be(val, &ctrl_map->rht_cnt_id);
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * read_cap16() - issues a SCSI READ_CAP16 command
- * @sdev: SCSI device associated with LUN.
- * @lli: LUN destined for capacity request.
- *
- * The READ_CAP16 can take quite a while to complete. Should an EEH occur while
- * in scsi_execute_cmd(), the EEH handler will attempt to recover. As part of
- * the recovery, the handler drains all currently running ioctls, waiting until
- * they have completed before proceeding with a reset. As this routine is used
- * on the ioctl path, this can create a condition where the EEH handler becomes
- * stuck, infinitely waiting for this ioctl thread. To avoid this behavior,
- * temporarily unmark this thread as an ioctl thread by releasing the ioctl
- * read semaphore. This will allow the EEH handler to proceed with a recovery
- * while this thread is still running. Once the scsi_execute_cmd() returns,
- * reacquire the ioctl read semaphore and check the adapter state in case it
- * changed while inside of scsi_execute_cmd(). The state check will wait if the
- * adapter is still being recovered or return a failure if the recovery failed.
- * In the event that the adapter reset failed, simply return the failure as the
- * ioctl would be unable to continue.
- *
- * Note that the above puts a requirement on this routine to only be called on
- * an ioctl thread.
- *
- * Return: 0 on success, -errno on failure
- */
-static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct glun_info *gli = lli->parent;
- struct scsi_sense_hdr sshdr;
- const struct scsi_exec_args exec_args = {
- .sshdr = &sshdr,
- };
- u8 *cmd_buf = NULL;
- u8 *scsi_cmd = NULL;
- int rc = 0;
- int result = 0;
- int retry_cnt = 0;
- u32 to = CMD_TIMEOUT * HZ;
-
-retry:
- cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
- scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
- if (unlikely(!cmd_buf || !scsi_cmd)) {
- rc = -ENOMEM;
- goto out;
- }
-
- scsi_cmd[0] = SERVICE_ACTION_IN_16; /* read cap(16) */
- scsi_cmd[1] = SAI_READ_CAPACITY_16; /* service action */
- put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);
-
- dev_dbg(dev, "%s: %ssending cmd(%02x)\n", __func__,
- retry_cnt ? "re" : "", scsi_cmd[0]);
-
- /* Drop the ioctl read semaphore across lengthy call */
- up_read(&cfg->ioctl_rwsem);
- result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, cmd_buf,
- CMD_BUFSIZE, to, CMD_RETRIES, &exec_args);
- down_read(&cfg->ioctl_rwsem);
- rc = check_state(cfg);
- if (rc) {
- dev_err(dev, "%s: Failed state result=%08x\n",
- __func__, result);
- rc = -ENODEV;
- goto out;
- }
-
- if (result > 0 && scsi_sense_valid(&sshdr)) {
- if (result & SAM_STAT_CHECK_CONDITION) {
- switch (sshdr.sense_key) {
- case NO_SENSE:
- case RECOVERED_ERROR:
- case NOT_READY:
- result &= ~SAM_STAT_CHECK_CONDITION;
- break;
- case UNIT_ATTENTION:
- switch (sshdr.asc) {
- case 0x29: /* Power on Reset or Device Reset */
- fallthrough;
- case 0x2A: /* Device capacity changed */
- case 0x3F: /* Report LUNs changed */
- /* Retry the command once more */
- if (retry_cnt++ < 1) {
- kfree(cmd_buf);
- kfree(scsi_cmd);
- goto retry;
- }
- }
- break;
- default:
- break;
- }
- }
- }
-
- if (result) {
- dev_err(dev, "%s: command failed, result=%08x\n",
- __func__, result);
- rc = -EIO;
- goto out;
- }
-
- /*
- * Read cap was successful, grab values from the buffer;
- * note that we don't need to worry about unaligned access
- * as the buffer is allocated on an aligned boundary.
- */
- mutex_lock(&gli->mutex);
- gli->max_lba = be64_to_cpu(*((__be64 *)&cmd_buf[0]));
- gli->blk_len = be32_to_cpu(*((__be32 *)&cmd_buf[8]));
- mutex_unlock(&gli->mutex);
-
-out:
- kfree(cmd_buf);
- kfree(scsi_cmd);
-
- dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n",
- __func__, gli->max_lba, gli->blk_len, rc);
- return rc;
-}
-
-/**
- * get_rhte() - obtains validated resource handle table entry reference
- * @ctxi: Context owning the resource handle.
- * @rhndl: Resource handle associated with entry.
- * @lli: LUN associated with request.
- *
- * Return: Validated RHTE on success, NULL on failure
- */
-struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
- struct llun_info *lli)
-{
- struct cxlflash_cfg *cfg = ctxi->cfg;
- struct device *dev = &cfg->dev->dev;
- struct sisl_rht_entry *rhte = NULL;
-
- if (unlikely(!ctxi->rht_start)) {
- dev_dbg(dev, "%s: Context does not have allocated RHT\n",
- __func__);
- goto out;
- }
-
- if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
- dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
- __func__, rhndl);
- goto out;
- }
-
- if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
- dev_dbg(dev, "%s: Bad resource handle LUN rhndl=%d\n",
- __func__, rhndl);
- goto out;
- }
-
- rhte = &ctxi->rht_start[rhndl];
- if (unlikely(rhte->nmask == 0)) {
- dev_dbg(dev, "%s: Unopened resource handle rhndl=%d\n",
- __func__, rhndl);
- rhte = NULL;
- goto out;
- }
-
-out:
- return rhte;
-}
-
-/**
- * rhte_checkout() - obtains free/empty resource handle table entry
- * @ctxi: Context owning the resource handle.
- * @lli: LUN associated with request.
- *
- * Return: Free RHTE on success, NULL on failure
- */
-struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
- struct llun_info *lli)
-{
- struct cxlflash_cfg *cfg = ctxi->cfg;
- struct device *dev = &cfg->dev->dev;
- struct sisl_rht_entry *rhte = NULL;
- int i;
-
- /* Find a free RHT entry */
- for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
- if (ctxi->rht_start[i].nmask == 0) {
- rhte = &ctxi->rht_start[i];
- ctxi->rht_out++;
- break;
- }
-
- if (likely(rhte))
- ctxi->rht_lun[i] = lli;
-
- dev_dbg(dev, "%s: returning rhte=%p index=%d\n", __func__, rhte, i);
- return rhte;
-}
-
-/**
- * rhte_checkin() - releases a resource handle table entry
- * @ctxi: Context owning the resource handle.
- * @rhte: RHTE to release.
- */
-void rhte_checkin(struct ctx_info *ctxi,
- struct sisl_rht_entry *rhte)
-{
- u32 rsrc_handle = rhte - ctxi->rht_start;
-
- rhte->nmask = 0;
- rhte->fp = 0;
- ctxi->rht_out--;
- ctxi->rht_lun[rsrc_handle] = NULL;
- ctxi->rht_needs_ws[rsrc_handle] = false;
-}
-
-/**
- * rht_format1() - populates a RHTE for format 1
- * @rhte: RHTE to populate.
- * @lun_id: LUN ID of LUN associated with RHTE.
- * @perm: Desired permissions for RHTE.
- * @port_sel: Port selection mask
- */
-static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm,
- u32 port_sel)
-{
- /*
- * Populate the Format 1 RHT entry for direct access (physical
- * LUN) using the synchronization sequence defined in the
- * SISLite specification.
- */
- struct sisl_rht_entry_f1 dummy = { 0 };
- struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;
-
- memset(rhte_f1, 0, sizeof(*rhte_f1));
- rhte_f1->fp = SISL_RHT_FP(1U, 0);
- dma_wmb(); /* Make setting of format bit visible */
-
- rhte_f1->lun_id = lun_id;
- dma_wmb(); /* Make setting of LUN id visible */
-
- /*
- * Use a dummy RHT Format 1 entry to build the second dword
- * of the entry that must be populated in a single write when
- * enabled (valid bit set to TRUE).
- */
- dummy.valid = 0x80;
- dummy.fp = SISL_RHT_FP(1U, perm);
- dummy.port_sel = port_sel;
- rhte_f1->dw = dummy.dw;
-
- dma_wmb(); /* Make remaining RHT entry fields visible */
-}
-
-/**
- * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode
- * @gli: LUN to attach.
- * @mode: Desired mode of the LUN.
- * @locked: Mutex status on current thread.
- *
- * Return: 0 on success, -errno on failure
- */
-int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
-{
- int rc = 0;
-
- if (!locked)
- mutex_lock(&gli->mutex);
-
- if (gli->mode == MODE_NONE)
- gli->mode = mode;
- else if (gli->mode != mode) {
- pr_debug("%s: gli_mode=%d requested_mode=%d\n",
- __func__, gli->mode, mode);
- rc = -EINVAL;
- goto out;
- }
-
- gli->users++;
- WARN_ON(gli->users <= 0);
-out:
- pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n",
- __func__, rc, gli->mode, gli->users);
- if (!locked)
- mutex_unlock(&gli->mutex);
- return rc;
-}
-
-/**
- * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode
- * @gli: LUN to detach.
- *
- * When resetting the mode, terminate block allocation resources as they
- * are no longer required (service is safe to call even when block allocation
- * resources were not present - such as when transitioning from physical mode).
- * These resources will be reallocated when needed (subsequent transition to
- * virtual mode).
- */
-void cxlflash_lun_detach(struct glun_info *gli)
-{
- mutex_lock(&gli->mutex);
- WARN_ON(gli->mode == MODE_NONE);
- if (--gli->users == 0) {
- gli->mode = MODE_NONE;
- cxlflash_ba_terminate(&gli->blka.ba_lun);
- }
- pr_debug("%s: gli->users=%u\n", __func__, gli->users);
- WARN_ON(gli->users < 0);
- mutex_unlock(&gli->mutex);
-}
-
-/**
- * _cxlflash_disk_release() - releases the specified resource entry
- * @sdev: SCSI device associated with LUN.
- * @ctxi: Context owning resources.
- * @release: Release ioctl data structure.
- *
- * For LUNs in virtual mode, the virtual LUN associated with the specified
- * resource handle is resized to 0 prior to releasing the RHTE. Note that the
- * AFU sync should _not_ be performed when the context is sitting on the error
- * recovery list. A context on the error recovery list is not known to the AFU
- * due to reset. When the context is recovered, it will be reattached and made
- * known again to the AFU.
- *
- * Return: 0 on success, -errno on failure
- */
-int _cxlflash_disk_release(struct scsi_device *sdev,
- struct ctx_info *ctxi,
- struct dk_cxlflash_release *release)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct afu *afu = cfg->afu;
- bool put_ctx = false;
-
- struct dk_cxlflash_resize size;
- res_hndl_t rhndl = release->rsrc_handle;
-
- int rc = 0;
- int rcr = 0;
- u64 ctxid = DECODE_CTXID(release->context_id),
- rctxid = release->context_id;
-
- struct sisl_rht_entry *rhte;
- struct sisl_rht_entry_f1 *rhte_f1;
-
- dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu gli->mode=%u gli->users=%u\n",
- __func__, ctxid, release->rsrc_handle, gli->mode, gli->users);
-
- if (!ctxi) {
- ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
- __func__, ctxid);
- rc = -EINVAL;
- goto out;
- }
-
- put_ctx = true;
- }
-
- rhte = get_rhte(ctxi, rhndl, lli);
- if (unlikely(!rhte)) {
- dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
- __func__, rhndl);
- rc = -EINVAL;
- goto out;
- }
-
- /*
- * Resize to 0 for virtual LUNS by setting the size
- * to 0. This will clear LXT_START and LXT_CNT fields
- * in the RHT entry and properly sync with the AFU.
- *
- * Afterwards we clear the remaining fields.
- */
- switch (gli->mode) {
- case MODE_VIRTUAL:
- marshal_rele_to_resize(release, &size);
- size.req_size = 0;
- rc = _cxlflash_vlun_resize(sdev, ctxi, &size);
- if (rc) {
- dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc);
- goto out;
- }
-
- break;
- case MODE_PHYSICAL:
- /*
- * Clear the Format 1 RHT entry for direct access
- * (physical LUN) using the synchronization sequence
- * defined in the SISLite specification.
- */
- rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;
-
- rhte_f1->valid = 0;
- dma_wmb(); /* Make revocation of RHT entry visible */
-
- rhte_f1->lun_id = 0;
- dma_wmb(); /* Make clearing of LUN id visible */
-
- rhte_f1->dw = 0;
- dma_wmb(); /* Make RHT entry bottom-half clearing visible */
-
- if (!ctxi->err_recovery_active) {
- rcr = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
- if (unlikely(rcr))
- dev_dbg(dev, "%s: AFU sync failed rc=%d\n",
- __func__, rcr);
- }
- break;
- default:
- WARN(1, "Unsupported LUN mode!");
- goto out;
- }
-
- rhte_checkin(ctxi, rhte);
- cxlflash_lun_detach(gli);
-
-out:
- if (put_ctx)
- put_context(ctxi);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-int cxlflash_disk_release(struct scsi_device *sdev, void *release)
-{
- return _cxlflash_disk_release(sdev, NULL, release);
-}
-
-/**
- * destroy_context() - releases a context
- * @cfg: Internal structure associated with the host.
- * @ctxi: Context to release.
- *
- * This routine is safe to be called with a non-initialized context.
- * Also note that the routine conditionally checks for the existence
- * of the context control map before clearing the RHT registers and
- * context capabilities because it is possible to destroy a context
- * while the context is in the error state (previous mapping was
- * removed [so there is no need to worry about clearing] and context
- * is waiting for a new mapping).
- */
-static void destroy_context(struct cxlflash_cfg *cfg,
- struct ctx_info *ctxi)
-{
- struct afu *afu = cfg->afu;
-
- if (ctxi->initialized) {
- WARN_ON(!list_empty(&ctxi->luns));
-
- /* Clear RHT registers and drop all capabilities for context */
- if (afu->afu_map && ctxi->ctrl_map) {
- writeq_be(0, &ctxi->ctrl_map->rht_start);
- writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
- writeq_be(0, &ctxi->ctrl_map->ctx_cap);
- }
- }
-
- /* Free memory associated with context */
- free_page((ulong)ctxi->rht_start);
- kfree(ctxi->rht_needs_ws);
- kfree(ctxi->rht_lun);
- kfree(ctxi);
-}
-
-/**
- * create_context() - allocates and initializes a context
- * @cfg: Internal structure associated with the host.
- *
- * Return: Allocated context on success, NULL on failure
- */
-static struct ctx_info *create_context(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
- struct ctx_info *ctxi = NULL;
- struct llun_info **lli = NULL;
- u8 *ws = NULL;
- struct sisl_rht_entry *rhte;
-
- ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
- lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
- ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
- if (unlikely(!ctxi || !lli || !ws)) {
- dev_err(dev, "%s: Unable to allocate context\n", __func__);
- goto err;
- }
-
- rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
- if (unlikely(!rhte)) {
- dev_err(dev, "%s: Unable to allocate RHT\n", __func__);
- goto err;
- }
-
- ctxi->rht_lun = lli;
- ctxi->rht_needs_ws = ws;
- ctxi->rht_start = rhte;
-out:
- return ctxi;
-
-err:
- kfree(ws);
- kfree(lli);
- kfree(ctxi);
- ctxi = NULL;
- goto out;
-}
-
-/**
- * init_context() - initializes a previously allocated context
- * @ctxi: Previously allocated context
- * @cfg: Internal structure associated with the host.
- * @ctx: Previously obtained context cookie.
- * @ctxid: Previously obtained process element associated with CXL context.
- * @file: Previously obtained file associated with CXL context.
- * @perms: User-specified permissions.
- * @irqs: User-specified number of interrupts.
- */
-static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
- void *ctx, int ctxid, struct file *file, u32 perms,
- u64 irqs)
-{
- struct afu *afu = cfg->afu;
-
- ctxi->rht_perms = perms;
- ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
- ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
- ctxi->irqs = irqs;
- ctxi->pid = task_tgid_nr(current); /* tgid = pid */
- ctxi->ctx = ctx;
- ctxi->cfg = cfg;
- ctxi->file = file;
- ctxi->initialized = true;
- mutex_init(&ctxi->mutex);
- kref_init(&ctxi->kref);
- INIT_LIST_HEAD(&ctxi->luns);
- INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */
-}
-
-/**
- * remove_context() - context kref release handler
- * @kref: Kernel reference associated with context to be removed.
- *
- * When a context no longer has any references it can safely be removed
- * from global access and destroyed. Note that it is assumed the thread
- * relinquishing access to the context holds its mutex.
- */
-static void remove_context(struct kref *kref)
-{
- struct ctx_info *ctxi = container_of(kref, struct ctx_info, kref);
- struct cxlflash_cfg *cfg = ctxi->cfg;
- u64 ctxid = DECODE_CTXID(ctxi->ctxid);
-
- /* Remove context from table/error list */
- WARN_ON(!mutex_is_locked(&ctxi->mutex));
- ctxi->unavail = true;
- mutex_unlock(&ctxi->mutex);
- mutex_lock(&cfg->ctx_tbl_list_mutex);
- mutex_lock(&ctxi->mutex);
-
- if (!list_empty(&ctxi->list))
- list_del(&ctxi->list);
- cfg->ctx_tbl[ctxid] = NULL;
- mutex_unlock(&cfg->ctx_tbl_list_mutex);
- mutex_unlock(&ctxi->mutex);
-
- /* Context now completely uncoupled/unreachable */
- destroy_context(cfg, ctxi);
-}
-
-/**
- * _cxlflash_disk_detach() - detaches a LUN from a context
- * @sdev: SCSI device associated with LUN.
- * @ctxi: Context owning resources.
- * @detach: Detach ioctl data structure.
- *
- * As part of the detach, all per-context resources associated with the LUN
- * are cleaned up. When detaching the last LUN for a context, the context
- * itself is cleaned up and released.
- *
- * Return: 0 on success, -errno on failure
- */
-static int _cxlflash_disk_detach(struct scsi_device *sdev,
- struct ctx_info *ctxi,
- struct dk_cxlflash_detach *detach)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct lun_access *lun_access, *t;
- struct dk_cxlflash_release rel;
- bool put_ctx = false;
-
- int i;
- int rc = 0;
- u64 ctxid = DECODE_CTXID(detach->context_id),
- rctxid = detach->context_id;
-
- dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid);
-
- if (!ctxi) {
- ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
- __func__, ctxid);
- rc = -EINVAL;
- goto out;
- }
-
- put_ctx = true;
- }
-
- /* Cleanup outstanding resources tied to this LUN */
- if (ctxi->rht_out) {
- marshal_det_to_rele(detach, &rel);
- for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
- if (ctxi->rht_lun[i] == lli) {
- rel.rsrc_handle = i;
- _cxlflash_disk_release(sdev, ctxi, &rel);
- }
-
- /* No need to loop further if we're done */
- if (ctxi->rht_out == 0)
- break;
- }
- }
-
- /* Take our LUN out of context, free the node */
- list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
- if (lun_access->lli == lli) {
- list_del(&lun_access->list);
- kfree(lun_access);
- lun_access = NULL;
- break;
- }
-
- /*
- * Release the context reference and the sdev reference that
- * bound this LUN to the context.
- */
- if (kref_put(&ctxi->kref, remove_context))
- put_ctx = false;
- scsi_device_put(sdev);
-out:
- if (put_ctx)
- put_context(ctxi);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-static int cxlflash_disk_detach(struct scsi_device *sdev, void *detach)
-{
- return _cxlflash_disk_detach(sdev, NULL, detach);
-}
-
-/**
- * cxlflash_cxl_release() - release handler for adapter file descriptor
- * @inode: File-system inode associated with fd.
- * @file: File installed with adapter file descriptor.
- *
- * This routine is the release handler for the fops registered with
- * the CXL services on an initial attach for a context. It is called
- * when a close (explicitly by the user or as part of a process tear
- * down) is performed on the adapter file descriptor returned to the
- * user. The user should be aware that explicitly performing a close is
- * considered catastrophic and subsequent usage of the superpipe API
- * with previously saved off tokens will fail.
- *
- * This routine derives the context reference and calls detach for
- * each LUN associated with the context. The final detach operation
- * causes the context itself to be freed. With exception to when the
- * CXL process element (context id) lookup fails (a case that should
- * theoretically never occur), every call into this routine results
- * in a complete freeing of a context.
- *
- * Detaching the LUN is typically an ioctl() operation and the underlying
- * code assumes that ioctl_rwsem has been acquired as a reader. To support
- * that design point, the semaphore is acquired and released around detach.
- *
- * Return: 0 on success
- */
-static int cxlflash_cxl_release(struct inode *inode, struct file *file)
-{
- struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
- cxl_fops);
- void *ctx = cfg->ops->fops_get_context(file);
- struct device *dev = &cfg->dev->dev;
- struct ctx_info *ctxi = NULL;
- struct dk_cxlflash_detach detach = { { 0 }, 0 };
- struct lun_access *lun_access, *t;
- enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
- int ctxid;
-
- ctxid = cfg->ops->process_element(ctx);
- if (unlikely(ctxid < 0)) {
- dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
- __func__, ctx, ctxid);
- goto out;
- }
-
- ctxi = get_context(cfg, ctxid, file, ctrl);
- if (unlikely(!ctxi)) {
- ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
- if (!ctxi) {
- dev_dbg(dev, "%s: ctxid=%d already free\n",
- __func__, ctxid);
- goto out_release;
- }
-
- dev_dbg(dev, "%s: Another process owns ctxid=%d\n",
- __func__, ctxid);
- put_context(ctxi);
- goto out;
- }
-
- dev_dbg(dev, "%s: close for ctxid=%d\n", __func__, ctxid);
-
- down_read(&cfg->ioctl_rwsem);
- detach.context_id = ctxi->ctxid;
- list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
- _cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
- up_read(&cfg->ioctl_rwsem);
-out_release:
- cfg->ops->fd_release(inode, file);
-out:
- dev_dbg(dev, "%s: returning\n", __func__);
- return 0;
-}
-
-/**
- * unmap_context() - clears a previously established mapping
- * @ctxi: Context owning the mapping.
- *
- * This routine is used to switch between the error notification page
- * (dummy page of all 1's) and the real mapping (established by the CXL
- * fault handler).
- */
-static void unmap_context(struct ctx_info *ctxi)
-{
- unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1);
-}
-
-/**
- * get_err_page() - obtains and allocates the error notification page
- * @cfg: Internal structure associated with the host.
- *
- * Return: error notification page on success, NULL on failure
- */
-static struct page *get_err_page(struct cxlflash_cfg *cfg)
-{
- struct page *err_page = global.err_page;
- struct device *dev = &cfg->dev->dev;
-
- if (unlikely(!err_page)) {
- err_page = alloc_page(GFP_KERNEL);
- if (unlikely(!err_page)) {
- dev_err(dev, "%s: Unable to allocate err_page\n",
- __func__);
- goto out;
- }
-
- memset(page_address(err_page), -1, PAGE_SIZE);
-
- /* Serialize update w/ other threads to avoid a leak */
- mutex_lock(&global.mutex);
- if (likely(!global.err_page))
- global.err_page = err_page;
- else {
- __free_page(err_page);
- err_page = global.err_page;
- }
- mutex_unlock(&global.mutex);
- }
-
-out:
- dev_dbg(dev, "%s: returning err_page=%p\n", __func__, err_page);
- return err_page;
-}
-
-/**
- * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor
- * @vmf: VM fault associated with current fault.
- *
- * To support error notification via MMIO, faults are 'caught' by this routine
- * that was inserted before passing back the adapter file descriptor on attach.
- * When a fault occurs, this routine evaluates if error recovery is active and
- * if so, installs the error page to 'notify' the user about the error state.
- * During normal operation, the fault is simply handled by the original fault
- * handler that was installed by CXL services as part of initializing the
- * adapter file descriptor. The VMA's page protection bits are toggled to
- * indicate cached/not-cached depending on the memory backing the fault.
- *
- * Return: 0 on success, VM_FAULT_SIGBUS on failure
- */
-static vm_fault_t cxlflash_mmap_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct file *file = vma->vm_file;
- struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
- cxl_fops);
- void *ctx = cfg->ops->fops_get_context(file);
- struct device *dev = &cfg->dev->dev;
- struct ctx_info *ctxi = NULL;
- struct page *err_page = NULL;
- enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
- vm_fault_t rc = 0;
- int ctxid;
-
- ctxid = cfg->ops->process_element(ctx);
- if (unlikely(ctxid < 0)) {
- dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
- __func__, ctx, ctxid);
- goto err;
- }
-
- ctxi = get_context(cfg, ctxid, file, ctrl);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
- goto err;
- }
-
- dev_dbg(dev, "%s: fault for context %d\n", __func__, ctxid);
-
- if (likely(!ctxi->err_recovery_active)) {
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- rc = ctxi->cxl_mmap_vmops->fault(vmf);
- } else {
- dev_dbg(dev, "%s: err recovery active, use err_page\n",
- __func__);
-
- err_page = get_err_page(cfg);
- if (unlikely(!err_page)) {
- dev_err(dev, "%s: Could not get err_page\n", __func__);
- rc = VM_FAULT_RETRY;
- goto out;
- }
-
- get_page(err_page);
- vmf->page = err_page;
- vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
- }
-
-out:
- if (likely(ctxi))
- put_context(ctxi);
- dev_dbg(dev, "%s: returning rc=%x\n", __func__, rc);
- return rc;
-
-err:
- rc = VM_FAULT_SIGBUS;
- goto out;
-}
-
-/*
- * Local MMAP vmops to 'catch' faults
- */
-static const struct vm_operations_struct cxlflash_mmap_vmops = {
- .fault = cxlflash_mmap_fault,
-};
-
-/**
- * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor
- * @file: File installed with adapter file descriptor.
- * @vma: VM area associated with mapping.
- *
- * Installs local mmap vmops to 'catch' faults for error notification support.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
- cxl_fops);
- void *ctx = cfg->ops->fops_get_context(file);
- struct device *dev = &cfg->dev->dev;
- struct ctx_info *ctxi = NULL;
- enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
- int ctxid;
- int rc = 0;
-
- ctxid = cfg->ops->process_element(ctx);
- if (unlikely(ctxid < 0)) {
- dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
- __func__, ctx, ctxid);
- rc = -EIO;
- goto out;
- }
-
- ctxi = get_context(cfg, ctxid, file, ctrl);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
- rc = -EIO;
- goto out;
- }
-
- dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);
-
- rc = cfg->ops->fd_mmap(file, vma);
- if (likely(!rc)) {
- /* Insert ourself in the mmap fault handler path */
- ctxi->cxl_mmap_vmops = vma->vm_ops;
- vma->vm_ops = &cxlflash_mmap_vmops;
- }
-
-out:
- if (likely(ctxi))
- put_context(ctxi);
- return rc;
-}
-
-const struct file_operations cxlflash_cxl_fops = {
- .owner = THIS_MODULE,
- .mmap = cxlflash_cxl_mmap,
- .release = cxlflash_cxl_release,
-};
-
-/**
- * cxlflash_mark_contexts_error() - move contexts to error state and list
- * @cfg: Internal structure associated with the host.
- *
- * A context is only moved over to the error list when there are no outstanding
- * references to it. This ensures that a running operation has completed.
- *
- * Return: 0 on success, -errno on failure
- */
-int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg)
-{
- int i, rc = 0;
- struct ctx_info *ctxi = NULL;
-
- mutex_lock(&cfg->ctx_tbl_list_mutex);
-
- for (i = 0; i < MAX_CONTEXT; i++) {
- ctxi = cfg->ctx_tbl[i];
- if (ctxi) {
- mutex_lock(&ctxi->mutex);
- cfg->ctx_tbl[i] = NULL;
- list_add(&ctxi->list, &cfg->ctx_err_recovery);
- ctxi->err_recovery_active = true;
- ctxi->ctrl_map = NULL;
- unmap_context(ctxi);
- mutex_unlock(&ctxi->mutex);
- }
- }
-
- mutex_unlock(&cfg->ctx_tbl_list_mutex);
- return rc;
-}
-
-/*
- * Dummy NULL fops
- */
-static const struct file_operations null_fops = {
- .owner = THIS_MODULE,
-};
-
-/**
- * check_state() - checks and responds to the current adapter state
- * @cfg: Internal structure associated with the host.
- *
- * This routine can block and should only be used in process context.
- * It assumes that the caller is an ioctl thread holding the ioctl
- * read semaphore, which is temporarily released across the wait to
- * allow actively running ioctls to drain. Also note that when waking
- * up from waiting in reset, the state is unknown and must be checked
- * again before proceeding.
- *
- * Return: 0 on success, -errno on failure
- */
-int check_state(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
-
-retry:
- switch (cfg->state) {
- case STATE_RESET:
- dev_dbg(dev, "%s: Reset state, going to wait...\n", __func__);
- up_read(&cfg->ioctl_rwsem);
- rc = wait_event_interruptible(cfg->reset_waitq,
- cfg->state != STATE_RESET);
- down_read(&cfg->ioctl_rwsem);
- if (unlikely(rc))
- break;
- goto retry;
- case STATE_FAILTERM:
- dev_dbg(dev, "%s: Failed/Terminating\n", __func__);
- rc = -ENODEV;
- break;
- default:
- break;
- }
-
- return rc;
-}
-
-/**
- * cxlflash_disk_attach() - attach a LUN to a context
- * @sdev: SCSI device associated with LUN.
- * @arg: Attach ioctl data structure.
- *
- * Creates a context and attaches LUN to it. A LUN can only be attached
- * one time to a context (subsequent attaches for the same context/LUN pair
- * are not supported). Additional LUNs can be attached to a context by
- * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_disk_attach(struct scsi_device *sdev, void *arg)
-{
- struct dk_cxlflash_attach *attach = arg;
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct ctx_info *ctxi = NULL;
- struct lun_access *lun_access = NULL;
- int rc = 0;
- u32 perms;
- int ctxid = -1;
- u64 irqs = attach->num_interrupts;
- u64 flags = 0UL;
- u64 rctxid = 0UL;
- struct file *file = NULL;
-
- void *ctx = NULL;
-
- int fd = -1;
-
- if (irqs > 4) {
- dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
- __func__, irqs);
- rc = -EINVAL;
- goto out;
- }
-
- if (gli->max_lba == 0) {
- dev_dbg(dev, "%s: No capacity info for LUN=%016llx\n",
- __func__, lli->lun_id[sdev->channel]);
- rc = read_cap16(sdev, lli);
- if (rc) {
- dev_err(dev, "%s: Invalid device rc=%d\n",
- __func__, rc);
- rc = -ENODEV;
- goto out;
- }
- dev_dbg(dev, "%s: LBA = %016llx\n", __func__, gli->max_lba);
- dev_dbg(dev, "%s: BLK_LEN = %08x\n", __func__, gli->blk_len);
- }
-
- if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
- rctxid = attach->context_id;
- ctxi = get_context(cfg, rctxid, NULL, 0);
- if (!ctxi) {
- dev_dbg(dev, "%s: Bad context rctxid=%016llx\n",
- __func__, rctxid);
- rc = -EINVAL;
- goto out;
- }
-
- list_for_each_entry(lun_access, &ctxi->luns, list)
- if (lun_access->lli == lli) {
- dev_dbg(dev, "%s: Already attached\n",
- __func__);
- rc = -EINVAL;
- goto out;
- }
- }
-
- rc = scsi_device_get(sdev);
- if (unlikely(rc)) {
- dev_err(dev, "%s: Unable to get sdev reference\n", __func__);
- goto out;
- }
-
- lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
- if (unlikely(!lun_access)) {
- dev_err(dev, "%s: Unable to allocate lun_access\n", __func__);
- rc = -ENOMEM;
- goto err;
- }
-
- lun_access->lli = lli;
- lun_access->sdev = sdev;
-
- /* Non-NULL context indicates reuse (another context reference) */
- if (ctxi) {
- dev_dbg(dev, "%s: Reusing context for LUN rctxid=%016llx\n",
- __func__, rctxid);
- kref_get(&ctxi->kref);
- list_add(&lun_access->list, &ctxi->luns);
- goto out_attach;
- }
-
- ctxi = create_context(cfg);
- if (unlikely(!ctxi)) {
- dev_err(dev, "%s: Failed to create context ctxid=%d\n",
- __func__, ctxid);
- rc = -ENOMEM;
- goto err;
- }
-
- ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
- if (IS_ERR_OR_NULL(ctx)) {
- dev_err(dev, "%s: Could not initialize context %p\n",
- __func__, ctx);
- rc = -ENODEV;
- goto err;
- }
-
- rc = cfg->ops->start_work(ctx, irqs);
- if (unlikely(rc)) {
- dev_dbg(dev, "%s: Could not start context rc=%d\n",
- __func__, rc);
- goto err;
- }
-
- ctxid = cfg->ops->process_element(ctx);
- if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
- dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
- rc = -EPERM;
- goto err;
- }
-
- file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
- if (unlikely(fd < 0)) {
- rc = -ENODEV;
- dev_err(dev, "%s: Could not get file descriptor\n", __func__);
- goto err;
- }
-
- /* Translate read/write O_* flags from fcntl.h to AFU permission bits */
- perms = SISL_RHT_PERM(attach->hdr.flags + 1);
-
- /* Context mutex is locked upon return */
- init_context(ctxi, cfg, ctx, ctxid, file, perms, irqs);
-
- rc = afu_attach(cfg, ctxi);
- if (unlikely(rc)) {
- dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
- goto err;
- }
-
- /*
- * No error paths after this point. Once the fd is installed it's
- * visible to user space and can't be undone safely on this thread.
- * There is no need to worry about a deadlock here because no one
- * knows about us yet; we can be the only one holding our mutex.
- */
- list_add(&lun_access->list, &ctxi->luns);
- mutex_lock(&cfg->ctx_tbl_list_mutex);
- mutex_lock(&ctxi->mutex);
- cfg->ctx_tbl[ctxid] = ctxi;
- mutex_unlock(&cfg->ctx_tbl_list_mutex);
- fd_install(fd, file);
-
-out_attach:
- if (fd != -1)
- flags |= DK_CXLFLASH_APP_CLOSE_ADAP_FD;
- if (afu_is_sq_cmd_mode(afu))
- flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;
-
- attach->hdr.return_flags = flags;
- attach->context_id = ctxi->ctxid;
- attach->block_size = gli->blk_len;
- attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
- attach->last_lba = gli->max_lba;
- attach->max_xfer = sdev->host->max_sectors * MAX_SECTOR_UNIT;
- attach->max_xfer /= gli->blk_len;
-
-out:
- attach->adap_fd = fd;
-
- if (ctxi)
- put_context(ctxi);
-
- dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n",
- __func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
- return rc;
-
-err:
- /* Cleanup CXL context; okay to 'stop' even if it was not started */
- if (!IS_ERR_OR_NULL(ctx)) {
- cfg->ops->stop_context(ctx);
- cfg->ops->release_context(ctx);
- ctx = NULL;
- }
-
- /*
- * Here, we're overriding the fops with a dummy all-NULL fops because
- * fput() calls the release fop, which will cause us to mistakenly
- * call into the CXL code. Rather than try to add yet more complexity
- * to that routine (cxlflash_cxl_release) we should try to fix the
- * issue here.
- */
- if (fd > 0) {
- file->f_op = &null_fops;
- fput(file);
- put_unused_fd(fd);
- fd = -1;
- file = NULL;
- }
-
- /* Cleanup our context */
- if (ctxi) {
- destroy_context(cfg, ctxi);
- ctxi = NULL;
- }
-
- kfree(lun_access);
- scsi_device_put(sdev);
- goto out;
-}
-
-/**
- * recover_context() - recovers a context in error
- * @cfg: Internal structure associated with the host.
- * @ctxi: Context to recover.
- * @adap_fd: Adapter file descriptor associated with new/recovered context.
- *
- * Re-establishes the state for a context-in-error.
- *
- * Return: 0 on success, -errno on failure
- */
-static int recover_context(struct cxlflash_cfg *cfg,
- struct ctx_info *ctxi,
- int *adap_fd)
-{
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
- int fd = -1;
- int ctxid = -1;
- struct file *file;
- void *ctx;
- struct afu *afu = cfg->afu;
-
- ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
- if (IS_ERR_OR_NULL(ctx)) {
- dev_err(dev, "%s: Could not initialize context %p\n",
- __func__, ctx);
- rc = -ENODEV;
- goto out;
- }
-
- rc = cfg->ops->start_work(ctx, ctxi->irqs);
- if (unlikely(rc)) {
- dev_dbg(dev, "%s: Could not start context rc=%d\n",
- __func__, rc);
- goto err1;
- }
-
- ctxid = cfg->ops->process_element(ctx);
- if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
- dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
- rc = -EPERM;
- goto err2;
- }
-
- file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
- if (unlikely(fd < 0)) {
- rc = -ENODEV;
- dev_err(dev, "%s: Could not get file descriptor\n", __func__);
- goto err2;
- }
-
- /* Update with new MMIO area based on updated context id */
- ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
-
- rc = afu_attach(cfg, ctxi);
- if (rc) {
- dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
- goto err3;
- }
-
- /*
- * No error paths after this point. Once the fd is installed it's
- * visible to user space and can't be undone safely on this thread.
- */
- ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
- ctxi->ctx = ctx;
- ctxi->file = file;
-
- /*
- * Put context back in table (note the reinit of the context list);
- * we must first drop the context's mutex and then acquire it in
- * order with the table/list mutex to avoid a deadlock - safe to do
- * here because no one can find us at this moment in time.
- */
- mutex_unlock(&ctxi->mutex);
- mutex_lock(&cfg->ctx_tbl_list_mutex);
- mutex_lock(&ctxi->mutex);
- list_del_init(&ctxi->list);
- cfg->ctx_tbl[ctxid] = ctxi;
- mutex_unlock(&cfg->ctx_tbl_list_mutex);
- fd_install(fd, file);
- *adap_fd = fd;
-out:
- dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
- __func__, ctxid, fd, rc);
- return rc;
-
-err3:
- fput(file);
- put_unused_fd(fd);
-err2:
- cfg->ops->stop_context(ctx);
-err1:
- cfg->ops->release_context(ctx);
- goto out;
-}
-
-/**
- * cxlflash_afu_recover() - initiates AFU recovery
- * @sdev: SCSI device associated with LUN.
- * @arg: Recover ioctl data structure.
- *
- * Only a single recovery is allowed at a time to avoid exhausting CXL
- * resources (leading to recovery failure) in the event that we're up
- * against the maximum number of contexts limit. For similar reasons,
- * a context recovery is retried if there are multiple recoveries taking
- * place at the same time and the failure was due to CXL services being
- * unable to keep up.
- *
- * As this routine is called on ioctl context, it holds the ioctl r/w
- * semaphore that is used to drain ioctls in recovery scenarios. The
- * implementation to achieve the pacing described above (a local mutex)
- * requires that the ioctl r/w semaphore be dropped and reacquired to
- * avoid a 3-way deadlock when multiple process recoveries operate in
- * parallel.
- *
- * Because a user can detect an error condition before the kernel, it is
- * quite possible for this routine to act as the kernel's EEH detection
- * source (MMIO read of mbox_r). Because of this, there is a window of
- * time where an EEH might have been detected but not yet 'serviced'
- * (callback invoked, causing the device to enter reset state). To avoid
- * looping in this routine during that window, a 1 second sleep is in place
- * between the time the MMIO failure is detected and the time a wait on the
- * reset wait queue is attempted via check_state().
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_afu_recover(struct scsi_device *sdev, void *arg)
-{
- struct dk_cxlflash_recover_afu *recover = arg;
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct afu *afu = cfg->afu;
- struct ctx_info *ctxi = NULL;
- struct mutex *mutex = &cfg->ctx_recovery_mutex;
- struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
- u64 flags;
- u64 ctxid = DECODE_CTXID(recover->context_id),
- rctxid = recover->context_id;
- long reg;
- bool locked = true;
- int lretry = 20; /* up to 2 seconds */
- int new_adap_fd = -1;
- int rc = 0;
-
- atomic_inc(&cfg->recovery_threads);
- up_read(&cfg->ioctl_rwsem);
- rc = mutex_lock_interruptible(mutex);
- down_read(&cfg->ioctl_rwsem);
- if (rc) {
- locked = false;
- goto out;
- }
-
- rc = check_state(cfg);
- if (rc) {
- dev_err(dev, "%s: Failed state rc=%d\n", __func__, rc);
- rc = -ENODEV;
- goto out;
- }
-
- dev_dbg(dev, "%s: reason=%016llx rctxid=%016llx\n",
- __func__, recover->reason, rctxid);
-
-retry:
- /* Ensure that this process is attached to the context */
- ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
- rc = -EINVAL;
- goto out;
- }
-
- if (ctxi->err_recovery_active) {
-retry_recover:
- rc = recover_context(cfg, ctxi, &new_adap_fd);
- if (unlikely(rc)) {
- dev_err(dev, "%s: Recovery failed ctxid=%llu rc=%d\n",
- __func__, ctxid, rc);
- if ((rc == -ENODEV) &&
- ((atomic_read(&cfg->recovery_threads) > 1) ||
- (lretry--))) {
- dev_dbg(dev, "%s: Going to try again\n",
- __func__);
- mutex_unlock(mutex);
- msleep(100);
- rc = mutex_lock_interruptible(mutex);
- if (rc) {
- locked = false;
- goto out;
- }
- goto retry_recover;
- }
-
- goto out;
- }
-
- ctxi->err_recovery_active = false;
-
- flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD |
- DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
- if (afu_is_sq_cmd_mode(afu))
- flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;
-
- recover->hdr.return_flags = flags;
- recover->context_id = ctxi->ctxid;
- recover->adap_fd = new_adap_fd;
- recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
- goto out;
- }
-
- /* Test if in error state */
- reg = readq_be(&hwq->ctrl_map->mbox_r);
- if (reg == -1) {
- dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n", __func__);
-
- /*
- * Before checking the state, put back the context obtained with
- * get_context() as it is no longer needed and sleep for a short
- * period of time (see prolog notes).
- */
- put_context(ctxi);
- ctxi = NULL;
- ssleep(1);
- rc = check_state(cfg);
- if (unlikely(rc))
- goto out;
- goto retry;
- }
-
- dev_dbg(dev, "%s: MMIO working, no recovery required\n", __func__);
-out:
- if (likely(ctxi))
- put_context(ctxi);
- if (locked)
- mutex_unlock(mutex);
- atomic_dec_if_positive(&cfg->recovery_threads);
- return rc;
-}
-
-/**
- * process_sense() - evaluates and processes sense data
- * @sdev: SCSI device associated with LUN.
- * @verify: Verify ioctl data structure.
- *
- * Return: 0 on success, -errno on failure
- */
-static int process_sense(struct scsi_device *sdev,
- struct dk_cxlflash_verify *verify)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- u64 prev_lba = gli->max_lba;
- struct scsi_sense_hdr sshdr = { 0 };
- int rc = 0;
-
- rc = scsi_normalize_sense((const u8 *)&verify->sense_data,
- DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr);
- if (!rc) {
- dev_err(dev, "%s: Failed to normalize sense data\n", __func__);
- rc = -EINVAL;
- goto out;
- }
-
- switch (sshdr.sense_key) {
- case NO_SENSE:
- case RECOVERED_ERROR:
- case NOT_READY:
- break;
- case UNIT_ATTENTION:
- switch (sshdr.asc) {
- case 0x29: /* Power on Reset or Device Reset */
- fallthrough;
- case 0x2A: /* Device settings/capacity changed */
- rc = read_cap16(sdev, lli);
- if (rc) {
- rc = -ENODEV;
- break;
- }
- if (prev_lba != gli->max_lba)
- dev_dbg(dev, "%s: Capacity changed old=%lld "
- "new=%lld\n", __func__, prev_lba,
- gli->max_lba);
- break;
- case 0x3F: /* Report LUNs changed, Rescan. */
- scsi_scan_host(cfg->host);
- break;
- default:
- rc = -EIO;
- break;
- }
- break;
- default:
- rc = -EIO;
- break;
- }
-out:
- dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__,
- sshdr.sense_key, sshdr.asc, sshdr.ascq, rc);
- return rc;
-}
-
-/**
- * cxlflash_disk_verify() - verifies a LUN is the same and handle size changes
- * @sdev: SCSI device associated with LUN.
- * @arg: Verify ioctl data structure.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_disk_verify(struct scsi_device *sdev, void *arg)
-{
- struct dk_cxlflash_verify *verify = arg;
- int rc = 0;
- struct ctx_info *ctxi = NULL;
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct sisl_rht_entry *rhte = NULL;
- res_hndl_t rhndl = verify->rsrc_handle;
- u64 ctxid = DECODE_CTXID(verify->context_id),
- rctxid = verify->context_id;
- u64 last_lba = 0;
-
- dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llx, hint=%016llx, "
- "flags=%016llx\n", __func__, ctxid, verify->rsrc_handle,
- verify->hint, verify->hdr.flags);
-
- ctxi = get_context(cfg, rctxid, lli, 0);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
- rc = -EINVAL;
- goto out;
- }
-
- rhte = get_rhte(ctxi, rhndl, lli);
- if (unlikely(!rhte)) {
- dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
- __func__, rhndl);
- rc = -EINVAL;
- goto out;
- }
-
- /*
- * Look at the hint/sense to see if it requires us to redrive
- * inquiry (i.e. the Unit attention is due to the WWN changing).
- */
- if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) {
- /* Can't hold mutex across process_sense/read_cap16,
- * since we could have an intervening EEH event.
- */
- ctxi->unavail = true;
- mutex_unlock(&ctxi->mutex);
- rc = process_sense(sdev, verify);
- if (unlikely(rc)) {
- dev_err(dev, "%s: Failed to validate sense data (%d)\n",
- __func__, rc);
- mutex_lock(&ctxi->mutex);
- ctxi->unavail = false;
- goto out;
- }
- mutex_lock(&ctxi->mutex);
- ctxi->unavail = false;
- }
-
- switch (gli->mode) {
- case MODE_PHYSICAL:
- last_lba = gli->max_lba;
- break;
- case MODE_VIRTUAL:
- /* Cast lxt_cnt to u64 for multiply to be treated as 64bit op */
- last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len);
- last_lba /= CXLFLASH_BLOCK_SIZE;
- last_lba--;
- break;
- default:
- WARN(1, "Unsupported LUN mode!");
- }
-
- verify->last_lba = last_lba;
-
-out:
- if (likely(ctxi))
- put_context(ctxi);
- dev_dbg(dev, "%s: returning rc=%d llba=%llx\n",
- __func__, rc, verify->last_lba);
- return rc;
-}
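/*
 * Illustrative sketch (not from the original source) of the MODE_VIRTUAL
 * last-LBA arithmetic in cxlflash_disk_verify() above, assuming
 * MC_CHUNK_SIZE is 0x10000 LBAs and both gli->blk_len and
 * CXLFLASH_BLOCK_SIZE are 4096; the real constants come from the driver
 * headers:
 *
 *   lxt_cnt  = 2;                                  // two chunks mapped
 *   last_lba = (2ULL * 0x10000 * 4096) / 4096 - 1; // 0x1FFFF (131071)
 *
 * The virtual LUN therefore spans lxt_cnt chunks of MC_CHUNK_SIZE blocks,
 * rescaled from the device block length to the CXLFLASH block size.
 */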
-
-/**
- * decode_ioctl() - translates an encoded ioctl to an easily identifiable string
- * @cmd: The ioctl command to decode.
- *
- * Return: A string identifying the decoded ioctl.
- */
-static char *decode_ioctl(unsigned int cmd)
-{
- switch (cmd) {
- case DK_CXLFLASH_ATTACH:
- return __stringify_1(DK_CXLFLASH_ATTACH);
- case DK_CXLFLASH_USER_DIRECT:
- return __stringify_1(DK_CXLFLASH_USER_DIRECT);
- case DK_CXLFLASH_USER_VIRTUAL:
- return __stringify_1(DK_CXLFLASH_USER_VIRTUAL);
- case DK_CXLFLASH_VLUN_RESIZE:
- return __stringify_1(DK_CXLFLASH_VLUN_RESIZE);
- case DK_CXLFLASH_RELEASE:
- return __stringify_1(DK_CXLFLASH_RELEASE);
- case DK_CXLFLASH_DETACH:
- return __stringify_1(DK_CXLFLASH_DETACH);
- case DK_CXLFLASH_VERIFY:
- return __stringify_1(DK_CXLFLASH_VERIFY);
- case DK_CXLFLASH_VLUN_CLONE:
- return __stringify_1(DK_CXLFLASH_VLUN_CLONE);
- case DK_CXLFLASH_RECOVER_AFU:
- return __stringify_1(DK_CXLFLASH_RECOVER_AFU);
- case DK_CXLFLASH_MANAGE_LUN:
- return __stringify_1(DK_CXLFLASH_MANAGE_LUN);
- }
-
- return "UNKNOWN";
-}
-
-/**
- * cxlflash_disk_direct_open() - opens a direct (physical) disk
- * @sdev: SCSI device associated with LUN.
- * @arg: UDirect ioctl data structure.
- *
- * On successful return, the user is informed of the resource handle
- * to be used to identify the direct lun and the size (in blocks) of
- * the direct lun in last LBA format.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct dk_cxlflash_release rel = { { 0 }, 0 };
-
- struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg;
-
- u64 ctxid = DECODE_CTXID(pphys->context_id),
- rctxid = pphys->context_id;
- u64 lun_size = 0;
- u64 last_lba = 0;
- u64 rsrc_handle = -1;
- u32 port = CHAN2PORTMASK(sdev->channel);
-
- int rc = 0;
-
- struct ctx_info *ctxi = NULL;
- struct sisl_rht_entry *rhte = NULL;
-
- dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);
-
- rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
- if (unlikely(rc)) {
- dev_dbg(dev, "%s: Failed attach to LUN (PHYSICAL)\n", __func__);
- goto out;
- }
-
- ctxi = get_context(cfg, rctxid, lli, 0);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
- rc = -EINVAL;
- goto err1;
- }
-
- rhte = rhte_checkout(ctxi, lli);
- if (unlikely(!rhte)) {
- dev_dbg(dev, "%s: Too many opens ctxid=%lld\n",
- __func__, ctxid);
- rc = -EMFILE; /* too many opens */
- goto err1;
- }
-
- rsrc_handle = (rhte - ctxi->rht_start);
-
- rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);
-
- last_lba = gli->max_lba;
- pphys->hdr.return_flags = 0;
- pphys->last_lba = last_lba;
- pphys->rsrc_handle = rsrc_handle;
-
- rc = cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);
- if (unlikely(rc)) {
- dev_dbg(dev, "%s: AFU sync failed rc=%d\n", __func__, rc);
- goto err2;
- }
-
-out:
- if (likely(ctxi))
- put_context(ctxi);
- dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
- __func__, rsrc_handle, rc, last_lba);
- return rc;
-
-err2:
- marshal_udir_to_rele(pphys, &rel);
- _cxlflash_disk_release(sdev, ctxi, &rel);
- goto out;
-err1:
- cxlflash_lun_detach(gli);
- goto out;
-}
-
-/**
- * ioctl_common() - common IOCTL handler for driver
- * @sdev: SCSI device associated with LUN.
- * @cmd: IOCTL command.
- *
- * Handles common fencing operations that are valid for multiple ioctls. Always
- * allow through ioctls that are cleanup oriented in nature, even when operating
- * in a failed/terminating state.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ioctl_common(struct scsi_device *sdev, unsigned int cmd)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- int rc = 0;
-
- if (unlikely(!lli)) {
- dev_dbg(dev, "%s: Unknown LUN\n", __func__);
- rc = -EINVAL;
- goto out;
- }
-
- rc = check_state(cfg);
- if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) {
- switch (cmd) {
- case DK_CXLFLASH_VLUN_RESIZE:
- case DK_CXLFLASH_RELEASE:
- case DK_CXLFLASH_DETACH:
- dev_dbg(dev, "%s: Command override rc=%d\n",
- __func__, rc);
- rc = 0;
- break;
- }
- }
-out:
- return rc;
-}
-
-/**
- * cxlflash_ioctl() - IOCTL handler for driver
- * @sdev: SCSI device associated with LUN.
- * @cmd: IOCTL command.
- * @arg: Userspace ioctl data structure.
- *
- * A read/write semaphore is used to implement a 'drain' of currently
- * running ioctls. The read semaphore is taken at the beginning of each
- * ioctl thread and released upon concluding execution. Additionally the
- * semaphore should be released and then reacquired in any ioctl execution
- * path which will wait for an event to occur that is outside the scope of
- * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
- * a thread simply needs to acquire the write semaphore.
- *
- * Return: 0 on success, -errno on failure
- */
-int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
-{
- typedef int (*sioctl) (struct scsi_device *, void *);
-
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- struct dk_cxlflash_hdr *hdr;
- char buf[sizeof(union cxlflash_ioctls)];
- size_t size = 0;
- bool known_ioctl = false;
- int idx;
- int rc = 0;
- struct Scsi_Host *shost = sdev->host;
- sioctl do_ioctl = NULL;
-
- static const struct {
- size_t size;
- sioctl ioctl;
- } ioctl_tbl[] = { /* NOTE: order matters here */
- {sizeof(struct dk_cxlflash_attach), cxlflash_disk_attach},
- {sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open},
- {sizeof(struct dk_cxlflash_release), cxlflash_disk_release},
- {sizeof(struct dk_cxlflash_detach), cxlflash_disk_detach},
- {sizeof(struct dk_cxlflash_verify), cxlflash_disk_verify},
- {sizeof(struct dk_cxlflash_recover_afu), cxlflash_afu_recover},
- {sizeof(struct dk_cxlflash_manage_lun), cxlflash_manage_lun},
- {sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open},
- {sizeof(struct dk_cxlflash_resize), cxlflash_vlun_resize},
- {sizeof(struct dk_cxlflash_clone), cxlflash_disk_clone},
- };
-
- /* Hold read semaphore so we can drain if needed */
- down_read(&cfg->ioctl_rwsem);
-
- /* Restrict command set to physical support only for internal LUN */
- if (afu->internal_lun)
- switch (cmd) {
- case DK_CXLFLASH_RELEASE:
- case DK_CXLFLASH_USER_VIRTUAL:
- case DK_CXLFLASH_VLUN_RESIZE:
- case DK_CXLFLASH_VLUN_CLONE:
- dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n",
- __func__, decode_ioctl(cmd), afu->internal_lun);
- rc = -EINVAL;
- goto cxlflash_ioctl_exit;
- }
-
- switch (cmd) {
- case DK_CXLFLASH_ATTACH:
- case DK_CXLFLASH_USER_DIRECT:
- case DK_CXLFLASH_RELEASE:
- case DK_CXLFLASH_DETACH:
- case DK_CXLFLASH_VERIFY:
- case DK_CXLFLASH_RECOVER_AFU:
- case DK_CXLFLASH_USER_VIRTUAL:
- case DK_CXLFLASH_VLUN_RESIZE:
- case DK_CXLFLASH_VLUN_CLONE:
- dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n",
- __func__, decode_ioctl(cmd), cmd, shost->host_no,
- sdev->channel, sdev->id, sdev->lun);
- rc = ioctl_common(sdev, cmd);
- if (unlikely(rc))
- goto cxlflash_ioctl_exit;
-
- fallthrough;
-
- case DK_CXLFLASH_MANAGE_LUN:
- known_ioctl = true;
- idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH);
- size = ioctl_tbl[idx].size;
- do_ioctl = ioctl_tbl[idx].ioctl;
-
- if (likely(do_ioctl))
- break;
-
- fallthrough;
- default:
- rc = -EINVAL;
- goto cxlflash_ioctl_exit;
- }
-
- if (unlikely(copy_from_user(&buf, arg, size))) {
- dev_err(dev, "%s: copy_from_user() fail size=%lu cmd=%u (%s) arg=%p\n",
- __func__, size, cmd, decode_ioctl(cmd), arg);
- rc = -EFAULT;
- goto cxlflash_ioctl_exit;
- }
-
- hdr = (struct dk_cxlflash_hdr *)&buf;
- if (hdr->version != DK_CXLFLASH_VERSION_0) {
- dev_dbg(dev, "%s: Version %u not supported for %s\n",
- __func__, hdr->version, decode_ioctl(cmd));
- rc = -EINVAL;
- goto cxlflash_ioctl_exit;
- }
-
- if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) {
- dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
- rc = -EINVAL;
- goto cxlflash_ioctl_exit;
- }
-
- rc = do_ioctl(sdev, (void *)&buf);
- if (likely(!rc))
- if (unlikely(copy_to_user(arg, &buf, size))) {
- dev_err(dev, "%s: copy_to_user() fail size=%lu cmd=%u (%s) arg=%p\n",
- __func__, size, cmd, decode_ioctl(cmd), arg);
- rc = -EFAULT;
- }
-
- /* fall through to exit */
-
-cxlflash_ioctl_exit:
- up_read(&cfg->ioctl_rwsem);
- if (unlikely(rc && known_ioctl))
- dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
- "returned rc %d\n", __func__,
- decode_ioctl(cmd), cmd, shost->host_no,
- sdev->channel, sdev->id, sdev->lun, rc);
- else
- dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
- "returned rc %d\n", __func__, decode_ioctl(cmd),
- cmd, shost->host_no, sdev->channel, sdev->id,
- sdev->lun, rc);
- return rc;
-}
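/*
 * Illustrative sketch of the ioctl_tbl[] lookup in cxlflash_ioctl() above.
 * The ordinal values below are assumptions for the example; the real ones
 * come from uapi/scsi/cxlflash_ioctl.h. The scheme only works because the
 * DK_CXLFLASH_* command numbers are consecutive and declared in the same
 * order as ioctl_tbl[] ("order matters here"):
 *
 *   // assume _IOC_NR(DK_CXLFLASH_ATTACH) == 0x80
 *   // and    _IOC_NR(DK_CXLFLASH_VERIFY) == 0x84
 *   idx      = 0x84 - 0x80;         // 4
 *   size     = ioctl_tbl[4].size;   // sizeof(struct dk_cxlflash_verify)
 *   do_ioctl = ioctl_tbl[4].ioctl;  // cxlflash_disk_verify
 */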
diff --git a/drivers/scsi/cxlflash/superpipe.h b/drivers/scsi/cxlflash/superpipe.h
deleted file mode 100644
index fe8c975d13d7..000000000000
--- a/drivers/scsi/cxlflash/superpipe.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#ifndef _CXLFLASH_SUPERPIPE_H
-#define _CXLFLASH_SUPERPIPE_H
-
-extern struct cxlflash_global global;
-
-/*
- * Terminology: use afu (and not adapter) to refer to the HW.
- * Adapter is the entire slot and includes PSL out of which
- * only the AFU is visible to user space.
- */
-
-/* Chunk size parms: note sislite minimum chunk size is
- * 0x10000 LBAs corresponding to an NMASK of 16.
- */
-#define MC_CHUNK_SIZE (1 << MC_RHT_NMASK) /* in LBAs */
-
-#define CMD_TIMEOUT 30 /* 30 secs */
-#define CMD_RETRIES 5 /* 5 retries for scsi_execute */
-
-#define MAX_SECTOR_UNIT 512 /* max_sector is in 512 byte multiples */
-
-enum lun_mode {
- MODE_NONE = 0,
- MODE_VIRTUAL,
- MODE_PHYSICAL
-};
-
-/* Global (entire driver, spans adapters) lun_info structure */
-struct glun_info {
- u64 max_lba; /* from read cap(16) */
- u32 blk_len; /* from read cap(16) */
- enum lun_mode mode; /* NONE, VIRTUAL, PHYSICAL */
- int users; /* Number of users w/ references to LUN */
-
- u8 wwid[16];
-
- struct mutex mutex;
-
- struct blka blka;
- struct list_head list;
-};
-
-/* Local (per-adapter) lun_info structure */
-struct llun_info {
- u64 lun_id[MAX_FC_PORTS]; /* from REPORT_LUNS */
- u32 lun_index; /* Index in the LUN table */
- u32 host_no; /* host_no from Scsi_host */
- u32 port_sel; /* What port to use for this LUN */
- bool in_table; /* Whether a LUN table entry was created */
-
- u8 wwid[16]; /* Keep a duplicate copy here? */
-
- struct glun_info *parent; /* Pointer to entry in global LUN structure */
- struct scsi_device *sdev;
- struct list_head list;
-};
-
-struct lun_access {
- struct llun_info *lli;
- struct scsi_device *sdev;
- struct list_head list;
-};
-
-enum ctx_ctrl {
- CTX_CTRL_CLONE = (1 << 1),
- CTX_CTRL_ERR = (1 << 2),
- CTX_CTRL_ERR_FALLBACK = (1 << 3),
- CTX_CTRL_NOPID = (1 << 4),
- CTX_CTRL_FILE = (1 << 5)
-};
-
-#define ENCODE_CTXID(_ctx, _id) (((((u64)_ctx) & 0xFFFFFFFF0ULL) << 28) | _id)
-#define DECODE_CTXID(_val) (_val & 0xFFFFFFFF)
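/*
 * Illustrative sketch (values are assumptions, not from the source) of how
 * the two macros above round-trip a context id: ENCODE_CTXID() folds bits
 * 4..35 of the ctx_info pointer into the upper 32 bits of the token and
 * keeps the raw hardware context id in the lower 32 bits, which
 * DECODE_CTXID() recovers by masking:
 *
 *   struct ctx_info *ctxi = ...;        // e.g. a kernel pointer
 *   u64 token = ENCODE_CTXID(ctxi, 5);  // low 32 bits == 5
 *   u64 id    = DECODE_CTXID(token);    // id == 5
 */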
-
-struct ctx_info {
- struct sisl_ctrl_map __iomem *ctrl_map; /* initialized at startup */
- struct sisl_rht_entry *rht_start; /* 1 page (req'd for alignment),
- * alloc/free on attach/detach
- */
- u32 rht_out; /* Number of checked out RHT entries */
- u32 rht_perms; /* User-defined permissions for RHT entries */
- struct llun_info **rht_lun; /* Mapping of RHT entries to LUNs */
- u8 *rht_needs_ws; /* User-desired write-same function per RHTE */
-
- u64 ctxid;
- u64 irqs; /* Number of interrupts requested for context */
- pid_t pid;
- bool initialized;
- bool unavail;
- bool err_recovery_active;
- struct mutex mutex; /* Context protection */
- struct kref kref;
- void *ctx;
- struct cxlflash_cfg *cfg;
- struct list_head luns; /* LUNs attached to this context */
- const struct vm_operations_struct *cxl_mmap_vmops;
- struct file *file;
- struct list_head list; /* Link contexts in error recovery */
-};
-
-struct cxlflash_global {
- struct mutex mutex;
- struct list_head gluns;/* list of glun_info structs */
- struct page *err_page; /* One page of all 0xF for error notification */
-};
-
-int cxlflash_vlun_resize(struct scsi_device *sdev, void *resize);
-int _cxlflash_vlun_resize(struct scsi_device *sdev, struct ctx_info *ctxi,
- struct dk_cxlflash_resize *resize);
-
-int cxlflash_disk_release(struct scsi_device *sdev,
- void *release);
-int _cxlflash_disk_release(struct scsi_device *sdev, struct ctx_info *ctxi,
- struct dk_cxlflash_release *release);
-
-int cxlflash_disk_clone(struct scsi_device *sdev, void *arg);
-
-int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg);
-
-int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked);
-void cxlflash_lun_detach(struct glun_info *gli);
-
-struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxit, void *arg,
- enum ctx_ctrl ctrl);
-void put_context(struct ctx_info *ctxi);
-
-struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
- struct llun_info *lli);
-
-struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
- struct llun_info *lli);
-void rhte_checkin(struct ctx_info *ctxi, struct sisl_rht_entry *rhte);
-
-void cxlflash_ba_terminate(struct ba_lun *ba_lun);
-
-int cxlflash_manage_lun(struct scsi_device *sdev, void *manage);
-
-int check_state(struct cxlflash_cfg *cfg);
-
-#endif /* ifndef _CXLFLASH_SUPERPIPE_H */
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
deleted file mode 100644
index 32e807703377..000000000000
--- a/drivers/scsi/cxlflash/vlun.c
+++ /dev/null
@@ -1,1336 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/syscalls.h>
-#include <linux/unaligned.h>
-#include <asm/bitsperlong.h>
-
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_host.h>
-#include <uapi/scsi/cxlflash_ioctl.h>
-
-#include "sislite.h"
-#include "common.h"
-#include "vlun.h"
-#include "superpipe.h"
-
-/**
- * marshal_virt_to_resize() - translate uvirtual to resize structure
- * @virt: Source structure from which to translate/copy.
- * @resize: Destination structure for the translate/copy.
- */
-static void marshal_virt_to_resize(struct dk_cxlflash_uvirtual *virt,
- struct dk_cxlflash_resize *resize)
-{
- resize->hdr = virt->hdr;
- resize->context_id = virt->context_id;
- resize->rsrc_handle = virt->rsrc_handle;
- resize->req_size = virt->lun_size;
- resize->last_lba = virt->last_lba;
-}
-
-/**
- * marshal_clone_to_rele() - translate clone to release structure
- * @clone: Source structure from which to translate/copy.
- * @release: Destination structure for the translate/copy.
- */
-static void marshal_clone_to_rele(struct dk_cxlflash_clone *clone,
- struct dk_cxlflash_release *release)
-{
- release->hdr = clone->hdr;
- release->context_id = clone->context_id_dst;
-}
-
-/**
- * ba_init() - initializes a block allocator
- * @ba_lun: Block allocator to initialize.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ba_init(struct ba_lun *ba_lun)
-{
- struct ba_lun_info *bali = NULL;
- int lun_size_au = 0, i = 0;
- int last_word_underflow = 0;
- u64 *lam;
-
- pr_debug("%s: Initializing LUN: lun_id=%016llx "
- "ba_lun->lsize=%lx ba_lun->au_size=%lX\n",
- __func__, ba_lun->lun_id, ba_lun->lsize, ba_lun->au_size);
-
- /* Calculate bit map size */
- lun_size_au = ba_lun->lsize / ba_lun->au_size;
- if (lun_size_au == 0) {
- pr_debug("%s: Requested LUN size of 0!\n", __func__);
- return -EINVAL;
- }
-
- /* Allocate lun information container */
- bali = kzalloc(sizeof(struct ba_lun_info), GFP_KERNEL);
- if (unlikely(!bali)) {
- pr_err("%s: Failed to allocate lun_info lun_id=%016llx\n",
- __func__, ba_lun->lun_id);
- return -ENOMEM;
- }
-
- bali->total_aus = lun_size_au;
- bali->lun_bmap_size = lun_size_au / BITS_PER_LONG;
-
- if (lun_size_au % BITS_PER_LONG)
- bali->lun_bmap_size++;
-
- /* Allocate bitmap space */
- bali->lun_alloc_map = kzalloc((bali->lun_bmap_size * sizeof(u64)),
- GFP_KERNEL);
- if (unlikely(!bali->lun_alloc_map)) {
- pr_err("%s: Failed to allocate lun allocation map: "
- "lun_id=%016llx\n", __func__, ba_lun->lun_id);
- kfree(bali);
- return -ENOMEM;
- }
-
- /* Initialize the bit map size and set all bits to '1' */
- bali->free_aun_cnt = lun_size_au;
-
- for (i = 0; i < bali->lun_bmap_size; i++)
- bali->lun_alloc_map[i] = 0xFFFFFFFFFFFFFFFFULL;
-
-	/* If the last word is not fully utilized, mark extra bits as allocated */
- last_word_underflow = (bali->lun_bmap_size * BITS_PER_LONG);
- last_word_underflow -= bali->free_aun_cnt;
- if (last_word_underflow > 0) {
- lam = &bali->lun_alloc_map[bali->lun_bmap_size - 1];
- for (i = (HIBIT - last_word_underflow + 1);
- i < BITS_PER_LONG;
- i++)
- clear_bit(i, (ulong *)lam);
- }
-
- /* Initialize high elevator index, low/curr already at 0 from kzalloc */
- bali->free_high_idx = bali->lun_bmap_size;
-
- /* Allocate clone map */
- bali->aun_clone_map = kzalloc((bali->total_aus * sizeof(u8)),
- GFP_KERNEL);
- if (unlikely(!bali->aun_clone_map)) {
- pr_err("%s: Failed to allocate clone map: lun_id=%016llx\n",
- __func__, ba_lun->lun_id);
- kfree(bali->lun_alloc_map);
- kfree(bali);
- return -ENOMEM;
- }
-
- /* Pass the allocated LUN info as a handle to the user */
- ba_lun->ba_lun_handle = bali;
-
- pr_debug("%s: Successfully initialized the LUN: "
- "lun_id=%016llx bitmap size=%x, free_aun_cnt=%llx\n",
- __func__, ba_lun->lun_id, bali->lun_bmap_size,
- bali->free_aun_cnt);
- return 0;
-}
-
-/**
- * find_free_range() - locates a free bit within the block allocator
- * @low: First word in block allocator to start search.
- * @high: Last word in block allocator to search.
- * @bali: LUN information structure owning the block allocator to search.
- * @bit_word: Passes back the word in the block allocator owning the free bit.
- *
- * Return: The bit position within the passed back word, -1 on failure
- */
-static int find_free_range(u32 low,
- u32 high,
- struct ba_lun_info *bali, int *bit_word)
-{
- int i;
- u64 bit_pos = -1;
- ulong *lam, num_bits;
-
- for (i = low; i < high; i++)
- if (bali->lun_alloc_map[i] != 0) {
- lam = (ulong *)&bali->lun_alloc_map[i];
- num_bits = (sizeof(*lam) * BITS_PER_BYTE);
- bit_pos = find_first_bit(lam, num_bits);
-
- pr_devel("%s: Found free bit %llu in LUN "
- "map entry %016llx at bitmap index = %d\n",
- __func__, bit_pos, bali->lun_alloc_map[i], i);
-
- *bit_word = i;
- bali->free_aun_cnt--;
- clear_bit(bit_pos, lam);
- break;
- }
-
- return bit_pos;
-}
-
-/**
- * ba_alloc() - allocates a block from the block allocator
- * @ba_lun: Block allocator from which to allocate a block.
- *
- * Return: The allocated block, -1 on failure
- */
-static u64 ba_alloc(struct ba_lun *ba_lun)
-{
- u64 bit_pos = -1;
- int bit_word = 0;
- struct ba_lun_info *bali = NULL;
-
- bali = ba_lun->ba_lun_handle;
-
- pr_debug("%s: Received block allocation request: "
- "lun_id=%016llx free_aun_cnt=%llx\n",
- __func__, ba_lun->lun_id, bali->free_aun_cnt);
-
- if (bali->free_aun_cnt == 0) {
- pr_debug("%s: No space left on LUN: lun_id=%016llx\n",
- __func__, ba_lun->lun_id);
- return -1ULL;
- }
-
- /* Search to find a free entry, curr->high then low->curr */
- bit_pos = find_free_range(bali->free_curr_idx,
- bali->free_high_idx, bali, &bit_word);
- if (bit_pos == -1) {
- bit_pos = find_free_range(bali->free_low_idx,
- bali->free_curr_idx,
- bali, &bit_word);
- if (bit_pos == -1) {
- pr_debug("%s: Could not find an allocation unit on LUN:"
- " lun_id=%016llx\n", __func__, ba_lun->lun_id);
- return -1ULL;
- }
- }
-
- /* Update the free_curr_idx */
- if (bit_pos == HIBIT)
- bali->free_curr_idx = bit_word + 1;
- else
- bali->free_curr_idx = bit_word;
-
- pr_debug("%s: Allocating AU number=%llx lun_id=%016llx "
- "free_aun_cnt=%llx\n", __func__,
- ((bit_word * BITS_PER_LONG) + bit_pos), ba_lun->lun_id,
- bali->free_aun_cnt);
-
- return (u64) ((bit_word * BITS_PER_LONG) + bit_pos);
-}
-
-/**
- * validate_alloc() - validates the specified block has been allocated
- * @bali: LUN info owning the block allocator.
- * @aun: Block to validate.
- *
- * Return: 0 on success, -1 on failure
- */
-static int validate_alloc(struct ba_lun_info *bali, u64 aun)
-{
- int idx = 0, bit_pos = 0;
-
- idx = aun / BITS_PER_LONG;
- bit_pos = aun % BITS_PER_LONG;
-
- if (test_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx]))
- return -1;
-
- return 0;
-}
-
-/**
- * ba_free() - frees a block from the block allocator
- * @ba_lun: Block allocator from which to allocate a block.
- * @to_free: Block to free.
- *
- * Return: 0 on success, -1 on failure
- */
-static int ba_free(struct ba_lun *ba_lun, u64 to_free)
-{
- int idx = 0, bit_pos = 0;
- struct ba_lun_info *bali = NULL;
-
- bali = ba_lun->ba_lun_handle;
-
- if (validate_alloc(bali, to_free)) {
- pr_debug("%s: AUN %llx is not allocated on lun_id=%016llx\n",
- __func__, to_free, ba_lun->lun_id);
- return -1;
- }
-
- pr_debug("%s: Received a request to free AU=%llx lun_id=%016llx "
- "free_aun_cnt=%llx\n", __func__, to_free, ba_lun->lun_id,
- bali->free_aun_cnt);
-
- if (bali->aun_clone_map[to_free] > 0) {
- pr_debug("%s: AUN %llx lun_id=%016llx cloned. Clone count=%x\n",
- __func__, to_free, ba_lun->lun_id,
- bali->aun_clone_map[to_free]);
- bali->aun_clone_map[to_free]--;
- return 0;
- }
-
- idx = to_free / BITS_PER_LONG;
- bit_pos = to_free % BITS_PER_LONG;
-
- set_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx]);
- bali->free_aun_cnt++;
-
- if (idx < bali->free_low_idx)
- bali->free_low_idx = idx;
- else if (idx > bali->free_high_idx)
- bali->free_high_idx = idx;
-
- pr_debug("%s: Successfully freed AU bit_pos=%x bit map index=%x "
- "lun_id=%016llx free_aun_cnt=%llx\n", __func__, bit_pos, idx,
- ba_lun->lun_id, bali->free_aun_cnt);
-
- return 0;
-}
-
-/**
- * ba_clone() - Clone a chunk of the block allocation table
- * @ba_lun: Block allocator from which to allocate a block.
- * @to_clone: Block to clone.
- *
- * Return: 0 on success, -1 on failure
- */
-static int ba_clone(struct ba_lun *ba_lun, u64 to_clone)
-{
- struct ba_lun_info *bali = ba_lun->ba_lun_handle;
-
- if (validate_alloc(bali, to_clone)) {
- pr_debug("%s: AUN=%llx not allocated on lun_id=%016llx\n",
- __func__, to_clone, ba_lun->lun_id);
- return -1;
- }
-
- pr_debug("%s: Received a request to clone AUN %llx on lun_id=%016llx\n",
- __func__, to_clone, ba_lun->lun_id);
-
- if (bali->aun_clone_map[to_clone] == MAX_AUN_CLONE_CNT) {
- pr_debug("%s: AUN %llx on lun_id=%016llx hit max clones already\n",
- __func__, to_clone, ba_lun->lun_id);
- return -1;
- }
-
- bali->aun_clone_map[to_clone]++;
-
- return 0;
-}
-
-/**
- * ba_space() - returns the amount of free space left in the block allocator
- * @ba_lun: Block allocator.
- *
- * Return: Amount of free space in block allocator
- */
-static u64 ba_space(struct ba_lun *ba_lun)
-{
- struct ba_lun_info *bali = ba_lun->ba_lun_handle;
-
- return bali->free_aun_cnt;
-}
-
-/**
- * cxlflash_ba_terminate() - frees resources associated with the block allocator
- * @ba_lun: Block allocator.
- *
- * Safe to call in a partially allocated state.
- */
-void cxlflash_ba_terminate(struct ba_lun *ba_lun)
-{
- struct ba_lun_info *bali = ba_lun->ba_lun_handle;
-
- if (bali) {
- kfree(bali->aun_clone_map);
- kfree(bali->lun_alloc_map);
- kfree(bali);
- ba_lun->ba_lun_handle = NULL;
- }
-}
-
-/**
- * init_vlun() - initializes a LUN for virtual use
- * @lli: LUN information structure that owns the block allocator.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_vlun(struct llun_info *lli)
-{
- int rc = 0;
- struct glun_info *gli = lli->parent;
- struct blka *blka = &gli->blka;
-
- memset(blka, 0, sizeof(*blka));
- mutex_init(&blka->mutex);
-
- /* LUN IDs are unique per port, save the index instead */
- blka->ba_lun.lun_id = lli->lun_index;
- blka->ba_lun.lsize = gli->max_lba + 1;
- blka->ba_lun.lba_size = gli->blk_len;
-
- blka->ba_lun.au_size = MC_CHUNK_SIZE;
- blka->nchunk = blka->ba_lun.lsize / MC_CHUNK_SIZE;
-
- rc = ba_init(&blka->ba_lun);
- if (unlikely(rc))
- pr_debug("%s: cannot init block_alloc, rc=%d\n", __func__, rc);
-
- pr_debug("%s: returning rc=%d lli=%p\n", __func__, rc, lli);
- return rc;
-}
-
-/**
- * write_same16() - sends a SCSI WRITE_SAME16 (0) command to specified LUN
- * @sdev: SCSI device associated with LUN.
- * @lba: Logical block address to start write same.
- * @nblks: Number of logical blocks to write same.
- *
- * The SCSI WRITE_SAME16 can take quite a while to complete. Should an EEH occur
- * while in scsi_execute_cmd(), the EEH handler will attempt to recover. As
- * part of the recovery, the handler drains all currently running ioctls,
- * waiting until they have completed before proceeding with a reset. As this
- * routine is used on the ioctl path, this can create a condition where the
- * EEH handler becomes stuck, infinitely waiting for this ioctl thread. To
- * avoid this behavior, temporarily unmark this thread as an ioctl thread by
- * releasing the ioctl read semaphore. This will allow the EEH handler to
- * proceed with a recovery while this thread is still running. Once the
- * scsi_execute_cmd() returns, reacquire the ioctl read semaphore and check the
- * adapter state in case it changed while inside of scsi_execute_cmd(). The
- * state check will wait if the adapter is still being recovered or return a
- * failure if the recovery failed. In the event that the adapter reset failed,
- * simply return the failure as the ioctl would be unable to continue.
- *
- * Note that the above puts a requirement on this routine to only be called on
- * an ioctl thread.
- *
- * Return: 0 on success, -errno on failure
- */
-static int write_same16(struct scsi_device *sdev,
- u64 lba,
- u32 nblks)
-{
- u8 *cmd_buf = NULL;
- u8 *scsi_cmd = NULL;
- int rc = 0;
- int result = 0;
- u64 offset = lba;
- int left = nblks;
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- const u32 s = ilog2(sdev->sector_size) - 9;
- const u32 to = sdev->request_queue->rq_timeout;
- const u32 ws_limit =
- sdev->request_queue->limits.max_write_zeroes_sectors >> s;
-
- cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
- scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
- if (unlikely(!cmd_buf || !scsi_cmd)) {
- rc = -ENOMEM;
- goto out;
- }
-
- while (left > 0) {
-
- scsi_cmd[0] = WRITE_SAME_16;
- scsi_cmd[1] = cfg->ws_unmap ? 0x8 : 0;
- put_unaligned_be64(offset, &scsi_cmd[2]);
- put_unaligned_be32(ws_limit < left ? ws_limit : left,
- &scsi_cmd[10]);
-
- /* Drop the ioctl read semaphore across lengthy call */
- up_read(&cfg->ioctl_rwsem);
- result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_OUT,
- cmd_buf, CMD_BUFSIZE, to,
- CMD_RETRIES, NULL);
- down_read(&cfg->ioctl_rwsem);
- rc = check_state(cfg);
- if (rc) {
- dev_err(dev, "%s: Failed state result=%08x\n",
- __func__, result);
- rc = -ENODEV;
- goto out;
- }
-
- if (result) {
- dev_err_ratelimited(dev, "%s: command failed for "
- "offset=%lld result=%08x\n",
- __func__, offset, result);
- rc = -EIO;
- goto out;
- }
- left -= ws_limit;
- offset += ws_limit;
- }
-
-out:
- kfree(cmd_buf);
- kfree(scsi_cmd);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * grow_lxt() - expands the translation table associated with the specified RHTE
- * @afu: AFU associated with the host.
- * @sdev: SCSI device associated with LUN.
- * @ctxid: Context ID of context owning the RHTE.
- * @rhndl: Resource handle associated with the RHTE.
- * @rhte: Resource handle entry (RHTE).
- * @new_size: Number of translation entries associated with RHTE.
- *
- * By design, this routine employs a 'best attempt' allocation and will
- * truncate the requested size down if there is not sufficient space in
- * the block allocator to satisfy the request but there does exist some
- * amount of space. The user is made aware of this by returning the size
- * allocated.
- *
- * Return: 0 on success, -errno on failure
- */
-static int grow_lxt(struct afu *afu,
- struct scsi_device *sdev,
- ctx_hndl_t ctxid,
- res_hndl_t rhndl,
- struct sisl_rht_entry *rhte,
- u64 *new_size)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct sisl_lxt_entry *lxt = NULL, *lxt_old = NULL;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct blka *blka = &gli->blka;
- u32 av_size;
- u32 ngrps, ngrps_old;
- u64 aun; /* chunk# allocated by block allocator */
- u64 delta = *new_size - rhte->lxt_cnt;
- u64 my_new_size;
- int i, rc = 0;
-
- /*
- * Check what is available in the block allocator before re-allocating
- * LXT array. This is done up front under the mutex which must not be
- * released until after allocation is complete.
- */
- mutex_lock(&blka->mutex);
- av_size = ba_space(&blka->ba_lun);
- if (unlikely(av_size <= 0)) {
- dev_dbg(dev, "%s: ba_space error av_size=%d\n",
- __func__, av_size);
- mutex_unlock(&blka->mutex);
- rc = -ENOSPC;
- goto out;
- }
-
- if (av_size < delta)
- delta = av_size;
-
- lxt_old = rhte->lxt_start;
- ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt);
- ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt + delta);
-
- if (ngrps != ngrps_old) {
- /* reallocate to fit new size */
- lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
- GFP_KERNEL);
- if (unlikely(!lxt)) {
- mutex_unlock(&blka->mutex);
- rc = -ENOMEM;
- goto out;
- }
-
- /* copy over all old entries */
- memcpy(lxt, lxt_old, (sizeof(*lxt) * rhte->lxt_cnt));
- } else
- lxt = lxt_old;
-
- /* nothing can fail from now on */
- my_new_size = rhte->lxt_cnt + delta;
-
- /* add new entries to the end */
- for (i = rhte->lxt_cnt; i < my_new_size; i++) {
- /*
- * Due to the earlier check of available space, ba_alloc
- * cannot fail here. If it did due to internal error,
-		 * leave a rlba_base of -1u which will likely be an
-		 * invalid LUN (too large).
- */
- aun = ba_alloc(&blka->ba_lun);
- if ((aun == -1ULL) || (aun >= blka->nchunk))
- dev_dbg(dev, "%s: ba_alloc error allocated chunk=%llu "
- "max=%llu\n", __func__, aun, blka->nchunk - 1);
-
- /* select both ports, use r/w perms from RHT */
- lxt[i].rlba_base = ((aun << MC_CHUNK_SHIFT) |
- (lli->lun_index << LXT_LUNIDX_SHIFT) |
- (RHT_PERM_RW << LXT_PERM_SHIFT |
- lli->port_sel));
- }
-
- mutex_unlock(&blka->mutex);
-
- /*
- * The following sequence is prescribed in the SISlite spec
- * for syncing up with the AFU when adding LXT entries.
- */
- dma_wmb(); /* Make LXT updates visible */
-
- rhte->lxt_start = lxt;
- dma_wmb(); /* Make RHT entry's LXT table update visible */
-
- rhte->lxt_cnt = my_new_size;
- dma_wmb(); /* Make RHT entry's LXT table size update visible */
-
- rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
- if (unlikely(rc))
- rc = -EAGAIN;
-
- /* free old lxt if reallocated */
- if (lxt != lxt_old)
- kfree(lxt_old);
- *new_size = my_new_size;
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * shrink_lxt() - reduces translation table associated with the specified RHTE
- * @afu: AFU associated with the host.
- * @sdev: SCSI device associated with LUN.
- * @rhndl: Resource handle associated with the RHTE.
- * @rhte: Resource handle entry (RHTE).
- * @ctxi: Context owning resources.
- * @new_size: Number of translation entries associated with RHTE.
- *
- * Return: 0 on success, -errno on failure
- */
-static int shrink_lxt(struct afu *afu,
- struct scsi_device *sdev,
- res_hndl_t rhndl,
- struct sisl_rht_entry *rhte,
- struct ctx_info *ctxi,
- u64 *new_size)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct sisl_lxt_entry *lxt, *lxt_old;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct blka *blka = &gli->blka;
- ctx_hndl_t ctxid = DECODE_CTXID(ctxi->ctxid);
- bool needs_ws = ctxi->rht_needs_ws[rhndl];
- bool needs_sync = !ctxi->err_recovery_active;
- u32 ngrps, ngrps_old;
- u64 aun; /* chunk# allocated by block allocator */
- u64 delta = rhte->lxt_cnt - *new_size;
- u64 my_new_size;
- int i, rc = 0;
-
- lxt_old = rhte->lxt_start;
- ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt);
- ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt - delta);
-
- if (ngrps != ngrps_old) {
- /* Reallocate to fit new size unless new size is 0 */
- if (ngrps) {
- lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
- GFP_KERNEL);
- if (unlikely(!lxt)) {
- rc = -ENOMEM;
- goto out;
- }
-
- /* Copy over old entries that will remain */
- memcpy(lxt, lxt_old,
- (sizeof(*lxt) * (rhte->lxt_cnt - delta)));
- } else
- lxt = NULL;
- } else
- lxt = lxt_old;
-
- /* Nothing can fail from now on */
- my_new_size = rhte->lxt_cnt - delta;
-
- /*
- * The following sequence is prescribed in the SISlite spec
- * for syncing up with the AFU when removing LXT entries.
- */
- rhte->lxt_cnt = my_new_size;
- dma_wmb(); /* Make RHT entry's LXT table size update visible */
-
- rhte->lxt_start = lxt;
- dma_wmb(); /* Make RHT entry's LXT table update visible */
-
- if (needs_sync) {
- rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
- if (unlikely(rc))
- rc = -EAGAIN;
- }
-
- if (needs_ws) {
- /*
- * Mark the context as unavailable, so that we can release
- * the mutex safely.
- */
- ctxi->unavail = true;
- mutex_unlock(&ctxi->mutex);
- }
-
- /* Free LBAs allocated to freed chunks */
- mutex_lock(&blka->mutex);
- for (i = delta - 1; i >= 0; i--) {
- aun = lxt_old[my_new_size + i].rlba_base >> MC_CHUNK_SHIFT;
- if (needs_ws)
- write_same16(sdev, aun, MC_CHUNK_SIZE);
- ba_free(&blka->ba_lun, aun);
- }
- mutex_unlock(&blka->mutex);
-
- if (needs_ws) {
- /* Make the context visible again */
- mutex_lock(&ctxi->mutex);
- ctxi->unavail = false;
- }
-
- /* Free old lxt if reallocated */
- if (lxt != lxt_old)
- kfree(lxt_old);
- *new_size = my_new_size;
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * _cxlflash_vlun_resize() - changes the size of a virtual LUN
- * @sdev: SCSI device associated with LUN owning virtual LUN.
- * @ctxi: Context owning resources.
- * @resize: Resize ioctl data structure.
- *
- * On successful return, the user is informed of the new size (in blocks)
- * of the virtual LUN in last LBA format. When the size of the virtual
- * LUN is zero, the last LBA is reflected as -1. See comment in the
- * prologue for _cxlflash_disk_release() regarding AFU syncs and contexts
- * on the error recovery list.
- *
- * Return: 0 on success, -errno on failure
- */
-int _cxlflash_vlun_resize(struct scsi_device *sdev,
- struct ctx_info *ctxi,
- struct dk_cxlflash_resize *resize)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct afu *afu = cfg->afu;
- bool put_ctx = false;
-
- res_hndl_t rhndl = resize->rsrc_handle;
- u64 new_size;
- u64 nsectors;
- u64 ctxid = DECODE_CTXID(resize->context_id),
- rctxid = resize->context_id;
-
- struct sisl_rht_entry *rhte;
-
- int rc = 0;
-
- /*
- * The requested size (req_size) is always assumed to be in 4k blocks,
- * so we have to convert it here from 4k to chunk size.
- */
- nsectors = (resize->req_size * CXLFLASH_BLOCK_SIZE) / gli->blk_len;
- new_size = DIV_ROUND_UP(nsectors, MC_CHUNK_SIZE);
-
- dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu req_size=%llu new_size=%llu\n",
- __func__, ctxid, resize->rsrc_handle, resize->req_size,
- new_size);
-
- if (unlikely(gli->mode != MODE_VIRTUAL)) {
- dev_dbg(dev, "%s: LUN mode does not support resize mode=%d\n",
- __func__, gli->mode);
- rc = -EINVAL;
- goto out;
-
- }
-
- if (!ctxi) {
- ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
- __func__, ctxid);
- rc = -EINVAL;
- goto out;
- }
-
- put_ctx = true;
- }
-
- rhte = get_rhte(ctxi, rhndl, lli);
- if (unlikely(!rhte)) {
- dev_dbg(dev, "%s: Bad resource handle rhndl=%u\n",
- __func__, rhndl);
- rc = -EINVAL;
- goto out;
- }
-
- if (new_size > rhte->lxt_cnt)
- rc = grow_lxt(afu, sdev, ctxid, rhndl, rhte, &new_size);
- else if (new_size < rhte->lxt_cnt)
- rc = shrink_lxt(afu, sdev, rhndl, rhte, ctxi, &new_size);
- else {
- /*
- * Rare case where there is already sufficient space, just
- * need to perform a translation sync with the AFU. This
- * scenario likely follows a previous sync failure during
- * a resize operation. Accordingly, perform the heavyweight
- * form of translation sync as it is unknown which type of
- * resize failed previously.
- */
- rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
- if (unlikely(rc)) {
- rc = -EAGAIN;
- goto out;
- }
- }
-
- resize->hdr.return_flags = 0;
- resize->last_lba = (new_size * MC_CHUNK_SIZE * gli->blk_len);
- resize->last_lba /= CXLFLASH_BLOCK_SIZE;
- resize->last_lba--;
-
-out:
- if (put_ctx)
- put_context(ctxi);
- dev_dbg(dev, "%s: resized to %llu returning rc=%d\n",
- __func__, resize->last_lba, rc);
- return rc;
-}
-
-int cxlflash_vlun_resize(struct scsi_device *sdev, void *resize)
-{
- return _cxlflash_vlun_resize(sdev, NULL, resize);
-}
-
-/**
- * cxlflash_restore_luntable() - Restore LUN table to prior state
- * @cfg: Internal structure associated with the host.
- */
-void cxlflash_restore_luntable(struct cxlflash_cfg *cfg)
-{
- struct llun_info *lli, *temp;
- u32 lind;
- int k;
- struct device *dev = &cfg->dev->dev;
- __be64 __iomem *fc_port_luns;
-
- mutex_lock(&global.mutex);
-
- list_for_each_entry_safe(lli, temp, &cfg->lluns, list) {
- if (!lli->in_table)
- continue;
-
- lind = lli->lun_index;
- dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n", __func__, lind);
-
- for (k = 0; k < cfg->num_fc_ports; k++)
- if (lli->port_sel & (1 << k)) {
- fc_port_luns = get_fc_port_luns(cfg, k);
- writeq_be(lli->lun_id[k], &fc_port_luns[lind]);
- dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]);
- }
- }
-
- mutex_unlock(&global.mutex);
-}
-
-/**
- * get_num_ports() - compute number of ports from port selection mask
- * @psm: Port selection mask.
- *
- * Return: Population count of port selection mask
- */
-static inline u8 get_num_ports(u32 psm)
-{
- static const u8 bits[16] = { 0, 1, 1, 2, 1, 2, 2, 3,
- 1, 2, 2, 3, 2, 3, 3, 4 };
-
- return bits[psm & 0xf];
-}
-
-/**
- * init_luntable() - write an entry in the LUN table
- * @cfg: Internal structure associated with the host.
- * @lli: Per adapter LUN information structure.
- *
- * On successful return, a LUN table entry is created:
- * - at the top for LUNs visible on multiple ports.
- * - at the bottom for LUNs visible only on one port.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli)
-{
- u32 chan;
- u32 lind;
- u32 nports;
- int rc = 0;
- int k;
- struct device *dev = &cfg->dev->dev;
- __be64 __iomem *fc_port_luns;
-
- mutex_lock(&global.mutex);
-
- if (lli->in_table)
- goto out;
-
- nports = get_num_ports(lli->port_sel);
- if (nports == 0 || nports > cfg->num_fc_ports) {
- WARN(1, "Unsupported port configuration nports=%u", nports);
- rc = -EIO;
- goto out;
- }
-
- if (nports > 1) {
- /*
- * When LUN is visible from multiple ports, we will put
- * it in the top half of the LUN table.
- */
- for (k = 0; k < cfg->num_fc_ports; k++) {
- if (!(lli->port_sel & (1 << k)))
- continue;
-
- if (cfg->promote_lun_index == cfg->last_lun_index[k]) {
- rc = -ENOSPC;
- goto out;
- }
- }
-
- lind = lli->lun_index = cfg->promote_lun_index;
- dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n", __func__, lind);
-
- for (k = 0; k < cfg->num_fc_ports; k++) {
- if (!(lli->port_sel & (1 << k)))
- continue;
-
- fc_port_luns = get_fc_port_luns(cfg, k);
- writeq_be(lli->lun_id[k], &fc_port_luns[lind]);
- dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]);
- }
-
- cfg->promote_lun_index++;
- } else {
- /*
- * When LUN is visible only from one port, we will put
- * it in the bottom half of the LUN table.
- */
- chan = PORTMASK2CHAN(lli->port_sel);
- if (cfg->promote_lun_index == cfg->last_lun_index[chan]) {
- rc = -ENOSPC;
- goto out;
- }
-
- lind = lli->lun_index = cfg->last_lun_index[chan];
- fc_port_luns = get_fc_port_luns(cfg, chan);
- writeq_be(lli->lun_id[chan], &fc_port_luns[lind]);
- cfg->last_lun_index[chan]--;
- dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n\t%d=%llx\n",
- __func__, lind, chan, lli->lun_id[chan]);
- }
-
- lli->in_table = true;
-out:
- mutex_unlock(&global.mutex);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * cxlflash_disk_virtual_open() - open a virtual disk of specified size
- * @sdev: SCSI device associated with LUN owning virtual LUN.
- * @arg: UVirtual ioctl data structure.
- *
- * On successful return, the user is informed of the resource handle
- * to be used to identify the virtual LUN and the size (in blocks) of
- * the virtual LUN in last LBA format. When the size of the virtual LUN
- * is zero, the last LBA is reflected as -1.
- *
- * Return: 0 on success, -errno on failure
- */
-int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
-
- struct dk_cxlflash_uvirtual *virt = (struct dk_cxlflash_uvirtual *)arg;
- struct dk_cxlflash_resize resize;
-
- u64 ctxid = DECODE_CTXID(virt->context_id),
- rctxid = virt->context_id;
- u64 lun_size = virt->lun_size;
- u64 last_lba = 0;
- u64 rsrc_handle = -1;
-
- int rc = 0;
-
- struct ctx_info *ctxi = NULL;
- struct sisl_rht_entry *rhte = NULL;
-
- dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);
-
- /* Setup the LUNs block allocator on first call */
- mutex_lock(&gli->mutex);
- if (gli->mode == MODE_NONE) {
- rc = init_vlun(lli);
- if (rc) {
- dev_err(dev, "%s: init_vlun failed rc=%d\n",
- __func__, rc);
- rc = -ENOMEM;
- goto err0;
- }
- }
-
- rc = cxlflash_lun_attach(gli, MODE_VIRTUAL, true);
- if (unlikely(rc)) {
- dev_err(dev, "%s: Failed attach to LUN (VIRTUAL)\n", __func__);
- goto err0;
- }
- mutex_unlock(&gli->mutex);
-
- rc = init_luntable(cfg, lli);
- if (rc) {
- dev_err(dev, "%s: init_luntable failed rc=%d\n", __func__, rc);
- goto err1;
- }
-
- ctxi = get_context(cfg, rctxid, lli, 0);
- if (unlikely(!ctxi)) {
- dev_err(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
- rc = -EINVAL;
- goto err1;
- }
-
- rhte = rhte_checkout(ctxi, lli);
- if (unlikely(!rhte)) {
- dev_err(dev, "%s: too many opens ctxid=%llu\n",
- __func__, ctxid);
- rc = -EMFILE; /* too many opens */
- goto err1;
- }
-
- rsrc_handle = (rhte - ctxi->rht_start);
-
- /* Populate RHT format 0 */
- rhte->nmask = MC_RHT_NMASK;
- rhte->fp = SISL_RHT_FP(0U, ctxi->rht_perms);
-
- /* Resize even if requested size is 0 */
- marshal_virt_to_resize(virt, &resize);
- resize.rsrc_handle = rsrc_handle;
- rc = _cxlflash_vlun_resize(sdev, ctxi, &resize);
- if (rc) {
- dev_err(dev, "%s: resize failed rc=%d\n", __func__, rc);
- goto err2;
- }
- last_lba = resize.last_lba;
-
- if (virt->hdr.flags & DK_CXLFLASH_UVIRTUAL_NEED_WRITE_SAME)
- ctxi->rht_needs_ws[rsrc_handle] = true;
-
- virt->hdr.return_flags = 0;
- virt->last_lba = last_lba;
- virt->rsrc_handle = rsrc_handle;
-
- if (get_num_ports(lli->port_sel) > 1)
- virt->hdr.return_flags |= DK_CXLFLASH_ALL_PORTS_ACTIVE;
-out:
- if (likely(ctxi))
- put_context(ctxi);
- dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
- __func__, rsrc_handle, rc, last_lba);
- return rc;
-
-err2:
- rhte_checkin(ctxi, rhte);
-err1:
- cxlflash_lun_detach(gli);
- goto out;
-err0:
- /* Special common cleanup prior to successful LUN attach */
- cxlflash_ba_terminate(&gli->blka.ba_lun);
- mutex_unlock(&gli->mutex);
- goto out;
-}
-
-/**
- * clone_lxt() - copies translation tables from source to destination RHTE
- * @afu: AFU associated with the host.
- * @blka: Block allocator associated with LUN.
- * @ctxid: Context ID of context owning the RHTE.
- * @rhndl: Resource handle associated with the RHTE.
- * @rhte: Destination resource handle entry (RHTE).
- * @rhte_src: Source resource handle entry (RHTE).
- *
- * Return: 0 on success, -errno on failure
- */
-static int clone_lxt(struct afu *afu,
- struct blka *blka,
- ctx_hndl_t ctxid,
- res_hndl_t rhndl,
- struct sisl_rht_entry *rhte,
- struct sisl_rht_entry *rhte_src)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct sisl_lxt_entry *lxt = NULL;
- bool locked = false;
- u32 ngrps;
- u64 aun; /* chunk# allocated by block allocator */
- int j;
- int i = 0;
- int rc = 0;
-
- ngrps = LXT_NUM_GROUPS(rhte_src->lxt_cnt);
-
- if (ngrps) {
- /* allocate new LXTs for clone */
- lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
- GFP_KERNEL);
- if (unlikely(!lxt)) {
- rc = -ENOMEM;
- goto out;
- }
-
- /* copy over */
- memcpy(lxt, rhte_src->lxt_start,
- (sizeof(*lxt) * rhte_src->lxt_cnt));
-
- /* clone the LBAs in block allocator via ref_cnt, note that the
- * block allocator mutex must be held until it is established
- * that this routine will complete without the need for a
- * cleanup.
- */
- mutex_lock(&blka->mutex);
- locked = true;
- for (i = 0; i < rhte_src->lxt_cnt; i++) {
- aun = (lxt[i].rlba_base >> MC_CHUNK_SHIFT);
- if (ba_clone(&blka->ba_lun, aun) == -1ULL) {
- rc = -EIO;
- goto err;
- }
- }
- }
-
- /*
- * The following sequence is prescribed in the SISlite spec
- * for syncing up with the AFU when adding LXT entries.
- */
- dma_wmb(); /* Make LXT updates visible */
-
- rhte->lxt_start = lxt;
- dma_wmb(); /* Make RHT entry's LXT table update visible */
-
- rhte->lxt_cnt = rhte_src->lxt_cnt;
- dma_wmb(); /* Make RHT entry's LXT table size update visible */
-
- rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
- if (unlikely(rc)) {
- rc = -EAGAIN;
- goto err2;
- }
-
-out:
- if (locked)
- mutex_unlock(&blka->mutex);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-err2:
- /* Reset the RHTE */
- rhte->lxt_cnt = 0;
- dma_wmb();
- rhte->lxt_start = NULL;
- dma_wmb();
-err:
- /* free the clones already made */
- for (j = 0; j < i; j++) {
- aun = (lxt[j].rlba_base >> MC_CHUNK_SHIFT);
- ba_free(&blka->ba_lun, aun);
- }
- kfree(lxt);
- goto out;
-}
-
-/**
- * cxlflash_disk_clone() - clone a context by making snapshot of another
- * @sdev: SCSI device associated with LUN owning virtual LUN.
- * @arg: Clone ioctl data structure.
- *
- * This routine effectively performs cxlflash_disk_open operation for each
- * in-use virtual resource in the source context. Note that the destination
- * context must be in pristine state and cannot have any resource handles
- * open at the time of the clone.
- *
- * Return: 0 on success, -errno on failure
- */
-int cxlflash_disk_clone(struct scsi_device *sdev, void *arg)
-{
- struct dk_cxlflash_clone *clone = arg;
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct blka *blka = &gli->blka;
- struct afu *afu = cfg->afu;
- struct dk_cxlflash_release release = { { 0 }, 0 };
-
- struct ctx_info *ctxi_src = NULL,
- *ctxi_dst = NULL;
- struct lun_access *lun_access_src, *lun_access_dst;
- u32 perms;
- u64 ctxid_src = DECODE_CTXID(clone->context_id_src),
- ctxid_dst = DECODE_CTXID(clone->context_id_dst),
- rctxid_src = clone->context_id_src,
- rctxid_dst = clone->context_id_dst;
- int i, j;
- int rc = 0;
- bool found;
- LIST_HEAD(sidecar);
-
- dev_dbg(dev, "%s: ctxid_src=%llu ctxid_dst=%llu\n",
- __func__, ctxid_src, ctxid_dst);
-
- /* Do not clone yourself */
- if (unlikely(rctxid_src == rctxid_dst)) {
- rc = -EINVAL;
- goto out;
- }
-
- if (unlikely(gli->mode != MODE_VIRTUAL)) {
- rc = -EINVAL;
- dev_dbg(dev, "%s: Only supported on virtual LUNs mode=%u\n",
- __func__, gli->mode);
- goto out;
- }
-
- ctxi_src = get_context(cfg, rctxid_src, lli, CTX_CTRL_CLONE);
- ctxi_dst = get_context(cfg, rctxid_dst, lli, 0);
- if (unlikely(!ctxi_src || !ctxi_dst)) {
- dev_dbg(dev, "%s: Bad context ctxid_src=%llu ctxid_dst=%llu\n",
- __func__, ctxid_src, ctxid_dst);
- rc = -EINVAL;
- goto out;
- }
-
- /* Verify there is no open resource handle in the destination context */
- for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
- if (ctxi_dst->rht_start[i].nmask != 0) {
- rc = -EINVAL;
- goto out;
- }
-
- /* Clone LUN access list */
- list_for_each_entry(lun_access_src, &ctxi_src->luns, list) {
- found = false;
- list_for_each_entry(lun_access_dst, &ctxi_dst->luns, list)
- if (lun_access_dst->sdev == lun_access_src->sdev) {
- found = true;
- break;
- }
-
- if (!found) {
- lun_access_dst = kzalloc(sizeof(*lun_access_dst),
- GFP_KERNEL);
- if (unlikely(!lun_access_dst)) {
- dev_err(dev, "%s: lun_access allocation fail\n",
- __func__);
- rc = -ENOMEM;
- goto out;
- }
-
- *lun_access_dst = *lun_access_src;
- list_add(&lun_access_dst->list, &sidecar);
- }
- }
-
- if (unlikely(!ctxi_src->rht_out)) {
- dev_dbg(dev, "%s: Nothing to clone\n", __func__);
- goto out_success;
- }
-
- /* User specified permission on attach */
- perms = ctxi_dst->rht_perms;
-
- /*
- * Copy over checked-out RHT (and their associated LXT) entries by
- * hand, stopping after we've copied all outstanding entries and
- * cleaning up if the clone fails.
- *
- * Note: This loop is equivalent to performing cxlflash_disk_open and
- * cxlflash_vlun_resize. As such, LUN accounting needs to be taken into
- * account by attaching after each successful RHT entry clone. In the
- * event that a clone failure is experienced, the LUN detach is handled
- * via the cleanup performed by _cxlflash_disk_release.
- */
- for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
- if (ctxi_src->rht_out == ctxi_dst->rht_out)
- break;
- if (ctxi_src->rht_start[i].nmask == 0)
- continue;
-
- /* Consume a destination RHT entry */
- ctxi_dst->rht_out++;
- ctxi_dst->rht_start[i].nmask = ctxi_src->rht_start[i].nmask;
- ctxi_dst->rht_start[i].fp =
- SISL_RHT_FP_CLONE(ctxi_src->rht_start[i].fp, perms);
- ctxi_dst->rht_lun[i] = ctxi_src->rht_lun[i];
-
- rc = clone_lxt(afu, blka, ctxid_dst, i,
- &ctxi_dst->rht_start[i],
- &ctxi_src->rht_start[i]);
- if (rc) {
- marshal_clone_to_rele(clone, &release);
- for (j = 0; j < i; j++) {
- release.rsrc_handle = j;
- _cxlflash_disk_release(sdev, ctxi_dst,
- &release);
- }
-
- /* Put back the one we failed on */
- rhte_checkin(ctxi_dst, &ctxi_dst->rht_start[i]);
- goto err;
- }
-
- cxlflash_lun_attach(gli, gli->mode, false);
- }
-
-out_success:
- list_splice(&sidecar, &ctxi_dst->luns);
-
- /* fall through */
-out:
- if (ctxi_src)
- put_context(ctxi_src);
- if (ctxi_dst)
- put_context(ctxi_dst);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-
-err:
- list_for_each_entry_safe(lun_access_src, lun_access_dst, &sidecar, list)
- kfree(lun_access_src);
- goto out;
-}
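To make the size arithmetic in _cxlflash_vlun_resize() above concrete, here is a minimal standalone sketch (not part of the patch) of the 4K-block to chunk conversion and the last-LBA report. It assumes CXLFLASH_BLOCK_SIZE is 4096 (the "4k blocks" the in-code comment refers to), takes MC_CHUNK_SHIFT = 16 from the deleted vlun.h, and uses 512 as an example value for the LUN block length (gli->blk_len):

#include <stdio.h>
#include <stdint.h>

#define CXLFLASH_BLOCK_SIZE     4096ULL         /* ioctl sizes are in 4K blocks (assumed value) */
#define MC_CHUNK_SHIFT          16              /* from vlun.h: shift from LBA to chunk# */
#define MC_CHUNK_SIZE           (1ULL << MC_CHUNK_SHIFT)
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        uint64_t req_size = 100;        /* request: 100 x 4K blocks */
        uint64_t blk_len = 512;         /* assumed LUN block length */
        uint64_t nsectors, new_size, last_lba;

        /* Convert 4K blocks to LUN sectors, then round up to whole chunks */
        nsectors = (req_size * CXLFLASH_BLOCK_SIZE) / blk_len;
        new_size = DIV_ROUND_UP(nsectors, MC_CHUNK_SIZE);

        /* Report the allocated size back in last-LBA form, as the resize path does */
        last_lba = (new_size * MC_CHUNK_SIZE * blk_len) / CXLFLASH_BLOCK_SIZE - 1;

        printf("chunks=%llu last_lba=%llu\n",
               (unsigned long long)new_size, (unsigned long long)last_lba);
        return 0;
}

With these example values a 100-block request rounds up to a single chunk and is reported back as last_lba = 8191 (8192 4K blocks minus one); a zero-sized virtual LUN is reported by the driver as a last LBA of -1.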
diff --git a/drivers/scsi/cxlflash/vlun.h b/drivers/scsi/cxlflash/vlun.h
deleted file mode 100644
index 68e3ea52fe80..000000000000
--- a/drivers/scsi/cxlflash/vlun.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#ifndef _CXLFLASH_VLUN_H
-#define _CXLFLASH_VLUN_H
-
-/* RHT - Resource Handle Table */
-#define MC_RHT_NMASK 16 /* in bits */
-#define MC_CHUNK_SHIFT MC_RHT_NMASK /* shift to go from LBA to chunk# */
-
-#define HIBIT (BITS_PER_LONG - 1)
-
-#define MAX_AUN_CLONE_CNT 0xFF
-
-/*
- * LXT - LBA Translation Table
- *
- * +-------+-------+-------+-------+-------+-------+-------+---+---+
- * | RLBA_BASE |LUN_IDX| P |SEL|
- * +-------+-------+-------+-------+-------+-------+-------+---+---+
- *
- * The LXT Entry contains the physical LBA where the chunk starts (RLBA_BASE).
- * AFU ORes the low order bits from the virtual LBA (offset into the chunk)
- * with RLBA_BASE. The result is the physical LBA to be sent to storage.
- * The LXT Entry also contains an index to a LUN TBL and a bitmask of which
- * outgoing (FC) ports can be selected. The port select bit-mask is ANDed
- * with a global port select bit-mask maintained by the driver.
- * In addition, it has permission bits that are ANDed with the
- * RHT permissions to arrive at the final permissions for the chunk.
- *
- * LXT tables are allocated dynamically in groups. This is done to avoid
- * a malloc/free overhead each time the LXT has to grow or shrink.
- *
- * Based on the current lxt_cnt (used), it is always possible to know
- * how many are allocated (used+free). The number of allocated entries is
- * not stored anywhere.
- *
- * The LXT table is re-allocated whenever it needs to cross into another group.
- */
-#define LXT_GROUP_SIZE 8
-#define LXT_NUM_GROUPS(lxt_cnt) (((lxt_cnt) + 7)/8) /* alloc'ed groups */
-#define LXT_LUNIDX_SHIFT 8 /* LXT entry, shift for LUN index */
-#define LXT_PERM_SHIFT 4 /* LXT entry, shift for permission bits */
-
-struct ba_lun_info {
- u64 *lun_alloc_map;
- u32 lun_bmap_size;
- u32 total_aus;
- u64 free_aun_cnt;
-
- /* indices to be used for elevator lookup of free map */
- u32 free_low_idx;
- u32 free_curr_idx;
- u32 free_high_idx;
-
- u8 *aun_clone_map;
-};
-
-struct ba_lun {
- u64 lun_id;
- u64 wwpn;
- size_t lsize; /* LUN size in number of LBAs */
- size_t lba_size; /* LBA size in number of bytes */
- size_t au_size; /* Allocation Unit size in number of LBAs */
- struct ba_lun_info *ba_lun_handle;
-};
-
-/* Block Allocator */
-struct blka {
- struct ba_lun ba_lun;
- u64 nchunk; /* number of chunks */
- struct mutex mutex;
-};
-
-#endif /* ifndef _CXLFLASH_VLUN_H */
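For reference, the group-based sizing defined above is what lets grow_lxt() and shrink_lxt() skip reallocation unless the entry count crosses a group boundary. A small standalone illustration (not part of the patch), reusing the two macros exactly as defined here:

#include <stdio.h>

#define LXT_GROUP_SIZE          8
#define LXT_NUM_GROUPS(lxt_cnt) (((lxt_cnt) + 7)/8)     /* alloc'ed groups */

int main(void)
{
        unsigned int old_cnt = 14;      /* current lxt_cnt: fits in 2 groups */
        unsigned int new_cnt = 17;      /* requested count: needs 3 groups */

        /* Reallocate only when the group count changes, as in grow_lxt()/shrink_lxt() */
        if (LXT_NUM_GROUPS(new_cnt) != LXT_NUM_GROUPS(old_cnt))
                printf("reallocate: %u -> %u groups (%u entries of capacity)\n",
                       LXT_NUM_GROUPS(old_cnt), LXT_NUM_GROUPS(new_cnt),
                       LXT_NUM_GROUPS(new_cnt) * LXT_GROUP_SIZE);
        else
                printf("reuse the existing allocation\n");

        return 0;
}

Growing from 14 to 17 entries crosses from 2 to 3 groups, so a new array is allocated and the existing entries are copied over, matching the grow_lxt() path earlier in this patch.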
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index d108a86e196e..8dc6be9a00c1 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -765,7 +765,7 @@ static void waiting_process_next(struct AdapterCtlBlk *acb)
return;
if (timer_pending(&acb->waiting_timer))
- del_timer(&acb->waiting_timer);
+ timer_delete(&acb->waiting_timer);
if (list_empty(dcb_list_head))
return;
@@ -1153,7 +1153,7 @@ static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
cmd, cmd->device->id, (u8)cmd->device->lun, cmd);
if (timer_pending(&acb->waiting_timer))
- del_timer(&acb->waiting_timer);
+ timer_delete(&acb->waiting_timer);
/*
* disable interrupt
@@ -1561,7 +1561,7 @@ static void dc395x_handle_interrupt(struct AdapterCtlBlk *acb,
/*dprintkl(KERN_DEBUG, "handle_interrupt: intstatus = 0x%02x ", scsi_intstatus); */
if (timer_pending(&acb->selto_timer))
- del_timer(&acb->selto_timer);
+ timer_delete(&acb->selto_timer);
if (scsi_intstatus & (INT_SELTIMEOUT | INT_DISCONNECT)) {
disconnect(acb); /* bus free interrupt */
@@ -3454,7 +3454,7 @@ static void scsi_reset_detect(struct AdapterCtlBlk *acb)
dprintkl(KERN_INFO, "scsi_reset_detect: acb=%p\n", acb);
/* delay half a second */
if (timer_pending(&acb->waiting_timer))
- del_timer(&acb->waiting_timer);
+ timer_delete(&acb->waiting_timer);
DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
@@ -3715,13 +3715,13 @@ static void adapter_remove_and_free_all_devices(struct AdapterCtlBlk* acb)
/**
- * dc395x_slave_alloc - Called by the scsi mid layer to tell us about a new
+ * dc395x_sdev_init - Called by the scsi mid layer to tell us about a new
* scsi device that we need to deal with. We allocate a new device and then
* insert that device into the adapters device list.
*
* @scsi_device: The new scsi device that we need to handle.
**/
-static int dc395x_slave_alloc(struct scsi_device *scsi_device)
+static int dc395x_sdev_init(struct scsi_device *scsi_device)
{
struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
struct DeviceCtlBlk *dcb;
@@ -3736,12 +3736,12 @@ static int dc395x_slave_alloc(struct scsi_device *scsi_device)
/**
- * dc395x_slave_destroy - Called by the scsi mid layer to tell us about a
+ * dc395x_sdev_destroy - Called by the scsi mid layer to tell us about a
* device that is going away.
*
* @scsi_device: The new scsi device that we need to handle.
**/
-static void dc395x_slave_destroy(struct scsi_device *scsi_device)
+static void dc395x_sdev_destroy(struct scsi_device *scsi_device)
{
struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
struct DeviceCtlBlk *dcb = find_dcb(acb, scsi_device->id, scsi_device->lun);
@@ -4415,9 +4415,9 @@ static void adapter_uninit(struct AdapterCtlBlk *acb)
/* remove timers */
if (timer_pending(&acb->waiting_timer))
- del_timer(&acb->waiting_timer);
+ timer_delete(&acb->waiting_timer);
if (timer_pending(&acb->selto_timer))
- del_timer(&acb->selto_timer);
+ timer_delete(&acb->selto_timer);
adapter_uninit_chip(acb);
adapter_remove_and_free_all_devices(acb);
@@ -4547,8 +4547,8 @@ static const struct scsi_host_template dc395x_driver_template = {
.show_info = dc395x_show_info,
.name = DC395X_BANNER " " DC395X_VERSION,
.queuecommand = dc395x_queue_command,
- .slave_alloc = dc395x_slave_alloc,
- .slave_destroy = dc395x_slave_destroy,
+ .sdev_init = dc395x_sdev_init,
+ .sdev_destroy = dc395x_sdev_destroy,
.can_queue = DC395x_MAX_CAN_QUEUE,
.this_id = 7,
.sg_tablesize = DC395x_MAX_SG_TABLESIZE,
@@ -4668,7 +4668,7 @@ static void dc395x_remove_one(struct pci_dev *dev)
}
-static struct pci_device_id dc395x_pci_table[] = {
+static const struct pci_device_id dc395x_pci_table[] = {
{
.vendor = PCI_VENDOR_ID_TEKRAM,
.device = PCI_DEVICE_ID_TEKRAM_TRMS1040,
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c
index dfb091d34363..d6d091b2f3c7 100644
--- a/drivers/scsi/dmx3191d.c
+++ b/drivers/scsi/dmx3191d.c
@@ -127,7 +127,7 @@ static void dmx3191d_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id dmx3191d_pci_tbl[] = {
+static const struct pci_device_id dmx3191d_pci_tbl[] = {
{PCI_VENDOR_ID_DOMEX, PCI_DEVICE_ID_DOMEX_DMX3191D,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
{ }
diff --git a/drivers/scsi/elx/efct/efct_driver.c b/drivers/scsi/elx/efct/efct_driver.c
index 55d2301bfd7d..1bd42f7db177 100644
--- a/drivers/scsi/elx/efct/efct_driver.c
+++ b/drivers/scsi/elx/efct/efct_driver.c
@@ -310,7 +310,7 @@ efct_fw_reset(struct efct *efct)
* during attach.
*/
if (timer_pending(&efct->xport->stats_timer))
- del_timer(&efct->xport->stats_timer);
+ timer_delete(&efct->xport->stats_timer);
if (efct_hw_reset(&efct->hw, EFCT_HW_RESET_FIRMWARE)) {
efc_log_info(efct, "failed to reset firmware\n");
@@ -470,7 +470,7 @@ out:
return rc;
}
-static struct pci_device_id efct_pci_table[] = {
+static const struct pci_device_id efct_pci_table[] = {
{PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G6), 0},
{PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G7), 0},
{} /* terminate list */
@@ -735,7 +735,7 @@ efct_pci_io_resume(struct pci_dev *pdev)
MODULE_DEVICE_TABLE(pci, efct_pci_table);
-static struct pci_error_handlers efct_pci_err_handler = {
+static const struct pci_error_handlers efct_pci_err_handler = {
.error_detected = efct_pci_io_error_detected,
.slot_reset = efct_pci_io_slot_reset,
.resume = efct_pci_io_resume,
diff --git a/drivers/scsi/elx/efct/efct_hw.c b/drivers/scsi/elx/efct/efct_hw.c
index 5a5525054d71..5b079b8b7a08 100644
--- a/drivers/scsi/elx/efct/efct_hw.c
+++ b/drivers/scsi/elx/efct/efct_hw.c
@@ -1120,7 +1120,7 @@ int
efct_hw_parse_filter(struct efct_hw *hw, void *value)
{
int rc = 0;
- char *p = NULL;
+ char *p = NULL, *pp = NULL;
char *token;
u32 idx = 0;
@@ -1132,6 +1132,7 @@ efct_hw_parse_filter(struct efct_hw *hw, void *value)
efc_log_err(hw->os, "p is NULL\n");
return -ENOMEM;
}
+ pp = p;
idx = 0;
while ((token = strsep(&p, ",")) && *token) {
@@ -1144,7 +1145,7 @@ efct_hw_parse_filter(struct efct_hw *hw, void *value)
if (idx == ARRAY_SIZE(hw->config.filter_def))
break;
}
- kfree(p);
+ kfree(pp);
return rc;
}
diff --git a/drivers/scsi/elx/efct/efct_xport.c b/drivers/scsi/elx/efct/efct_xport.c
index cf4dced20b8b..2aca60f6428e 100644
--- a/drivers/scsi/elx/efct/efct_xport.c
+++ b/drivers/scsi/elx/efct/efct_xport.c
@@ -508,7 +508,7 @@ efct_xport_detach(struct efct_xport *xport)
/*Shutdown FC Statistics timer*/
if (timer_pending(&xport->stats_timer))
- del_timer(&xport->stats_timer);
+ timer_delete(&xport->stats_timer);
efct_hw_teardown(&efct->hw);
diff --git a/drivers/scsi/elx/libefc/efc_fabric.c b/drivers/scsi/elx/libefc/efc_fabric.c
index 9661eea93aa1..cf7e738c4edc 100644
--- a/drivers/scsi/elx/libefc/efc_fabric.c
+++ b/drivers/scsi/elx/libefc/efc_fabric.c
@@ -888,7 +888,7 @@ gidpt_delay_timer_cb(struct timer_list *t)
{
struct efc_node *node = from_timer(node, t, gidpt_delay_timer);
- del_timer(&node->gidpt_delay_timer);
+ timer_delete(&node->gidpt_delay_timer);
efc_node_post_event(node, EFC_EVT_GIDPT_DELAY_EXPIRED, NULL);
}
diff --git a/drivers/scsi/elx/libefc/efc_node.c b/drivers/scsi/elx/libefc/efc_node.c
index a1b4ce6a27b4..f17e052fe537 100644
--- a/drivers/scsi/elx/libefc/efc_node.c
+++ b/drivers/scsi/elx/libefc/efc_node.c
@@ -149,7 +149,7 @@ efc_node_free(struct efc_node *node)
/* if the gidpt_delay_timer is still running, then delete it */
if (timer_pending(&node->gidpt_delay_timer))
- del_timer(&node->gidpt_delay_timer);
+ timer_delete(&node->gidpt_delay_timer);
xa_erase(&nport->lookup, node->rnode.fc_id);
diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h
index 1e2d7c63a8e3..c48275d53aef 100644
--- a/drivers/scsi/esas2r/esas2r.h
+++ b/drivers/scsi/esas2r/esas2r.h
@@ -1411,11 +1411,11 @@ static inline void esas2r_comp_list_drain(struct esas2r_adapter *a,
}
/* sysfs handlers */
-extern struct bin_attribute bin_attr_fw;
-extern struct bin_attribute bin_attr_fs;
-extern struct bin_attribute bin_attr_vda;
-extern struct bin_attribute bin_attr_hw;
-extern struct bin_attribute bin_attr_live_nvram;
-extern struct bin_attribute bin_attr_default_nvram;
+extern const struct bin_attribute bin_attr_fw;
+extern const struct bin_attribute bin_attr_fs;
+extern const struct bin_attribute bin_attr_vda;
+extern const struct bin_attribute bin_attr_hw;
+extern const struct bin_attribute bin_attr_live_nvram;
+extern const struct bin_attribute bin_attr_default_nvram;
#endif /* ESAS2R_H */
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index 0cea5f3d1a08..04a07fe57be2 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -439,7 +439,7 @@ static void esas2r_adapter_power_down(struct esas2r_adapter *a,
if ((test_bit(AF2_INIT_DONE, &a->flags2))
&& (!test_bit(AF_DEGRADED_MODE, &a->flags))) {
if (!power_management) {
- del_timer_sync(&a->timer);
+ timer_delete_sync(&a->timer);
tasklet_kill(&a->tasklet);
}
esas2r_power_down(a);
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index f700a16cd885..44871746944a 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -66,7 +66,7 @@ static struct esas2r_adapter *esas2r_adapter_from_kobj(struct kobject *kobj)
}
static ssize_t read_fw(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -75,7 +75,7 @@ static ssize_t read_fw(struct file *file, struct kobject *kobj,
}
static ssize_t write_fw(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -84,7 +84,7 @@ static ssize_t write_fw(struct file *file, struct kobject *kobj,
}
static ssize_t read_fs(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -93,7 +93,7 @@ static ssize_t read_fs(struct file *file, struct kobject *kobj,
}
static ssize_t write_fs(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -109,7 +109,7 @@ static ssize_t write_fs(struct file *file, struct kobject *kobj,
}
static ssize_t read_vda(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -118,7 +118,7 @@ static ssize_t read_vda(struct file *file, struct kobject *kobj,
}
static ssize_t write_vda(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -127,7 +127,7 @@ static ssize_t write_vda(struct file *file, struct kobject *kobj,
}
static ssize_t read_live_nvram(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -138,7 +138,7 @@ static ssize_t read_live_nvram(struct file *file, struct kobject *kobj,
}
static ssize_t write_live_nvram(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -158,7 +158,7 @@ static ssize_t write_live_nvram(struct file *file, struct kobject *kobj,
}
static ssize_t read_default_nvram(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -169,7 +169,7 @@ static ssize_t read_default_nvram(struct file *file, struct kobject *kobj,
}
static ssize_t read_hw(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -187,7 +187,7 @@ static ssize_t read_hw(struct file *file, struct kobject *kobj,
}
static ssize_t write_hw(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
@@ -211,12 +211,12 @@ static ssize_t write_hw(struct file *file, struct kobject *kobj,
}
#define ESAS2R_RW_BIN_ATTR(_name) \
- struct bin_attribute bin_attr_ ## _name = { \
+ const struct bin_attribute bin_attr_ ## _name = { \
.attr = \
{ .name = __stringify(_name), .mode = S_IRUSR | S_IWUSR }, \
.size = 0, \
- .read = read_ ## _name, \
- .write = write_ ## _name }
+ .read_new = read_ ## _name, \
+ .write_new = write_ ## _name }
ESAS2R_RW_BIN_ATTR(fw);
ESAS2R_RW_BIN_ATTR(fs);
@@ -224,10 +224,10 @@ ESAS2R_RW_BIN_ATTR(vda);
ESAS2R_RW_BIN_ATTR(hw);
ESAS2R_RW_BIN_ATTR(live_nvram);
-struct bin_attribute bin_attr_default_nvram = {
+const struct bin_attribute bin_attr_default_nvram = {
.attr = { .name = "default_nvram", .mode = S_IRUGO },
.size = 0,
- .read = read_default_nvram,
+ .read_new = read_default_nvram,
.write = NULL
};
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 0175d2282b45..802718ffad84 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -2261,7 +2261,7 @@ static void esp_init_swstate(struct esp *esp)
INIT_LIST_HEAD(&esp->active_cmds);
INIT_LIST_HEAD(&esp->esp_cmd_pool);
- /* Start with a clear state, domain validation (via ->slave_configure,
+ /* Start with a clear state, domain validation (via ->sdev_configure,
* spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
* commands.
*/
@@ -2441,7 +2441,7 @@ static void esp_target_destroy(struct scsi_target *starget)
tp->starget = NULL;
}
-static int esp_slave_alloc(struct scsi_device *dev)
+static int esp_sdev_init(struct scsi_device *dev)
{
struct esp *esp = shost_priv(dev->host);
struct esp_target_data *tp = &esp->target[dev->id];
@@ -2463,7 +2463,7 @@ static int esp_slave_alloc(struct scsi_device *dev)
return 0;
}
-static int esp_slave_configure(struct scsi_device *dev)
+static int esp_sdev_configure(struct scsi_device *dev, struct queue_limits *lim)
{
struct esp *esp = shost_priv(dev->host);
struct esp_target_data *tp = &esp->target[dev->id];
@@ -2479,7 +2479,7 @@ static int esp_slave_configure(struct scsi_device *dev)
return 0;
}
-static void esp_slave_destroy(struct scsi_device *dev)
+static void esp_sdev_destroy(struct scsi_device *dev)
{
struct esp_lun_data *lp = dev->hostdata;
@@ -2667,9 +2667,9 @@ const struct scsi_host_template scsi_esp_template = {
.queuecommand = esp_queuecommand,
.target_alloc = esp_target_alloc,
.target_destroy = esp_target_destroy,
- .slave_alloc = esp_slave_alloc,
- .slave_configure = esp_slave_configure,
- .slave_destroy = esp_slave_destroy,
+ .sdev_init = esp_sdev_init,
+ .sdev_configure = esp_sdev_configure,
+ .sdev_destroy = esp_sdev_destroy,
.eh_abort_handler = esp_eh_abort_handler,
.eh_bus_reset_handler = esp_eh_bus_reset_handler,
.eh_host_reset_handler = esp_eh_host_reset_handler,
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index 00cd7c0ccc76..7bb0b69bff24 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -80,7 +80,7 @@
/* ESP config register 4 read-write */
#define ESP_CONFIG4_BBTE 0x01 /* Back-to-back transfers (fsc) */
-#define ESP_CONGIG4_TEST 0x02 /* Transfer counter test mode (fsc) */
+#define ESP_CONFIG4_TEST 0x02 /* Transfer counter test mode (fsc) */
#define ESP_CONFIG4_RADE 0x04 /* Active negation (am53c974/fsc) */
#define ESP_CONFIG4_RAE 0x08 /* Act. negation REQ/ACK (am53c974) */
#define ESP_CONFIG4_PWD 0x20 /* Reduced power feature (am53c974) */
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 39aec710660c..b911fdb387f3 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -269,7 +269,7 @@ static const struct scsi_host_template fcoe_shost_template = {
.eh_abort_handler = fc_eh_abort,
.eh_device_reset_handler = fc_eh_device_reset,
.eh_host_reset_handler = fc_eh_host_reset,
- .slave_alloc = fc_slave_alloc,
+ .sdev_init = fc_sdev_init,
.change_queue_depth = scsi_change_queue_depth,
.this_id = -1,
.cmd_per_lun = 3,
@@ -1013,7 +1013,7 @@ static void fcoe_if_destroy(struct fc_lport *lport)
fc_lport_destroy(lport);
/* Stop the transmit retry timer */
- del_timer_sync(&port->timer);
+ timer_delete_sync(&port->timer);
/* Free existing transmit skbs */
fcoe_clean_pending_queue(lport);
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 5c8d1ba3f8f3..56d270526c9c 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -302,7 +302,7 @@ void fcoe_ctlr_destroy(struct fcoe_ctlr *fip)
fcoe_ctlr_set_state(fip, FIP_ST_DISABLED);
fcoe_ctlr_reset_fcfs(fip);
mutex_unlock(&fip->ctlr_mutex);
- del_timer_sync(&fip->timer);
+ timer_delete_sync(&fip->timer);
cancel_work_sync(&fip->timer_work);
}
EXPORT_SYMBOL(fcoe_ctlr_destroy);
@@ -478,7 +478,7 @@ EXPORT_SYMBOL(fcoe_ctlr_link_up);
static void fcoe_ctlr_reset(struct fcoe_ctlr *fip)
{
fcoe_ctlr_reset_fcfs(fip);
- del_timer(&fip->timer);
+ timer_delete(&fip->timer);
fip->ctlr_ka_time = 0;
fip->port_ka_time = 0;
fip->sol_time = 0;
diff --git a/drivers/scsi/fdomain_pci.c b/drivers/scsi/fdomain_pci.c
index 3e05ce7b89e5..c15b2ce76e9f 100644
--- a/drivers/scsi/fdomain_pci.c
+++ b/drivers/scsi/fdomain_pci.c
@@ -47,7 +47,7 @@ static void fdomain_pci_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id fdomain_pci_table[] = {
+static const struct pci_device_id fdomain_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_FD, PCI_DEVICE_ID_FD_36C70) },
{}
};
diff --git a/drivers/scsi/fnic/Makefile b/drivers/scsi/fnic/Makefile
index 6214a6b2e96d..c025e875009e 100644
--- a/drivers/scsi/fnic/Makefile
+++ b/drivers/scsi/fnic/Makefile
@@ -2,11 +2,13 @@
obj-$(CONFIG_FCOE_FNIC) += fnic.o
fnic-y := \
+ fip.o\
fnic_attrs.o \
fnic_isr.o \
fnic_main.o \
fnic_res.o \
fnic_fcs.o \
+ fdls_disc.o \
fnic_scsi.o \
fnic_trace.o \
fnic_debugfs.o \
@@ -15,4 +17,5 @@ fnic-y := \
vnic_intr.o \
vnic_rq.o \
vnic_wq_copy.o \
- vnic_wq.o
+ vnic_wq.o \
+ fnic_pci_subsys_devid.o
diff --git a/drivers/scsi/fnic/fdls_disc.c b/drivers/scsi/fnic/fdls_disc.c
new file mode 100644
index 000000000000..146744ca97c2
--- /dev/null
+++ b/drivers/scsi/fnic/fdls_disc.c
@@ -0,0 +1,5040 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include <linux/workqueue.h>
+#include "fnic.h"
+#include "fdls_fc.h"
+#include "fnic_fdls.h"
+#include <scsi/fc/fc_fcp.h>
+#include <scsi/scsi_transport_fc.h>
+#include <linux/utsname.h>
+
+#define FC_FC4_TYPE_SCSI 0x08
+#define PORT_SPEED_BIT_8 8
+#define PORT_SPEED_BIT_9 9
+#define PORT_SPEED_BIT_14 14
+#define PORT_SPEED_BIT_15 15
+
+/* FNIC FDMI Register HBA Macros */
+#define FNIC_FDMI_NUM_PORTS 1
+#define FNIC_FDMI_NUM_HBA_ATTRS 9
+#define FNIC_FDMI_TYPE_NODE_NAME 0X1
+#define FNIC_FDMI_TYPE_MANUFACTURER 0X2
+#define FNIC_FDMI_MANUFACTURER "Cisco Systems"
+#define FNIC_FDMI_TYPE_SERIAL_NUMBER 0X3
+#define FNIC_FDMI_TYPE_MODEL 0X4
+#define FNIC_FDMI_TYPE_MODEL_DES 0X5
+#define FNIC_FDMI_MODEL_DESCRIPTION "Cisco Virtual Interface Card"
+#define FNIC_FDMI_TYPE_HARDWARE_VERSION 0X6
+#define FNIC_FDMI_TYPE_DRIVER_VERSION 0X7
+#define FNIC_FDMI_TYPE_ROM_VERSION 0X8
+#define FNIC_FDMI_TYPE_FIRMWARE_VERSION 0X9
+#define FNIC_FDMI_NN_LEN 8
+#define FNIC_FDMI_MANU_LEN 20
+#define FNIC_FDMI_SERIAL_LEN 16
+#define FNIC_FDMI_MODEL_LEN 12
+#define FNIC_FDMI_MODEL_DES_LEN 56
+#define FNIC_FDMI_HW_VER_LEN 16
+#define FNIC_FDMI_DR_VER_LEN 28
+#define FNIC_FDMI_ROM_VER_LEN 8
+#define FNIC_FDMI_FW_VER_LEN 16
+
+/* FNIC FDMI Register PA Macros */
+#define FNIC_FDMI_TYPE_FC4_TYPES 0X1
+#define FNIC_FDMI_TYPE_SUPPORTED_SPEEDS 0X2
+#define FNIC_FDMI_TYPE_CURRENT_SPEED 0X3
+#define FNIC_FDMI_TYPE_MAX_FRAME_SIZE 0X4
+#define FNIC_FDMI_TYPE_OS_NAME 0X5
+#define FNIC_FDMI_TYPE_HOST_NAME 0X6
+#define FNIC_FDMI_NUM_PORT_ATTRS 6
+#define FNIC_FDMI_FC4_LEN 32
+#define FNIC_FDMI_SUPP_SPEED_LEN 4
+#define FNIC_FDMI_CUR_SPEED_LEN 4
+#define FNIC_FDMI_MFS_LEN 4
+#define FNIC_FDMI_MFS 0x800
+#define FNIC_FDMI_OS_NAME_LEN 16
+#define FNIC_FDMI_HN_LEN 24
+
+#define FDLS_FDMI_PLOGI_PENDING 0x1
+#define FDLS_FDMI_REG_HBA_PENDING 0x2
+#define FDLS_FDMI_RPA_PENDING 0x4
+#define FDLS_FDMI_ABORT_PENDING 0x8
+#define FDLS_FDMI_MAX_RETRY 3
+
+#define RETRIES_EXHAUSTED(iport) \
+ (iport->fabric.retry_counter == FABRIC_LOGO_MAX_RETRY)
+
+#define FNIC_TPORT_MAX_NEXUS_RESTART (8)
+
+#define SCHEDULE_OXID_FREE_RETRY_TIME (300)
+
+/* Private Functions */
+static void fdls_fdmi_register_hba(struct fnic_iport_s *iport);
+static void fdls_fdmi_register_pa(struct fnic_iport_s *iport);
+static void fdls_send_rpn_id(struct fnic_iport_s *iport);
+static void fdls_process_flogi_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr,
+ void *rx_frame);
+static void fnic_fdls_start_plogi(struct fnic_iport_s *iport);
+static void fnic_fdls_start_flogi(struct fnic_iport_s *iport);
+static struct fnic_tport_s *fdls_create_tport(struct fnic_iport_s *iport,
+ uint32_t fcid,
+ uint64_t wwpn);
+static void fdls_target_restart_nexus(struct fnic_tport_s *tport);
+static void fdls_start_tport_timer(struct fnic_iport_s *iport,
+ struct fnic_tport_s *tport, int timeout);
+static void fdls_tport_timer_callback(struct timer_list *t);
+static void fdls_send_fdmi_plogi(struct fnic_iport_s *iport);
+static void fdls_start_fabric_timer(struct fnic_iport_s *iport,
+ int timeout);
+static void fdls_init_plogi_frame(uint8_t *frame, struct fnic_iport_s *iport);
+static void fdls_init_els_acc_frame(uint8_t *frame, struct fnic_iport_s *iport);
+static void fdls_init_els_rjt_frame(uint8_t *frame, struct fnic_iport_s *iport);
+static void fdls_init_logo_frame(uint8_t *frame, struct fnic_iport_s *iport);
+static void fdls_init_fabric_abts_frame(uint8_t *frame,
+ struct fnic_iport_s *iport);
+
+uint8_t *fdls_alloc_frame(struct fnic_iport_s *iport)
+{
+ struct fnic *fnic = iport->fnic;
+ uint8_t *frame = NULL;
+
+ frame = mempool_alloc(fnic->frame_pool, GFP_ATOMIC);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame");
+ return NULL;
+ }
+
+ memset(frame, 0, FNIC_FCOE_FRAME_MAXSZ);
+ return frame;
+}
+
+/**
+ * fdls_alloc_oxid - Allocate an oxid from the bitmap-based oxid pool
+ * @iport: Handle to iport instance
+ * @oxid_frame_type: Type of frame the oxid is allocated for
+ * @active_oxid: Pointer updated with the allocated oxid
+ *
+ * Called with fnic lock held
+ */
+uint16_t fdls_alloc_oxid(struct fnic_iport_s *iport, int oxid_frame_type,
+ uint16_t *active_oxid)
+{
+ struct fnic *fnic = iport->fnic;
+ struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool;
+ int idx;
+ uint16_t oxid;
+
+ lockdep_assert_held(&fnic->fnic_lock);
+
+ /*
+ * Allocate next available oxid from bitmap
+ */
+ idx = find_next_zero_bit(oxid_pool->bitmap, FNIC_OXID_POOL_SZ, oxid_pool->next_idx);
+ if (idx == FNIC_OXID_POOL_SZ) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Alloc oxid: all oxid slots are busy iport state:%d\n",
+ iport->state);
+ return FNIC_UNASSIGNED_OXID;
+ }
+
+ WARN_ON(test_and_set_bit(idx, oxid_pool->bitmap));
+ oxid_pool->next_idx = (idx + 1) % FNIC_OXID_POOL_SZ; /* cycle through the bitmap */
+
+ oxid = FNIC_OXID_ENCODE(idx, oxid_frame_type);
+ *active_oxid = oxid;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "alloc oxid: 0x%x, iport state: %d\n",
+ oxid, iport->state);
+ return oxid;
+}
+
+/**
+ * fdls_free_oxid_idx - Free the oxid using the idx
+ * @iport: Handle to iport instance
+ * @oxid_idx: The index to free
+ *
+ * Free the oxid immediately and make it available for new requests
+ * Called with fnic lock held
+ */
+static void fdls_free_oxid_idx(struct fnic_iport_s *iport, uint16_t oxid_idx)
+{
+ struct fnic *fnic = iport->fnic;
+ struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool;
+
+ lockdep_assert_held(&fnic->fnic_lock);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "free oxid idx: 0x%x\n", oxid_idx);
+
+ WARN_ON(!test_and_clear_bit(oxid_idx, oxid_pool->bitmap));
+}
+
+/**
+ * fdls_reclaim_oxid_handler - Callback handler for delayed_oxid_work
+ * @work: Handle to work_struct
+ *
+ * Scheduled when an oxid is to be freed later.
+ * After freeing the expired oxid(s), the handler schedules
+ * another callback with the remaining time
+ * of the next unexpired entry in the reclaim list.
+ */
+void fdls_reclaim_oxid_handler(struct work_struct *work)
+{
+ struct fnic_oxid_pool_s *oxid_pool = container_of(work,
+ struct fnic_oxid_pool_s, oxid_reclaim_work.work);
+ struct fnic_iport_s *iport = container_of(oxid_pool,
+ struct fnic_iport_s, oxid_pool);
+ struct fnic *fnic = iport->fnic;
+ struct reclaim_entry_s *reclaim_entry, *next;
+ unsigned long delay_j, cur_jiffies;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Reclaim oxid callback\n");
+
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+
+ /* Though the work was scheduled for one entry,
+ * walk through and free the expired entries which might have been scheduled
+ * at around the same time as the first entry
+ */
+ list_for_each_entry_safe(reclaim_entry, next,
+ &(oxid_pool->oxid_reclaim_list), links) {
+
+ /* The list is always maintained in the order of expiry time */
+ cur_jiffies = jiffies;
+ if (time_before(cur_jiffies, reclaim_entry->expires))
+ break;
+
+ list_del(&reclaim_entry->links);
+ fdls_free_oxid_idx(iport, reclaim_entry->oxid_idx);
+ kfree(reclaim_entry);
+ }
+
+ /* schedule to free up the next entry */
+ if (!list_empty(&oxid_pool->oxid_reclaim_list)) {
+ reclaim_entry = list_first_entry(&oxid_pool->oxid_reclaim_list,
+ struct reclaim_entry_s, links);
+
+ delay_j = reclaim_entry->expires - cur_jiffies;
+ schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Scheduling next callback at:%ld jiffies\n", delay_j);
+ }
+
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+}
+
+/**
+ * fdls_free_oxid - Helper function to free the oxid
+ * @iport: Handle to iport instance
+ * @oxid: oxid to free
+ * @active_oxid: the oxid which is in use
+ *
+ * Called with fnic lock held
+ */
+void fdls_free_oxid(struct fnic_iport_s *iport,
+ uint16_t oxid, uint16_t *active_oxid)
+{
+ fdls_free_oxid_idx(iport, FNIC_OXID_IDX(oxid));
+ *active_oxid = FNIC_UNASSIGNED_OXID;
+}
+
+/**
+ * fdls_schedule_oxid_free - Schedule oxid to be freed later
+ * @iport: Handle to iport instance
+ * @active_oxid: the oxid which is in use
+ *
+ * Called in the rare scenario where both a command
+ * (fdls or target discovery) and the subsequent ABTS
+ * time out without a link change.
+ *
+ * Called with fnic lock held
+ */
+void fdls_schedule_oxid_free(struct fnic_iport_s *iport, uint16_t *active_oxid)
+{
+ struct fnic *fnic = iport->fnic;
+ struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool;
+ struct reclaim_entry_s *reclaim_entry;
+ unsigned long delay_j = msecs_to_jiffies(OXID_RECLAIM_TOV(iport));
+ int oxid_idx = FNIC_OXID_IDX(*active_oxid);
+
+ lockdep_assert_held(&fnic->fnic_lock);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Schedule oxid free. oxid: 0x%x\n", *active_oxid);
+
+ *active_oxid = FNIC_UNASSIGNED_OXID;
+
+ reclaim_entry = (struct reclaim_entry_s *)
+ kzalloc(sizeof(struct reclaim_entry_s), GFP_ATOMIC);
+
+ if (!reclaim_entry) {
+ FNIC_FCS_DBG(KERN_WARNING, fnic->host, fnic->fnic_num,
+ "Failed to allocate memory for reclaim struct for oxid idx: %d\n",
+ oxid_idx);
+
+ /* Retry the scheduling */
+ WARN_ON(test_and_set_bit(oxid_idx, oxid_pool->pending_schedule_free));
+ schedule_delayed_work(&oxid_pool->schedule_oxid_free_retry, 0);
+ return;
+ }
+
+ reclaim_entry->oxid_idx = oxid_idx;
+ reclaim_entry->expires = round_jiffies(jiffies + delay_j);
+
+ list_add_tail(&reclaim_entry->links, &oxid_pool->oxid_reclaim_list);
+
+ schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j);
+}
+
+/**
+ * fdls_schedule_oxid_free_retry_work - Work handler that retries scheduling
+ * oxids to be freed later
+ *
+ * @work: Handle to the work struct
+ */
+void fdls_schedule_oxid_free_retry_work(struct work_struct *work)
+{
+ struct fnic_oxid_pool_s *oxid_pool = container_of(work,
+ struct fnic_oxid_pool_s, schedule_oxid_free_retry.work);
+ struct fnic_iport_s *iport = container_of(oxid_pool,
+ struct fnic_iport_s, oxid_pool);
+ struct fnic *fnic = iport->fnic;
+ struct reclaim_entry_s *reclaim_entry;
+ unsigned long delay_j = msecs_to_jiffies(OXID_RECLAIM_TOV(iport));
+ unsigned long flags;
+ int idx;
+
+ for_each_set_bit(idx, oxid_pool->pending_schedule_free, FNIC_OXID_POOL_SZ) {
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Schedule oxid free. oxid idx: %d\n", idx);
+
+ reclaim_entry = kzalloc(sizeof(*reclaim_entry), GFP_KERNEL);
+ if (!reclaim_entry) {
+ schedule_delayed_work(&oxid_pool->schedule_oxid_free_retry,
+ msecs_to_jiffies(SCHEDULE_OXID_FREE_RETRY_TIME));
+ return;
+ }
+
+ clear_bit(idx, oxid_pool->pending_schedule_free);
+ reclaim_entry->oxid_idx = idx;
+ reclaim_entry->expires = round_jiffies(jiffies + delay_j);
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ list_add_tail(&reclaim_entry->links, &oxid_pool->oxid_reclaim_list);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j);
+ }
+}
+
+static bool fdls_is_oxid_fabric_req(uint16_t oxid)
+{
+ int oxid_frame_type = FNIC_FRAME_TYPE(oxid);
+
+ switch (oxid_frame_type) {
+ case FNIC_FRAME_TYPE_FABRIC_FLOGI:
+ case FNIC_FRAME_TYPE_FABRIC_PLOGI:
+ case FNIC_FRAME_TYPE_FABRIC_RPN:
+ case FNIC_FRAME_TYPE_FABRIC_RFT:
+ case FNIC_FRAME_TYPE_FABRIC_RFF:
+ case FNIC_FRAME_TYPE_FABRIC_GPN_FT:
+ case FNIC_FRAME_TYPE_FABRIC_LOGO:
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+static bool fdls_is_oxid_fdmi_req(uint16_t oxid)
+{
+ int oxid_frame_type = FNIC_FRAME_TYPE(oxid);
+
+ switch (oxid_frame_type) {
+ case FNIC_FRAME_TYPE_FDMI_PLOGI:
+ case FNIC_FRAME_TYPE_FDMI_RHBA:
+ case FNIC_FRAME_TYPE_FDMI_RPA:
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+static bool fdls_is_oxid_tgt_req(uint16_t oxid)
+{
+ int oxid_frame_type = FNIC_FRAME_TYPE(oxid);
+
+ switch (oxid_frame_type) {
+ case FNIC_FRAME_TYPE_TGT_PLOGI:
+ case FNIC_FRAME_TYPE_TGT_PRLI:
+ case FNIC_FRAME_TYPE_TGT_ADISC:
+ case FNIC_FRAME_TYPE_TGT_LOGO:
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+static void fdls_reset_oxid_pool(struct fnic_iport_s *iport)
+{
+ struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool;
+
+ oxid_pool->next_idx = 0;
+}
+
+void fnic_del_fabric_timer_sync(struct fnic *fnic)
+{
+ fnic->iport.fabric.del_timer_inprogress = 1;
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ timer_delete_sync(&fnic->iport.fabric.retry_timer);
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ fnic->iport.fabric.del_timer_inprogress = 0;
+}
+
+void fnic_del_tport_timer_sync(struct fnic *fnic,
+ struct fnic_tport_s *tport)
+{
+ tport->del_timer_inprogress = 1;
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ timer_delete_sync(&tport->retry_timer);
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ tport->del_timer_inprogress = 0;
+}
+
+static void
+fdls_start_fabric_timer(struct fnic_iport_s *iport, int timeout)
+{
+ u64 fabric_tov;
+ struct fnic *fnic = iport->fnic;
+
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport fcid: 0x%x: Canceling fabric disc timer\n",
+ iport->fcid);
+ fnic_del_fabric_timer_sync(fnic);
+ iport->fabric.timer_pending = 0;
+ }
+
+ if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED))
+ iport->fabric.retry_counter++;
+
+ fabric_tov = jiffies + msecs_to_jiffies(timeout);
+ mod_timer(&iport->fabric.retry_timer, round_jiffies(fabric_tov));
+ iport->fabric.timer_pending = 1;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fabric timer is %d ", timeout);
+}
+
+static void
+fdls_start_tport_timer(struct fnic_iport_s *iport,
+ struct fnic_tport_s *tport, int timeout)
+{
+ u64 fabric_tov;
+ struct fnic *fnic = iport->fnic;
+
+ if (tport->timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport fcid 0x%x: Canceling disc timer\n",
+ tport->fcid);
+ fnic_del_tport_timer_sync(fnic, tport);
+ tport->timer_pending = 0;
+ }
+
+ if (!(tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED))
+ tport->retry_counter++;
+
+ fabric_tov = jiffies + msecs_to_jiffies(timeout);
+ mod_timer(&tport->retry_timer, round_jiffies(fabric_tov));
+ tport->timer_pending = 1;
+}
+
+void fdls_init_plogi_frame(uint8_t *frame,
+ struct fnic_iport_s *iport)
+{
+ struct fc_std_flogi *pplogi;
+ uint8_t s_id[3];
+
+ pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *pplogi = (struct fc_std_flogi) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_d_id = {0xFF, 0xFF, 0xFC},
+ .fh_type = FC_TYPE_ELS, .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
+ .els = {
+ .fl_cmd = ELS_PLOGI,
+ .fl_csp = {.sp_hi_ver = FNIC_FC_PH_VER_HI,
+ .sp_lo_ver = FNIC_FC_PH_VER_LO,
+ .sp_bb_cred = cpu_to_be16(FNIC_FC_B2B_CREDIT),
+ .sp_features = cpu_to_be16(FC_SP_FT_CIRO),
+ .sp_bb_data = cpu_to_be16(FNIC_FC_B2B_RDF_SZ),
+ .sp_tot_seq = cpu_to_be16(FNIC_FC_CONCUR_SEQS),
+ .sp_rel_off = cpu_to_be16(FNIC_FC_RO_INFO),
+ .sp_e_d_tov = cpu_to_be32(FC_DEF_E_D_TOV)},
+ .fl_cssp[2].cp_class = cpu_to_be16(FC_CPC_VALID | FC_CPC_SEQ),
+ .fl_cssp[2].cp_rdfs = cpu_to_be16(0x800),
+ .fl_cssp[2].cp_con_seq = cpu_to_be16(0xFF),
+ .fl_cssp[2].cp_open_seq = 1}
+ };
+
+ FNIC_STD_SET_NPORT_NAME(&pplogi->els.fl_wwpn, iport->wwpn);
+ FNIC_STD_SET_NODE_NAME(&pplogi->els.fl_wwnn, iport->wwnn);
+ FNIC_LOGI_SET_RDF_SIZE(pplogi->els, iport->max_payload_size);
+
+ hton24(s_id, iport->fcid);
+ FNIC_STD_SET_S_ID(pplogi->fchdr, s_id);
+}
+
+static void fdls_init_els_acc_frame(uint8_t *frame,
+ struct fnic_iport_s *iport)
+{
+ struct fc_std_els_acc_rsp *pels_acc;
+ uint8_t s_id[3];
+
+ pels_acc = (struct fc_std_els_acc_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *pels_acc = (struct fc_std_els_acc_rsp) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REP,
+ .fh_type = FC_TYPE_ELS, .fh_f_ctl = {FNIC_ELS_REP_FCTL, 0, 0}},
+ .acc.la_cmd = ELS_LS_ACC,
+ };
+
+ hton24(s_id, iport->fcid);
+ FNIC_STD_SET_S_ID(pels_acc->fchdr, s_id);
+ FNIC_STD_SET_RX_ID(pels_acc->fchdr, FNIC_UNASSIGNED_RXID);
+}
+
+static void fdls_init_els_rjt_frame(uint8_t *frame,
+ struct fnic_iport_s *iport)
+{
+ struct fc_std_els_rjt_rsp *pels_rjt;
+
+ pels_rjt = (struct fc_std_els_rjt_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *pels_rjt = (struct fc_std_els_rjt_rsp) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REP, .fh_type = FC_TYPE_ELS,
+ .fh_f_ctl = {FNIC_ELS_REP_FCTL, 0, 0}},
+ .rej.er_cmd = ELS_LS_RJT,
+ };
+
+ FNIC_STD_SET_RX_ID(pels_rjt->fchdr, FNIC_UNASSIGNED_RXID);
+}
+
+static void fdls_init_logo_frame(uint8_t *frame,
+ struct fnic_iport_s *iport)
+{
+ struct fc_std_logo *plogo;
+ uint8_t s_id[3];
+
+ plogo = (struct fc_std_logo *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *plogo = (struct fc_std_logo) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_type = FC_TYPE_ELS,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}},
+ .els.fl_cmd = ELS_LOGO,
+ };
+
+ hton24(s_id, iport->fcid);
+ FNIC_STD_SET_S_ID(plogo->fchdr, s_id);
+ memcpy(plogo->els.fl_n_port_id, s_id, 3);
+
+ FNIC_STD_SET_NPORT_NAME(&plogo->els.fl_n_port_wwn,
+ iport->wwpn);
+}
+
+static void fdls_init_fabric_abts_frame(uint8_t *frame,
+ struct fnic_iport_s *iport)
+{
+ struct fc_frame_header *pfabric_abts;
+
+ pfabric_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *pfabric_abts = (struct fc_frame_header) {
+ .fh_r_ctl = FC_RCTL_BA_ABTS, /* ABTS */
+ .fh_s_id = {0x00, 0x00, 0x00},
+ .fh_cs_ctl = 0x00, .fh_type = FC_TYPE_BLS,
+ .fh_f_ctl = {FNIC_REQ_ABTS_FCTL, 0, 0}, .fh_seq_id = 0x00,
+ .fh_df_ctl = 0x00, .fh_seq_cnt = 0x0000,
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID),
+ .fh_parm_offset = 0x00000000, /* bit 0 = 0: abort an exchange */
+ };
+}
+
+static void
+fdls_send_rscn_resp(struct fnic_iport_s *iport,
+ struct fc_frame_header *rscn_fchdr)
+{
+ uint8_t *frame;
+ struct fc_std_els_acc_rsp *pels_acc;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_els_acc_rsp);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send RSCN response");
+ return;
+ }
+
+ pels_acc = (struct fc_std_els_acc_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_els_acc_frame(frame, iport);
+
+ FNIC_STD_SET_D_ID(pels_acc->fchdr, rscn_fchdr->fh_s_id);
+
+ oxid = FNIC_STD_GET_OX_ID(rscn_fchdr);
+ FNIC_STD_SET_OX_ID(pels_acc->fchdr, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send RSCN response with oxid: 0x%x",
+ iport->fcid, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+}
+
+static void
+fdls_send_logo_resp(struct fnic_iport_s *iport,
+ struct fc_frame_header *req_fchdr)
+{
+ uint8_t *frame;
+ struct fc_std_els_acc_rsp *plogo_resp;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_els_acc_rsp);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send LOGO response");
+ return;
+ }
+
+ plogo_resp = (struct fc_std_els_acc_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_els_acc_frame(frame, iport);
+
+ FNIC_STD_SET_D_ID(plogo_resp->fchdr, req_fchdr->fh_s_id);
+
+ oxid = FNIC_STD_GET_OX_ID(req_fchdr);
+ FNIC_STD_SET_OX_ID(plogo_resp->fchdr, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send LOGO response with oxid: 0x%x",
+ iport->fcid, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+}
+
+void
+fdls_send_tport_abts(struct fnic_iport_s *iport,
+ struct fnic_tport_s *tport)
+{
+ uint8_t *frame;
+ uint8_t s_id[3];
+ uint8_t d_id[3];
+ struct fnic *fnic = iport->fnic;
+ struct fc_frame_header *ptport_abts;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_frame_header);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send tport ABTS");
+ return;
+ }
+
+ ptport_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *ptport_abts = (struct fc_frame_header) {
+ .fh_r_ctl = FC_RCTL_BA_ABTS, /* ABTS */
+ .fh_cs_ctl = 0x00, .fh_type = FC_TYPE_BLS,
+ .fh_f_ctl = {FNIC_REQ_ABTS_FCTL, 0, 0}, .fh_seq_id = 0x00,
+ .fh_df_ctl = 0x00, .fh_seq_cnt = 0x0000,
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID),
+ .fh_parm_offset = 0x00000000, /* bit 0 = 0: abort an exchange */
+ };
+
+ hton24(s_id, iport->fcid);
+ hton24(d_id, tport->fcid);
+ FNIC_STD_SET_S_ID(*ptport_abts, s_id);
+ FNIC_STD_SET_D_ID(*ptport_abts, d_id);
+ tport->flags |= FNIC_FDLS_TGT_ABORT_ISSUED;
+
+ FNIC_STD_SET_OX_ID(*ptport_abts, tport->active_oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send tport abts: tport->state: %d ",
+ iport->fcid, tport->state);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_tport_timer(iport, tport, 2 * iport->e_d_tov);
+}
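+
+/*
+ * Abort the outstanding fabric request. The ABTS D_ID depends on the
+ * current fabric state: the F_Port controller for FLOGI/LOGO, the
+ * directory server for PLOGI/RPN_ID/RFT_ID/RFF_ID/GPN_FT and the
+ * fabric controller for SCR.
+ */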
+static void fdls_send_fabric_abts(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ uint8_t s_id[3];
+ uint8_t d_id[3];
+ struct fnic *fnic = iport->fnic;
+ struct fc_frame_header *pfabric_abts;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_frame_header);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send fabric ABTS");
+ return;
+ }
+
+ pfabric_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_fabric_abts_frame(frame, iport);
+
+ hton24(s_id, iport->fcid);
+
+ switch (iport->fabric.state) {
+ case FDLS_STATE_FABRIC_LOGO:
+ hton24(d_id, FC_FID_FLOGI);
+ FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
+ break;
+
+ case FDLS_STATE_FABRIC_FLOGI:
+ hton24(d_id, FC_FID_FLOGI);
+ FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
+ break;
+
+ case FDLS_STATE_FABRIC_PLOGI:
+ FNIC_STD_SET_S_ID(*pfabric_abts, s_id);
+ hton24(d_id, FC_FID_DIR_SERV);
+ FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
+ break;
+
+ case FDLS_STATE_RPN_ID:
+ FNIC_STD_SET_S_ID(*pfabric_abts, s_id);
+ hton24(d_id, FC_FID_DIR_SERV);
+ FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
+ break;
+
+ case FDLS_STATE_SCR:
+ FNIC_STD_SET_S_ID(*pfabric_abts, s_id);
+ hton24(d_id, FC_FID_FCTRL);
+ FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
+ break;
+
+ case FDLS_STATE_REGISTER_FC4_TYPES:
+ FNIC_STD_SET_S_ID(*pfabric_abts, s_id);
+ hton24(d_id, FC_FID_DIR_SERV);
+ FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
+ break;
+
+ case FDLS_STATE_REGISTER_FC4_FEATURES:
+ FNIC_STD_SET_S_ID(*pfabric_abts, s_id);
+ hton24(d_id, FC_FID_DIR_SERV);
+ FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
+ break;
+
+ case FDLS_STATE_GPN_FT:
+ FNIC_STD_SET_S_ID(*pfabric_abts, s_id);
+ hton24(d_id, FC_FID_DIR_SERV);
+ FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
+ break;
+ default:
+ mempool_free(frame, fnic->frame_pool);
+ return;
+ }
+
+ oxid = iport->active_oxid_fabric_req;
+ FNIC_STD_SET_OX_ID(*pfabric_abts, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send fabric abts. iport->fabric.state: %d oxid: 0x%x",
+ iport->fcid, iport->fabric.state, oxid);
+
+ iport->fabric.flags |= FNIC_FDLS_FABRIC_ABORT_ISSUED;
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
+ iport->fabric.timer_pending = 1;
+}
+
+static uint8_t *fdls_alloc_init_fdmi_abts_frame(struct fnic_iport_s *iport,
+ uint16_t oxid)
+{
+ struct fc_frame_header *pfdmi_abts;
+ uint8_t d_id[3];
+ uint8_t *frame;
+ struct fnic *fnic = iport->fnic;
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send FDMI ABTS");
+ return NULL;
+ }
+
+ pfdmi_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_fabric_abts_frame(frame, iport);
+
+ hton24(d_id, FC_FID_MGMT_SERV);
+ FNIC_STD_SET_D_ID(*pfdmi_abts, d_id);
+ FNIC_STD_SET_OX_ID(*pfdmi_abts, oxid);
+
+ return frame;
+}
+
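+/*
+ * Abort whichever FDMI exchanges are outstanding: the FDMI PLOGI, or
+ * the RHBA and/or RPA registrations. The FDMI timer is re-armed and
+ * the abort is marked pending.
+ */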
+static void fdls_send_fdmi_abts(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ unsigned long fdmi_tov;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_frame_header);
+
+ if (iport->fabric.fdmi_pending & FDLS_FDMI_PLOGI_PENDING) {
+ frame = fdls_alloc_init_fdmi_abts_frame(iport,
+ iport->active_oxid_fdmi_plogi);
+ if (frame == NULL)
+ return;
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ } else {
+ if (iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING) {
+ frame = fdls_alloc_init_fdmi_abts_frame(iport,
+ iport->active_oxid_fdmi_rhba);
+ if (frame == NULL)
+ return;
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ }
+ if (iport->fabric.fdmi_pending & FDLS_FDMI_RPA_PENDING) {
+ frame = fdls_alloc_init_fdmi_abts_frame(iport,
+ iport->active_oxid_fdmi_rpa);
+ if (frame == NULL) {
+ if (iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING)
+ goto arm_timer;
+ else
+ return;
+ }
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ }
+ }
+
+arm_timer:
+ fdmi_tov = jiffies + msecs_to_jiffies(2 * iport->e_d_tov);
+ mod_timer(&iport->fabric.fdmi_timer, round_jiffies(fdmi_tov));
+ iport->fabric.fdmi_pending |= FDLS_FDMI_ABORT_PENDING;
+}
+
+static void fdls_send_fabric_flogi(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_flogi *pflogi;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_flogi);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send FLOGI");
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+
+ pflogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *pflogi = (struct fc_std_flogi) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_d_id = {0xFF, 0xFF, 0xFE},
+ .fh_type = FC_TYPE_ELS, .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
+ .els.fl_cmd = ELS_FLOGI,
+ .els.fl_csp = {.sp_hi_ver = FNIC_FC_PH_VER_HI,
+ .sp_lo_ver = FNIC_FC_PH_VER_LO,
+ .sp_bb_cred = cpu_to_be16(FNIC_FC_B2B_CREDIT),
+ .sp_bb_data = cpu_to_be16(FNIC_FC_B2B_RDF_SZ)},
+ .els.fl_cssp[2].cp_class = cpu_to_be16(FC_CPC_VALID | FC_CPC_SEQ)
+ };
+
+ FNIC_STD_SET_NPORT_NAME(&pflogi->els.fl_wwpn, iport->wwpn);
+ FNIC_STD_SET_NODE_NAME(&pflogi->els.fl_wwnn, iport->wwnn);
+ FNIC_LOGI_SET_RDF_SIZE(pflogi->els, iport->max_payload_size);
+ FNIC_LOGI_SET_R_A_TOV(pflogi->els, iport->r_a_tov);
+ FNIC_LOGI_SET_E_D_TOV(pflogi->els, iport->e_d_tov);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_FLOGI,
+ &iport->active_oxid_fabric_req);
+
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send FLOGI",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+ FNIC_STD_SET_OX_ID(pflogi->fchdr, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send fabric FLOGI with oxid: 0x%x", iport->fcid,
+ oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ atomic64_inc(&iport->iport_stats.fabric_flogi_sent);
+err_out:
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
+}
+
+static void fdls_send_fabric_plogi(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_flogi *pplogi;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_flogi);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send PLOGI");
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+
+ pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_plogi_frame(frame, iport);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_PLOGI,
+ &iport->active_oxid_fabric_req);
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send fabric PLOGI",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+ FNIC_STD_SET_OX_ID(pplogi->fchdr, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send fabric PLOGI with oxid: 0x%x", iport->fcid,
+ oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ atomic64_inc(&iport->iport_stats.fabric_plogi_sent);
+
+err_out:
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
+}
+
+static void fdls_send_fdmi_plogi(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_flogi *pplogi;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_flogi);
+ uint8_t d_id[3];
+ u64 fdmi_tov;
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send FDMI PLOGI");
+ goto err_out;
+ }
+
+ pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_plogi_frame(frame, iport);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FDMI_PLOGI,
+ &iport->active_oxid_fdmi_plogi);
+
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send FDMI PLOGI",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ goto err_out;
+ }
+ FNIC_STD_SET_OX_ID(pplogi->fchdr, oxid);
+
+ hton24(d_id, FC_FID_MGMT_SERV);
+ FNIC_STD_SET_D_ID(pplogi->fchdr, d_id);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send FDMI PLOGI with oxid: 0x%x",
+ iport->fcid, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+err_out:
+ fdmi_tov = jiffies + msecs_to_jiffies(2 * iport->e_d_tov);
+ mod_timer(&iport->fabric.fdmi_timer, round_jiffies(fdmi_tov));
+ iport->fabric.fdmi_pending = FDLS_FDMI_PLOGI_PENDING;
+}
+
+static void fdls_send_rpn_id(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_rpn_id *prpn_id;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_rpn_id);
+ uint8_t fcid[3];
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send RPN_ID");
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+
+ prpn_id = (struct fc_std_rpn_id *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *prpn_id = (struct fc_std_rpn_id) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL,
+ .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
+ .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR,
+ .ct_fs_subtype = FC_NS_SUBTYPE,
+ .ct_cmd = cpu_to_be16(FC_NS_RPN_ID)}
+ };
+
+ hton24(fcid, iport->fcid);
+ FNIC_STD_SET_S_ID(prpn_id->fchdr, fcid);
+
+ FNIC_STD_SET_PORT_ID(prpn_id->rpn_id, fcid);
+ FNIC_STD_SET_PORT_NAME(prpn_id->rpn_id, iport->wwpn);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_RPN,
+ &iport->active_oxid_fabric_req);
+
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send RPN_ID",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+ FNIC_STD_SET_OX_ID(prpn_id->fchdr, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send RPN ID with oxid: 0x%x", iport->fcid,
+ oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+err_out:
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
+}
+
+static void fdls_send_scr(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_scr *pscr;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_scr);
+ uint8_t fcid[3];
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send SCR");
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+
+ pscr = (struct fc_std_scr *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *pscr = (struct fc_std_scr) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ,
+ .fh_d_id = {0xFF, 0xFF, 0xFD}, .fh_type = FC_TYPE_ELS,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
+ .scr = {.scr_cmd = ELS_SCR,
+ .scr_reg_func = ELS_SCRF_FULL}
+ };
+
+ hton24(fcid, iport->fcid);
+ FNIC_STD_SET_S_ID(pscr->fchdr, fcid);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_SCR,
+ &iport->active_oxid_fabric_req);
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send SCR",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+ FNIC_STD_SET_OX_ID(pscr->fchdr, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send SCR with oxid: 0x%x", iport->fcid,
+ oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ atomic64_inc(&iport->iport_stats.fabric_scr_sent);
+
+err_out:
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
+}
+
+static void fdls_send_gpn_ft(struct fnic_iport_s *iport, int fdls_state)
+{
+ uint8_t *frame;
+ struct fc_std_gpn_ft *pgpn_ft;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_gpn_ft);
+ uint8_t fcid[3];
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send GPN FT");
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+
+ pgpn_ft = (struct fc_std_gpn_ft *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *pgpn_ft = (struct fc_std_gpn_ft) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL,
+ .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
+ .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR,
+ .ct_fs_subtype = FC_NS_SUBTYPE,
+ .ct_cmd = cpu_to_be16(FC_NS_GPN_FT)},
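+ /* FC-4 type 0x08 (FCP): ask the name server only for FCP-capable ports */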
+ .gpn_ft.fn_fc4_type = 0x08
+ };
+
+ hton24(fcid, iport->fcid);
+ FNIC_STD_SET_S_ID(pgpn_ft->fchdr, fcid);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_GPN_FT,
+ &iport->active_oxid_fabric_req);
+
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send GPN FT",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+ FNIC_STD_SET_OX_ID(pgpn_ft->fchdr, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send GPN FT with oxid: 0x%x", iport->fcid,
+ oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+err_out:
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
+ fdls_set_state((&iport->fabric), fdls_state);
+}
+
+static void
+fdls_send_tgt_adisc(struct fnic_iport_s *iport, struct fnic_tport_s *tport)
+{
+ uint8_t *frame;
+ struct fc_std_els_adisc *padisc;
+ uint8_t s_id[3];
+ uint8_t d_id[3];
+ uint16_t oxid;
+ struct fnic *fnic = iport->fnic;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_els_adisc);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send TGT ADISC");
+ tport->flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+
+ padisc = (struct fc_std_els_adisc *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+
+ hton24(s_id, iport->fcid);
+ hton24(d_id, tport->fcid);
+ memcpy(padisc->els.adisc_port_id, s_id, 3);
+ FNIC_STD_SET_S_ID(padisc->fchdr, s_id);
+ FNIC_STD_SET_D_ID(padisc->fchdr, d_id);
+
+ FNIC_STD_SET_F_CTL(padisc->fchdr, FNIC_ELS_REQ_FCTL << 16);
+ FNIC_STD_SET_R_CTL(padisc->fchdr, FC_RCTL_ELS_REQ);
+ FNIC_STD_SET_TYPE(padisc->fchdr, FC_TYPE_ELS);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_ADISC, &tport->active_oxid);
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send TGT ADISC",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ tport->flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+ FNIC_STD_SET_OX_ID(padisc->fchdr, oxid);
+ FNIC_STD_SET_RX_ID(padisc->fchdr, FNIC_UNASSIGNED_RXID);
+
+ tport->flags &= ~FNIC_FDLS_TGT_ABORT_ISSUED;
+
+ FNIC_STD_SET_NPORT_NAME(&padisc->els.adisc_wwpn,
+ iport->wwpn);
+ FNIC_STD_SET_NODE_NAME(&padisc->els.adisc_wwnn,
+ iport->wwnn);
+
+ padisc->els.adisc_cmd = ELS_ADISC;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send ADISC to tgt fcid: 0x%x",
+ iport->fcid, tport->fcid);
+
+ atomic64_inc(&iport->iport_stats.tport_adisc_sent);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+err_out:
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_tport_timer(iport, tport, 2 * iport->e_d_tov);
+}
+
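+/*
+ * Take a tport offline: mark it terminating, cancel its retry timer,
+ * reset its outstanding exchanges, then either queue an rport-delete
+ * event for the SCSI transport or free the tport directly if it was
+ * never registered. Returns false if the tport is already
+ * offlining/offline or the delete event cannot be allocated.
+ */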
+bool fdls_delete_tport(struct fnic_iport_s *iport, struct fnic_tport_s *tport)
+{
+ struct fnic_tport_event_s *tport_del_evt;
+ struct fnic *fnic = iport->fnic;
+
+ if ((tport->state == FDLS_TGT_STATE_OFFLINING)
+ || (tport->state == FDLS_TGT_STATE_OFFLINE)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport fcid 0x%x: tport state is offlining/offline\n",
+ tport->fcid);
+ return false;
+ }
+
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINING);
+ /*
+ * By setting this flag, the tport will not be seen in a look-up
+ * during RSCN handling. Even if we move to a multithreaded model,
+ * this tport will be destroyed and a new RSCN will have to create
+ * a new one.
+ */
+ tport->flags |= FNIC_FDLS_TPORT_TERMINATING;
+
+ if (tport->timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport fcid 0x%x: Canceling disc timer\n",
+ tport->fcid);
+ fnic_del_tport_timer_sync(fnic, tport);
+ tport->timer_pending = 0;
+ }
+
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ fnic_rport_exch_reset(iport->fnic, tport->fcid);
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+
+ if (tport->flags & FNIC_FDLS_SCSI_REGISTERED) {
+ tport_del_evt =
+ kzalloc(sizeof(struct fnic_tport_event_s), GFP_ATOMIC);
+ if (!tport_del_evt) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Failed to allocate memory for tport fcid: 0x%0x\n",
+ tport->fcid);
+ return false;
+ }
+ tport_del_evt->event = TGT_EV_RPORT_DEL;
+ tport_del_evt->arg1 = (void *) tport;
+ list_add_tail(&tport_del_evt->links, &fnic->tport_event_list);
+ queue_work(fnic_event_queue, &fnic->tport_work);
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport 0x%x not reg with scsi_transport. Freeing locally",
+ tport->fcid);
+ list_del(&tport->links);
+ kfree(tport);
+ }
+ return true;
+}
+
+static void
+fdls_send_tgt_plogi(struct fnic_iport_s *iport, struct fnic_tport_s *tport)
+{
+ uint8_t *frame;
+ struct fc_std_flogi *pplogi;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_flogi);
+ uint8_t d_id[3];
+ uint32_t timeout;
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send TGT PLOGI");
+ tport->flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+
+ pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_plogi_frame(frame, iport);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_PLOGI, &tport->active_oxid);
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate oxid to send PLOGI to fcid: 0x%x",
+ iport->fcid, tport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ tport->flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+ FNIC_STD_SET_OX_ID(pplogi->fchdr, oxid);
+
+ tport->flags &= ~FNIC_FDLS_TGT_ABORT_ISSUED;
+
+ hton24(d_id, tport->fcid);
+ FNIC_STD_SET_D_ID(pplogi->fchdr, d_id);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send tgt PLOGI to tgt: 0x%x with oxid: 0x%x",
+ iport->fcid, tport->fcid, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ atomic64_inc(&iport->iport_stats.tport_plogi_sent);
+
+err_out:
+ timeout = max(2 * iport->e_d_tov, iport->plogi_timeout);
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_tport_timer(iport, tport, timeout);
+}
+
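+/*
+ * Derive the max receive data field size from a PLOGI accept: the
+ * lesser of the common service parameter B2B RDF size and the class-3
+ * RDF size.
+ */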
+static uint16_t
+fnic_fc_plogi_rsp_rdf(struct fnic_iport_s *iport,
+ struct fc_std_flogi *plogi_rsp)
+{
+ uint16_t b2b_rdf_size =
+ be16_to_cpu(FNIC_LOGI_RDF_SIZE(plogi_rsp->els));
+ uint16_t spc3_rdf_size =
+ be16_to_cpu(plogi_rsp->els.fl_cssp[2].cp_rdfs) & FNIC_FC_C3_RDF;
+ struct fnic *fnic = iport->fnic;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "MFS: b2b_rdf_size: 0x%x spc3_rdf_size: 0x%x",
+ b2b_rdf_size, spc3_rdf_size);
+
+ return min(b2b_rdf_size, spc3_rdf_size);
+}
+
+static void fdls_send_register_fc4_types(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_rft_id *prft_id;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_rft_id);
+ uint8_t fcid[3];
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send RFT");
+ return;
+ }
+
+ prft_id = (struct fc_std_rft_id *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *prft_id = (struct fc_std_rft_id) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL,
+ .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
+ .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR,
+ .ct_fs_subtype = FC_NS_SUBTYPE,
+ .ct_cmd = cpu_to_be16(FC_NS_RFT_ID)}
+ };
+
+ hton24(fcid, iport->fcid);
+ FNIC_STD_SET_S_ID(prft_id->fchdr, fcid);
+ FNIC_STD_SET_PORT_ID(prft_id->rft_id, fcid);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_RFT,
+ &iport->active_oxid_fabric_req);
+
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send RFT",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ return;
+ }
+ FNIC_STD_SET_OX_ID(prft_id->fchdr, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send RFT with oxid: 0x%x", iport->fcid,
+ oxid);
+
+ prft_id->rft_id.fr_fts.ff_type_map[0] =
+ cpu_to_be32(1 << FC_TYPE_FCP);
+
+ prft_id->rft_id.fr_fts.ff_type_map[1] =
+ cpu_to_be32(1 << (FC_TYPE_CT % FC_NS_BPW));
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
+}
+
+static void fdls_send_register_fc4_features(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_rff_id *prff_id;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_rff_id);
+ uint8_t fcid[3];
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send RFF");
+ return;
+ }
+
+ prff_id = (struct fc_std_rff_id *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *prff_id = (struct fc_std_rff_id) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL,
+ .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
+ .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR,
+ .ct_fs_subtype = FC_NS_SUBTYPE,
+ .ct_cmd = cpu_to_be16(FC_NS_RFF_ID)},
+ .rff_id.fr_feat = 0x2,
+ .rff_id.fr_type = FC_TYPE_FCP
+ };
+
+ hton24(fcid, iport->fcid);
+ FNIC_STD_SET_S_ID(prff_id->fchdr, fcid);
+ FNIC_STD_SET_PORT_ID(prff_id->rff_id, fcid);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_RFF,
+ &iport->active_oxid_fabric_req);
+
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send RFF",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ return;
+ }
+ FNIC_STD_SET_OX_ID(prff_id->fchdr, oxid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send RFF with oxid: 0x%x", iport->fcid,
+ oxid);
+
+ prff_id->rff_id.fr_type = FC_TYPE_FCP;
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
+}
+
+static void
+fdls_send_tgt_prli(struct fnic_iport_s *iport, struct fnic_tport_s *tport)
+{
+ uint8_t *frame;
+ struct fc_std_els_prli *pprli;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_els_prli);
+ uint8_t s_id[3];
+ uint8_t d_id[3];
+ uint32_t timeout;
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send TGT PRLI");
+ tport->flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+
+ pprli = (struct fc_std_els_prli *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *pprli = (struct fc_std_els_prli) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_type = FC_TYPE_ELS,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
+ .els_prli = {.prli_cmd = ELS_PRLI,
+ .prli_spp_len = 16,
+ .prli_len = cpu_to_be16(0x14)},
+ .sp = {.spp_type = 0x08, .spp_flags = 0x0020,
+ .spp_params = cpu_to_be32(0xA2)}
+ };
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_PRLI, &tport->active_oxid);
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send TGT PRLI to 0x%x",
+ iport->fcid, tport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ tport->flags |= FNIC_FDLS_RETRY_FRAME;
+ goto err_out;
+ }
+
+ tport->flags &= ~FNIC_FDLS_TGT_ABORT_ISSUED;
+
+ hton24(s_id, iport->fcid);
+ hton24(d_id, tport->fcid);
+
+ FNIC_STD_SET_OX_ID(pprli->fchdr, oxid);
+ FNIC_STD_SET_S_ID(pprli->fchdr, s_id);
+ FNIC_STD_SET_D_ID(pprli->fchdr, d_id);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send PRLI to tgt: 0x%x with oxid: 0x%x",
+ iport->fcid, tport->fcid, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ atomic64_inc(&iport->iport_stats.tport_prli_sent);
+
+err_out:
+ timeout = max(2 * iport->e_d_tov, iport->plogi_timeout);
+ /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */
+ fdls_start_tport_timer(iport, tport, timeout);
+}
+
+/**
+ * fdls_send_fabric_logo - Send a fabric LOGO to the FCF
+ * @iport: Handle to fnic iport
+ *
+ * This function does not change or check the fabric state.
+ * It is the caller's responsibility to set the appropriate iport fabric
+ * state when this is called. Normally it is FDLS_STATE_FABRIC_LOGO.
+ * This is currently assumed to be called with the fnic lock held.
+ */
+void fdls_send_fabric_logo(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_logo *plogo;
+ struct fnic *fnic = iport->fnic;
+ uint8_t d_id[3];
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_logo);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send fabric LOGO");
+ return;
+ }
+
+ plogo = (struct fc_std_logo *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_logo_frame(frame, iport);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_LOGO,
+ &iport->active_oxid_fabric_req);
+
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send fabric LOGO",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ return;
+ }
+ FNIC_STD_SET_OX_ID(plogo->fchdr, oxid);
+
+ hton24(d_id, FC_FID_FLOGI);
+ FNIC_STD_SET_D_ID(plogo->fchdr, d_id);
+
+ iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send fabric LOGO with oxid: 0x%x",
+ iport->fcid, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+ fdls_start_fabric_timer(iport, 2 * iport->e_d_tov);
+}
+
+/**
+ * fdls_tgt_logout - Send a LOGO to the remote port
+ * @iport: Handle to fnic iport
+ * @tport: Handle to remote port
+ *
+ * This function does not change or check the fabric/tport state.
+ * It is the caller's responsibility to set the appropriate tport/fabric
+ * state when this is called. Normally that is fdls_tgt_state_plogo.
+ * This can also be used to send a LOGO to the name server process,
+ * not just to target processes.
+ */
+void fdls_tgt_logout(struct fnic_iport_s *iport, struct fnic_tport_s *tport)
+{
+ uint8_t *frame;
+ struct fc_std_logo *plogo;
+ struct fnic *fnic = iport->fnic;
+ uint8_t d_id[3];
+ uint16_t oxid;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_logo);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send fabric LOGO");
+ return;
+ }
+
+ plogo = (struct fc_std_logo *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_logo_frame(frame, iport);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_LOGO, &tport->active_oxid);
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send tgt LOGO",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ return;
+ }
+ FNIC_STD_SET_OX_ID(plogo->fchdr, oxid);
+
+ hton24(d_id, tport->fcid);
+ FNIC_STD_SET_D_ID(plogo->fchdr, d_id);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send tgt LOGO with oxid: 0x%x",
+ iport->fcid, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+
+ atomic64_inc(&iport->iport_stats.tport_logo_sent);
+}
+
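+/*
+ * Walk the tport list and start per-target login: ADISC for ports
+ * flagged by an RSCN, PLOGI for newly discovered ports. Ports that are
+ * offlining/offline are skipped, and the walk stops early on link down
+ * or if the iport is no longer ready.
+ */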
+static void fdls_tgt_discovery_start(struct fnic_iport_s *iport)
+{
+ struct fnic_tport_s *tport, *next;
+ u32 old_link_down_cnt = iport->fnic->link_down_cnt;
+ struct fnic *fnic = iport->fnic;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Starting FDLS target discovery", iport->fcid);
+
+ list_for_each_entry_safe(tport, next, &iport->tport_list, links) {
+ if ((old_link_down_cnt != iport->fnic->link_down_cnt)
+ || (iport->state != FNIC_IPORT_STATE_READY)) {
+ break;
+ }
+ /* If we marked the tport as deleted due to GPN_FT,
+ * we should not send ADISC anymore.
+ */
+ if ((tport->state == FDLS_TGT_STATE_OFFLINING) ||
+ (tport->state == FDLS_TGT_STATE_OFFLINE))
+ continue;
+
+ /* For tports which have received RSCN */
+ if (tport->flags & FNIC_FDLS_TPORT_SEND_ADISC) {
+ tport->retry_counter = 0;
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_ADISC);
+ tport->flags &= ~FNIC_FDLS_TPORT_SEND_ADISC;
+ fdls_send_tgt_adisc(iport, tport);
+ continue;
+ }
+ if (fdls_get_tport_state(tport) != FDLS_TGT_STATE_INIT) {
+ /* Not a new port, skip */
+ continue;
+ }
+ tport->retry_counter = 0;
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_PLOGI);
+ fdls_send_tgt_plogi(iport, tport);
+ }
+ fdls_set_state((&iport->fabric), FDLS_STATE_TGT_DISCOVERY);
+}
+
+/*
+ * Restart the IT nexus if we received an out-of-sequence PLOGI/PRLI
+ * response from the target.
+ * The memory for the new tport structure is allocated
+ * inside fdls_create_tport and added to the iport's tport list.
+ * It will be freed later during tport_offline/linkdown or module
+ * unload. The new_tport pointer can safely go out of scope since the
+ * memory it points to is guaranteed to be freed later.
+ */
+static void fdls_target_restart_nexus(struct fnic_tport_s *tport)
+{
+ struct fnic_iport_s *iport = tport->iport;
+ struct fnic_tport_s *new_tport = NULL;
+ uint32_t fcid;
+ uint64_t wwpn;
+ int nexus_restart_count;
+ struct fnic *fnic = iport->fnic;
+ bool retval = true;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport fcid: 0x%x state: %d restart_count: %d",
+ tport->fcid, tport->state, tport->nexus_restart_count);
+
+ fcid = tport->fcid;
+ wwpn = tport->wwpn;
+ nexus_restart_count = tport->nexus_restart_count;
+
+ retval = fdls_delete_tport(iport, tport);
+ if (retval != true) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Error deleting tport: 0x%x", fcid);
+ return;
+ }
+
+ if (nexus_restart_count >= FNIC_TPORT_MAX_NEXUS_RESTART) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Exceeded nexus restart retries tport: 0x%x",
+ fcid);
+ return;
+ }
+
+ /*
+ * Allocate memory for the new tport and add it to
+ * iport's tport list.
+ * This memory will be freed during tport_offline/linkdown
+ * or module unload. The pointer new_tport is safe to go
+ * out of scope when this function returns, since the memory
+ * it is pointing to is guaranteed to be freed later
+ * as mentioned above.
+ */
+ new_tport = fdls_create_tport(iport, fcid, wwpn);
+ if (!new_tport) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Error creating new tport: 0x%x", fcid);
+ return;
+ }
+
+ new_tport->nexus_restart_count = nexus_restart_count + 1;
+ fdls_send_tgt_plogi(iport, new_tport);
+ fdls_set_tport_state(new_tport, FDLS_TGT_STATE_PLOGI);
+}
+
+struct fnic_tport_s *fnic_find_tport_by_fcid(struct fnic_iport_s *iport,
+ uint32_t fcid)
+{
+ struct fnic_tport_s *tport, *next;
+
+ list_for_each_entry_safe(tport, next, &(iport->tport_list), links) {
+ if ((tport->fcid == fcid)
+ && !(tport->flags & FNIC_FDLS_TPORT_TERMINATING))
+ return tport;
+ }
+ return NULL;
+}
+
+static struct fnic_tport_s *fdls_create_tport(struct fnic_iport_s *iport,
+ uint32_t fcid, uint64_t wwpn)
+{
+ struct fnic_tport_s *tport;
+ struct fnic *fnic = iport->fnic;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS create tport: fcid: 0x%x wwpn: 0x%llx", fcid, wwpn);
+
+ tport = kzalloc(sizeof(struct fnic_tport_s), GFP_ATOMIC);
+ if (!tport) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Memory allocation failure while creating tport: 0x%x\n",
+ fcid);
+ return NULL;
+ }
+
+ tport->max_payload_size = FNIC_FCOE_MAX_FRAME_SZ;
+ tport->r_a_tov = FC_DEF_R_A_TOV;
+ tport->e_d_tov = FC_DEF_E_D_TOV;
+ tport->fcid = fcid;
+ tport->wwpn = wwpn;
+ tport->iport = iport;
+
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "Need to setup tport timer callback");
+
+ timer_setup(&tport->retry_timer, fdls_tport_timer_callback, 0);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Added tport 0x%x", tport->fcid);
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_INIT);
+ list_add_tail(&tport->links, &iport->tport_list);
+ atomic_set(&tport->in_flight, 0);
+ return tport;
+}
+
+struct fnic_tport_s *fnic_find_tport_by_wwpn(struct fnic_iport_s *iport,
+ uint64_t wwpn)
+{
+ struct fnic_tport_s *tport, *next;
+
+ list_for_each_entry_safe(tport, next, &(iport->tport_list), links) {
+ if ((tport->wwpn == wwpn)
+ && !(tport->flags & FNIC_FDLS_TPORT_TERMINATING))
+ return tport;
+ }
+ return NULL;
+}
+
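+/*
+ * Append one FDMI attribute (type, length, value) at offset *off within
+ * the attribute block and advance *off past the entry.
+ */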
+static void
+fnic_fdmi_attr_set(void *attr_start, u16 type, u16 len,
+ void *data, u32 *off)
+{
+ u16 size = len + FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ struct fc_fdmi_attr_entry *fdmi_attr = (struct fc_fdmi_attr_entry *)
+ ((u8 *)attr_start + *off);
+
+ put_unaligned_be16(type, &fdmi_attr->type);
+ put_unaligned_be16(size, &fdmi_attr->len);
+ memcpy(fdmi_attr->value, data, len);
+ *off += size;
+}
+
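+/*
+ * Build and send an FDMI RHBA to the management server, carrying the
+ * HBA attributes: node name, manufacturer, serial number, model, model
+ * description and hardware/driver/ROM/firmware versions.
+ */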
+static void fdls_fdmi_register_hba(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_fdmi_rhba *prhba;
+ struct fc_fdmi_attr_entry *fdmi_attr;
+ uint8_t fcid[3];
+ int err;
+ struct fnic *fnic = iport->fnic;
+ struct vnic_devcmd_fw_info *fw_info = NULL;
+ uint16_t oxid;
+ u32 attr_off_bytes, len;
+ u8 data[64];
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET;
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send FDMI RHBA");
+ return;
+ }
+
+ prhba = (struct fc_std_fdmi_rhba *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *prhba = (struct fc_std_fdmi_rhba) {
+ .fchdr = {
+ .fh_r_ctl = FC_RCTL_DD_UNSOL_CTL,
+ .fh_d_id = {0xFF, 0xFF, 0xFA},
+ .fh_type = FC_TYPE_CT,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)
+ },
+ .fc_std_ct_hdr = {
+ .ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_MGMT,
+ .ct_fs_subtype = FC_FDMI_SUBTYPE,
+ .ct_cmd = cpu_to_be16(FC_FDMI_RHBA)
+ },
+ };
+
+ hton24(fcid, iport->fcid);
+ FNIC_STD_SET_S_ID(prhba->fchdr, fcid);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FDMI_RHBA,
+ &iport->active_oxid_fdmi_rhba);
+
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send FDMI RHBA",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ return;
+ }
+ FNIC_STD_SET_OX_ID(prhba->fchdr, oxid);
+
+ put_unaligned_be64(iport->wwpn, &prhba->rhba.hbaid.id);
+ put_unaligned_be32(FNIC_FDMI_NUM_PORTS, &prhba->rhba.port.numport);
+ put_unaligned_be64(iport->wwpn, &prhba->rhba.port.port[0].portname);
+ put_unaligned_be32(FNIC_FDMI_NUM_HBA_ATTRS,
+ &prhba->rhba.hba_attrs.numattrs);
+
+ fdmi_attr = prhba->rhba.hba_attrs.attr;
+ attr_off_bytes = 0;
+
+ put_unaligned_be64(iport->wwnn, data);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_NODE_NAME,
+ FNIC_FDMI_NN_LEN, data, &attr_off_bytes);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "NN set, off=%d", attr_off_bytes);
+
+ strscpy_pad(data, FNIC_FDMI_MANUFACTURER, FNIC_FDMI_MANU_LEN);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MANUFACTURER,
+ FNIC_FDMI_MANU_LEN, data, &attr_off_bytes);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "MFG set <%s>, off=%d", data, attr_off_bytes);
+
+ err = vnic_dev_fw_info(fnic->vdev, &fw_info);
+ if (!err) {
+ strscpy_pad(data, fw_info->hw_serial_number,
+ FNIC_FDMI_SERIAL_LEN);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_SERIAL_NUMBER,
+ FNIC_FDMI_SERIAL_LEN, data, &attr_off_bytes);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "SERIAL set <%s>, off=%d", data, attr_off_bytes);
+
+ }
+
+ if (fnic->subsys_desc_len >= FNIC_FDMI_MODEL_LEN)
+ fnic->subsys_desc_len = FNIC_FDMI_MODEL_LEN - 1;
+ strscpy_pad(data, fnic->subsys_desc, FNIC_FDMI_MODEL_LEN);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MODEL, FNIC_FDMI_MODEL_LEN,
+ data, &attr_off_bytes);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "MODEL set <%s>, off=%d", data, attr_off_bytes);
+
+ strscpy_pad(data, FNIC_FDMI_MODEL_DESCRIPTION, FNIC_FDMI_MODEL_DES_LEN);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MODEL_DES,
+ FNIC_FDMI_MODEL_DES_LEN, data, &attr_off_bytes);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "MODEL_DESC set <%s>, off=%d", data, attr_off_bytes);
+
+ if (!err) {
+ strscpy_pad(data, fw_info->hw_version, FNIC_FDMI_HW_VER_LEN);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_HARDWARE_VERSION,
+ FNIC_FDMI_HW_VER_LEN, data, &attr_off_bytes);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "HW_VER set <%s>, off=%d", data, attr_off_bytes);
+
+ }
+
+ strscpy_pad(data, DRV_VERSION, FNIC_FDMI_DR_VER_LEN);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_DRIVER_VERSION,
+ FNIC_FDMI_DR_VER_LEN, data, &attr_off_bytes);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "DRV_VER set <%s>, off=%d", data, attr_off_bytes);
+
+ strscpy_pad(data, "N/A", FNIC_FDMI_ROM_VER_LEN);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_ROM_VERSION,
+ FNIC_FDMI_ROM_VER_LEN, data, &attr_off_bytes);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ROM_VER set <%s>, off=%d", data, attr_off_bytes);
+
+ if (!err) {
+ strscpy_pad(data, fw_info->fw_version, FNIC_FDMI_FW_VER_LEN);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_FIRMWARE_VERSION,
+ FNIC_FDMI_FW_VER_LEN, data, &attr_off_bytes);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FW_VER set <%s>, off=%d", data, attr_off_bytes);
+ }
+
+ len = sizeof(struct fc_std_fdmi_rhba) + attr_off_bytes;
+ frame_size += len;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send FDMI RHBA with oxid: 0x%x fs: %d", iport->fcid,
+ oxid, frame_size);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ iport->fabric.fdmi_pending |= FDLS_FDMI_REG_HBA_PENDING;
+}
+
+static void fdls_fdmi_register_pa(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fc_std_fdmi_rpa *prpa;
+ struct fc_fdmi_attr_entry *fdmi_attr;
+ uint8_t fcid[3];
+ struct fnic *fnic = iport->fnic;
+ u32 port_speed_bm;
+ u32 port_speed = vnic_dev_port_speed(fnic->vdev);
+ uint16_t oxid;
+ u32 attr_off_bytes, len;
+ u8 tmp_data[16], data[64];
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET;
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send FDMI RPA");
+ return;
+ }
+
+ prpa = (struct fc_std_fdmi_rpa *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *prpa = (struct fc_std_fdmi_rpa) {
+ .fchdr = {
+ .fh_r_ctl = FC_RCTL_DD_UNSOL_CTL,
+ .fh_d_id = {0xFF, 0xFF, 0xFA},
+ .fh_type = FC_TYPE_CT,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)
+ },
+ .fc_std_ct_hdr = {
+ .ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_MGMT,
+ .ct_fs_subtype = FC_FDMI_SUBTYPE,
+ .ct_cmd = cpu_to_be16(FC_FDMI_RPA)
+ },
+ };
+
+ hton24(fcid, iport->fcid);
+ FNIC_STD_SET_S_ID(prpa->fchdr, fcid);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FDMI_RPA,
+ &iport->active_oxid_fdmi_rpa);
+
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate OXID to send FDMI RPA",
+ iport->fcid);
+ mempool_free(frame, fnic->frame_pool);
+ return;
+ }
+ FNIC_STD_SET_OX_ID(prpa->fchdr, oxid);
+
+ put_unaligned_be64(iport->wwpn, &prpa->rpa.port.portname);
+ put_unaligned_be32(FNIC_FDMI_NUM_PORT_ATTRS,
+ &prpa->rpa.hba_attrs.numattrs);
+
+ /* MDS does not support GigE speed.
+ * Bit-shift the standard definitions from scsi_transport_fc.h to
+ * match the FC spec.
+ */
+ switch (port_speed) {
+ case DCEM_PORTSPEED_10G:
+ case DCEM_PORTSPEED_20G:
+ /* There is no bit for 20G */
+ port_speed_bm = FC_PORTSPEED_10GBIT << PORT_SPEED_BIT_14;
+ break;
+ case DCEM_PORTSPEED_25G:
+ port_speed_bm = FC_PORTSPEED_25GBIT << PORT_SPEED_BIT_8;
+ break;
+ case DCEM_PORTSPEED_40G:
+ case DCEM_PORTSPEED_4x10G:
+ port_speed_bm = FC_PORTSPEED_40GBIT << PORT_SPEED_BIT_9;
+ break;
+ case DCEM_PORTSPEED_100G:
+ port_speed_bm = FC_PORTSPEED_100GBIT << PORT_SPEED_BIT_8;
+ break;
+ default:
+ port_speed_bm = FC_PORTSPEED_1GBIT << PORT_SPEED_BIT_15;
+ break;
+ }
+ attr_off_bytes = 0;
+
+ fdmi_attr = prpa->rpa.hba_attrs.attr;
+
+ put_unaligned_be64(iport->wwnn, data);
+
+ memset(data, 0, FNIC_FDMI_FC4_LEN);
+ data[2] = 1;
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_FC4_TYPES,
+ FNIC_FDMI_FC4_LEN, data, &attr_off_bytes);
+
+ put_unaligned_be32(port_speed_bm, data);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_SUPPORTED_SPEEDS,
+ FNIC_FDMI_SUPP_SPEED_LEN, data, &attr_off_bytes);
+
+ put_unaligned_be32(port_speed_bm, data);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_CURRENT_SPEED,
+ FNIC_FDMI_CUR_SPEED_LEN, data, &attr_off_bytes);
+
+ put_unaligned_be32(FNIC_FDMI_MFS, data);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MAX_FRAME_SIZE,
+ FNIC_FDMI_MFS_LEN, data, &attr_off_bytes);
+
+ snprintf(tmp_data, FNIC_FDMI_OS_NAME_LEN - 1, "host%d",
+ fnic->host->host_no);
+ strscpy_pad(data, tmp_data, FNIC_FDMI_OS_NAME_LEN);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_OS_NAME,
+ FNIC_FDMI_OS_NAME_LEN, data, &attr_off_bytes);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "OS name set <%s>, off=%d", data, attr_off_bytes);
+
+ sprintf(fc_host_system_hostname(fnic->host), "%s", utsname()->nodename);
+ strscpy_pad(data, fc_host_system_hostname(fnic->host),
+ FNIC_FDMI_HN_LEN);
+ fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_HOST_NAME,
+ FNIC_FDMI_HN_LEN, data, &attr_off_bytes);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Host name set <%s>, off=%d", data, attr_off_bytes);
+
+ len = sizeof(struct fc_std_fdmi_rpa) + attr_off_bytes;
+ frame_size += len;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send FDMI RPA with oxid: 0x%x fs: %d", iport->fcid,
+ oxid, frame_size);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+ iport->fabric.fdmi_pending |= FDLS_FDMI_RPA_PENDING;
+}
+
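+/*
+ * Fabric retry timer. On expiry, retry the outstanding fabric request,
+ * issue an ABTS for it, or fall back to restarting fabric login,
+ * depending on the current fabric state and the retry/abort flags.
+ */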
+void fdls_fabric_timer_callback(struct timer_list *t)
+{
+ struct fnic_fdls_fabric_s *fabric = from_timer(fabric, t, retry_timer);
+ struct fnic_iport_s *iport =
+ container_of(fabric, struct fnic_iport_s, fabric);
+ struct fnic *fnic = iport->fnic;
+ unsigned long flags;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tp: %d fab state: %d fab retry counter: %d max_flogi_retries: %d",
+ iport->fabric.timer_pending, iport->fabric.state,
+ iport->fabric.retry_counter, iport->max_flogi_retries);
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+
+ if (!iport->fabric.timer_pending) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+
+ if (iport->fabric.del_timer_inprogress) {
+ iport->fabric.del_timer_inprogress = 0;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fabric_del_timer inprogress(%d). Skip timer cb",
+ iport->fabric.del_timer_inprogress);
+ return;
+ }
+
+ iport->fabric.timer_pending = 0;
+
+ /* The fabric state indicates which frames have timed out, and we retry */
+ switch (iport->fabric.state) {
+ case FDLS_STATE_FABRIC_FLOGI:
+ /* FLOGI received an LS_RJT with busy; retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < iport->max_flogi_retries)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_fabric_flogi(iport);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) {
+ /* FLOGI has timed out (2 * e_d_tov); send ABTS */
+ fdls_send_fabric_abts(iport);
+ } else {
+ /* ABTS has timed out
+ * Mark the OXID to be freed after 2 * r_a_tov and retry the req
+ */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ if (iport->fabric.retry_counter < iport->max_flogi_retries) {
+ iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED;
+ fdls_send_fabric_flogi(iport);
+ } else
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Exceeded max FLOGI retries");
+ }
+ break;
+ case FDLS_STATE_FABRIC_PLOGI:
+ /* PLOGI received an LS_RJT with busy; retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < iport->max_plogi_retries)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_fabric_plogi(iport);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) {
+ /* PLOGI has timed out (2 * e_d_tov); send ABTS */
+ fdls_send_fabric_abts(iport);
+ } else {
+ /* ABTS has timed out
+ * Mark the OXID to be freed after 2 * r_a_tov and retry the req
+ */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ if (iport->fabric.retry_counter < iport->max_plogi_retries) {
+ iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED;
+ fdls_send_fabric_plogi(iport);
+ } else
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Exceeded max PLOGI retries");
+ }
+ break;
+ case FDLS_STATE_RPN_ID:
+ /* RPN_ID received an LS_RJT with busy; retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_rpn_id(iport);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED))
+ /* RPN has timed out. Send abts */
+ fdls_send_fabric_abts(iport);
+ else {
+ /* ABTS has timed out */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */
+ }
+ break;
+ case FDLS_STATE_SCR:
+ /* SCR received an LS_RJT with busy; retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_scr(iport);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED))
+ /* scr has timed out. Send abts */
+ fdls_send_fabric_abts(iport);
+ else {
+ /* ABTS has timed out */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ABTS timed out. Starting PLOGI: %p", iport);
+ fnic_fdls_start_plogi(iport);
+ }
+ break;
+ case FDLS_STATE_REGISTER_FC4_TYPES:
+ /* RFT_ID received an LS_RJT with busy; retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_register_fc4_types(iport);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) {
+ /* RFT_ID has timed out; send ABTS */
+ fdls_send_fabric_abts(iport);
+ } else {
+ /* ABTS has timed out */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ABTS timed out. Starting PLOGI: %p", iport);
+ fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */
+ }
+ break;
+ case FDLS_STATE_REGISTER_FC4_FEATURES:
+ /* RFF_ID received an LS_RJT with busy; retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_register_fc4_features(iport);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED))
+ /* RFF_ID has timed out. Send ABTS */
+ fdls_send_fabric_abts(iport);
+ else {
+ /* ABTS has timed out */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ABTS timed out. Starting PLOGI %p", iport);
+ fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */
+ }
+ break;
+ case FDLS_STATE_RSCN_GPN_FT:
+ case FDLS_STATE_SEND_GPNFT:
+ case FDLS_STATE_GPN_FT:
+ /* GPN_FT received an LS_RJT with busy; retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_gpn_ft(iport, iport->fabric.state);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) {
+ /* GPN_FT has timed out. Send ABTS */
+ fdls_send_fabric_abts(iport);
+ } else {
+ /* ABTS has timed out */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ if (iport->fabric.retry_counter < FDLS_RETRY_COUNT) {
+ fdls_send_gpn_ft(iport, iport->fabric.state);
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ABTS timeout for fabric GPN_FT. Check name server: %p",
+ iport);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
+void fdls_fdmi_retry_plogi(struct fnic_iport_s *iport)
+{
+ struct fnic *fnic = iport->fnic;
+
+ iport->fabric.fdmi_pending = 0;
+ /* If max retries not exhausted, start over from fdmi plogi */
+ if (iport->fabric.fdmi_retry < FDLS_FDMI_MAX_RETRY) {
+ iport->fabric.fdmi_retry++;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Retry FDMI PLOGI. FDMI retry: %d",
+ iport->fabric.fdmi_retry);
+ fdls_send_fdmi_plogi(iport);
+ }
+}
+
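+/*
+ * fdls_fdmi_timer_callback - FDMI request timeout handler.
+ * On the first expiry send an ABTS for the outstanding FDMI request;
+ * if the ABTS itself times out, schedule the OXID(s) to be freed and
+ * retry discovery from FDMI PLOGI.
+ */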
+void fdls_fdmi_timer_callback(struct timer_list *t)
+{
+ struct fnic_fdls_fabric_s *fabric = from_timer(fabric, t, fdmi_timer);
+ struct fnic_iport_s *iport =
+ container_of(fabric, struct fnic_iport_s, fabric);
+ struct fnic *fnic = iport->fnic;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending);
+
+ if (!iport->fabric.fdmi_pending) {
+ /* timer expired after fdmi responses received. */
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending);
+
+ /* if not abort pending, send an abort */
+ if (!(iport->fabric.fdmi_pending & FDLS_FDMI_ABORT_PENDING)) {
+ fdls_send_fdmi_abts(iport);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending);
+
+ /* An ABTS is pending for an active FDMI request, which means
+ * the FDMI ABTS itself has timed out.
+ * Schedule the OXID to be freed after 2 * R_A_TOV and proceed.
+ */
+ if (iport->fabric.fdmi_pending & FDLS_FDMI_PLOGI_PENDING) {
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fdmi_plogi);
+ } else {
+ if (iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING)
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fdmi_rhba);
+ if (iport->fabric.fdmi_pending & FDLS_FDMI_RPA_PENDING)
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fdmi_rpa);
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending);
+
+ fdls_fdmi_retry_plogi(iport);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
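+/*
+ * fdls_send_delete_tport_msg - Queue a TGT_EV_TPORT_DELETE event for
+ * the tport so that the tport worker removes it.
+ */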
+static void fdls_send_delete_tport_msg(struct fnic_tport_s *tport)
+{
+ struct fnic_iport_s *iport = (struct fnic_iport_s *) tport->iport;
+ struct fnic *fnic = iport->fnic;
+ struct fnic_tport_event_s *tport_del_evt;
+
+ tport_del_evt = kzalloc(sizeof(struct fnic_tport_event_s), GFP_ATOMIC);
+ if (!tport_del_evt) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Failed to allocate memory for tport event fcid: 0x%x",
+ tport->fcid);
+ return;
+ }
+ tport_del_evt->event = TGT_EV_TPORT_DELETE;
+ tport_del_evt->arg1 = (void *) tport;
+ list_add_tail(&tport_del_evt->links, &fnic->tport_event_list);
+ queue_work(fnic_event_queue, &fnic->tport_work);
+}
+
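+/*
+ * fdls_tport_timer_callback - Per-tport retry timer.
+ * Depending on the tport state, retry PLOGI/PRLI/ADISC, send an ABTS
+ * for a timed-out request, or delete the tport when retries are
+ * exhausted.
+ */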
+static void fdls_tport_timer_callback(struct timer_list *t)
+{
+ struct fnic_tport_s *tport = from_timer(tport, t, retry_timer);
+ struct fnic_iport_s *iport = (struct fnic_iport_s *) tport->iport;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (!tport->timer_pending) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+
+ if (tport->del_timer_inprogress) {
+ tport->del_timer_inprogress = 0;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport_del_timer inprogress. Skip timer cb tport fcid: 0x%x\n",
+ tport->fcid);
+ return;
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport fcid: 0x%x timer pending: %d state: %d retry counter: %d",
+ tport->fcid, tport->timer_pending, tport->state,
+ tport->retry_counter);
+
+ tport->timer_pending = 0;
+ oxid = tport->active_oxid;
+
+ /* We retry plogi/prli/adisc frames depending on the tport state */
+ switch (tport->state) {
+ case FDLS_TGT_STATE_PLOGI:
+ /* PLOGI frame received an LS_RJT with busy; we retry from here */
+ if ((tport->flags & FNIC_FDLS_RETRY_FRAME)
+ && (tport->retry_counter < iport->max_plogi_retries)) {
+ tport->flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_tgt_plogi(iport, tport);
+ } else if (!(tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) {
+ /* PLOGI frame has timed out; send abts */
+ fdls_send_tport_abts(iport, tport);
+ } else if (tport->retry_counter < iport->max_plogi_retries) {
+ /*
+ * ABTS has timed out
+ */
+ fdls_schedule_oxid_free(iport, &tport->active_oxid);
+ fdls_send_tgt_plogi(iport, tport);
+ } else {
+ /* exceeded plogi retry count */
+ fdls_schedule_oxid_free(iport, &tport->active_oxid);
+ fdls_send_delete_tport_msg(tport);
+ }
+ break;
+ case FDLS_TGT_STATE_PRLI:
+ /* PRLI received an LS_RJT with busy, hence we retry from here */
+ if ((tport->flags & FNIC_FDLS_RETRY_FRAME)
+ && (tport->retry_counter < FDLS_RETRY_COUNT)) {
+ tport->flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_tgt_prli(iport, tport);
+ } else if (!(tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) {
+ /* PRLI has timed out; send abts */
+ fdls_send_tport_abts(iport, tport);
+ } else {
+ /* ABTS has timed out for prli, we go back to PLOGI */
+ fdls_schedule_oxid_free(iport, &tport->active_oxid);
+ fdls_send_tgt_plogi(iport, tport);
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_PLOGI);
+ }
+ break;
+ case FDLS_TGT_STATE_ADISC:
+ /* ADISC timed out; send an ABTS */
+ if (!(tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) {
+ fdls_send_tport_abts(iport, tport);
+ } else if ((tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)
+ && (tport->retry_counter < FDLS_RETRY_COUNT)) {
+ /*
+ * ABTS has timed out
+ */
+ fdls_schedule_oxid_free(iport, &tport->active_oxid);
+ fdls_send_tgt_adisc(iport, tport);
+ } else {
+ /* exceeded retry count */
+ fdls_schedule_oxid_free(iport, &tport->active_oxid);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ADISC not responding. Deleting target port: 0x%x",
+ tport->fcid);
+ fdls_send_delete_tport_msg(tport);
+ }
+ break;
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "oxid: 0x%x Unknown tport state: 0x%x", oxid, tport->state);
+ break;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
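+/* Start fabric login: send FLOGI and move the fabric SM to FLOGI state */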
+static void fnic_fdls_start_flogi(struct fnic_iport_s *iport)
+{
+ iport->fabric.retry_counter = 0;
+ fdls_send_fabric_flogi(iport);
+ fdls_set_state((&iport->fabric), FDLS_STATE_FABRIC_FLOGI);
+ iport->fabric.flags = 0;
+}
+
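+/*
+ * fnic_fdls_start_plogi - Send fabric PLOGI and, if FDMI is enabled and
+ * not already active, kick off FDMI PLOGI in parallel.
+ */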
+static void fnic_fdls_start_plogi(struct fnic_iport_s *iport)
+{
+ iport->fabric.retry_counter = 0;
+ fdls_send_fabric_plogi(iport);
+ fdls_set_state((&iport->fabric), FDLS_STATE_FABRIC_PLOGI);
+ iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED;
+
+ if ((fnic_fdmi_support == 1) && (!(iport->flags & FNIC_FDMI_ACTIVE))) {
+ /* we can do FDMI at the same time */
+ iport->fabric.fdmi_retry = 0;
+ timer_setup(&iport->fabric.fdmi_timer, fdls_fdmi_timer_callback,
+ 0);
+ fdls_send_fdmi_plogi(iport);
+ iport->flags |= FNIC_FDMI_ACTIVE;
+ }
+}
+static void
+fdls_process_tgt_adisc_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint32_t tgt_fcid;
+ struct fnic_tport_s *tport;
+ uint8_t *fcid;
+ uint64_t frame_wwnn;
+ uint64_t frame_wwpn;
+ uint16_t oxid;
+ struct fc_std_els_adisc *adisc_rsp = (struct fc_std_els_adisc *)fchdr;
+ struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *)fchdr;
+ struct fnic *fnic = iport->fnic;
+
+ fcid = FNIC_STD_GET_S_ID(fchdr);
+ tgt_fcid = ntoh24(fcid);
+ tport = fnic_find_tport_by_fcid(iport, tgt_fcid);
+
+ if (!tport) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Tgt ADISC response tport not found: 0x%x", tgt_fcid);
+ return;
+ }
+ if ((iport->state != FNIC_IPORT_STATE_READY)
+ || (tport->state != FDLS_TGT_STATE_ADISC)
+ || (tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping this ADISC response");
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport state: %d tport state: %d Is abort issued on PRLI? %d",
+ iport->state, tport->state,
+ (tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED));
+ return;
+ }
+ if (FNIC_STD_GET_OX_ID(fchdr) != tport->active_oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping frame from target: 0x%x",
+ tgt_fcid);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Reason: Stale ADISC/Aborted ADISC/OOO frame delivery");
+ return;
+ }
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+
+ switch (adisc_rsp->els.adisc_cmd) {
+ case ELS_LS_ACC:
+ atomic64_inc(&iport->iport_stats.tport_adisc_ls_accepts);
+ if (tport->timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport 0x%p Canceling fabric disc timer\n",
+ tport);
+ fnic_del_tport_timer_sync(fnic, tport);
+ }
+ tport->timer_pending = 0;
+ tport->retry_counter = 0;
+ frame_wwnn = get_unaligned_be64(&adisc_rsp->els.adisc_wwnn);
+ frame_wwpn = get_unaligned_be64(&adisc_rsp->els.adisc_wwpn);
+ if ((frame_wwnn == tport->wwnn) && (frame_wwpn == tport->wwpn)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ADISC accepted from target: 0x%x. Target logged in",
+ tgt_fcid);
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_READY);
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Error mismatch frame: ADISC");
+ }
+ break;
+
+ case ELS_LS_RJT:
+ atomic64_inc(&iport->iport_stats.tport_adisc_ls_rejects);
+ if (((els_rjt->rej.er_reason == ELS_RJT_BUSY)
+ || (els_rjt->rej.er_reason == ELS_RJT_UNAB))
+ && (tport->retry_counter < FDLS_RETRY_COUNT)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ADISC ret ELS_LS_RJT BUSY. Retry from timer routine: 0x%x",
+ tgt_fcid);
+
+ /* Retry ADISC again from the timer routine. */
+ tport->flags |= FNIC_FDLS_RETRY_FRAME;
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ADISC returned ELS_LS_RJT from target: 0x%x",
+ tgt_fcid);
+ fdls_delete_tport(iport, tport);
+ }
+ break;
+ }
+}
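+
+/*
+ * fdls_process_tgt_plogi_rsp - Handle a PLOGI response from a target.
+ * On LS_ACC, learn the service parameters and move the tport to PRLI;
+ * on a busy/unable LS_RJT, flag the frame for retry from the timer.
+ */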
+static void
+fdls_process_tgt_plogi_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint32_t tgt_fcid;
+ struct fnic_tport_s *tport;
+ uint8_t *fcid;
+ uint16_t oxid;
+ struct fc_std_flogi *plogi_rsp = (struct fc_std_flogi *)fchdr;
+ struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *)fchdr;
+ uint16_t max_payload_size;
+ struct fnic *fnic = iport->fnic;
+
+ fcid = FNIC_STD_GET_S_ID(fchdr);
+ tgt_fcid = ntoh24(fcid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS processing target PLOGI response: tgt_fcid: 0x%x",
+ tgt_fcid);
+
+ tport = fnic_find_tport_by_fcid(iport, tgt_fcid);
+ if (!tport) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport not found: 0x%x", tgt_fcid);
+ return;
+ }
+ if ((iport->state != FNIC_IPORT_STATE_READY)
+ || (tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping frame! iport state: %d tport state: %d",
+ iport->state, tport->state);
+ return;
+ }
+
+ if (tport->state != FDLS_TGT_STATE_PLOGI) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PLOGI rsp recvd in wrong state. Drop the frame and restart nexus");
+ fdls_target_restart_nexus(tport);
+ return;
+ }
+
+ if (FNIC_STD_GET_OX_ID(fchdr) != tport->active_oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PLOGI response from target: 0x%x. Dropping frame",
+ tgt_fcid);
+ return;
+ }
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+
+ switch (plogi_rsp->els.fl_cmd) {
+ case ELS_LS_ACC:
+ atomic64_inc(&iport->iport_stats.tport_plogi_ls_accepts);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PLOGI accepted by target: 0x%x", tgt_fcid);
+ break;
+
+ case ELS_LS_RJT:
+ atomic64_inc(&iport->iport_stats.tport_plogi_ls_rejects);
+ if (((els_rjt->rej.er_reason == ELS_RJT_BUSY)
+ || (els_rjt->rej.er_reason == ELS_RJT_UNAB))
+ && (tport->retry_counter < iport->max_plogi_retries)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PLOGI ret ELS_LS_RJT BUSY. Retry from timer routine: 0x%x",
+ tgt_fcid);
+ /* Retry plogi again from the timer routine. */
+ tport->flags |= FNIC_FDLS_RETRY_FRAME;
+ return;
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PLOGI returned ELS_LS_RJT from target: 0x%x",
+ tgt_fcid);
+ fdls_delete_tport(iport, tport);
+ return;
+
+ default:
+ atomic64_inc(&iport->iport_stats.tport_plogi_misc_rejects);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PLOGI not accepted from target fcid: 0x%x",
+ tgt_fcid);
+ return;
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Found the PLOGI target: 0x%x and state: %d",
+ (unsigned int) tgt_fcid, tport->state);
+
+ if (tport->timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport fcid 0x%x: Canceling disc timer\n",
+ tport->fcid);
+ fnic_del_tport_timer_sync(fnic, tport);
+ }
+
+ tport->timer_pending = 0;
+ tport->wwpn = get_unaligned_be64(&FNIC_LOGI_PORT_NAME(plogi_rsp->els));
+ tport->wwnn = get_unaligned_be64(&FNIC_LOGI_NODE_NAME(plogi_rsp->els));
+
+ /* Learn the Service Params */
+
+ /* Max frame size - choose the lowest */
+ max_payload_size = fnic_fc_plogi_rsp_rdf(iport, plogi_rsp);
+ tport->max_payload_size =
+ min(max_payload_size, iport->max_payload_size);
+
+ if (tport->max_payload_size < FNIC_MIN_DATA_FIELD_SIZE) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "MFS: tport max frame size below spec bounds: %d",
+ tport->max_payload_size);
+ tport->max_payload_size = FNIC_MIN_DATA_FIELD_SIZE;
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "MAX frame size: %u iport max_payload_size: %d tport mfs: %d",
+ max_payload_size, iport->max_payload_size,
+ tport->max_payload_size);
+
+ tport->max_concur_seqs = FNIC_FC_PLOGI_RSP_CONCUR_SEQ(plogi_rsp);
+
+ tport->retry_counter = 0;
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_PRLI);
+ fdls_send_tgt_prli(iport, tport);
+}
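+
+/*
+ * fdls_process_tgt_prli_rsp - Handle a PRLI response from a target.
+ * On LS_ACC from a SCSI target, mark the tport ready and queue a
+ * TGT_EV_RPORT_ADD event; otherwise log out and delete the tport.
+ */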
+static void
+fdls_process_tgt_prli_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint32_t tgt_fcid;
+ struct fnic_tport_s *tport;
+ uint8_t *fcid;
+ uint16_t oxid;
+ struct fc_std_els_prli *prli_rsp = (struct fc_std_els_prli *)fchdr;
+ struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *)fchdr;
+ struct fnic_tport_event_s *tport_add_evt;
+ struct fnic *fnic = iport->fnic;
+ bool mismatched_tgt = false;
+
+ fcid = FNIC_STD_GET_S_ID(fchdr);
+ tgt_fcid = ntoh24(fcid);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS process tgt PRLI response: 0x%x", tgt_fcid);
+
+ tport = fnic_find_tport_by_fcid(iport, tgt_fcid);
+ if (!tport) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport not found: 0x%x", tgt_fcid);
+ /* Handle or just drop? */
+ return;
+ }
+
+ if ((iport->state != FNIC_IPORT_STATE_READY)
+ || (tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping frame! iport st: %d tport st: %d tport fcid: 0x%x",
+ iport->state, tport->state, tport->fcid);
+ return;
+ }
+
+ if (tport->state != FDLS_TGT_STATE_PRLI) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PRLI rsp recvd in wrong state. Drop frame. Restarting nexus");
+ fdls_target_restart_nexus(tport);
+ return;
+ }
+
+ if (FNIC_STD_GET_OX_ID(fchdr) != tport->active_oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping PRLI response from target: 0x%x ",
+ tgt_fcid);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Reason: Stale PRLI response/Aborted PDISC/OOO frame delivery");
+ return;
+ }
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+
+ switch (prli_rsp->els_prli.prli_cmd) {
+ case ELS_LS_ACC:
+ atomic64_inc(&iport->iport_stats.tport_prli_ls_accepts);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PRLI accepted from target: 0x%x", tgt_fcid);
+
+ if (prli_rsp->sp.spp_type != FC_FC4_TYPE_SCSI) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "mismatched target zoned with FC SCSI initiator: 0x%x",
+ tgt_fcid);
+ mismatched_tgt = true;
+ }
+ if (mismatched_tgt) {
+ fdls_tgt_logout(iport, tport);
+ fdls_delete_tport(iport, tport);
+ return;
+ }
+ break;
+ case ELS_LS_RJT:
+ atomic64_inc(&iport->iport_stats.tport_prli_ls_rejects);
+ if (((els_rjt->rej.er_reason == ELS_RJT_BUSY)
+ || (els_rjt->rej.er_reason == ELS_RJT_UNAB))
+ && (tport->retry_counter < FDLS_RETRY_COUNT)) {
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PRLI ret ELS_LS_RJT BUSY. Retry from timer routine: 0x%x",
+ tgt_fcid);
+
+ /* Retry PRLI again from the timer routine. */
+ tport->flags |= FNIC_FDLS_RETRY_FRAME;
+ return;
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PRLI returned ELS_LS_RJT from target: 0x%x",
+ tgt_fcid);
+
+ fdls_tgt_logout(iport, tport);
+ fdls_delete_tport(iport, tport);
+ return;
+ default:
+ atomic64_inc(&iport->iport_stats.tport_prli_misc_rejects);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PRLI not accepted from target: 0x%x", tgt_fcid);
+ return;
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Found the PRLI target: 0x%x and state: %d",
+ (unsigned int) tgt_fcid, tport->state);
+
+ if (tport->timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport fcid 0x%x: Canceling disc timer\n",
+ tport->fcid);
+ fnic_del_tport_timer_sync(fnic, tport);
+ }
+ tport->timer_pending = 0;
+
+ /* Learn Service Params */
+ tport->fcp_csp = be32_to_cpu(prli_rsp->sp.spp_params);
+ tport->retry_counter = 0;
+
+ if (tport->fcp_csp & FCP_SPPF_RETRY)
+ tport->tgt_flags |= FNIC_FC_RP_FLAGS_RETRY;
+
+ /* Check if the device supports the target mode function */
+ if (!(tport->fcp_csp & FCP_PRLI_FUNC_TARGET)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Remote port(0x%x): no target support. Deleting it\n",
+ tgt_fcid);
+ fdls_tgt_logout(iport, tport);
+ fdls_delete_tport(iport, tport);
+ return;
+ }
+
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_READY);
+
+ /* Inform the driver about new target added */
+ tport_add_evt = kzalloc(sizeof(struct fnic_tport_event_s), GFP_ATOMIC);
+ if (!tport_add_evt) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport event memory allocation failure: 0x%0x\n",
+ tport->fcid);
+ return;
+ }
+ tport_add_evt->event = TGT_EV_RPORT_ADD;
+ tport_add_evt->arg1 = (void *) tport;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport fcid: 0x%x add tport event fcid: 0x%x\n",
+ iport->fcid, tport->fcid);
+ list_add_tail(&tport_add_evt->links, &fnic->tport_event_list);
+ queue_work(fnic_event_queue, &fnic->tport_work);
+}
+
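+/*
+ * fdls_process_rff_id_rsp - Handle the name server response to RFF_ID.
+ * On accept, move the fabric SM to SCR; on a busy/unable reject, retry
+ * from the timer routine.
+ */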
+static void
+fdls_process_rff_id_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ struct fnic *fnic = iport->fnic;
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fc_std_rff_id *rff_rsp = (struct fc_std_rff_id *) fchdr;
+ uint16_t rsp;
+ uint8_t reason_code;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ if (fdls_get_state(fdls) != FDLS_STATE_REGISTER_FC4_FEATURES) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RFF_ID resp recvd in state(%d). Dropping.",
+ fdls_get_state(fdls));
+ return;
+ }
+
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req);
+ return;
+ }
+
+ rsp = FNIC_STD_GET_FC_CT_CMD((&rff_rsp->fc_std_ct_hdr));
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS process RFF ID response: 0x%04x", iport->fcid,
+ (uint32_t) rsp);
+
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
+ switch (rsp) {
+ case FC_FS_ACC:
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n", iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ fdls->retry_counter = 0;
+ fdls_set_state((&iport->fabric), FDLS_STATE_SCR);
+ fdls_send_scr(iport);
+ break;
+ case FC_FS_RJT:
+ reason_code = rff_rsp->fc_std_ct_hdr.ct_reason;
+ if (((reason_code == FC_FS_RJT_BSY)
+ || (reason_code == FC_FS_RJT_UNABL))
+ && (fdls->retry_counter < FDLS_RETRY_COUNT)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RFF_ID ret ELS_LS_RJT BUSY. Retry from timer routine %p",
+ iport);
+
+ /* Retry again from the timer routine */
+ fdls->flags |= FNIC_FDLS_RETRY_FRAME;
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RFF_ID returned ELS_LS_RJT. Halting discovery %p",
+ iport);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n", iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ fdls->timer_pending = 0;
+ fdls->retry_counter = 0;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
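+/*
+ * fdls_process_rft_id_rsp - Handle the name server response to RFT_ID.
+ * On accept, proceed to register FC-4 features (RFF_ID); on a
+ * busy/unable reject, retry from the timer routine.
+ */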
+static void
+fdls_process_rft_id_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fc_std_rft_id *rft_rsp = (struct fc_std_rft_id *) fchdr;
+ uint16_t rsp;
+ uint8_t reason_code;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ if (fdls_get_state(fdls) != FDLS_STATE_REGISTER_FC4_TYPES) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RFT_ID resp recvd in state(%d). Dropping.",
+ fdls_get_state(fdls));
+ return;
+ }
+
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req);
+ return;
+ }
+
+ rsp = FNIC_STD_GET_FC_CT_CMD((&rft_rsp->fc_std_ct_hdr));
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS process RFT ID response: 0x%04x", iport->fcid,
+ (uint32_t) rsp);
+
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
+ switch (rsp) {
+ case FC_FS_ACC:
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n", iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ fdls->retry_counter = 0;
+ fdls_send_register_fc4_features(iport);
+ fdls_set_state((&iport->fabric), FDLS_STATE_REGISTER_FC4_FEATURES);
+ break;
+ case FC_FS_RJT:
+ reason_code = rft_rsp->fc_std_ct_hdr.ct_reason;
+ if (((reason_code == FC_FS_RJT_BSY)
+ || (reason_code == FC_FS_RJT_UNABL))
+ && (fdls->retry_counter < FDLS_RETRY_COUNT)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: RFT_ID ret ELS_LS_RJT BUSY. Retry from timer routine",
+ iport->fcid);
+
+ /* Retry again from the timer routine */
+ fdls->flags |= FNIC_FDLS_RETRY_FRAME;
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: RFT_ID REJ. Halting discovery reason %d expl %d",
+ iport->fcid, reason_code,
+ rft_rsp->fc_std_ct_hdr.ct_explan);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n", iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ fdls->timer_pending = 0;
+ fdls->retry_counter = 0;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
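+/*
+ * fdls_process_rpn_id_rsp - Handle the name server response to RPN_ID.
+ * On accept, proceed to register FC-4 types (RFT_ID); on a busy/unable
+ * reject, retry from the timer routine.
+ */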
+static void
+fdls_process_rpn_id_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fc_std_rpn_id *rpn_rsp = (struct fc_std_rpn_id *) fchdr;
+ uint16_t rsp;
+ uint8_t reason_code;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ if (fdls_get_state(fdls) != FDLS_STATE_RPN_ID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RPN_ID resp recvd in state(%d). Dropping.",
+ fdls_get_state(fdls));
+ return;
+ }
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req);
+ return;
+ }
+
+ rsp = FNIC_STD_GET_FC_CT_CMD((&rpn_rsp->fc_std_ct_hdr));
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS process RPN ID response: 0x%04x", iport->fcid,
+ (uint32_t) rsp);
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
+ switch (rsp) {
+ case FC_FS_ACC:
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n", iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ fdls->retry_counter = 0;
+ fdls_send_register_fc4_types(iport);
+ fdls_set_state((&iport->fabric), FDLS_STATE_REGISTER_FC4_TYPES);
+ break;
+ case FC_FS_RJT:
+ reason_code = rpn_rsp->fc_std_ct_hdr.ct_reason;
+ if (((reason_code == FC_FS_RJT_BSY)
+ || (reason_code == FC_FS_RJT_UNABL))
+ && (fdls->retry_counter < FDLS_RETRY_COUNT)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RPN_ID returned REJ BUSY. Retry from timer routine %p",
+ iport);
+
+ /* Retry again from the timer routine */
+ fdls->flags |= FNIC_FDLS_RETRY_FRAME;
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RPN_ID ELS_LS_RJT. Halting discovery %p", iport);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n", iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ fdls->timer_pending = 0;
+ fdls->retry_counter = 0;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
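+/*
+ * fdls_process_scr_rsp - Handle the SCR response from the fabric
+ * controller. On accept, start target discovery with GPN_FT.
+ */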
+static void
+fdls_process_scr_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fc_std_scr *scr_rsp = (struct fc_std_scr *) fchdr;
+ struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *) fchdr;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS process SCR response: 0x%04x",
+ (uint32_t) scr_rsp->scr.scr_cmd);
+
+ if (fdls_get_state(fdls) != FDLS_STATE_SCR) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "SCR resp recvd in state(%d). Dropping.",
+ fdls_get_state(fdls));
+ return;
+ }
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req);
+ }
+
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
+ switch (scr_rsp->scr.scr_cmd) {
+ case ELS_LS_ACC:
+ atomic64_inc(&iport->iport_stats.fabric_scr_ls_accepts);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n", iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ iport->fabric.retry_counter = 0;
+ fdls_send_gpn_ft(iport, FDLS_STATE_GPN_FT);
+ break;
+
+ case ELS_LS_RJT:
+ atomic64_inc(&iport->iport_stats.fabric_scr_ls_rejects);
+ if (((els_rjt->rej.er_reason == ELS_RJT_BUSY)
+ || (els_rjt->rej.er_reason == ELS_RJT_UNAB))
+ && (fdls->retry_counter < FDLS_RETRY_COUNT)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "SCR ELS_LS_RJT BUSY. Retry from timer routine %p",
+ iport);
+ /* Retry again from the timer routine */
+ fdls->flags |= FNIC_FDLS_RETRY_FRAME;
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "SCR returned ELS_LS_RJT. Halting discovery %p",
+ iport);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n",
+ iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ fdls->timer_pending = 0;
+ fdls->retry_counter = 0;
+ }
+ break;
+
+ default:
+ atomic64_inc(&iport->iport_stats.fabric_scr_misc_rejects);
+ break;
+ }
+}
+
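+/*
+ * fdls_process_gpn_ft_tgt_list - Walk the GPN_FT accept payload and
+ * create/refresh a tport for every entry; after an RSCN, tports that
+ * are no longer listed are deleted.
+ */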
+static void
+fdls_process_gpn_ft_tgt_list(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr, int len)
+{
+ struct fc_gpn_ft_rsp_iu *gpn_ft_tgt;
+ struct fnic_tport_s *tport, *next;
+ uint32_t fcid;
+ uint64_t wwpn;
+ int rem_len = len;
+ u32 old_link_down_cnt = iport->fnic->link_down_cnt;
+ struct fnic *fnic = iport->fnic;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS process GPN_FT tgt list", iport->fcid);
+
+ gpn_ft_tgt =
+ (struct fc_gpn_ft_rsp_iu *)((uint8_t *) fchdr +
+ sizeof(struct fc_frame_header)
+ + sizeof(struct fc_ct_hdr));
+ len -= sizeof(struct fc_frame_header) + sizeof(struct fc_ct_hdr);
+
+ while (rem_len > 0) {
+
+ fcid = ntoh24(gpn_ft_tgt->fcid);
+ wwpn = be64_to_cpu(gpn_ft_tgt->wwpn);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport: 0x%x: ctrl:0x%x", fcid, gpn_ft_tgt->ctrl);
+
+ if (fcid == iport->fcid) {
+ if (gpn_ft_tgt->ctrl & FC_NS_FID_LAST)
+ break;
+ gpn_ft_tgt++;
+ rem_len -= sizeof(struct fc_gpn_ft_rsp_iu);
+ continue;
+ }
+
+ tport = fnic_find_tport_by_wwpn(iport, wwpn);
+ if (!tport) {
+ /*
+ * New port registered with the switch or first time query
+ */
+ tport = fdls_create_tport(iport, fcid, wwpn);
+ if (!tport)
+ return;
+ }
+ /*
+ * Check if this is an existing tport with the same wwpn
+ * but whose fcid has now changed; if so, remove it and
+ * create a new one
+ */
+ if (tport->fcid != fcid) {
+ fdls_delete_tport(iport, tport);
+ tport = fdls_create_tport(iport, fcid, wwpn);
+ if (!tport)
+ return;
+ }
+
+ /*
+ * If this GPN_FT rsp came after an RSCN, mark the tports that
+ * match the new GPN_FT list; any tport not found in GPN_FT
+ * will be deleted later.
+ */
+ if (fdls_get_state((&iport->fabric)) == FDLS_STATE_RSCN_GPN_FT)
+ tport->flags |= FNIC_FDLS_TPORT_IN_GPN_FT_LIST;
+
+ if (gpn_ft_tgt->ctrl & FC_NS_FID_LAST)
+ break;
+
+ gpn_ft_tgt++;
+ rem_len -= sizeof(struct fc_gpn_ft_rsp_iu);
+ }
+ if (rem_len <= 0) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "GPN_FT response: malformed/corrupt frame rxlen: %d remlen: %d",
+ len, rem_len);
+ }
+
+ /* Remove those ports which were not listed in GPN_FT */
+ if (fdls_get_state((&iport->fabric)) == FDLS_STATE_RSCN_GPN_FT) {
+ list_for_each_entry_safe(tport, next, &iport->tport_list, links) {
+
+ if (!(tport->flags & FNIC_FDLS_TPORT_IN_GPN_FT_LIST)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Remove port: 0x%x not found in GPN_FT list",
+ tport->fcid);
+ fdls_delete_tport(iport, tport);
+ } else {
+ tport->flags &= ~FNIC_FDLS_TPORT_IN_GPN_FT_LIST;
+ }
+ if ((old_link_down_cnt != iport->fnic->link_down_cnt)
+ || (iport->state != FNIC_IPORT_STATE_READY)) {
+ return;
+ }
+ }
+ }
+}
+
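+/*
+ * fdls_process_gpn_ft_rsp - Handle the GPN_FT response from the name
+ * server and kick off target discovery, or clean up tports on reject.
+ */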
+static void
+fdls_process_gpn_ft_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr, int len)
+{
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fc_std_gpn_ft *gpn_ft_rsp = (struct fc_std_gpn_ft *) fchdr;
+ uint16_t rsp;
+ uint8_t reason_code;
+ int count = 0;
+ struct fnic_tport_s *tport, *next;
+ u32 old_link_down_cnt = iport->fnic->link_down_cnt;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS process GPN_FT response: iport state: %d len: %d",
+ iport->state, len);
+
+ /*
+ * GPN_FT response:
+ * FDLS_STATE_GPN_FT: GPN_FT sent after the SCR state
+ * during fabric discovery (FNIC_IPORT_STATE_FABRIC_DISC)
+ * FDLS_STATE_RSCN_GPN_FT: GPN_FT sent in response to an RSCN
+ * FDLS_STATE_SEND_GPNFT: GPN_FT sent after deleting a target,
+ * e.g. after receiving a target LOGO
+ * FDLS_STATE_TGT_DISCOVERY: target discovery from a previous
+ * GPN_FT response is in progress and a new response has arrived
+ */
+ if (!(((iport->state == FNIC_IPORT_STATE_FABRIC_DISC)
+ && (fdls_get_state(fdls) == FDLS_STATE_GPN_FT))
+ || ((iport->state == FNIC_IPORT_STATE_READY)
+ && ((fdls_get_state(fdls) == FDLS_STATE_RSCN_GPN_FT)
+ || (fdls_get_state(fdls) == FDLS_STATE_SEND_GPNFT)
+ || (fdls_get_state(fdls) == FDLS_STATE_TGT_DISCOVERY))))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "GPNFT resp recvd in fab state(%d) iport_state(%d). Dropping.",
+ fdls_get_state(fdls), iport->state);
+ return;
+ }
+
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req);
+ }
+
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
+ iport->state = FNIC_IPORT_STATE_READY;
+ rsp = FNIC_STD_GET_FC_CT_CMD((&gpn_ft_rsp->fc_std_ct_hdr));
+
+ switch (rsp) {
+
+ case FC_FS_ACC:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: GPNFT_RSP accept", iport->fcid);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Canceling fabric disc timer\n",
+ iport->fcid);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ iport->fabric.retry_counter = 0;
+ fdls_process_gpn_ft_tgt_list(iport, fchdr, len);
+
+ /*
+ * The iport state can change only if a link down event happened.
+ * We don't need to undo fdls_process_gpn_ft_tgt_list;
+ * that will be taken care of on the next link up event.
+ */
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Halting target discovery: fab st: %d iport st: %d ",
+ fdls_get_state(fdls), iport->state);
+ break;
+ }
+ fdls_tgt_discovery_start(iport);
+ break;
+
+ case FC_FS_RJT:
+ reason_code = gpn_ft_rsp->fc_std_ct_hdr.ct_reason;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: GPNFT_RSP Reject reason: %d", iport->fcid, reason_code);
+
+ if (((reason_code == FC_FS_RJT_BSY)
+ || (reason_code == FC_FS_RJT_UNABL))
+ && (fdls->retry_counter < FDLS_RETRY_COUNT)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: GPNFT_RSP ret REJ/BSY. Retry from timer routine",
+ iport->fcid);
+ /* Retry again from the timer routine */
+ fdls->flags |= FNIC_FDLS_RETRY_FRAME;
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: GPNFT_RSP reject", iport->fcid);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Canceling fabric disc timer\n",
+ iport->fcid);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ iport->fabric.retry_counter = 0;
+ /*
+ * If GPN_FT was rejected (LS_RJT), we should delete
+ * all existing tports
+ */
+ count = 0;
+ list_for_each_entry_safe(tport, next, &iport->tport_list,
+ links) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "GPN_FT_REJECT: Remove port: 0x%x",
+ tport->fcid);
+ fdls_delete_tport(iport, tport);
+ if ((old_link_down_cnt != iport->fnic->link_down_cnt)
+ || (iport->state != FNIC_IPORT_STATE_READY)) {
+ return;
+ }
+ count++;
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "GPN_FT_REJECT: Removed (0x%x) ports", count);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * fdls_process_fabric_logo_rsp - Handle a FLOGO response from the FCF
+ * @iport: Handle to fnic iport
+ * @fchdr: Incoming frame
+ */
+static void
+fdls_process_fabric_logo_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ struct fc_std_flogi *flogo_rsp = (struct fc_std_flogi *) fchdr;
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req);
+ }
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
+ switch (flogo_rsp->els.fl_cmd) {
+ case ELS_LS_ACC:
+ if (iport->fabric.state != FDLS_STATE_FABRIC_LOGO) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Flogo response. Fabric not in LOGO state. Dropping! %p",
+ iport);
+ return;
+ }
+
+ iport->fabric.state = FDLS_STATE_FLOGO_DONE;
+ iport->state = FNIC_IPORT_STATE_LINK_WAIT;
+
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport 0x%p Canceling fabric disc timer\n",
+ iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Flogo response from Fabric for did: 0x%x",
+ ntoh24(fchdr->fh_d_id));
+ return;
+
+ case ELS_LS_RJT:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Flogo response from Fabric for did: 0x%x returned ELS_LS_RJT",
+ ntoh24(fchdr->fh_d_id));
+ return;
+
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGO response not accepted or rejected: 0x%x",
+ flogo_rsp->els.fl_cmd);
+ }
+}
+
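+/*
+ * fdls_process_flogi_rsp - Handle the fabric FLOGI response.
+ * On accept, learn the assigned FCID and service parameters, program
+ * the FCoE MAC address, and continue with fabric PLOGI.
+ */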
+static void
+fdls_process_flogi_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr, void *rx_frame)
+{
+ struct fnic_fdls_fabric_s *fabric = &iport->fabric;
+ struct fc_std_flogi *flogi_rsp = (struct fc_std_flogi *) fchdr;
+ uint8_t *fcid;
+ uint16_t rdf_size;
+ uint8_t fcmac[6] = { 0x0E, 0XFC, 0x00, 0x00, 0x00, 0x00 };
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS processing FLOGI response", iport->fcid);
+
+ if (fdls_get_state(fabric) != FDLS_STATE_FABRIC_FLOGI) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI response received in state (%d). Dropping frame",
+ fdls_get_state(fabric));
+ return;
+ }
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fabric), oxid, iport->active_oxid_fabric_req);
+ return;
+ }
+
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
+ switch (flogi_rsp->els.fl_cmd) {
+ case ELS_LS_ACC:
+ atomic64_inc(&iport->iport_stats.fabric_flogi_ls_accepts);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport fcid: 0x%x Canceling fabric disc timer\n",
+ iport->fcid);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+
+ iport->fabric.timer_pending = 0;
+ iport->fabric.retry_counter = 0;
+ fcid = FNIC_STD_GET_D_ID(fchdr);
+ iport->fcid = ntoh24(fcid);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FLOGI response accepted", iport->fcid);
+
+ /* Learn the Service Params */
+ rdf_size = be16_to_cpu(FNIC_LOGI_RDF_SIZE(flogi_rsp->els));
+ if ((rdf_size >= FNIC_MIN_DATA_FIELD_SIZE)
+ && (rdf_size < FNIC_FC_MAX_PAYLOAD_LEN))
+ iport->max_payload_size = min(rdf_size,
+ iport->max_payload_size);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "max_payload_size from fabric: %u set: %d", rdf_size,
+ iport->max_payload_size);
+
+ iport->r_a_tov = be32_to_cpu(FNIC_LOGI_R_A_TOV(flogi_rsp->els));
+ iport->e_d_tov = be32_to_cpu(FNIC_LOGI_E_D_TOV(flogi_rsp->els));
+
+ if (FNIC_LOGI_FEATURES(flogi_rsp->els) & FNIC_FC_EDTOV_NSEC)
+ iport->e_d_tov = iport->e_d_tov / FNIC_NSEC_TO_MSEC;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "From fabric: R_A_TOV: %d E_D_TOV: %d",
+ iport->r_a_tov, iport->e_d_tov);
+
+ fc_host_fabric_name(iport->fnic->host) =
+ get_unaligned_be64(&FNIC_LOGI_NODE_NAME(flogi_rsp->els));
+ fc_host_port_id(iport->fnic->host) = iport->fcid;
+
+ fnic_fdls_learn_fcoe_macs(iport, rx_frame, fcid);
+
+ if (fnic_fdls_register_portid(iport, iport->fcid, rx_frame) != 0) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FLOGI registration failed", iport->fcid);
+ break;
+ }
+
+ memcpy(&fcmac[3], fcid, 3);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Adding vNIC device MAC addr: %02x:%02x:%02x:%02x:%02x:%02x",
+ fcmac[0], fcmac[1], fcmac[2], fcmac[3], fcmac[4],
+ fcmac[5]);
+ vnic_dev_add_addr(iport->fnic->vdev, fcmac);
+
+ if (fdls_get_state(fabric) == FDLS_STATE_FABRIC_FLOGI) {
+ fnic_fdls_start_plogi(iport);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI response received. Starting PLOGI");
+ } else {
+ /* From the FDLS_STATE_FABRIC_FLOGI state the fabric can only
+ * go to the FDLS_STATE_LINKDOWN state, hence we don't have to
+ * worry about undoing fnic_fdls_register_portid and
+ * vnic_dev_add_addr
+ */
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI response received in state (%d). Dropping frame",
+ fdls_get_state(fabric));
+ }
+ break;
+
+ case ELS_LS_RJT:
+ atomic64_inc(&iport->iport_stats.fabric_flogi_ls_rejects);
+ if (fabric->retry_counter < iport->max_flogi_retries) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI returned ELS_LS_RJT BUSY. Retry from timer routine %p",
+ iport);
+
+ /* Retry Flogi again from the timer routine. */
+ fabric->flags |= FNIC_FDLS_RETRY_FRAME;
+
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI returned ELS_LS_RJT. Halting discovery %p",
+ iport);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport 0x%p Canceling fabric disc timer\n",
+ iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ fabric->timer_pending = 0;
+ fabric->retry_counter = 0;
+ }
+ break;
+
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI response not accepted: 0x%x",
+ flogi_rsp->els.fl_cmd);
+ atomic64_inc(&iport->iport_stats.fabric_flogi_misc_rejects);
+ break;
+ }
+}
+
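+/*
+ * fdls_process_fabric_plogi_rsp - Handle the PLOGI response from the
+ * directory server. On accept, move on to RPN_ID registration.
+ */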
+static void
+fdls_process_fabric_plogi_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ struct fc_std_flogi *plogi_rsp = (struct fc_std_flogi *) fchdr;
+ struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *) fchdr;
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ if (fdls_get_state((&iport->fabric)) != FDLS_STATE_FABRIC_PLOGI) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Fabric PLOGI response received in state (%d). Dropping frame",
+ fdls_get_state(&iport->fabric));
+ return;
+ }
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req);
+ return;
+ }
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
+ switch (plogi_rsp->els.fl_cmd) {
+ case ELS_LS_ACC:
+ atomic64_inc(&iport->iport_stats.fabric_plogi_ls_accepts);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport fcid: 0x%x fabric PLOGI response: Accepted\n",
+ iport->fcid);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ iport->fabric.retry_counter = 0;
+ fdls_set_state(&iport->fabric, FDLS_STATE_RPN_ID);
+ fdls_send_rpn_id(iport);
+ break;
+ case ELS_LS_RJT:
+ atomic64_inc(&iport->iport_stats.fabric_plogi_ls_rejects);
+ if (((els_rjt->rej.er_reason == ELS_RJT_BUSY)
+ || (els_rjt->rej.er_reason == ELS_RJT_UNAB))
+ && (iport->fabric.retry_counter < iport->max_plogi_retries)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Fabric PLOGI ELS_LS_RJT BUSY. Retry from timer routine",
+ iport->fcid);
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Fabric PLOGI ELS_LS_RJT. Halting discovery",
+ iport->fcid);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport fcid: 0x%x Canceling fabric disc timer\n",
+ iport->fcid);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ iport->fabric.retry_counter = 0;
+ return;
+ }
+ break;
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PLOGI response not accepted: 0x%x",
+ plogi_rsp->els.fl_cmd);
+ atomic64_inc(&iport->iport_stats.fabric_plogi_misc_rejects);
+ break;
+ }
+}
+
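+/*
+ * fdls_process_fdmi_plogi_rsp - Handle the PLOGI response from the
+ * management server. On accept, send the FDMI RHBA and RPA
+ * registrations and arm the FDMI timer.
+ */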
+static void fdls_process_fdmi_plogi_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ struct fc_std_flogi *plogi_rsp = (struct fc_std_flogi *)fchdr;
+ struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *)fchdr;
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fnic *fnic = iport->fnic;
+ u64 fdmi_tov;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ if (iport->active_oxid_fdmi_plogi != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fdls), oxid, iport->active_oxid_fdmi_plogi);
+ return;
+ }
+
+ iport->fabric.fdmi_pending &= ~FDLS_FDMI_PLOGI_PENDING;
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_plogi);
+
+ if (ntoh24(fchdr->fh_s_id) == FC_FID_MGMT_SERV) {
+ timer_delete_sync(&iport->fabric.fdmi_timer);
+ iport->fabric.fdmi_pending = 0;
+ switch (plogi_rsp->els.fl_cmd) {
+ case ELS_LS_ACC:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS process fdmi PLOGI response status: ELS_LS_ACC\n");
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Sending fdmi registration for port 0x%x\n",
+ iport->fcid);
+
+ fdls_fdmi_register_hba(iport);
+ fdls_fdmi_register_pa(iport);
+ fdmi_tov = jiffies + msecs_to_jiffies(5000);
+ mod_timer(&iport->fabric.fdmi_timer,
+ round_jiffies(fdmi_tov));
+ break;
+ case ELS_LS_RJT:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Fabric FDMI PLOGI returned ELS_LS_RJT reason: 0x%x",
+ els_rjt->rej.er_reason);
+
+ if (((els_rjt->rej.er_reason == ELS_RJT_BUSY)
+ || (els_rjt->rej.er_reason == ELS_RJT_UNAB))
+ && (iport->fabric.fdmi_retry < 7)) {
+ iport->fabric.fdmi_retry++;
+ fdls_send_fdmi_plogi(iport);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+}
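+
+/*
+ * fdls_process_fdmi_reg_ack - Handle the accept for an FDMI RHBA/RPA
+ * registration and cancel the FDMI timer once nothing is pending.
+ */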
+static void fdls_process_fdmi_reg_ack(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr,
+ int rsp_type)
+{
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+
+ if (!iport->fabric.fdmi_pending) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Received FDMI ack while not waiting: 0x%x\n",
+ FNIC_STD_GET_OX_ID(fchdr));
+ return;
+ }
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ if ((iport->active_oxid_fdmi_rhba != oxid) &&
+ (iport->active_oxid_fdmi_rpa != oxid)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. oxid recvd: 0x%x, active oxids(rhba,rpa): 0x%x, 0x%x\n",
+ oxid, iport->active_oxid_fdmi_rhba, iport->active_oxid_fdmi_rpa);
+ return;
+ }
+ if (FNIC_FRAME_TYPE(oxid) == FNIC_FRAME_TYPE_FDMI_RHBA) {
+ iport->fabric.fdmi_pending &= ~FDLS_FDMI_REG_HBA_PENDING;
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rhba);
+ } else {
+ iport->fabric.fdmi_pending &= ~FDLS_FDMI_RPA_PENDING;
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rpa);
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport fcid: 0x%x: Received FDMI registration ack\n",
+ iport->fcid);
+
+ if (!iport->fabric.fdmi_pending) {
+ timer_delete_sync(&iport->fabric.fdmi_timer);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport fcid: 0x%x: Canceling FDMI timer\n",
+ iport->fcid);
+ }
+}
+
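+/*
+ * fdls_process_fdmi_abts_rsp - Handle the BA_ACC/BA_RJT for an FDMI
+ * ABTS; free the aborted OXID and retry FDMI once nothing is pending.
+ */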
+static void fdls_process_fdmi_abts_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint32_t s_id;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+
+ s_id = ntoh24(FNIC_STD_GET_S_ID(fchdr));
+
+ if (s_id != FC_FID_MGMT_SERV) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received abts rsp with invalid SID: 0x%x. Dropping frame",
+ s_id);
+ return;
+ }
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ switch (FNIC_FRAME_TYPE(oxid)) {
+ case FNIC_FRAME_TYPE_FDMI_PLOGI:
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_plogi);
+
+ iport->fabric.fdmi_pending &= ~FDLS_FDMI_PLOGI_PENDING;
+ iport->fabric.fdmi_pending &= ~FDLS_FDMI_ABORT_PENDING;
+ break;
+ case FNIC_FRAME_TYPE_FDMI_RHBA:
+ iport->fabric.fdmi_pending &= ~FDLS_FDMI_REG_HBA_PENDING;
+
+ /* If RPA is still pending, don't turn off ABORT PENDING.
+ * We count on the timer to detect the ABTS timeout and take
+ * corrective action.
+ */
+ if (!(iport->fabric.fdmi_pending & FDLS_FDMI_RPA_PENDING))
+ iport->fabric.fdmi_pending &= ~FDLS_FDMI_ABORT_PENDING;
+
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rhba);
+ break;
+ case FNIC_FRAME_TYPE_FDMI_RPA:
+ iport->fabric.fdmi_pending &= ~FDLS_FDMI_RPA_PENDING;
+
+ /* If RHBA is still pending, don't turn off ABORT PENDING.
+ * We count on the timer to detect the ABTS timeout and take
+ * corrective action.
+ */
+ if (!(iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING))
+ iport->fabric.fdmi_pending &= ~FDLS_FDMI_ABORT_PENDING;
+
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rpa);
+ break;
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received abts rsp with invalid oxid: 0x%x. Dropping frame",
+ oxid);
+ break;
+ }
+
+ /*
+ * Only if ABORT PENDING is off, delete the timer, and if no other
+ * operations are pending, retry FDMI.
+ * Otherwise, let the timer pop and take the appropriate action.
+ */
+ if (!(iport->fabric.fdmi_pending & FDLS_FDMI_ABORT_PENDING)) {
+ timer_delete_sync(&iport->fabric.fdmi_timer);
+ if (!iport->fabric.fdmi_pending)
+ fdls_fdmi_retry_plogi(iport);
+ }
+}
+
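+/*
+ * fdls_process_fabric_abts_rsp - Handle the BA_ACC/BA_RJT for a fabric
+ * request ABTS and retry the aborted request or restart fabric PLOGI.
+ */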
+static void
+fdls_process_fabric_abts_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint32_t s_id;
+ struct fc_std_abts_ba_acc *ba_acc = (struct fc_std_abts_ba_acc *)fchdr;
+ struct fc_std_abts_ba_rjt *ba_rjt;
+ uint32_t fabric_state = iport->fabric.state;
+ struct fnic *fnic = iport->fnic;
+ int frame_type;
+ uint16_t oxid;
+
+ s_id = ntoh24(fchdr->fh_s_id);
+ ba_rjt = (struct fc_std_abts_ba_rjt *) fchdr;
+
+ if (!((s_id == FC_FID_DIR_SERV) || (s_id == FC_FID_FLOGI)
+ || (s_id == FC_FID_FCTRL))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received abts rsp with invalid SID: 0x%x. Dropping frame",
+ s_id);
+ return;
+ }
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received abts rsp with invalid oxid: 0x%x. Dropping frame",
+ oxid);
+ return;
+ }
+
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n", iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED;
+
+ if (fchdr->fh_r_ctl == FC_RCTL_BA_ACC) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received abts rsp BA_ACC for fabric_state: %d OX_ID: 0x%x",
+ fabric_state, be16_to_cpu(ba_acc->acc.ba_ox_id));
+ } else if (fchdr->fh_r_ctl == FC_RCTL_BA_RJT) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "BA_RJT fs: %d OX_ID: 0x%x rc: 0x%x rce: 0x%x",
+ fabric_state, FNIC_STD_GET_OX_ID(&ba_rjt->fchdr),
+ ba_rjt->rjt.br_reason, ba_rjt->rjt.br_explan);
+ }
+
+ frame_type = FNIC_FRAME_TYPE(oxid);
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
+ /* Currently the error handling/retry logic is the same for ABTS BA_ACC & BA_RJT */
+ switch (frame_type) {
+ case FNIC_FRAME_TYPE_FABRIC_FLOGI:
+ if (iport->fabric.retry_counter < iport->max_flogi_retries)
+ fdls_send_fabric_flogi(iport);
+ else
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Exceeded max FLOGI retries");
+ break;
+ case FNIC_FRAME_TYPE_FABRIC_LOGO:
+ if (iport->fabric.retry_counter < FABRIC_LOGO_MAX_RETRY)
+ fdls_send_fabric_logo(iport);
+ break;
+ case FNIC_FRAME_TYPE_FABRIC_PLOGI:
+ if (iport->fabric.retry_counter < iport->max_plogi_retries)
+ fdls_send_fabric_plogi(iport);
+ else
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Exceeded max PLOGI retries");
+ break;
+ case FNIC_FRAME_TYPE_FABRIC_RPN:
+ if (iport->fabric.retry_counter < FDLS_RETRY_COUNT)
+ fdls_send_rpn_id(iport);
+ else
+ /* go back to fabric Plogi */
+ fnic_fdls_start_plogi(iport);
+ break;
+ case FNIC_FRAME_TYPE_FABRIC_SCR:
+ if (iport->fabric.retry_counter < FDLS_RETRY_COUNT)
+ fdls_send_scr(iport);
+ else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "SCR exhausted retries. Start fabric PLOGI %p",
+ iport);
+ fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */
+ }
+ break;
+ case FNIC_FRAME_TYPE_FABRIC_RFT:
+ if (iport->fabric.retry_counter < FDLS_RETRY_COUNT)
+ fdls_send_register_fc4_types(iport);
+ else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RFT exhausted retries. Start fabric PLOGI %p",
+ iport);
+ fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */
+ }
+ break;
+ case FNIC_FRAME_TYPE_FABRIC_RFF:
+ if (iport->fabric.retry_counter < FDLS_RETRY_COUNT)
+ fdls_send_register_fc4_features(iport);
+ else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RFF exhausted retries. Start fabric PLOGI %p",
+ iport);
+ fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */
+ }
+ break;
+ case FNIC_FRAME_TYPE_FABRIC_GPN_FT:
+ if (iport->fabric.retry_counter <= FDLS_RETRY_COUNT)
+ fdls_send_gpn_ft(iport, fabric_state);
+ else
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "GPN FT exhausted retries. Start fabric PLOGI %p",
+ iport);
+ break;
+ default:
+ /*
+ * We should not be here since we already validated rx oxid with
+ * our active_oxid_fabric_req
+ */
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Invalid OXID/active oxid 0x%x\n", oxid);
+ WARN_ON(true);
+ return;
+ }
+}
+
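+/*
+ * fdls_process_abts_req - Handle an incoming ABTS: free the matching
+ * tport OXID (if any) and respond with a BA_ACC.
+ */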
+static void
+fdls_process_abts_req(struct fnic_iport_s *iport, struct fc_frame_header *fchdr)
+{
+ uint8_t *frame;
+ struct fc_std_abts_ba_acc *pba_acc;
+ uint32_t nport_id;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+ struct fnic_tport_s *tport;
+ struct fnic *fnic = iport->fnic;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_abts_ba_acc);
+
+ nport_id = ntoh24(fchdr->fh_s_id);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received abort from SID 0x%8x", nport_id);
+
+ tport = fnic_find_tport_by_fcid(iport, nport_id);
+ if (tport) {
+ if (tport->active_oxid == oxid) {
+ tport->flags |= FNIC_FDLS_TGT_ABORT_ISSUED;
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+ }
+ }
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "0x%x: Failed to allocate frame to send response for ABTS req",
+ iport->fcid);
+ return;
+ }
+
+ pba_acc = (struct fc_std_abts_ba_acc *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ *pba_acc = (struct fc_std_abts_ba_acc) {
+ .fchdr = {.fh_r_ctl = FC_RCTL_BA_ACC,
+ .fh_f_ctl = {FNIC_FCP_RSP_FCTL, 0, 0}},
+ .acc = {.ba_low_seq_cnt = 0, .ba_high_seq_cnt = cpu_to_be16(0xFFFF)}
+ };
+
+ FNIC_STD_SET_S_ID(pba_acc->fchdr, fchdr->fh_d_id);
+ FNIC_STD_SET_D_ID(pba_acc->fchdr, fchdr->fh_s_id);
+ FNIC_STD_SET_OX_ID(pba_acc->fchdr, FNIC_STD_GET_OX_ID(fchdr));
+ FNIC_STD_SET_RX_ID(pba_acc->fchdr, FNIC_STD_GET_RX_ID(fchdr));
+
+ pba_acc->acc.ba_rx_id = cpu_to_be16(FNIC_STD_GET_RX_ID(fchdr));
+ pba_acc->acc.ba_ox_id = cpu_to_be16(FNIC_STD_GET_OX_ID(fchdr));
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send BA ACC with oxid: 0x%x",
+ iport->fcid, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+}
+
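+/*
+ * fdls_process_unsupported_els_req - Reject an ELS request we do not
+ * support with an LS_RJT (reason code 0x0B, command not supported).
+ */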
+static void
+fdls_process_unsupported_els_req(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint8_t *frame;
+ struct fc_std_els_rjt_rsp *pls_rsp;
+ uint16_t oxid;
+ uint32_t d_id = ntoh24(fchdr->fh_d_id);
+ struct fnic *fnic = iport->fnic;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_els_rjt_rsp);
+
+ if (iport->fcid != d_id) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping unsupported ELS with illegal frame bits 0x%x\n",
+ d_id);
+ atomic64_inc(&iport->iport_stats.unsupported_frames_dropped);
+ return;
+ }
+
+ if ((iport->state != FNIC_IPORT_STATE_READY)
+ && (iport->state != FNIC_IPORT_STATE_FABRIC_DISC)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping unsupported ELS request in iport state: %d",
+ iport->state);
+ atomic64_inc(&iport->iport_stats.unsupported_frames_dropped);
+ return;
+ }
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send response to unsupported ELS request");
+ return;
+ }
+
+ pls_rsp = (struct fc_std_els_rjt_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_els_rjt_frame(frame, iport);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Process unsupported ELS request from SID: 0x%x",
+ iport->fcid, ntoh24(fchdr->fh_s_id));
+
+ /* We don't support this ELS request, send a reject */
+ pls_rsp->rej.er_reason = 0x0B;
+ pls_rsp->rej.er_explan = 0x0;
+ pls_rsp->rej.er_vendor = 0x0;
+
+ FNIC_STD_SET_S_ID(pls_rsp->fchdr, fchdr->fh_d_id);
+ FNIC_STD_SET_D_ID(pls_rsp->fchdr, fchdr->fh_s_id);
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ FNIC_STD_SET_OX_ID(pls_rsp->fchdr, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+}
+
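+/*
+ * fdls_process_rls_req - Answer an RLS request with an accept carrying
+ * the link error status block (link failure count).
+ */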
+static void
+fdls_process_rls_req(struct fnic_iport_s *iport, struct fc_frame_header *fchdr)
+{
+ uint8_t *frame;
+ struct fc_std_rls_acc *prls_acc_rsp;
+ uint16_t oxid;
+ struct fnic *fnic = iport->fnic;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_rls_acc);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Process RLS request %d", iport->fnic->fnic_num);
+
+ if ((iport->state != FNIC_IPORT_STATE_READY)
+ && (iport->state != FNIC_IPORT_STATE_FABRIC_DISC)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received RLS req in iport state: %d. Dropping the frame.",
+ iport->state);
+ return;
+ }
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send RLS accept");
+ return;
+ }
+ prls_acc_rsp = (struct fc_std_rls_acc *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+
+ FNIC_STD_SET_S_ID(prls_acc_rsp->fchdr, fchdr->fh_d_id);
+ FNIC_STD_SET_D_ID(prls_acc_rsp->fchdr, fchdr->fh_s_id);
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ FNIC_STD_SET_OX_ID(prls_acc_rsp->fchdr, oxid);
+ FNIC_STD_SET_RX_ID(prls_acc_rsp->fchdr, FNIC_UNASSIGNED_RXID);
+
+ FNIC_STD_SET_F_CTL(prls_acc_rsp->fchdr, FNIC_ELS_REP_FCTL << 16);
+ FNIC_STD_SET_R_CTL(prls_acc_rsp->fchdr, FC_RCTL_ELS_REP);
+ FNIC_STD_SET_TYPE(prls_acc_rsp->fchdr, FC_TYPE_ELS);
+
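+ /* LS_ACC payload: report our link-down count as the link failure count in the LESB */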
+ prls_acc_rsp->els.rls_cmd = ELS_LS_ACC;
+ prls_acc_rsp->els.rls_lesb.lesb_link_fail =
+ cpu_to_be32(iport->fnic->link_down_cnt);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+}
+
+static void
+fdls_process_els_req(struct fnic_iport_s *iport, struct fc_frame_header *fchdr,
+ uint32_t len)
+{
+ uint8_t *frame;
+ struct fc_std_els_acc_rsp *pels_acc;
+ uint16_t oxid;
+ uint8_t *fc_payload;
+ uint8_t type;
+ struct fnic *fnic = iport->fnic;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET;
+
+ fc_payload = (uint8_t *) fchdr + sizeof(struct fc_frame_header);
+ type = *fc_payload;
+
+ if ((iport->state != FNIC_IPORT_STATE_READY)
+ && (iport->state != FNIC_IPORT_STATE_FABRIC_DISC)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping ELS frame type: 0x%x in iport state: %d",
+ type, iport->state);
+ return;
+ }
+ switch (type) {
+ case ELS_ECHO:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "sending LS_ACC for ECHO request %d\n",
+ iport->fnic->fnic_num);
+ break;
+
+ case ELS_RRQ:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "sending LS_ACC for RRQ request %d\n",
+ iport->fnic->fnic_num);
+ break;
+
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "sending LS_ACC for 0x%x ELS frame\n", type);
+ break;
+ }
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send ELS response for 0x%x",
+ type);
+ return;
+ }
+
+ if (type == ELS_ECHO) {
+ /* Brocade sends a longer payload; copy the whole frame back */
+ memcpy(frame, fchdr, len);
+ }
+
+ pels_acc = (struct fc_std_els_acc_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_els_acc_frame(frame, iport);
+
+ FNIC_STD_SET_D_ID(pels_acc->fchdr, fchdr->fh_s_id);
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ FNIC_STD_SET_OX_ID(pels_acc->fchdr, oxid);
+
+ if (type == ELS_ECHO)
+ frame_size += len;
+ else
+ frame_size += sizeof(struct fc_std_els_acc_rsp);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+}
+
+static void
+fdls_process_tgt_abts_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint32_t s_id;
+ struct fnic_tport_s *tport;
+ uint32_t tport_state;
+ struct fc_std_abts_ba_acc *ba_acc;
+ struct fc_std_abts_ba_rjt *ba_rjt;
+ uint16_t oxid;
+ struct fnic *fnic = iport->fnic;
+ int frame_type;
+
+ s_id = ntoh24(fchdr->fh_s_id);
+ ba_acc = (struct fc_std_abts_ba_acc *)fchdr;
+ ba_rjt = (struct fc_std_abts_ba_rjt *)fchdr;
+
+ tport = fnic_find_tport_by_fcid(iport, s_id);
+ if (!tport) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Received tgt abts rsp with invalid SID: 0x%x", s_id);
+ return;
+ }
+ if (tport->timer_pending) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "tport 0x%p Canceling fabric disc timer\n", tport);
+ fnic_del_tport_timer_sync(fnic, tport);
+ }
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Received tgt abts rsp in iport state(%d). Dropping.",
+ iport->state);
+ return;
+ }
+ tport->timer_pending = 0;
+ tport->flags &= ~FNIC_FDLS_TGT_ABORT_ISSUED;
+ tport_state = tport->state;
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ /* Determine from the OXID which outgoing request this abort response is for */
+ frame_type = FNIC_FRAME_TYPE(oxid);
+ switch (frame_type) {
+ case FNIC_FRAME_TYPE_TGT_ADISC:
+ if (fchdr->fh_r_ctl == FC_RCTL_BA_ACC) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "OX_ID: 0x%x tgt_fcid: 0x%x rcvd tgt adisc abts resp BA_ACC",
+ be16_to_cpu(ba_acc->acc.ba_ox_id),
+ tport->fcid);
+ } else if (fchdr->fh_r_ctl == FC_RCTL_BA_RJT) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "ADISC BA_RJT rcvd tport_fcid: 0x%x tport_state: %d ",
+ tport->fcid, tport_state);
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "reason code: 0x%x reason code explanation:0x%x ",
+ ba_rjt->rjt.br_reason,
+ ba_rjt->rjt.br_explan);
+ }
+ if ((tport->retry_counter < FDLS_RETRY_COUNT)
+ && (fchdr->fh_r_ctl == FC_RCTL_BA_ACC)) {
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+ fdls_send_tgt_adisc(iport, tport);
+ return;
+ }
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "ADISC not responding. Deleting target port: 0x%x",
+ tport->fcid);
+ fdls_delete_tport(iport, tport);
+ /* Restart discovery of targets */
+ if ((iport->state == FNIC_IPORT_STATE_READY)
+ && (iport->fabric.state != FDLS_STATE_SEND_GPNFT)
+ && (iport->fabric.state != FDLS_STATE_RSCN_GPN_FT)) {
+ fdls_send_gpn_ft(iport, FDLS_STATE_SEND_GPNFT);
+ }
+ break;
+ case FNIC_FRAME_TYPE_TGT_PLOGI:
+ if (fchdr->fh_r_ctl == FC_RCTL_BA_ACC) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Received tgt PLOGI abts response BA_ACC tgt_fcid: 0x%x",
+ tport->fcid);
+ } else if (fchdr->fh_r_ctl == FC_RCTL_BA_RJT) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PLOGI BA_RJT received for tport_fcid: 0x%x OX_ID: 0x%x",
+ tport->fcid, FNIC_STD_GET_OX_ID(fchdr));
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "reason code: 0x%x reason code explanation: 0x%x",
+ ba_rjt->rjt.br_reason,
+ ba_rjt->rjt.br_explan);
+ }
+ if ((tport->retry_counter < iport->max_plogi_retries)
+ && (fchdr->fh_r_ctl == FC_RCTL_BA_ACC)) {
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+ fdls_send_tgt_plogi(iport, tport);
+ return;
+ }
+
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+ fdls_delete_tport(iport, tport);
+ /* Restart discovery of targets */
+ if ((iport->state == FNIC_IPORT_STATE_READY)
+ && (iport->fabric.state != FDLS_STATE_SEND_GPNFT)
+ && (iport->fabric.state != FDLS_STATE_RSCN_GPN_FT)) {
+ fdls_send_gpn_ft(iport, FDLS_STATE_SEND_GPNFT);
+ }
+ break;
+ case FNIC_FRAME_TYPE_TGT_PRLI:
+ if (fchdr->fh_r_ctl == FC_RCTL_BA_ACC) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Received tgt PRLI abts response BA_ACC",
+ tport->fcid);
+ } else if (fchdr->fh_r_ctl == FC_RCTL_BA_RJT) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PRLI BA_RJT received for tport_fcid: 0x%x OX_ID: 0x%x ",
+ tport->fcid, FNIC_STD_GET_OX_ID(fchdr));
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "reason code: 0x%x reason code explanation: 0x%x",
+ ba_rjt->rjt.br_reason,
+ ba_rjt->rjt.br_explan);
+ }
+ if ((tport->retry_counter < FDLS_RETRY_COUNT)
+ && (fchdr->fh_r_ctl == FC_RCTL_BA_ACC)) {
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+ fdls_send_tgt_prli(iport, tport);
+ return;
+ }
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+ fdls_send_tgt_plogi(iport, tport); /* go back to plogi */
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_PLOGI);
+ break;
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received ABTS response for unknown frame %p", iport);
+ break;
+ }
+
+}
+
+static void
+fdls_process_plogi_req(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint8_t *frame;
+ struct fc_std_els_rjt_rsp *pplogi_rsp;
+ uint16_t oxid;
+ uint32_t d_id = ntoh24(fchdr->fh_d_id);
+ struct fnic *fnic = iport->fnic;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_els_rjt_rsp);
+
+ if (iport->fcid != d_id) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received PLOGI with illegal frame bits. Dropping frame from 0x%x",
+ d_id);
+ return;
+ }
+
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received PLOGI request in iport state: %d Dropping frame",
+ iport->state);
+ return;
+ }
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send response to PLOGI request");
+ return;
+ }
+
+ pplogi_rsp = (struct fc_std_els_rjt_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_els_rjt_frame(frame, iport);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Process PLOGI request from SID: 0x%x",
+ iport->fcid, ntoh24(fchdr->fh_s_id));
+
+ /* We don't support PLOGI request, send a reject */
+ pplogi_rsp->rej.er_reason = 0x0B;
+ pplogi_rsp->rej.er_explan = 0x0;
+ pplogi_rsp->rej.er_vendor = 0x0;
+
+ FNIC_STD_SET_S_ID(pplogi_rsp->fchdr, fchdr->fh_d_id);
+ FNIC_STD_SET_D_ID(pplogi_rsp->fchdr, fchdr->fh_s_id);
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ FNIC_STD_SET_OX_ID(pplogi_rsp->fchdr, oxid);
+
+ fnic_send_fcoe_frame(iport, frame, frame_size);
+}
+
+static void
+fdls_process_logo_req(struct fnic_iport_s *iport, struct fc_frame_header *fchdr)
+{
+ struct fc_std_logo *logo = (struct fc_std_logo *)fchdr;
+ uint32_t nport_id;
+ uint64_t nport_name;
+ struct fnic_tport_s *tport;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+
+ nport_id = ntoh24(logo->els.fl_n_port_id);
+ nport_name = be64_to_cpu(logo->els.fl_n_port_wwn);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Process LOGO request from fcid: 0x%x", nport_id);
+
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Dropping LOGO req from 0x%x in iport state: %d",
+ nport_id, iport->state);
+ return;
+ }
+
+ tport = fnic_find_tport_by_fcid(iport, nport_id);
+
+ if (!tport) {
+ /* We are not logged in with the nport, log and drop... */
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Received LOGO from an nport not logged in: 0x%x(0x%llx)",
+ nport_id, nport_name);
+ return;
+ }
+ if (tport->fcid != nport_id) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Received LOGO with invalid target port fcid: 0x%x(0x%llx)",
+ nport_id, nport_name);
+ return;
+ }
+ if (tport->timer_pending) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "tport fcid 0x%x: Canceling disc timer\n",
+ tport->fcid);
+ fnic_del_tport_timer_sync(fnic, tport);
+ tport->timer_pending = 0;
+ }
+
+ /* Got a LOGO in response to an ADISC to a target that has logged out */
+ if (tport->state == FDLS_TGT_STATE_ADISC) {
+ tport->retry_counter = 0;
+ oxid = tport->active_oxid;
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+ fdls_delete_tport(iport, tport);
+ fdls_send_logo_resp(iport, &logo->fchdr);
+ if ((iport->state == FNIC_IPORT_STATE_READY)
+ && (fdls_get_state(&iport->fabric) != FDLS_STATE_SEND_GPNFT)
+ && (fdls_get_state(&iport->fabric) != FDLS_STATE_RSCN_GPN_FT)) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Sending GPNFT in response to LOGO from Target:0x%x",
+ nport_id);
+ fdls_send_gpn_ft(iport, FDLS_STATE_SEND_GPNFT);
+ return;
+ }
+ } else {
+ fdls_delete_tport(iport, tport);
+ }
+ if (iport->state == FNIC_IPORT_STATE_READY) {
+ fdls_send_logo_resp(iport, &logo->fchdr);
+ if ((fdls_get_state(&iport->fabric) != FDLS_STATE_SEND_GPNFT) &&
+ (fdls_get_state(&iport->fabric) != FDLS_STATE_RSCN_GPN_FT)) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Sending GPNFT in response to LOGO from Target:0x%x",
+ nport_id);
+ fdls_send_gpn_ft(iport, FDLS_STATE_SEND_GPNFT);
+ }
+ }
+}
+
+static void
+fdls_process_rscn(struct fnic_iport_s *iport, struct fc_frame_header *fchdr)
+{
+ struct fc_std_rscn *rscn;
+ struct fc_els_rscn_page *rscn_port = NULL;
+ int num_ports;
+ struct fnic_tport_s *tport, *next;
+ uint32_t nport_id;
+ uint8_t fcid[3];
+ int newports = 0;
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fnic *fnic = iport->fnic;
+ int rscn_type = NOT_PC_RSCN;
+ uint32_t sid = ntoh24(fchdr->fh_s_id);
+ unsigned long reset_fnic_list_lock_flags = 0;
+ uint16_t rscn_payload_len;
+
+ atomic64_inc(&iport->iport_stats.num_rscns);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS process RSCN %p", iport);
+
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS RSCN received in state(%d). Dropping",
+ fdls_get_state(fdls));
+ return;
+ }
+
+ rscn = (struct fc_std_rscn *)fchdr;
+ rscn_payload_len = be16_to_cpu(rscn->els.rscn_plen);
+
+ /* frame validation */
+ if ((rscn_payload_len % 4 != 0) || (rscn_payload_len < 8)
+ || (rscn_payload_len > 1024)
+ || (rscn->els.rscn_page_len != 4)) {
+ num_ports = 0;
+ if ((rscn_payload_len == 0xFFFF)
+ && (sid == FC_FID_FCTRL)) {
+ rscn_type = PC_RSCN;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "pcrscn: PCRSCN received. sid: 0x%x payload len: 0x%x",
+ sid, rscn_payload_len);
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RSCN payload_len: 0x%x page_len: 0x%x",
+ rscn_payload_len, rscn->els.rscn_page_len);
+ /* Malformed or unexpected RSCN payload: send ADISC to all the tports */
+ list_for_each_entry_safe(tport, next, &iport->tport_list, links) {
+ if (tport->state == FDLS_TGT_STATE_READY)
+ tport->flags |= FNIC_FDLS_TPORT_SEND_ADISC;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RSCN for port id: 0x%x", tport->fcid);
+ }
+ } /* end else */
+ } else {
+ num_ports = (rscn_payload_len - 4) / rscn->els.rscn_page_len;
+ rscn_port = (struct fc_els_rscn_page *)(rscn + 1);
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RSCN received for num_ports: %d payload_len: %d page_len: %d ",
+ num_ports, rscn_payload_len, rscn->els.rscn_page_len);
+
+ /*
+ * An RSCN has at least one Port_ID page, but it may not specify any
+ * port_id. If no port_id is specified in a Port_ID page, send
+ * ADISC to all the tports.
+ */
+
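+ /* Walk each Port_ID page and mark the matching tports for ADISC verification */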
+ while (num_ports) {
+
+ memcpy(fcid, rscn_port->rscn_fid, 3);
+
+ nport_id = ntoh24(fcid);
+ rscn_port++;
+ num_ports--;
+ /* A zero port_id means the affected ports are unspecified: send ADISC to all the tports */
+ if (nport_id == 0) {
+ list_for_each_entry_safe(tport, next, &iport->tport_list,
+ links) {
+ if (tport->state == FDLS_TGT_STATE_READY)
+ tport->flags |= FNIC_FDLS_TPORT_SEND_ADISC;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RSCN for port id: 0x%x", tport->fcid);
+ }
+ break;
+ }
+ tport = fnic_find_tport_by_fcid(iport, nport_id);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "RSCN port id list: 0x%x", nport_id);
+
+ if (!tport) {
+ newports++;
+ continue;
+ }
+ if (tport->state == FDLS_TGT_STATE_READY)
+ tport->flags |= FNIC_FDLS_TPORT_SEND_ADISC;
+ }
+
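+ /*
+ * When PC-RSCN handling is enabled, a PC-RSCN (payload length 0xFFFF
+ * from the fabric controller) queues this fnic for a host reset on the
+ * reset worker; any other RSCN is accepted and triggers GPN_FT
+ * re-discovery of targets.
+ */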
+ if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON &&
+ rscn_type == PC_RSCN && fnic->role == FNIC_ROLE_FCP_INITIATOR) {
+
+ if (fnic->pc_rscn_handling_status == PC_RSCN_HANDLING_IN_PROGRESS) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PCRSCN handling already in progress. Skip host reset: %d",
+ iport->fnic->fnic_num);
+ return;
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Processing PCRSCN. Queuing fnic for host reset: %d",
+ iport->fnic->fnic_num);
+ fnic->pc_rscn_handling_status = PC_RSCN_HANDLING_IN_PROGRESS;
+
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+
+ spin_lock_irqsave(&reset_fnic_list_lock,
+ reset_fnic_list_lock_flags);
+ list_add_tail(&fnic->links, &reset_fnic_list);
+ spin_unlock_irqrestore(&reset_fnic_list_lock,
+ reset_fnic_list_lock_flags);
+
+ queue_work(reset_fnic_work_queue, &reset_fnic_work);
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS process RSCN sending GPN_FT: newports: %d", newports);
+ fdls_send_gpn_ft(iport, FDLS_STATE_RSCN_GPN_FT);
+ fdls_send_rscn_resp(iport, fchdr);
+ }
+}
+
+void fnic_fdls_disc_start(struct fnic_iport_s *iport)
+{
+ struct fnic *fnic = iport->fnic;
+
+ fc_host_fabric_name(iport->fnic->host) = 0;
+ fc_host_post_event(iport->fnic->host, fc_get_event_number(),
+ FCH_EVT_LIPRESET, 0);
+
+ if (!iport->usefip) {
+ if (iport->flags & FNIC_FIRST_LINK_UP) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ fnic_scsi_fcpio_reset(iport->fnic);
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+
+ iport->flags &= ~FNIC_FIRST_LINK_UP;
+ }
+ fnic_fdls_start_flogi(iport);
+ } else
+ fnic_fdls_start_plogi(iport);
+}
+
+static void
+fdls_process_adisc_req(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ struct fc_std_els_adisc *padisc_acc;
+ struct fc_std_els_adisc *adisc_req = (struct fc_std_els_adisc *)fchdr;
+ uint64_t frame_wwnn;
+ uint64_t frame_wwpn;
+ uint32_t tgt_fcid;
+ struct fnic_tport_s *tport;
+ uint8_t *fcid;
+ uint8_t *rjt_frame;
+ uint8_t *acc_frame;
+ struct fc_std_els_rjt_rsp *prjts_rsp;
+ uint16_t oxid;
+ struct fnic *fnic = iport->fnic;
+ uint16_t rjt_frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_els_rjt_rsp);
+ uint16_t acc_frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_std_els_adisc);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Process ADISC request %d", iport->fnic->fnic_num);
+
+ fcid = FNIC_STD_GET_S_ID(fchdr);
+ tgt_fcid = ntoh24(fcid);
+ tport = fnic_find_tport_by_fcid(iport, tgt_fcid);
+ if (!tport) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "tport for fcid: 0x%x not found. Dropping ADISC req.",
+ tgt_fcid);
+ return;
+ }
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Dropping ADISC req from fcid: 0x%x in iport state: %d",
+ tgt_fcid, iport->state);
+ return;
+ }
+
+ frame_wwnn = be64_to_cpu(adisc_req->els.adisc_wwnn);
+ frame_wwpn = be64_to_cpu(adisc_req->els.adisc_wwpn);
+
+ if ((frame_wwnn != tport->wwnn) || (frame_wwpn != tport->wwpn)) {
+ /* send reject */
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "ADISC req from fcid: 0x%x mismatch wwpn: 0x%llx wwnn: 0x%llx",
+ tgt_fcid, frame_wwpn, frame_wwnn);
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "local tport wwpn: 0x%llx wwnn: 0x%llx. Sending RJT",
+ tport->wwpn, tport->wwnn);
+
+ rjt_frame = fdls_alloc_frame(iport);
+ if (rjt_frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate rjt_frame to send response to ADISC request");
+ return;
+ }
+
+ prjts_rsp = (struct fc_std_els_rjt_rsp *) (rjt_frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ fdls_init_els_rjt_frame(rjt_frame, iport);
+
+ prjts_rsp->rej.er_reason = 0x03; /* logical error */
+ prjts_rsp->rej.er_explan = 0x1E; /* N_port login required */
+ prjts_rsp->rej.er_vendor = 0x0;
+
+ FNIC_STD_SET_S_ID(prjts_rsp->fchdr, fchdr->fh_d_id);
+ FNIC_STD_SET_D_ID(prjts_rsp->fchdr, fchdr->fh_s_id);
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ FNIC_STD_SET_OX_ID(prjts_rsp->fchdr, oxid);
+
+ fnic_send_fcoe_frame(iport, rjt_frame, rjt_frame_size);
+ return;
+ }
+
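+ /* WWNN/WWPN match the cached tport: reply with an ADISC LS_ACC carrying our own port and node names */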
+ acc_frame = fdls_alloc_frame(iport);
+ if (acc_frame == NULL) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send ADISC accept");
+ return;
+ }
+
+ padisc_acc = (struct fc_std_els_adisc *) (acc_frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+
+ FNIC_STD_SET_S_ID(padisc_acc->fchdr, fchdr->fh_d_id);
+ FNIC_STD_SET_D_ID(padisc_acc->fchdr, fchdr->fh_s_id);
+
+ FNIC_STD_SET_F_CTL(padisc_acc->fchdr, FNIC_ELS_REP_FCTL << 16);
+ FNIC_STD_SET_R_CTL(padisc_acc->fchdr, FC_RCTL_ELS_REP);
+ FNIC_STD_SET_TYPE(padisc_acc->fchdr, FC_TYPE_ELS);
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ FNIC_STD_SET_OX_ID(padisc_acc->fchdr, oxid);
+ FNIC_STD_SET_RX_ID(padisc_acc->fchdr, FNIC_UNASSIGNED_RXID);
+
+ padisc_acc->els.adisc_cmd = ELS_LS_ACC;
+
+ FNIC_STD_SET_NPORT_NAME(&padisc_acc->els.adisc_wwpn,
+ iport->wwpn);
+ FNIC_STD_SET_NODE_NAME(&padisc_acc->els.adisc_wwnn,
+ iport->wwnn);
+ memcpy(padisc_acc->els.adisc_port_id, fchdr->fh_d_id, 3);
+
+ fnic_send_fcoe_frame(iport, acc_frame, acc_frame_size);
+}
+
+/*
+ * Validate an incoming FCoE frame and return its frame type
+ */
+int
+fnic_fdls_validate_and_get_frame_type(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint8_t type;
+ uint8_t *fc_payload;
+ uint16_t oxid;
+ uint32_t s_id;
+ uint32_t d_id;
+ struct fnic *fnic = iport->fnic;
+ struct fnic_fdls_fabric_s *fabric = &iport->fabric;
+ int oxid_frame_type;
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ fc_payload = (uint8_t *) fchdr + sizeof(struct fc_frame_header);
+ type = *fc_payload;
+ s_id = ntoh24(fchdr->fh_s_id);
+ d_id = ntoh24(fchdr->fh_d_id);
+
+ /* some common validation */
+ if (fdls_get_state(fabric) > FDLS_STATE_FABRIC_FLOGI) {
+ if (iport->fcid != d_id || (!FNIC_FC_FRAME_CS_CTL(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "invalid frame received. Dropping frame");
+ return -1;
+ }
+ }
+
+ /* BLS ABTS response */
+ if ((fchdr->fh_r_ctl == FC_RCTL_BA_ACC)
+ || (fchdr->fh_r_ctl == FC_RCTL_BA_RJT)) {
+ if (!(FNIC_FC_FRAME_TYPE_BLS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received ABTS invalid frame. Dropping frame");
+ return -1;
+
+ }
+ if (fdls_is_oxid_fabric_req(oxid)) {
+ if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received unexpected ABTS RSP(oxid:0x%x) from 0x%x. Dropping frame",
+ oxid, s_id);
+ return -1;
+ }
+ return FNIC_FABRIC_BLS_ABTS_RSP;
+ } else if (fdls_is_oxid_fdmi_req(oxid)) {
+ return FNIC_FDMI_BLS_ABTS_RSP;
+ } else if (fdls_is_oxid_tgt_req(oxid)) {
+ return FNIC_TPORT_BLS_ABTS_RSP;
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received ABTS rsp with unknown oxid(0x%x) from 0x%x. Dropping frame",
+ oxid, s_id);
+ return -1;
+ }
+
+ /* BLS ABTS Req */
+ if ((fchdr->fh_r_ctl == FC_RCTL_BA_ABTS)
+ && (FNIC_FC_FRAME_TYPE_BLS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Receiving Abort Request from s_id: 0x%x", s_id);
+ return FNIC_BLS_ABTS_REQ;
+ }
+
+ /* unsolicited request frames */
+ if (FNIC_FC_FRAME_UNSOLICITED(fchdr)) {
+ switch (type) {
+ case ELS_LOGO:
+ if ((!FNIC_FC_FRAME_FCTL_FIRST_LAST_SEQINIT(fchdr))
+ || (!FNIC_FC_FRAME_UNSOLICITED(fchdr))
+ || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received LOGO invalid frame. Dropping frame");
+ return -1;
+ }
+ return FNIC_ELS_LOGO_REQ;
+ case ELS_RSCN:
+ if ((!FNIC_FC_FRAME_FCTL_FIRST_LAST_SEQINIT(fchdr))
+ || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))
+ || (!FNIC_FC_FRAME_UNSOLICITED(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received RSCN invalid FCTL. Dropping frame");
+ return -1;
+ }
+ if (s_id != FC_FID_FCTRL)
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received RSCN from target FCTL: 0x%x type: 0x%x s_id: 0x%x.",
+ fchdr->fh_f_ctl[0], fchdr->fh_type, s_id);
+ return FNIC_ELS_RSCN_REQ;
+ case ELS_PLOGI:
+ return FNIC_ELS_PLOGI_REQ;
+ case ELS_ECHO:
+ return FNIC_ELS_ECHO_REQ;
+ case ELS_ADISC:
+ return FNIC_ELS_ADISC;
+ case ELS_RLS:
+ return FNIC_ELS_RLS;
+ case ELS_RRQ:
+ return FNIC_ELS_RRQ;
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Unsupported frame (type:0x%02x) from fcid: 0x%x",
+ type, s_id);
+ return FNIC_ELS_UNSUPPORTED_REQ;
+ }
+ }
+
+ /* solicited response from fabric or target */
+ oxid_frame_type = FNIC_FRAME_TYPE(oxid);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "oxid frame code: 0x%x, oxid: 0x%x\n", oxid_frame_type, oxid);
+ switch (oxid_frame_type) {
+ case FNIC_FRAME_TYPE_FABRIC_FLOGI:
+ if (type == ELS_LS_ACC) {
+ if ((s_id != FC_FID_FLOGI)
+ || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received unknown frame. Dropping frame");
+ return -1;
+ }
+ }
+ return FNIC_FABRIC_FLOGI_RSP;
+
+ case FNIC_FRAME_TYPE_FABRIC_PLOGI:
+ if (type == ELS_LS_ACC) {
+ if ((s_id != FC_FID_DIR_SERV)
+ || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received unknown frame. Dropping frame");
+ return -1;
+ }
+ }
+ return FNIC_FABRIC_PLOGI_RSP;
+
+ case FNIC_FRAME_TYPE_FABRIC_SCR:
+ if (type == ELS_LS_ACC) {
+ if ((s_id != FC_FID_FCTRL)
+ || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received unknown frame. Dropping frame");
+ return -1;
+ }
+ }
+ return FNIC_FABRIC_SCR_RSP;
+
+ case FNIC_FRAME_TYPE_FABRIC_RPN:
+ if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received unknown frame. Dropping frame");
+ return -1;
+ }
+ return FNIC_FABRIC_RPN_RSP;
+
+ case FNIC_FRAME_TYPE_FABRIC_RFT:
+ if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received unknown frame. Dropping frame");
+ return -1;
+ }
+ return FNIC_FABRIC_RFT_RSP;
+
+ case FNIC_FRAME_TYPE_FABRIC_RFF:
+ if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received unknown frame. Dropping frame");
+ return -1;
+ }
+ return FNIC_FABRIC_RFF_RSP;
+
+ case FNIC_FRAME_TYPE_FABRIC_GPN_FT:
+ if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received unknown frame. Dropping frame");
+ return -1;
+ }
+ return FNIC_FABRIC_GPN_FT_RSP;
+
+ case FNIC_FRAME_TYPE_FABRIC_LOGO:
+ return FNIC_FABRIC_LOGO_RSP;
+ case FNIC_FRAME_TYPE_FDMI_PLOGI:
+ return FNIC_FDMI_PLOGI_RSP;
+ case FNIC_FRAME_TYPE_FDMI_RHBA:
+ return FNIC_FDMI_REG_HBA_RSP;
+ case FNIC_FRAME_TYPE_FDMI_RPA:
+ return FNIC_FDMI_RPA_RSP;
+ case FNIC_FRAME_TYPE_TGT_PLOGI:
+ return FNIC_TPORT_PLOGI_RSP;
+ case FNIC_FRAME_TYPE_TGT_PRLI:
+ return FNIC_TPORT_PRLI_RSP;
+ case FNIC_FRAME_TYPE_TGT_ADISC:
+ return FNIC_TPORT_ADISC_RSP;
+ case FNIC_FRAME_TYPE_TGT_LOGO:
+ if (!FNIC_FC_FRAME_TYPE_ELS(fchdr)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping Unknown frame in tport solicited exchange range type: 0x%x.",
+ fchdr->fh_type);
+ return -1;
+ }
+ return FNIC_TPORT_LOGO_RSP;
+ default:
+ /* Drop the Rx frame; log it and update stats */
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Solicited response: unknown OXID: 0x%x", oxid);
+ return -1;
+ }
+
+ return -1;
+}
+
+void fnic_fdls_recv_frame(struct fnic_iport_s *iport, void *rx_frame,
+ int len, int fchdr_offset)
+{
+ struct fc_frame_header *fchdr;
+ uint32_t s_id = 0;
+ uint32_t d_id = 0;
+ struct fnic *fnic = iport->fnic;
+ int frame_type;
+
+ fchdr = (struct fc_frame_header *) ((uint8_t *) rx_frame + fchdr_offset);
+ s_id = ntoh24(fchdr->fh_s_id);
+ d_id = ntoh24(fchdr->fh_d_id);
+
+ fnic_debug_dump_fc_frame(fnic, fchdr, len, "Incoming");
+
+ frame_type =
+ fnic_fdls_validate_and_get_frame_type(iport, fchdr);
+
+ /* If we are in fabric LOGO, drop everything except the LOGO response */
+ if (iport->fabric.state == FDLS_STATE_FABRIC_LOGO &&
+ frame_type != FNIC_FABRIC_LOGO_RSP)
+ return;
+
+ switch (frame_type) {
+ case FNIC_FABRIC_FLOGI_RSP:
+ fdls_process_flogi_rsp(iport, fchdr, rx_frame);
+ break;
+ case FNIC_FABRIC_PLOGI_RSP:
+ fdls_process_fabric_plogi_rsp(iport, fchdr);
+ break;
+ case FNIC_FDMI_PLOGI_RSP:
+ fdls_process_fdmi_plogi_rsp(iport, fchdr);
+ break;
+ case FNIC_FABRIC_RPN_RSP:
+ fdls_process_rpn_id_rsp(iport, fchdr);
+ break;
+ case FNIC_FABRIC_RFT_RSP:
+ fdls_process_rft_id_rsp(iport, fchdr);
+ break;
+ case FNIC_FABRIC_RFF_RSP:
+ fdls_process_rff_id_rsp(iport, fchdr);
+ break;
+ case FNIC_FABRIC_SCR_RSP:
+ fdls_process_scr_rsp(iport, fchdr);
+ break;
+ case FNIC_FABRIC_GPN_FT_RSP:
+ fdls_process_gpn_ft_rsp(iport, fchdr, len);
+ break;
+ case FNIC_TPORT_PLOGI_RSP:
+ fdls_process_tgt_plogi_rsp(iport, fchdr);
+ break;
+ case FNIC_TPORT_PRLI_RSP:
+ fdls_process_tgt_prli_rsp(iport, fchdr);
+ break;
+ case FNIC_TPORT_ADISC_RSP:
+ fdls_process_tgt_adisc_rsp(iport, fchdr);
+ break;
+ case FNIC_TPORT_BLS_ABTS_RSP:
+ fdls_process_tgt_abts_rsp(iport, fchdr);
+ break;
+ case FNIC_TPORT_LOGO_RSP:
+ /* Logo response from tgt which we have deleted */
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Logo response from tgt: 0x%x",
+ ntoh24(fchdr->fh_s_id));
+ break;
+ case FNIC_FABRIC_LOGO_RSP:
+ fdls_process_fabric_logo_rsp(iport, fchdr);
+ break;
+ case FNIC_FABRIC_BLS_ABTS_RSP:
+ fdls_process_fabric_abts_rsp(iport, fchdr);
+ break;
+ case FNIC_FDMI_BLS_ABTS_RSP:
+ fdls_process_fdmi_abts_rsp(iport, fchdr);
+ break;
+ case FNIC_BLS_ABTS_REQ:
+ fdls_process_abts_req(iport, fchdr);
+ break;
+ case FNIC_ELS_UNSUPPORTED_REQ:
+ fdls_process_unsupported_els_req(iport, fchdr);
+ break;
+ case FNIC_ELS_PLOGI_REQ:
+ fdls_process_plogi_req(iport, fchdr);
+ break;
+ case FNIC_ELS_RSCN_REQ:
+ fdls_process_rscn(iport, fchdr);
+ break;
+ case FNIC_ELS_LOGO_REQ:
+ fdls_process_logo_req(iport, fchdr);
+ break;
+ case FNIC_ELS_RRQ:
+ case FNIC_ELS_ECHO_REQ:
+ fdls_process_els_req(iport, fchdr, len);
+ break;
+ case FNIC_ELS_ADISC:
+ fdls_process_adisc_req(iport, fchdr);
+ break;
+ case FNIC_ELS_RLS:
+ fdls_process_rls_req(iport, fchdr);
+ break;
+ case FNIC_FDMI_REG_HBA_RSP:
+ case FNIC_FDMI_RPA_RSP:
+ fdls_process_fdmi_reg_ack(iport, fchdr, frame_type);
+ break;
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "s_id: 0x%x d_did: 0x%x", s_id, d_id);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received unknown FCoE frame of len: %d. Dropping frame", len);
+ break;
+ }
+}
+
+void fnic_fdls_disc_init(struct fnic_iport_s *iport)
+{
+ fdls_reset_oxid_pool(iport);
+ fdls_set_state((&iport->fabric), FDLS_STATE_INIT);
+}
+
+void fnic_fdls_link_down(struct fnic_iport_s *iport)
+{
+ struct fnic_tport_s *tport, *next;
+ struct fnic *fnic = iport->fnic;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS processing link down", iport->fcid);
+
+ fdls_set_state((&iport->fabric), FDLS_STATE_LINKDOWN);
+ iport->fabric.flags = 0;
+
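+ /* Release fnic_lock around the FCPIO reset before tearing down the tports */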
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ fnic_scsi_fcpio_reset(iport->fnic);
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ list_for_each_entry_safe(tport, next, &iport->tport_list, links) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "removing rport: 0x%x", tport->fcid);
+ fdls_delete_tport(iport, tport);
+ }
+
+ if (fnic_fdmi_support == 1) {
+ if (iport->fabric.fdmi_pending > 0) {
+ timer_delete_sync(&iport->fabric.fdmi_timer);
+ iport->fabric.fdmi_pending = 0;
+ }
+ iport->flags &= ~FNIC_FDMI_ACTIVE;
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS finish processing link down", iport->fcid);
+}
diff --git a/drivers/scsi/fnic/fdls_fc.h b/drivers/scsi/fnic/fdls_fc.h
new file mode 100644
index 000000000000..012f43afd083
--- /dev/null
+++ b/drivers/scsi/fnic/fdls_fc.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _FDLS_FC_H_
+#define _FDLS_FC_H_
+
+/* This file contains the declarations for FC fabric services
+ * and target discovery
+ *
+ * Request and Response for
+ * 1. FLOGI
+ * 2. PLOGI to Fabric Controller
+ * 3. GPN_ID, GPN_FT
+ * 4. RSCN
+ * 5. PLOGI to Target
+ * 6. PRLI to Target
+ */
+
+#include <scsi/scsi.h>
+#include <scsi/fc/fc_els.h>
+#include <uapi/scsi/fc/fc_fs.h>
+#include <uapi/scsi/fc/fc_ns.h>
+#include <uapi/scsi/fc/fc_gs.h>
+#include <uapi/linux/if_ether.h>
+#include <scsi/fc/fc_ms.h>
+#include <linux/minmax.h>
+#include <linux/if_ether.h>
+#include <scsi/fc/fc_encaps.h>
+#include <scsi/fc/fc_fcoe.h>
+
+#define FDLS_MIN_FRAMES (32)
+#define FDLS_MIN_FRAME_ELEM (4)
+#define FNIC_FCP_SP_RD_XRDY_DIS 0x00000002
+#define FNIC_FCP_SP_TARGET 0x00000010
+#define FNIC_FCP_SP_INITIATOR 0x00000020
+#define FNIC_FCP_SP_CONF_CMPL 0x00000080
+#define FNIC_FCP_SP_RETRY 0x00000100
+
+#define FNIC_FC_CONCUR_SEQS (0xFF)
+#define FNIC_FC_RO_INFO (0x1F)
+
+/* Little Endian */
+#define FNIC_UNASSIGNED_OXID (0xffff)
+#define FNIC_UNASSIGNED_RXID (0xffff)
+#define FNIC_ELS_REQ_FCTL (0x000029)
+#define FNIC_ELS_REP_FCTL (0x000099)
+
+#define FNIC_FCP_RSP_FCTL (0x000099)
+#define FNIC_REQ_ABTS_FCTL (0x000009)
+
+#define FNIC_FC_PH_VER_HI (0x20)
+#define FNIC_FC_PH_VER_LO (0x20)
+#define FNIC_FC_PH_VER (0x2020)
+#define FNIC_FC_B2B_CREDIT (0x0A)
+#define FNIC_FC_B2B_RDF_SZ (0x0800)
+
+#define FNIC_LOGI_RDF_SIZE(_logi) ((_logi).fl_csp.sp_bb_data)
+#define FNIC_LOGI_R_A_TOV(_logi) ((_logi).fl_csp.sp_r_a_tov)
+#define FNIC_LOGI_E_D_TOV(_logi) ((_logi).fl_csp.sp_e_d_tov)
+#define FNIC_LOGI_FEATURES(_logi) (be16_to_cpu((_logi).fl_csp.sp_features))
+#define FNIC_LOGI_PORT_NAME(_logi) ((_logi).fl_wwpn)
+#define FNIC_LOGI_NODE_NAME(_logi) ((_logi).fl_wwnn)
+
+#define FNIC_LOGI_SET_RDF_SIZE(_logi, _rdf_size) \
+ (FNIC_LOGI_RDF_SIZE(_logi) = cpu_to_be16(_rdf_size))
+#define FNIC_LOGI_SET_E_D_TOV(_logi, _e_d_tov) \
+ (FNIC_LOGI_E_D_TOV(_logi) = cpu_to_be32(_e_d_tov))
+#define FNIC_LOGI_SET_R_A_TOV(_logi, _r_a_tov) \
+ (FNIC_LOGI_R_A_TOV(_logi) = cpu_to_be32(_r_a_tov))
+
+#define FNIC_STD_SET_S_ID(_fchdr, _sid) memcpy((_fchdr).fh_s_id, _sid, 3)
+#define FNIC_STD_SET_D_ID(_fchdr, _did) memcpy((_fchdr).fh_d_id, _did, 3)
+#define FNIC_STD_SET_OX_ID(_fchdr, _oxid) ((_fchdr).fh_ox_id = cpu_to_be16(_oxid))
+#define FNIC_STD_SET_RX_ID(_fchdr, _rxid) ((_fchdr).fh_rx_id = cpu_to_be16(_rxid))
+
+#define FNIC_STD_SET_R_CTL(_fchdr, _rctl) ((_fchdr).fh_r_ctl = _rctl)
+#define FNIC_STD_SET_TYPE(_fchdr, _type) ((_fchdr).fh_type = _type)
+#define FNIC_STD_SET_F_CTL(_fchdr, _fctl) \
+ put_unaligned_be24(_fctl, &((_fchdr).fh_f_ctl))
+
+#define FNIC_STD_SET_NPORT_NAME(_ptr, _wwpn) put_unaligned_be64(_wwpn, _ptr)
+#define FNIC_STD_SET_NODE_NAME(_ptr, _wwnn) put_unaligned_be64(_wwnn, _ptr)
+#define FNIC_STD_SET_PORT_ID(__req, __portid) \
+ memcpy(__req.fr_fid.fp_fid, __portid, 3)
+#define FNIC_STD_SET_PORT_NAME(_req, _pName) \
+ (put_unaligned_be64(_pName, &_req.fr_wwn))
+
+#define FNIC_STD_GET_OX_ID(_fchdr) (be16_to_cpu((_fchdr)->fh_ox_id))
+#define FNIC_STD_GET_RX_ID(_fchdr) (be16_to_cpu((_fchdr)->fh_rx_id))
+#define FNIC_STD_GET_S_ID(_fchdr) ((_fchdr)->fh_s_id)
+#define FNIC_STD_GET_D_ID(_fchdr) ((_fchdr)->fh_d_id)
+#define FNIC_STD_GET_TYPE(_fchdr) ((_fchdr)->fh_type)
+#define FNIC_STD_GET_F_CTL(_fchdr) ((_fchdr)->fh_f_ctl)
+#define FNIC_STD_GET_R_CTL(_fchdr) ((_fchdr)->fh_r_ctl)
+
+#define FNIC_STD_GET_FC_CT_CMD(__fcct_hdr) (be16_to_cpu(__fcct_hdr->ct_cmd))
+
+#define FNIC_FCOE_MAX_FRAME_SZ (2048)
+#define FNIC_FCOE_MIN_FRAME_SZ (280)
+#define FNIC_FC_MAX_PAYLOAD_LEN (2048)
+#define FNIC_MIN_DATA_FIELD_SIZE (256)
+
+#define FNIC_FC_EDTOV_NSEC (0x400)
+#define FNIC_NSEC_TO_MSEC (0x1000000)
+#define FCP_PRLI_FUNC_TARGET (0x0010)
+
+#define FNIC_FC_R_CTL_SOLICITED_DATA (0x21)
+#define FNIC_FC_F_CTL_LAST_END_SEQ (0x98)
+#define FNIC_FC_F_CTL_LAST_END_SEQ_INT (0x99)
+#define FNIC_FC_F_CTL_FIRST_LAST_SEQINIT (0x29)
+#define FNIC_FC_R_CTL_FC4_SCTL (0x03)
+#define FNIC_FC_CS_CTL (0x00)
+
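+/* Frame classification helpers: test the R_CTL, F_CTL, CS_CTL and TYPE fields of a received FC header */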
+#define FNIC_FC_FRAME_UNSOLICITED(_fchdr) \
+ (_fchdr->fh_r_ctl == FC_RCTL_ELS_REQ)
+#define FNIC_FC_FRAME_SOLICITED_DATA(_fchdr) \
+ (_fchdr->fh_r_ctl == FNIC_FC_R_CTL_SOLICITED_DATA)
+#define FNIC_FC_FRAME_SOLICITED_CTRL_REPLY(_fchdr) \
+ (_fchdr->fh_r_ctl == FC_RCTL_ELS_REP)
+#define FNIC_FC_FRAME_FCTL_LAST_END_SEQ(_fchdr) \
+ (_fchdr->fh_f_ctl[0] == FNIC_FC_F_CTL_LAST_END_SEQ)
+#define FNIC_FC_FRAME_FCTL_LAST_END_SEQ_INT(_fchdr) \
+ (_fchdr->fh_f_ctl[0] == FNIC_FC_F_CTL_LAST_END_SEQ_INT)
+#define FNIC_FC_FRAME_FCTL_FIRST_LAST_SEQINIT(_fchdr) \
+ (_fchdr->fh_f_ctl[0] == FNIC_FC_F_CTL_FIRST_LAST_SEQINIT)
+#define FNIC_FC_FRAME_FC4_SCTL(_fchdr) \
+ (_fchdr->fh_r_ctl == FNIC_FC_R_CTL_FC4_SCTL)
+#define FNIC_FC_FRAME_TYPE_BLS(_fchdr) (_fchdr->fh_type == FC_TYPE_BLS)
+#define FNIC_FC_FRAME_TYPE_ELS(_fchdr) (_fchdr->fh_type == FC_TYPE_ELS)
+#define FNIC_FC_FRAME_TYPE_FC_GS(_fchdr) (_fchdr->fh_type == FC_TYPE_CT)
+#define FNIC_FC_FRAME_CS_CTL(_fchdr) (_fchdr->fh_cs_ctl == FNIC_FC_CS_CTL)
+
+#define FNIC_FC_C3_RDF (0xfff)
+#define FNIC_FC_PLOGI_RSP_RDF(_plogi_rsp) \
+ (min(_plogi_rsp->u.csp_plogi.b2b_rdf_size, \
+ (_plogi_rsp->spc3[4] & FNIC_FC_C3_RDF)))
+#define FNIC_FC_PLOGI_RSP_CONCUR_SEQ(_plogi_rsp) \
+ (min((uint16_t) (be16_to_cpu(_plogi_rsp->els.fl_csp.sp_tot_seq)), \
+ (uint16_t) (be16_to_cpu(_plogi_rsp->els.fl_cssp[2].cp_con_seq) & 0xff)))
+
+/* FLOGI/PLOGI struct */
+struct fc_std_flogi {
+ struct fc_frame_header fchdr;
+ struct fc_els_flogi els;
+} __packed;
+
+struct fc_std_els_acc_rsp {
+ struct fc_frame_header fchdr;
+ struct fc_els_ls_acc acc;
+} __packed;
+
+struct fc_std_els_rjt_rsp {
+ struct fc_frame_header fchdr;
+ struct fc_els_ls_rjt rej;
+} __packed;
+
+struct fc_std_els_adisc {
+ struct fc_frame_header fchdr;
+ struct fc_els_adisc els;
+} __packed;
+
+struct fc_std_rls_acc {
+ struct fc_frame_header fchdr;
+ struct fc_els_rls_resp els;
+} __packed;
+
+struct fc_std_abts_ba_acc {
+ struct fc_frame_header fchdr;
+ struct fc_ba_acc acc;
+} __packed;
+
+struct fc_std_abts_ba_rjt {
+ struct fc_frame_header fchdr;
+ struct fc_ba_rjt rjt;
+} __packed;
+
+struct fc_std_els_prli {
+ struct fc_frame_header fchdr;
+ struct fc_els_prli els_prli;
+ struct fc_els_spp sp;
+} __packed;
+
+struct fc_std_rpn_id {
+ struct fc_frame_header fchdr;
+ struct fc_ct_hdr fc_std_ct_hdr;
+ struct fc_ns_rn_id rpn_id;
+} __packed;
+
+struct fc_std_fdmi_rhba {
+ struct fc_frame_header fchdr;
+ struct fc_ct_hdr fc_std_ct_hdr;
+ struct fc_fdmi_rhba rhba;
+} __packed;
+
+struct fc_std_fdmi_rpa {
+ struct fc_frame_header fchdr;
+ struct fc_ct_hdr fc_std_ct_hdr;
+ struct fc_fdmi_rpa rpa;
+} __packed;
+
+struct fc_std_rft_id {
+ struct fc_frame_header fchdr;
+ struct fc_ct_hdr fc_std_ct_hdr;
+ struct fc_ns_rft_id rft_id;
+} __packed;
+
+struct fc_std_rff_id {
+ struct fc_frame_header fchdr;
+ struct fc_ct_hdr fc_std_ct_hdr;
+ struct fc_ns_rff_id rff_id;
+} __packed;
+
+struct fc_std_gpn_ft {
+ struct fc_frame_header fchdr;
+ struct fc_ct_hdr fc_std_ct_hdr;
+ struct fc_ns_gid_ft gpn_ft;
+} __packed;
+
+/* Accept CT_IU for GPN_FT */
+struct fc_gpn_ft_rsp_iu {
+ uint8_t ctrl;
+ uint8_t fcid[3];
+ uint32_t rsvd;
+ __be64 wwpn;
+} __packed;
+
+struct fc_std_rls {
+ struct fc_frame_header fchdr;
+ struct fc_els_rls els;
+} __packed;
+
+struct fc_std_scr {
+ struct fc_frame_header fchdr;
+ struct fc_els_scr scr;
+} __packed;
+
+struct fc_std_rscn {
+ struct fc_frame_header fchdr;
+ struct fc_els_rscn els;
+} __packed;
+
+struct fc_std_logo {
+ struct fc_frame_header fchdr;
+ struct fc_els_logo els;
+} __packed;
+
+#define FNIC_ETH_FCOE_HDRS_OFFSET \
+ (sizeof(struct ethhdr) + sizeof(struct fcoe_hdr))
+
+#endif /* _FDLS_FC_H_ */
diff --git a/drivers/scsi/fnic/fip.c b/drivers/scsi/fnic/fip.c
new file mode 100644
index 000000000000..6e7c0b00eb41
--- /dev/null
+++ b/drivers/scsi/fnic/fip.c
@@ -0,0 +1,1005 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+#include "fnic.h"
+#include "fip.h"
+#include <linux/etherdevice.h>
+
+#define FIP_FNIC_RESET_WAIT_COUNT 15
+
+/**
+ * fnic_fcoe_reset_vlans - Free up the list of discovered vlans
+ * @fnic: Handle to fnic driver instance
+ */
+void fnic_fcoe_reset_vlans(struct fnic *fnic)
+{
+ unsigned long flags;
+ struct fcoe_vlan *vlan, *next;
+
+ spin_lock_irqsave(&fnic->vlans_lock, flags);
+ if (!list_empty(&fnic->vlan_list)) {
+ list_for_each_entry_safe(vlan, next, &fnic->vlan_list, list) {
+ list_del(&vlan->list);
+ kfree(vlan);
+ }
+ }
+
+ spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Reset vlan complete\n");
+}
+
+/**
+ * fnic_fcoe_send_vlan_req - Send a FIP VLAN request to the all-FCFs MAC address
+ * @fnic: Handle to fnic driver instance
+ */
+void fnic_fcoe_send_vlan_req(struct fnic *fnic)
+{
+ uint8_t *frame;
+ struct fnic_iport_s *iport = &fnic->iport;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ u64 vlan_tov;
+ struct fip_vlan_req *pvlan_req;
+ uint16_t frame_size = sizeof(struct fip_vlan_req);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send VLAN req");
+ return;
+ }
+
+ fnic_fcoe_reset_vlans(fnic);
+
+ fnic->set_vlan(fnic, 0);
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "set vlan done\n");
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "got MAC 0x%x:%x:%x:%x:%x:%x\n", iport->hwmac[0],
+ iport->hwmac[1], iport->hwmac[2], iport->hwmac[3],
+ iport->hwmac[4], iport->hwmac[5]);
+
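+ /* Build the FIP VLAN request: FIP header plus one MAC descriptor, addressed to FCOE_ALL_FCFS_MAC */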
+ pvlan_req = (struct fip_vlan_req *) frame;
+ *pvlan_req = (struct fip_vlan_req) {
+ .eth = {.h_dest = FCOE_ALL_FCFS_MAC,
+ .h_proto = cpu_to_be16(ETH_P_FIP)},
+ .fip = {.fip_ver = FIP_VER_ENCAPS(FIP_VER),
+ .fip_op = cpu_to_be16(FIP_OP_VLAN),
+ .fip_subcode = FIP_SC_REQ,
+ .fip_dl_len = cpu_to_be16(FIP_VLAN_REQ_LEN)},
+ .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC,
+ .fip_dlen = 2}}
+ };
+
+ memcpy(pvlan_req->eth.h_source, iport->hwmac, ETH_ALEN);
+ memcpy(pvlan_req->mac_desc.fd_mac, iport->hwmac, ETH_ALEN);
+
+ atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);
+
+ iport->fip.state = FDLS_FIP_VLAN_DISCOVERY_STARTED;
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Send VLAN req\n");
+ fnic_send_fip_frame(iport, frame, frame_size);
+
+ vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
+ mod_timer(&fnic->retry_fip_timer, round_jiffies(vlan_tov));
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fip timer set\n");
+}
+
+/**
+ * fnic_fcoe_process_vlan_resp - Process the VLAN response from one FCF and
+ * populate the VLAN list.
+ * @fnic: Handle to fnic driver instance
+ * @fiph: Received FIP frame
+ *
+ * Will wait for responses from multiple FCFs until timeout.
+ */
+void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct fip_header *fiph)
+{
+ struct fip_vlan_notif *vlan_notif = (struct fip_vlan_notif *)fiph;
+
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ u16 vid;
+ int num_vlan = 0;
+ int cur_desc, desc_len;
+ struct fcoe_vlan *vlan;
+ struct fip_vlan_desc *vlan_desc;
+ unsigned long flags;
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic 0x%p got vlan resp\n", fnic);
+
+ desc_len = be16_to_cpu(vlan_notif->fip.fip_dl_len);
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "desc_len %d\n", desc_len);
+
+ spin_lock_irqsave(&fnic->vlans_lock, flags);
+
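+ /* Walk the descriptor list: record the first VLAN descriptor and skip non-VLAN descriptors (e.g. a MAC address descriptor) */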
+ cur_desc = 0;
+ while (desc_len > 0) {
+ vlan_desc =
+ (struct fip_vlan_desc *)(((char *)vlan_notif->vlans_desc)
+ + cur_desc * 4);
+
+ if (vlan_desc->fd_desc.fip_dtype == FIP_DT_VLAN) {
+ if (vlan_desc->fd_desc.fip_dlen != 1) {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num,
+ "Invalid descriptor length(%x) in VLan response\n",
+ vlan_desc->fd_desc.fip_dlen);
+
+ }
+ num_vlan++;
+ vid = be16_to_cpu(vlan_desc->fd_vlan);
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num,
+ "process_vlan_resp: FIP VLAN %d\n", vid);
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+
+ if (!vlan) {
+ /* retry from timer */
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num,
+ "Mem Alloc failure\n");
+ spin_unlock_irqrestore(&fnic->vlans_lock,
+ flags);
+ goto out;
+ }
+ vlan->vid = vid & 0x0fff;
+ vlan->state = FIP_VLAN_AVAIL;
+ list_add_tail(&vlan->list, &fnic->vlan_list);
+ break;
+ }
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num,
+ "Invalid descriptor type(%x) in VLan response\n",
+ vlan_desc->fd_desc.fip_dtype);
+ /*
+ * Note : received a type=2 descriptor here i.e. FIP
+ * MAC Address Descriptor
+ */
+ cur_desc += vlan_desc->fd_desc.fip_dlen;
+ desc_len -= vlan_desc->fd_desc.fip_dlen;
+ }
+
+ /* any VLAN descriptors present ? */
+ if (num_vlan == 0) {
+ atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic 0x%p No VLAN descriptors in FIP VLAN response\n",
+ fnic);
+ }
+
+ spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+
+ out:
+ return;
+}
+
+/**
+ * fnic_fcoe_start_fcf_discovery - Start FIP FCF discovery in a selected vlan
+ * @fnic: Handle to fnic driver instance
+ */
+void fnic_fcoe_start_fcf_discovery(struct fnic *fnic)
+{
+ uint8_t *frame;
+ struct fnic_iport_s *iport = &fnic->iport;
+ u64 fcs_tov;
+ struct fip_discovery *pdisc_sol;
+ uint16_t frame_size = sizeof(struct fip_discovery);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to start FCF discovery");
+ return;
+ }
+
+ memset(iport->selected_fcf.fcf_mac, 0, ETH_ALEN);
+
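+ /* Build the discovery solicitation: FIP header with MAC, name and FCoE max-size descriptors, sent to all FCFs */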
+ pdisc_sol = (struct fip_discovery *) frame;
+ *pdisc_sol = (struct fip_discovery) {
+ .eth = {.h_dest = FCOE_ALL_FCFS_MAC,
+ .h_proto = cpu_to_be16(ETH_P_FIP)},
+ .fip = {
+ .fip_ver = FIP_VER_ENCAPS(FIP_VER), .fip_op = cpu_to_be16(FIP_OP_DISC),
+ .fip_subcode = FIP_SC_REQ, .fip_dl_len = cpu_to_be16(FIP_DISC_SOL_LEN),
+ .fip_flags = cpu_to_be16(FIP_FL_FPMA)},
+ .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}},
+ .name_desc = {.fd_desc = {.fip_dtype = FIP_DT_NAME, .fip_dlen = 3}},
+ .fcoe_desc = {.fd_desc = {.fip_dtype = FIP_DT_FCOE_SIZE, .fip_dlen = 1},
+ .fd_size = cpu_to_be16(FCOE_MAX_SIZE)}
+ };
+
+ memcpy(pdisc_sol->eth.h_source, iport->hwmac, ETH_ALEN);
+ memcpy(pdisc_sol->mac_desc.fd_mac, iport->hwmac, ETH_ALEN);
+ iport->selected_fcf.fcf_priority = 0xFF;
+
+ FNIC_STD_SET_NODE_NAME(&pdisc_sol->name_desc.fd_wwn, iport->wwnn);
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Start FCF discovery\n");
+ fnic_send_fip_frame(iport, frame, frame_size);
+
+ iport->fip.state = FDLS_FIP_FCF_DISCOVERY_STARTED;
+
+ fcs_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FCS_TOV);
+ mod_timer(&fnic->retry_fip_timer, round_jiffies(fcs_tov));
+}
+
+/**
+ * fnic_fcoe_fip_discovery_resp - Processes FCF advertisements.
+ * @fnic: Handle to fnic driver instance
+ * @fiph: Received frame
+ *
+ * FCF advertisements can be:
+ * solicited - Sent in response to a discover-FCF FIP request.
+ * Store the information of the FCF with the highest priority.
+ * Wait until timeout in case of multiple FCFs.
+ *
+ * unsolicited - Sent periodically by the FCF for keep alive.
+ * If FLOGI is in progress or completed and the advertisement was
+ * received from our selected FCF, refresh the keep alive timer.
+ */
+void fnic_fcoe_fip_discovery_resp(struct fnic *fnic, struct fip_header *fiph)
+{
+ struct fnic_iport_s *iport = &fnic->iport;
+ struct fip_disc_adv *disc_adv = (struct fip_disc_adv *)fiph;
+ u64 fcs_ka_tov;
+ u64 tov;
+ int fka_has_changed;
+
+ switch (iport->fip.state) {
+ case FDLS_FIP_FCF_DISCOVERY_STARTED:
+ if (be16_to_cpu(disc_adv->fip.fip_flags) & FIP_FL_SOL) {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num,
+ "fnic 0x%p Solicited adv\n", fnic);
+
+ if ((disc_adv->prio_desc.fd_pri <
+ iport->selected_fcf.fcf_priority)
+ && (be16_to_cpu(disc_adv->fip.fip_flags) & FIP_FL_AVAIL)) {
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num,
+ "fnic 0x%p FCF Available\n", fnic);
+ memcpy(iport->selected_fcf.fcf_mac,
+ disc_adv->mac_desc.fd_mac, ETH_ALEN);
+ iport->selected_fcf.fcf_priority =
+ disc_adv->prio_desc.fd_pri;
+ iport->selected_fcf.fka_adv_period =
+ be32_to_cpu(disc_adv->fka_adv_desc.fd_fka_period);
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num, "adv time %d",
+ iport->selected_fcf.fka_adv_period);
+ iport->selected_fcf.ka_disabled =
+ (disc_adv->fka_adv_desc.fd_flags & 1);
+ }
+ }
+ break;
+ case FDLS_FIP_FLOGI_STARTED:
+ case FDLS_FIP_FLOGI_COMPLETE:
+ if (!(be16_to_cpu(disc_adv->fip.fip_flags) & FIP_FL_SOL)) {
+ /* same fcf */
+ if (memcmp
+ (iport->selected_fcf.fcf_mac,
+ disc_adv->mac_desc.fd_mac, ETH_ALEN) == 0) {
+ if (iport->selected_fcf.fka_adv_period !=
+ be32_to_cpu(disc_adv->fka_adv_desc.fd_fka_period)) {
+ iport->selected_fcf.fka_adv_period =
+ be32_to_cpu(disc_adv->fka_adv_desc.fd_fka_period);
+ FNIC_FIP_DBG(KERN_INFO,
+ fnic->host,
+ fnic->fnic_num,
+ "change fka to %d",
+ iport->selected_fcf.fka_adv_period);
+ }
+
+ fka_has_changed =
+ (iport->selected_fcf.ka_disabled == 1)
+ && ((disc_adv->fka_adv_desc.fd_flags & 1) ==
+ 0);
+
+ iport->selected_fcf.ka_disabled =
+ (disc_adv->fka_adv_desc.fd_flags & 1);
+ if (!((iport->selected_fcf.ka_disabled)
+ || (iport->selected_fcf.fka_adv_period ==
+ 0))) {
+
+ fcs_ka_tov = jiffies
+ + 3
+ *
+ msecs_to_jiffies(iport->selected_fcf.fka_adv_period);
+ mod_timer(&fnic->fcs_ka_timer,
+ round_jiffies(fcs_ka_tov));
+ } else {
+ if (timer_pending(&fnic->fcs_ka_timer))
+ timer_delete_sync(&fnic->fcs_ka_timer);
+ }
+
+ if (fka_has_changed) {
+ if (iport->selected_fcf.fka_adv_period != 0) {
+ tov =
+ jiffies +
+ msecs_to_jiffies(
+ iport->selected_fcf.fka_adv_period);
+ mod_timer(&fnic->enode_ka_timer,
+ round_jiffies(tov));
+
+ tov =
+ jiffies +
+ msecs_to_jiffies
+ (FIP_VN_KA_PERIOD);
+ mod_timer(&fnic->vn_ka_timer,
+ round_jiffies(tov));
+ }
+ }
+ }
+ }
+ break;
+ default:
+ break;
+ } /* end switch */
+}
+
+/**
+ * fnic_fcoe_start_flogi - Send FIP FLOGI to the selected FCF
+ * @fnic: Handle to fnic driver instance
+ */
+void fnic_fcoe_start_flogi(struct fnic *fnic)
+{
+ uint8_t *frame;
+ struct fnic_iport_s *iport = &fnic->iport;
+ struct fip_flogi *pflogi_req;
+ u64 flogi_tov;
+ uint16_t oxid;
+ uint16_t frame_size = sizeof(struct fip_flogi);
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to start FIP FLOGI");
+ return;
+ }
+
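+ /*
+ * Build the FIP-encapsulated FLOGI: FIP header, an FLOGI descriptor
+ * carrying the FC FLOGI frame, and a MAC descriptor (the FCF returns
+ * the granted FPMA address in its response).
+ */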
+ pflogi_req = (struct fip_flogi *) frame;
+ *pflogi_req = (struct fip_flogi) {
+ .eth = {
+ .h_proto = cpu_to_be16(ETH_P_FIP)},
+ .fip = {
+ .fip_ver = FIP_VER_ENCAPS(FIP_VER),
+ .fip_op = cpu_to_be16(FIP_OP_LS),
+ .fip_subcode = FIP_SC_REQ,
+ .fip_dl_len = cpu_to_be16(FIP_FLOGI_LEN),
+ .fip_flags = cpu_to_be16(FIP_FL_FPMA)},
+ .flogi_desc = {
+ .fd_desc = {.fip_dtype = FIP_DT_FLOGI, .fip_dlen = 36},
+ .flogi = {
+ .fchdr = {
+ .fh_r_ctl = FC_RCTL_ELS_REQ,
+ .fh_d_id = {0xFF, 0xFF, 0xFE},
+ .fh_type = FC_TYPE_ELS,
+ .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0},
+ .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)},
+ .els = {
+ .fl_cmd = ELS_FLOGI,
+ .fl_csp = {
+ .sp_hi_ver =
+ FNIC_FC_PH_VER_HI,
+ .sp_lo_ver =
+ FNIC_FC_PH_VER_LO,
+ .sp_bb_cred =
+ cpu_to_be16
+ (FNIC_FC_B2B_CREDIT),
+ .sp_bb_data =
+ cpu_to_be16
+ (FNIC_FC_B2B_RDF_SZ)},
+ .fl_cssp[2].cp_class =
+ cpu_to_be16(FC_CPC_VALID | FC_CPC_SEQ)
+ },
+ }
+ },
+ .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}}
+ };
+
+ memcpy(pflogi_req->eth.h_source, iport->hwmac, ETH_ALEN);
+ if (iport->usefip)
+ memcpy(pflogi_req->eth.h_dest, iport->selected_fcf.fcf_mac,
+ ETH_ALEN);
+
+ oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_FLOGI,
+ &iport->active_oxid_fabric_req);
+ if (oxid == FNIC_UNASSIGNED_OXID) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Failed to allocate OXID to send FIP FLOGI");
+ mempool_free(frame, fnic->frame_pool);
+ return;
+ }
+ FNIC_STD_SET_OX_ID(pflogi_req->flogi_desc.flogi.fchdr, oxid);
+
+ FNIC_STD_SET_NPORT_NAME(&pflogi_req->flogi_desc.flogi.els.fl_wwpn,
+ iport->wwpn);
+ FNIC_STD_SET_NODE_NAME(&pflogi_req->flogi_desc.flogi.els.fl_wwnn,
+ iport->wwnn);
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FIP start FLOGI\n");
+ fnic_send_fip_frame(iport, frame, frame_size);
+ iport->fip.flogi_retry++;
+
+ iport->fip.state = FDLS_FIP_FLOGI_STARTED;
+ flogi_tov = jiffies + msecs_to_jiffies(fnic->config.flogi_timeout);
+ mod_timer(&fnic->retry_fip_timer, round_jiffies(flogi_tov));
+}
+
+/**
+ * fnic_fcoe_process_flogi_resp - Processes FLOGI response from FCF.
+ * @fnic: Handle to fnic driver instance
+ * @fiph: Received frame
+ *
+ * If successful save assigned fc_id and MAC, program firmware
+ * and start fdls discovery, else restart vlan discovery.
+ */
+void fnic_fcoe_process_flogi_resp(struct fnic *fnic, struct fip_header *fiph)
+{
+ struct fnic_iport_s *iport = &fnic->iport;
+ struct fip_flogi_rsp *flogi_rsp = (struct fip_flogi_rsp *)fiph;
+ int desc_len;
+ uint32_t s_id;
+ int frame_type;
+ uint16_t oxid;
+
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ struct fc_frame_header *fchdr = &flogi_rsp->rsp_desc.flogi.fchdr;
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic 0x%p FIP FLOGI rsp\n", fnic);
+ desc_len = be16_to_cpu(flogi_rsp->fip.fip_dl_len);
+ if (desc_len != 38) {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Invalid Descriptor List len (%x). Dropping frame\n",
+ desc_len);
+ return;
+ }
+
+ if (!((flogi_rsp->rsp_desc.fd_desc.fip_dtype == 7)
+ && (flogi_rsp->rsp_desc.fd_desc.fip_dlen == 36))
+ || !((flogi_rsp->mac_desc.fd_desc.fip_dtype == 2)
+ && (flogi_rsp->mac_desc.fd_desc.fip_dlen == 2))) {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping frame invalid type and len mix\n");
+ return;
+ }
+
+ frame_type = fnic_fdls_validate_and_get_frame_type(iport, fchdr);
+
+ s_id = ntoh24(fchdr->fh_s_id);
+ if ((fchdr->fh_f_ctl[0] != 0x98)
+ || (fchdr->fh_r_ctl != 0x23)
+ || (s_id != FC_FID_FLOGI)
+ || (frame_type != FNIC_FABRIC_FLOGI_RSP)
+ || (fchdr->fh_type != 0x01)) {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping invalid frame: s_id %x F %x R %x t %x OX_ID %x\n",
+ s_id, fchdr->fh_f_ctl[0], fchdr->fh_r_ctl,
+ fchdr->fh_type, FNIC_STD_GET_OX_ID(fchdr));
+ return;
+ }
+
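+ /* Valid solicited FLOGI response from the fabric (s_id == FC_FID_FLOGI); act on it only if an FLOGI is pending */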
+ if (iport->fip.state == FDLS_FIP_FLOGI_STARTED) {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic 0x%p rsp for pending FLOGI\n", fnic);
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+ timer_delete_sync(&fnic->retry_fip_timer);
+
+ if ((be16_to_cpu(flogi_rsp->fip.fip_dl_len) == FIP_FLOGI_LEN)
+ && (flogi_rsp->rsp_desc.flogi.els.fl_cmd == ELS_LS_ACC)) {
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num,
+ "fnic 0x%p FLOGI success\n", fnic);
+ memcpy(iport->fpma, flogi_rsp->mac_desc.fd_mac, ETH_ALEN);
+ iport->fcid =
+ ntoh24(flogi_rsp->rsp_desc.flogi.fchdr.fh_d_id);
+
+ iport->r_a_tov =
+ be32_to_cpu(flogi_rsp->rsp_desc.flogi.els.fl_csp.sp_r_a_tov);
+ iport->e_d_tov =
+ be32_to_cpu(flogi_rsp->rsp_desc.flogi.els.fl_csp.sp_e_d_tov);
+ memcpy(fnic->iport.fcfmac, iport->selected_fcf.fcf_mac,
+ ETH_ALEN);
+ vnic_dev_add_addr(fnic->vdev, flogi_rsp->mac_desc.fd_mac);
+
+ if (fnic_fdls_register_portid(iport, iport->fcid, NULL)
+ != 0) {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num,
+ "fnic 0x%p flogi registration failed\n",
+ fnic);
+ return;
+ }
+
+ iport->fip.state = FDLS_FIP_FLOGI_COMPLETE;
+ iport->state = FNIC_IPORT_STATE_FABRIC_DISC;
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num, "iport->state:%d\n",
+ iport->state);
+ fnic_fdls_disc_start(iport);
+ if (!((iport->selected_fcf.ka_disabled)
+ || (iport->selected_fcf.fka_adv_period == 0))) {
+ u64 tov;
+
+ tov = jiffies
+ +
+ msecs_to_jiffies(iport->selected_fcf.fka_adv_period);
+ mod_timer(&fnic->enode_ka_timer,
+ round_jiffies(tov));
+
+ tov =
+ jiffies +
+ msecs_to_jiffies(FIP_VN_KA_PERIOD);
+ mod_timer(&fnic->vn_ka_timer,
+ round_jiffies(tov));
+
+ }
+ } else {
+ /*
+ * The FLOGI was rejected: clear all
+ * FCFs and restart from scratch
+ */
+ atomic64_inc(&fnic_stats->vlan_stats.flogi_rejects);
+ /* start FCoE VLAN discovery */
+ fnic_fcoe_send_vlan_req(fnic);
+
+ iport->fip.state = FDLS_FIP_VLAN_DISCOVERY_STARTED;
+ }
+ }
+}
+
+/**
+ * fnic_common_fip_cleanup - Clean up FCF info and timers in case of
+ * link down/CVL
+ * @fnic: Handle to fnic driver instance
+ */
+void fnic_common_fip_cleanup(struct fnic *fnic)
+{
+
+ struct fnic_iport_s *iport = &fnic->iport;
+
+ if (!iport->usefip)
+ return;
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic 0x%p fip cleanup\n", fnic);
+
+ iport->fip.state = FDLS_FIP_INIT;
+
+ timer_delete_sync(&fnic->retry_fip_timer);
+ timer_delete_sync(&fnic->fcs_ka_timer);
+ timer_delete_sync(&fnic->enode_ka_timer);
+ timer_delete_sync(&fnic->vn_ka_timer);
+
+ if (!is_zero_ether_addr(iport->fpma))
+ vnic_dev_del_addr(fnic->vdev, iport->fpma);
+
+ memset(iport->fpma, 0, ETH_ALEN);
+ iport->fcid = 0;
+ iport->r_a_tov = 0;
+ iport->e_d_tov = 0;
+ memset(fnic->iport.fcfmac, 0, ETH_ALEN);
+ memset(iport->selected_fcf.fcf_mac, 0, ETH_ALEN);
+ iport->selected_fcf.fcf_priority = 0;
+ iport->selected_fcf.fka_adv_period = 0;
+ iport->selected_fcf.ka_disabled = 0;
+
+ fnic_fcoe_reset_vlans(fnic);
+}
+
+/**
+ * fnic_fcoe_process_cvl - Processes Clear Virtual Link from FCF.
+ * @fnic: Handle to fnic driver instance
+ * @fiph: Received frame
+ *
+ * Verify that cvl is received from our current FCF for our assigned MAC
+ * and clean up and restart the vlan discovery.
+ */
+void fnic_fcoe_process_cvl(struct fnic *fnic, struct fip_header *fiph)
+{
+ struct fnic_iport_s *iport = &fnic->iport;
+ struct fip_cvl *cvl_msg = (struct fip_cvl *)fiph;
+ int i;
+ int found = false;
+ int max_count = 0;
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic 0x%p clear virtual link handler\n", fnic);
+
+ if (!((cvl_msg->fcf_mac_desc.fd_desc.fip_dtype == 2)
+ && (cvl_msg->fcf_mac_desc.fd_desc.fip_dlen == 2))
+ || !((cvl_msg->name_desc.fd_desc.fip_dtype == 4)
+ && (cvl_msg->name_desc.fd_desc.fip_dlen == 3))) {
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "invalid mix: ft %x fl %x ndt %x ndl %x",
+ cvl_msg->fcf_mac_desc.fd_desc.fip_dtype,
+ cvl_msg->fcf_mac_desc.fd_desc.fip_dlen,
+ cvl_msg->name_desc.fd_desc.fip_dtype,
+ cvl_msg->name_desc.fd_desc.fip_dlen);
+ }
+
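+ /* Process the CVL only if it was sent by the currently selected FCF */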
+ if (memcmp
+ (iport->selected_fcf.fcf_mac, cvl_msg->fcf_mac_desc.fd_mac, ETH_ALEN)
+ == 0) {
+ for (i = 0; i < ((be16_to_cpu(fiph->fip_dl_len) / 5) - 1); i++) {
+ if (!((cvl_msg->vn_ports_desc[i].fd_desc.fip_dtype == 11)
+ && (cvl_msg->vn_ports_desc[i].fd_desc.fip_dlen == 5))) {
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num,
+ "Invalid type and len mix type: %d len: %d\n",
+ cvl_msg->vn_ports_desc[i].fd_desc.fip_dtype,
+ cvl_msg->vn_ports_desc[i].fd_desc.fip_dlen);
+ }
+ if (memcmp
+ (iport->fpma, cvl_msg->vn_ports_desc[i].fd_mac,
+ ETH_ALEN) == 0) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ return;
+ fnic_common_fip_cleanup(fnic);
+
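+ /* Wait for any in-progress fnic reset to finish before forcing the link down */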
+ while (fnic->reset_in_progress == IN_PROGRESS) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ wait_for_completion_timeout(&fnic->reset_completion_wait,
+ msecs_to_jiffies(5000));
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ max_count++;
+ if (max_count >= FIP_FNIC_RESET_WAIT_COUNT) {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Rthr waited too long. Skipping handle link event %p\n",
+ fnic);
+ return;
+ }
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic reset in progress. Link event needs to wait %p",
+ fnic);
+ }
+ fnic->reset_in_progress = IN_PROGRESS;
+ fnic_fdls_link_down(iport);
+ fnic->reset_in_progress = NOT_IN_PROGRESS;
+ complete(&fnic->reset_completion_wait);
+ fnic_fcoe_send_vlan_req(fnic);
+ }
+}
+
+/**
+ * fdls_fip_recv_frame - Demultiplexer for FIP frames
+ * @fnic: Handle to fnic driver instance
+ * @frame: Received ethernet frame
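+ *
+ * Return: true if the frame was handled as a FIP frame, otherwise false.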
+ */
+int fdls_fip_recv_frame(struct fnic *fnic, void *frame)
+{
+ struct ethhdr *eth = (struct ethhdr *)frame;
+ struct fip_header *fiph;
+ u16 op;
+ u8 sub;
+ int len = 2048;
+
+ if (be16_to_cpu(eth->h_proto) == ETH_P_FIP) {
+ fiph = (struct fip_header *)(eth + 1);
+ op = be16_to_cpu(fiph->fip_op);
+ sub = fiph->fip_subcode;
+
+ fnic_debug_dump_fip_frame(fnic, eth, len, "Incoming");
+
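+ /* Dispatch the frame based on FIP operation and subcode */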
+ if (op == FIP_OP_DISC && sub == FIP_SC_REP)
+ fnic_fcoe_fip_discovery_resp(fnic, fiph);
+ else if (op == FIP_OP_VLAN && sub == FIP_SC_REP)
+ fnic_fcoe_process_vlan_resp(fnic, fiph);
+ else if (op == FIP_OP_CTRL && sub == FIP_SC_REP)
+ fnic_fcoe_process_cvl(fnic, fiph);
+ else if (op == FIP_OP_LS && sub == FIP_SC_REP)
+ fnic_fcoe_process_flogi_resp(fnic, fiph);
+
+ /* Return true if the frame was a FIP frame */
+ return true;
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Not a FIP Frame");
+ return false;
+}
+
+void fnic_work_on_fip_timer(struct work_struct *work)
+{
+ struct fnic *fnic = container_of(work, struct fnic, fip_timer_work);
+ struct fnic_iport_s *iport = &fnic->iport;
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FIP timeout\n");
+
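+ /* Take the next FIP action based on which discovery phase timed out */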
+ if (iport->fip.state == FDLS_FIP_VLAN_DISCOVERY_STARTED) {
+ fnic_vlan_discovery_timeout(fnic);
+ } else if (iport->fip.state == FDLS_FIP_FCF_DISCOVERY_STARTED) {
+ u8 zmac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FCF Discovery timeout\n");
+ if (memcmp(iport->selected_fcf.fcf_mac, zmac, ETH_ALEN) != 0) {
+
+ if (iport->flags & FNIC_FIRST_LINK_UP) {
+ fnic_scsi_fcpio_reset(iport->fnic);
+ iport->flags &= ~FNIC_FIRST_LINK_UP;
+ }
+
+ fnic_fcoe_start_flogi(fnic);
+ if (!((iport->selected_fcf.ka_disabled)
+ || (iport->selected_fcf.fka_adv_period == 0))) {
+ u64 fcf_tov;
+
+ fcf_tov = jiffies
+ + 3
+ *
+ msecs_to_jiffies(iport->selected_fcf.fka_adv_period);
+ mod_timer(&fnic->fcs_ka_timer,
+ round_jiffies(fcf_tov));
+ }
+ } else {
+ FNIC_FIP_DBG(KERN_INFO, fnic->host,
+ fnic->fnic_num, "FCF Discovery timeout\n");
+ fnic_vlan_discovery_timeout(fnic);
+ }
+ } else if (iport->fip.state == FDLS_FIP_FLOGI_STARTED) {
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI timeout\n");
+ if (iport->fip.flogi_retry < fnic->config.flogi_retries)
+ fnic_fcoe_start_flogi(fnic);
+ else
+ fnic_vlan_discovery_timeout(fnic);
+ }
+}
+
+/**
+ * fnic_handle_fip_timer - Timeout handler for the FIP discovery phase.
+ * @t: Handle to the timer list
+ *
+ * Based on the current state, start the next phase or restart discovery.
+ */
+void fnic_handle_fip_timer(struct timer_list *t)
+{
+ struct fnic *fnic = from_timer(fnic, t, retry_fip_timer);
+
+ INIT_WORK(&fnic->fip_timer_work, fnic_work_on_fip_timer);
+ queue_work(fnic_fip_queue, &fnic->fip_timer_work);
+}
+
+/**
+ * fnic_handle_enode_ka_timer - FIP ENode keep-alive timer handler.
+ * @t: Handle to the timer list
+ */
+void fnic_handle_enode_ka_timer(struct timer_list *t)
+{
+ uint8_t *frame;
+ struct fnic *fnic = from_timer(fnic, t, enode_ka_timer);
+
+ struct fnic_iport_s *iport = &fnic->iport;
+ struct fip_enode_ka *penode_ka;
+ u64 enode_ka_tov;
+ uint16_t frame_size = sizeof(struct fip_enode_ka);
+
+ if (iport->fip.state != FDLS_FIP_FLOGI_COMPLETE)
+ return;
+
+ if ((iport->selected_fcf.ka_disabled)
+ || (iport->selected_fcf.fka_adv_period == 0)) {
+ return;
+ }
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send enode ka");
+ return;
+ }
+
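+ /* Build the ENode keep-alive: Ethernet header, FIP header and MAC descriptor */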
+ penode_ka = (struct fip_enode_ka *) frame;
+ *penode_ka = (struct fip_enode_ka) {
+ .eth = {
+ .h_proto = cpu_to_be16(ETH_P_FIP)},
+ .fip = {
+ .fip_ver = FIP_VER_ENCAPS(FIP_VER),
+ .fip_op = cpu_to_be16(FIP_OP_CTRL),
+ .fip_subcode = FIP_SC_REQ,
+ .fip_dl_len = cpu_to_be16(FIP_ENODE_KA_LEN)},
+ .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}}
+ };
+
+ memcpy(penode_ka->eth.h_source, iport->hwmac, ETH_ALEN);
+ memcpy(penode_ka->eth.h_dest, iport->selected_fcf.fcf_mac, ETH_ALEN);
+ memcpy(penode_ka->mac_desc.fd_mac, iport->hwmac, ETH_ALEN);
+
+ FNIC_FIP_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "Handle enode KA timer\n");
+ fnic_send_fip_frame(iport, frame, frame_size);
+ enode_ka_tov = jiffies
+ + msecs_to_jiffies(iport->selected_fcf.fka_adv_period);
+ mod_timer(&fnic->enode_ka_timer, round_jiffies(enode_ka_tov));
+}
+
+/**
+ * fnic_handle_vn_ka_timer - FIP VN_Port (virtual port) keep-alive timer handler.
+ * @t: Handle to the timer list
+ */
+void fnic_handle_vn_ka_timer(struct timer_list *t)
+{
+ uint8_t *frame;
+ struct fnic *fnic = from_timer(fnic, t, vn_ka_timer);
+
+ struct fnic_iport_s *iport = &fnic->iport;
+ struct fip_vn_port_ka *pvn_port_ka;
+ u64 vn_ka_tov;
+ uint8_t fcid[3];
+ uint16_t frame_size = sizeof(struct fip_vn_port_ka);
+
+ if (iport->fip.state != FDLS_FIP_FLOGI_COMPLETE)
+ return;
+
+ if ((iport->selected_fcf.ka_disabled)
+ || (iport->selected_fcf.fka_adv_period == 0)) {
+ return;
+ }
+
+ frame = fdls_alloc_frame(iport);
+ if (frame == NULL) {
+ FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Failed to allocate frame to send vn ka");
+ return;
+ }
+
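+ /* Build the VN_Port keep-alive: Ethernet header, FIP header, MAC and VN_Port descriptors */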
+ pvn_port_ka = (struct fip_vn_port_ka *) frame;
+ *pvn_port_ka = (struct fip_vn_port_ka) {
+ .eth = {
+ .h_proto = cpu_to_be16(ETH_P_FIP)},
+ .fip = {
+ .fip_ver = FIP_VER_ENCAPS(FIP_VER),
+ .fip_op = cpu_to_be16(FIP_OP_CTRL),
+ .fip_subcode = FIP_SC_REQ,
+ .fip_dl_len = cpu_to_be16(FIP_VN_KA_LEN)},
+ .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}},
+ .vn_port_desc = {.fd_desc = {.fip_dtype = FIP_DT_VN_ID, .fip_dlen = 5}}
+ };
+
+ memcpy(pvn_port_ka->eth.h_source, iport->fpma, ETH_ALEN);
+ memcpy(pvn_port_ka->eth.h_dest, iport->selected_fcf.fcf_mac, ETH_ALEN);
+ memcpy(pvn_port_ka->mac_desc.fd_mac, iport->hwmac, ETH_ALEN);
+ memcpy(pvn_port_ka->vn_port_desc.fd_mac, iport->fpma, ETH_ALEN);
+ hton24(fcid, iport->fcid);
+ memcpy(pvn_port_ka->vn_port_desc.fd_fc_id, fcid, 3);
+ FNIC_STD_SET_NPORT_NAME(&pvn_port_ka->vn_port_desc.fd_wwpn, iport->wwpn);
+
+ FNIC_FIP_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "Handle vnport KA timer\n");
+ fnic_send_fip_frame(iport, frame, frame_size);
+ vn_ka_tov = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD);
+ mod_timer(&fnic->vn_ka_timer, round_jiffies(vn_ka_tov));
+}
+
+/**
+ * fnic_vlan_discovery_timeout - Handle vlan discovery timeout
+ * @fnic: Handle to fnic driver instance
+ *
+ * Called at the end of the VLAN discovery or FCF discovery time window.
+ * Retries the current VLAN or moves on to the next one (restarting VLAN
+ * discovery if all VLANs are exhausted), then starts FCF discovery.
+ */
+void fnic_vlan_discovery_timeout(struct fnic *fnic)
+{
+ struct fcoe_vlan *vlan;
+ struct fnic_iport_s *iport = &fnic->iport;
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->stop_rx_link_events) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ if (!iport->usefip)
+ return;
+
+ spin_lock_irqsave(&fnic->vlans_lock, flags);
+ if (list_empty(&fnic->vlan_list)) {
+ /* no vlans available, try again */
+ spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+ fnic_fcoe_send_vlan_req(fnic);
+ return;
+ }
+
+ vlan = list_first_entry(&fnic->vlan_list, struct fcoe_vlan, list);
+
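+ /* Retry the current VLAN, or move to the next one if its solicitations are exhausted */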
+ if (vlan->state == FIP_VLAN_SENT) {
+ if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
+ /*
+ * no response on this vlan, remove from the list.
+ * Try the next vlan
+ */
+ list_del(&vlan->list);
+ kfree(vlan);
+ vlan = NULL;
+ if (list_empty(&fnic->vlan_list)) {
+ /* we exhausted all vlans, restart vlan disc */
+ spin_unlock_irqrestore(&fnic->vlans_lock,
+ flags);
+ fnic_fcoe_send_vlan_req(fnic);
+ return;
+ }
+ /* check the next vlan */
+ vlan =
+ list_first_entry(&fnic->vlan_list, struct fcoe_vlan,
+ list);
+
+ fnic->set_vlan(fnic, vlan->vid);
+ vlan->state = FIP_VLAN_SENT; /* sent now */
+
+ }
+ atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
+
+ } else {
+ fnic->set_vlan(fnic, vlan->vid);
+ vlan->state = FIP_VLAN_SENT; /* sent now */
+ }
+ vlan->sol_count++;
+ spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+ fnic_fcoe_start_fcf_discovery(fnic);
+}
+
+/**
+ * fnic_work_on_fcs_ka_timer - Handle work on FCS keep alive timer.
+ * @work: the work queue to be serviced
+ *
+ * Finish handling fcs_ka_timer in process context.
+ * Clean up, bring the link down, and restart FIP discovery.
+ */
+void fnic_work_on_fcs_ka_timer(struct work_struct *work)
+{
+ struct fnic *fnic = container_of(work, struct fnic, fip_timer_work);
+ struct fnic_iport_s *iport = &fnic->iport;
+
+ FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic 0x%p fcs ka timeout\n", fnic);
+
+ fnic_common_fip_cleanup(fnic);
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ fnic_fdls_link_down(iport);
+ iport->state = FNIC_IPORT_STATE_FIP;
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+
+ fnic_fcoe_send_vlan_req(fnic);
+}
+
+/**
+ * fnic_handle_fcs_ka_timer - Handle FCS keep alive timer.
+ * @t: Handle to the timer list
+ *
+ * No keep-alives were received from the FCF. Clean up, bring the link
+ * down, and restart FIP discovery.
+ */
+void fnic_handle_fcs_ka_timer(struct timer_list *t)
+{
+ struct fnic *fnic = from_timer(fnic, t, fcs_ka_timer);
+
+ INIT_WORK(&fnic->fip_timer_work, fnic_work_on_fcs_ka_timer);
+ queue_work(fnic_fip_queue, &fnic->fip_timer_work);
+}
diff --git a/drivers/scsi/fnic/fip.h b/drivers/scsi/fnic/fip.h
new file mode 100644
index 000000000000..79fee7628870
--- /dev/null
+++ b/drivers/scsi/fnic/fip.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+#ifndef _FIP_H_
+#define _FIP_H_
+
+#include "fdls_fc.h"
+#include "fnic_fdls.h"
+#include <scsi/fc/fc_fip.h>
+
+/* Drop the cast from the standard definition */
+#define FCOE_ALL_FCFS_MAC {0x01, 0x10, 0x18, 0x01, 0x00, 0x02}
+#define FCOE_MAX_SIZE 0x082E
+
+#define FCOE_CTLR_FIPVLAN_TOV (3*1000)
+#define FCOE_CTLR_FCS_TOV (3*1000)
+#define FCOE_CTLR_MAX_SOL (5*1000)
+
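+/* FIP descriptor list lengths (fip_dl_len), in 4-byte words */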
+#define FIP_DISC_SOL_LEN (6)
+#define FIP_VLAN_REQ_LEN (2)
+#define FIP_ENODE_KA_LEN (2)
+#define FIP_VN_KA_LEN (7)
+#define FIP_FLOGI_LEN (38)
+
+enum fdls_vlan_state {
+ FIP_VLAN_AVAIL,
+ FIP_VLAN_SENT
+};
+
+enum fdls_fip_state {
+ FDLS_FIP_INIT,
+ FDLS_FIP_VLAN_DISCOVERY_STARTED,
+ FDLS_FIP_FCF_DISCOVERY_STARTED,
+ FDLS_FIP_FLOGI_STARTED,
+ FDLS_FIP_FLOGI_COMPLETE,
+};
+
+/*
+ * VLAN entry.
+ */
+struct fcoe_vlan {
+ struct list_head list;
+ uint16_t vid; /* vlan ID */
+ uint16_t sol_count; /* no. of sols sent */
+ uint16_t state; /* state */
+};
+
+struct fip_vlan_req {
+ struct ethhdr eth;
+ struct fip_header fip;
+ struct fip_mac_desc mac_desc;
+} __packed;
+
+struct fip_vlan_notif {
+ struct fip_header fip;
+ struct fip_vlan_desc vlans_desc[];
+} __packed;
+
+struct fip_vn_port_ka {
+ struct ethhdr eth;
+ struct fip_header fip;
+ struct fip_mac_desc mac_desc;
+ struct fip_vn_desc vn_port_desc;
+} __packed;
+
+struct fip_enode_ka {
+ struct ethhdr eth;
+ struct fip_header fip;
+ struct fip_mac_desc mac_desc;
+} __packed;
+
+struct fip_cvl {
+ struct fip_header fip;
+ struct fip_mac_desc fcf_mac_desc;
+ struct fip_wwn_desc name_desc;
+ struct fip_vn_desc vn_ports_desc[];
+} __packed;
+
+struct fip_flogi_desc {
+ struct fip_desc fd_desc;
+ uint16_t rsvd;
+ struct fc_std_flogi flogi;
+} __packed;
+
+struct fip_flogi_rsp_desc {
+ struct fip_desc fd_desc;
+ uint16_t rsvd;
+ struct fc_std_flogi flogi;
+} __packed;
+
+struct fip_flogi {
+ struct ethhdr eth;
+ struct fip_header fip;
+ struct fip_flogi_desc flogi_desc;
+ struct fip_mac_desc mac_desc;
+} __packed;
+
+struct fip_flogi_rsp {
+ struct fip_header fip;
+ struct fip_flogi_rsp_desc rsp_desc;
+ struct fip_mac_desc mac_desc;
+} __packed;
+
+struct fip_discovery {
+ struct ethhdr eth;
+ struct fip_header fip;
+ struct fip_mac_desc mac_desc;
+ struct fip_wwn_desc name_desc;
+ struct fip_size_desc fcoe_desc;
+} __packed;
+
+struct fip_disc_adv {
+ struct fip_header fip;
+ struct fip_pri_desc prio_desc;
+ struct fip_mac_desc mac_desc;
+ struct fip_wwn_desc name_desc;
+ struct fip_fab_desc fabric_desc;
+ struct fip_fka_desc fka_adv_desc;
+} __packed;
+
+void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct fip_header *fiph);
+void fnic_fcoe_fip_discovery_resp(struct fnic *fnic, struct fip_header *fiph);
+void fnic_fcoe_process_flogi_resp(struct fnic *fnic, struct fip_header *fiph);
+void fnic_work_on_fip_timer(struct work_struct *work);
+void fnic_work_on_fcs_ka_timer(struct work_struct *work);
+void fnic_fcoe_send_vlan_req(struct fnic *fnic);
+void fnic_fcoe_start_fcf_discovery(struct fnic *fnic);
+void fnic_fcoe_start_flogi(struct fnic *fnic);
+void fnic_fcoe_process_cvl(struct fnic *fnic, struct fip_header *fiph);
+void fnic_vlan_discovery_timeout(struct fnic *fnic);
+
+extern struct workqueue_struct *fnic_fip_queue;
+
+#ifdef FNIC_DEBUG
+static inline void
+fnic_debug_dump_fip_frame(struct fnic *fnic, struct ethhdr *eth,
+ int len, char *pfx)
+{
+ struct fip_header *fiph = (struct fip_header *)(eth + 1);
+ u16 op = be16_to_cpu(fiph->fip_op);
+ u8 sub = fiph->fip_subcode;
+
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "FIP %s packet contents: op: 0x%x sub: 0x%x (len = %d)",
+ pfx, op, sub, len);
+
+ fnic_debug_dump(fnic, (uint8_t *)eth, len);
+}
+
+#else /* FNIC_DEBUG */
+
+static inline void
+fnic_debug_dump_fip_frame(struct fnic *fnic, struct ethhdr *eth,
+ int len, char *pfx) {}
+#endif /* FNIC_DEBUG */
+
+#endif /* _FIP_H_ */
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index ce73f08ee889..c2fdc6553e62 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -10,8 +10,10 @@
#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
-#include <scsi/libfc.h>
-#include <scsi/libfcoe.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc_frame.h>
#include "fnic_io.h"
#include "fnic_res.h"
#include "fnic_trace.h"
@@ -24,13 +26,15 @@
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_scsi.h"
+#include "fnic_fdls.h"
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.7.0.0"
+#define DRV_VERSION "1.8.0.2"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
+#define FABRIC_LOGO_MAX_RETRY 3
#define DESC_CLEAN_LOW_WATERMARK 8
#define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */
#define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */
@@ -38,6 +42,7 @@
#define FNIC_DFLT_IO_REQ 256 /* Default scsi_cmnd tag map entries */
#define FNIC_DFLT_QUEUE_DEPTH 256
#define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */
+#define LUN0_DELAY_TIME 9
/*
* Tag bits used for special requests.
@@ -75,6 +80,77 @@
#define FNIC_DEV_RST_TERM_DONE BIT(20)
#define FNIC_DEV_RST_ABTS_PENDING BIT(21)
+#define FNIC_FW_RESET_TIMEOUT 60000 /* mSec */
+#define FNIC_FCOE_MAX_CMD_LEN 16
+/* Retry supported by rport (returned by PRLI service parameters) */
+#define FNIC_FC_RP_FLAGS_RETRY 0x1
+
+/* Cisco vendor id */
+#define PCI_VENDOR_ID_CISCO 0x1137
+#define PCI_DEVICE_ID_CISCO_VIC_FC 0x0045 /* fc vnic */
+
+/* sereno pcie switch */
+#define PCI_DEVICE_ID_CISCO_SERENO 0x004e
+#define PCI_DEVICE_ID_CISCO_CRUZ 0x007a /* Cruz */
+#define PCI_DEVICE_ID_CISCO_BODEGA 0x0131 /* Bodega */
+#define PCI_DEVICE_ID_CISCO_BEVERLY 0x025f /* Beverly */
+
+/* Sereno */
+#define PCI_SUBDEVICE_ID_CISCO_VASONA 0x004f /* vasona mezz */
+#define PCI_SUBDEVICE_ID_CISCO_COTATI 0x0084 /* cotati mlom */
+#define PCI_SUBDEVICE_ID_CISCO_LEXINGTON 0x0085 /* lexington pcie */
+#define PCI_SUBDEVICE_ID_CISCO_ICEHOUSE 0x00cd /* Icehouse */
+#define PCI_SUBDEVICE_ID_CISCO_KIRKWOODLAKE 0x00ce /* KirkwoodLake pcie */
+#define PCI_SUBDEVICE_ID_CISCO_SUSANVILLE 0x012e /* Susanville MLOM */
+#define PCI_SUBDEVICE_ID_CISCO_TORRANCE 0x0139 /* Torrance MLOM */
+
+/* Cruz */
+#define PCI_SUBDEVICE_ID_CISCO_CALISTOGA 0x012c /* Calistoga MLOM */
+#define PCI_SUBDEVICE_ID_CISCO_MOUNTAINVIEW 0x0137 /* Cruz Mezz */
+/* Cruz MountTian SIOC */
+#define PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN 0x014b
+#define PCI_SUBDEVICE_ID_CISCO_CLEARLAKE 0x014d /* ClearLake pcie */
+/* Cruz MountTian2 SIOC */
+#define PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN2 0x0157
+#define PCI_SUBDEVICE_ID_CISCO_CLAREMONT 0x015d /* Claremont MLOM */
+
+/* Bodega */
+/* VIC 1457 PCIe mLOM */
+#define PCI_SUBDEVICE_ID_CISCO_BRADBURY 0x0218
+#define PCI_SUBDEVICE_ID_CISCO_BRENTWOOD 0x0217 /* VIC 1455 PCIe */
+/* VIC 1487 PCIe mLOM */
+#define PCI_SUBDEVICE_ID_CISCO_BURLINGAME 0x021a
+#define PCI_SUBDEVICE_ID_CISCO_BAYSIDE 0x0219 /* VIC 1485 PCIe */
+/* VIC 1440 Mezz mLOM */
+#define PCI_SUBDEVICE_ID_CISCO_BAKERSFIELD 0x0215
+#define PCI_SUBDEVICE_ID_CISCO_BOONVILLE 0x0216 /* VIC 1480 Mezz */
+#define PCI_SUBDEVICE_ID_CISCO_BENICIA 0x024a /* VIC 1495 */
+#define PCI_SUBDEVICE_ID_CISCO_BEAUMONT 0x024b /* VIC 1497 */
+#define PCI_SUBDEVICE_ID_CISCO_BRISBANE 0x02af /* VIC 1467 */
+#define PCI_SUBDEVICE_ID_CISCO_BENTON 0x02b0 /* VIC 1477 */
+#define PCI_SUBDEVICE_ID_CISCO_TWIN_RIVER 0x02cf /* VIC 14425 */
+#define PCI_SUBDEVICE_ID_CISCO_TWIN_PEAK 0x02d0 /* VIC 14825 */
+
+/* Beverly */
+#define PCI_SUBDEVICE_ID_CISCO_BERN 0x02de /* VIC 15420 */
+#define PCI_SUBDEVICE_ID_CISCO_STOCKHOLM 0x02dd /* VIC 15428 */
+#define PCI_SUBDEVICE_ID_CISCO_KRAKOW 0x02dc /* VIC 15411 */
+#define PCI_SUBDEVICE_ID_CISCO_LUCERNE 0x02db /* VIC 15231 */
+#define PCI_SUBDEVICE_ID_CISCO_TURKU 0x02e8 /* VIC 15238 */
+#define PCI_SUBDEVICE_ID_CISCO_TURKU_PLUS 0x02f3 /* VIC 15237 */
+#define PCI_SUBDEVICE_ID_CISCO_ZURICH 0x02df /* VIC 15230 */
+#define PCI_SUBDEVICE_ID_CISCO_RIGA 0x02e0 /* VIC 15427 */
+#define PCI_SUBDEVICE_ID_CISCO_GENEVA 0x02e1 /* VIC 15422 */
+#define PCI_SUBDEVICE_ID_CISCO_HELSINKI 0x02e4 /* VIC 15235 */
+#define PCI_SUBDEVICE_ID_CISCO_GOTHENBURG 0x02f2 /* VIC 15425 */
+
+struct fnic_pcie_device {
+ u32 device;
+ u8 *desc;
+ u32 subsystem_device;
+ u8 *subsys_desc;
+};
+
/*
* fnic private data per SCSI command.
* These fields are locked by the hashed io_req_lock.
@@ -127,8 +203,38 @@ static inline u64 fnic_flags_and_state(struct scsi_cmnd *cmd)
#define fnic_clear_state_flags(fnicp, st_flags) \
__fnic_set_state_flags(fnicp, st_flags, 1)
+enum reset_states {
+ NOT_IN_PROGRESS = 0,
+ IN_PROGRESS,
+ RESET_ERROR
+};
+
+enum rscn_type {
+ NOT_PC_RSCN = 0,
+ PC_RSCN
+};
+
+enum pc_rscn_handling_status {
+ PC_RSCN_HANDLING_NOT_IN_PROGRESS = 0,
+ PC_RSCN_HANDLING_IN_PROGRESS
+};
+
+enum pc_rscn_handling_feature {
+ PC_RSCN_HANDLING_FEATURE_OFF = 0,
+ PC_RSCN_HANDLING_FEATURE_ON
+};
+
+extern unsigned int fnic_fdmi_support;
extern unsigned int fnic_log_level;
extern unsigned int io_completions;
+extern struct workqueue_struct *fnic_event_queue;
+
+extern unsigned int pc_rscn_handling_feature_flag;
+extern spinlock_t reset_fnic_list_lock;
+extern struct list_head reset_fnic_list;
+extern struct workqueue_struct *reset_fnic_work_queue;
+extern struct work_struct reset_fnic_work;
+
#define FNIC_MAIN_LOGGING 0x01
#define FNIC_FCS_LOGGING 0x02
@@ -155,6 +261,12 @@ do { \
"fnic<%d>: %s: %d: " fmt, fnic_num,\
__func__, __LINE__, ##args);)
+#define FNIC_FIP_DBG(kern_level, host, fnic_num, fmt, args...) \
+ FNIC_CHECK_LOGGING(FNIC_FCS_LOGGING, \
+ shost_printk(kern_level, host, \
+ "fnic<%d>: %s: %d: " fmt, fnic_num,\
+ __func__, __LINE__, ##args);)
+
#define FNIC_SCSI_DBG(kern_level, host, fnic_num, fmt, args...) \
FNIC_CHECK_LOGGING(FNIC_SCSI_LOGGING, \
shost_printk(kern_level, host, \
@@ -213,12 +325,26 @@ enum fnic_state {
struct mempool;
+enum fnic_role_e {
+ FNIC_ROLE_FCP_INITIATOR = 0,
+};
+
enum fnic_evt {
FNIC_EVT_START_VLAN_DISC = 1,
FNIC_EVT_START_FCF_DISC = 2,
FNIC_EVT_MAX,
};
+struct fnic_frame_list {
+ /*
+ * Link to frame lists
+ */
+ struct list_head links;
+ void *fp;
+ int frame_len;
+ int rx_ethhdr_stripped;
+};
+
struct fnic_event {
struct list_head list;
struct fnic *fnic;
@@ -235,8 +361,9 @@ struct fnic_cpy_wq {
/* Per-instance private data structure */
struct fnic {
int fnic_num;
- struct fc_lport *lport;
- struct fcoe_ctlr ctlr; /* FIP FCoE controller structure */
+ enum fnic_role_e role;
+ struct fnic_iport_s iport;
+ struct Scsi_Host *host;
struct vnic_dev_bar bar0;
struct fnic_msix_entry msix[FNIC_MSIX_INTR_MAX];
@@ -255,6 +382,7 @@ struct fnic {
unsigned int wq_count;
unsigned int cq_count;
+ struct completion reset_completion_wait;
struct mutex sgreset_mutex;
spinlock_t sgreset_lock; /* lock for sgreset */
struct scsi_cmnd *sgreset_sc;
@@ -268,25 +396,27 @@ struct fnic {
u32 vlan_hw_insert:1; /* let hw insert the tag */
u32 in_remove:1; /* fnic device in removal */
u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */
- u32 link_events:1; /* set when we get any link event*/
-
- struct completion *remove_wait; /* device remove thread blocks */
+ struct completion *fw_reset_done;
+ u32 reset_in_progress;
atomic_t in_flight; /* io counter */
bool internal_reset_inprogress;
u32 _reserved; /* fill hole */
unsigned long state_flags; /* protected by host lock */
enum fnic_state state;
spinlock_t fnic_lock;
+ unsigned long lock_flags;
u16 vlan_id; /* VLAN tag including priority */
u8 data_src_addr[ETH_ALEN];
u64 fcp_input_bytes; /* internal statistic */
u64 fcp_output_bytes; /* internal statistic */
u32 link_down_cnt;
+ u32 soft_reset_count;
int link_status;
struct list_head list;
+ struct list_head links;
struct pci_dev *pdev;
struct vnic_fc_config config;
struct vnic_dev *vdev;
@@ -306,19 +436,29 @@ struct fnic {
struct work_struct link_work;
struct work_struct frame_work;
struct work_struct flush_work;
- struct sk_buff_head frame_queue;
- struct sk_buff_head tx_queue;
+ struct list_head frame_queue;
+ struct list_head tx_queue;
+ mempool_t *frame_pool;
+ mempool_t *frame_elem_pool;
+ struct work_struct tport_work;
+ struct list_head tport_event_list;
+
+ char subsys_desc[14];
+ int subsys_desc_len;
+ int pc_rscn_handling_status;
/*** FIP related data members -- start ***/
void (*set_vlan)(struct fnic *, u16 vlan);
struct work_struct fip_frame_work;
- struct sk_buff_head fip_frame_queue;
+ struct work_struct fip_timer_work;
+ struct list_head fip_frame_queue;
struct timer_list fip_timer;
- struct list_head vlans;
spinlock_t vlans_lock;
-
- struct work_struct event_work;
- struct list_head evlist;
+ struct timer_list retry_fip_timer;
+ struct timer_list fcs_ka_timer;
+ struct timer_list enode_ka_timer;
+ struct timer_list vn_ka_timer;
+ struct list_head vlan_list;
/*** FIP related data members -- end ***/
/* copy work queue cache line section */
@@ -341,11 +481,6 @@ struct fnic {
____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX];
};
-static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip)
-{
- return container_of(fip, struct fnic, ctlr);
-}
-
extern struct workqueue_struct *fnic_event_queue;
extern struct workqueue_struct *fnic_fip_queue;
extern const struct attribute_group *fnic_host_groups[];
@@ -356,29 +491,29 @@ int fnic_set_intr_mode_msix(struct fnic *fnic);
void fnic_free_intr(struct fnic *fnic);
int fnic_request_intr(struct fnic *fnic);
-int fnic_send(struct fc_lport *, struct fc_frame *);
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
void fnic_handle_frame(struct work_struct *work);
+void fnic_tport_event_handler(struct work_struct *work);
void fnic_handle_link(struct work_struct *work);
void fnic_handle_event(struct work_struct *work);
+void fdls_reclaim_oxid_handler(struct work_struct *work);
+void fdls_schedule_oxid_free(struct fnic_iport_s *iport, uint16_t *active_oxid);
+void fdls_schedule_oxid_free_retry_work(struct work_struct *work);
int fnic_rq_cmpl_handler(struct fnic *fnic, int);
int fnic_alloc_rq_frame(struct vnic_rq *rq);
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
void fnic_flush_tx(struct work_struct *work);
-void fnic_eth_send(struct fcoe_ctlr *, struct sk_buff *skb);
-void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *);
-void fnic_update_mac(struct fc_lport *, u8 *new);
void fnic_update_mac_locked(struct fnic *, u8 *new);
int fnic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
int fnic_abort_cmd(struct scsi_cmnd *);
int fnic_device_reset(struct scsi_cmnd *);
-int fnic_host_reset(struct scsi_cmnd *);
-int fnic_reset(struct Scsi_Host *);
-void fnic_scsi_cleanup(struct fc_lport *);
-void fnic_scsi_abort_io(struct fc_lport *);
-void fnic_empty_scsi_cleanup(struct fc_lport *);
-void fnic_exch_mgr_reset(struct fc_lport *, u32, u32);
+int fnic_eh_host_reset_handler(struct scsi_cmnd *sc);
+int fnic_host_reset(struct Scsi_Host *shost);
+void fnic_reset(struct Scsi_Host *shost);
+int fnic_issue_fc_host_lip(struct Scsi_Host *shost);
+void fnic_get_host_port_state(struct Scsi_Host *shost);
+void fnic_scsi_fcpio_reset(struct fnic *fnic);
int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do, unsigned int cq_index);
int fnic_wq_cmpl_handler(struct fnic *fnic, int);
int fnic_flogi_reg_handler(struct fnic *fnic, u32);
@@ -390,14 +525,15 @@ const char *fnic_state_to_str(unsigned int state);
void fnic_mq_map_queues_cpus(struct Scsi_Host *host);
void fnic_log_q_error(struct fnic *fnic);
void fnic_handle_link_event(struct fnic *fnic);
-
+int fnic_stats_debugfs_init(struct fnic *fnic);
+void fnic_stats_debugfs_remove(struct fnic *fnic);
int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *);
void fnic_handle_fip_frame(struct work_struct *work);
+void fnic_reset_work_handler(struct work_struct *work);
void fnic_handle_fip_event(struct fnic *fnic);
void fnic_fcoe_reset_vlans(struct fnic *fnic);
-void fnic_fcoe_evlist_free(struct fnic *fnic);
-extern void fnic_handle_fip_timer(struct fnic *fnic);
+extern void fnic_handle_fip_timer(struct timer_list *t);
static inline int
fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
@@ -406,4 +542,90 @@ fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
}
void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long);
void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *);
+void fnic_free_txq(struct list_head *head);
+int fnic_get_desc_by_devid(struct pci_dev *pdev, char **desc,
+ char **subsys_desc);
+void fnic_fdls_link_status_change(struct fnic *fnic, int linkup);
+void fnic_delete_fcp_tports(struct fnic *fnic);
+void fnic_flush_tport_event_list(struct fnic *fnic);
+int fnic_count_ioreqs_wq(struct fnic *fnic, u32 hwq, u32 portid);
+unsigned int fnic_count_ioreqs(struct fnic *fnic, u32 portid);
+unsigned int fnic_count_all_ioreqs(struct fnic *fnic);
+unsigned int fnic_count_lun_ioreqs_wq(struct fnic *fnic, u32 hwq,
+ struct scsi_device *device);
+unsigned int fnic_count_lun_ioreqs(struct fnic *fnic,
+ struct scsi_device *device);
+void fnic_scsi_unload(struct fnic *fnic);
+void fnic_scsi_unload_cleanup(struct fnic *fnic);
+int fnic_get_debug_info(struct stats_debug_info *info,
+ struct fnic *fnic);
+
+struct fnic_scsi_iter_data {
+ struct fnic *fnic;
+ void *data1;
+ void *data2;
+ bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc,
+ void *data1, void *data2);
+};
+
+static inline bool
+fnic_io_iter_handler(struct scsi_cmnd *sc, void *iter_data)
+{
+ struct fnic_scsi_iter_data *iter = iter_data;
+
+ return iter->fn(iter->fnic, sc, iter->data1, iter->data2);
+}
+
+static inline void
+fnic_scsi_io_iter(struct fnic *fnic,
+ bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc,
+ void *data1, void *data2),
+ void *data1, void *data2)
+{
+ struct fnic_scsi_iter_data iter_data = {
+ .fn = fn,
+ .fnic = fnic,
+ .data1 = data1,
+ .data2 = data2,
+ };
+ scsi_host_busy_iter(fnic->host, fnic_io_iter_handler, &iter_data);
+}
+
+#ifdef FNIC_DEBUG
+static inline void
+fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i = i+8) {
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "%d: %02x %02x %02x %02x %02x %02x %02x %02x", i / 8,
+ u8arr[i + 0], u8arr[i + 1], u8arr[i + 2], u8arr[i + 3],
+ u8arr[i + 4], u8arr[i + 5], u8arr[i + 6], u8arr[i + 7]);
+ }
+}
+
+static inline void
+fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr,
+ int len, char *pfx)
+{
+ uint32_t s_id, d_id;
+
+ s_id = ntoh24(fchdr->fh_s_id);
+ d_id = ntoh24(fchdr->fh_d_id);
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "%s packet contents: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x (len = %d)\n",
+ pfx, s_id, d_id, fchdr->fh_type,
+ FNIC_STD_GET_OX_ID(fchdr), len);
+
+ fnic_debug_dump(fnic, (uint8_t *)fchdr, len);
+
+}
+#else /* FNIC_DEBUG */
+static inline void
+fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, int len) {}
+static inline void
+fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr,
+ uint32_t len, char *pfx) {}
+#endif /* FNIC_DEBUG */
#endif /* _FNIC_H_ */
diff --git a/drivers/scsi/fnic/fnic_attrs.c b/drivers/scsi/fnic/fnic_attrs.c
index 0c5e57c7e322..705718f0809b 100644
--- a/drivers/scsi/fnic/fnic_attrs.c
+++ b/drivers/scsi/fnic/fnic_attrs.c
@@ -11,8 +11,8 @@
static ssize_t fnic_show_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct fc_lport *lp = shost_priv(class_to_shost(dev));
- struct fnic *fnic = lport_priv(lp);
+ struct fnic *fnic =
+ *((struct fnic **) shost_priv(class_to_shost(dev)));
return sysfs_emit(buf, "%s\n", fnic_state_str[fnic->state]);
}
@@ -26,9 +26,13 @@ static ssize_t fnic_show_drv_version(struct device *dev,
static ssize_t fnic_show_link_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct fc_lport *lp = shost_priv(class_to_shost(dev));
+ struct fnic *fnic =
+ *((struct fnic **) shost_priv(class_to_shost(dev)));
- return sysfs_emit(buf, "%s\n", (lp->link_up) ? "Link Up" : "Link Down");
+ return sysfs_emit(buf, "%s\n",
+ ((fnic->iport.state != FNIC_IPORT_STATE_INIT) &&
+ (fnic->iport.state != FNIC_IPORT_STATE_LINK_WAIT)) ?
+ "Link Up" : "Link Down");
}
static DEVICE_ATTR(fnic_state, S_IRUGO, fnic_show_state, NULL);
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index 2619a2d4f5f1..5767862ae42f 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -7,6 +7,9 @@
#include <linux/vmalloc.h>
#include "fnic.h"
+extern int fnic_get_debug_info(struct stats_debug_info *debug_buffer,
+ struct fnic *fnic);
+
static struct dentry *fnic_trace_debugfs_root;
static struct dentry *fnic_trace_debugfs_file;
static struct dentry *fnic_trace_enable;
@@ -593,6 +596,7 @@ static int fnic_stats_debugfs_open(struct inode *inode,
debug->buf_size = buf_size;
memset((void *)debug->debug_buffer, 0, buf_size);
debug->buffer_len = fnic_get_stats_data(debug, fnic_stats);
+ debug->buffer_len += fnic_get_debug_info(debug, fnic);
file->private_data = debug;
@@ -673,26 +677,25 @@ static const struct file_operations fnic_reset_debugfs_fops = {
* It will create file stats and reset_stats under statistics/host# directory
* to log per fnic stats.
*/
-void fnic_stats_debugfs_init(struct fnic *fnic)
+int fnic_stats_debugfs_init(struct fnic *fnic)
{
char name[16];
- snprintf(name, sizeof(name), "host%d", fnic->lport->host->host_no);
+ snprintf(name, sizeof(name), "host%d", fnic->host->host_no);
fnic->fnic_stats_debugfs_host = debugfs_create_dir(name,
fnic_stats_debugfs_root);
-
fnic->fnic_stats_debugfs_file = debugfs_create_file("stats",
S_IFREG|S_IRUGO|S_IWUSR,
fnic->fnic_stats_debugfs_host,
fnic,
&fnic_stats_debugfs_fops);
-
fnic->fnic_reset_debugfs_file = debugfs_create_file("reset_stats",
S_IFREG|S_IRUGO|S_IWUSR,
fnic->fnic_stats_debugfs_host,
fnic,
&fnic_reset_debugfs_fops);
+ return 0;
}
/*
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index a08293b2ad9f..103ab6f1f7cd 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -14,701 +14,379 @@
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
-#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
-#include <scsi/libfc.h>
+#include <linux/etherdevice.h>
+#include <scsi/scsi_transport_fc.h>
#include "fnic_io.h"
#include "fnic.h"
-#include "fnic_fip.h"
+#include "fnic_fdls.h"
+#include "fdls_fc.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"
+#include "fip.h"
+
+#define MAX_RESET_WAIT_COUNT 64
-static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
-struct workqueue_struct *fnic_fip_queue;
struct workqueue_struct *fnic_event_queue;
-static void fnic_set_eth_mode(struct fnic *);
-static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
-static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
-static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
-static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
-static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
+static uint8_t FCOE_ALL_FCF_MAC[6] = FC_FCOE_FLOGI_MAC;
-void fnic_handle_link(struct work_struct *work)
+/*
+ * Internal Functions
+ * This function initializes the src_mac address to be
+ * used in outgoing frames
+ */
+static inline void fnic_fdls_set_fcoe_srcmac(struct fnic *fnic,
+ uint8_t *src_mac)
{
- struct fnic *fnic = container_of(work, struct fnic, link_work);
- unsigned long flags;
- int old_link_status;
- u32 old_link_down_cnt;
- u64 old_port_speed, new_port_speed;
-
- spin_lock_irqsave(&fnic->fnic_lock, flags);
-
- fnic->link_events = 1; /* less work to just set everytime*/
-
- if (fnic->stop_rx_link_events) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return;
- }
-
- old_link_down_cnt = fnic->link_down_cnt;
- old_link_status = fnic->link_status;
- old_port_speed = atomic64_read(
- &fnic->fnic_stats.misc_stats.current_port_speed);
-
- fnic->link_status = vnic_dev_link_status(fnic->vdev);
- fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
-
- new_port_speed = vnic_dev_port_speed(fnic->vdev);
- atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed,
- new_port_speed);
- if (old_port_speed != new_port_speed)
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "Current vnic speed set to: %llu\n",
- new_port_speed);
-
- switch (vnic_dev_port_speed(fnic->vdev)) {
- case DCEM_PORTSPEED_10G:
- fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT;
- fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT;
- break;
- case DCEM_PORTSPEED_20G:
- fc_host_speed(fnic->lport->host) = FC_PORTSPEED_20GBIT;
- fnic->lport->link_supported_speeds = FC_PORTSPEED_20GBIT;
- break;
- case DCEM_PORTSPEED_25G:
- fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT;
- fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT;
- break;
- case DCEM_PORTSPEED_40G:
- case DCEM_PORTSPEED_4x10G:
- fc_host_speed(fnic->lport->host) = FC_PORTSPEED_40GBIT;
- fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT;
- break;
- case DCEM_PORTSPEED_100G:
- fc_host_speed(fnic->lport->host) = FC_PORTSPEED_100GBIT;
- fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT;
- break;
- default:
- fc_host_speed(fnic->lport->host) = FC_PORTSPEED_UNKNOWN;
- fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
- break;
- }
-
- if (old_link_status == fnic->link_status) {
- if (!fnic->link_status) {
- /* DOWN -> DOWN */
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- fnic_fc_trace_set_data(fnic->lport->host->host_no,
- FNIC_FC_LE, "Link Status: DOWN->DOWN",
- strlen("Link Status: DOWN->DOWN"));
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "down->down\n");
- } else {
- if (old_link_down_cnt != fnic->link_down_cnt) {
- /* UP -> DOWN -> UP */
- fnic->lport->host_stats.link_failure_count++;
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- fnic_fc_trace_set_data(
- fnic->lport->host->host_no,
- FNIC_FC_LE,
- "Link Status:UP_DOWN_UP",
- strlen("Link_Status:UP_DOWN_UP")
- );
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "link down\n");
- fcoe_ctlr_link_down(&fnic->ctlr);
- if (fnic->config.flags & VFCF_FIP_CAPABLE) {
- /* start FCoE VLAN discovery */
- fnic_fc_trace_set_data(
- fnic->lport->host->host_no,
- FNIC_FC_LE,
- "Link Status: UP_DOWN_UP_VLAN",
- strlen(
- "Link Status: UP_DOWN_UP_VLAN")
- );
- fnic_fcoe_send_vlan_req(fnic);
- return;
- }
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "up->down->up: Link up\n");
- fcoe_ctlr_link_up(&fnic->ctlr);
- } else {
- /* UP -> UP */
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- fnic_fc_trace_set_data(
- fnic->lport->host->host_no, FNIC_FC_LE,
- "Link Status: UP_UP",
- strlen("Link Status: UP_UP"));
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "up->up\n");
- }
- }
- } else if (fnic->link_status) {
- /* DOWN -> UP */
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- if (fnic->config.flags & VFCF_FIP_CAPABLE) {
- /* start FCoE VLAN discovery */
- fnic_fc_trace_set_data(fnic->lport->host->host_no,
- FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
- strlen("Link Status: DOWN_UP_VLAN"));
- fnic_fcoe_send_vlan_req(fnic);
-
- return;
- }
-
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "down->up: Link up\n");
- fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
- "Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
- fcoe_ctlr_link_up(&fnic->ctlr);
- } else {
- /* UP -> DOWN */
- fnic->lport->host_stats.link_failure_count++;
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "up->down: Link down\n");
- fnic_fc_trace_set_data(
- fnic->lport->host->host_no, FNIC_FC_LE,
- "Link Status: UP_DOWN",
- strlen("Link Status: UP_DOWN"));
- if (fnic->config.flags & VFCF_FIP_CAPABLE) {
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "deleting fip-timer during link-down\n");
- del_timer_sync(&fnic->fip_timer);
- }
- fcoe_ctlr_link_down(&fnic->ctlr);
- }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Setting src mac: %02x:%02x:%02x:%02x:%02x:%02x",
+ src_mac[0], src_mac[1], src_mac[2], src_mac[3],
+ src_mac[4], src_mac[5]);
+ memcpy(fnic->iport.fpma, src_mac, 6);
}
/*
- * This function passes incoming fabric frames to libFC
+ * This function initializes the dst_mac address to be
+ * used in outgoing frames
*/
-void fnic_handle_frame(struct work_struct *work)
+static inline void fnic_fdls_set_fcoe_dstmac(struct fnic *fnic,
+ uint8_t *dst_mac)
{
- struct fnic *fnic = container_of(work, struct fnic, frame_work);
- struct fc_lport *lp = fnic->lport;
- unsigned long flags;
- struct sk_buff *skb;
- struct fc_frame *fp;
-
- while ((skb = skb_dequeue(&fnic->frame_queue))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Setting dst mac: %02x:%02x:%02x:%02x:%02x:%02x",
+ dst_mac[0], dst_mac[1], dst_mac[2], dst_mac[3],
+ dst_mac[4], dst_mac[5]);
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (fnic->stop_rx_link_events) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- dev_kfree_skb(skb);
- return;
- }
- fp = (struct fc_frame *)skb;
-
- /*
- * If we're in a transitional state, just re-queue and return.
- * The queue will be serviced when we get to a stable state.
- */
- if (fnic->state != FNIC_IN_FC_MODE &&
- fnic->state != FNIC_IN_ETH_MODE) {
- skb_queue_head(&fnic->frame_queue, skb);
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return;
- }
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-
- fc_exch_recv(lp, fp);
- }
+ memcpy(fnic->iport.fcfmac, dst_mac, 6);
}
-void fnic_fcoe_evlist_free(struct fnic *fnic)
+void fnic_get_host_port_state(struct Scsi_Host *shost)
{
- struct fnic_event *fevt = NULL;
- struct fnic_event *next = NULL;
+ struct fnic *fnic = *((struct fnic **) shost_priv(shost));
+ struct fnic_iport_s *iport = &fnic->iport;
unsigned long flags;
spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (list_empty(&fnic->evlist)) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return;
- }
-
- list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
- list_del(&fevt->list);
- kfree(fevt);
- }
+ if (!fnic->link_status)
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ else if (iport->state == FNIC_IPORT_STATE_READY)
+ fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ else
+ fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
-void fnic_handle_event(struct work_struct *work)
+void fnic_fdls_link_status_change(struct fnic *fnic, int linkup)
{
- struct fnic *fnic = container_of(work, struct fnic, event_work);
- struct fnic_event *fevt = NULL;
- struct fnic_event *next = NULL;
- unsigned long flags;
+ struct fnic_iport_s *iport = &fnic->iport;
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (list_empty(&fnic->evlist)) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return;
- }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "link up: %d, usefip: %d", linkup, iport->usefip);
- list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
- if (fnic->stop_rx_link_events) {
- list_del(&fevt->list);
- kfree(fevt);
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return;
- }
- /*
- * If we're in a transitional state, just re-queue and return.
- * The queue will be serviced when we get to a stable state.
- */
- if (fnic->state != FNIC_IN_FC_MODE &&
- fnic->state != FNIC_IN_ETH_MODE) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return;
- }
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
- list_del(&fevt->list);
- switch (fevt->event) {
- case FNIC_EVT_START_VLAN_DISC:
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ if (linkup) {
+ if (iport->usefip) {
+ iport->state = FNIC_IPORT_STATE_FIP;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "link up: %d, usefip: %d", linkup, iport->usefip);
fnic_fcoe_send_vlan_req(fnic);
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- break;
- case FNIC_EVT_START_FCF_DISC:
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "Start FCF Discovery\n");
- fnic_fcoe_start_fcf_disc(fnic);
- break;
- default:
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "Unknown event 0x%x\n", fevt->event);
- break;
+ } else {
+ iport->state = FNIC_IPORT_STATE_FABRIC_DISC;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport->state: %d", iport->state);
+ fnic_fdls_disc_start(iport);
}
- kfree(fevt);
+ } else {
+ iport->state = FNIC_IPORT_STATE_LINK_WAIT;
+ if (!is_zero_ether_addr(iport->fpma))
+ vnic_dev_del_addr(fnic->vdev, iport->fpma);
+ fnic_common_fip_cleanup(fnic);
+ fnic_fdls_link_down(iport);
+
}
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
}
-/**
- * is_fnic_fip_flogi_reject() - Check if the Received FIP FLOGI frame is rejected
- * @fip: The FCoE controller that received the frame
- * @skb: The received FIP frame
- *
- * Returns non-zero if the frame is rejected with unsupported cmd with
- * insufficient resource els explanation.
+
+/*
+ * The FPMA can be taken from the ethhdr (dst_mac), from the FLOGI
+ * response, or derived from the FC_MAP and FCID combination. These
+ * should all match; revisit this if they can ever differ.
*/
-static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
- struct sk_buff *skb)
+void fnic_fdls_learn_fcoe_macs(struct fnic_iport_s *iport, void *rx_frame,
+ uint8_t *fcid)
{
- struct fc_lport *lport = fip->lp;
- struct fip_header *fiph;
- struct fc_frame_header *fh = NULL;
- struct fip_desc *desc;
- struct fip_encaps *els;
- u16 op;
- u8 els_op;
- u8 sub;
-
- size_t rlen;
- size_t dlen = 0;
-
- if (skb_linearize(skb))
- return 0;
+ struct fnic *fnic = iport->fnic;
+ struct ethhdr *ethhdr = (struct ethhdr *) rx_frame;
+ uint8_t fcmac[6] = { 0x0E, 0xFC, 0x00, 0x00, 0x00, 0x00 };
- if (skb->len < sizeof(*fiph))
- return 0;
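+ /* FPMA: FC-MAP (0E:FC:00) followed by the 24-bit FC_ID */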
+ memcpy(&fcmac[3], fcid, 3);
- fiph = (struct fip_header *)skb->data;
- op = ntohs(fiph->fip_op);
- sub = fiph->fip_subcode;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "learn fcoe: dst_mac: %02x:%02x:%02x:%02x:%02x:%02x",
+ ethhdr->h_dest[0], ethhdr->h_dest[1],
+ ethhdr->h_dest[2], ethhdr->h_dest[3],
+ ethhdr->h_dest[4], ethhdr->h_dest[5]);
- if (op != FIP_OP_LS)
- return 0;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "learn fcoe: fc_mac: %02x:%02x:%02x:%02x:%02x:%02x",
+ fcmac[0], fcmac[1], fcmac[2], fcmac[3], fcmac[4],
+ fcmac[5]);
- if (sub != FIP_SC_REP)
- return 0;
-
- rlen = ntohs(fiph->fip_dl_len) * 4;
- if (rlen + sizeof(*fiph) > skb->len)
- return 0;
-
- desc = (struct fip_desc *)(fiph + 1);
- dlen = desc->fip_dlen * FIP_BPW;
+ fnic_fdls_set_fcoe_srcmac(fnic, fcmac);
+ fnic_fdls_set_fcoe_dstmac(fnic, ethhdr->h_source);
+}
- if (desc->fip_dtype == FIP_DT_FLOGI) {
+void fnic_fdls_init(struct fnic *fnic, int usefip)
+{
+ struct fnic_iport_s *iport = &fnic->iport;
- if (dlen < sizeof(*els) + sizeof(*fh) + 1)
- return 0;
+ /* Initialize iPort structure */
+ iport->state = FNIC_IPORT_STATE_INIT;
+ iport->fnic = fnic;
+ iport->usefip = usefip;
- els = (struct fip_encaps *)desc;
- fh = (struct fc_frame_header *)(els + 1);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iportsrcmac: %02x:%02x:%02x:%02x:%02x:%02x",
+ iport->hwmac[0], iport->hwmac[1], iport->hwmac[2],
+ iport->hwmac[3], iport->hwmac[4], iport->hwmac[5]);
- if (!fh)
- return 0;
+ INIT_LIST_HEAD(&iport->tport_list);
+ INIT_LIST_HEAD(&iport->tport_list_pending_del);
- /*
- * ELS command code, reason and explanation should be = Reject,
- * unsupported command and insufficient resource
- */
- els_op = *(u8 *)(fh + 1);
- if (els_op == ELS_LS_RJT) {
- shost_printk(KERN_INFO, lport->host,
- "Flogi Request Rejected by Switch\n");
- return 1;
- }
- shost_printk(KERN_INFO, lport->host,
- "Flogi Request Accepted by Switch\n");
- }
- return 0;
+ fnic_fdls_disc_init(iport);
}
-static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
+void fnic_handle_link(struct work_struct *work)
{
- struct fcoe_ctlr *fip = &fnic->ctlr;
- struct fnic_stats *fnic_stats = &fnic->fnic_stats;
- struct sk_buff *skb;
- char *eth_fr;
- struct fip_vlan *vlan;
- u64 vlan_tov;
+ struct fnic *fnic = container_of(work, struct fnic, link_work);
+ int old_link_status;
+ u32 old_link_down_cnt;
+ int max_count = 0;
- fnic_fcoe_reset_vlans(fnic);
- fnic->set_vlan(fnic, 0);
+ if (vnic_dev_get_intr_mode(fnic->vdev) != VNIC_DEV_INTR_MODE_MSI)
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Interrupt mode is not MSI\n");
- if (printk_ratelimit())
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "Sending VLAN request...\n");
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
- skb = dev_alloc_skb(sizeof(struct fip_vlan));
- if (!skb)
+ if (fnic->stop_rx_link_events) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Stop link rx events\n");
return;
-
- eth_fr = (char *)skb->data;
- vlan = (struct fip_vlan *)eth_fr;
-
- memset(vlan, 0, sizeof(*vlan));
- memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
- memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
- vlan->eth.h_proto = htons(ETH_P_FIP);
-
- vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
- vlan->fip.fip_op = htons(FIP_OP_VLAN);
- vlan->fip.fip_subcode = FIP_SC_VL_REQ;
- vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);
-
- vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
- vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
- memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
-
- vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
- vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
- put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
- atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);
-
- skb_put(skb, sizeof(*vlan));
- skb->protocol = htons(ETH_P_FIP);
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- fip->send(fip, skb);
-
- /* set a timer so that we can retry if there no response */
- vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
- mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
-}
-
-static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
-{
- struct fcoe_ctlr *fip = &fnic->ctlr;
- struct fip_header *fiph;
- struct fip_desc *desc;
- struct fnic_stats *fnic_stats = &fnic->fnic_stats;
- u16 vid;
- size_t rlen;
- size_t dlen;
- struct fcoe_vlan *vlan;
- u64 sol_time;
- unsigned long flags;
-
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "Received VLAN response...\n");
-
- fiph = (struct fip_header *) skb->data;
-
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
- ntohs(fiph->fip_op), fiph->fip_subcode);
-
- rlen = ntohs(fiph->fip_dl_len) * 4;
- fnic_fcoe_reset_vlans(fnic);
- spin_lock_irqsave(&fnic->vlans_lock, flags);
- desc = (struct fip_desc *)(fiph + 1);
- while (rlen > 0) {
- dlen = desc->fip_dlen * FIP_BPW;
- switch (desc->fip_dtype) {
- case FIP_DT_VLAN:
- vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
- shost_printk(KERN_INFO, fnic->lport->host,
- "process_vlan_resp: FIP VLAN %d\n", vid);
- vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
- if (!vlan) {
- /* retry from timer */
- spin_unlock_irqrestore(&fnic->vlans_lock,
- flags);
- goto out;
- }
- vlan->vid = vid & 0x0fff;
- vlan->state = FIP_VLAN_AVAIL;
- list_add_tail(&vlan->list, &fnic->vlans);
- break;
- }
- desc = (struct fip_desc *)((char *)desc + dlen);
- rlen -= dlen;
}
- /* any VLAN descriptors present ? */
- if (list_empty(&fnic->vlans)) {
- /* retry from timer */
- atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "No VLAN descriptors in FIP VLAN response\n");
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
- goto out;
+ /* Do not process if the fnic is already in a transitional state */
+ if ((fnic->state != FNIC_IN_ETH_MODE)
+ && (fnic->state != FNIC_IN_FC_MODE)) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic in transitional state: %d. link up: %d ignored",
+ fnic->state, vnic_dev_link_status(fnic->vdev));
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Current link status: %d iport state: %d\n",
+ fnic->link_status, fnic->iport.state);
+ return;
}
- vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
- fnic->set_vlan(fnic, vlan->vid);
- vlan->state = FIP_VLAN_SENT; /* sent now */
- vlan->sol_count++;
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
-
- /* start the solicitation */
- fcoe_ctlr_link_up(fip);
-
- sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
- mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
-out:
- return;
-}
-
-static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
-{
- unsigned long flags;
- struct fcoe_vlan *vlan;
- u64 sol_time;
-
- spin_lock_irqsave(&fnic->vlans_lock, flags);
- vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
- fnic->set_vlan(fnic, vlan->vid);
- vlan->state = FIP_VLAN_SENT; /* sent now */
- vlan->sol_count = 1;
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
-
- /* start the solicitation */
- fcoe_ctlr_link_up(&fnic->ctlr);
-
- sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
- mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
-}
-
-static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
-{
- unsigned long flags;
- struct fcoe_vlan *fvlan;
+ old_link_down_cnt = fnic->link_down_cnt;
+ old_link_status = fnic->link_status;
+ fnic->link_status = vnic_dev_link_status(fnic->vdev);
+ fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
- spin_lock_irqsave(&fnic->vlans_lock, flags);
- if (list_empty(&fnic->vlans)) {
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
- return -EINVAL;
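+ /* Serialize with any in-progress fnic reset before acting on the link event */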
+ while (fnic->reset_in_progress == IN_PROGRESS) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic reset in progress. Link event needs to wait\n");
+
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "waiting for reset completion\n");
+ wait_for_completion_timeout(&fnic->reset_completion_wait,
+ msecs_to_jiffies(5000));
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "woken up from reset completion wait\n");
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+
+ max_count++;
+ if (max_count >= MAX_RESET_WAIT_COUNT) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Rstth waited for too long. Skipping handle link event\n");
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ return;
+ }
}
-
- fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
- if (fvlan->state == FIP_VLAN_USED) {
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
- return 0;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Marking fnic reset in progress\n");
+ fnic->reset_in_progress = IN_PROGRESS;
+
+ if ((vnic_dev_get_intr_mode(fnic->vdev) != VNIC_DEV_INTR_MODE_MSI) ||
+ (fnic->link_status != old_link_status)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "old link status: %d link status: %d\n",
+ old_link_status, (int) fnic->link_status);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "old down count %d down count: %d\n",
+ old_link_down_cnt, (int) fnic->link_down_cnt);
}
- if (fvlan->state == FIP_VLAN_SENT) {
- fvlan->state = FIP_VLAN_USED;
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
- return 0;
+ if (old_link_status == fnic->link_status) {
+ if (!fnic->link_status) {
+ /* DOWN -> DOWN */
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "down->down\n");
+ } else {
+ if (old_link_down_cnt != fnic->link_down_cnt) {
+ /* UP -> DOWN -> UP */
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "up->down. Link down\n");
+ fnic_fdls_link_status_change(fnic, 0);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "down->up. Link up\n");
+ fnic_fdls_link_status_change(fnic, 1);
+ } else {
+ /* UP -> UP */
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "up->up\n");
+ }
+ }
+ } else if (fnic->link_status) {
+ /* DOWN -> UP */
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "down->up. Link up\n");
+ fnic_fdls_link_status_change(fnic, 1);
+ } else {
+ /* UP -> DOWN */
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "up->down. Link down\n");
+ fnic_fdls_link_status_change(fnic, 0);
}
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
- return -EINVAL;
-}
-static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
-{
- struct fnic_event *fevt;
- unsigned long flags;
-
- fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
- if (!fevt)
- return;
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ fnic->reset_in_progress = NOT_IN_PROGRESS;
+ complete(&fnic->reset_completion_wait);
- fevt->fnic = fnic;
- fevt->event = ev;
-
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- list_add_tail(&fevt->list, &fnic->evlist);
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-
- schedule_work(&fnic->event_work);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Marking fnic reset completion\n");
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
}
-static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
+void fnic_handle_frame(struct work_struct *work)
{
- struct fip_header *fiph;
- int ret = 1;
- u16 op;
- u8 sub;
+ struct fnic *fnic = container_of(work, struct fnic, frame_work);
+ struct fnic_frame_list *cur_frame, *next;
+ int fchdr_offset = 0;
- if (!skb || !(skb->data))
- return -1;
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ list_for_each_entry_safe(cur_frame, next, &fnic->frame_queue, links) {
+ if (fnic->stop_rx_link_events) {
+ list_del(&cur_frame->links);
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ kfree(cur_frame->fp);
+ mempool_free(cur_frame, fnic->frame_elem_pool);
+ return;
+ }
- if (skb_linearize(skb))
- goto drop;
+ /*
+ * If we're in a transitional state, just re-queue and return.
+ * The queue will be serviced when we get to a stable state.
+ */
+ if (fnic->state != FNIC_IN_FC_MODE &&
+ fnic->state != FNIC_IN_ETH_MODE) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Cannot process frame in transitional state\n");
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ return;
+ }
- fiph = (struct fip_header *)skb->data;
- op = ntohs(fiph->fip_op);
- sub = fiph->fip_subcode;
+ list_del(&cur_frame->links);
- if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
- goto drop;
+ /* Frames from FCP_RQ will have ethhdrs stripped off */
+ fchdr_offset = (cur_frame->rx_ethhdr_stripped) ?
+ 0 : FNIC_ETH_FCOE_HDRS_OFFSET;
- if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
- goto drop;
+ fnic_fdls_recv_frame(&fnic->iport, cur_frame->fp,
+ cur_frame->frame_len, fchdr_offset);
- if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
- if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
- goto drop;
- /* pass it on to fcoe */
- ret = 1;
- } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) {
- /* set the vlan as used */
- fnic_fcoe_process_vlan_resp(fnic, skb);
- ret = 0;
- } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
- /* received CVL request, restart vlan disc */
- fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
- /* pass it on to fcoe */
- ret = 1;
+ kfree(cur_frame->fp);
+ mempool_free(cur_frame, fnic->frame_elem_pool);
}
-drop:
- return ret;
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
}
void fnic_handle_fip_frame(struct work_struct *work)
{
+ struct fnic_frame_list *cur_frame, *next;
struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
- struct fnic_stats *fnic_stats = &fnic->fnic_stats;
- unsigned long flags;
- struct sk_buff *skb;
- struct ethhdr *eh;
- while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
- spin_lock_irqsave(&fnic->fnic_lock, flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Processing FIP frame\n");
+
+ spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+ list_for_each_entry_safe(cur_frame, next, &fnic->fip_frame_queue,
+ links) {
if (fnic->stop_rx_link_events) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- dev_kfree_skb(skb);
+ list_del(&cur_frame->links);
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ kfree(cur_frame->fp);
+ kfree(cur_frame);
return;
}
+
/*
* If we're in a transitional state, just re-queue and return.
* The queue will be serviced when we get to a stable state.
*/
if (fnic->state != FNIC_IN_FC_MODE &&
- fnic->state != FNIC_IN_ETH_MODE) {
- skb_queue_head(&fnic->fip_frame_queue, skb);
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ fnic->state != FNIC_IN_ETH_MODE) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
return;
}
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- eh = (struct ethhdr *)skb->data;
- if (eh->h_proto == htons(ETH_P_FIP)) {
- skb_pull(skb, sizeof(*eh));
- if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
- dev_kfree_skb(skb);
- continue;
- }
- /*
- * If there's FLOGI rejects - clear all
- * fcf's & restart from scratch
- */
- if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
- atomic64_inc(
- &fnic_stats->vlan_stats.flogi_rejects);
- shost_printk(KERN_INFO, fnic->lport->host,
- "Trigger a Link down - VLAN Disc\n");
- fcoe_ctlr_link_down(&fnic->ctlr);
- /* start FCoE VLAN discovery */
- fnic_fcoe_send_vlan_req(fnic);
- dev_kfree_skb(skb);
- continue;
- }
- fcoe_ctlr_recv(&fnic->ctlr, skb);
- continue;
+
+ list_del(&cur_frame->links);
+
+ if (fdls_fip_recv_frame(fnic, cur_frame->fp)) {
+ kfree(cur_frame->fp);
+ kfree(cur_frame);
}
}
+ spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
}
/**
* fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
* @fnic: fnic instance.
- * @skb: Ethernet Frame.
+ * @fp: Ethernet Frame.
*/
-static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
+static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, void *fp)
{
- struct fc_frame *fp;
struct ethhdr *eh;
- struct fcoe_hdr *fcoe_hdr;
- struct fcoe_crc_eof *ft;
+ struct fnic_frame_list *fip_fr_elem;
+ unsigned long flags;
- /*
- * Undo VLAN encapsulation if present.
- */
- eh = (struct ethhdr *)skb->data;
- if (eh->h_proto == htons(ETH_P_8021Q)) {
- memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
- eh = skb_pull(skb, VLAN_HLEN);
- skb_reset_mac_header(skb);
- }
- if (eh->h_proto == htons(ETH_P_FIP)) {
- if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
- printk(KERN_ERR "Dropped FIP frame, as firmware "
- "uses non-FIP mode, Enable FIP "
- "using UCSM\n");
- goto drop;
- }
- if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
- FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) {
- printk(KERN_ERR "fnic ctlr frame trace error!!!");
- }
- skb_queue_tail(&fnic->fip_frame_queue, skb);
+ eh = (struct ethhdr *) fp;
+ if ((eh->h_proto == cpu_to_be16(ETH_P_FIP)) && (fnic->iport.usefip)) {
+ fip_fr_elem = (struct fnic_frame_list *)
+ kzalloc(sizeof(struct fnic_frame_list), GFP_ATOMIC);
+ if (!fip_fr_elem)
+ return 0;
+ fip_fr_elem->fp = fp;
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ list_add_tail(&fip_fr_elem->links, &fnic->fip_frame_queue);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
queue_work(fnic_fip_queue, &fnic->fip_frame_work);
- return 1; /* let caller know packet was used */
- }
- if (eh->h_proto != htons(ETH_P_FCOE))
- goto drop;
- skb_set_network_header(skb, sizeof(*eh));
- skb_pull(skb, sizeof(*eh));
-
- fcoe_hdr = (struct fcoe_hdr *)skb->data;
- if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
- goto drop;
-
- fp = (struct fc_frame *)skb;
- fc_frame_init(fp);
- fr_sof(fp) = fcoe_hdr->fcoe_sof;
- skb_pull(skb, sizeof(struct fcoe_hdr));
- skb_reset_transport_header(skb);
-
- ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
- fr_eof(fp) = ft->fcoe_eof;
- skb_trim(skb, skb->len - sizeof(*ft));
- return 0;
-drop:
- dev_kfree_skb_irq(skb);
- return -1;
+ return 1; /* let caller know packet was used */
+ } else
+ return 0;
}
/**
@@ -720,206 +398,147 @@ drop:
*/
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
- u8 *ctl = fnic->ctlr.ctl_src_addr;
+ struct fnic_iport_s *iport = &fnic->iport;
+ u8 *ctl = iport->hwmac;
u8 *data = fnic->data_src_addr;
if (is_zero_ether_addr(new))
new = ctl;
if (ether_addr_equal(data, new))
return;
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "update_mac %pM\n", new);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Update MAC: %u\n", *new);
+
if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
vnic_dev_del_addr(fnic->vdev, data);
+
memcpy(data, new, ETH_ALEN);
if (!ether_addr_equal(new, ctl))
vnic_dev_add_addr(fnic->vdev, new);
}
-/**
- * fnic_update_mac() - set data MAC address and filters.
- * @lport: local port.
- * @new: newly-assigned FCoE MAC address.
- */
-void fnic_update_mac(struct fc_lport *lport, u8 *new)
-{
- struct fnic *fnic = lport_priv(lport);
-
- spin_lock_irq(&fnic->fnic_lock);
- fnic_update_mac_locked(fnic, new);
- spin_unlock_irq(&fnic->fnic_lock);
-}
-
-/**
- * fnic_set_port_id() - set the port_ID after successful FLOGI.
- * @lport: local port.
- * @port_id: assigned FC_ID.
- * @fp: received frame containing the FLOGI accept or NULL.
- *
- * This is called from libfc when a new FC_ID has been assigned.
- * This causes us to reset the firmware to FC_MODE and setup the new MAC
- * address and FC_ID.
- *
- * It is also called with FC_ID 0 when we're logged off.
- *
- * If the FC_ID is due to point-to-point, fp may be NULL.
- */
-void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
-{
- struct fnic *fnic = lport_priv(lport);
- u8 *mac;
- int ret;
-
- FNIC_FCS_DBG(KERN_DEBUG, lport->host, fnic->fnic_num,
- "set port_id 0x%x fp 0x%p\n",
- port_id, fp);
-
- /*
- * If we're clearing the FC_ID, change to use the ctl_src_addr.
- * Set ethernet mode to send FLOGI.
- */
- if (!port_id) {
- fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
- fnic_set_eth_mode(fnic);
- return;
- }
-
- if (fp) {
- mac = fr_cb(fp)->granted_mac;
- if (is_zero_ether_addr(mac)) {
- /* non-FIP - FLOGI already accepted - ignore return */
- fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
- }
- fnic_update_mac(lport, mac);
- }
-
- /* Change state to reflect transition to FC mode */
- spin_lock_irq(&fnic->fnic_lock);
- if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
- fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
- else {
- FNIC_FCS_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
- "Unexpected fnic state: %s processing FLOGI response",
- fnic_state_to_str(fnic->state));
- spin_unlock_irq(&fnic->fnic_lock);
- return;
- }
- spin_unlock_irq(&fnic->fnic_lock);
-
- /*
- * Send FLOGI registration to firmware to set up FC mode.
- * The new address will be set up when registration completes.
- */
- ret = fnic_flogi_reg_handler(fnic, port_id);
-
- if (ret < 0) {
- spin_lock_irq(&fnic->fnic_lock);
- if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
- fnic->state = FNIC_IN_ETH_MODE;
- spin_unlock_irq(&fnic->fnic_lock);
- }
-}
-
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
*cq_desc, struct vnic_rq_buf *buf,
int skipped __attribute__((unused)),
void *opaque)
{
struct fnic *fnic = vnic_dev_priv(rq->vdev);
- struct sk_buff *skb;
- struct fc_frame *fp;
+ uint8_t *fp;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+ unsigned int ethhdr_stripped;
u8 type, color, eop, sop, ingress_port, vlan_stripped;
- u8 fcoe = 0, fcoe_sof, fcoe_eof;
- u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
- u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
- u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
+ u8 fcoe_fnic_crc_ok = 1, fcoe_enc_error = 0;
u8 fcs_ok = 1, packet_error = 0;
- u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
+ u16 q_number, completed_index, vlan;
u32 rss_hash;
+ u16 checksum;
+ u8 csum_not_calc, rss_type, ipv4, ipv6, ipv4_fragment;
+ u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
+ u8 fcoe = 0, fcoe_sof, fcoe_eof;
u16 exchange_id, tmpl;
u8 sof = 0;
u8 eof = 0;
u32 fcp_bytes_written = 0;
+ u16 enet_bytes_written = 0;
+ u32 bytes_written = 0;
unsigned long flags;
+ struct fnic_frame_list *frame_elem = NULL;
+ struct ethhdr *eh;
dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
- DMA_FROM_DEVICE);
- skb = buf->os_buf;
- fp = (struct fc_frame *)skb;
+ DMA_FROM_DEVICE);
+ fp = (uint8_t *) buf->os_buf;
buf->os_buf = NULL;
cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
if (type == CQ_DESC_TYPE_RQ_FCP) {
- cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
- &type, &color, &q_number, &completed_index,
- &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
- &tmpl, &fcp_bytes_written, &sof, &eof,
- &ingress_port, &packet_error,
- &fcoe_enc_error, &fcs_ok, &vlan_stripped,
- &vlan);
- skb_trim(skb, fcp_bytes_written);
- fr_sof(fp) = sof;
- fr_eof(fp) = eof;
-
+ cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *) cq_desc, &type,
+ &color, &q_number, &completed_index, &eop, &sop,
+ &fcoe_fnic_crc_ok, &exchange_id, &tmpl,
+ &fcp_bytes_written, &sof, &eof, &ingress_port,
+ &packet_error, &fcoe_enc_error, &fcs_ok,
+ &vlan_stripped, &vlan);
+ ethhdr_stripped = 1;
+ bytes_written = fcp_bytes_written;
} else if (type == CQ_DESC_TYPE_RQ_ENET) {
- cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
- &type, &color, &q_number, &completed_index,
- &ingress_port, &fcoe, &eop, &sop,
- &rss_type, &csum_not_calc, &rss_hash,
- &bytes_written, &packet_error,
- &vlan_stripped, &vlan, &checksum,
- &fcoe_sof, &fcoe_fc_crc_ok,
- &fcoe_enc_error, &fcoe_eof,
- &tcp_udp_csum_ok, &udp, &tcp,
- &ipv4_csum_ok, &ipv6, &ipv4,
- &ipv4_fragment, &fcs_ok);
- skb_trim(skb, bytes_written);
+ cq_enet_rq_desc_dec((struct cq_enet_rq_desc *) cq_desc, &type,
+ &color, &q_number, &completed_index,
+ &ingress_port, &fcoe, &eop, &sop, &rss_type,
+ &csum_not_calc, &rss_hash, &enet_bytes_written,
+ &packet_error, &vlan_stripped, &vlan,
+ &checksum, &fcoe_sof, &fcoe_fnic_crc_ok,
+ &fcoe_enc_error, &fcoe_eof, &tcp_udp_csum_ok,
+ &udp, &tcp, &ipv4_csum_ok, &ipv6, &ipv4,
+ &ipv4_fragment, &fcs_ok);
+
+ ethhdr_stripped = 0;
+ bytes_written = enet_bytes_written;
+
if (!fcs_ok) {
atomic64_inc(&fnic_stats->misc_stats.frame_errors);
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "fcs error. dropping packet.\n");
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic 0x%p fcs error. Dropping packet.\n", fnic);
goto drop;
}
- if (fnic_import_rq_eth_pkt(fnic, skb))
- return;
+ eh = (struct ethhdr *) fp;
+ if (eh->h_proto != cpu_to_be16(ETH_P_FCOE)) {
+ if (fnic_import_rq_eth_pkt(fnic, fp))
+ return;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping h_proto 0x%x",
+ be16_to_cpu(eh->h_proto));
+ goto drop;
+ }
} else {
- /* wrong CQ type*/
- shost_printk(KERN_ERR, fnic->lport->host,
- "fnic rq_cmpl wrong cq type x%x\n", type);
+ /* wrong CQ type */
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic rq_cmpl wrong cq type x%x\n", type);
goto drop;
}
- if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
+ if (!fcs_ok || packet_error || !fcoe_fnic_crc_ok || fcoe_enc_error) {
atomic64_inc(&fnic_stats->misc_stats.frame_errors);
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "fnic rq_cmpl fcoe x%x fcsok x%x"
- " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
- " x%x\n",
- fcoe, fcs_ok, packet_error,
- fcoe_fc_crc_ok, fcoe_enc_error);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fcoe %x fcsok %x pkterr %x ffco %x fee %x\n",
+ fcoe, fcs_ok, packet_error,
+ fcoe_fnic_crc_ok, fcoe_enc_error);
goto drop;
}
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (fnic->stop_rx_link_events) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic->stop_rx_link_events: %d\n",
+ fnic->stop_rx_link_events);
goto drop;
}
- fr_dev(fp) = fnic->lport;
+
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV,
- (char *)skb->data, skb->len)) != 0) {
- printk(KERN_ERR "fnic ctlr frame trace error!!!");
+
+ frame_elem = mempool_alloc(fnic->frame_elem_pool,
+ GFP_ATOMIC | __GFP_ZERO);
+ if (!frame_elem) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Failed to allocate memory for frame elem");
+ goto drop;
}
+ frame_elem->fp = fp;
+ frame_elem->rx_ethhdr_stripped = ethhdr_stripped;
+ frame_elem->frame_len = bytes_written;
- skb_queue_tail(&fnic->frame_queue, skb);
- queue_work(fnic_event_queue, &fnic->frame_work);
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ list_add_tail(&frame_elem->links, &fnic->frame_queue);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ queue_work(fnic_event_queue, &fnic->frame_work);
return;
+
drop:
- dev_kfree_skb_irq(skb);
+ kfree(fp);
}
static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
@@ -945,10 +564,10 @@ int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
fnic_rq_cmpl_handler_cont,
NULL);
- if (cur_work_done) {
+ if (cur_work_done && fnic->stop_rx_link_events != 1) {
err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
if (err)
- shost_printk(KERN_ERR, fnic->lport->host,
+ shost_printk(KERN_ERR, fnic->host,
"fnic_alloc_rq_frame can't alloc"
" frame\n");
}
@@ -966,218 +585,181 @@ int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
struct fnic *fnic = vnic_dev_priv(rq->vdev);
- struct sk_buff *skb;
+ void *buf;
u16 len;
dma_addr_t pa;
- int r;
+ int ret;
- len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
- skb = dev_alloc_skb(len);
- if (!skb) {
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "Unable to allocate RQ sk_buff\n");
+ len = FNIC_FRAME_HT_ROOM;
+ buf = kmalloc(len, GFP_ATOMIC);
+ if (!buf) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Unable to allocate RQ buffer of size: %d\n", len);
return -ENOMEM;
}
- skb_reset_mac_header(skb);
- skb_reset_transport_header(skb);
- skb_reset_network_header(skb);
- skb_put(skb, len);
- pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE);
+
+ pa = dma_map_single(&fnic->pdev->dev, buf, len, DMA_FROM_DEVICE);
if (dma_mapping_error(&fnic->pdev->dev, pa)) {
- r = -ENOMEM;
- printk(KERN_ERR "PCI mapping failed with error %d\n", r);
- goto free_skb;
+ ret = -ENOMEM;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "PCI mapping failed with error %d\n", ret);
+ goto free_buf;
}
- fnic_queue_rq_desc(rq, skb, pa, len);
+ fnic_queue_rq_desc(rq, buf, pa, len);
return 0;
-
-free_skb:
- kfree_skb(skb);
- return r;
+free_buf:
+ kfree(buf);
+ return ret;
}
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
- struct fc_frame *fp = buf->os_buf;
+ void *rq_buf = buf->os_buf;
struct fnic *fnic = vnic_dev_priv(rq->vdev);
dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
DMA_FROM_DEVICE);
- dev_kfree_skb(fp_skb(fp));
+ kfree(rq_buf);
buf->os_buf = NULL;
}
-/**
- * fnic_eth_send() - Send Ethernet frame.
- * @fip: fcoe_ctlr instance.
- * @skb: Ethernet Frame, FIP, without VLAN encapsulation.
- */
-void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
-{
- struct fnic *fnic = fnic_from_ctlr(fip);
- struct vnic_wq *wq = &fnic->wq[0];
- dma_addr_t pa;
- struct ethhdr *eth_hdr;
- struct vlan_ethhdr *vlan_hdr;
- unsigned long flags;
-
- if (!fnic->vlan_hw_insert) {
- eth_hdr = (struct ethhdr *)skb_mac_header(skb);
- vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr));
- memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
- vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
- vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
- vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
- if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
- FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) {
- printk(KERN_ERR "fnic ctlr frame trace error!!!");
- }
- } else {
- if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
- FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) {
- printk(KERN_ERR "fnic ctlr frame trace error!!!");
- }
- }
-
- pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&fnic->pdev->dev, pa)) {
- printk(KERN_ERR "DMA mapping failed\n");
- goto free_skb;
- }
-
- spin_lock_irqsave(&fnic->wq_lock[0], flags);
- if (!vnic_wq_desc_avail(wq))
- goto irq_restore;
-
- fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
- 0 /* hw inserts cos value */,
- fnic->vlan_id, 1);
- spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
- return;
-
-irq_restore:
- spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
- dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE);
-free_skb:
- kfree_skb(skb);
-}
-
/*
* Send FC frame.
*/
-static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
+static int fnic_send_frame(struct fnic *fnic, void *frame, int frame_len)
{
struct vnic_wq *wq = &fnic->wq[0];
- struct sk_buff *skb;
dma_addr_t pa;
- struct ethhdr *eth_hdr;
- struct vlan_ethhdr *vlan_hdr;
- struct fcoe_hdr *fcoe_hdr;
- struct fc_frame_header *fh;
- u32 tot_len, eth_hdr_len;
int ret = 0;
unsigned long flags;
- fh = fc_frame_header_get(fp);
- skb = fp_skb(fp);
-
- if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
- fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
- return 0;
-
- if (!fnic->vlan_hw_insert) {
- eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
- vlan_hdr = skb_push(skb, eth_hdr_len);
- eth_hdr = (struct ethhdr *)vlan_hdr;
- vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
- vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
- vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
- fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
- } else {
- eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
- eth_hdr = skb_push(skb, eth_hdr_len);
- eth_hdr->h_proto = htons(ETH_P_FCOE);
- fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
- }
-
- if (fnic->ctlr.map_dest)
- fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
- else
- memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
- memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
-
- tot_len = skb->len;
- BUG_ON(tot_len % 4);
-
- memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
- fcoe_hdr->fcoe_sof = fr_sof(fp);
- if (FC_FCOE_VER)
- FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);
-
- pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE);
- if (dma_mapping_error(&fnic->pdev->dev, pa)) {
- ret = -ENOMEM;
- printk(KERN_ERR "DMA map failed with error %d\n", ret);
- goto free_skb_on_err;
- }
+ pa = dma_map_single(&fnic->pdev->dev, frame, frame_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&fnic->pdev->dev, pa))
+ return -ENOMEM;
- if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
- (char *)eth_hdr, tot_len)) != 0) {
- printk(KERN_ERR "fnic ctlr frame trace error!!!");
+ if ((fnic_fc_trace_set_data(fnic->fnic_num,
+ FNIC_FC_SEND | 0x80, (char *) frame,
+ frame_len)) != 0) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic ctlr frame trace error");
}
spin_lock_irqsave(&fnic->wq_lock[0], flags);
if (!vnic_wq_desc_avail(wq)) {
- dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE);
+ dma_unmap_single(&fnic->pdev->dev, pa, frame_len, DMA_TO_DEVICE);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "vnic work queue descriptor is not available");
ret = -1;
- goto irq_restore;
+ goto fnic_send_frame_end;
}
- fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
- 0 /* hw inserts cos value */,
- fnic->vlan_id, 1, 1, 1);
+ /* hw inserts cos value */
+ fnic_queue_wq_desc(wq, frame, pa, frame_len, FC_EOF_T,
+ 0, fnic->vlan_id, 1, 1, 1);
-irq_restore:
+fnic_send_frame_end:
spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
-
-free_skb_on_err:
- if (ret)
- dev_kfree_skb_any(fp_skb(fp));
-
return ret;
}
-/*
- * fnic_send
- * Routine to send a raw frame
+/**
+ * fdls_send_fcoe_frame - fill in the Ethernet and FCoE headers and send an
+ * FC frame. This interface is used only in the non-fast path (login, fabric
+ * registrations, etc.).
+ *
+ * @fnic: fnic instance
+ * @frame: frame structure with FC payload filled in
+ * @frame_size: length of the frame to be sent
+ * @srcmac: source mac address
+ * @dstmac: destination mac address
+ *
+ * Called with the fnic lock held.
*/
-int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
+static int
+fdls_send_fcoe_frame(struct fnic *fnic, void *frame, int frame_size,
+ uint8_t *srcmac, uint8_t *dstmac)
{
- struct fnic *fnic = lport_priv(lp);
- unsigned long flags;
+ struct ethhdr *pethhdr;
+ struct fcoe_hdr *pfcoe_hdr;
+ struct fnic_frame_list *frame_elem;
+ int len = frame_size;
+ int ret;
+ struct fc_frame_header *fchdr = (struct fc_frame_header *) (frame +
+ FNIC_ETH_FCOE_HDRS_OFFSET);
- if (fnic->in_remove) {
- dev_kfree_skb(fp_skb(fp));
- return -1;
- }
+ pethhdr = (struct ethhdr *) frame;
+ pethhdr->h_proto = cpu_to_be16(ETH_P_FCOE);
+ memcpy(pethhdr->h_source, srcmac, ETH_ALEN);
+ memcpy(pethhdr->h_dest, dstmac, ETH_ALEN);
+
+ pfcoe_hdr = (struct fcoe_hdr *) (frame + sizeof(struct ethhdr));
+ pfcoe_hdr->fcoe_sof = FC_SOF_I3;
/*
* Queue frame if in a transitional state.
* This occurs while registering the Port_ID / MAC address after FLOGI.
*/
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
- skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ if ((fnic->state != FNIC_IN_FC_MODE)
+ && (fnic->state != FNIC_IN_ETH_MODE)) {
+ frame_elem = mempool_alloc(fnic->frame_elem_pool,
+ GFP_ATOMIC | __GFP_ZERO);
+ if (!frame_elem) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Failed to allocate memory for frame elem");
+ return -ENOMEM;
+ }
+
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "Queueing FC frame: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x\n",
+ ntoh24(fchdr->fh_s_id), ntoh24(fchdr->fh_d_id),
+ fchdr->fh_type, FNIC_STD_GET_OX_ID(fchdr));
+ frame_elem->fp = frame;
+ frame_elem->frame_len = len;
+ list_add_tail(&frame_elem->links, &fnic->tx_queue);
return 0;
}
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return fnic_send_frame(fnic, fp);
+ fnic_debug_dump_fc_frame(fnic, fchdr, frame_size, "Outgoing");
+
+ ret = fnic_send_frame(fnic, frame, len);
+ return ret;
+}
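For reference, the header layout this helper builds is a plain Ethernet header followed by the FCoE header, with the FC frame header starting at FNIC_ETH_FCOE_HDRS_OFFSET. A minimal layout sketch, illustrative only and not part of the patch (the driver itself works with raw byte offsets rather than such a struct):

	/* Illustrative sketch only -- not part of the patch. */
	struct fnic_fcoe_frame_layout {
		struct ethhdr eth;          /* h_proto = ETH_P_FCOE, src/dst MACs filled in */
		struct fcoe_hdr fcoe;       /* fcoe_sof = FC_SOF_I3 */
		struct fc_frame_header fc;  /* begins at FNIC_ETH_FCOE_HDRS_OFFSET */
		/* FC payload, CRC and EOF follow */
	} __packed;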
+
+void fnic_send_fcoe_frame(struct fnic_iport_s *iport, void *frame,
+ int frame_size)
+{
+ struct fnic *fnic = iport->fnic;
+ uint8_t *dstmac, *srcmac;
+
+ /* If module unload is in-progress, don't send */
+ if (fnic->in_remove)
+ return;
+
+ if (iport->fabric.flags & FNIC_FDLS_FPMA_LEARNT) {
+ srcmac = iport->fpma;
+ dstmac = iport->fcfmac;
+ } else {
+ srcmac = iport->hwmac;
+ dstmac = FCOE_ALL_FCF_MAC;
+ }
+
+ fdls_send_fcoe_frame(fnic, frame, frame_size, srcmac, dstmac);
+}
+
+int
+fnic_send_fip_frame(struct fnic_iport_s *iport, void *frame,
+ int frame_size)
+{
+ struct fnic *fnic = iport->fnic;
+
+ if (fnic->in_remove)
+ return -1;
+
+ fnic_debug_dump_fip_frame(fnic, frame, frame_size, "Outgoing");
+ return fnic_send_frame(fnic, frame, frame_size);
}
/**
@@ -1193,64 +775,87 @@ int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
void fnic_flush_tx(struct work_struct *work)
{
struct fnic *fnic = container_of(work, struct fnic, flush_work);
- struct sk_buff *skb;
struct fc_frame *fp;
+ struct fnic_frame_list *cur_frame, *next;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Flush queued frames");
- while ((skb = skb_dequeue(&fnic->tx_queue))) {
- fp = (struct fc_frame *)skb;
- fnic_send_frame(fnic, fp);
+ list_for_each_entry_safe(cur_frame, next, &fnic->tx_queue, links) {
+ fp = cur_frame->fp;
+ list_del(&cur_frame->links);
+ fnic_send_frame(fnic, fp, cur_frame->frame_len);
+ mempool_free(cur_frame, fnic->frame_elem_pool);
}
}
-/**
- * fnic_set_eth_mode() - put fnic into ethernet mode.
- * @fnic: fnic device
- *
- * Called without fnic lock held.
- */
-static void fnic_set_eth_mode(struct fnic *fnic)
+int
+fnic_fdls_register_portid(struct fnic_iport_s *iport, u32 port_id,
+ void *fp)
{
- unsigned long flags;
- enum fnic_state old_state;
+ struct fnic *fnic = iport->fnic;
+ struct ethhdr *ethhdr;
int ret;
- spin_lock_irqsave(&fnic->fnic_lock, flags);
-again:
- old_state = fnic->state;
- switch (old_state) {
- case FNIC_IN_FC_MODE:
- case FNIC_IN_ETH_TRANS_FC_MODE:
- default:
- fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Setting port id: 0x%x fp: 0x%p fnic state: %d", port_id,
+ fp, fnic->state);
- ret = fnic_fw_reset_handler(fnic);
+ if (fp) {
+ ethhdr = (struct ethhdr *) fp;
+ vnic_dev_add_addr(fnic->vdev, ethhdr->h_dest);
+ }
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
- goto again;
- if (ret)
- fnic->state = old_state;
- break;
-
- case FNIC_IN_FC_TRANS_ETH_MODE:
- case FNIC_IN_ETH_MODE:
- break;
+ /* Change state to reflect transition to FC mode */
+ if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
+ fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
+ else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Unexpected fnic state while processing FLOGI response\n");
+ return -1;
+ }
+
+ /*
+ * Send FLOGI registration to firmware to set up FC mode.
+ * The new address will be set up when registration completes.
+ */
+ ret = fnic_flogi_reg_handler(fnic, port_id);
+ if (ret < 0) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI registration error ret: %d fnic state: %d\n",
+ ret, fnic->state);
+ if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
+ fnic->state = FNIC_IN_ETH_MODE;
+
+ return -1;
+ }
+ iport->fabric.flags |= FNIC_FDLS_FPMA_LEARNT;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI registration success\n");
+ return 0;
+}
+
+void fnic_free_txq(struct list_head *head)
+{
+ struct fnic_frame_list *cur_frame, *next;
+
+ list_for_each_entry_safe(cur_frame, next, head, links) {
+ list_del(&cur_frame->links);
+ kfree(cur_frame->fp);
+ kfree(cur_frame);
}
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
struct cq_desc *cq_desc,
struct vnic_wq_buf *buf, void *opaque)
{
- struct sk_buff *skb = buf->os_buf;
- struct fc_frame *fp = (struct fc_frame *)skb;
struct fnic *fnic = vnic_dev_priv(wq->vdev);
dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
DMA_TO_DEVICE);
- dev_kfree_skb_irq(fp_skb(fp));
+ mempool_free(buf->os_buf, fnic->frame_pool);
buf->os_buf = NULL;
}
@@ -1288,119 +893,218 @@ int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
- struct fc_frame *fp = buf->os_buf;
struct fnic *fnic = vnic_dev_priv(wq->vdev);
dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
DMA_TO_DEVICE);
- dev_kfree_skb(fp_skb(fp));
+ kfree(buf->os_buf);
buf->os_buf = NULL;
}
-void fnic_fcoe_reset_vlans(struct fnic *fnic)
+void
+fnic_fdls_add_tport(struct fnic_iport_s *iport, struct fnic_tport_s *tport,
+ unsigned long flags)
{
+ struct fnic *fnic = iport->fnic;
+ struct fc_rport *rport;
+ struct fc_rport_identifiers ids;
+ struct rport_dd_data_s *rdd_data;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Adding rport fcid: 0x%x", tport->fcid);
+
+ ids.node_name = tport->wwnn;
+ ids.port_name = tport->wwpn;
+ ids.port_id = tport->fcid;
+ ids.roles = FC_RPORT_ROLE_FCP_TARGET;
+
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ rport = fc_remote_port_add(fnic->host, 0, &ids);
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (!rport) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Failed to add rport for tport: 0x%x", tport->fcid);
+ return;
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Added rport fcid: 0x%x", tport->fcid);
+
+ /* Mimic these assignments in queuecommand to avoid timing issues */
+ rport->maxframe_size = FNIC_FC_MAX_PAYLOAD_LEN;
+ rport->supported_classes = FC_COS_CLASS3 | FC_RPORT_ROLE_FCP_TARGET;
+ rdd_data = rport->dd_data;
+ rdd_data->tport = tport;
+ rdd_data->iport = iport;
+ tport->rport = rport;
+ tport->flags |= FNIC_FDLS_SCSI_REGISTERED;
+}
+
+void
+fnic_fdls_remove_tport(struct fnic_iport_s *iport,
+ struct fnic_tport_s *tport, unsigned long flags)
+{
+ struct fnic *fnic = iport->fnic;
+ struct rport_dd_data_s *rdd_data;
+
+ struct fc_rport *rport;
+
+ if (!tport)
+ return;
+
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINE);
+ rport = tport->rport;
+
+ if (rport) {
+ /* tport resource release will be done
+ * after fnic_terminate_rport_io()
+ */
+ tport->flags |= FNIC_FDLS_TPORT_DELETED;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ /* Interface to scsi_fc_transport */
+ fc_remote_port_delete(rport);
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Deregistered and freed tport fcid: 0x%x from scsi transport fc",
+ tport->fcid);
+
+ /*
+ * the dd_data is allocated by fc transport
+ * of size dd_fcrport_size
+ */
+ rdd_data = rport->dd_data;
+ rdd_data->tport = NULL;
+ rdd_data->iport = NULL;
+ list_del(&tport->links);
+ kfree(tport);
+ } else {
+ fnic_del_tport_timer_sync(fnic, tport);
+ list_del(&tport->links);
+ kfree(tport);
+ }
+}
+
+void fnic_delete_fcp_tports(struct fnic *fnic)
+{
+ struct fnic_tport_s *tport, *next;
unsigned long flags;
- struct fcoe_vlan *vlan;
- struct fcoe_vlan *next;
- /*
- * indicate a link down to fcoe so that all fcf's are free'd
- * might not be required since we did this before sending vlan
- * discovery request
- */
- spin_lock_irqsave(&fnic->vlans_lock, flags);
- if (!list_empty(&fnic->vlans)) {
- list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
- list_del(&vlan->list);
- kfree(vlan);
- }
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ list_for_each_entry_safe(tport, next, &fnic->iport.tport_list, links) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "removing fcp rport fcid: 0x%x", tport->fcid);
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINING);
+ fnic_del_tport_timer_sync(fnic, tport);
+ fnic_fdls_remove_tport(&fnic->iport, tport, flags);
}
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
-void fnic_handle_fip_timer(struct fnic *fnic)
+/**
+ * fnic_tport_event_handler() - Handler for remote port events
+ * in the tport_event_queue.
+ *
+ * @work: Work entry; used to locate the fnic whose tport event list is drained
+ */
+void fnic_tport_event_handler(struct work_struct *work)
{
+ struct fnic *fnic = container_of(work, struct fnic, tport_work);
+ struct fnic_tport_event_s *cur_evt, *next;
unsigned long flags;
- struct fcoe_vlan *vlan;
- struct fnic_stats *fnic_stats = &fnic->fnic_stats;
- u64 sol_time;
+ struct fnic_tport_s *tport;
spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (fnic->stop_rx_link_events) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return;
+ list_for_each_entry_safe(cur_evt, next, &fnic->tport_event_list, links) {
+ tport = cur_evt->arg1;
+ switch (cur_evt->event) {
+ case TGT_EV_RPORT_ADD:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Add rport event");
+ if (tport->state == FDLS_TGT_STATE_READY) {
+ fnic_fdls_add_tport(&fnic->iport,
+ (struct fnic_tport_s *) cur_evt->arg1, flags);
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Target not ready. Add rport event dropped: 0x%x",
+ tport->fcid);
+ }
+ break;
+ case TGT_EV_RPORT_DEL:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Remove rport event");
+ if (tport->state == FDLS_TGT_STATE_OFFLINING) {
+ fnic_fdls_remove_tport(&fnic->iport,
+ (struct fnic_tport_s *) cur_evt->arg1, flags);
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "remove rport event dropped tport fcid: 0x%x",
+ tport->fcid);
+ }
+ break;
+ case TGT_EV_TPORT_DELETE:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Delete tport event");
+ fdls_delete_tport(tport->iport, tport);
+ break;
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Unknown tport event");
+ break;
+ }
+ list_del(&cur_evt->links);
+ kfree(cur_evt);
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
- if (fnic->ctlr.mode == FIP_MODE_NON_FIP)
- return;
+void fnic_flush_tport_event_list(struct fnic *fnic)
+{
+ struct fnic_tport_event_s *cur_evt, *next;
+ unsigned long flags;
- spin_lock_irqsave(&fnic->vlans_lock, flags);
- if (list_empty(&fnic->vlans)) {
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
- /* no vlans available, try again */
- if (unlikely(fnic_log_level & FNIC_FCS_LOGGING))
- if (printk_ratelimit())
- shost_printk(KERN_DEBUG, fnic->lport->host,
- "Start VLAN Discovery\n");
- fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
- return;
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ list_for_each_entry_safe(cur_evt, next, &fnic->tport_event_list, links) {
+ list_del(&cur_evt->links);
+ kfree(cur_evt);
}
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
- vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "fip_timer: vlan %d state %d sol_count %d\n",
- vlan->vid, vlan->state, vlan->sol_count);
- switch (vlan->state) {
- case FIP_VLAN_USED:
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "FIP VLAN is selected for FC transaction\n");
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
- break;
- case FIP_VLAN_FAILED:
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
- /* if all vlans are in failed state, restart vlan disc */
- if (unlikely(fnic_log_level & FNIC_FCS_LOGGING))
- if (printk_ratelimit())
- shost_printk(KERN_DEBUG, fnic->lport->host,
- "Start VLAN Discovery\n");
- fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
- break;
- case FIP_VLAN_SENT:
- if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
- /*
- * no response on this vlan, remove from the list.
- * Try the next vlan
- */
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "Dequeue this VLAN ID %d from list\n",
- vlan->vid);
- list_del(&vlan->list);
- kfree(vlan);
- vlan = NULL;
- if (list_empty(&fnic->vlans)) {
- /* we exhausted all vlans, restart vlan disc */
- spin_unlock_irqrestore(&fnic->vlans_lock,
- flags);
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "fip_timer: vlan list empty, "
- "trigger vlan disc\n");
- fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
- return;
- }
- /* check the next vlan */
- vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
- list);
- fnic->set_vlan(fnic, vlan->vid);
- vlan->state = FIP_VLAN_SENT; /* sent now */
- }
- spin_unlock_irqrestore(&fnic->vlans_lock, flags);
- atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
- vlan->sol_count++;
- sol_time = jiffies + msecs_to_jiffies
- (FCOE_CTLR_START_DELAY);
- mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
- break;
+void fnic_reset_work_handler(struct work_struct *work)
+{
+ struct fnic *cur_fnic, *next_fnic;
+ unsigned long reset_fnic_list_lock_flags;
+ int host_reset_ret_code;
+
+ /*
+ * This handler runs on a single thread, per fnic module rather than per fnic.
+ * All fnics that need to be reset have been serialized
+ * via the reset fnic list.
+ */
+ spin_lock_irqsave(&reset_fnic_list_lock, reset_fnic_list_lock_flags);
+ list_for_each_entry_safe(cur_fnic, next_fnic, &reset_fnic_list, links) {
+ list_del(&cur_fnic->links);
+ spin_unlock_irqrestore(&reset_fnic_list_lock,
+ reset_fnic_list_lock_flags);
+
+ dev_err(&cur_fnic->pdev->dev, "fnic: <%d>: issuing a host reset\n",
+ cur_fnic->fnic_num);
+ host_reset_ret_code = fnic_host_reset(cur_fnic->host);
+ dev_err(&cur_fnic->pdev->dev,
+ "fnic: <%d>: returned from host reset with status: %d\n",
+ cur_fnic->fnic_num, host_reset_ret_code);
+
+ spin_lock_irqsave(&cur_fnic->fnic_lock, cur_fnic->lock_flags);
+ cur_fnic->pc_rscn_handling_status =
+ PC_RSCN_HANDLING_NOT_IN_PROGRESS;
+ spin_unlock_irqrestore(&cur_fnic->fnic_lock, cur_fnic->lock_flags);
+
+ spin_lock_irqsave(&reset_fnic_list_lock,
+ reset_fnic_list_lock_flags);
}
+ spin_unlock_irqrestore(&reset_fnic_list_lock,
+ reset_fnic_list_lock_flags);
}
diff --git a/drivers/scsi/fnic/fnic_fdls.h b/drivers/scsi/fnic/fnic_fdls.h
new file mode 100644
index 000000000000..531d0b37e450
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_fdls.h
@@ -0,0 +1,435 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#ifndef _FNIC_FDLS_H_
+#define _FNIC_FDLS_H_
+
+#include "fnic_stats.h"
+#include "fdls_fc.h"
+
+/* FDLS - Fabric discovery and login services
+ * -> VLAN discovery
+ * -> retry every retry delay seconds until it succeeds.
+ * <- List of VLANs
+ *
+ * -> Solicitation
+ * <- Solicitation response (Advertisement)
+ *
+ * -> FCF selection & FLOGI ( FLOGI timeout - 2 * E_D_TOV)
+ * <- FLOGI response
+ *
+ * -> FCF keep alive
+ * <- FCF keep alive
+ *
+ * -> PLOGI to FFFFFC (DNS) (PLOGI timeout - 2 * R_A_TOV)
+ * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
+ * <- PLOGI response
+ * -> Retry PLOGI to FFFFFC (DNS) - Number of retries from vnic.cfg
+ *
+ * -> SCR to FFFFFC (DNS) (SCR timeout - 2 * R_A_TOV)
+ * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
+ * <- SCR response
+ * -> Retry SCR - Number of retries 2
+ *
+ * -> GPN_FT to FFFFFC (GPN_FT timeout - 2 * R_A_TOV)
+ * -> Retry on BUSY until it succeeds
+ * -> 2 retries on timeout
+ *
+ * -> RFT_ID to FFFFFC (DNS) (RFT_ID timeout - 3 * R_A_TOV)
+ * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
+ * -> Retry RFT_ID to FFFFFC (DNS) (Number of retries 2)
+ * -> Ignore if both retries fail.
+ *
+ * Session establishment with targets
+ * For each PWWN
+ * -> PLOGI to FCID of that PWWN (PLOGI timeout 2 * R_A_TOV)
+ * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
+ * <- PLOGI response
+ * -> Retry PLOGI. Num retries using vnic.cfg
+ *
+ * -> PRLI to FCID of that PWWN (PRLI timeout 2 * R_A_TOV)
+ * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV)
+ * <- PRLI response
+ * -> Retry PRLI. Num retries using vnic.cfg
+ *
+ */
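All of the timeouts in the outline above are small multiples of the per-iport E_D_TOV/R_A_TOV values (held in milliseconds in struct fnic_iport_s later in this header). A hedged sketch of how such timeouts would be derived; the helper names are hypothetical and not part of the patch:

	/* Illustrative sketch only -- helper names are hypothetical. */
	static inline unsigned long fdls_flogi_tov_ms(struct fnic_iport_s *iport)
	{
		return 2 * iport->e_d_tov;	/* FLOGI timeout per the outline above */
	}

	static inline unsigned long fdls_els_tov_ms(struct fnic_iport_s *iport)
	{
		return 2 * iport->r_a_tov;	/* PLOGI/SCR/ABTS timeout per the outline above */
	}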
+
+#define FDLS_RETRY_COUNT 2
+
+/*
+ * OXID encoding:
+ * bits 0-8: oxid idx - allocated from the pool
+ * bits 9-13: oxid frame code from fnic_oxid_frame_type_e
+ * bits 14-15: all zeros
+ */
+#define FNIC_OXID_POOL_SZ (512) /* always power of 2 */
+#define FNIC_OXID_ENCODE(idx, frame_type) (frame_type | idx)
+#define FNIC_FRAME_MASK 0xFE00
+#define FNIC_FRAME_TYPE(oxid) (oxid & FNIC_FRAME_MASK)
+#define FNIC_OXID_IDX(oxid) ((oxid) & (FNIC_OXID_POOL_SZ - 1))
+
+#define OXID_RECLAIM_TOV(iport) (2 * iport->r_a_tov) /* in milliseconds */
+
+#define FNIC_FDLS_FABRIC_ABORT_ISSUED 0x1
+#define FNIC_FDLS_FPMA_LEARNT 0x2
+
+/* tport flags */
+#define FNIC_FDLS_TPORT_IN_GPN_FT_LIST 0x1
+#define FNIC_FDLS_TGT_ABORT_ISSUED 0x2
+#define FNIC_FDLS_TPORT_SEND_ADISC 0x4
+#define FNIC_FDLS_RETRY_FRAME 0x8
+#define FNIC_FDLS_TPORT_BUSY 0x10
+#define FNIC_FDLS_TPORT_TERMINATING 0x20
+#define FNIC_FDLS_TPORT_DELETED 0x40
+#define FNIC_FDLS_SCSI_REGISTERED 0x200
+
+/* Retry supported by rport (returned by PRLI service parameters) */
+#define FDLS_FC_RP_FLAGS_RETRY 0x1
+
+#define fdls_set_state(_fdls_fabric, _state) ((_fdls_fabric)->state = _state)
+#define fdls_get_state(_fdls_fabric) ((_fdls_fabric)->state)
+
+#define FNIC_FDMI_ACTIVE 0x8
+#define FNIC_FIRST_LINK_UP 0x2
+
+#define fdls_set_tport_state(_tport, _state) (_tport->state = _state)
+#define fdls_get_tport_state(_tport) (_tport->state)
+
+#define FNIC_PORTSPEED_10GBIT 1
+#define FNIC_FRAME_HT_ROOM (2148)
+#define FNIC_FCOE_FRAME_MAXSZ (2112)
+
+
+#define FNIC_FRAME_TYPE_FABRIC_FLOGI 0x1000
+#define FNIC_FRAME_TYPE_FABRIC_PLOGI 0x1200
+#define FNIC_FRAME_TYPE_FABRIC_RPN 0x1400
+#define FNIC_FRAME_TYPE_FABRIC_RFT 0x1600
+#define FNIC_FRAME_TYPE_FABRIC_RFF 0x1800
+#define FNIC_FRAME_TYPE_FABRIC_SCR 0x1A00
+#define FNIC_FRAME_TYPE_FABRIC_GPN_FT 0x1C00
+#define FNIC_FRAME_TYPE_FABRIC_LOGO 0x1E00
+#define FNIC_FRAME_TYPE_FDMI_PLOGI 0x2000
+#define FNIC_FRAME_TYPE_FDMI_RHBA 0x2200
+#define FNIC_FRAME_TYPE_FDMI_RPA 0x2400
+#define FNIC_FRAME_TYPE_TGT_PLOGI 0x2600
+#define FNIC_FRAME_TYPE_TGT_PRLI 0x2800
+#define FNIC_FRAME_TYPE_TGT_ADISC 0x2A00
+#define FNIC_FRAME_TYPE_TGT_LOGO 0x2C00
+
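Combining the encoding comment, the pool/mask macros and the frame-type codes above, an OXID decomposes as in this short sketch (illustrative only, not part of the patch):

	/* Illustrative sketch only -- not part of the patch. */
	uint16_t oxid = FNIC_OXID_ENCODE(0x1a, FNIC_FRAME_TYPE_FABRIC_FLOGI);

	/* 0x1000 | 0x1a == 0x101a */
	pr_info("oxid: 0x%x frame type: 0x%x idx: 0x%x\n",
		oxid, FNIC_FRAME_TYPE(oxid), FNIC_OXID_IDX(oxid));
	/* prints: oxid: 0x101a frame type: 0x1000 idx: 0x1a */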
+struct fnic_fip_fcf_s {
+ uint16_t vlan_id;
+ uint8_t fcf_mac[6];
+ uint8_t fcf_priority;
+ uint32_t fka_adv_period;
+ uint8_t ka_disabled;
+};
+
+enum fnic_fdls_state_e {
+ FDLS_STATE_INIT = 0,
+ FDLS_STATE_LINKDOWN,
+ FDLS_STATE_FABRIC_LOGO,
+ FDLS_STATE_FLOGO_DONE,
+ FDLS_STATE_FABRIC_FLOGI,
+ FDLS_STATE_FABRIC_PLOGI,
+ FDLS_STATE_RPN_ID,
+ FDLS_STATE_REGISTER_FC4_TYPES,
+ FDLS_STATE_REGISTER_FC4_FEATURES,
+ FDLS_STATE_SCR,
+ FDLS_STATE_GPN_FT,
+ FDLS_STATE_TGT_DISCOVERY,
+ FDLS_STATE_RSCN_GPN_FT,
+ FDLS_STATE_SEND_GPNFT
+};
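The fdls_set_state()/fdls_get_state() macros defined earlier operate on this enum through the fabric structure declared next; a minimal usage sketch, illustrative only and assuming an iport pointer is in scope:

	/* Illustrative sketch only -- assumes a valid iport pointer. */
	struct fnic_fdls_fabric_s *fabric = &iport->fabric;

	if (fdls_get_state(fabric) == FDLS_STATE_FABRIC_FLOGI)
		fdls_set_state(fabric, FDLS_STATE_FABRIC_PLOGI);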
+
+struct fnic_fdls_fabric_s {
+ enum fnic_fdls_state_e state;
+ uint32_t flags;
+ struct list_head tport_list; /* List of discovered tports */
+ struct timer_list retry_timer;
+ int del_timer_inprogress;
+ int del_fdmi_timer_inprogress;
+ int retry_counter;
+ int timer_pending;
+ int fdmi_retry;
+ struct timer_list fdmi_timer;
+ int fdmi_pending;
+};
+
+struct fnic_fdls_fip_s {
+ uint32_t state;
+ uint32_t flogi_retry;
+};
+
+/* Message to tport_event_handler */
+enum fnic_tgt_msg_id {
+ TGT_EV_NONE = 0,
+ TGT_EV_RPORT_ADD,
+ TGT_EV_RPORT_DEL,
+ TGT_EV_TPORT_DELETE,
+ TGT_EV_REMOVE
+};
+
+struct fnic_tport_event_s {
+ struct list_head links;
+ enum fnic_tgt_msg_id event;
+ void *arg1;
+};
+
+enum fdls_tgt_state_e {
+ FDLS_TGT_STATE_INIT = 0,
+ FDLS_TGT_STATE_PLOGI,
+ FDLS_TGT_STATE_PRLI,
+ FDLS_TGT_STATE_READY,
+ FDLS_TGT_STATE_LOGO_RECEIVED,
+ FDLS_TGT_STATE_ADISC,
+ FDL_TGT_STATE_PLOGO,
+ FDLS_TGT_STATE_OFFLINING,
+ FDLS_TGT_STATE_OFFLINE
+};
+
+struct fnic_tport_s {
+ struct list_head links; /* To link the tports */
+ enum fdls_tgt_state_e state;
+ uint32_t flags;
+ uint32_t fcid;
+ uint64_t wwpn;
+ uint64_t wwnn;
+ uint16_t active_oxid;
+ uint16_t tgt_flags;
+ atomic_t in_flight; /* io counter */
+ uint16_t max_payload_size;
+ uint16_t r_a_tov;
+ uint16_t e_d_tov;
+ uint16_t lun0_delay;
+ int max_concur_seqs;
+ uint32_t fcp_csp;
+ struct timer_list retry_timer;
+ int del_timer_inprogress;
+ int retry_counter;
+ int timer_pending;
+ unsigned int num_pending_cmds;
+ int nexus_restart_count;
+ int exch_reset_in_progress;
+ void *iport;
+ struct work_struct tport_del_work;
+ struct completion *tport_del_done;
+ struct fc_rport *rport;
+ char str_wwpn[20];
+ char str_wwnn[20];
+};
+
+/* OXID pool related structures */
+struct reclaim_entry_s {
+ struct list_head links;
+ /* oxid that needs to be freed after 2*r_a_tov */
+ uint16_t oxid_idx;
+ /* in jiffies. Used to compute the remaining wait time */
+ unsigned long expires;
+ unsigned long *bitmap;
+};
+
+/* used for allocating oxids for fabric and fdmi requests */
+struct fnic_oxid_pool_s {
+ DECLARE_BITMAP(bitmap, FNIC_OXID_POOL_SZ);
+ int sz; /* size of the pool or block */
+ int next_idx; /* used for cycling through the oxid pool */
+
+ /* retry schedule free */
+ DECLARE_BITMAP(pending_schedule_free, FNIC_OXID_POOL_SZ);
+ struct delayed_work schedule_oxid_free_retry;
+
+ /* List of oxids that need to be freed and reclaimed.
+ * This list is shared by all the oxid pools
+ */
+ struct list_head oxid_reclaim_list;
+ /* Work associated with reclaim list */
+ struct delayed_work oxid_reclaim_work;
+};
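Per the comments above, a freed oxid is parked on the shared reclaim list and only returned to the bitmap once OXID_RECLAIM_TOV(iport) milliseconds have elapsed. A hedged sketch of the enqueue side, illustrative only (the actual helpers live in fdls_disc.c and may differ; oxid and iport are assumed to be in scope):

	/* Illustrative sketch only -- not part of the patch. */
	struct reclaim_entry_s *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

	if (entry) {
		entry->oxid_idx = FNIC_OXID_IDX(oxid);
		entry->expires = jiffies + msecs_to_jiffies(OXID_RECLAIM_TOV(iport));
		list_add_tail(&entry->links, &iport->oxid_pool.oxid_reclaim_list);
		schedule_delayed_work(&iport->oxid_pool.oxid_reclaim_work,
				      msecs_to_jiffies(OXID_RECLAIM_TOV(iport)));
	}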
+
+/* iport */
+enum fnic_iport_state_e {
+ FNIC_IPORT_STATE_INIT = 0,
+ FNIC_IPORT_STATE_LINK_WAIT,
+ FNIC_IPORT_STATE_FIP,
+ FNIC_IPORT_STATE_FABRIC_DISC,
+ FNIC_IPORT_STATE_READY
+};
+
+struct fnic_iport_s {
+ enum fnic_iport_state_e state;
+ struct fnic *fnic;
+ uint64_t boot_time;
+ uint32_t flags;
+ int usefip;
+ uint8_t hwmac[6]; /* HW MAC Addr */
+ uint8_t fpma[6]; /* Fabric Provided MA */
+ uint8_t fcfmac[6]; /* MAC addr of Fabric */
+ uint16_t vlan_id;
+ uint32_t fcid;
+
+ /* oxid pool */
+ struct fnic_oxid_pool_s oxid_pool;
+
+ /*
+ * Fabric requests are serialized; only one request is outstanding at a time.
+ * The oxid is tracked so an abort can be sent if needed.
+ */
+ uint16_t active_oxid_fabric_req;
+ /* fdmi only */
+ uint16_t active_oxid_fdmi_plogi;
+ uint16_t active_oxid_fdmi_rhba;
+ uint16_t active_oxid_fdmi_rpa;
+
+ struct fnic_fip_fcf_s selected_fcf;
+ struct fnic_fdls_fip_s fip;
+ struct fnic_fdls_fabric_s fabric;
+ struct list_head tport_list;
+ struct list_head tport_list_pending_del;
+ /* list of tports for which we are yet to send PLOGO */
+ struct list_head inprocess_tport_list;
+ struct list_head deleted_tport_list;
+ struct work_struct tport_event_work;
+ uint32_t e_d_tov; /* msec */
+ uint32_t r_a_tov; /* msec */
+ uint32_t link_supported_speeds;
+ uint32_t max_flogi_retries;
+ uint32_t max_plogi_retries;
+ uint32_t plogi_timeout;
+ uint32_t service_params;
+ uint64_t wwpn;
+ uint64_t wwnn;
+ uint16_t max_payload_size;
+ spinlock_t deleted_tport_lst_lock;
+ struct completion *flogi_reg_done;
+ struct fnic_iport_stats iport_stats;
+ char str_wwpn[20];
+ char str_wwnn[20];
+};
+
+struct rport_dd_data_s {
+ struct fnic_tport_s *tport;
+ struct fnic_iport_s *iport;
+};
+
+enum fnic_recv_frame_type_e {
+ FNIC_FABRIC_FLOGI_RSP = 1,
+ FNIC_FABRIC_PLOGI_RSP,
+ FNIC_FABRIC_RPN_RSP,
+ FNIC_FABRIC_RFT_RSP,
+ FNIC_FABRIC_RFF_RSP,
+ FNIC_FABRIC_SCR_RSP,
+ FNIC_FABRIC_GPN_FT_RSP,
+ FNIC_FABRIC_BLS_ABTS_RSP,
+ FNIC_FDMI_PLOGI_RSP,
+ FNIC_FDMI_REG_HBA_RSP,
+ FNIC_FDMI_RPA_RSP,
+ FNIC_FDMI_BLS_ABTS_RSP,
+ FNIC_FABRIC_LOGO_RSP,
+
+ /* responses to target requests */
+ FNIC_TPORT_PLOGI_RSP,
+ FNIC_TPORT_PRLI_RSP,
+ FNIC_TPORT_ADISC_RSP,
+ FNIC_TPORT_BLS_ABTS_RSP,
+ FNIC_TPORT_LOGO_RSP,
+
+ /* unsolicited requests */
+ FNIC_BLS_ABTS_REQ,
+ FNIC_ELS_PLOGI_REQ,
+ FNIC_ELS_RSCN_REQ,
+ FNIC_ELS_LOGO_REQ,
+ FNIC_ELS_ECHO_REQ,
+ FNIC_ELS_ADISC,
+ FNIC_ELS_RLS,
+ FNIC_ELS_RRQ,
+ FNIC_ELS_UNSUPPORTED_REQ,
+};
+
+enum fnic_port_speeds {
+ DCEM_PORTSPEED_NONE = 0,
+ DCEM_PORTSPEED_1G = 1000,
+ DCEM_PORTSPEED_2G = 2000,
+ DCEM_PORTSPEED_4G = 4000,
+ DCEM_PORTSPEED_8G = 8000,
+ DCEM_PORTSPEED_10G = 10000,
+ DCEM_PORTSPEED_16G = 16000,
+ DCEM_PORTSPEED_20G = 20000,
+ DCEM_PORTSPEED_25G = 25000,
+ DCEM_PORTSPEED_32G = 32000,
+ DCEM_PORTSPEED_40G = 40000,
+ DCEM_PORTSPEED_4x10G = 41000,
+ DCEM_PORTSPEED_50G = 50000,
+ DCEM_PORTSPEED_64G = 64000,
+ DCEM_PORTSPEED_100G = 100000,
+ DCEM_PORTSPEED_128G = 128000,
+};
+
+/* Function Declarations */
+/* fdls_disc.c */
+void fnic_fdls_disc_init(struct fnic_iport_s *iport);
+void fnic_fdls_disc_start(struct fnic_iport_s *iport);
+void fnic_fdls_recv_frame(struct fnic_iport_s *iport, void *rx_frame,
+ int len, int fchdr_offset);
+void fnic_fdls_link_down(struct fnic_iport_s *iport);
+int fdls_init_frame_pool(struct fnic_iport_s *iport);
+uint8_t *fdls_alloc_frame(struct fnic_iport_s *iport);
+uint16_t fdls_alloc_oxid(struct fnic_iport_s *iport, int oxid_frame_type,
+ uint16_t *active_oxid);
+void fdls_free_oxid(struct fnic_iport_s *iport,
+ uint16_t oxid, uint16_t *active_oxid);
+void fdls_tgt_logout(struct fnic_iport_s *iport,
+ struct fnic_tport_s *tport);
+void fnic_del_fabric_timer_sync(struct fnic *fnic);
+void fnic_del_tport_timer_sync(struct fnic *fnic,
+ struct fnic_tport_s *tport);
+void fdls_send_fabric_logo(struct fnic_iport_s *iport);
+int fnic_fdls_validate_and_get_frame_type(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr);
+void fdls_send_tport_abts(struct fnic_iport_s *iport,
+ struct fnic_tport_s *tport);
+bool fdls_delete_tport(struct fnic_iport_s *iport,
+ struct fnic_tport_s *tport);
+void fdls_fdmi_timer_callback(struct timer_list *t);
+void fdls_fdmi_retry_plogi(struct fnic_iport_s *iport);
+
+/* fnic_fcs.c */
+void fnic_fdls_init(struct fnic *fnic, int usefip);
+void fnic_send_fcoe_frame(struct fnic_iport_s *iport, void *frame,
+ int frame_size);
+void fnic_fcoe_send_vlan_req(struct fnic *fnic);
+int fnic_send_fip_frame(struct fnic_iport_s *iport,
+ void *frame, int frame_size);
+void fnic_fdls_learn_fcoe_macs(struct fnic_iport_s *iport, void *rx_frame,
+ uint8_t *fcid);
+void fnic_fdls_add_tport(struct fnic_iport_s *iport,
+ struct fnic_tport_s *tport, unsigned long flags);
+void fnic_fdls_remove_tport(struct fnic_iport_s *iport,
+ struct fnic_tport_s *tport,
+ unsigned long flags);
+
+/* fip.c */
+void fnic_fcoe_send_vlan_req(struct fnic *fnic);
+void fnic_common_fip_cleanup(struct fnic *fnic);
+int fdls_fip_recv_frame(struct fnic *fnic, void *frame);
+void fnic_handle_fcs_ka_timer(struct timer_list *t);
+void fnic_handle_enode_ka_timer(struct timer_list *t);
+void fnic_handle_vn_ka_timer(struct timer_list *t);
+void fnic_handle_fip_timer(struct timer_list *t);
+extern void fdls_fabric_timer_callback(struct timer_list *t);
+
+/* fnic_scsi.c */
+void fnic_scsi_fcpio_reset(struct fnic *fnic);
+extern void fdls_fabric_timer_callback(struct timer_list *t);
+void fnic_rport_exch_reset(struct fnic *fnic, u32 fcid);
+int fnic_fdls_register_portid(struct fnic_iport_s *iport, u32 port_id,
+ void *fp);
+struct fnic_tport_s *fnic_find_tport_by_fcid(struct fnic_iport_s *iport,
+ uint32_t fcid);
+struct fnic_tport_s *fnic_find_tport_by_wwpn(struct fnic_iport_s *iport,
+ uint64_t wwpn);
+
+#endif /* _FNIC_FDLS_H_ */
diff --git a/drivers/scsi/fnic/fnic_fip.h b/drivers/scsi/fnic/fnic_fip.h
deleted file mode 100644
index 79f53029737b..000000000000
--- a/drivers/scsi/fnic/fnic_fip.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2008 Cisco Systems, Inc. All rights reserved.
- * Copyright 2007 Nuova Systems, Inc. All rights reserved.
- */
-
-#ifndef _FNIC_FIP_H_
-#define _FNIC_FIP_H_
-
-
-#define FCOE_CTLR_START_DELAY 2000 /* ms after first adv. to choose FCF */
-#define FCOE_CTLR_FIPVLAN_TOV 2000 /* ms after FIP VLAN disc */
-#define FCOE_CTLR_MAX_SOL 8
-
-#define FINC_MAX_FLOGI_REJECTS 8
-
-struct vlan {
- __be16 vid;
- __be16 type;
-};
-
-/*
- * VLAN entry.
- */
-struct fcoe_vlan {
- struct list_head list;
- u16 vid; /* vlan ID */
- u16 sol_count; /* no. of sols sent */
- u16 state; /* state */
-};
-
-enum fip_vlan_state {
- FIP_VLAN_AVAIL = 0, /* don't do anything */
- FIP_VLAN_SENT = 1, /* sent */
- FIP_VLAN_USED = 2, /* succeed */
- FIP_VLAN_FAILED = 3, /* failed to response */
-};
-
-struct fip_vlan {
- struct ethhdr eth;
- struct fip_header fip;
- struct {
- struct fip_mac_desc mac;
- struct fip_wwn_desc wwnn;
- } desc;
-};
-
-#endif /* __FINC_FIP_H_ */
diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h
index 5895ead20e14..0d974e040ab7 100644
--- a/drivers/scsi/fnic/fnic_io.h
+++ b/drivers/scsi/fnic/fnic_io.h
@@ -7,6 +7,7 @@
#define _FNIC_IO_H_
#include <scsi/fc/fc_fcp.h>
+#include "fnic_fdls.h"
#define FNIC_DFLT_SG_DESC_CNT 32
#define FNIC_MAX_SG_DESC_CNT 256 /* Maximum descriptors per sgl */
@@ -41,6 +42,8 @@ enum fnic_ioreq_state {
};
struct fnic_io_req {
+ struct fnic_iport_s *iport;
+ struct fnic_tport_s *tport;
struct host_sg_desc *sgl_list; /* sgl list */
void *sgl_list_alloc; /* sgl list address used for free */
dma_addr_t sense_buf_pa; /* dma address for sense buffer*/
@@ -55,15 +58,4 @@ struct fnic_io_req {
unsigned int tag;
struct scsi_cmnd *sc; /* midlayer's cmd pointer */
};
-
-enum fnic_port_speeds {
- DCEM_PORTSPEED_NONE = 0,
- DCEM_PORTSPEED_1G = 1000,
- DCEM_PORTSPEED_10G = 10000,
- DCEM_PORTSPEED_20G = 20000,
- DCEM_PORTSPEED_25G = 25000,
- DCEM_PORTSPEED_40G = 40000,
- DCEM_PORTSPEED_4x10G = 41000,
- DCEM_PORTSPEED_100G = 100000,
-};
#endif /* _FNIC_IO_H_ */
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c
index ff85441c6cea..7ed50b11afa6 100644
--- a/drivers/scsi/fnic/fnic_isr.c
+++ b/drivers/scsi/fnic/fnic_isr.c
@@ -7,7 +7,7 @@
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
-#include <scsi/libfc.h>
+#include <scsi/scsi_transport_fc.h>
#include <scsi/fc_frame.h>
#include "vnic_dev.h"
#include "vnic_intr.h"
@@ -222,7 +222,7 @@ int fnic_request_intr(struct fnic *fnic)
fnic->msix[i].devname,
fnic->msix[i].devid);
if (err) {
- FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"request_irq failed with error: %d\n",
err);
fnic_free_intr(fnic);
@@ -250,10 +250,10 @@ int fnic_set_intr_mode_msix(struct fnic *fnic)
* We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs
* (last INTR is used for WQ/RQ errors and notification area)
*/
- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"rq-array size: %d wq-array size: %d copy-wq array size: %d\n",
n, m, o);
- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"rq_count: %d raw_wq_count: %d wq_copy_count: %d cq_count: %d\n",
fnic->rq_count, fnic->raw_wq_count,
fnic->wq_copy_count, fnic->cq_count);
@@ -265,17 +265,17 @@ int fnic_set_intr_mode_msix(struct fnic *fnic)
vec_count = pci_alloc_irq_vectors(fnic->pdev, min_irqs, vecs,
PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"allocated %d MSI-X vectors\n",
vec_count);
if (vec_count > 0) {
if (vec_count < vecs) {
- FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"interrupts number mismatch: vec_count: %d vecs: %d\n",
vec_count, vecs);
if (vec_count < min_irqs) {
- FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"no interrupts for copy wq\n");
return 1;
}
@@ -287,7 +287,7 @@ int fnic_set_intr_mode_msix(struct fnic *fnic)
fnic->wq_copy_count = vec_count - n - m - 1;
fnic->wq_count = fnic->raw_wq_count + fnic->wq_copy_count;
if (fnic->cq_count != vec_count - 1) {
- FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"CQ count: %d does not match MSI-X vector count: %d\n",
fnic->cq_count, vec_count);
fnic->cq_count = vec_count - 1;
@@ -295,23 +295,23 @@ int fnic_set_intr_mode_msix(struct fnic *fnic)
fnic->intr_count = vec_count;
fnic->err_intr_offset = fnic->rq_count + fnic->wq_count;
- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"rq_count: %d raw_wq_count: %d copy_wq_base: %d\n",
fnic->rq_count,
fnic->raw_wq_count, fnic->copy_wq_base);
- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"wq_copy_count: %d wq_count: %d cq_count: %d\n",
fnic->wq_copy_count,
fnic->wq_count, fnic->cq_count);
- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"intr_count: %d err_intr_offset: %u",
fnic->intr_count,
fnic->err_intr_offset);
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSIX);
- FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"fnic using MSI-X\n");
return 0;
}
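
For the vector accounting in the hunk above, a minimal sketch of the allocation strategy, assuming the same n/m/o queue counts and min_irqs floor used there (variable names mirror the hunk and are illustrative, not part of the patch):

	/* one vector per RQ (n), raw WQ (m) and copy WQ (o), plus one
	 * for WQ/RQ errors and the notification area */
	unsigned int want = n + m + o + 1;
	int got = pci_alloc_irq_vectors(fnic->pdev, min_irqs, want,
					PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);

	if (got < 0)
		return got;		/* not even min_irqs available */
	if (got < want)			/* shrink copy-WQ count to fit */
		fnic->wq_copy_count = got - n - m - 1;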
@@ -351,7 +351,7 @@ int fnic_set_intr_mode(struct fnic *fnic)
fnic->intr_count = 1;
fnic->err_intr_offset = 0;
- FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"Using MSI Interrupts\n");
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSI);
@@ -377,7 +377,7 @@ int fnic_set_intr_mode(struct fnic *fnic)
fnic->cq_count = 3;
fnic->intr_count = 3;
- FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_ISR_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"Using Legacy Interrupts\n");
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index adec0df24bc4..9a357ff42085 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -16,21 +16,20 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/if_ether.h>
-#include <linux/blk-mq-pci.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
-#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "fnic_io.h"
-#include "fnic_fip.h"
#include "fnic.h"
+#include "fnic_fdls.h"
+#include "fdls_fc.h"
#define PCI_DEVICE_ID_CISCO_FNIC 0x0045
@@ -39,12 +38,18 @@
static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES];
static struct kmem_cache *fnic_io_req_cache;
+static struct kmem_cache *fdls_frame_cache;
+static struct kmem_cache *fdls_frame_elem_cache;
static LIST_HEAD(fnic_list);
static DEFINE_SPINLOCK(fnic_list_lock);
static DEFINE_IDA(fnic_ida);
+struct work_struct reset_fnic_work;
+LIST_HEAD(reset_fnic_list);
+DEFINE_SPINLOCK(reset_fnic_list_lock);
+
/* Supported devices by fnic module */
-static struct pci_device_id fnic_id_table[] = {
+static const struct pci_device_id fnic_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_FNIC) },
{ 0, }
};
@@ -60,6 +65,14 @@ unsigned int fnic_log_level;
module_param(fnic_log_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");
+unsigned int fnic_fdmi_support = 1;
+module_param(fnic_fdmi_support, int, 0644);
+MODULE_PARM_DESC(fnic_fdmi_support, "FDMI support");
+
+static unsigned int fnic_tgt_id_binding = 1;
+module_param(fnic_tgt_id_binding, uint, 0644);
+MODULE_PARM_DESC(fnic_tgt_id_binding,
+ "Target ID binding (0 for none. 1 for binding by WWPN (default))");
unsigned int io_completions = FNIC_DFLT_IO_COMPLETIONS;
module_param(io_completions, int, S_IRUGO|S_IWUSR);
@@ -79,15 +92,15 @@ static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH;
module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN");
-static struct libfc_function_template fnic_transport_template = {
- .frame_send = fnic_send,
- .lport_set_port_id = fnic_set_port_id,
- .fcp_abort_io = fnic_empty_scsi_cleanup,
- .fcp_cleanup = fnic_empty_scsi_cleanup,
- .exch_mgr_reset = fnic_exch_mgr_reset
-};
+unsigned int pc_rscn_handling_feature_flag = PC_RSCN_HANDLING_FEATURE_ON;
+module_param(pc_rscn_handling_feature_flag, uint, 0644);
+MODULE_PARM_DESC(pc_rscn_handling_feature_flag,
+ "PCRSCN handling (0 for none. 1 to handle PCRSCN (default))");
-static int fnic_slave_alloc(struct scsi_device *sdev)
+struct workqueue_struct *reset_fnic_work_queue;
+struct workqueue_struct *fnic_fip_queue;
+
+static int fnic_sdev_init(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
@@ -105,8 +118,8 @@ static const struct scsi_host_template fnic_host_template = {
.eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = fnic_abort_cmd,
.eh_device_reset_handler = fnic_device_reset,
- .eh_host_reset_handler = fnic_host_reset,
- .slave_alloc = fnic_slave_alloc,
+ .eh_host_reset_handler = fnic_eh_host_reset_handler,
+ .sdev_init = fnic_sdev_init,
.change_queue_depth = scsi_change_queue_depth,
.this_id = -1,
.cmd_per_lun = 3,
@@ -146,7 +159,7 @@ static struct fc_function_template fnic_fc_functions = {
.get_host_speed = fnic_get_host_speed,
.show_host_speed = 1,
.show_host_port_type = 1,
- .get_host_port_state = fc_get_host_port_state,
+ .get_host_port_state = fnic_get_host_port_state,
.show_host_port_state = 1,
.show_host_symbolic_name = 1,
.show_rport_maxframe_size = 1,
@@ -157,54 +170,88 @@ static struct fc_function_template fnic_fc_functions = {
.show_starget_port_id = 1,
.show_rport_dev_loss_tmo = 1,
.set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo,
- .issue_fc_host_lip = fnic_reset,
+ .issue_fc_host_lip = fnic_issue_fc_host_lip,
.get_fc_host_stats = fnic_get_stats,
.reset_fc_host_stats = fnic_reset_host_stats,
- .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
+ .dd_fcrport_size = sizeof(struct rport_dd_data_s),
.terminate_rport_io = fnic_terminate_rport_io,
- .bsg_request = fc_lport_bsg_request,
+ .bsg_request = NULL,
};
static void fnic_get_host_speed(struct Scsi_Host *shost)
{
- struct fc_lport *lp = shost_priv(shost);
- struct fnic *fnic = lport_priv(lp);
+ struct fnic *fnic = *((struct fnic **) shost_priv(shost));
u32 port_speed = vnic_dev_port_speed(fnic->vdev);
+ struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+
+ FNIC_MAIN_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "port_speed: %d Mbps", port_speed);
+ atomic64_set(&fnic_stats->misc_stats.port_speed_in_mbps, port_speed);
/* Add in other values as they get defined in fw */
switch (port_speed) {
+ case DCEM_PORTSPEED_1G:
+ fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
+ break;
+ case DCEM_PORTSPEED_2G:
+ fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
+ break;
+ case DCEM_PORTSPEED_4G:
+ fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
+ break;
+ case DCEM_PORTSPEED_8G:
+ fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
+ break;
case DCEM_PORTSPEED_10G:
fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
break;
+ case DCEM_PORTSPEED_16G:
+ fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
+ break;
case DCEM_PORTSPEED_20G:
fc_host_speed(shost) = FC_PORTSPEED_20GBIT;
break;
case DCEM_PORTSPEED_25G:
fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
break;
+ case DCEM_PORTSPEED_32G:
+ fc_host_speed(shost) = FC_PORTSPEED_32GBIT;
+ break;
case DCEM_PORTSPEED_40G:
case DCEM_PORTSPEED_4x10G:
fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
break;
+ case DCEM_PORTSPEED_50G:
+ fc_host_speed(shost) = FC_PORTSPEED_50GBIT;
+ break;
+ case DCEM_PORTSPEED_64G:
+ fc_host_speed(shost) = FC_PORTSPEED_64GBIT;
+ break;
case DCEM_PORTSPEED_100G:
fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
break;
+ case DCEM_PORTSPEED_128G:
+ fc_host_speed(shost) = FC_PORTSPEED_128GBIT;
+ break;
default:
+ FNIC_MAIN_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Unknown FC speed: %d Mbps", port_speed);
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
}
}
+/* Placeholder function */
static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host)
{
int ret;
- struct fc_lport *lp = shost_priv(host);
- struct fnic *fnic = lport_priv(lp);
- struct fc_host_statistics *stats = &lp->host_stats;
+ struct fnic *fnic = *((struct fnic **) shost_priv(host));
+ struct fc_host_statistics *stats = &fnic->fnic_stats.host_stats;
struct vnic_stats *vs;
unsigned long flags;
 if (time_before(jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT))
return stats;
fnic->stats_time = jiffies;
@@ -213,24 +260,22 @@ static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host)
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
if (ret) {
- FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "fnic: Get vnic stats failed"
- " 0x%x", ret);
+ FNIC_MAIN_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "fnic: Get vnic stats failed: 0x%x", ret);
return stats;
}
vs = fnic->stats;
stats->tx_frames = vs->tx.tx_unicast_frames_ok;
- stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4;
+ stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4;
stats->rx_frames = vs->rx.rx_unicast_frames_ok;
- stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4;
+ stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4;
stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors;
stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop;
stats->invalid_crc_count = vs->rx.rx_crc_errors;
stats->seconds_since_last_reset =
- (jiffies - fnic->stats_reset_time) / HZ;
+ (jiffies - fnic->stats_reset_time) / HZ;
stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000);
stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000);
-
return stats;
}
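
The early return above implements a simple jiffies-based rate limit; a hedged sketch of the pattern in isolation (the `last` variable stands in for fnic->stats_time; FNIC_STATS_RATE_LIMIT is the maximum number of refreshes per second):

	static unsigned long last;	/* illustrative stand-in */

	if (time_before(jiffies, last + HZ / FNIC_STATS_RATE_LIMIT))
		return stats;		/* within the window: serve cache */
	last = jiffies;			/* start a new window */
	/* ...then issue vnic_dev_stats_dump() and repopulate stats... */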
@@ -311,8 +356,7 @@ void fnic_dump_fchost_stats(struct Scsi_Host *host,
static void fnic_reset_host_stats(struct Scsi_Host *host)
{
int ret;
- struct fc_lport *lp = shost_priv(host);
- struct fnic *fnic = lport_priv(lp);
+ struct fnic *fnic = *((struct fnic **) shost_priv(host));
struct fc_host_statistics *stats;
unsigned long flags;
@@ -325,7 +369,7 @@ static void fnic_reset_host_stats(struct Scsi_Host *host)
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
if (ret) {
- FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_MAIN_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"fnic: Reset vnic stats failed"
" 0x%x", ret);
return;
@@ -344,25 +388,19 @@ void fnic_log_q_error(struct fnic *fnic)
for (i = 0; i < fnic->raw_wq_count; i++) {
error_status = ioread32(&fnic->wq[i].ctrl->error_status);
if (error_status)
- shost_printk(KERN_ERR, fnic->lport->host,
- "WQ[%d] error_status"
- " %d\n", i, error_status);
+ dev_err(&fnic->pdev->dev, "WQ[%d] error_status %d\n", i, error_status);
}
for (i = 0; i < fnic->rq_count; i++) {
error_status = ioread32(&fnic->rq[i].ctrl->error_status);
if (error_status)
- shost_printk(KERN_ERR, fnic->lport->host,
- "RQ[%d] error_status"
- " %d\n", i, error_status);
+ dev_err(&fnic->pdev->dev, "RQ[%d] error_status %d\n", i, error_status);
}
for (i = 0; i < fnic->wq_copy_count; i++) {
error_status = ioread32(&fnic->hw_copy_wq[i].ctrl->error_status);
if (error_status)
- shost_printk(KERN_ERR, fnic->lport->host,
- "CWQ[%d] error_status"
- " %d\n", i, error_status);
+ dev_err(&fnic->pdev->dev, "CWQ[%d] error_status %d\n", i, error_status);
}
}
@@ -396,8 +434,7 @@ static int fnic_notify_set(struct fnic *fnic)
err = vnic_dev_notify_set(fnic->vdev, fnic->wq_copy_count + fnic->copy_wq_base);
break;
default:
- shost_printk(KERN_ERR, fnic->lport->host,
- "Interrupt mode should be set up"
+ dev_err(&fnic->pdev->dev, "Interrupt mode should be set up"
" before devcmd notify set %d\n",
vnic_dev_get_intr_mode(fnic->vdev));
err = -1;
@@ -416,13 +453,6 @@ static void fnic_notify_timer(struct timer_list *t)
round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
}
-static void fnic_fip_notify_timer(struct timer_list *t)
-{
- struct fnic *fnic = from_timer(fnic, t, fip_timer);
-
- fnic_handle_fip_timer(fnic);
-}
-
static void fnic_notify_timer_start(struct fnic *fnic)
{
switch (vnic_dev_get_intr_mode(fnic->vdev)) {
@@ -522,6 +552,8 @@ static int fnic_cleanup(struct fnic *fnic)
vnic_intr_clean(&fnic->intr[i]);
mempool_destroy(fnic->io_req_pool);
+ mempool_destroy(fnic->frame_pool);
+ mempool_destroy(fnic->frame_elem_pool);
for (i = 0; i < FNIC_SGL_NUM_CACHES; i++)
mempool_destroy(fnic->io_sgl_pool[i]);
@@ -534,25 +566,36 @@ static void fnic_iounmap(struct fnic *fnic)
iounmap(fnic->bar0.vaddr);
}
-/**
- * fnic_get_mac() - get assigned data MAC address for FIP code.
- * @lport: local port.
- */
-static u8 *fnic_get_mac(struct fc_lport *lport)
+static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
{
- struct fnic *fnic = lport_priv(lport);
+ vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
+}
- return fnic->data_src_addr;
+static void fnic_scsi_init(struct fnic *fnic)
+{
+ struct Scsi_Host *host = fnic->host;
+
+ snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
+ host->host_no);
+
+ host->transportt = fnic_fc_transport;
}
-static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
+static void fnic_free_ioreq_tables_mq(struct fnic *fnic)
{
- vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
+ int hwq;
+
+ for (hwq = 0; hwq < fnic->wq_copy_count; hwq++)
+ kfree(fnic->sw_copy_wq[hwq].io_req_table);
}
static int fnic_scsi_drv_init(struct fnic *fnic)
{
- struct Scsi_Host *host = fnic->lport->host;
+ struct Scsi_Host *host = fnic->host;
+ int err;
+ struct pci_dev *pdev = fnic->pdev;
+ struct fnic_iport_s *iport = &fnic->iport;
+ int hwq;
/* Configure maximum outstanding IO reqs*/
if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD)
@@ -563,103 +606,160 @@ static int fnic_scsi_drv_init(struct fnic *fnic)
fnic->fnic_max_tag_id = host->can_queue;
host->max_lun = fnic->config.luns_per_tgt;
host->max_id = FNIC_MAX_FCP_TARGET;
- host->max_cmd_len = FCOE_MAX_CMD_LEN;
+ host->max_cmd_len = FNIC_FCOE_MAX_CMD_LEN;
host->nr_hw_queues = fnic->wq_copy_count;
- shost_printk(KERN_INFO, host,
- "fnic: can_queue: %d max_lun: %llu",
+ dev_info(&fnic->pdev->dev, "fnic: can_queue: %d max_lun: %llu",
host->can_queue, host->max_lun);
- shost_printk(KERN_INFO, host,
- "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d",
+ dev_info(&fnic->pdev->dev, "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d",
host->max_id, host->max_cmd_len, host->nr_hw_queues);
+ for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) {
+ fnic->sw_copy_wq[hwq].ioreq_table_size = fnic->fnic_max_tag_id;
+ fnic->sw_copy_wq[hwq].io_req_table =
+ kzalloc((fnic->sw_copy_wq[hwq].ioreq_table_size + 1) *
+ sizeof(struct fnic_io_req *), GFP_KERNEL);
+
+ if (!fnic->sw_copy_wq[hwq].io_req_table) {
+ fnic_free_ioreq_tables_mq(fnic);
+ return -ENOMEM;
+ }
+ }
+
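A hedged aside on the allocation above: the open-coded size multiply could also be written with the overflow-checked kcalloc() idiom, keeping the +1 slot (assumed here to be a reserved extra entry):

	fnic->sw_copy_wq[hwq].io_req_table =
		kcalloc(fnic->sw_copy_wq[hwq].ioreq_table_size + 1,
			sizeof(struct fnic_io_req *), GFP_KERNEL);
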
+ dev_info(&fnic->pdev->dev, "fnic copy wqs: %d, Q0 ioreq table size: %d\n",
+ fnic->wq_copy_count, fnic->sw_copy_wq[0].ioreq_table_size);
+
+ fnic_scsi_init(fnic);
+
+ err = scsi_add_host(fnic->host, &pdev->dev);
+ if (err) {
+ dev_err(&fnic->pdev->dev, "fnic: scsi add host failed: aborting\n");
+ return err;
+ }
+ fc_host_maxframe_size(fnic->host) = iport->max_payload_size;
+ fc_host_dev_loss_tmo(fnic->host) =
+ fnic->config.port_down_timeout / 1000;
+ sprintf(fc_host_symbolic_name(fnic->host),
+ DRV_NAME " v" DRV_VERSION " over %s", fnic->name);
+ fc_host_port_type(fnic->host) = FC_PORTTYPE_NPORT;
+ fc_host_node_name(fnic->host) = iport->wwnn;
+ fc_host_port_name(fnic->host) = iport->wwpn;
+ fc_host_supported_classes(fnic->host) = FC_COS_CLASS3;
+ memset(fc_host_supported_fc4s(fnic->host), 0,
+ sizeof(fc_host_supported_fc4s(fnic->host)));
+ fc_host_supported_fc4s(fnic->host)[2] = 1;
+ fc_host_supported_fc4s(fnic->host)[7] = 1;
+ fc_host_supported_speeds(fnic->host) = 0;
+ fc_host_supported_speeds(fnic->host) |= FC_PORTSPEED_8GBIT;
+
+ dev_info(&fnic->pdev->dev, "shost_data: 0x%p\n", fnic->host->shost_data);
+ if (fnic->host->shost_data != NULL) {
+ if (fnic_tgt_id_binding == 0) {
+ dev_info(&fnic->pdev->dev, "Setting target binding to NONE\n");
+ fc_host_tgtid_bind_type(fnic->host) = FC_TGTID_BIND_NONE;
+ } else {
+ dev_info(&fnic->pdev->dev, "Setting target binding to WWPN\n");
+ fc_host_tgtid_bind_type(fnic->host) = FC_TGTID_BIND_BY_WWPN;
+ }
+ }
+
+ fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
+ if (!fnic->io_req_pool) {
+ scsi_remove_host(fnic->host);
+ return -ENOMEM;
+ }
+
return 0;
}
void fnic_mq_map_queues_cpus(struct Scsi_Host *host)
{
- struct fc_lport *lp = shost_priv(host);
- struct fnic *fnic = lport_priv(lp);
+ struct fnic *fnic = *((struct fnic **) shost_priv(host));
struct pci_dev *l_pdev = fnic->pdev;
int intr_mode = fnic->config.intr_mode;
struct blk_mq_queue_map *qmap = &host->tag_set.map[HCTX_TYPE_DEFAULT];
if (intr_mode == VNIC_DEV_INTR_MODE_MSI || intr_mode == VNIC_DEV_INTR_MODE_INTX) {
- FNIC_MAIN_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_MAIN_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"intr_mode is not msix\n");
return;
}
- FNIC_MAIN_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_MAIN_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"qmap->nr_queues: %d\n", qmap->nr_queues);
if (l_pdev == NULL) {
- FNIC_MAIN_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_MAIN_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"l_pdev is null\n");
return;
}
- blk_mq_pci_map_queues(qmap, l_pdev, FNIC_PCI_OFFSET);
+ blk_mq_map_hw_queues(qmap, &l_pdev->dev, FNIC_PCI_OFFSET);
}
static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct Scsi_Host *host;
- struct fc_lport *lp;
+ struct Scsi_Host *host = NULL;
struct fnic *fnic;
mempool_t *pool;
+ struct fnic_iport_s *iport;
int err = 0;
int fnic_id = 0;
int i;
unsigned long flags;
- int hwq;
+ char *desc, *subsys_desc;
+ int len;
/*
- * Allocate SCSI Host and set up association between host,
- * local port, and fnic
+ * Allocate fnic
*/
- lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic));
- if (!lp) {
- printk(KERN_ERR PFX "Unable to alloc libfc local port\n");
+ fnic = kzalloc(sizeof(struct fnic), GFP_KERNEL);
+ if (!fnic) {
err = -ENOMEM;
- goto err_out;
+ goto err_out_fnic_alloc;
}
- host = lp->host;
- fnic = lport_priv(lp);
+ iport = &fnic->iport;
fnic_id = ida_alloc(&fnic_ida, GFP_KERNEL);
if (fnic_id < 0) {
- pr_err("Unable to alloc fnic ID\n");
+ dev_err(&pdev->dev, "Unable to alloc fnic ID\n");
err = fnic_id;
goto err_out_ida_alloc;
}
- fnic->lport = lp;
- fnic->ctlr.lp = lp;
- fnic->link_events = 0;
- fnic->pdev = pdev;
- snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
- host->host_no);
-
- host->transportt = fnic_fc_transport;
+ fnic->pdev = pdev;
fnic->fnic_num = fnic_id;
- fnic_stats_debugfs_init(fnic);
+
+ /* Find model name from PCIe subsys ID */
+ if (fnic_get_desc_by_devid(pdev, &desc, &subsys_desc) == 0) {
+ dev_info(&fnic->pdev->dev, "Model: %s\n", subsys_desc);
+
+ /* Update FDMI model */
+ fnic->subsys_desc_len = strlen(subsys_desc);
+ len = ARRAY_SIZE(fnic->subsys_desc);
+ if (fnic->subsys_desc_len > len)
+ fnic->subsys_desc_len = len;
+ memcpy(fnic->subsys_desc, subsys_desc, fnic->subsys_desc_len);
+ dev_info(&fnic->pdev->dev, "FDMI Model: %s\n", fnic->subsys_desc);
+ } else {
+ fnic->subsys_desc_len = 0;
+ dev_info(&fnic->pdev->dev, "Model: %s subsys_id: 0x%04x\n", "Unknown",
+ pdev->subsystem_device);
+ }
err = pci_enable_device(pdev);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Cannot enable PCI device, aborting.\n");
- goto err_out_free_hba;
+ dev_err(&fnic->pdev->dev, "Cannot enable PCI device, aborting.\n");
+ goto err_out_pci_enable_device;
}
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Cannot enable PCI resources, aborting\n");
- goto err_out_disable_device;
+ dev_err(&fnic->pdev->dev, "Cannot enable PCI resources, aborting\n");
+ goto err_out_pci_request_regions;
}
pci_set_master(pdev);
@@ -672,19 +772,17 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "No usable DMA configuration "
+ dev_err(&fnic->pdev->dev, "No usable DMA configuration "
"aborting\n");
- goto err_out_release_regions;
+ goto err_out_set_dma_mask;
}
}
/* Map vNIC resources from BAR0 */
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "BAR0 not memory-map'able, aborting.\n");
+ dev_err(&fnic->pdev->dev, "BAR0 not memory-map'able, aborting.\n");
err = -ENODEV;
- goto err_out_release_regions;
+ goto err_out_map_bar;
}
fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
@@ -692,61 +790,79 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
fnic->bar0.len = pci_resource_len(pdev, 0);
if (!fnic->bar0.vaddr) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Cannot memory-map BAR0 res hdr, "
+ dev_err(&fnic->pdev->dev, "Cannot memory-map BAR0 res hdr, "
"aborting.\n");
err = -ENODEV;
- goto err_out_release_regions;
+ goto err_out_fnic_map_bar;
}
fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
if (!fnic->vdev) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "vNIC registration failed, "
+ dev_err(&fnic->pdev->dev, "vNIC registration failed, "
"aborting.\n");
err = -ENODEV;
- goto err_out_iounmap;
+ goto err_out_dev_register;
}
err = vnic_dev_cmd_init(fnic->vdev);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "vnic_dev_cmd_init() returns %d, aborting\n",
+ dev_err(&fnic->pdev->dev, "vnic_dev_cmd_init() returns %d, aborting\n",
err);
- goto err_out_vnic_unregister;
+ goto err_out_dev_cmd_init;
}
err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
vnic_dev_open_done, CMD_OPENF_RQ_ENABLE_THEN_POST);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "vNIC dev open failed, aborting.\n");
- goto err_out_dev_cmd_deinit;
+ dev_err(&fnic->pdev->dev, "vNIC dev open failed, aborting.\n");
+ goto err_out_dev_open;
}
err = vnic_dev_init(fnic->vdev, 0);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "vNIC dev init failed, aborting.\n");
- goto err_out_dev_close;
+ dev_err(&fnic->pdev->dev, "vNIC dev init failed, aborting.\n");
+ goto err_out_dev_init;
}
- err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
+ err = vnic_dev_mac_addr(fnic->vdev, iport->hwmac);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "vNIC get MAC addr failed \n");
- goto err_out_dev_close;
+ dev_err(&fnic->pdev->dev, "vNIC get MAC addr failed\n");
+ goto err_out_dev_mac_addr;
}
/* set data_src for point-to-point mode and to keep it non-zero */
- memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN);
+ memcpy(fnic->data_src_addr, iport->hwmac, ETH_ALEN);
/* Get vNIC configuration */
err = fnic_get_vnic_config(fnic);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Get vNIC configuration failed, "
+ dev_err(&fnic->pdev->dev, "Get vNIC configuration failed, "
"aborting.\n");
- goto err_out_dev_close;
+ goto err_out_fnic_get_config;
+ }
+
+ switch (fnic->config.flags & 0xff0) {
+ case VFCF_FC_INITIATOR:
+ {
+ host = scsi_host_alloc(&fnic_host_template,
+ sizeof(struct fnic *));
+ if (!host) {
+ dev_err(&fnic->pdev->dev, "Unable to allocate scsi host\n");
+ err = -ENOMEM;
+ goto err_out_scsi_host_alloc;
+ }
+ *((struct fnic **) shost_priv(host)) = fnic;
+
+ fnic->host = host;
+ fnic->role = FNIC_ROLE_FCP_INITIATOR;
+ dev_info(&fnic->pdev->dev, "fnic: %d is scsi initiator\n",
+ fnic->fnic_num);
+ }
+ break;
+ default:
+ dev_info(&fnic->pdev->dev, "fnic: %d has no role defined\n", fnic->fnic_num);
+ err = -EINVAL;
+ goto err_out_fnic_role;
}
/* Setup PCI resources */
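
The initiator branch above sizes the SCSI host's private area for a single pointer instead of embedding the whole fnic (which is now kzalloc'd separately); a minimal sketch of the store/load pair, matching the casts used throughout this patch:

	/* store: the private area holds one back-pointer */
	struct Scsi_Host *host = scsi_host_alloc(&fnic_host_template,
						 sizeof(struct fnic *));
	*((struct fnic **)shost_priv(host)) = fnic;

	/* load: every shost-based callback recovers the fnic the same way */
	struct fnic *f = *((struct fnic **)shost_priv(host));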
@@ -756,29 +872,18 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = fnic_set_intr_mode(fnic);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Failed to set intr mode, "
+ dev_err(&fnic->pdev->dev, "Failed to set intr mode, "
"aborting.\n");
- goto err_out_dev_close;
+ goto err_out_fnic_set_intr_mode;
}
err = fnic_alloc_vnic_resources(fnic);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Failed to alloc vNIC resources, "
+ dev_err(&fnic->pdev->dev, "Failed to alloc vNIC resources, "
"aborting.\n");
- goto err_out_clear_intr;
- }
-
- fnic_scsi_drv_init(fnic);
-
- for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) {
- fnic->sw_copy_wq[hwq].ioreq_table_size = fnic->fnic_max_tag_id;
- fnic->sw_copy_wq[hwq].io_req_table =
- kzalloc((fnic->sw_copy_wq[hwq].ioreq_table_size + 1) *
- sizeof(struct fnic_io_req *), GFP_KERNEL);
+ goto err_out_fnic_alloc_vnic_res;
}
- shost_printk(KERN_INFO, fnic->lport->host, "fnic copy wqs: %d, Q0 ioreq table size: %d\n",
+ dev_info(&fnic->pdev->dev, "fnic copy wqs: %d, Q0 ioreq table size: %d\n",
fnic->wq_copy_count, fnic->sw_copy_wq[0].ioreq_table_size);
/* initialize all fnic locks */
@@ -794,50 +899,56 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
fnic->fw_ack_index[i] = -1;
}
- err = -ENOMEM;
- fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
- if (!fnic->io_req_pool)
- goto err_out_free_resources;
-
pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
- if (!pool)
- goto err_out_free_ioreq_pool;
+ if (!pool) {
+ err = -ENOMEM;
+ goto err_out_free_resources;
+ }
fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;
pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
- if (!pool)
+ if (!pool) {
+ err = -ENOMEM;
goto err_out_free_dflt_pool;
+ }
fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;
+ pool = mempool_create_slab_pool(FDLS_MIN_FRAMES, fdls_frame_cache);
+ if (!pool) {
+ err = -ENOMEM;
+ goto err_out_fdls_frame_pool;
+ }
+ fnic->frame_pool = pool;
+
+ pool = mempool_create_slab_pool(FDLS_MIN_FRAME_ELEM,
+ fdls_frame_elem_cache);
+ if (!pool) {
+ err = -ENOMEM;
+ goto err_out_fdls_frame_elem_pool;
+ }
+ fnic->frame_elem_pool = pool;
+
/* setup vlan config, hw inserts vlan header */
fnic->vlan_hw_insert = 1;
fnic->vlan_id = 0;
- /* Initialize the FIP fcoe_ctrl struct */
- fnic->ctlr.send = fnic_eth_send;
- fnic->ctlr.update_mac = fnic_update_mac;
- fnic->ctlr.get_src_addr = fnic_get_mac;
if (fnic->config.flags & VFCF_FIP_CAPABLE) {
- shost_printk(KERN_INFO, fnic->lport->host,
- "firmware supports FIP\n");
+ dev_info(&fnic->pdev->dev, "firmware supports FIP\n");
/* enable directed and multicast */
vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
- vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
- fnic->set_vlan = fnic_set_vlan;
- fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
- timer_setup(&fnic->fip_timer, fnic_fip_notify_timer, 0);
+ vnic_dev_add_addr(fnic->vdev, iport->hwmac);
spin_lock_init(&fnic->vlans_lock);
INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
- INIT_WORK(&fnic->event_work, fnic_handle_event);
- skb_queue_head_init(&fnic->fip_frame_queue);
- INIT_LIST_HEAD(&fnic->evlist);
- INIT_LIST_HEAD(&fnic->vlans);
+ INIT_LIST_HEAD(&fnic->fip_frame_queue);
+ INIT_LIST_HEAD(&fnic->vlan_list);
+ timer_setup(&fnic->retry_fip_timer, fnic_handle_fip_timer, 0);
+ timer_setup(&fnic->fcs_ka_timer, fnic_handle_fcs_ka_timer, 0);
+ timer_setup(&fnic->enode_ka_timer, fnic_handle_enode_ka_timer, 0);
+ timer_setup(&fnic->vn_ka_timer, fnic_handle_vn_ka_timer, 0);
+ fnic->set_vlan = fnic_set_vlan;
} else {
- shost_printk(KERN_INFO, fnic->lport->host,
- "firmware uses non-FIP mode\n");
- fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP);
- fnic->ctlr.state = FIP_ST_NON_FIP;
+ dev_info(&fnic->pdev->dev, "firmware uses non-FIP mode\n");
}
fnic->state = FNIC_IN_FC_MODE;
@@ -850,9 +961,8 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup notification buffer area */
err = fnic_notify_set(fnic);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Failed to alloc notify buffer, aborting.\n");
- goto err_out_free_max_pool;
+ dev_err(&fnic->pdev->dev, "Failed to alloc notify buffer, aborting.\n");
+ goto err_out_fnic_notify_set;
}
/* Setup notify timer when using MSI interrupts */
@@ -863,13 +973,62 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < fnic->rq_count; i++) {
err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "fnic_alloc_rq_frame can't alloc "
+ dev_err(&fnic->pdev->dev, "fnic_alloc_rq_frame can't alloc "
"frame\n");
- goto err_out_rq_buf;
+ goto err_out_alloc_rq_buf;
}
}
+ init_completion(&fnic->reset_completion_wait);
+
+ /* Start local port initialization */
+ iport->max_flogi_retries = fnic->config.flogi_retries;
+ iport->max_plogi_retries = fnic->config.plogi_retries;
+ iport->plogi_timeout = fnic->config.plogi_timeout;
+ iport->service_params =
+ (FNIC_FCP_SP_INITIATOR | FNIC_FCP_SP_RD_XRDY_DIS |
+ FNIC_FCP_SP_CONF_CMPL);
+ if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
+ iport->service_params |= FNIC_FCP_SP_RETRY;
+
+ iport->boot_time = jiffies;
+ iport->e_d_tov = fnic->config.ed_tov;
+ iport->r_a_tov = fnic->config.ra_tov;
+ iport->link_supported_speeds = FNIC_PORTSPEED_10GBIT;
+ iport->wwpn = fnic->config.port_wwn;
+ iport->wwnn = fnic->config.node_wwn;
+
+ iport->max_payload_size = fnic->config.maxdatafieldsize;
+
+ if ((iport->max_payload_size < FNIC_MIN_DATA_FIELD_SIZE) ||
+ (iport->max_payload_size > FNIC_FC_MAX_PAYLOAD_LEN) ||
+ ((iport->max_payload_size % 4) != 0)) {
+ iport->max_payload_size = FNIC_FC_MAX_PAYLOAD_LEN;
+ }
+
+ iport->flags |= FNIC_FIRST_LINK_UP;
+
+ timer_setup(&(iport->fabric.retry_timer), fdls_fabric_timer_callback,
+ 0);
+
+ fnic->stats_reset_time = jiffies;
+
+ INIT_WORK(&fnic->link_work, fnic_handle_link);
+ INIT_WORK(&fnic->frame_work, fnic_handle_frame);
+ INIT_WORK(&fnic->tport_work, fnic_tport_event_handler);
+ INIT_WORK(&fnic->flush_work, fnic_flush_tx);
+
+ INIT_LIST_HEAD(&fnic->frame_queue);
+ INIT_LIST_HEAD(&fnic->tx_queue);
+ INIT_LIST_HEAD(&fnic->tport_event_list);
+
+ INIT_DELAYED_WORK(&iport->oxid_pool.schedule_oxid_free_retry,
+ fdls_schedule_oxid_free_retry_work);
+
+ /* Initialize the oxid reclaim list and work struct */
+ INIT_LIST_HEAD(&iport->oxid_pool.oxid_reclaim_list);
+ INIT_DELAYED_WORK(&iport->oxid_pool.oxid_reclaim_work, fdls_reclaim_oxid_handler);
+
/* Enable all queues */
for (i = 0; i < fnic->raw_wq_count; i++)
vnic_wq_enable(&fnic->wq[i]);
@@ -880,180 +1039,131 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < fnic->wq_copy_count; i++)
vnic_wq_copy_enable(&fnic->hw_copy_wq[i]);
- err = fnic_request_intr(fnic);
- if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Unable to request irq.\n");
- goto err_out_request_intr;
- }
+ vnic_dev_enable(fnic->vdev);
- /*
- * Initialization done with PCI system, hardware, firmware.
- * Add host to SCSI
- */
- err = scsi_add_host(lp->host, &pdev->dev);
+ err = fnic_request_intr(fnic);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "fnic: scsi_add_host failed...exiting\n");
- goto err_out_scsi_add_host;
+ dev_err(&fnic->pdev->dev, "Unable to request irq.\n");
+ goto err_out_fnic_request_intr;
}
+ fnic_notify_timer_start(fnic);
- /* Start local port initiatialization */
-
- lp->link_up = 0;
-
- lp->max_retry_count = fnic->config.flogi_retries;
- lp->max_rport_retry_count = fnic->config.plogi_retries;
- lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
- FCP_SPPF_CONF_COMPL);
- if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
- lp->service_params |= FCP_SPPF_RETRY;
-
- lp->boot_time = jiffies;
- lp->e_d_tov = fnic->config.ed_tov;
- lp->r_a_tov = fnic->config.ra_tov;
- lp->link_supported_speeds = FC_PORTSPEED_10GBIT;
- fc_set_wwnn(lp, fnic->config.node_wwn);
- fc_set_wwpn(lp, fnic->config.port_wwn);
-
- fcoe_libfc_config(lp, &fnic->ctlr, &fnic_transport_template, 0);
-
- if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START,
- FCPIO_HOST_EXCH_RANGE_END, NULL)) {
- err = -ENOMEM;
- goto err_out_fc_exch_mgr_alloc;
- }
-
- fc_lport_init_stats(lp);
- fnic->stats_reset_time = jiffies;
+ fnic_fdls_init(fnic, (fnic->config.flags & VFCF_FIP_CAPABLE));
- fc_lport_config(lp);
+ err = fnic_scsi_drv_init(fnic);
+ if (err)
+ goto err_out_scsi_drv_init;
- if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
- sizeof(struct fc_frame_header))) {
- err = -EINVAL;
- goto err_out_free_exch_mgr;
+ err = fnic_stats_debugfs_init(fnic);
+ if (err) {
+ dev_err(&fnic->pdev->dev, "Failed to initialize debugfs for stats\n");
+ goto err_out_free_stats_debugfs;
}
- fc_host_maxframe_size(lp->host) = lp->mfs;
- fc_host_dev_loss_tmo(lp->host) = fnic->config.port_down_timeout / 1000;
- sprintf(fc_host_symbolic_name(lp->host),
- DRV_NAME " v" DRV_VERSION " over %s", fnic->name);
+ for (i = 0; i < fnic->intr_count; i++)
+ vnic_intr_unmask(&fnic->intr[i]);
spin_lock_irqsave(&fnic_list_lock, flags);
list_add_tail(&fnic->list, &fnic_list);
spin_unlock_irqrestore(&fnic_list_lock, flags);
- INIT_WORK(&fnic->link_work, fnic_handle_link);
- INIT_WORK(&fnic->frame_work, fnic_handle_frame);
- INIT_WORK(&fnic->flush_work, fnic_flush_tx);
- skb_queue_head_init(&fnic->frame_queue);
- skb_queue_head_init(&fnic->tx_queue);
-
- fc_fabric_login(lp);
-
- vnic_dev_enable(fnic->vdev);
-
- for (i = 0; i < fnic->intr_count; i++)
- vnic_intr_unmask(&fnic->intr[i]);
-
- fnic_notify_timer_start(fnic);
-
return 0;
-err_out_free_exch_mgr:
- fc_exch_mgr_free(lp);
-err_out_fc_exch_mgr_alloc:
- fc_remove_host(lp->host);
- scsi_remove_host(lp->host);
-err_out_scsi_add_host:
+err_out_free_stats_debugfs:
+ fnic_stats_debugfs_remove(fnic);
+ fnic_free_ioreq_tables_mq(fnic);
+ scsi_remove_host(fnic->host);
+err_out_scsi_drv_init:
fnic_free_intr(fnic);
-err_out_request_intr:
- for (i = 0; i < fnic->rq_count; i++)
+err_out_fnic_request_intr:
+err_out_alloc_rq_buf:
+ for (i = 0; i < fnic->rq_count; i++) {
+ if (ioread32(&fnic->rq[i].ctrl->enable))
+ vnic_rq_disable(&fnic->rq[i]);
vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
-err_out_rq_buf:
+ }
vnic_dev_notify_unset(fnic->vdev);
-err_out_free_max_pool:
+err_out_fnic_notify_set:
+ mempool_destroy(fnic->frame_elem_pool);
+err_out_fdls_frame_elem_pool:
+ mempool_destroy(fnic->frame_pool);
+err_out_fdls_frame_pool:
mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
err_out_free_dflt_pool:
mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
-err_out_free_ioreq_pool:
- mempool_destroy(fnic->io_req_pool);
err_out_free_resources:
- for (hwq = 0; hwq < fnic->wq_copy_count; hwq++)
- kfree(fnic->sw_copy_wq[hwq].io_req_table);
fnic_free_vnic_resources(fnic);
-err_out_clear_intr:
+err_out_fnic_alloc_vnic_res:
fnic_clear_intr_mode(fnic);
-err_out_dev_close:
+err_out_fnic_set_intr_mode:
+ scsi_host_put(fnic->host);
+err_out_fnic_role:
+err_out_scsi_host_alloc:
+err_out_fnic_get_config:
+err_out_dev_mac_addr:
+err_out_dev_init:
vnic_dev_close(fnic->vdev);
-err_out_dev_cmd_deinit:
-err_out_vnic_unregister:
+err_out_dev_open:
+err_out_dev_cmd_init:
vnic_dev_unregister(fnic->vdev);
-err_out_iounmap:
+err_out_dev_register:
fnic_iounmap(fnic);
-err_out_release_regions:
+err_out_fnic_map_bar:
+err_out_map_bar:
+err_out_set_dma_mask:
pci_release_regions(pdev);
-err_out_disable_device:
+err_out_pci_request_regions:
pci_disable_device(pdev);
-err_out_free_hba:
- fnic_stats_debugfs_remove(fnic);
+err_out_pci_enable_device:
ida_free(&fnic_ida, fnic->fnic_num);
err_out_ida_alloc:
- scsi_host_put(lp->host);
-err_out:
+ kfree(fnic);
+err_out_fnic_alloc:
return err;
}
static void fnic_remove(struct pci_dev *pdev)
{
struct fnic *fnic = pci_get_drvdata(pdev);
- struct fc_lport *lp = fnic->lport;
unsigned long flags;
- int hwq;
/*
- * Mark state so that the workqueue thread stops forwarding
- * received frames and link events to the local port. ISR and
- * other threads that can queue work items will also stop
- * creating work items on the fnic workqueue
+ * Sometimes when probe() fails and does not exit with an error code,
+ * remove() gets called with 'drvdata' not set. Avoid a crash by
+ * adding a defensive check.
*/
+ if (!fnic)
+ return;
+
spin_lock_irqsave(&fnic->fnic_lock, flags);
fnic->stop_rx_link_events = 1;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
- del_timer_sync(&fnic->notify_timer);
-
/*
* Flush the fnic event queue. After this call, there should
* be no event queued for this fnic device in the workqueue
*/
flush_workqueue(fnic_event_queue);
- skb_queue_purge(&fnic->frame_queue);
- skb_queue_purge(&fnic->tx_queue);
+
+ fnic_scsi_unload(fnic);
+
+ if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
+ timer_delete_sync(&fnic->notify_timer);
if (fnic->config.flags & VFCF_FIP_CAPABLE) {
- del_timer_sync(&fnic->fip_timer);
- skb_queue_purge(&fnic->fip_frame_queue);
+ timer_delete_sync(&fnic->retry_fip_timer);
+ timer_delete_sync(&fnic->fcs_ka_timer);
+ timer_delete_sync(&fnic->enode_ka_timer);
+ timer_delete_sync(&fnic->vn_ka_timer);
+
+ fnic_free_txq(&fnic->fip_frame_queue);
fnic_fcoe_reset_vlans(fnic);
- fnic_fcoe_evlist_free(fnic);
}
- /*
- * Log off the fabric. This stops all remote ports, dns port,
- * logs off the fabric. This flushes all rport, disc, lport work
- * before returning
- */
- fc_fabric_logoff(fnic->lport);
-
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- fnic->in_remove = 1;
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ if ((fnic_fdmi_support == 1) && (fnic->iport.fabric.fdmi_pending > 0))
+ timer_delete_sync(&fnic->iport.fabric.fdmi_timer);
- fcoe_ctlr_destroy(&fnic->ctlr);
- fc_lport_destroy(lp);
fnic_stats_debugfs_remove(fnic);
/*
@@ -1063,18 +1173,13 @@ static void fnic_remove(struct pci_dev *pdev)
*/
fnic_cleanup(fnic);
- BUG_ON(!skb_queue_empty(&fnic->frame_queue));
- BUG_ON(!skb_queue_empty(&fnic->tx_queue));
-
spin_lock_irqsave(&fnic_list_lock, flags);
list_del(&fnic->list);
spin_unlock_irqrestore(&fnic_list_lock, flags);
- fc_remove_host(fnic->lport->host);
- scsi_remove_host(fnic->lport->host);
- for (hwq = 0; hwq < fnic->wq_copy_count; hwq++)
- kfree(fnic->sw_copy_wq[hwq].io_req_table);
- fc_exch_mgr_free(fnic->lport);
+ fnic_free_txq(&fnic->frame_queue);
+ fnic_free_txq(&fnic->tx_queue);
+
vnic_dev_notify_unset(fnic->vdev);
fnic_free_intr(fnic);
fnic_free_vnic_resources(fnic);
@@ -1084,8 +1189,11 @@ static void fnic_remove(struct pci_dev *pdev)
fnic_iounmap(fnic);
pci_release_regions(pdev);
pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
ida_free(&fnic_ida, fnic->fnic_num);
- scsi_host_put(lp->host);
+ fnic_scsi_unload_cleanup(fnic);
+ scsi_host_put(fnic->host);
+ kfree(fnic);
}
static struct pci_driver fnic_driver = {
@@ -1161,6 +1269,24 @@ static int __init fnic_init_module(void)
goto err_create_fnic_ioreq_slab;
}
+ fdls_frame_cache = kmem_cache_create("fdls_frames",
+ FNIC_FCOE_FRAME_MAXSZ,
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!fdls_frame_cache) {
+ pr_err("fnic fdls frame cache create failed\n");
+ err = -ENOMEM;
+ goto err_create_fdls_frame_cache;
+ }
+
+ fdls_frame_elem_cache = kmem_cache_create("fdls_frame_elem",
+ sizeof(struct fnic_frame_list),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!fdls_frame_elem_cache) {
+ pr_err("fnic fdls frame elem cache create failed\n");
+ err = -ENOMEM;
+ goto err_create_fdls_frame_cache_elem;
+ }
+
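A hedged sketch of how the module-level caches created above pair with the per-adapter mempools made in fnic_probe() (FDLS_MIN_FRAMES keeps a floor of preallocated frames; error handling elided):

	/* module init: one cache for raw frames, one for list elements */
	fdls_frame_cache = kmem_cache_create("fdls_frames",
					     FNIC_FCOE_FRAME_MAXSZ, 0,
					     SLAB_HWCACHE_ALIGN, NULL);

	/* per adapter: the mempool holds FDLS_MIN_FRAMES objects in
	 * reserve so discovery can progress under memory pressure */
	fnic->frame_pool = mempool_create_slab_pool(FDLS_MIN_FRAMES,
						    fdls_frame_cache);
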
fnic_event_queue =
alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fnic_event_wq");
if (!fnic_event_queue) {
@@ -1177,6 +1303,19 @@ static int __init fnic_init_module(void)
goto err_create_fip_workq;
}
+ if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON) {
+ reset_fnic_work_queue =
+ create_singlethread_workqueue("reset_fnic_work_queue");
+ if (!reset_fnic_work_queue) {
+ pr_err("reset fnic work queue create failed\n");
+ err = -ENOMEM;
+ goto err_create_reset_fnic_workq;
+ }
+ spin_lock_init(&reset_fnic_list_lock);
+ INIT_LIST_HEAD(&reset_fnic_list);
+ INIT_WORK(&reset_fnic_work, fnic_reset_work_handler);
+ }
+
fnic_fc_transport = fc_attach_transport(&fnic_fc_functions);
if (!fnic_fc_transport) {
printk(KERN_ERR PFX "fc_attach_transport error\n");
@@ -1197,8 +1336,15 @@ err_pci_register:
err_fc_transport:
destroy_workqueue(fnic_fip_queue);
err_create_fip_workq:
+ if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON)
+ destroy_workqueue(reset_fnic_work_queue);
+err_create_reset_fnic_workq:
destroy_workqueue(fnic_event_queue);
err_create_fnic_workq:
+ kmem_cache_destroy(fdls_frame_elem_cache);
+err_create_fdls_frame_cache_elem:
+ kmem_cache_destroy(fdls_frame_cache);
+err_create_fdls_frame_cache:
kmem_cache_destroy(fnic_io_req_cache);
err_create_fnic_ioreq_slab:
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
@@ -1215,11 +1361,17 @@ static void __exit fnic_cleanup_module(void)
{
pci_unregister_driver(&fnic_driver);
destroy_workqueue(fnic_event_queue);
+
+ if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON)
+ destroy_workqueue(reset_fnic_work_queue);
+
if (fnic_fip_queue)
destroy_workqueue(fnic_fip_queue);
+
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
kmem_cache_destroy(fnic_io_req_cache);
+ kmem_cache_destroy(fdls_frame_cache);
fc_release_transport(fnic_fc_transport);
fnic_trace_free();
fnic_fc_trace_free();
diff --git a/drivers/scsi/fnic/fnic_pci_subsys_devid.c b/drivers/scsi/fnic/fnic_pci_subsys_devid.c
new file mode 100644
index 000000000000..36a2c1268422
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_pci_subsys_devid.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <linux/if_ether.h>
+#include "fnic.h"
+
+static struct fnic_pcie_device fnic_pcie_device_table[] = {
+ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_VASONA,
+ "VIC 1280"},
+ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_COTATI,
+ "VIC 1240"},
+ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno",
+ PCI_SUBDEVICE_ID_CISCO_LEXINGTON, "VIC 1225"},
+ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_ICEHOUSE,
+ "VIC 1285"},
+ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno",
+ PCI_SUBDEVICE_ID_CISCO_KIRKWOODLAKE, "VIC 1225T"},
+ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno",
+ PCI_SUBDEVICE_ID_CISCO_SUSANVILLE, "VIC 1227"},
+ {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_TORRANCE,
+ "VIC 1227T"},
+
+ {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_CALISTOGA,
+ "VIC 1340"},
+ {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_MOUNTAINVIEW,
+ "VIC 1380"},
+ {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN,
+ "C3260-SIOC"},
+ {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_CLEARLAKE,
+ "VIC 1385"},
+ {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN2,
+ "C3260-SIOC"},
+ {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_CLAREMONT,
+ "VIC 1387"},
+
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BRADBURY,
+ "VIC 1457"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
+ PCI_SUBDEVICE_ID_CISCO_BRENTWOOD, "VIC 1455"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
+ PCI_SUBDEVICE_ID_CISCO_BURLINGAME, "VIC 1487"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BAYSIDE,
+ "VIC 1485"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
+ PCI_SUBDEVICE_ID_CISCO_BAKERSFIELD, "VIC 1440"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
+ PCI_SUBDEVICE_ID_CISCO_BOONVILLE, "VIC 1480"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BENICIA,
+ "VIC 1495"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BEAUMONT,
+ "VIC 1497"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BRISBANE,
+ "VIC 1467"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BENTON,
+ "VIC 1477"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
+ PCI_SUBDEVICE_ID_CISCO_TWIN_RIVER, "VIC 14425"},
+ {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega",
+ PCI_SUBDEVICE_ID_CISCO_TWIN_PEAK, "VIC 14825"},
+
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_BERN,
+ "VIC 15420"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly",
+ PCI_SUBDEVICE_ID_CISCO_STOCKHOLM, "VIC 15428"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_KRAKOW,
+ "VIC 15411"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly",
+ PCI_SUBDEVICE_ID_CISCO_LUCERNE, "VIC 15231"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_TURKU,
+ "VIC 15238"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_GENEVA,
+ "VIC 15422"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly",
+ PCI_SUBDEVICE_ID_CISCO_HELSINKI, "VIC 15235"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly",
+ PCI_SUBDEVICE_ID_CISCO_GOTHENBURG, "VIC 15425"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly",
+ PCI_SUBDEVICE_ID_CISCO_TURKU_PLUS, "VIC 15237"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_ZURICH,
+ "VIC 15230"},
+ {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_RIGA,
+ "VIC 15427"},
+
+ {0,}
+};
+
+int fnic_get_desc_by_devid(struct pci_dev *pdev, char **desc,
+ char **subsys_desc)
+{
+ unsigned short device = PCI_DEVICE_ID_CISCO_VIC_FC;
+ int max = ARRAY_SIZE(fnic_pcie_device_table);
+ struct fnic_pcie_device *t = fnic_pcie_device_table;
+ int index = 0;
+
+ if (pdev->device != device)
+ return 1;
+
+ while (t->device != 0) {
+ if (memcmp(&pdev->subsystem_device, &t->subsystem_device,
+ sizeof(short)) == 0)
+ break;
+ t++;
+ index++;
+ }
+
+ if (index >= max - 1) {
+ *desc = NULL;
+ *subsys_desc = NULL;
+ return 1;
+ }
+
+ *desc = fnic_pcie_device_table[index].desc;
+ *subsys_desc = fnic_pcie_device_table[index].subsys_desc;
+ return 0;
+}
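A caller-side sketch of the lookup above, mirroring its use in fnic_probe() (the "Unknown" fallback string is the caller's choice, not part of this helper):

	char *desc, *subsys_desc;

	if (fnic_get_desc_by_devid(pdev, &desc, &subsys_desc) == 0)
		dev_info(&pdev->dev, "Model: %s\n", subsys_desc);
	else
		dev_info(&pdev->dev, "Model: Unknown subsys_id: 0x%04x\n",
			 pdev->subsystem_device);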
diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c
index 33dd27f6f24e..763475587b7f 100644
--- a/drivers/scsi/fnic/fnic_res.c
+++ b/drivers/scsi/fnic/fnic_res.c
@@ -30,9 +30,7 @@ int fnic_get_vnic_config(struct fnic *fnic)
offsetof(struct vnic_fc_config, m), \
sizeof(c->m), &c->m); \
if (err) { \
- shost_printk(KERN_ERR, fnic->lport->host, \
- "Error getting %s, %d\n", #m, \
- err); \
+ dev_err(&fnic->pdev->dev, "Error getting %s, %d\n", #m, err); \
return err; \
} \
} while (0);
@@ -60,6 +58,11 @@ int fnic_get_vnic_config(struct fnic *fnic)
GET_CONFIG(intr_mode);
GET_CONFIG(wq_copy_count);
+ if ((c->flags & (VFCF_FC_INITIATOR)) == 0) {
+ dev_info(&fnic->pdev->dev, "vNIC role not defined (def role: FC Init)\n");
+ c->flags |= VFCF_FC_INITIATOR;
+ }
+
c->wq_enet_desc_count =
min_t(u32, VNIC_FNIC_WQ_DESCS_MAX,
max_t(u32, VNIC_FNIC_WQ_DESCS_MIN,
@@ -139,40 +142,28 @@ int fnic_get_vnic_config(struct fnic *fnic)
c->wq_copy_count = min_t(u16, FNIC_WQ_COPY_MAX, c->wq_copy_count);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC MAC addr %pM "
- "wq/wq_copy/rq %d/%d/%d\n",
- fnic->ctlr.ctl_src_addr,
+ dev_info(&fnic->pdev->dev, "fNIC MAC addr %p wq/wq_copy/rq %d/%d/%d\n",
+ fnic->data_src_addr,
c->wq_enet_desc_count, c->wq_copy_desc_count,
c->rq_desc_count);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC node wwn %llx port wwn %llx\n",
+ dev_info(&fnic->pdev->dev, "fNIC node wwn 0x%llx port wwn 0x%llx\n",
c->node_wwn, c->port_wwn);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC ed_tov %d ra_tov %d\n",
+ dev_info(&fnic->pdev->dev, "fNIC ed_tov %d ra_tov %d\n",
c->ed_tov, c->ra_tov);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC mtu %d intr timer %d\n",
+ dev_info(&fnic->pdev->dev, "fNIC mtu %d intr timer %d\n",
c->maxdatafieldsize, c->intr_timer);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC flags 0x%x luns per tgt %d\n",
+ dev_info(&fnic->pdev->dev, "fNIC flags 0x%x luns per tgt %d\n",
c->flags, c->luns_per_tgt);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC flogi_retries %d flogi timeout %d\n",
+ dev_info(&fnic->pdev->dev, "fNIC flogi_retries %d flogi timeout %d\n",
c->flogi_retries, c->flogi_timeout);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC plogi retries %d plogi timeout %d\n",
+ dev_info(&fnic->pdev->dev, "fNIC plogi retries %d plogi timeout %d\n",
c->plogi_retries, c->plogi_timeout);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC io throttle count %d link dn timeout %d\n",
+ dev_info(&fnic->pdev->dev, "fNIC io throttle count %d link dn timeout %d\n",
c->io_throttle_count, c->link_down_timeout);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC port dn io retries %d port dn timeout %d\n",
+ dev_info(&fnic->pdev->dev, "fNIC port dn io retries %d port dn timeout %d\n",
c->port_down_io_retries, c->port_down_timeout);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC wq_copy_count: %d\n", c->wq_copy_count);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC intr mode: %d\n", c->intr_mode);
+ dev_info(&fnic->pdev->dev, "fNIC wq_copy_count: %d\n", c->wq_copy_count);
+ dev_info(&fnic->pdev->dev, "fNIC intr mode: %d\n", c->intr_mode);
return 0;
}
@@ -206,18 +197,12 @@ void fnic_get_res_counts(struct fnic *fnic)
fnic->intr_count = vnic_dev_get_res_count(fnic->vdev,
RES_TYPE_INTR_CTRL);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC fw resources wq_count: %d\n", fnic->wq_count);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC fw resources raw_wq_count: %d\n", fnic->raw_wq_count);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC fw resources wq_copy_count: %d\n", fnic->wq_copy_count);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC fw resources rq_count: %d\n", fnic->rq_count);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC fw resources cq_count: %d\n", fnic->cq_count);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC fw resources intr_count: %d\n", fnic->intr_count);
+ dev_info(&fnic->pdev->dev, "vNIC fw resources wq_count: %d\n", fnic->wq_count);
+ dev_info(&fnic->pdev->dev, "vNIC fw resources raw_wq_count: %d\n", fnic->raw_wq_count);
+ dev_info(&fnic->pdev->dev, "vNIC fw resources wq_copy_count: %d\n", fnic->wq_copy_count);
+ dev_info(&fnic->pdev->dev, "vNIC fw resources rq_count: %d\n", fnic->rq_count);
+ dev_info(&fnic->pdev->dev, "vNIC fw resources cq_count: %d\n", fnic->cq_count);
+ dev_info(&fnic->pdev->dev, "vNIC fw resources intr_count: %d\n", fnic->intr_count);
}
void fnic_free_vnic_resources(struct fnic *fnic)
@@ -253,19 +238,17 @@ int fnic_alloc_vnic_resources(struct fnic *fnic)
intr_mode = vnic_dev_get_intr_mode(fnic->vdev);
- shost_printk(KERN_INFO, fnic->lport->host, "vNIC interrupt mode: %s\n",
+ dev_info(&fnic->pdev->dev, "vNIC interrupt mode: %s\n",
intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
intr_mode == VNIC_DEV_INTR_MODE_MSIX ?
"MSI-X" : "unknown");
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC resources avail: wq %d cp_wq %d raw_wq %d rq %d",
+ dev_info(&fnic->pdev->dev, "res avail: wq %d cp_wq %d raw_wq %d rq %d",
fnic->wq_count, fnic->wq_copy_count,
fnic->raw_wq_count, fnic->rq_count);
- shost_printk(KERN_INFO, fnic->lport->host,
- "vNIC resources avail: cq %d intr %d cpy-wq desc count %d\n",
+ dev_info(&fnic->pdev->dev, "res avail: cq %d intr %d cpy-wq desc count %d\n",
fnic->cq_count, fnic->intr_count,
fnic->config.wq_copy_desc_count);
@@ -340,8 +323,7 @@ int fnic_alloc_vnic_resources(struct fnic *fnic)
RES_TYPE_INTR_PBA_LEGACY, 0);
if (!fnic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Failed to hook legacy pba resource\n");
+ dev_err(&fnic->pdev->dev, "Failed to hook legacy pba resource\n");
err = -ENODEV;
goto err_out_cleanup;
}
@@ -444,8 +426,7 @@ int fnic_alloc_vnic_resources(struct fnic *fnic)
/* init the stats memory by making the first call here */
err = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "vnic_dev_stats_dump failed - x%x\n", err);
+ dev_err(&fnic->pdev->dev, "vnic_dev_stats_dump failed - x%x\n", err);
goto err_out_cleanup;
}
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 2ba61dba4569..7133b254cbe4 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -23,11 +23,13 @@
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
-#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
+#include <scsi/scsi_transport_fc.h>
#include "fnic_io.h"
#include "fnic.h"
+static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);
+
const char *fnic_state_str[] = {
[FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE",
[FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
@@ -65,6 +67,18 @@ static const char *fcpio_status_str[] = {
[FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNHMAP_CHNG_PEND",
};
+enum terminate_io_return {
+ TERM_SUCCESS = 0,
+ TERM_NO_SC = 1,
+ TERM_IO_REQ_NOT_FOUND,
+ TERM_ANOTHER_PORT,
+ TERM_GSTATE,
+ TERM_IO_BLOCKED,
+ TERM_OUT_OF_WQ_DESC,
+ TERM_TIMED_OUT,
+ TERM_MISC,
+};
+
const char *fnic_state_to_str(unsigned int state)
{
if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
@@ -90,8 +104,6 @@ static const char *fnic_fcpio_status_to_str(unsigned int status)
return fcpio_status_str[status];
}
-static void fnic_cleanup_io(struct fnic *fnic);
-
/*
* Unmap the data buffer and sense buffer for an io_req,
* also unmap and free the device-private scatter/gather list.
@@ -114,6 +126,65 @@ static void fnic_release_ioreq_buf(struct fnic *fnic,
SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}
+static bool
+fnic_count_portid_ioreqs_iter(struct fnic *fnic, struct scsi_cmnd *sc,
+ void *data1, void *data2)
+{
+ u32 *portid = data1;
+ unsigned int *count = data2;
+ struct fnic_io_req *io_req = fnic_priv(sc)->io_req;
+
+ if (!io_req || (*portid && (io_req->port_id != *portid)))
+ return true;
+
+ *count += 1;
+ return true;
+}
+
+unsigned int fnic_count_ioreqs(struct fnic *fnic, u32 portid)
+{
+ unsigned int count = 0;
+
+ fnic_scsi_io_iter(fnic, fnic_count_portid_ioreqs_iter,
+ &portid, &count);
+
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "portid = 0x%x count = %u\n", portid, count);
+ return count;
+}
+
+unsigned int fnic_count_all_ioreqs(struct fnic *fnic)
+{
+ return fnic_count_ioreqs(fnic, 0);
+}
+
+static bool
+fnic_count_lun_ioreqs_iter(struct fnic *fnic, struct scsi_cmnd *sc,
+ void *data1, void *data2)
+{
+ struct scsi_device *scsi_device = data1;
+ unsigned int *count = data2;
+
+ if (sc->device != scsi_device || !fnic_priv(sc)->io_req)
+ return true;
+
+ *count += 1;
+ return true;
+}
+
+unsigned int
+fnic_count_lun_ioreqs(struct fnic *fnic, struct scsi_device *scsi_device)
+{
+ unsigned int count = 0;
+
+ fnic_scsi_io_iter(fnic, fnic_count_lun_ioreqs_iter,
+ scsi_device, &count);
+
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "lun = %p count = %u\n", scsi_device, count);
+ return count;
+}
+
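The three counters above share one walker; a hedged sketch of the callback contract, with fnic_scsi_io_iter's signature inferred from these call sites:

	/* Return true to keep iterating over outstanding commands;
	 * data1/data2 are opaque cookies chosen by the caller. */
	typedef bool (*fnic_io_iter_fn)(struct fnic *fnic,
					struct scsi_cmnd *sc,
					void *data1, void *data2);

	/* e.g. counting I/Os bound to one remote port: */
	u32 portid = 0x010203;		/* illustrative FC_ID */
	unsigned int count = 0;

	fnic_scsi_io_iter(fnic, fnic_count_portid_ioreqs_iter,
			  &portid, &count);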
/* Free up Copy Wq descriptors. Called with copy_wq lock held */
static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq, unsigned int hwq)
{
@@ -179,12 +250,11 @@ int fnic_fw_reset_handler(struct fnic *fnic)
struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0];
int ret = 0;
unsigned long flags;
+ unsigned int ioreq_count;
/* indicate fwreset to io path */
fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);
-
- skb_queue_purge(&fnic->frame_queue);
- skb_queue_purge(&fnic->tx_queue);
+ ioreq_count = fnic_count_all_ioreqs(fnic);
/* wait for io cmpl */
while (atomic_read(&fnic->in_flight))
@@ -198,6 +268,8 @@ int fnic_fw_reset_handler(struct fnic *fnic)
if (!vnic_wq_copy_desc_avail(wq))
ret = -EAGAIN;
else {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ioreq_count: %u\n", ioreq_count);
fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
@@ -211,11 +283,11 @@ int fnic_fw_reset_handler(struct fnic *fnic)
if (!ret) {
atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"Issued fw reset\n");
} else {
fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"Failed to issue fw reset\n");
}
@@ -231,10 +303,10 @@ int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
{
struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0];
enum fcpio_flogi_reg_format_type format;
- struct fc_lport *lp = fnic->lport;
u8 gw_mac[ETH_ALEN];
int ret = 0;
unsigned long flags;
+ struct fnic_iport_s *iport = &fnic->iport;
spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
@@ -246,28 +318,23 @@ int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
goto flogi_reg_ioreq_end;
}
- if (fnic->ctlr.map_dest) {
- eth_broadcast_addr(gw_mac);
- format = FCPIO_FLOGI_REG_DEF_DEST;
- } else {
- memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
- format = FCPIO_FLOGI_REG_GW_DEST;
- }
+ memcpy(gw_mac, fnic->iport.fcfmac, ETH_ALEN);
+ format = FCPIO_FLOGI_REG_GW_DEST;
- if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
+ if (fnic->config.flags & VFCF_FIP_CAPABLE) {
fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
fc_id, gw_mac,
- fnic->data_src_addr,
- lp->r_a_tov, lp->e_d_tov);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
- fc_id, fnic->data_src_addr, gw_mac);
+ fnic->iport.fpma,
+ iport->r_a_tov, iport->e_d_tov);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI FIP reg issued fcid: 0x%x src %p dest %p\n",
+ fc_id, fnic->iport.fpma, gw_mac);
} else {
fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
format, fc_id, gw_mac);
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "FLOGI reg issued fcid 0x%x map %d dest 0x%p\n",
- fc_id, fnic->ctlr.map_dest, gw_mac);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FLOGI reg issued fcid 0x%x dest %p\n",
+ fc_id, gw_mac);
}
atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
@@ -295,13 +362,17 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
{
struct scatterlist *sg;
struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
- struct fc_rport_libfc_priv *rp = rport->dd_data;
struct host_sg_desc *desc;
struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
unsigned int i;
int flags;
u8 exch_flags;
struct scsi_lun fc_lun;
+ struct fnic_tport_s *tport;
+ struct rport_dd_data_s *rdd_data;
+
+ rdd_data = rport->dd_data;
+ tport = rdd_data->tport;
if (sg_count) {
/* For each SGE, create a device desc entry */
@@ -342,7 +413,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
free_wq_copy_descs(fnic, wq, hwq);
if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"fnic_queue_wq_copy_desc failure - no descriptors\n");
atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
return SCSI_MLQUEUE_HOST_BUSY;
@@ -356,7 +427,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
exch_flags = 0;
if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
- (rp->flags & FC_RP_FLAGS_RETRY))
+ (tport->tgt_flags & FDLS_FC_RP_FLAGS_RETRY))
exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;
fnic_queue_wq_copy_desc_icmnd_16(wq, mqtag,
@@ -371,8 +442,8 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
sc->cmnd, sc->cmd_len,
scsi_bufflen(sc),
fc_lun.scsi_lun, io_req->port_id,
- rport->maxframe_size, rp->r_a_tov,
- rp->e_d_tov);
+ tport->max_payload_size,
+ tport->r_a_tov, tport->e_d_tov);
atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
@@ -388,10 +459,10 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
struct request *const rq = scsi_cmd_to_rq(sc);
uint32_t mqtag = 0;
void (*done)(struct scsi_cmnd *) = scsi_done;
- struct fc_lport *lp = shost_priv(sc->device->host);
struct fc_rport *rport;
struct fnic_io_req *io_req = NULL;
- struct fnic *fnic = lport_priv(lp);
+ struct fnic *fnic = *((struct fnic **) shost_priv(sc->device->host));
+ struct fnic_iport_s *iport = NULL;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
struct vnic_wq_copy *wq;
int ret = 1;
@@ -400,32 +471,14 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
unsigned long flags = 0;
unsigned long ptr;
int io_lock_acquired = 0;
- struct fc_rport_libfc_priv *rp;
uint16_t hwq = 0;
-
- mqtag = blk_mq_unique_tag(rq);
- spin_lock_irqsave(&fnic->fnic_lock, flags);
-
- if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
- "fnic IO blocked flags: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n",
- fnic->state_flags);
- return SCSI_MLQUEUE_HOST_BUSY;
- }
-
- if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
- "fnic flags: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n",
- fnic->state_flags);
- return SCSI_MLQUEUE_HOST_BUSY;
- }
+ struct fnic_tport_s *tport = NULL;
+ struct rport_dd_data_s *rdd_data;
+ uint16_t lun0_delay = 0;
rport = starget_to_rport(scsi_target(sc->device));
if (!rport) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"returning DID_NO_CONNECT for IO as rport is NULL\n");
sc->result = DID_NO_CONNECT << 16;
done(sc);
@@ -434,50 +487,96 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
ret = fc_remote_port_chkready(rport);
if (ret) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"rport is not ready\n");
- atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
+ atomic64_inc(&fnic_stats->misc_stats.tport_not_ready);
sc->result = ret;
done(sc);
return 0;
}
- rp = rport->dd_data;
- if (!rp || rp->rp_state == RPORT_ST_DELETE) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "rport 0x%x removed, returning DID_NO_CONNECT\n",
- rport->port_id);
+ mqtag = blk_mq_unique_tag(rq);
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ iport = &fnic->iport;
- atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
- sc->result = DID_NO_CONNECT<<16;
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "returning DID_NO_CONNECT for IO as iport state: %d\n",
+ iport->state);
+ sc->result = DID_NO_CONNECT << 16;
done(sc);
return 0;
}
- if (rp->rp_state != RPORT_ST_READY) {
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n",
- rport->port_id, rp->rp_state);
+ /* fc_remote_port_add() may have added the tport to
+ * fc_transport but dd_data not yet set
+ */
+ rdd_data = rport->dd_data;
+ tport = rdd_data->tport;
+ if (!tport || (rdd_data->iport != iport)) {
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "dd_data not yet set in SCSI for rport portid: 0x%x\n",
+ rport->port_id);
+ tport = fnic_find_tport_by_fcid(iport, rport->port_id);
+ if (!tport) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "returning DID_BUS_BUSY for IO as tport not found for: 0x%x\n",
+ rport->port_id);
+ sc->result = DID_BUS_BUSY << 16;
+ done(sc);
+ return 0;
+ }
+
+ /* Re-assign same params as in fnic_fdls_add_tport */
+ rport->maxframe_size = FNIC_FC_MAX_PAYLOAD_LEN;
+ rport->supported_classes =
+ FC_COS_CLASS3 | FC_RPORT_ROLE_FCP_TARGET;
+ /* the dd_data is allocated by fctransport of size dd_fcrport_size */
+ rdd_data = rport->dd_data;
+ rdd_data->tport = tport;
+ rdd_data->iport = iport;
+ tport->rport = rport;
+ tport->flags |= FNIC_FDLS_SCSI_REGISTERED;
+ }
- sc->result = DID_IMM_RETRY << 16;
+ if ((tport->state != FDLS_TGT_STATE_READY)
+ && (tport->state != FDLS_TGT_STATE_ADISC)) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "returning DID_NO_CONNECT for IO as tport state: %d\n",
+ tport->state);
+ sc->result = DID_NO_CONNECT << 16;
done(sc);
return 0;
}
- if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
+ atomic_inc(&fnic->in_flight);
+ atomic_inc(&tport->in_flight);
+
+ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) {
+ atomic_dec(&fnic->in_flight);
+ atomic_dec(&tport->in_flight);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
- "state not ready: %d/link not up: %d Returning HOST_BUSY\n",
- lp->state, lp->link_up);
return SCSI_MLQUEUE_HOST_BUSY;
}
- atomic_inc(&fnic->in_flight);
+ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) {
+ atomic_dec(&fnic->in_flight);
+ atomic_dec(&tport->in_flight);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "fnic flags FW reset: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n",
+ fnic->state_flags);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
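+ /*
+ * The first command queued to this tport triggers a one-time
+ * LUN0_DELAY_TIME pause on the way out (see the mdelay() below).
+ */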
+ if (!tport->lun0_delay) {
+ lun0_delay = 1;
+ tport->lun0_delay++;
+ }
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
fnic_priv(sc)->state = FNIC_IOREQ_NOT_INITED;
fnic_priv(sc)->flags = FNIC_NO_FLAGS;
@@ -499,6 +598,7 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
goto out;
}
+ io_req->tport = tport;
/* Determine the type of scatter/gather list we need */
io_req->sgl_cnt = sg_count;
io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
@@ -575,6 +675,7 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
mempool_free(io_req, fnic->io_req_pool);
}
atomic_dec(&fnic->in_flight);
+ atomic_dec(&tport->in_flight);
return ret;
} else {
atomic64_inc(&fnic_stats->io_stats.active_ios);
@@ -602,6 +703,14 @@ out:
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
atomic_dec(&fnic->in_flight);
+ atomic_dec(&tport->in_flight);
+
+ if (lun0_delay) {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "LUN0 delay\n");
+ mdelay(LUN0_DELAY_TIME);
+ }
+
return ret;
}
@@ -625,7 +734,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
atomic64_inc(&reset_stats->fw_reset_completions);
/* Clean up all outstanding io requests */
- fnic_cleanup_io(fnic);
+ fnic_cleanup_io(fnic, SCSI_NO_TAG);
atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
@@ -637,44 +746,37 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
/* Check status of reset completion */
if (!hdr_status) {
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"reset cmpl success\n");
/* Ready to send flogi out */
fnic->state = FNIC_IN_ETH_MODE;
} else {
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"reset failed with header status: %s\n",
fnic_fcpio_status_to_str(hdr_status));
- /*
- * Unable to change to eth mode, cannot send out flogi
- * Change state to fc mode, so that subsequent Flogi
- * requests from libFC will cause more attempts to
- * reset the firmware. Free the cached flogi
- */
fnic->state = FNIC_IN_FC_MODE;
atomic64_inc(&reset_stats->fw_reset_failures);
ret = -1;
}
} else {
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"Unexpected state while processing reset completion: %s\n",
fnic_state_to_str(fnic->state));
atomic64_inc(&reset_stats->fw_reset_failures);
ret = -1;
}
- /* Thread removing device blocks till firmware reset is complete */
- if (fnic->remove_wait)
- complete(fnic->remove_wait);
+ if (fnic->fw_reset_done)
+ complete(fnic->fw_reset_done);
/*
* If fnic is being removed, or fw reset failed
* free the flogi frame. Else, send it out
*/
- if (fnic->remove_wait || ret) {
+ if (ret) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- skb_queue_purge(&fnic->tx_queue);
+ fnic_free_txq(&fnic->tx_queue);
goto reset_cmpl_handler_end;
}
@@ -710,19 +812,19 @@ static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
/* Check flogi registration completion status */
if (!hdr_status) {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "flog reg succeeded\n");
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "FLOGI reg succeeded\n");
fnic->state = FNIC_IN_FC_MODE;
} else {
FNIC_SCSI_DBG(KERN_DEBUG,
- fnic->lport->host, fnic->fnic_num,
- "fnic flogi reg :failed %s\n",
+ fnic->host, fnic->fnic_num,
+ "fnic flogi reg failed: %s\n",
fnic_fcpio_status_to_str(hdr_status));
fnic->state = FNIC_IN_ETH_MODE;
ret = -1;
}
} else {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"Unexpected fnic state %s while"
" processing flogi reg completion\n",
fnic_state_to_str(fnic->state));
@@ -795,7 +897,7 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
spin_unlock_irqrestore(&fnic->wq_copy_lock[wq_index], flags);
FNIC_TRACE(fnic_fcpio_ack_handler,
- fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
+ fnic->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
ox_id_tag[4], ox_id_tag[5]);
}
@@ -833,36 +935,36 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind
hwq = blk_mq_unique_tag_to_hwq(mqtag);
if (hwq != cq_index) {
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
hwq, mqtag, tag, cq_index);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hdr status: %s icmnd completion on the wrong queue\n",
fnic_fcpio_status_to_str(hdr_status));
}
if (tag >= fnic->fnic_max_tag_id) {
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
hwq, mqtag, tag, cq_index);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hdr status: %s Out of range tag\n",
fnic_fcpio_status_to_str(hdr_status));
return;
}
spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
- sc = scsi_host_find_tag(fnic->lport->host, id);
+ sc = scsi_host_find_tag(fnic->host, id);
WARN_ON_ONCE(!sc);
if (!sc) {
atomic64_inc(&fnic_stats->io_stats.sc_null);
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- shost_printk(KERN_ERR, fnic->lport->host,
+ shost_printk(KERN_ERR, fnic->host,
"icmnd_cmpl sc is null - "
"hdr status = %s tag = 0x%x desc = 0x%p\n",
fnic_fcpio_status_to_str(hdr_status), id, desc);
FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
- fnic->lport->host->host_no, id,
+ fnic->host->host_no, id,
((u64)icmnd_cmpl->_resvd0[1] << 16 |
(u64)icmnd_cmpl->_resvd0[0]),
((u64)hdr_status << 16 |
@@ -885,7 +987,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind
atomic64_inc(&fnic_stats->io_stats.ioreq_null);
fnic_priv(sc)->flags |= FNIC_IO_REQ_NULL;
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- shost_printk(KERN_ERR, fnic->lport->host,
+ shost_printk(KERN_ERR, fnic->host,
"icmnd_cmpl io_req is null - "
"hdr status = %s tag = 0x%x sc 0x%p\n",
fnic_fcpio_status_to_str(hdr_status), id, sc);
@@ -912,7 +1014,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind
if(FCPIO_ABORTED == hdr_status)
fnic_priv(sc)->flags |= FNIC_IO_ABORTED;
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"icmnd_cmpl abts pending "
"hdr status = %s tag = 0x%x sc = 0x%p "
"scsi_status = %x residual = %d\n",
@@ -943,6 +1045,9 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind
if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
+
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "xfer_len: %llu", xfer_len);
break;
case FCPIO_TIMEOUT: /* request was timed out */
@@ -1004,7 +1109,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind
if (hdr_status != FCPIO_SUCCESS) {
atomic64_inc(&fnic_stats->io_stats.io_failures);
- shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
+ shost_printk(KERN_ERR, fnic->host, "hdr status = %s\n",
fnic_fcpio_status_to_str(hdr_status));
}
@@ -1024,13 +1129,13 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind
desc, cmd_trace, fnic_flags_and_state(sc));
if (sc->sc_data_direction == DMA_FROM_DEVICE) {
- fnic->lport->host_stats.fcp_input_requests++;
+ fnic_stats->host_stats.fcp_input_requests++;
fnic->fcp_input_bytes += xfer_len;
} else if (sc->sc_data_direction == DMA_TO_DEVICE) {
- fnic->lport->host_stats.fcp_output_requests++;
+ fnic_stats->host_stats.fcp_output_requests++;
fnic->fcp_output_bytes += xfer_len;
} else
- fnic->lport->host_stats.fcp_control_requests++;
+ fnic_stats->host_stats.fcp_control_requests++;
/* Call SCSI completion function to complete the IO */
scsi_done(sc);
@@ -1097,27 +1202,27 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
hwq = blk_mq_unique_tag_to_hwq(id & FNIC_TAG_MASK);
if (hwq != cq_index) {
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
hwq, mqtag, tag, cq_index);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hdr status: %s ITMF completion on the wrong queue\n",
fnic_fcpio_status_to_str(hdr_status));
}
if (tag > fnic->fnic_max_tag_id) {
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
hwq, mqtag, tag, cq_index);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hdr status: %s Tag out of range\n",
fnic_fcpio_status_to_str(hdr_status));
return;
} else if ((tag == fnic->fnic_max_tag_id) && !(id & FNIC_TAG_DEV_RST)) {
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
hwq, mqtag, tag, cq_index);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hdr status: %s Tag out of range\n",
fnic_fcpio_status_to_str(hdr_status));
return;
@@ -1133,14 +1238,14 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
if (io_req)
sc = io_req->sc;
} else {
- sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
+ sc = scsi_host_find_tag(fnic->host, id & FNIC_TAG_MASK);
}
WARN_ON_ONCE(!sc);
if (!sc) {
atomic64_inc(&fnic_stats->io_stats.sc_null);
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- shost_printk(KERN_ERR, fnic->lport->host,
+ shost_printk(KERN_ERR, fnic->host,
"itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
fnic_fcpio_status_to_str(hdr_status), tag);
return;
@@ -1152,7 +1257,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
atomic64_inc(&fnic_stats->io_stats.ioreq_null);
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
- shost_printk(KERN_ERR, fnic->lport->host,
+ shost_printk(KERN_ERR, fnic->host,
"itmf_cmpl io_req is null - "
"hdr status = %s tag = 0x%x sc 0x%p\n",
fnic_fcpio_status_to_str(hdr_status), tag, sc);
@@ -1163,7 +1268,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
/* Abort and terminate completion of device reset req */
/* REVISIT : Add asserts about various flags */
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Abt/term completion received\n",
hwq, mqtag, tag,
fnic_fcpio_status_to_str(hdr_status));
@@ -1175,7 +1280,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
} else if (id & FNIC_TAG_ABORT) {
/* Completion of abort cmd */
- shost_printk(KERN_DEBUG, fnic->lport->host,
+ shost_printk(KERN_DEBUG, fnic->host,
"hwq: %d mqtag: 0x%x tag: 0x%x Abort header status: %s\n",
hwq, mqtag, tag,
fnic_fcpio_status_to_str(hdr_status));
@@ -1190,7 +1295,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
&term_stats->terminate_fw_timeouts);
break;
case FCPIO_ITMF_REJECTED:
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"abort reject recd. id %d\n",
(int)(id & FNIC_TAG_MASK));
break;
@@ -1225,7 +1330,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"abts cmpl recd. id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
fnic_fcpio_status_to_str(hdr_status));
@@ -1238,11 +1343,11 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
if (io_req->abts_done) {
complete(io_req->abts_done);
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- shost_printk(KERN_INFO, fnic->lport->host,
+ shost_printk(KERN_INFO, fnic->host,
"hwq: %d mqtag: 0x%x tag: 0x%x Waking up abort thread\n",
hwq, mqtag, tag);
} else {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Completing IO\n",
hwq, mqtag,
tag, fnic_fcpio_status_to_str(hdr_status));
@@ -1273,7 +1378,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
}
} else if (id & FNIC_TAG_DEV_RST) {
/* Completion of device reset */
- shost_printk(KERN_INFO, fnic->lport->host,
+ shost_printk(KERN_INFO, fnic->host,
"hwq: %d mqtag: 0x%x tag: 0x%x DR hst: %s\n",
hwq, mqtag,
tag, fnic_fcpio_status_to_str(hdr_status));
@@ -1285,7 +1390,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
sc->device->host->host_no, id, sc,
jiffies_to_msecs(jiffies - start_time),
desc, 0, fnic_flags_and_state(sc));
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Terminate pending\n",
hwq, mqtag,
tag, fnic_fcpio_status_to_str(hdr_status));
@@ -1298,7 +1403,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
sc->device->host->host_no, id, sc,
jiffies_to_msecs(jiffies - start_time),
desc, 0, fnic_flags_and_state(sc));
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"dev reset cmpl recd after time out. "
"id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
@@ -1307,7 +1412,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
}
fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x hst: %s DR completion received\n",
hwq, mqtag,
tag, fnic_fcpio_status_to_str(hdr_status));
@@ -1316,7 +1421,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
} else {
- shost_printk(KERN_ERR, fnic->lport->host,
+ shost_printk(KERN_ERR, fnic->host,
"%s: Unexpected itmf io state: hwq: %d tag 0x%x %s\n",
__func__, hwq, id, fnic_ioreq_state_to_str(fnic_priv(sc)->state));
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
@@ -1371,7 +1476,7 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
break;
default:
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"firmware completion type %d\n",
desc->hdr.type);
break;
@@ -1414,8 +1519,8 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
struct request *const rq = scsi_cmd_to_rq(sc);
struct fnic *fnic = data;
struct fnic_io_req *io_req;
- unsigned long flags = 0;
unsigned long start_time = 0;
+ unsigned long flags;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
uint16_t hwq = 0;
int tag;
@@ -1432,14 +1537,14 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
io_req = fnic_priv(sc)->io_req;
if (!io_req) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hwq: %d mqtag: 0x%x tag: 0x%x flags: 0x%x No ioreq. Returning\n",
hwq, mqtag, tag, fnic_priv(sc)->flags);
return true;
}
if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
- !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
+ !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
/*
* We will be here only when FW completes reset
* without sending completions for outstanding ios.
@@ -1449,6 +1554,7 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
complete(io_req->dr_done);
else if (io_req && io_req->abts_done)
complete(io_req->abts_done);
+
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
return true;
} else if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
@@ -1458,19 +1564,19 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
fnic_priv(sc)->io_req = NULL;
io_req->sc = NULL;
+ start_time = io_req->start_time;
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
/*
* If there is a scsi_cmnd associated with this io_req, then
* free the corresponding state
*/
- start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
sc->result = DID_TRANSPORT_DISRUPTED << 16;
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
- "mqtag:0x%x tag: 0x%x sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "mqtag: 0x%x tag: 0x%x sc: 0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
mqtag, tag, sc, (jiffies - start_time));
if (atomic64_read(&fnic->io_cmpl_skip))
@@ -1479,23 +1585,60 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
atomic64_inc(&fnic_stats->io_stats.io_completions);
FNIC_TRACE(fnic_cleanup_io,
- sc->device->host->host_no, tag, sc,
- jiffies_to_msecs(jiffies - start_time),
- 0, ((u64)sc->cmnd[0] << 32 |
- (u64)sc->cmnd[2] << 24 |
- (u64)sc->cmnd[3] << 16 |
- (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
- fnic_flags_and_state(sc));
-
+ sc->device->host->host_no, tag, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64) sc->cmnd[0] << 32 |
+ (u64) sc->cmnd[2] << 24 |
+ (u64) sc->cmnd[3] << 16 |
+ (u64) sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64) fnic_priv(sc)->flags << 32) |
+ fnic_priv(sc)->state));
+
+ /* Complete the command to SCSI */
scsi_done(sc);
-
return true;
}
-static void fnic_cleanup_io(struct fnic *fnic)
+static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
{
- scsi_host_busy_iter(fnic->lport->host,
- fnic_cleanup_io_iter, fnic);
+ unsigned int io_count = 0;
+ unsigned long flags;
+ struct fnic_io_req *io_req = NULL;
+ struct scsi_cmnd *sc = NULL;
+
+ io_count = fnic_count_all_ioreqs(fnic);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "Outstanding ioreq count: %d active io count: %lld Waiting\n",
+ io_count,
+ atomic64_read(&fnic->fnic_stats.io_stats.active_ios));
+
+ scsi_host_busy_iter(fnic->host,
+ fnic_cleanup_io_iter, fnic);
+
+ /* with sg3utils device reset, SC needs to be retrieved from ioreq */
+ spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
+ io_req = fnic->sw_copy_wq[0].io_req_table[fnic->fnic_max_tag_id];
+ if (io_req) {
+ sc = io_req->sc;
+ if (sc) {
+ if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
+ && !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
+ if (io_req && io_req->dr_done)
+ complete(io_req->dr_done);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+
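+ /*
+ * Wait until every remaining outstanding ioreq has been reclaimed
+ * before returning to the caller.
+ */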
+ while ((io_count = fnic_count_all_ioreqs(fnic))) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "Outstanding ioreq count: %d active io count: %lld Waiting\n",
+ io_count,
+ atomic64_read(&fnic->fnic_stats.io_stats.active_ios));
+
+ schedule_timeout(msecs_to_jiffies(100));
+ }
}
void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
@@ -1516,7 +1659,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
if (id >= fnic->fnic_max_tag_id)
return;
- sc = scsi_host_find_tag(fnic->lport->host, id);
+ sc = scsi_host_find_tag(fnic->host, id);
if (!sc)
return;
@@ -1545,7 +1688,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
wq_copy_cleanup_scsi_cmd:
sc->result = DID_NO_CONNECT << 16;
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, "wq_copy_cleanup_handler:"
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "wq_copy_cleanup_handler:"
" DID_NO_CONNECT\n");
FNIC_TRACE(fnic_wq_copy_cleanup_handler,
@@ -1567,10 +1710,13 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
struct vnic_wq_copy *wq = &fnic->hw_copy_wq[hwq];
struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
unsigned long flags;
+ struct fnic_tport_s *tport = io_req->tport;
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (unlikely(fnic_chk_state_flags_locked(fnic,
FNIC_FLAGS_IO_BLOCKED))) {
+ atomic_dec(&fnic->in_flight);
+ atomic_dec(&tport->in_flight);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return 1;
} else
@@ -1585,7 +1731,8 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
if (!vnic_wq_copy_desc_avail(wq)) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
atomic_dec(&fnic->in_flight);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ atomic_dec(&tport->in_flight);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"fnic_queue_abort_io_req: failure: no descriptors\n");
atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
return 1;
@@ -1619,20 +1766,24 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
struct fnic *fnic = iter_data->fnic;
int abt_tag = 0;
struct fnic_io_req *io_req;
- unsigned long flags;
struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
struct scsi_lun fc_lun;
enum fnic_ioreq_state old_ioreq_state;
uint16_t hwq = 0;
+ unsigned long flags;
abt_tag = blk_mq_unique_tag(rq);
hwq = blk_mq_unique_tag_to_hwq(abt_tag);
- spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
+ if (!sc) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "sc is NULL abt_tag: 0x%x hwq: %d\n", abt_tag, hwq);
+ return true;
+ }
+ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
io_req = fnic_priv(sc)->io_req;
-
if (!io_req || io_req->port_id != iter_data->port_id) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
return true;
@@ -1640,7 +1791,7 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED)) {
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hwq: %d abt_tag: 0x%x flags: 0x%x Device reset is not pending\n",
hwq, abt_tag, fnic_priv(sc)->flags);
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
@@ -1655,37 +1806,40 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
return true;
}
+
if (io_req->abts_done) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "fnic_rport_exch_reset: io_req->abts_done is set "
- "state is %s\n",
+ shost_printk(KERN_ERR, fnic->host,
+ "fnic_rport_exch_reset: io_req->abts_done is set state is %s\n",
fnic_ioreq_state_to_str(fnic_priv(sc)->state));
}
if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED)) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "rport_exch_reset "
- "IO not yet issued %p tag 0x%x flags "
- "%x state %d\n",
- sc, abt_tag, fnic_priv(sc)->flags, fnic_priv(sc)->state);
+ shost_printk(KERN_ERR, fnic->host,
+ "rport_exch_reset IO not yet issued %p abt_tag 0x%x",
+ sc, abt_tag);
+ shost_printk(KERN_ERR, fnic->host,
+ "flags %x state %d\n", fnic_priv(sc)->flags,
+ fnic_priv(sc)->state);
}
old_ioreq_state = fnic_priv(sc)->state;
fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
+
if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
atomic64_inc(&reset_stats->device_reset_terminates);
abt_tag |= FNIC_TAG_DEV_RST;
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "dev reset sc 0x%p\n", sc);
}
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "fnic_rport_exch_reset dev rst sc 0x%p\n", sc);
- BUG_ON(io_req->abts_done);
-
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "fnic_rport_exch_reset: dev rst sc 0x%p\n", sc);
+ WARN_ON_ONCE(io_req->abts_done);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"fnic_rport_reset_exch: Issuing abts\n");
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- /* Now queue the abort command to firmware */
+ /* Queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
if (fnic_queue_abort_io_req(fnic, abt_tag,
@@ -1698,7 +1852,7 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
* lun reset
*/
spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hwq: %d abt_tag: 0x%x flags: 0x%x Queuing abort failed\n",
hwq, abt_tag, fnic_priv(sc)->flags);
if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
@@ -1714,11 +1868,14 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
atomic64_inc(&term_stats->terminates);
iter_data->term_cnt++;
}
+
return true;
}
-static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
+void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
{
+ unsigned int io_count = 0;
+ unsigned long flags;
struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
struct fnic_rport_abort_io_iter_data iter_data = {
.fnic = fnic,
@@ -1726,53 +1883,115 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
.term_cnt = 0,
};
- FNIC_SCSI_DBG(KERN_DEBUG,
- fnic->lport->host, fnic->fnic_num,
- "fnic_rport_exch_reset called portid 0x%06x\n",
- port_id);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "fnic rport exchange reset for tport: 0x%06x\n",
+ port_id);
if (fnic->in_remove)
return;
- scsi_host_busy_iter(fnic->lport->host, fnic_rport_abort_io_iter,
+ io_count = fnic_count_ioreqs(fnic, port_id);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "Starting terminates: rport:0x%x portid-io-count: %d active-io-count: %lld\n",
+ port_id, io_count,
+ atomic64_read(&fnic->fnic_stats.io_stats.active_ios));
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ /* Bump in_flight counter to hold off fnic_fw_reset_handler. */
+ atomic_inc(&fnic->in_flight);
+ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) {
+ atomic_dec(&fnic->in_flight);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ scsi_host_busy_iter(fnic->host, fnic_rport_abort_io_iter,
&iter_data);
+
if (iter_data.term_cnt > atomic64_read(&term_stats->max_terminates))
atomic64_set(&term_stats->max_terminates, iter_data.term_cnt);
+ atomic_dec(&fnic->in_flight);
+
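+ /* Wait for the terminated I/Os on this port to drain. */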
+ while ((io_count = fnic_count_ioreqs(fnic, port_id)))
+ schedule_timeout(msecs_to_jiffies(1000));
+
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "rport: 0x%x remaining portid-io-count: %d ",
+ port_id, io_count);
}
void fnic_terminate_rport_io(struct fc_rport *rport)
{
- struct fc_rport_libfc_priv *rdata;
- struct fc_lport *lport;
- struct fnic *fnic;
+ struct fnic_tport_s *tport;
+ struct rport_dd_data_s *rdd_data;
+ struct fnic_iport_s *iport = NULL;
+ struct fnic *fnic = NULL;
if (!rport) {
- printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
+ pr_err("rport is NULL\n");
return;
}
- rdata = rport->dd_data;
- if (!rdata) {
- printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n");
- return;
+ rdd_data = rport->dd_data;
+ if (rdd_data) {
+ tport = rdd_data->tport;
+ if (!tport) {
+ pr_err(
+ "term rport io called after tport is deleted. Returning 0x%8x\n",
+ rport->port_id);
+ } else {
+ pr_err(
+ "term rport io called after tport is set 0x%8x\n",
+ rport->port_id);
+ pr_err(
+ "tport maybe rediscovered\n");
+
+ iport = (struct fnic_iport_s *) tport->iport;
+ fnic = iport->fnic;
+ fnic_rport_exch_reset(fnic, rport->port_id);
+ }
}
- lport = rdata->local_port;
+}
- if (!lport) {
- printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n");
- return;
- }
- fnic = lport_priv(lport);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n",
- rport->port_name, rport->node_name, rport,
- rport->port_id);
+/*
+ * FCP-SCSI specific handling for module unload
+ */
+void fnic_scsi_unload(struct fnic *fnic)
+{
+ unsigned long flags;
- if (fnic->in_remove)
- return;
+ /*
+ * Mark state so that the workqueue thread stops forwarding
+ * received frames and link events to the local port. ISR and
+ * other threads that can queue work items will also stop
+ * creating work items on the fnic workqueue
+ */
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ fnic->iport.state = FNIC_IPORT_STATE_LINK_WAIT;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ if (fdls_get_state(&fnic->iport.fabric) != FDLS_STATE_INIT)
+ fnic_scsi_fcpio_reset(fnic);
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ fnic->in_remove = 1;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ fnic_flush_tport_event_list(fnic);
+ fnic_delete_fcp_tports(fnic);
+}
- fnic_rport_exch_reset(fnic, rport->port_id);
+void fnic_scsi_unload_cleanup(struct fnic *fnic)
+{
+ int hwq = 0;
+
+ fc_remove_host(fnic->host);
+ scsi_remove_host(fnic->host);
+ for (hwq = 0; hwq < fnic->wq_copy_count; hwq++)
+ kfree(fnic->sw_copy_wq[hwq].io_req_table);
}
/*
@@ -1783,10 +2002,12 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
int fnic_abort_cmd(struct scsi_cmnd *sc)
{
struct request *const rq = scsi_cmd_to_rq(sc);
- struct fc_lport *lp;
+ struct fnic_iport_s *iport;
+ struct fnic_tport_s *tport;
struct fnic *fnic;
struct fnic_io_req *io_req = NULL;
struct fc_rport *rport;
+ struct rport_dd_data_s *rdd_data;
unsigned long flags;
unsigned long start_time = 0;
int ret = SUCCESS;
@@ -1806,11 +2027,11 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
fc_block_scsi_eh(sc);
/* Get local-port, check ready and link up */
- lp = shost_priv(sc->device->host);
-
- fnic = lport_priv(lp);
+ fnic = *((struct fnic **) shost_priv(sc->device->host));
spin_lock_irqsave(&fnic->fnic_lock, flags);
+ iport = &fnic->iport;
+
fnic_stats = &fnic->fnic_stats;
abts_stats = &fnic->fnic_stats.abts_stats;
term_stats = &fnic->fnic_stats.term_stats;
@@ -1821,7 +2042,44 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
fnic_priv(sc)->flags = FNIC_NO_FLAGS;
- if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
+ rdd_data = rport->dd_data;
+ tport = rdd_data->tport;
+
+ if (!tport) {
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Abort cmd called after tport delete! rport fcid: 0x%x",
+ rport->port_id);
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "lun: %llu hwq: 0x%x mqtag: 0x%x Op: 0x%x flags: 0x%x\n",
+ sc->device->lun, hwq, mqtag,
+ sc->cmnd[0], fnic_priv(sc)->flags);
+ ret = FAILED;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ goto fnic_abort_cmd_end;
+ }
+
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Abort cmd called rport fcid: 0x%x lun: %llu hwq: 0x%x mqtag: 0x%x",
+ rport->port_id, sc->device->lun, hwq, mqtag);
+
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Op: 0x%x flags: 0x%x\n",
+ sc->cmnd[0],
+ fnic_priv(sc)->flags);
+
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ atomic64_inc(&fnic_stats->misc_stats.iport_not_ready);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport NOT in READY state");
+ ret = FAILED;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ goto fnic_abort_cmd_end;
+ }
+
+ if ((tport->state != FDLS_TGT_STATE_READY) &&
+ (tport->state != FDLS_TGT_STATE_ADISC)) {
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "tport state: %d\n", tport->state);
ret = FAILED;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
goto fnic_abort_cmd_end;
@@ -1843,6 +2101,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
io_req = fnic_priv(sc)->io_req;
if (!io_req) {
+ ret = FAILED;
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
goto fnic_abort_cmd_end;
}
@@ -1870,7 +2129,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
else
atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"CDB Opcode: 0x%02x Abort issued time: %lu msec\n",
sc->cmnd[0], abt_issued_time);
/*
@@ -1893,7 +2152,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
if (fc_remote_port_chkready(rport) == 0)
task_req = FCPIO_ITMF_ABT_TASK;
else {
- atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
+ atomic64_inc(&fnic_stats->misc_stats.tport_not_ready);
task_req = FCPIO_ITMF_ABT_TASK_TERM;
}
@@ -1961,7 +2220,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"Issuing host reset due to out of order IO\n");
ret = FAILED;
@@ -2009,7 +2268,7 @@ fnic_abort_cmd_end:
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
fnic_flags_and_state(sc));
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"Returning from abort cmd type %x %s\n", task_req,
(ret == SUCCESS) ?
"SUCCESS" : "FAILED");
@@ -2027,6 +2286,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
unsigned long flags;
uint16_t hwq = 0;
uint32_t tag = 0;
+ struct fnic_tport_s *tport = io_req->tport;
tag = io_req->tag;
hwq = blk_mq_unique_tag_to_hwq(tag);
@@ -2037,8 +2297,10 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
FNIC_FLAGS_IO_BLOCKED))) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return FAILED;
- } else
+ } else {
atomic_inc(&fnic->in_flight);
+ atomic_inc(&tport->in_flight);
+ }
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
@@ -2047,7 +2309,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
free_wq_copy_descs(fnic, wq, hwq);
if (!vnic_wq_copy_desc_avail(wq)) {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"queue_dr_io_req failure - no descriptors\n");
atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
ret = -EAGAIN;
@@ -2072,6 +2334,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
lr_io_req_end:
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
atomic_dec(&fnic->in_flight);
+ atomic_dec(&tport->in_flight);
return ret;
}
@@ -2114,7 +2377,7 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
* Found IO that is still pending with firmware and
* belongs to the LUN that we are resetting
*/
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"Found IO in %s on lun\n",
fnic_ioreq_state_to_str(fnic_priv(sc)->state));
@@ -2124,14 +2387,14 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
}
if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
(!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED))) {
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"dev rst not pending sc 0x%p\n", sc);
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
return true;
}
if (io_req->abts_done)
- shost_printk(KERN_ERR, fnic->lport->host,
+ shost_printk(KERN_ERR, fnic->host,
"%s: io_req->abts_done is set state is %s\n",
__func__, fnic_ioreq_state_to_str(fnic_priv(sc)->state));
old_ioreq_state = fnic_priv(sc)->state;
@@ -2147,7 +2410,7 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
BUG_ON(io_req->abts_done);
if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"dev rst sc 0x%p\n", sc);
}
@@ -2169,7 +2432,7 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
fnic_priv(sc)->state = old_ioreq_state;
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
iter_data->ret = FAILED;
- FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"hwq: %d abt_tag: 0x%lx Abort could not be queued\n",
hwq, abt_tag);
return false;
@@ -2248,7 +2511,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
iter_data.lr_sc = lr_sc;
- scsi_host_busy_iter(fnic->lport->host,
+ scsi_host_busy_iter(fnic->host,
fnic_pending_aborts_iter, &iter_data);
if (iter_data.ret == FAILED) {
ret = iter_data.ret;
@@ -2261,7 +2524,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
ret = 1;
clean_pending_aborts_end:
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"exit status: %d\n", ret);
return ret;
}
@@ -2274,11 +2537,11 @@ clean_pending_aborts_end:
int fnic_device_reset(struct scsi_cmnd *sc)
{
struct request *rq = scsi_cmd_to_rq(sc);
- struct fc_lport *lp;
struct fnic *fnic;
struct fnic_io_req *io_req = NULL;
struct fc_rport *rport;
int status;
+ int count = 0;
int ret = FAILED;
unsigned long flags;
unsigned long start_time = 0;
@@ -2289,31 +2552,63 @@ int fnic_device_reset(struct scsi_cmnd *sc)
DECLARE_COMPLETION_ONSTACK(tm_done);
bool new_sc = 0;
uint16_t hwq = 0;
+ struct fnic_iport_s *iport = NULL;
+ struct rport_dd_data_s *rdd_data;
+ struct fnic_tport_s *tport;
+ u32 old_soft_reset_count;
+ u32 old_link_down_cnt;
+ int exit_dr = 0;
/* Wait for rport to unblock */
fc_block_scsi_eh(sc);
/* Get local-port, check ready and link up */
- lp = shost_priv(sc->device->host);
+ fnic = *((struct fnic **) shost_priv(sc->device->host));
+ iport = &fnic->iport;
- fnic = lport_priv(lp);
fnic_stats = &fnic->fnic_stats;
- reset_stats = &fnic->fnic_stats.reset_stats;
+ reset_stats = &fnic_stats->reset_stats;
atomic64_inc(&reset_stats->device_resets);
rport = starget_to_rport(scsi_target(sc->device));
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "fcid: 0x%x lun: 0x%llx hwq: %d mqtag: 0x%x flags: 0x%x Device reset\n",
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "fcid: 0x%x lun: %llu hwq: %d mqtag: 0x%x flags: 0x%x Device reset\n",
rport->port_id, sc->device->lun, hwq, mqtag,
fnic_priv(sc)->flags);
- if (lp->state != LPORT_ST_READY || !(lp->link_up))
+ rdd_data = rport->dd_data;
+ tport = rdd_data->tport;
+ if (!tport) {
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Dev rst called after tport delete! rport fcid: 0x%x lun: %llu\n",
+ rport->port_id, sc->device->lun);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ goto fnic_device_reset_end;
+ }
+
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ atomic64_inc(&fnic_stats->misc_stats.iport_not_ready);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport NOT in READY state");
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ goto fnic_device_reset_end;
+ }
+
+ if ((tport->state != FDLS_TGT_STATE_READY) &&
+ (tport->state != FDLS_TGT_STATE_ADISC)) {
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "tport state: %d\n", tport->state);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
goto fnic_device_reset_end;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
/* Check if remote port up */
if (fc_remote_port_chkready(rport)) {
- atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
+ atomic64_inc(&fnic_stats->misc_stats.tport_not_ready);
goto fnic_device_reset_end;
}
@@ -2352,6 +2647,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
io_req->port_id = rport->port_id;
io_req->tag = mqtag;
fnic_priv(sc)->io_req = io_req;
+ io_req->tport = tport;
io_req->sc = sc;
if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL)
@@ -2366,7 +2662,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
fnic_priv(sc)->lr_status = FCPIO_INVALID_CODE;
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, "TAG %x\n", mqtag);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "TAG %x\n", mqtag);
/*
* issue the device reset, if enqueue failed, clean up the ioreq
@@ -2383,6 +2679,11 @@ int fnic_device_reset(struct scsi_cmnd *sc)
fnic_priv(sc)->flags |= FNIC_DEV_RST_ISSUED;
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ old_link_down_cnt = iport->fnic->link_down_cnt;
+ old_soft_reset_count = fnic->soft_reset_count;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
/*
* Wait on the local completion for LUN reset. The io_req may be
* freed while we wait since we hold no lock.
@@ -2390,14 +2691,39 @@ int fnic_device_reset(struct scsi_cmnd *sc)
wait_for_completion_timeout(&tm_done,
msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
+ /*
+ * Wake up can be due to the following reasons:
+ * 1) The device reset completed from target.
+ * 2) Device reset timed out.
+ * 3) A link-down/host_reset may have happened in between.
+ * 4) The device reset was aborted and io_req->dr_done was called.
+ */
+
+ exit_dr = 0;
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if ((old_link_down_cnt != fnic->link_down_cnt) ||
+ (fnic->reset_in_progress) ||
+ (fnic->soft_reset_count != old_soft_reset_count) ||
+ (iport->state != FNIC_IPORT_STATE_READY))
+ exit_dr = 1;
+
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
io_req = fnic_priv(sc)->io_req;
if (!io_req) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"io_req is null mqtag 0x%x sc 0x%p\n", mqtag, sc);
goto fnic_device_reset_end;
}
+
+ if (exit_dr) {
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Host reset called for fnic. Exit device reset\n");
+ io_req->dr_done = NULL;
+ goto fnic_device_reset_clean;
+ }
io_req->dr_done = NULL;
status = fnic_priv(sc)->lr_status;
@@ -2408,53 +2734,11 @@ int fnic_device_reset(struct scsi_cmnd *sc)
*/
if (status == FCPIO_INVALID_CODE) {
atomic64_inc(&reset_stats->device_reset_timeouts);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"Device reset timed out\n");
fnic_priv(sc)->flags |= FNIC_DEV_RST_TIMED_OUT;
- spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
int_to_scsilun(sc->device->lun, &fc_lun);
- /*
- * Issue abort and terminate on device reset request.
- * If q'ing of terminate fails, retry it after a delay.
- */
- while (1) {
- spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
- if (fnic_priv(sc)->flags & FNIC_DEV_RST_TERM_ISSUED) {
- spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- break;
- }
- spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- if (fnic_queue_abort_io_req(fnic,
- mqtag | FNIC_TAG_DEV_RST,
- FCPIO_ITMF_ABT_TASK_TERM,
- fc_lun.scsi_lun, io_req, hwq)) {
- wait_for_completion_timeout(&tm_done,
- msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
- } else {
- spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
- fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
- fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
- io_req->abts_done = &tm_done;
- spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "Abort and terminate issued on Device reset mqtag 0x%x sc 0x%p\n",
- mqtag, sc);
- break;
- }
- }
- while (1) {
- spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
- if (!(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
- spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
- wait_for_completion_timeout(&tm_done,
- msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
- break;
- } else {
- io_req = fnic_priv(sc)->io_req;
- io_req->abts_done = NULL;
- goto fnic_device_reset_clean;
- }
- }
+ goto fnic_device_reset_clean;
} else {
spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
}
@@ -2463,7 +2747,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
if (status != FCPIO_SUCCESS) {
spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
FNIC_SCSI_DBG(KERN_DEBUG,
- fnic->lport->host, fnic->fnic_num,
+ fnic->host, fnic->fnic_num,
"Device reset completed - failed\n");
io_req = fnic_priv(sc)->io_req;
goto fnic_device_reset_clean;
@@ -2479,9 +2763,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)
if (fnic_clean_pending_aborts(fnic, sc, new_sc)) {
spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
io_req = fnic_priv(sc)->io_req;
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "Device reset failed"
- " since could not abort all IOs\n");
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "Device reset failed: Cannot abort all IOs\n");
goto fnic_device_reset_clean;
}
@@ -2507,6 +2790,15 @@ fnic_device_reset_clean:
mempool_free(io_req, fnic->io_req_pool);
}
+ /*
+ * If a link event is seen while the LUN reset is outstanding, we need
+ * to complete the LUN reset here.
+ */
+ if (!new_sc) {
+ sc->result = DID_RESET << 16;
+ scsi_done(sc);
+ }
+
fnic_device_reset_end:
FNIC_TRACE(fnic_device_reset, sc->device->host->host_no, rq->tag, sc,
jiffies_to_msecs(jiffies - start_time),
@@ -2520,7 +2812,18 @@ fnic_device_reset_end:
mutex_unlock(&fnic->sgreset_mutex);
}
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
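+ /*
+ * Allow the remaining I/Os on this LUN a couple of extra one-second
+ * waits to drain; if any are still outstanding after that, report
+ * the reset as FAILED.
+ */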
+ while ((ret == SUCCESS) && fnic_count_lun_ioreqs(fnic, sc->device)) {
+ if (count >= 2) {
+ ret = FAILED;
+ break;
+ }
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Cannot clean up all IOs for the LUN\n");
+ schedule_timeout(msecs_to_jiffies(1000));
+ count++;
+ }
+
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"Returning from device reset %s\n",
(ret == SUCCESS) ?
"SUCCESS" : "FAILED");
@@ -2531,67 +2834,78 @@ fnic_device_reset_end:
return ret;
}
-/* Clean up all IOs, clean up libFC local port */
-int fnic_reset(struct Scsi_Host *shost)
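+/*
+ * Simulate a link flap: report link down to the FDLS layer, then report
+ * link up again if the physical link is still up.
+ */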
+static void fnic_post_flogo_linkflap(struct fnic *fnic)
+{
+ unsigned long flags;
+
+ fnic_fdls_link_status_change(fnic, 0);
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+
+ if (fnic->link_status) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ fnic_fdls_link_status_change(fnic, 1);
+ return;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
+/* Logout from all the targets and simulate link flap */
+void fnic_reset(struct Scsi_Host *shost)
{
- struct fc_lport *lp;
struct fnic *fnic;
- int ret = 0;
struct reset_stats *reset_stats;
- lp = shost_priv(shost);
- fnic = lport_priv(lp);
+ fnic = *((struct fnic **) shost_priv(shost));
reset_stats = &fnic->fnic_stats.reset_stats;
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "Issuing fnic reset\n");
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Issuing fnic reset\n");
atomic64_inc(&reset_stats->fnic_resets);
+ fnic_post_flogo_linkflap(fnic);
- /*
- * Reset local port, this will clean up libFC exchanges,
- * reset remote port sessions, and if link is up, begin flogi
- */
- ret = fc_lport_reset(lp);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Returning from fnic reset");
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
- "Returning from fnic reset with: %s\n",
- (ret == 0) ? "SUCCESS" : "FAILED");
+ atomic64_inc(&reset_stats->fnic_reset_completions);
+}
- if (ret == 0)
- atomic64_inc(&reset_stats->fnic_reset_completions);
- else
- atomic64_inc(&reset_stats->fnic_reset_failures);
+int fnic_issue_fc_host_lip(struct Scsi_Host *shost)
+{
+ int ret = 0;
+ struct fnic *fnic = *((struct fnic **) shost_priv(shost));
+
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FC host lip issued");
+ ret = fnic_host_reset(shost);
return ret;
}
-/*
- * SCSI Error handling calls driver's eh_host_reset if all prior
- * error handling levels return FAILED. If host reset completes
- * successfully, and if link is up, then Fabric login begins.
- *
- * Host Reset is the highest level of error recovery. If this fails, then
- * host is offlined by SCSI.
- *
- */
-int fnic_host_reset(struct scsi_cmnd *sc)
+int fnic_host_reset(struct Scsi_Host *shost)
{
- int ret;
+ int ret = SUCCESS;
unsigned long wait_host_tmo;
- struct Scsi_Host *shost = sc->device->host;
- struct fc_lport *lp = shost_priv(shost);
- struct fnic *fnic = lport_priv(lp);
+ struct fnic *fnic = *((struct fnic **) shost_priv(shost));
unsigned long flags;
+ struct fnic_iport_s *iport = &fnic->iport;
spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (!fnic->internal_reset_inprogress) {
- fnic->internal_reset_inprogress = true;
+ if (fnic->reset_in_progress == NOT_IN_PROGRESS) {
+ fnic->reset_in_progress = IN_PROGRESS;
} else {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "host reset in progress skipping another host reset\n");
- return SUCCESS;
+ wait_for_completion_timeout(&fnic->reset_completion_wait,
+ msecs_to_jiffies(10000));
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->reset_in_progress == IN_PROGRESS) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_WARNING, fnic->host, fnic->fnic_num,
+ "Firmware reset in progress. Skipping another host reset\n");
+ return SUCCESS;
+ }
+ fnic->reset_in_progress = IN_PROGRESS;
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
@@ -2600,140 +2914,34 @@ int fnic_host_reset(struct scsi_cmnd *sc)
* scsi-ml tries to send a TUR to every device if host reset is
* successful, so before returning to scsi, fabric should be up
*/
- ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
- if (ret == SUCCESS) {
+ fnic_reset(shost);
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ fnic->reset_in_progress = NOT_IN_PROGRESS;
+ complete(&fnic->reset_completion_wait);
+ fnic->soft_reset_count++;
+
+ /* wait till the link is up */
+ if (fnic->link_status) {
wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
ret = FAILED;
while (time_before(jiffies, wait_host_tmo)) {
- if ((lp->state == LPORT_ST_READY) &&
- (lp->link_up)) {
+ if (iport->state != FNIC_IPORT_STATE_READY
+ && fnic->link_status) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ ssleep(1);
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ } else {
ret = SUCCESS;
break;
}
- ssleep(1);
}
}
-
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- fnic->internal_reset_inprogress = false;
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return ret;
-}
-
-/*
- * This fxn is called from libFC when host is removed
- */
-void fnic_scsi_abort_io(struct fc_lport *lp)
-{
- int err = 0;
- unsigned long flags;
- enum fnic_state old_state;
- struct fnic *fnic = lport_priv(lp);
- DECLARE_COMPLETION_ONSTACK(remove_wait);
-
- /* Issue firmware reset for fnic, wait for reset to complete */
-retry_fw_reset:
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) &&
- fnic->link_events) {
- /* fw reset is in progress, poll for its completion */
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- schedule_timeout(msecs_to_jiffies(100));
- goto retry_fw_reset;
- }
-
- fnic->remove_wait = &remove_wait;
- old_state = fnic->state;
- fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
- fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- err = fnic_fw_reset_handler(fnic);
- if (err) {
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
- fnic->state = old_state;
- fnic->remove_wait = NULL;
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- return;
- }
-
- /* Wait for firmware reset to complete */
- wait_for_completion_timeout(&remove_wait,
- msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));
-
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- fnic->remove_wait = NULL;
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
- "fnic_scsi_abort_io %s\n",
- (fnic->state == FNIC_IN_ETH_MODE) ?
- "SUCCESS" : "FAILED");
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-
-}
-
-/*
- * This fxn called from libFC to clean up driver IO state on link down
- */
-void fnic_scsi_cleanup(struct fc_lport *lp)
-{
- unsigned long flags;
- enum fnic_state old_state;
- struct fnic *fnic = lport_priv(lp);
-
- /* issue fw reset */
-retry_fw_reset:
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
- /* fw reset is in progress, poll for its completion */
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- schedule_timeout(msecs_to_jiffies(100));
- goto retry_fw_reset;
- }
- old_state = fnic->state;
- fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
- fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-
- if (fnic_fw_reset_handler(fnic)) {
- spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
- fnic->state = old_state;
- spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- }
-
-}
-
-void fnic_empty_scsi_cleanup(struct fc_lport *lp)
-{
-}
-
-void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
-{
- struct fnic *fnic = lport_priv(lp);
-
- /* Non-zero sid, nothing to do */
- if (sid)
- goto call_fc_exch_mgr_reset;
-
- if (did) {
- fnic_rport_exch_reset(fnic, did);
- goto call_fc_exch_mgr_reset;
- }
-
- /*
- * sid = 0, did = 0
- * link down or device being removed
- */
- if (!fnic->in_remove)
- fnic_scsi_cleanup(lp);
- else
- fnic_scsi_abort_io(lp);
-
- /* call libFC exch mgr reset to reset its exchanges */
-call_fc_exch_mgr_reset:
- fc_exch_mgr_reset(lp, sid, did);
-
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "host reset return status: %d\n", ret);
+ return ret;
}
static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data)
@@ -2771,7 +2979,7 @@ static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data)
* Found IO that is still pending with firmware and
* belongs to the LUN that we are resetting
*/
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"hwq: %d tag: 0x%x Found IO in state: %s on lun\n",
hwq, tag,
fnic_ioreq_state_to_str(fnic_priv(sc)->state));
@@ -2804,8 +3012,81 @@ int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
}
/* walk again to check, if IOs are still pending in fw */
- scsi_host_busy_iter(fnic->lport->host,
+ scsi_host_busy_iter(fnic->host,
fnic_abts_pending_iter, &iter_data);
return iter_data.ret;
}
+
+/*
+ * SCSI Error handling calls driver's eh_host_reset if all prior
+ * error handling levels return FAILED. If host reset completes
+ * successfully, and if link is up, then Fabric login begins.
+ *
+ * Host Reset is the highest level of error recovery. If this fails, then
+ * host is offlined by SCSI.
+ *
+ */
+int fnic_eh_host_reset_handler(struct scsi_cmnd *sc)
+{
+ int ret = 0;
+ struct Scsi_Host *shost = sc->device->host;
+ struct fnic *fnic = *((struct fnic **) shost_priv(shost));
+
+ FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "SCSI error handling: fnic host reset");
+
+ ret = fnic_host_reset(shost);
+ return ret;
+}
+
+
+void fnic_scsi_fcpio_reset(struct fnic *fnic)
+{
+ unsigned long flags;
+ enum fnic_state old_state;
+ struct fnic_iport_s *iport = &fnic->iport;
+ DECLARE_COMPLETION_ONSTACK(fw_reset_done);
+ int time_remain;
+
+ /* issue fw reset */
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
+ /* a fw reset is already in progress; log it and return */
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fnic is in unexpected state: %d for fw_reset\n",
+ fnic->state);
+ return;
+ }
+
+ old_state = fnic->state;
+ fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
+
+ fnic_update_mac_locked(fnic, iport->hwmac);
+ fnic->fw_reset_done = &fw_reset_done;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Issuing fw reset\n");
+ if (fnic_fw_reset_handler(fnic)) {
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
+ fnic->state = old_state;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ } else {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Waiting for fw completion\n");
+ time_remain = wait_for_completion_timeout(&fw_reset_done,
+ msecs_to_jiffies(FNIC_FW_RESET_TIMEOUT));
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Woken up after fw completion timeout\n");
+ if (time_remain == 0) {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FW reset completion timed out after %d ms)\n",
+ FNIC_FW_RESET_TIMEOUT);
+ atomic64_inc(&fnic->fnic_stats.reset_stats.fw_reset_timeouts);
+ }
+ }
+ fnic->fw_reset_done = NULL;
+}
diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h
index 9d7f98c452dd..8ddd20401a59 100644
--- a/drivers/scsi/fnic/fnic_stats.h
+++ b/drivers/scsi/fnic/fnic_stats.h
@@ -3,6 +3,7 @@
#ifndef _FNIC_STATS_H_
#define _FNIC_STATS_H_
#define FNIC_MQ_MAX_QUEUES 64
+#include <scsi/scsi_transport_fc.h>
struct stats_timestamps {
struct timespec64 last_reset_time;
@@ -63,6 +64,7 @@ struct reset_stats {
atomic64_t fw_resets;
atomic64_t fw_reset_completions;
atomic64_t fw_reset_failures;
+ atomic64_t fw_reset_timeouts;
atomic64_t fnic_resets;
atomic64_t fnic_reset_completions;
atomic64_t fnic_reset_failures;
@@ -102,10 +104,51 @@ struct misc_stats {
atomic64_t no_icmnd_itmf_cmpls;
atomic64_t check_condition;
atomic64_t queue_fulls;
- atomic64_t rport_not_ready;
+ atomic64_t tport_not_ready;
+ atomic64_t iport_not_ready;
atomic64_t frame_errors;
atomic64_t current_port_speed;
atomic64_t intx_dummy;
+ atomic64_t port_speed_in_mbps;
+};
+
+struct fnic_iport_stats {
+ atomic64_t num_linkdn;
+ atomic64_t num_linkup;
+ atomic64_t link_failure_count;
+ atomic64_t num_rscns;
+ atomic64_t rscn_redisc;
+ atomic64_t rscn_not_redisc;
+ atomic64_t frame_err;
+ atomic64_t num_rnid;
+ atomic64_t fabric_flogi_sent;
+ atomic64_t fabric_flogi_ls_accepts;
+ atomic64_t fabric_flogi_ls_rejects;
+ atomic64_t fabric_flogi_misc_rejects;
+ atomic64_t fabric_plogi_sent;
+ atomic64_t fabric_plogi_ls_accepts;
+ atomic64_t fabric_plogi_ls_rejects;
+ atomic64_t fabric_plogi_misc_rejects;
+ atomic64_t fabric_scr_sent;
+ atomic64_t fabric_scr_ls_accepts;
+ atomic64_t fabric_scr_ls_rejects;
+ atomic64_t fabric_scr_misc_rejects;
+ atomic64_t fabric_logo_sent;
+ atomic64_t tport_alive;
+ atomic64_t tport_plogi_sent;
+ atomic64_t tport_plogi_ls_accepts;
+ atomic64_t tport_plogi_ls_rejects;
+ atomic64_t tport_plogi_misc_rejects;
+ atomic64_t tport_prli_sent;
+ atomic64_t tport_prli_ls_accepts;
+ atomic64_t tport_prli_ls_rejects;
+ atomic64_t tport_prli_misc_rejects;
+ atomic64_t tport_adisc_sent;
+ atomic64_t tport_adisc_ls_accepts;
+ atomic64_t tport_adisc_ls_rejects;
+ atomic64_t tport_logo_sent;
+ atomic64_t unsupported_frames_ls_rejects;
+ atomic64_t unsupported_frames_dropped;
};
struct fnic_stats {
@@ -116,6 +159,7 @@ struct fnic_stats {
struct reset_stats reset_stats;
struct fw_stats fw_stats;
struct vlan_stats vlan_stats;
+ struct fc_host_statistics host_stats;
struct misc_stats misc_stats;
};
@@ -127,6 +171,5 @@ struct stats_debug_info {
};
int fnic_get_stats_data(struct stats_debug_info *, struct fnic_stats *);
-void fnic_stats_debugfs_init(struct fnic *);
-void fnic_stats_debugfs_remove(struct fnic *);
+const char *fnic_role_to_str(unsigned int role);
#endif /* _FNIC_STATS_H_ */
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index aaa4ea02fb7c..cdc6b12b1ec2 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -8,6 +8,7 @@
#include <linux/kallsyms.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
+#include <scsi/scsi_transport_fc.h>
#include "fnic_io.h"
#include "fnic.h"
@@ -29,6 +30,17 @@ int fnic_fc_tracing_enabled = 1;
int fnic_fc_trace_cleared = 1;
static DEFINE_SPINLOCK(fnic_fc_trace_lock);
+static const char * const fnic_role_str[] = {
+ [FNIC_ROLE_FCP_INITIATOR] = "FCP_Initiator",
+};
+
+const char *fnic_role_to_str(unsigned int role)
+{
+ if (role >= ARRAY_SIZE(fnic_role_str) || !fnic_role_str[role])
+ return "Unknown";
+
+ return fnic_role_str[role];
+}
/*
* fnic_trace_get_buf - Give buffer pointer to user to fill up trace information
@@ -423,7 +435,8 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
"Number of Check Conditions encountered: %lld\n"
"Number of QUEUE Fulls: %lld\n"
"Number of rport not ready: %lld\n"
- "Number of receive frame errors: %lld\n",
+ "Number of receive frame errors: %lld\n"
+ "Port speed (in Mbps): %lld\n",
(u64)stats->misc_stats.last_isr_time,
(s64)val1.tv_sec, val1.tv_nsec,
(u64)stats->misc_stats.last_ack_time,
@@ -446,18 +459,68 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
(u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls),
(u64)atomic64_read(&stats->misc_stats.check_condition),
(u64)atomic64_read(&stats->misc_stats.queue_fulls),
- (u64)atomic64_read(&stats->misc_stats.rport_not_ready),
- (u64)atomic64_read(&stats->misc_stats.frame_errors));
-
- len += scnprintf(debug->debug_buffer + len, buf_size - len,
- "Firmware reported port speed: %llu\n",
- (u64)atomic64_read(
- &stats->misc_stats.current_port_speed));
+ (u64)atomic64_read(&stats->misc_stats.tport_not_ready),
+ (u64)atomic64_read(&stats->misc_stats.frame_errors),
+ (u64)atomic64_read(&stats->misc_stats.port_speed_in_mbps));
return len;
}
+int fnic_get_debug_info(struct stats_debug_info *info, struct fnic *fnic)
+{
+ struct fnic_iport_s *iport = &fnic->iport;
+ int buf_size = info->buf_size;
+ int len = info->buffer_len;
+ struct fnic_tport_s *tport, *next;
+ unsigned long flags;
+
+ len += snprintf(info->debug_buffer + len, buf_size - len,
+ "------------------------------------------\n"
+ "\t\t Debug Info\n"
+ "------------------------------------------\n");
+ len += snprintf(info->debug_buffer + len, buf_size - len,
+ "fnic Name:%s number:%d Role:%s State:%s\n",
+ fnic->name, fnic->fnic_num,
+ fnic_role_to_str(fnic->role),
+ fnic_state_to_str(fnic->state));
+ len +=
+ snprintf(info->debug_buffer + len, buf_size - len,
+ "iport State:%d Flags:0x%x vlan_id:%d fcid:0x%x\n",
+ iport->state, iport->flags, iport->vlan_id, iport->fcid);
+ len +=
+ snprintf(info->debug_buffer + len, buf_size - len,
+ "usefip:%d fip_state:%d fip_flogi_retry:%d\n",
+ iport->usefip, iport->fip.state, iport->fip.flogi_retry);
+ len +=
+ snprintf(info->debug_buffer + len, buf_size - len,
+ "fpma %02x:%02x:%02x:%02x:%02x:%02x",
+ iport->fpma[5], iport->fpma[4], iport->fpma[3],
+ iport->fpma[2], iport->fpma[1], iport->fpma[0]);
+ len +=
+ snprintf(info->debug_buffer + len, buf_size - len,
+ "fcfmac %02x:%02x:%02x:%02x:%02x:%02x\n",
+ iport->fcfmac[5], iport->fcfmac[4], iport->fcfmac[3],
+ iport->fcfmac[2], iport->fcfmac[1], iport->fcfmac[0]);
+ len +=
+ snprintf(info->debug_buffer + len, buf_size - len,
+ "fabric state:%d flags:0x%x retry_counter:%d e_d_tov:%d r_a_tov:%d\n",
+ iport->fabric.state, iport->fabric.flags,
+ iport->fabric.retry_counter, iport->e_d_tov,
+ iport->r_a_tov);
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ list_for_each_entry_safe(tport, next, &iport->tport_list, links) {
+ len += snprintf(info->debug_buffer + len, buf_size - len,
+ "tport fcid:0x%x state:%d flags:0x%x inflight:%d retry_counter:%d\n",
+ tport->fcid, tport->state, tport->flags,
+ atomic_read(&tport->in_flight),
+ tport->retry_counter);
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return len;
+}
+
/*
* fnic_trace_buf_init - Initialize fnic trace buffer logging facility
*
@@ -485,8 +548,7 @@ int fnic_trace_buf_init(void)
}
fnic_trace_entries.page_offset =
- vmalloc(array_size(fnic_max_trace_entries,
- sizeof(unsigned long)));
+ vcalloc(fnic_max_trace_entries, sizeof(unsigned long));
if (!fnic_trace_entries.page_offset) {
printk(KERN_ERR PFX "Failed to allocate memory for"
" page_offset\n");
@@ -497,8 +559,6 @@ int fnic_trace_buf_init(void)
err = -ENOMEM;
goto err_fnic_trace_buf_init;
}
- memset((void *)fnic_trace_entries.page_offset, 0,
- (fnic_max_trace_entries * sizeof(unsigned long)));
fnic_trace_entries.wr_idx = fnic_trace_entries.rd_idx = 0;
fnic_buf_head = fnic_trace_buf_p;
@@ -559,8 +619,7 @@ int fnic_fc_trace_init(void)
fc_trace_max_entries = (fnic_fc_trace_max_pages * PAGE_SIZE)/
FC_TRC_SIZE_BYTES;
fnic_fc_ctlr_trace_buf_p =
- (unsigned long)vmalloc(array_size(PAGE_SIZE,
- fnic_fc_trace_max_pages));
+ (unsigned long)vcalloc(fnic_fc_trace_max_pages, PAGE_SIZE);
if (!fnic_fc_ctlr_trace_buf_p) {
pr_err("fnic: Failed to allocate memory for "
"FC Control Trace Buf\n");
@@ -568,13 +627,9 @@ int fnic_fc_trace_init(void)
goto err_fnic_fc_ctlr_trace_buf_init;
}
- memset((void *)fnic_fc_ctlr_trace_buf_p, 0,
- fnic_fc_trace_max_pages * PAGE_SIZE);
-
/* Allocate memory for page offset */
fc_trace_entries.page_offset =
- vmalloc(array_size(fc_trace_max_entries,
- sizeof(unsigned long)));
+ vcalloc(fc_trace_max_entries, sizeof(unsigned long));
if (!fc_trace_entries.page_offset) {
pr_err("fnic:Failed to allocate memory for page_offset\n");
if (fnic_fc_ctlr_trace_buf_p) {
@@ -585,8 +640,6 @@ int fnic_fc_trace_init(void)
err = -ENOMEM;
goto err_fnic_fc_ctlr_trace_buf_init;
}
- memset((void *)fc_trace_entries.page_offset, 0,
- (fc_trace_max_entries * sizeof(unsigned long)));
fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0;
fc_trace_buf_head = fnic_fc_ctlr_trace_buf_p;
@@ -688,7 +741,7 @@ int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
*/
if (frame_type == FNIC_FC_RECV) {
eth_fcoe_hdr_len = sizeof(struct ethhdr) +
- sizeof(struct fcoe_hdr);
+ sizeof(struct fcoe_hdr);
memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len);
/* Copy the rest of data frame */
memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame,
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index a44768bceb9a..e17f5d8226bf 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -9,7 +9,6 @@
#include <linux/acpi.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-pci.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/dmapool.h>
@@ -634,8 +633,7 @@ extern struct dentry *hisi_sas_debugfs_dir;
extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba);
extern int hisi_sas_alloc(struct hisi_hba *hisi_hba);
extern void hisi_sas_free(struct hisi_hba *hisi_hba);
-extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis,
- int direction);
+extern u8 hisi_sas_get_ata_protocol(struct sas_task *task);
extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port);
extern void hisi_sas_sata_done(struct sas_task *task,
struct hisi_sas_slot *slot);
@@ -644,9 +642,8 @@ extern int hisi_sas_probe(struct platform_device *pdev,
const struct hisi_sas_hw *ops);
extern void hisi_sas_remove(struct platform_device *pdev);
-int hisi_sas_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim);
-extern int hisi_sas_slave_alloc(struct scsi_device *sdev);
+int hisi_sas_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim);
+extern int hisi_sas_sdev_init(struct scsi_device *sdev);
extern int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time);
extern void hisi_sas_scan_start(struct Scsi_Host *shost);
extern int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 53cb15f6714b..d3981b677931 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -21,8 +21,32 @@ struct hisi_sas_internal_abort_data {
bool rst_ha_timeout; /* reset the HA for timeout */
};
-u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
+static u8 hisi_sas_get_ata_protocol_from_tf(struct ata_queued_cmd *qc)
{
+ if (!qc)
+ return HISI_SAS_SATA_PROTOCOL_PIO;
+
+ switch (qc->tf.protocol) {
+ case ATA_PROT_NODATA:
+ return HISI_SAS_SATA_PROTOCOL_NONDATA;
+ case ATA_PROT_PIO:
+ return HISI_SAS_SATA_PROTOCOL_PIO;
+ case ATA_PROT_DMA:
+ return HISI_SAS_SATA_PROTOCOL_DMA;
+ case ATA_PROT_NCQ_NODATA:
+ case ATA_PROT_NCQ:
+ return HISI_SAS_SATA_PROTOCOL_FPDMA;
+ default:
+ return HISI_SAS_SATA_PROTOCOL_PIO;
+ }
+}
+
+u8 hisi_sas_get_ata_protocol(struct sas_task *task)
+{
+ struct host_to_dev_fis *fis = &task->ata_task.fis;
+ struct ata_queued_cmd *qc = task->uldd_task;
+ int direction = task->data_dir;
+
switch (fis->command) {
case ATA_CMD_FPDMA_WRITE:
case ATA_CMD_FPDMA_READ:
@@ -93,7 +117,7 @@ u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
if (direction == DMA_NONE)
return HISI_SAS_SATA_PROTOCOL_NONDATA;
- return HISI_SAS_SATA_PROTOCOL_PIO;
+ return hisi_sas_get_ata_protocol_from_tf(qc);
}
}
}
@@ -805,13 +829,13 @@ static int hisi_sas_init_device(struct domain_device *device)
return rc;
}
-int hisi_sas_slave_alloc(struct scsi_device *sdev)
+int hisi_sas_sdev_init(struct scsi_device *sdev)
{
struct domain_device *ddev = sdev_to_domain_dev(sdev);
struct hisi_sas_device *sas_dev = ddev->lldd_dev;
int rc;
- rc = sas_slave_alloc(sdev);
+ rc = sas_sdev_init(sdev);
if (rc)
return rc;
@@ -821,7 +845,7 @@ int hisi_sas_slave_alloc(struct scsi_device *sdev)
sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_sas_slave_alloc);
+EXPORT_SYMBOL_GPL(hisi_sas_sdev_init);
static int hisi_sas_dev_found(struct domain_device *device)
{
@@ -868,11 +892,10 @@ err_out:
return rc;
}
-int hisi_sas_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+int hisi_sas_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct domain_device *dev = sdev_to_domain_dev(sdev);
- int ret = sas_device_configure(sdev, lim);
+ int ret = sas_sdev_configure(sdev, lim);
if (ret)
return ret;
@@ -881,7 +904,7 @@ int hisi_sas_device_configure(struct scsi_device *sdev,
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_sas_device_configure);
+EXPORT_SYMBOL_GPL(hisi_sas_sdev_configure);
void hisi_sas_scan_start(struct Scsi_Host *shost)
{
@@ -912,8 +935,28 @@ static void hisi_sas_phyup_work_common(struct work_struct *work,
container_of(work, typeof(*phy), works[event]);
struct hisi_hba *hisi_hba = phy->hisi_hba;
struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct asd_sas_port *sas_port = sas_phy->port;
+ struct hisi_sas_port *port = phy->port;
+ struct device *dev = hisi_hba->dev;
+ struct domain_device *port_dev;
int phy_no = sas_phy->id;
+ if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) &&
+ sas_port && port && (port->id != phy->port_id)) {
+ dev_info(dev, "phy%d's hw port id changed from %d to %llu\n",
+ phy_no, port->id, phy->port_id);
+ port_dev = sas_port->port_dev;
+ if (port_dev && !dev_is_expander(port_dev->dev_type)) {
+ /*
+ * Set the device state to gone to block
+ * sending IO to the device.
+ */
+ set_bit(SAS_DEV_GONE, &port_dev->state);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
+ return;
+ }
+ }
+
phy->wait_phyup_cnt = 0;
if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
@@ -1525,7 +1568,7 @@ void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
* which is also only used for v1/v2 hw to skip it for v3 hw
*/
if (hisi_hba->hw->sht)
- del_timer_sync(&hisi_hba->timer);
+ timer_delete_sync(&hisi_hba->timer);
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
@@ -1842,33 +1885,14 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
}
hisi_sas_dereg_device(hisi_hba, device);
- rc = hisi_sas_debug_I_T_nexus_reset(device);
- if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) {
- struct sas_phy *local_phy;
-
+ if (dev_is_sata(device)) {
rc = hisi_sas_softreset_ata_disk(device);
- switch (rc) {
- case -ECOMM:
- rc = -ENODEV;
- break;
- case TMF_RESP_FUNC_FAILED:
- case -EMSGSIZE:
- case -EIO:
- local_phy = sas_get_local_phy(device);
- rc = sas_phy_enable(local_phy, 0);
- if (!rc) {
- local_phy->enabled = 0;
- dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset fail (%d)\n",
- SAS_ADDR(device->sas_addr), rc);
- rc = -ENODEV;
- }
- sas_put_local_phy(local_phy);
- break;
- default:
- break;
- }
+ if (rc == TMF_RESP_FUNC_FAILED)
+ dev_err(dev, "ata disk %016llx reset (%d)\n",
+ SAS_ADDR(device->sas_addr), rc);
}
+ rc = hisi_sas_debug_I_T_nexus_reset(device);
if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
hisi_sas_release_task(hisi_hba, device);
@@ -2340,7 +2364,7 @@ void hisi_sas_free(struct hisi_hba *hisi_hba)
for (i = 0; i < hisi_hba->n_phy; i++) {
struct hisi_sas_phy *phy = &hisi_hba->phy[i];
- del_timer_sync(&phy->timer);
+ timer_delete_sync(&phy->timer);
}
if (hisi_hba->wq)
@@ -2602,7 +2626,7 @@ void hisi_sas_remove(struct platform_device *pdev)
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct Scsi_Host *shost = sha->shost;
- del_timer_sync(&hisi_hba->timer);
+ timer_delete_sync(&hisi_hba->timer);
sas_unregister_ha(sha);
sas_remove_host(shost);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index c3e571be2222..6621d633b2cc 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1753,11 +1753,11 @@ static int check_fw_info_v1_hw(struct hisi_hba *hisi_hba)
static const struct scsi_host_template sht_v1_hw = {
LIBSAS_SHT_BASE_NO_SLAVE_INIT
- .device_configure = hisi_sas_device_configure,
+ .sdev_configure = hisi_sas_sdev_configure,
.scan_finished = hisi_sas_scan_finished,
.scan_start = hisi_sas_scan_start,
.sg_tablesize = HISI_SAS_SGE_PAGE_CNT,
- .slave_alloc = hisi_sas_slave_alloc,
+ .sdev_init = hisi_sas_sdev_init,
.shost_groups = host_v1_hw_groups,
.host_reset = hisi_sas_host_reset,
};
@@ -1806,7 +1806,7 @@ static struct platform_driver hisi_sas_v1_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = sas_v1_of_match,
- .acpi_match_table = ACPI_PTR(sas_v1_acpi_match),
+ .acpi_match_table = sas_v1_acpi_match,
},
};
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 1a62b5d15eca..1e9830940f84 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -2372,18 +2372,18 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
case STAT_IO_COMPLETE:
/* internal abort command complete */
ts->stat = TMF_RESP_FUNC_SUCC;
- del_timer_sync(&slot->internal_abort_timer);
+ timer_delete_sync(&slot->internal_abort_timer);
goto out;
case STAT_IO_NO_DEVICE:
ts->stat = TMF_RESP_FUNC_COMPLETE;
- del_timer_sync(&slot->internal_abort_timer);
+ timer_delete_sync(&slot->internal_abort_timer);
goto out;
case STAT_IO_NOT_VALID:
/* abort single io, controller don't find
* the io need to abort
*/
ts->stat = TMF_RESP_FUNC_FAILED;
- del_timer_sync(&slot->internal_abort_timer);
+ timer_delete_sync(&slot->internal_abort_timer);
goto out;
default:
break;
@@ -2501,6 +2501,7 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
struct sas_ata_task *ata_task = &task->ata_task;
struct sas_tmf_task *tmf = slot->tmf;
+ int phy_id;
u8 *buf_cmd;
int has_data = 0, hdr_tag = 0;
u32 dw0, dw1 = 0, dw2 = 0;
@@ -2508,10 +2509,14 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
/* create header */
/* dw0 */
dw0 = port->id << CMD_HDR_PORT_OFF;
- if (parent_dev && dev_is_expander(parent_dev->dev_type))
+ if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
dw0 |= 3 << CMD_HDR_CMD_OFF;
- else
+ } else {
+ phy_id = device->phy->identify.phy_identifier;
+ dw0 |= (1U << phy_id) << CMD_HDR_PHY_ID_OFF;
+ dw0 |= CMD_HDR_FORCE_PHY_MSK;
dw0 |= 4 << CMD_HDR_CMD_OFF;
+ }
if (tmf && ata_task->force_phy) {
dw0 |= CMD_HDR_FORCE_PHY_MSK;
@@ -2538,9 +2543,7 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
(task->ata_task.fis.control & ATA_SRST))
dw1 |= 1 << CMD_HDR_RESET_OFF;
- dw1 |= (hisi_sas_get_ata_protocol(
- &task->ata_task.fis, task->data_dir))
- << CMD_HDR_FRAME_TYPE_OFF;
+ dw1 |= (hisi_sas_get_ata_protocol(task)) << CMD_HDR_FRAME_TYPE_OFF;
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
hdr->dw1 = cpu_to_le32(dw1);
@@ -2656,7 +2659,7 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
if (is_sata_phy_v2_hw(hisi_hba, phy_no))
goto end;
- del_timer(&phy->timer);
+ timer_delete(&phy->timer);
if (phy_no == 8) {
u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);
@@ -2732,7 +2735,7 @@ static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
struct hisi_sas_port *port = phy->port;
struct device *dev = hisi_hba->dev;
- del_timer(&phy->timer);
+ timer_delete(&phy->timer);
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
@@ -2746,7 +2749,7 @@ static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
if (port && !get_wideport_bitmap_v2_hw(hisi_hba, port->id))
if (!check_any_wideports_v2_hw(hisi_hba) &&
timer_pending(&hisi_hba->timer))
- del_timer(&hisi_hba->timer);
+ timer_delete(&hisi_hba->timer);
txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
@@ -3206,7 +3209,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
int phy_no, offset;
- del_timer(&phy->timer);
+ timer_delete(&phy->timer);
phy_no = sas_phy->id;
initial_fis = &hisi_hba->initial_fis[phy_no];
@@ -3585,11 +3588,11 @@ static int check_fw_info_v2_hw(struct hisi_hba *hisi_hba)
static const struct scsi_host_template sht_v2_hw = {
LIBSAS_SHT_BASE_NO_SLAVE_INIT
- .device_configure = hisi_sas_device_configure,
+ .sdev_configure = hisi_sas_sdev_configure,
.scan_finished = hisi_sas_scan_finished,
.scan_start = hisi_sas_scan_start,
.sg_tablesize = HISI_SAS_SGE_PAGE_CNT,
- .slave_alloc = hisi_sas_slave_alloc,
+ .sdev_init = hisi_sas_sdev_init,
.shost_groups = host_v2_hw_groups,
.sdev_groups = sdev_groups_v2_hw,
.host_reset = hisi_sas_host_reset,
@@ -3653,7 +3656,7 @@ static struct platform_driver hisi_sas_v2_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = sas_v2_of_match,
- .acpi_match_table = ACPI_PTR(sas_v2_acpi_match),
+ .acpi_match_table = sas_v2_acpi_match,
},
};
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 5db931663ae4..08dac9ae2f10 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -359,6 +359,10 @@
#define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF)
#define CMD_HDR_TLR_CTRL_OFF 6
#define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF)
+#define CMD_HDR_PHY_ID_OFF 8
+#define CMD_HDR_PHY_ID_MSK (0x1ff << CMD_HDR_PHY_ID_OFF)
+#define CMD_HDR_FORCE_PHY_OFF 17
+#define CMD_HDR_FORCE_PHY_MSK (0x1U << CMD_HDR_FORCE_PHY_OFF)
#define CMD_HDR_PORT_OFF 18
#define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF)
#define CMD_HDR_PRIORITY_OFF 27
@@ -1429,15 +1433,21 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
+ int phy_id;
u8 *buf_cmd;
int has_data = 0, hdr_tag = 0;
u32 dw1 = 0, dw2 = 0;
hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
- if (parent_dev && dev_is_expander(parent_dev->dev_type))
+ if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
- else
+ } else {
+ phy_id = device->phy->identify.phy_identifier;
+ hdr->dw0 |= cpu_to_le32((1U << phy_id)
+ << CMD_HDR_PHY_ID_OFF);
+ hdr->dw0 |= CMD_HDR_FORCE_PHY_MSK;
hdr->dw0 |= cpu_to_le32(4U << CMD_HDR_CMD_OFF);
+ }
switch (task->data_dir) {
case DMA_TO_DEVICE:
@@ -1456,9 +1466,7 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
(task->ata_task.fis.control & ATA_SRST))
dw1 |= 1 << CMD_HDR_RESET_OFF;
- dw1 |= (hisi_sas_get_ata_protocol(
- &task->ata_task.fis, task->data_dir))
- << CMD_HDR_FRAME_TYPE_OFF;
+ dw1 |= (hisi_sas_get_ata_protocol(task)) << CMD_HDR_FRAME_TYPE_OFF;
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
if (FIS_CMD_IS_UNCONSTRAINED(task->ata_task.fis))
@@ -1611,7 +1619,7 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
phy->port_id = port_id;
spin_lock(&phy->lock);
/* Delete timer and set phy_attached atomically */
- del_timer(&phy->timer);
+ timer_delete(&phy->timer);
phy->phy_attached = 1;
spin_unlock(&phy->lock);
@@ -1645,7 +1653,7 @@ static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
atomic_inc(&phy->down_cnt);
- del_timer(&phy->timer);
+ timer_delete(&phy->timer);
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
@@ -2908,12 +2916,12 @@ static ssize_t iopoll_q_cnt_v3_hw_show(struct device *dev,
}
static DEVICE_ATTR_RO(iopoll_q_cnt_v3_hw);
-static int device_configure_v3_hw(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int sdev_configure_v3_hw(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct Scsi_Host *shost = dev_to_shost(&sdev->sdev_gendev);
struct hisi_hba *hisi_hba = shost_priv(shost);
- int ret = hisi_sas_device_configure(sdev, lim);
+ int ret = hisi_sas_sdev_configure(sdev, lim);
struct device *dev = hisi_hba->dev;
if (ret)
@@ -3328,24 +3336,24 @@ static void hisi_sas_map_queues(struct Scsi_Host *shost)
if (i == HCTX_TYPE_POLL)
blk_mq_map_queues(qmap);
else
- blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev,
- BASE_VECTORS_V3_HW);
+ blk_mq_map_hw_queues(qmap, hisi_hba->dev,
+ BASE_VECTORS_V3_HW);
qoff += qmap->nr_queues;
}
}
static const struct scsi_host_template sht_v3_hw = {
LIBSAS_SHT_BASE_NO_SLAVE_INIT
- .device_configure = device_configure_v3_hw,
+ .sdev_configure = sdev_configure_v3_hw,
.scan_finished = hisi_sas_scan_finished,
.scan_start = hisi_sas_scan_start,
.map_queues = hisi_sas_map_queues,
.sg_tablesize = HISI_SAS_SGE_PAGE_CNT,
.sg_prot_tablesize = HISI_SAS_SGE_PAGE_CNT,
- .slave_alloc = hisi_sas_slave_alloc,
+ .sdev_init = hisi_sas_sdev_init,
.shost_groups = host_v3_hw_groups,
.sdev_groups = sdev_groups_v3_hw,
- .tag_alloc_policy = BLK_TAG_ALLOC_RR,
+ .tag_alloc_policy_rr = true,
.host_reset = hisi_sas_host_reset,
.host_tagset = 1,
.mq_poll = queue_complete_v3_hw,
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index e021f1106bea..cc5d05dc395c 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -473,10 +473,17 @@ struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *sht, int priv
else
shost->max_sectors = SCSI_DEFAULT_MAX_SECTORS;
- if (sht->max_segment_size)
- shost->max_segment_size = sht->max_segment_size;
- else
- shost->max_segment_size = BLK_MAX_SEGMENT_SIZE;
+ shost->virt_boundary_mask = sht->virt_boundary_mask;
+ if (shost->virt_boundary_mask) {
+ WARN_ON_ONCE(sht->max_segment_size &&
+ sht->max_segment_size != UINT_MAX);
+ shost->max_segment_size = UINT_MAX;
+ } else {
+ if (sht->max_segment_size)
+ shost->max_segment_size = sht->max_segment_size;
+ else
+ shost->max_segment_size = BLK_MAX_SEGMENT_SIZE;
+ }
/* 32-byte (dword) is a common minimum for HBAs. */
if (sht->dma_alignment)
@@ -492,9 +499,6 @@ struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *sht, int priv
else
shost->dma_boundary = 0xffffffff;
- if (sht->virt_boundary_mask)
- shost->virt_boundary_mask = sht->virt_boundary_mask;
-
device_initialize(&shost->shost_gendev);
dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
shost->shost_gendev.bus = &scsi_bus_type;
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 0c49414c1f35..c73a71ac3c29 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -283,9 +283,10 @@ static int hpsa_scan_finished(struct Scsi_Host *sh,
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
-static int hpsa_slave_alloc(struct scsi_device *sdev);
-static int hpsa_slave_configure(struct scsi_device *sdev);
-static void hpsa_slave_destroy(struct scsi_device *sdev);
+static int hpsa_sdev_init(struct scsi_device *sdev);
+static int hpsa_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim);
+static void hpsa_sdev_destroy(struct scsi_device *sdev);
static void hpsa_update_scsi_devices(struct ctlr_info *h);
static int check_for_unit_attention(struct ctlr_info *h,
@@ -452,17 +453,13 @@ static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int status, len;
+ int status;
struct ctlr_info *h;
struct Scsi_Host *shost = class_to_shost(dev);
- char tmpbuf[10];
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
- len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
- strncpy(tmpbuf, buf, len);
- tmpbuf[len] = '\0';
- if (sscanf(tmpbuf, "%d", &status) != 1)
+ if (kstrtoint(buf, 10, &status))
return -EINVAL;
h = shost_to_hba(shost);
h->acciopath_status = !!status;
@@ -476,17 +473,13 @@ static ssize_t host_store_raid_offload_debug(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int debug_level, len;
+ int debug_level;
struct ctlr_info *h;
struct Scsi_Host *shost = class_to_shost(dev);
- char tmpbuf[10];
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
- len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
- strncpy(tmpbuf, buf, len);
- tmpbuf[len] = '\0';
- if (sscanf(tmpbuf, "%d", &debug_level) != 1)
+ if (kstrtoint(buf, 10, &debug_level))
return -EINVAL;
if (debug_level < 0)
debug_level = 0;
@@ -978,9 +971,9 @@ static const struct scsi_host_template hpsa_driver_template = {
.this_id = -1,
.eh_device_reset_handler = hpsa_eh_device_reset_handler,
.ioctl = hpsa_ioctl,
- .slave_alloc = hpsa_slave_alloc,
- .slave_configure = hpsa_slave_configure,
- .slave_destroy = hpsa_slave_destroy,
+ .sdev_init = hpsa_sdev_init,
+ .sdev_configure = hpsa_sdev_configure,
+ .sdev_destroy = hpsa_sdev_destroy,
#ifdef CONFIG_COMPAT
.compat_ioctl = hpsa_compat_ioctl,
#endif
@@ -2107,7 +2100,7 @@ static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
return NULL;
}
-static int hpsa_slave_alloc(struct scsi_device *sdev)
+static int hpsa_sdev_init(struct scsi_device *sdev)
{
struct hpsa_scsi_dev_t *sd = NULL;
unsigned long flags;
@@ -2142,7 +2135,8 @@ static int hpsa_slave_alloc(struct scsi_device *sdev)
/* configure scsi device based on internal per-device structure */
#define CTLR_TIMEOUT (120 * HZ)
-static int hpsa_slave_configure(struct scsi_device *sdev)
+static int hpsa_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct hpsa_scsi_dev_t *sd;
int queue_depth;
@@ -2173,7 +2167,7 @@ static int hpsa_slave_configure(struct scsi_device *sdev)
return 0;
}
-static void hpsa_slave_destroy(struct scsi_device *sdev)
+static void hpsa_sdev_destroy(struct scsi_device *sdev)
{
struct hpsa_scsi_dev_t *hdev = NULL;
@@ -7236,8 +7230,7 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
static void init_driver_version(char *driver_version, int len)
{
- memset(driver_version, 0, len);
- strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
+ strscpy_pad(driver_version, HPSA " " HPSA_DRIVER_VERSION, len);
}
static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index e889f268601b..21f1d9871a33 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1151,8 +1151,8 @@ static struct attribute *hptiop_host_attrs[] = {
ATTRIBUTE_GROUPS(hptiop_host);
-static int hptiop_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int hptiop_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
if (sdev->type == TYPE_TAPE)
lim->max_hw_sectors = 8192;
@@ -1168,7 +1168,7 @@ static const struct scsi_host_template driver_template = {
.emulated = 0,
.proc_name = driver_name,
.shost_groups = hptiop_host_groups,
- .device_configure = hptiop_device_configure,
+ .sdev_configure = hptiop_sdev_configure,
.this_id = -1,
.change_queue_depth = hptiop_adjust_disk_queue_depth,
.cmd_size = sizeof(struct hpt_cmd_priv),
@@ -1634,7 +1634,7 @@ static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
.host_phy_flag = cpu_to_le64(1),
};
-static struct pci_device_id hptiop_id_table[] = {
+static const struct pci_device_id hptiop_id_table[] = {
{ PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
{ PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
{ PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index e66c3ef74267..4c493b06062a 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1110,7 +1110,7 @@ static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
} else
evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);
- del_timer(&evt->timer);
+ timer_delete(&evt->timer);
}
/**
@@ -1754,7 +1754,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
atomic_set(&evt->active, 0);
list_del(&evt->queue_list);
spin_unlock_irqrestore(&evt->queue->l_lock, flags);
- del_timer(&evt->timer);
+ timer_delete(&evt->timer);
/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
* Firmware will send a CRQ with a transport event (0xFF) to
@@ -3393,7 +3393,7 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
}
/**
- * ibmvfc_slave_alloc - Setup the device's task set value
+ * ibmvfc_sdev_init - Setup the device's task set value
* @sdev: struct scsi_device device to configure
*
* Set the device's task set value so that error handling works as
@@ -3402,7 +3402,7 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
* Returns:
* 0 on success / -ENXIO if device does not exist
**/
-static int ibmvfc_slave_alloc(struct scsi_device *sdev)
+static int ibmvfc_sdev_init(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
@@ -3441,8 +3441,9 @@ static int ibmvfc_target_alloc(struct scsi_target *starget)
}
/**
- * ibmvfc_slave_configure - Configure the device
+ * ibmvfc_sdev_configure - Configure the device
* @sdev: struct scsi_device device to configure
+ * @lim: Request queue limits
*
* Enable allow_restart for a device if it is a disk. Adjust the
* queue_depth here also.
@@ -3450,7 +3451,8 @@ static int ibmvfc_target_alloc(struct scsi_target *starget)
* Returns:
* 0
**/
-static int ibmvfc_slave_configure(struct scsi_device *sdev)
+static int ibmvfc_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct Scsi_Host *shost = sdev->host;
unsigned long flags = 0;
@@ -3639,7 +3641,7 @@ static DEVICE_ATTR(nr_scsi_channels, S_IRUGO | S_IWUSR,
* number of bytes printed to buffer
**/
static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -3662,13 +3664,13 @@ static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute ibmvfc_trace_attr = {
+static const struct bin_attribute ibmvfc_trace_attr = {
.attr = {
.name = "trace",
.mode = S_IRUGO,
},
.size = 0,
- .read = ibmvfc_read_trace,
+ .read_new = ibmvfc_read_trace,
};
#endif
@@ -3696,8 +3698,8 @@ static const struct scsi_host_template driver_template = {
.eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
.eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
.eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
- .slave_alloc = ibmvfc_slave_alloc,
- .slave_configure = ibmvfc_slave_configure,
+ .sdev_init = ibmvfc_sdev_init,
+ .sdev_configure = ibmvfc_sdev_configure,
.target_alloc = ibmvfc_target_alloc,
.scan_finished = ibmvfc_scan_finished,
.change_queue_depth = ibmvfc_change_queue_depth,
@@ -3830,7 +3832,7 @@ static void ibmvfc_tasklet(void *data)
spin_unlock_irqrestore(vhost->host->host_lock, flags);
list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
- del_timer(&evt->timer);
+ timer_delete(&evt->timer);
list_del(&evt->queue_list);
ibmvfc_trc_end(evt);
evt->done(evt);
@@ -3936,7 +3938,7 @@ static void ibmvfc_drain_sub_crq(struct ibmvfc_queue *scrq)
spin_unlock_irqrestore(scrq->q_lock, flags);
list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
- del_timer(&evt->timer);
+ timer_delete(&evt->timer);
list_del(&evt->queue_list);
ibmvfc_trc_end(evt);
evt->done(evt);
@@ -4540,7 +4542,7 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
vhost->discovery_threads--;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
- del_timer(&tgt->timer);
+ timer_delete(&tgt->timer);
switch (status) {
case IBMVFC_MAD_SUCCESS:
@@ -4739,7 +4741,7 @@ static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) {
vhost->discovery_threads--;
- del_timer(&tgt->timer);
+ timer_delete(&tgt->timer);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
kref_put(&tgt->kref, ibmvfc_release_tgt);
} else
@@ -5517,7 +5519,7 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
fc_remote_port_delete(rport);
- del_timer_sync(&tgt->timer);
+ timer_delete_sync(&tgt->timer);
kref_put(&tgt->kref, ibmvfc_release_tgt);
return;
} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
@@ -5670,7 +5672,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (rport)
fc_remote_port_delete(rport);
- del_timer_sync(&tgt->timer);
+ timer_delete_sync(&tgt->timer);
kref_put(&tgt->kref, ibmvfc_release_tgt);
return;
} else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 71f3e9563520..d65a45860b33 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -789,7 +789,7 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
while (!list_empty(&hostdata->sent)) {
evt = list_first_entry(&hostdata->sent, struct srp_event_struct, list);
list_del(&evt->list);
- del_timer(&evt->timer);
+ timer_delete(&evt->timer);
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
if (evt->cmnd) {
@@ -944,7 +944,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
be64_to_cpu(crq_as_u64[1]));
if (rc != 0) {
list_del(&evt_struct->list);
- del_timer(&evt_struct->timer);
+ timer_delete(&evt_struct->timer);
/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
* Firmware will send a CRQ with a transport event (0xFF) to
@@ -1840,7 +1840,7 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
atomic_add(be32_to_cpu(evt_struct->xfer_iu->srp.rsp.req_lim_delta),
&hostdata->request_limit);
- del_timer(&evt_struct->timer);
+ timer_delete(&evt_struct->timer);
if ((crq->status != VIOSRP_OK && crq->status != VIOSRP_OK2) && evt_struct->cmnd)
evt_struct->cmnd->result = DID_ERROR << 16;
@@ -1860,14 +1860,16 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
}
/**
- * ibmvscsi_slave_configure: Set the "allow_restart" flag for each disk.
+ * ibmvscsi_sdev_configure: Set the "allow_restart" flag for each disk.
* @sdev: struct scsi_device device to configure
+ * @lim: Request queue limits
*
* Enable allow_restart for a device if it is a disk. Adjust the
* queue_depth here also as is required by the documentation for
* struct scsi_host_template.
*/
-static int ibmvscsi_slave_configure(struct scsi_device *sdev)
+static int ibmvscsi_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct Scsi_Host *shost = sdev->host;
unsigned long lock_flags = 0;
@@ -2091,7 +2093,7 @@ static struct scsi_host_template driver_template = {
.eh_abort_handler = ibmvscsi_eh_abort_handler,
.eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
.eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
- .slave_configure = ibmvscsi_slave_configure,
+ .sdev_configure = ibmvscsi_sdev_configure,
.change_queue_depth = ibmvscsi_change_queue_depth,
.host_reset = ibmvscsi_host_reset,
.cmd_per_lun = IBMVSCSI_CMDS_PER_LUN_DEFAULT,
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 16d085d56e9d..9e42230e42b8 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -2922,9 +2922,7 @@ static long ibmvscsis_alloctimer(struct scsi_info *vscsi)
struct timer_cb *p_timer;
p_timer = &vscsi->rsp_q_timer;
- hrtimer_init(&p_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-
- p_timer->timer.function = ibmvscsis_service_wait_q;
+ hrtimer_setup(&p_timer->timer, ibmvscsis_service_wait_q, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
p_timer->started = false;
p_timer->timer_pops = 0;
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 625fd547ee60..8648bd965287 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2941,7 +2941,7 @@ static void initio_remove_one(struct pci_dev *pdev)
MODULE_LICENSE("GPL");
-static struct pci_device_id initio_pci_tbl[] = {
+static const struct pci_device_id initio_pci_tbl[] = {
{PCI_VENDOR_ID_INIT, 0x9500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_INIT, 0x9400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_INIT, 0x9401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 31cf2d31cceb..d89135fb8faa 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -873,7 +873,7 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
IPR_IOASC_IOA_WAS_RESET);
- del_timer(&ipr_cmd->timer);
+ timer_delete(&ipr_cmd->timer);
ipr_cmd->done(ipr_cmd);
}
spin_unlock(&hrrq->_lock);
@@ -3366,7 +3366,7 @@ static void ipr_worker_thread(struct work_struct *work)
* number of bytes printed to buffer
**/
static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -3383,13 +3383,13 @@ static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
return ret;
}
-static struct bin_attribute ipr_trace_attr = {
+static const struct bin_attribute ipr_trace_attr = {
.attr = {
.name = "trace",
.mode = S_IRUGO,
},
.size = 0,
- .read = ipr_read_trace,
+ .read_new = ipr_read_trace,
};
#endif
@@ -4087,7 +4087,7 @@ static struct device_attribute ipr_ioa_fw_type_attr = {
};
static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct device *cdev = kobj_to_dev(kobj);
@@ -4111,7 +4111,7 @@ static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
}
static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct device *cdev = kobj_to_dev(kobj);
@@ -4134,14 +4134,14 @@ static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
return count;
}
-static struct bin_attribute ipr_ioa_async_err_log = {
+static const struct bin_attribute ipr_ioa_async_err_log = {
.attr = {
.name = "async_err_log",
.mode = S_IRUGO | S_IWUSR,
},
.size = 0,
- .read = ipr_read_async_err_log,
- .write = ipr_next_async_err_log
+ .read_new = ipr_read_async_err_log,
+ .write_new = ipr_next_async_err_log
};
static struct attribute *ipr_ioa_attrs[] = {
@@ -4172,7 +4172,7 @@ ATTRIBUTE_GROUPS(ipr_ioa);
* number of bytes printed to buffer
**/
static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *cdev = kobj_to_dev(kobj);
@@ -4361,7 +4361,7 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
* number of bytes printed to buffer
**/
static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *cdev = kobj_to_dev(kobj);
@@ -4385,14 +4385,14 @@ static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute ipr_dump_attr = {
+static const struct bin_attribute ipr_dump_attr = {
.attr = {
.name = "dump",
.mode = S_IRUSR | S_IWUSR,
},
.size = 0,
- .read = ipr_read_dump,
- .write = ipr_write_dump
+ .read_new = ipr_read_dump,
+ .write_new = ipr_write_dump
};
#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
@@ -4745,13 +4745,13 @@ static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
}
/**
- * ipr_slave_destroy - Unconfigure a SCSI device
+ * ipr_sdev_destroy - Unconfigure a SCSI device
* @sdev: scsi device struct
*
* Return value:
* nothing
**/
-static void ipr_slave_destroy(struct scsi_device *sdev)
+static void ipr_sdev_destroy(struct scsi_device *sdev)
{
struct ipr_resource_entry *res;
struct ipr_ioa_cfg *ioa_cfg;
@@ -4769,7 +4769,7 @@ static void ipr_slave_destroy(struct scsi_device *sdev)
}
/**
- * ipr_device_configure - Configure a SCSI device
+ * ipr_sdev_configure - Configure a SCSI device
* @sdev: scsi device struct
* @lim: queue limits
*
@@ -4778,8 +4778,8 @@ static void ipr_slave_destroy(struct scsi_device *sdev)
* Return value:
* 0 on success
**/
-static int ipr_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int ipr_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
struct ipr_resource_entry *res;
@@ -4815,7 +4815,7 @@ static int ipr_device_configure(struct scsi_device *sdev,
}
/**
- * ipr_slave_alloc - Prepare for commands to a device.
+ * ipr_sdev_init - Prepare for commands to a device.
* @sdev: scsi device struct
*
* This function saves a pointer to the resource entry
@@ -4826,7 +4826,7 @@ static int ipr_device_configure(struct scsi_device *sdev,
* Return value:
* 0 on success / -ENXIO if device does not exist
**/
-static int ipr_slave_alloc(struct scsi_device *sdev)
+static int ipr_sdev_init(struct scsi_device *sdev)
{
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
struct ipr_resource_entry *res;
@@ -5347,7 +5347,7 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
list_del(&ioa_cfg->reset_cmd->queue);
- del_timer(&ioa_cfg->reset_cmd->timer);
+ timer_delete(&ioa_cfg->reset_cmd->timer);
ipr_reset_ioa_job(ioa_cfg->reset_cmd);
return IRQ_HANDLED;
}
@@ -5362,7 +5362,7 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
list_del(&ioa_cfg->reset_cmd->queue);
- del_timer(&ioa_cfg->reset_cmd->timer);
+ timer_delete(&ioa_cfg->reset_cmd->timer);
ipr_reset_ioa_job(ioa_cfg->reset_cmd);
} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
if (ioa_cfg->clear_isr) {
@@ -5481,7 +5481,7 @@ static int ipr_iopoll(struct irq_poll *iop, int budget)
list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
list_del(&ipr_cmd->queue);
- del_timer(&ipr_cmd->timer);
+ timer_delete(&ipr_cmd->timer);
ipr_cmd->fast_done(ipr_cmd);
}
@@ -5550,7 +5550,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
list_del(&ipr_cmd->queue);
- del_timer(&ipr_cmd->timer);
+ timer_delete(&ipr_cmd->timer);
ipr_cmd->fast_done(ipr_cmd);
}
return rc;
@@ -5600,7 +5600,7 @@ static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
list_del(&ipr_cmd->queue);
- del_timer(&ipr_cmd->timer);
+ timer_delete(&ipr_cmd->timer);
ipr_cmd->fast_done(ipr_cmd);
}
return rc;
@@ -6398,9 +6398,9 @@ static const struct scsi_host_template driver_template = {
.eh_abort_handler = ipr_eh_abort,
.eh_device_reset_handler = ipr_eh_dev_reset,
.eh_host_reset_handler = ipr_eh_host_reset,
- .slave_alloc = ipr_slave_alloc,
- .device_configure = ipr_device_configure,
- .slave_destroy = ipr_slave_destroy,
+ .sdev_init = ipr_sdev_init,
+ .sdev_configure = ipr_sdev_configure,
+ .sdev_destroy = ipr_sdev_destroy,
.scan_finished = ipr_scan_finished,
.target_destroy = ipr_target_destroy,
.change_queue_depth = ipr_change_queue_depth,
@@ -9844,7 +9844,7 @@ static void ipr_shutdown(struct pci_dev *pdev)
}
}
-static struct pci_device_id ipr_pci_table[] = {
+static const struct pci_device_id ipr_pci_table[] = {
{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 10cf5775a939..94adb6ac02a4 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -364,7 +364,7 @@ static struct scsi_host_template ips_driver_template = {
.proc_name = "ips",
.show_info = ips_show_info,
.write_info = ips_write_info,
- .slave_configure = ips_slave_configure,
+ .sdev_configure = ips_sdev_configure,
.bios_param = ips_biosparam,
.this_id = -1,
.sg_tablesize = IPS_MAX_SG,
@@ -1166,7 +1166,7 @@ static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
/****************************************************************************/
/* */
-/* Routine Name: ips_slave_configure */
+/* Routine Name: ips_sdev_configure */
/* */
/* Routine Description: */
/* */
@@ -1174,7 +1174,7 @@ static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
/* */
/****************************************************************************/
static int
-ips_slave_configure(struct scsi_device * SDptr)
+ips_sdev_configure(struct scsi_device *SDptr, struct queue_limits *lim)
{
ips_ha_t *ha;
int min;
@@ -3631,8 +3631,8 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
break;
- case RESERVE:
- case RELEASE:
+ case RESERVE_6:
+ case RELEASE_6:
scb->scsi_cmd->result = DID_OK << 16;
break;
@@ -3899,8 +3899,8 @@ ips_chkstatus(ips_ha_t * ha, IPS_STATUS * pstatus)
case WRITE_6:
case READ_10:
case WRITE_10:
- case RESERVE:
- case RELEASE:
+ case RESERVE_6:
+ case RELEASE_6:
break;
case MODE_SENSE:
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index 65edf000e447..8ac932ec4444 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -400,7 +400,8 @@
*/
static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
sector_t capacity, int geom[]);
- static int ips_slave_configure(struct scsi_device *SDptr);
+ static int ips_sdev_configure(struct scsi_device *SDptr,
+ struct queue_limits *lim);
/*
* Raid Command Formats
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index 35589b6af90d..c108b5b940c3 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -1271,22 +1271,22 @@ void isci_host_deinit(struct isci_host *ihost)
/* Cancel any/all outstanding port timers */
for (i = 0; i < ihost->logical_port_entries; i++) {
struct isci_port *iport = &ihost->ports[i];
- del_timer_sync(&iport->timer.timer);
+ timer_delete_sync(&iport->timer.timer);
}
/* Cancel any/all outstanding phy timers */
for (i = 0; i < SCI_MAX_PHYS; i++) {
struct isci_phy *iphy = &ihost->phys[i];
- del_timer_sync(&iphy->sata_timer.timer);
+ timer_delete_sync(&iphy->sata_timer.timer);
}
- del_timer_sync(&ihost->port_agent.timer.timer);
+ timer_delete_sync(&ihost->port_agent.timer.timer);
- del_timer_sync(&ihost->power_control.timer.timer);
+ timer_delete_sync(&ihost->power_control.timer.timer);
- del_timer_sync(&ihost->timer.timer);
+ timer_delete_sync(&ihost->timer.timer);
- del_timer_sync(&ihost->phy_timer.timer);
+ timer_delete_sync(&ihost->phy_timer.timer);
}
static void __iomem *scu_base(struct isci_host *isci_host)
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 73085d2f5c43..acf0c2038d20 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -91,31 +91,31 @@ MODULE_DEVICE_TABLE(pci, isci_id_table);
/* linux isci specific settings */
-unsigned char no_outbound_task_to = 2;
+static unsigned char no_outbound_task_to = 2;
module_param(no_outbound_task_to, byte, 0);
MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)");
-u16 ssp_max_occ_to = 20;
+static u16 ssp_max_occ_to = 20;
module_param(ssp_max_occ_to, ushort, 0);
MODULE_PARM_DESC(ssp_max_occ_to, "SSP Max occupancy timeout (100us incr)");
-u16 stp_max_occ_to = 5;
+static u16 stp_max_occ_to = 5;
module_param(stp_max_occ_to, ushort, 0);
MODULE_PARM_DESC(stp_max_occ_to, "STP Max occupancy timeout (100us incr)");
-u16 ssp_inactive_to = 5;
+static u16 ssp_inactive_to = 5;
module_param(ssp_inactive_to, ushort, 0);
MODULE_PARM_DESC(ssp_inactive_to, "SSP inactivity timeout (100us incr)");
-u16 stp_inactive_to = 5;
+static u16 stp_inactive_to = 5;
module_param(stp_inactive_to, ushort, 0);
MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)");
-unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED;
+static unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED;
module_param(phy_gen, byte, 0);
MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
-unsigned char max_concurr_spinup;
+static unsigned char max_concurr_spinup;
module_param(max_concurr_spinup, byte, 0);
MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
index 4e6b1decbca7..d827e49c1d55 100644
--- a/drivers/scsi/isci/isci.h
+++ b/drivers/scsi/isci/isci.h
@@ -473,13 +473,6 @@ static inline void sci_swab32_cpy(void *_dest, void *_src, ssize_t word_cnt)
dest[word_cnt] = swab32(src[word_cnt]);
}
-extern unsigned char no_outbound_task_to;
-extern u16 ssp_max_occ_to;
-extern u16 stp_max_occ_to;
-extern u16 ssp_inactive_to;
-extern u16 stp_inactive_to;
-extern unsigned char phy_gen;
-extern unsigned char max_concurr_spinup;
extern uint cable_selection_override;
irqreturn_t isci_msix_isr(int vec, void *data);
@@ -488,9 +481,9 @@ irqreturn_t isci_error_isr(int vec, void *data);
/*
* Each timer is associated with a cancellation flag that is set when
- * del_timer() is called and checked in the timer callback function. This
- * is needed since del_timer_sync() cannot be called with sci_lock held.
- * For deinit however, del_timer_sync() is used without holding the lock.
+ * timer_delete() is called and checked in the timer callback function. This
+ * is needed since timer_delete_sync() cannot be called with sci_lock held.
+ * For deinit however, timer_delete_sync() is used without holding the lock.
*/
struct sci_timer {
struct timer_list timer;
@@ -513,7 +506,7 @@ static inline void sci_mod_timer(struct sci_timer *tmr, unsigned long msec)
static inline void sci_del_timer(struct sci_timer *tmr)
{
tmr->cancel = true;
- del_timer(&tmr->timer);
+ timer_delete(&tmr->timer);
}
struct sci_base_state_machine {
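For context, a minimal sketch (illustrative only, not part of this patch; the callback name is made up) of the cancel-flag pattern the comment above describes: the flag set by sci_del_timer() lets a callback that already fired bail out, since timer_delete() does not wait for a concurrent callback the way timer_delete_sync() does:

static void example_sci_timeout(struct timer_list *t)
{
	struct sci_timer *tmr = container_of(t, struct sci_timer, timer);

	/* sci_del_timer() may have run between the timer firing and this
	 * callback executing; honor the cancellation flag and bail out. */
	if (tmr->cancel)
		return;

	/* ... normal timeout handling under sci_lock ... */
}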
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index 866950a02965..287e1ba8ddd7 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -422,21 +422,6 @@ enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
}
}
-enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev)
-{
- struct sci_base_state_machine *sm = &idev->sm;
- enum sci_remote_device_states state = sm->current_state_id;
-
- if (state != SCI_DEV_RESETTING) {
- dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
- __func__, dev_state_name(state));
- return SCI_FAILURE_INVALID_STATE;
- }
-
- sci_change_state(sm, SCI_DEV_READY);
- return SCI_SUCCESS;
-}
-
enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
u32 frame_index)
{
@@ -1694,20 +1679,6 @@ enum sci_status sci_remote_device_abort_requests_pending_abort(
return sci_remote_device_terminate_reqs_checkabort(idev, 1);
}
-enum sci_status isci_remote_device_reset_complete(
- struct isci_host *ihost,
- struct isci_remote_device *idev)
-{
- unsigned long flags;
- enum sci_status status;
-
- spin_lock_irqsave(&ihost->scic_lock, flags);
- status = sci_remote_device_reset_complete(idev);
- spin_unlock_irqrestore(&ihost->scic_lock, flags);
-
- return status;
-}
-
void isci_dev_set_hang_detection_timeout(
struct isci_remote_device *idev,
u32 timeout)
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 3ad681c4c20a..561ae3f2cbbd 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -175,19 +175,6 @@ enum sci_status sci_remote_device_reset(
struct isci_remote_device *idev);
/**
- * sci_remote_device_reset_complete() - This method informs the device object
- * that the reset operation is complete and the device can resume operation
- * again.
- * @remote_device: This parameter specifies the device which is to be informed
- * of the reset complete operation.
- *
- * An indication that the device is resuming operation. SCI_SUCCESS the device
- * is resuming operation.
- */
-enum sci_status sci_remote_device_reset_complete(
- struct isci_remote_device *idev);
-
-/**
* enum sci_remote_device_states - This enumeration depicts all the states
* for the common remote device state machine.
* @SCI_DEV_INITIAL: Simply the initial state for the base remote device
@@ -211,7 +198,7 @@ enum sci_status sci_remote_device_reset_complete(
* device. When there are no active IO for the device it is in this
* state.
*
- * @SCI_STP_DEV_CMD: This is the command state for for the STP remote
+ * @SCI_STP_DEV_CMD: This is the command state for the STP remote
* device. This state is entered when the device is processing a
* non-NCQ command. The device object will fail any new start IO
* requests until this command is complete.
@@ -364,10 +351,6 @@ enum sci_status isci_remote_device_reset(
struct isci_host *ihost,
struct isci_remote_device *idev);
-enum sci_status isci_remote_device_reset_complete(
- struct isci_host *ihost,
- struct isci_remote_device *idev);
-
enum sci_status isci_remote_device_suspend_terminate(
struct isci_host *ihost,
struct isci_remote_device *idev,
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index c708e1059638..7b4fe0e6afb2 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -17,7 +17,6 @@
* Zhenyu Wang
*/
-#include <crypto/hash.h>
#include <linux/types.h>
#include <linux/inet.h>
#include <linux/slab.h>
@@ -468,8 +467,7 @@ static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr,
* sufficient room.
*/
if (conn->hdrdgst_en) {
- iscsi_tcp_dgst_header(tcp_sw_conn->tx_hash, hdr, hdrlen,
- hdr + hdrlen);
+ iscsi_tcp_dgst_header(hdr, hdrlen, hdr + hdrlen);
hdrlen += ISCSI_DIGEST_SIZE;
}
@@ -494,7 +492,7 @@ iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
- struct ahash_request *tx_hash = NULL;
+ u32 *tx_crcp = NULL;
unsigned int hdr_spec_len;
ISCSI_SW_TCP_DBG(conn, "offset=%d, datalen=%d %s\n", offset, len,
@@ -507,11 +505,10 @@ iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
if (conn->datadgst_en)
- tx_hash = tcp_sw_conn->tx_hash;
+ tx_crcp = &tcp_sw_conn->tx_crc;
return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment,
- sg, count, offset, len,
- NULL, tx_hash);
+ sg, count, offset, len, NULL, tx_crcp);
}
static void
@@ -520,7 +517,7 @@ iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
- struct ahash_request *tx_hash = NULL;
+ u32 *tx_crcp = NULL;
unsigned int hdr_spec_len;
ISCSI_SW_TCP_DBG(conn, "datalen=%zd %s\n", len, conn->datadgst_en ?
@@ -532,10 +529,10 @@ iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
if (conn->datadgst_en)
- tx_hash = tcp_sw_conn->tx_hash;
+ tx_crcp = &tcp_sw_conn->tx_crc;
iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment,
- data, len, NULL, tx_hash);
+ data, len, NULL, tx_crcp);
}
static int iscsi_sw_tcp_pdu_init(struct iscsi_task *task,
@@ -583,7 +580,6 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn;
struct iscsi_tcp_conn *tcp_conn;
struct iscsi_sw_tcp_conn *tcp_sw_conn;
- struct crypto_ahash *tfm;
cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn),
conn_idx);
@@ -596,37 +592,9 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
tcp_sw_conn->queue_recv = iscsi_recv_from_iscsi_q;
mutex_init(&tcp_sw_conn->sock_lock);
-
- tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm))
- goto free_conn;
-
- tcp_sw_conn->tx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!tcp_sw_conn->tx_hash)
- goto free_tfm;
- ahash_request_set_callback(tcp_sw_conn->tx_hash, 0, NULL, NULL);
-
- tcp_sw_conn->rx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!tcp_sw_conn->rx_hash)
- goto free_tx_hash;
- ahash_request_set_callback(tcp_sw_conn->rx_hash, 0, NULL, NULL);
-
- tcp_conn->rx_hash = tcp_sw_conn->rx_hash;
+ tcp_conn->rx_crcp = &tcp_sw_conn->rx_crc;
return cls_conn;
-
-free_tx_hash:
- ahash_request_free(tcp_sw_conn->tx_hash);
-free_tfm:
- crypto_free_ahash(tfm);
-free_conn:
- iscsi_conn_printk(KERN_ERR, conn,
- "Could not create connection due to crc32c "
- "loading error. Make sure the crc32c "
- "module is built as a module or into the "
- "kernel\n");
- iscsi_tcp_conn_teardown(cls_conn);
- return NULL;
}
static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
@@ -664,20 +632,8 @@ static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_conn *conn = cls_conn->dd_data;
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
iscsi_sw_tcp_release_conn(conn);
-
- ahash_request_free(tcp_sw_conn->rx_hash);
- if (tcp_sw_conn->tx_hash) {
- struct crypto_ahash *tfm;
-
- tfm = crypto_ahash_reqtfm(tcp_sw_conn->tx_hash);
- ahash_request_free(tcp_sw_conn->tx_hash);
- crypto_free_ahash(tfm);
- }
-
iscsi_tcp_conn_teardown(cls_conn);
}
@@ -1057,8 +1013,8 @@ static umode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param)
return 0;
}
-static int iscsi_sw_tcp_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int iscsi_sw_tcp_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(sdev->host);
struct iscsi_session *session = tcp_sw_host->session;
@@ -1083,7 +1039,7 @@ static const struct scsi_host_template iscsi_sw_tcp_sht = {
.eh_device_reset_handler= iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_recover_target,
.dma_boundary = PAGE_SIZE - 1,
- .device_configure = iscsi_sw_tcp_device_configure,
+ .sdev_configure = iscsi_sw_tcp_sdev_configure,
.proc_name = "iscsi_tcp",
.this_id = -1,
.track_queue_depth = 1,
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 89a6fc552f0b..c3e5d9fa6add 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -41,8 +41,8 @@ struct iscsi_sw_tcp_conn {
void (*old_write_space)(struct sock *);
/* data and header digests */
- struct ahash_request *tx_hash; /* CRC32C (Tx) */
- struct ahash_request *rx_hash; /* CRC32C (Rx) */
+ u32 tx_crc; /* CRC32C (Tx) */
+ u32 rx_crc; /* CRC32C (Rx) */
/* MIB custom statistics */
uint32_t sendpage_failures_cnt;
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 80be3a936d92..e705c30b4e1b 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1329,7 +1329,7 @@ static int fc_lun_reset(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
fsp->state |= FC_SRB_COMPL;
spin_unlock_bh(&fsp->scsi_pkt_lock);
- del_timer_sync(&fsp->timer);
+ timer_delete_sync(&fsp->timer);
spin_lock_bh(&fsp->scsi_pkt_lock);
if (fsp->seq_ptr) {
@@ -1961,7 +1961,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
fsp->state |= FC_SRB_COMPL;
if (!(fsp->state & FC_SRB_FCP_PROCESSING_TMO)) {
spin_unlock_bh(&fsp->scsi_pkt_lock);
- del_timer_sync(&fsp->timer);
+ timer_delete_sync(&fsp->timer);
spin_lock_bh(&fsp->scsi_pkt_lock);
}
@@ -2222,13 +2222,13 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
EXPORT_SYMBOL(fc_eh_host_reset);
/**
- * fc_slave_alloc() - Configure the queue depth of a Scsi_Host
+ * fc_sdev_init() - Configure the queue depth of a Scsi_Host
* @sdev: The SCSI device that identifies the SCSI host
*
* Configures queue depth based on host's cmd_per_lun. If not set
* then we use the libfc default.
*/
-int fc_slave_alloc(struct scsi_device *sdev)
+int fc_sdev_init(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
@@ -2238,7 +2238,7 @@ int fc_slave_alloc(struct scsi_device *sdev)
scsi_change_queue_depth(sdev, FC_FCP_DFLT_QUEUE_DEPTH);
return 0;
}
-EXPORT_SYMBOL(fc_slave_alloc);
+EXPORT_SYMBOL(fc_sdev_init);
/**
* fc_fcp_destroy() - Tear down the FCP layer for a given local port
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 2b1bf990a9dc..1ddaf7228340 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1945,7 +1945,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
session->tmf_state != TMF_QUEUED);
if (signal_pending(current))
flush_signals(current);
- del_timer_sync(&session->tmf_timer);
+ timer_delete_sync(&session->tmf_timer);
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->frwd_lock);
@@ -3247,7 +3247,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
iscsi_remove_conn(cls_conn);
- del_timer_sync(&conn->transport_timer);
+ timer_delete_sync(&conn->transport_timer);
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->frwd_lock);
@@ -3411,7 +3411,7 @@ void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
conn->stop_stage = flag;
spin_unlock_bh(&session->frwd_lock);
- del_timer_sync(&conn->transport_timer);
+ timer_delete_sync(&conn->transport_timer);
iscsi_suspend_tx(conn);
spin_lock_bh(&session->frwd_lock);
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index c182aa83f2c9..e90805ba868f 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -15,7 +15,7 @@
* Zhenyu Wang
*/
-#include <crypto/hash.h>
+#include <linux/crc32c.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/inet.h>
@@ -168,7 +168,7 @@ iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest)
segment->size = ISCSI_DIGEST_SIZE;
segment->copied = 0;
segment->sg = NULL;
- segment->hash = NULL;
+ segment->crcp = NULL;
}
/**
@@ -191,29 +191,27 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
struct iscsi_segment *segment, int recv,
unsigned copied)
{
- struct scatterlist sg;
unsigned int pad;
ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "copied %u %u size %u %s\n",
segment->copied, copied, segment->size,
recv ? "recv" : "xmit");
- if (segment->hash && copied) {
- /*
- * If a segment is kmapd we must unmap it before sending
- * to the crypto layer since that will try to kmap it again.
- */
- iscsi_tcp_segment_unmap(segment);
-
- if (!segment->data) {
- sg_init_table(&sg, 1);
- sg_set_page(&sg, sg_page(segment->sg), copied,
- segment->copied + segment->sg_offset +
- segment->sg->offset);
- } else
- sg_init_one(&sg, segment->data + segment->copied,
- copied);
- ahash_request_set_crypt(segment->hash, &sg, NULL, copied);
- crypto_ahash_update(segment->hash);
+ if (segment->crcp && copied) {
+ if (segment->data) {
+ *segment->crcp = crc32c(*segment->crcp,
+ segment->data + segment->copied,
+ copied);
+ } else {
+ const void *data;
+
+ data = kmap_local_page(sg_page(segment->sg));
+ *segment->crcp = crc32c(*segment->crcp,
+ data + segment->copied +
+ segment->sg_offset +
+ segment->sg->offset,
+ copied);
+ kunmap_local(data);
+ }
}
segment->copied += copied;
@@ -258,10 +256,8 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
* Set us up for transferring the data digest. hdr digest
* is completely handled in hdr done function.
*/
- if (segment->hash) {
- ahash_request_set_crypt(segment->hash, NULL,
- segment->digest, 0);
- crypto_ahash_final(segment->hash);
+ if (segment->crcp) {
+ put_unaligned_le32(~*segment->crcp, segment->digest);
iscsi_tcp_segment_splice_digest(segment,
recv ? segment->recv_digest : segment->digest);
return 0;
@@ -282,8 +278,7 @@ EXPORT_SYMBOL_GPL(iscsi_tcp_segment_done);
* given buffer, and returns the number of bytes
* consumed, which can actually be less than @len.
*
- * If hash digest is enabled, the function will update the
- * hash while copying.
+ * If CRC is enabled, the function will update the CRC while copying.
* Combining these two operations doesn't buy us a lot (yet),
* but in the future we could implement combined copy+crc,
* just the way we do for network layer checksums.
@@ -311,14 +306,10 @@ iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
}
inline void
-iscsi_tcp_dgst_header(struct ahash_request *hash, const void *hdr,
- size_t hdrlen, unsigned char digest[ISCSI_DIGEST_SIZE])
+iscsi_tcp_dgst_header(const void *hdr, size_t hdrlen,
+ unsigned char digest[ISCSI_DIGEST_SIZE])
{
- struct scatterlist sg;
-
- sg_init_one(&sg, hdr, hdrlen);
- ahash_request_set_crypt(hash, &sg, digest, hdrlen);
- crypto_ahash_digest(hash);
+ put_unaligned_le32(~crc32c(~0, hdr, hdrlen), digest);
}
EXPORT_SYMBOL_GPL(iscsi_tcp_dgst_header);
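For reference, a small standalone sketch of the digest math this hunk switches to (illustrative only; crc32c_update() and iscsi_hdr_digest() below are made-up names, while crc32c() and put_unaligned_le32() are the kernel helpers used above): seed the CRC32C with ~0, invert the final value, and store it little-endian in the 4-byte digest field.

#include <stdint.h>
#include <stddef.h>

/* Bitwise CRC32C (Castagnoli, reflected polynomial 0x82F63B78); the kernel's
 * crc32c() helper computes the same value with optimized code. */
static uint32_t crc32c_update(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
	}
	return crc;
}

/* Mirrors the converted iscsi_tcp_dgst_header(): digest = ~crc32c(~0, hdr,
 * hdrlen), stored little-endian. */
static void iscsi_hdr_digest(const void *hdr, size_t hdrlen, uint8_t digest[4])
{
	uint32_t crc = ~crc32c_update(~0u, hdr, hdrlen);

	digest[0] = crc & 0xff;
	digest[1] = (crc >> 8) & 0xff;
	digest[2] = (crc >> 16) & 0xff;
	digest[3] = (crc >> 24) & 0xff;
}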
@@ -343,24 +334,23 @@ iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
*/
static inline void
__iscsi_segment_init(struct iscsi_segment *segment, size_t size,
- iscsi_segment_done_fn_t *done, struct ahash_request *hash)
+ iscsi_segment_done_fn_t *done, u32 *crcp)
{
memset(segment, 0, sizeof(*segment));
segment->total_size = size;
segment->done = done;
- if (hash) {
- segment->hash = hash;
- crypto_ahash_init(hash);
+ if (crcp) {
+ segment->crcp = crcp;
+ *crcp = ~0;
}
}
inline void
iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
- size_t size, iscsi_segment_done_fn_t *done,
- struct ahash_request *hash)
+ size_t size, iscsi_segment_done_fn_t *done, u32 *crcp)
{
- __iscsi_segment_init(segment, size, done, hash);
+ __iscsi_segment_init(segment, size, done, crcp);
segment->data = data;
segment->size = size;
}
@@ -370,13 +360,12 @@ inline int
iscsi_segment_seek_sg(struct iscsi_segment *segment,
struct scatterlist *sg_list, unsigned int sg_count,
unsigned int offset, size_t size,
- iscsi_segment_done_fn_t *done,
- struct ahash_request *hash)
+ iscsi_segment_done_fn_t *done, u32 *crcp)
{
struct scatterlist *sg;
unsigned int i;
- __iscsi_segment_init(segment, size, done, hash);
+ __iscsi_segment_init(segment, size, done, crcp);
for_each_sg(sg_list, sg, sg_count, i) {
if (offset < sg->length) {
iscsi_tcp_segment_init_sg(segment, sg, offset);
@@ -393,7 +382,7 @@ EXPORT_SYMBOL_GPL(iscsi_segment_seek_sg);
* iscsi_tcp_hdr_recv_prep - prep segment for hdr reception
* @tcp_conn: iscsi connection to prep for
*
- * This function always passes NULL for the hash argument, because when this
+ * This function always passes NULL for the crcp argument, because when this
* function is called we do not yet know the final size of the header and want
* to delay the digest processing until we know that.
*/
@@ -434,15 +423,15 @@ static void
iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
{
struct iscsi_conn *conn = tcp_conn->iscsi_conn;
- struct ahash_request *rx_hash = NULL;
+ u32 *rx_crcp = NULL;
if (conn->datadgst_en &&
!(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
- rx_hash = tcp_conn->rx_hash;
+ rx_crcp = tcp_conn->rx_crcp;
iscsi_segment_init_linear(&tcp_conn->in.segment,
conn->data, tcp_conn->in.datalen,
- iscsi_tcp_data_recv_done, rx_hash);
+ iscsi_tcp_data_recv_done, rx_crcp);
}
/**
@@ -730,7 +719,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
if (tcp_conn->in.datalen) {
struct iscsi_tcp_task *tcp_task = task->dd_data;
- struct ahash_request *rx_hash = NULL;
+ u32 *rx_crcp = NULL;
struct scsi_data_buffer *sdb = &task->sc->sdb;
/*
@@ -743,7 +732,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
*/
if (conn->datadgst_en &&
!(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
- rx_hash = tcp_conn->rx_hash;
+ rx_crcp = tcp_conn->rx_crcp;
ISCSI_DBG_TCP(conn, "iscsi_tcp_begin_data_in( "
"offset=%d, datalen=%d)\n",
@@ -756,7 +745,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
tcp_task->data_offset,
tcp_conn->in.datalen,
iscsi_tcp_process_data_in,
- rx_hash);
+ rx_crcp);
spin_unlock(&conn->session->back_lock);
return rc;
}
@@ -878,7 +867,7 @@ iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
return 0;
}
- iscsi_tcp_dgst_header(tcp_conn->rx_hash, hdr,
+ iscsi_tcp_dgst_header(hdr,
segment->total_copied - ISCSI_DIGEST_SIZE,
segment->digest);
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 2b8004eb6f1b..869b5d4db44c 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -89,7 +89,7 @@ static int smp_execute_task_sg(struct domain_device *dev,
res = i->dft->lldd_execute_task(task, GFP_KERNEL);
if (res) {
- del_timer_sync(&task->slow_task->timer);
+ timer_delete_sync(&task->slow_task->timer);
pr_notice("executing SMP task failed:%d\n", res);
break;
}
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index da11d32840e2..feb2461b90e8 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -804,15 +804,14 @@ EXPORT_SYMBOL_GPL(sas_target_alloc);
#define SAS_DEF_QD 256
-int sas_device_configure(struct scsi_device *scsi_dev,
- struct queue_limits *lim)
+int sas_sdev_configure(struct scsi_device *scsi_dev, struct queue_limits *lim)
{
struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);
if (dev_is_sata(dev)) {
- ata_sas_device_configure(scsi_dev, lim, dev->sata_dev.ap);
+ ata_sas_sdev_configure(scsi_dev, lim, dev->sata_dev.ap);
return 0;
}
@@ -830,7 +829,7 @@ int sas_device_configure(struct scsi_device *scsi_dev,
return 0;
}
-EXPORT_SYMBOL_GPL(sas_device_configure);
+EXPORT_SYMBOL_GPL(sas_sdev_configure);
int sas_change_queue_depth(struct scsi_device *sdev, int depth)
{
@@ -860,7 +859,7 @@ EXPORT_SYMBOL_GPL(sas_bios_param);
void sas_task_internal_done(struct sas_task *task)
{
- del_timer(&task->slow_task->timer);
+ timer_delete(&task->slow_task->timer);
complete(&task->slow_task->completion);
}
@@ -912,7 +911,7 @@ static int sas_execute_internal_abort(struct domain_device *device,
res = i->dft->lldd_execute_task(task, GFP_KERNEL);
if (res) {
- del_timer_sync(&task->slow_task->timer);
+ timer_delete_sync(&task->slow_task->timer);
pr_err("Executing internal abort failed %016llx (%d)\n",
SAS_ADDR(device->sas_addr), res);
break;
@@ -1011,7 +1010,7 @@ int sas_execute_tmf(struct domain_device *device, void *parameter,
res = i->dft->lldd_execute_task(task, GFP_KERNEL);
if (res) {
- del_timer_sync(&task->slow_task->timer);
+ timer_delete_sync(&task->slow_task->timer);
pr_err("executing TMF task failed %016llx (%d)\n",
SAS_ADDR(device->sas_addr), res);
break;
@@ -1181,7 +1180,7 @@ void sas_task_abort(struct sas_task *task)
if (!slow)
return;
- if (!del_timer(&slow->timer))
+ if (!timer_delete(&slow->timer))
return;
slow->timer.function(&slow->timer);
return;
@@ -1194,14 +1193,14 @@ void sas_task_abort(struct sas_task *task)
}
EXPORT_SYMBOL_GPL(sas_task_abort);
-int sas_slave_alloc(struct scsi_device *sdev)
+int sas_sdev_init(struct scsi_device *sdev)
{
if (dev_is_sata(sdev_to_domain_dev(sdev)) && sdev->lun)
return -ENXIO;
return 0;
}
-EXPORT_SYMBOL_GPL(sas_slave_alloc);
+EXPORT_SYMBOL_GPL(sas_sdev_init);
void sas_target_destroy(struct scsi_target *starget)
{
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index e5a9c5a323f8..fe4fb67eb50c 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -74,8 +74,7 @@ struct lpfc_sli2_slim;
* queue depths when there are driver resource error or Firmware
* resource error.
*/
-/* 1 Second */
-#define QUEUE_RAMP_DOWN_INTERVAL (msecs_to_jiffies(1000 * 1))
+#define QUEUE_RAMP_DOWN_INTERVAL (secs_to_jiffies(1))
/* Number of exchanges reserved for discovery to complete */
#define LPFC_DISC_IOCB_BUFF_COUNT 20
@@ -1715,18 +1714,12 @@ lpfc_phba_elsring(struct lpfc_hba *phba)
* Note: If no valid cpu found, then nr_cpu_ids is returned.
*
**/
-static inline unsigned int
+static __always_inline unsigned int
lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
{
- unsigned int cpu_it;
-
- for_each_cpu_wrap(cpu_it, mask, start) {
- if (cpu_online(cpu_it))
- break;
- }
-
- return cpu_it;
+ return cpumask_next_and_wrap(start, mask, cpu_online_mask);
}
+
/**
* lpfc_next_present_cpu - Finds next present CPU after n
* @n: the cpu prior to search
@@ -1734,16 +1727,9 @@ lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
* Note: If no next present cpu, then fallback to first present cpu.
*
**/
-static inline unsigned int lpfc_next_present_cpu(int n)
+static __always_inline unsigned int lpfc_next_present_cpu(int n)
{
- unsigned int cpu;
-
- cpu = cpumask_next(n, cpu_present_mask);
-
- if (cpu >= nr_cpu_ids)
- cpu = cpumask_first(cpu_present_mask);
-
- return cpu;
+ return cpumask_next_wrap(n, cpu_present_mask);
}
/**
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 39b504164ecc..397216ff2c7e 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -2578,7 +2578,7 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
(old_val & DISABLE_FCP_RING_INT))
{
spin_unlock_irq(&phba->hbalock);
- del_timer(&phba->fcp_poll_timer);
+ timer_delete(&phba->fcp_poll_timer);
spin_lock_irq(&phba->hbalock);
if (lpfc_readl(phba->HCregaddr, &creg_val)) {
spin_unlock_irq(&phba->hbalock);
@@ -6185,7 +6185,7 @@ const struct attribute_group *lpfc_vport_groups[] = {
**/
static ssize_t
sysfs_ctlreg_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
size_t buf_off;
@@ -6244,7 +6244,7 @@ sysfs_ctlreg_write(struct file *filp, struct kobject *kobj,
**/
static ssize_t
sysfs_ctlreg_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
size_t buf_off;
@@ -6280,14 +6280,14 @@ sysfs_ctlreg_read(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_ctlreg_attr = {
+static const struct bin_attribute sysfs_ctlreg_attr = {
.attr = {
.name = "ctlreg",
.mode = S_IRUSR | S_IWUSR,
},
.size = 256,
- .read = sysfs_ctlreg_read,
- .write = sysfs_ctlreg_write,
+ .read_new = sysfs_ctlreg_read,
+ .write_new = sysfs_ctlreg_write,
};
/**
@@ -6308,7 +6308,7 @@ static struct bin_attribute sysfs_ctlreg_attr = {
**/
static ssize_t
sysfs_mbox_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
return -EPERM;
@@ -6332,20 +6332,20 @@ sysfs_mbox_write(struct file *filp, struct kobject *kobj,
**/
static ssize_t
sysfs_mbox_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
return -EPERM;
}
-static struct bin_attribute sysfs_mbox_attr = {
+static const struct bin_attribute sysfs_mbox_attr = {
.attr = {
.name = "mbox",
.mode = S_IRUSR | S_IWUSR,
},
.size = MAILBOX_SYSFS_MAX,
- .read = sysfs_mbox_read,
- .write = sysfs_mbox_write,
+ .read_new = sysfs_mbox_read,
+ .write_new = sysfs_mbox_write,
};
/**
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 1c6b024160da..c8f8496bbdf8 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -120,6 +120,16 @@ enum ELX_LOOPBACK_CMD {
#define ELX_LOOPBACK_HEADER_SZ \
(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
+/* For non-embedded read object command */
+#define READ_OBJ_EMB0_SCHEME_0 {1, 10, 256, 128}
+#define READ_OBJ_EMB0_SCHEME_1 {11, LPFC_EMB0_MAX_RD_OBJ_HBD_CNT, 512, 192}
+static const struct lpfc_read_object_cmd_scheme {
+ u32 min_hbd_cnt;
+ u32 max_hbd_cnt;
+ u32 cmd_size;
+ u32 payload_word_offset;
+} rd_obj_scheme[2] = {READ_OBJ_EMB0_SCHEME_0, READ_OBJ_EMB0_SCHEME_1};
+
struct lpfc_dmabufext {
struct lpfc_dmabuf dma;
uint32_t size;
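A short worked example of the scheme table added above (hbd_cnt values chosen for illustration, not taken from the patch): a READ_OBJECT_V0 with hbd_cnt = 5 matches READ_OBJ_EMB0_SCHEME_0, so lpfc_rd_obj_emb0_handle_job() below uses payload_word_offset 128, i.e. the read data starts skip = 128 * 4 = 512 bytes into the mailbox buffer; hbd_cnt = 20 matches READ_OBJ_EMB0_SCHEME_1 and gives skip = 192 * 4 = 768 bytes.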
@@ -3539,6 +3549,103 @@ lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
}
/**
+ * lpfc_rd_obj_emb0_handle_job - Handles completion for non-embedded
+ * READ_OBJECT_V0 mailbox commands
+ * @phba: pointer to lpfc_hba data struct
+ * @pmb_buf: pointer to mailbox buffer
+ * @sli_cfg_mbx: pointer to SLI_CONFIG mailbox memory region
+ * @job: pointer to bsg_job struct
+ * @bsg_reply: pointer to bsg_reply struct
+ *
+ * Given a non-embedded READ_OBJECT_V0's HBD_CNT, this routine copies
+ * a READ_OBJECT_V0 mailbox command's read data payload into a bsg_job
+ * structure for passing back to the application layer.
+ *
+ * Return codes
+ * 0 - successful
+ * -EINVAL - invalid HBD_CNT
+ * -ENODEV - pointer to bsg_job struct is NULL
+ **/
+static int
+lpfc_rd_obj_emb0_handle_job(struct lpfc_hba *phba, u8 *pmb_buf,
+ struct lpfc_sli_config_mbox *sli_cfg_mbx,
+ struct bsg_job *job,
+ struct fc_bsg_reply *bsg_reply)
+{
+ struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
+ struct lpfc_sli_config_emb0_subsys *emb0_subsys;
+ u32 hbd_cnt;
+ u32 dma_buf_len;
+ u8 i = 0;
+ size_t extra_bytes;
+ off_t skip = 0;
+
+ if (!job) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2496 NULL job\n");
+ return -ENODEV;
+ }
+
+ if (!bsg_reply) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2498 NULL bsg_reply\n");
+ return -ENODEV;
+ }
+
+ emb0_subsys = &sli_cfg_mbx->un.sli_config_emb0_subsys;
+
+ hbd_cnt = bsg_bf_get(lpfc_emb0_subcmnd_rd_obj_hbd_cnt,
+ emb0_subsys);
+
+ /* Calculate where the read object's read data payload is located based
+ * on HBD count scheme.
+ */
+ if (hbd_cnt >= rd_obj_scheme[0].min_hbd_cnt &&
+ hbd_cnt <= rd_obj_scheme[0].max_hbd_cnt) {
+ skip = rd_obj_scheme[0].payload_word_offset * 4;
+ } else if (hbd_cnt >= rd_obj_scheme[1].min_hbd_cnt &&
+ hbd_cnt <= rd_obj_scheme[1].max_hbd_cnt) {
+ skip = rd_obj_scheme[1].payload_word_offset * 4;
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2497 bad hbd_count 0x%08x\n",
+ hbd_cnt);
+ return -EINVAL;
+ }
+
+ /* Copy SLI_CONFIG command and READ_OBJECT response first */
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ pmb_buf, skip);
+
+ /* Copy data from hbds */
+ list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list,
+ list) {
+ dma_buf_len = emb0_subsys->hbd[i].buf_len;
+
+ /* Use sg_copy_buffer to specify a skip offset */
+ extra_bytes = sg_copy_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ curr_dmabuf->virt,
+ dma_buf_len, skip, false);
+
+ bsg_reply->reply_payload_rcv_len += extra_bytes;
+
+ skip += extra_bytes;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2499 copied hbd[%d] "
+ "0x%zx bytes\n",
+ i, extra_bytes);
+ i++;
+ }
+
+ return 0;
+}
+
+/**
* lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
* @phba: Pointer to HBA context object.
* @pmboxq: Pointer to mailbox command.
@@ -3551,10 +3658,10 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct bsg_job_data *dd_data;
struct bsg_job *job;
- struct fc_bsg_reply *bsg_reply;
+ struct fc_bsg_reply *bsg_reply = NULL;
uint8_t *pmb, *pmb_buf;
unsigned long flags;
- uint32_t size;
+ u32 size, opcode;
int rc = 0;
struct lpfc_dmabuf *dmabuf;
struct lpfc_sli_config_mbox *sli_cfg_mbx;
@@ -3591,6 +3698,24 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
&pmbx[sizeof(MAILBOX_t)],
sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
+
+ /* Special handling for non-embedded READ_OBJECT */
+ opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys);
+ switch (opcode) {
+ case COMN_OPCODE_READ_OBJECT:
+ if (job) {
+ rc = lpfc_rd_obj_emb0_handle_job(phba, pmb_buf,
+ sli_cfg_mbx,
+ job,
+ bsg_reply);
+ bsg_reply->result = rc;
+ goto done;
+ }
+ break;
+ default:
+ break;
+ }
}
/* Complete the job if the job is still active */
@@ -3604,12 +3729,14 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
/* result for successful */
bsg_reply->result = 0;
+done:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2937 SLI_CONFIG ext-buffer mailbox command "
"(x%x/x%x) complete bsg job done, bsize:%d\n",
phba->mbox_ext_buf_ctx.nembType,
- phba->mbox_ext_buf_ctx.mboxType, size);
+ phba->mbox_ext_buf_ctx.mboxType,
+ job->reply_payload.payload_len);
lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
phba->mbox_ext_buf_ctx.nembType,
phba->mbox_ext_buf_ctx.mboxType,
@@ -3819,14 +3946,16 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
{
struct fc_bsg_request *bsg_request = job->request;
struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ struct lpfc_sli_config_emb0_subsys *emb0_subsys;
+ struct list_head *ext_dmabuf_list;
struct dfc_mbox_req *mbox_req;
struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
- uint32_t ext_buf_cnt, ext_buf_index;
+ u32 ext_buf_cnt, ext_buf_index, hbd_cnt;
struct lpfc_dmabuf *ext_dmabuf = NULL;
struct bsg_job_data *dd_data = NULL;
LPFC_MBOXQ_t *pmboxq = NULL;
MAILBOX_t *pmb;
- uint8_t *pmbx;
+ u8 *pmbx, opcode;
int rc, i;
mbox_req =
@@ -3836,8 +3965,9 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
if (nemb_tp == nemb_mse) {
+ emb0_subsys = &sli_cfg_mbx->un.sli_config_emb0_subsys;
ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
- &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
+ &emb0_subsys->sli_config_hdr);
if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2945 Handled SLI_CONFIG(mse) rd, "
@@ -3847,6 +3977,57 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
rc = -ERANGE;
goto job_error;
}
+
+ /* Special handling for non-embedded READ_OBJECT */
+ opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode, emb0_subsys);
+ switch (opcode) {
+ case COMN_OPCODE_READ_OBJECT:
+ hbd_cnt = bsg_bf_get(lpfc_emb0_subcmnd_rd_obj_hbd_cnt,
+ emb0_subsys);
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2449 SLI_CONFIG(mse) rd non-embedded "
+ "hbd count = %d\n",
+ hbd_cnt);
+
+ ext_dmabuf_list =
+ &phba->mbox_ext_buf_ctx.ext_dmabuf_list;
+
+ /* Allocate hbds */
+ for (i = 0; i < hbd_cnt; i++) {
+ ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
+ if (!ext_dmabuf) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+ list_add_tail(&ext_dmabuf->list,
+ ext_dmabuf_list);
+ }
+
+ /* Fill out the physical memory addresses for the
+ * hbds
+ */
+ i = 0;
+ list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
+ ext_dmabuf_list, list) {
+ emb0_subsys->hbd[i].pa_hi =
+ putPaddrHigh(curr_dmabuf->phys);
+ emb0_subsys->hbd[i].pa_lo =
+ putPaddrLow(curr_dmabuf->phys);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "2495 SLI_CONFIG(hbd)[%d], "
+ "bufLen:%d, addrHi:x%x, "
+ "addrLo:x%x\n", i,
+ emb0_subsys->hbd[i].buf_len,
+ emb0_subsys->hbd[i].pa_hi,
+ emb0_subsys->hbd[i].pa_lo);
+ i++;
+ }
+ break;
+ default:
+ break;
+ }
+
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2941 Handled SLI_CONFIG(mse) rd, "
"ext_buf_cnt:%d\n", ext_buf_cnt);
@@ -4223,6 +4404,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
case COMN_OPCODE_GET_PROFILE_CONFIG:
case COMN_OPCODE_SET_FEATURES:
+ case COMN_OPCODE_READ_OBJECT:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3106 Handled SLI_CONFIG "
"subsys_comn, opcode:x%x\n",
@@ -4665,8 +4847,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
bsg_reply->reply_payload_rcv_len = 0;
/* sanity check to protect driver */
- if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
- job->request_payload.payload_len > BSG_MBOX_SIZE) {
+ if (job->request_payload.payload_len > BSG_MBOX_SIZE) {
rc = -ERANGE;
goto job_done;
}
@@ -4737,6 +4918,19 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
pmb->mbxOwner = OWN_HOST;
pmboxq->vport = vport;
+ /* non-embedded SLI_CONFIG requests already parsed, check others */
+ if (unlikely(job->reply_payload.payload_len > BSG_MBOX_SIZE)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2729 Cmd x%x (x%x/x%x) request has "
+ "out-of-range reply payload length x%x\n",
+ pmb->mbxCommand,
+ lpfc_sli_config_mbox_subsys_get(phba, pmboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, pmboxq),
+ job->reply_payload.payload_len);
+ rc = -ERANGE;
+ goto job_done;
+ }
+
/* If HBA encountered an error attention, allow only DUMP
* or RESTART mailbox commands until the HBA is restarted.
*/
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 3c04ca2d7455..27e7a033b53d 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2010-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -239,12 +239,27 @@ struct lpfc_sli_config_emb0_subsys {
uint32_t timeout; /* comn_set_feature timeout */
uint32_t request_length; /* comn_set_feature request len */
uint32_t version; /* comn_set_feature version */
- uint32_t csf_feature; /* comn_set_feature feature */
+ uint32_t word68; /* comn_set_feature feature */
+#define lpfc_emb0_subcmnd_csf_feat_SHIFT 0
+#define lpfc_emb0_subcmnd_csf_feat_MASK 0xffffffff
+#define lpfc_emb0_subcmnd_csf_feat_WORD word68
+#define lpfc_emb0_subcmnd_rd_obj_des_rd_len_SHIFT 0
+#define lpfc_emb0_subcmnd_rd_obj_des_rd_len_MASK 0x00ffffff
+#define lpfc_emb0_subcmnd_rd_obj_des_rd_len_WORD word68
uint32_t word69; /* comn_set_feature parameter len */
uint32_t word70; /* comn_set_feature parameter val0 */
#define lpfc_emb0_subcmnd_csf_p0_SHIFT 0
#define lpfc_emb0_subcmnd_csf_p0_MASK 0x3
#define lpfc_emb0_subcmnd_csf_p0_WORD word70
+ uint32_t reserved71[25];
+ uint32_t word96; /* rd_obj hbd_count */
+#define lpfc_emb0_subcmnd_rd_obj_hbd_cnt_SHIFT 0
+#define lpfc_emb0_subcmnd_rd_obj_hbd_cnt_MASK 0xffffffff
+#define lpfc_emb0_subcmnd_rd_obj_hbd_cnt_WORD word96
+#define LPFC_EMB0_MAX_RD_OBJ_HBD_CNT 31
+ struct lpfc_sli_config_hbd hbd[LPFC_EMB0_MAX_RD_OBJ_HBD_CNT];
+ uint32_t word190;
+ uint32_t word191;
};
struct lpfc_sli_config_emb1_subsys {
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 30891ad17e2a..12c67cdd7c19 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1646,14 +1646,12 @@ out:
/* If the caller wanted a synchronous DA_ID completion, signal the
* wait obj and clear flag to reset the vport.
*/
- if (ndlp->save_flags & NLP_WAIT_FOR_DA_ID) {
+ if (test_bit(NLP_WAIT_FOR_DA_ID, &ndlp->save_flags)) {
if (ndlp->da_id_waitq)
wake_up(ndlp->da_id_waitq);
}
- spin_lock_irq(&ndlp->lock);
- ndlp->save_flags &= ~NLP_WAIT_FOR_DA_ID;
- spin_unlock_irq(&ndlp->lock);
+ clear_bit(NLP_WAIT_FOR_DA_ID, &ndlp->save_flags);
lpfc_ct_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 3e173b5d00e0..3d47dc7458d1 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -85,13 +85,13 @@ enum lpfc_fc4_xpt_flags {
NLP_XPT_HAS_HH = 0x10
};
-enum lpfc_nlp_save_flags {
+enum lpfc_nlp_save_flags { /* mask bits */
/* devloss occurred during recovery */
- NLP_IN_RECOV_POST_DEV_LOSS = 0x1,
+ NLP_IN_RECOV_POST_DEV_LOSS,
/* wait for outstanding LOGO to cmpl */
- NLP_WAIT_FOR_LOGO = 0x2,
+ NLP_WAIT_FOR_LOGO,
/* wait for outstanding DA_ID to finish */
- NLP_WAIT_FOR_DA_ID = 0x4
+ NLP_WAIT_FOR_DA_ID
};
struct lpfc_nodelist {
@@ -154,7 +154,7 @@ struct lpfc_nodelist {
uint32_t fc4_prli_sent;
/* flags to keep ndlp alive until special conditions are met */
- enum lpfc_nlp_save_flags save_flags;
+ unsigned long save_flags;
enum lpfc_fc4_xpt_flags fc4_xpt_flags;
@@ -208,7 +208,6 @@ enum lpfc_nlp_flag {
NPR list */
NLP_RM_DFLT_RPI = 26, /* need to remove leftover dflt RPI */
NLP_NODEV_REMOVE = 27, /* Defer removal till discovery ends */
- NLP_TARGET_REMOVE = 28, /* Target remove in process */
NLP_SC_REQ = 29, /* Target requires authentication */
NLP_FIRSTBURST = 30, /* Target supports FirstBurst */
NLP_RPI_REGISTERED = 31 /* nlp_rpi is valid */
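A minimal sketch (illustrative, mirroring the lpfc_ct.c hunk earlier in this patch) of what the mask-to-bit-number change above enables: with save_flags now an unsigned long holding bit numbers, the locked mask updates collapse into single atomic bitops.

	/* before: mask value guarded by ndlp->lock */
	spin_lock_irq(&ndlp->lock);
	ndlp->save_flags &= ~NLP_WAIT_FOR_DA_ID;
	spin_unlock_irq(&ndlp->lock);

	/* after: NLP_WAIT_FOR_DA_ID is a bit index into an unsigned long */
	clear_bit(NLP_WAIT_FOR_DA_ID, &ndlp->save_flags);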
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 37f0a930d469..375a879c31f1 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -2988,12 +2988,8 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag);
- spin_lock_irq(&ndlp->lock);
- if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
+ if (test_and_clear_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags))
wake_up_waiter = 1;
- ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
- }
- spin_unlock_irq(&ndlp->lock);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"LOGO cmpl: status:x%x/x%x did:x%x",
@@ -3035,19 +3031,6 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Call state machine. This will unregister the rpi if needed. */
lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
- if (skip_recovery)
- goto out;
-
- /* The driver sets this flag for an NPIV instance that doesn't want to
- * log into the remote port.
- */
- if (test_bit(NLP_TARGET_REMOVE, &ndlp->nlp_flag)) {
- clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag);
- lpfc_disc_state_machine(vport, ndlp, cmdiocb,
- NLP_EVT_DEVICE_RM);
- goto out_rsrc_free;
- }
-
out:
/* At this point, the LOGO processing is complete. NOTE: For a
* pt2pt topology, we are assuming the NPortID will only change
@@ -3091,7 +3074,7 @@ out:
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_DEVICE_RM);
}
-out_rsrc_free:
+
/* Driver is done with the I/O. */
lpfc_els_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
@@ -4350,7 +4333,7 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
if (!test_and_clear_bit(NLP_DELAY_TMO, &nlp->nlp_flag))
return;
- del_timer_sync(&nlp->nlp_delayfunc);
+ timer_delete_sync(&nlp->nlp_delayfunc);
nlp->nlp_last_elscmd = 0;
if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
list_del_init(&nlp->els_retry_evt.evt_listp);
@@ -4448,7 +4431,7 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
* firing and before processing the timer, cancel the
* nlp_delayfunc.
*/
- del_timer_sync(&ndlp->nlp_delayfunc);
+ timer_delete_sync(&ndlp->nlp_delayfunc);
retry = ndlp->nlp_retry;
ndlp->nlp_retry = 0;
@@ -4583,6 +4566,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
int link_reset = 0, rc;
u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
u32 ulp_word4 = get_job_word4(phba, rspiocb);
+ u8 rsn_code_exp = 0;
/* Note: cmd_dmabuf may be 0 for internal driver abort
@@ -4798,11 +4782,22 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
break;
case LSRJT_LOGICAL_BSY:
+ rsn_code_exp = stat.un.b.lsRjtRsnCodeExp;
if ((cmd == ELS_CMD_PLOGI) ||
(cmd == ELS_CMD_PRLI) ||
(cmd == ELS_CMD_NVMEPRLI)) {
delay = 1000;
maxretry = 48;
+
+ /* An authentication LS_RJT reason code
+ * explanation means some error in the
+ * security settings end-to-end. Reduce
+ * the retry count to allow lpfc to clear
+ * RSCN mode and not race with dev_loss.
+ */
+ if (cmd == ELS_CMD_PLOGI &&
+ rsn_code_exp == LSEXP_AUTH_REQ)
+ maxretry = 8;
} else if (cmd == ELS_CMD_FDISC) {
/* FDISC retry policy */
maxretry = 48;
@@ -4831,6 +4826,20 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"0820 FLOGI (x%x). "
"BBCredit Not Supported\n",
stat.un.lsRjtError);
+ } else if (cmd == ELS_CMD_PLOGI) {
+ rsn_code_exp = stat.un.b.lsRjtRsnCodeExp;
+
+ /* An authentication LS_RJT reason code
+ * explanation means some error in the
+ * security settings end-to-end. Reduce
+ * the retry count to allow lpfc to clear
+ * RSCN mode and not race with dev_loss.
+ */
+ if (rsn_code_exp == LSEXP_AUTH_REQ) {
+ delay = 1000;
+ retry = 1;
+ maxretry = 8;
+ }
}
break;
@@ -8036,8 +8045,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if (test_bit(FC_DISC_TMO, &vport->fc_flag)) {
tmo = ((phba->fc_ratov * 3) + 3);
mod_timer(&vport->fc_disctmo,
- jiffies +
- msecs_to_jiffies(1000 * tmo));
+ jiffies + secs_to_jiffies(tmo));
}
return 0;
}
@@ -8072,7 +8080,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if (test_bit(FC_DISC_TMO, &vport->fc_flag)) {
tmo = ((phba->fc_ratov * 3) + 3);
mod_timer(&vport->fc_disctmo,
- jiffies + msecs_to_jiffies(1000 * tmo));
+ jiffies + secs_to_jiffies(tmo));
}
if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
!test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag)) {
@@ -9502,7 +9510,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
if (!list_empty(&pring->txcmplq))
if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
mod_timer(&vport->els_tmofunc,
- jiffies + msecs_to_jiffies(1000 * timeout));
+ jiffies + secs_to_jiffies(timeout));
}
/**
@@ -9560,18 +9568,16 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
mbx_tmo_err = test_bit(MBX_TMO_ERR, &phba->bit_flags);
/* First we need to issue aborts to outstanding cmds on txcmpl */
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
+ if (piocb->vport != vport)
+ continue;
+
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2243 iotag = 0x%x cmd_flag = 0x%x "
- "ulp_command = 0x%x this_vport %x "
- "sli_flag = 0x%x\n",
+ "ulp_command = 0x%x sli_flag = 0x%x\n",
piocb->iotag, piocb->cmd_flag,
get_job_cmnd(phba, piocb),
- (piocb->vport == vport),
phba->sli.sli_flag);
- if (piocb->vport != vport)
- continue;
-
if ((phba->sli.sli_flag & LPFC_SLI_ACTIVE) && !mbx_tmo_err) {
if (piocb->cmd_flag & LPFC_IO_LIBDFC)
continue;
@@ -10411,8 +10417,6 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
}
- clear_bit(NLP_TARGET_REMOVE, &ndlp->nlp_flag);
-
lpfc_disc_state_machine(vport, ndlp, elsiocb,
NLP_EVT_RCV_PLOGI);
@@ -10892,7 +10896,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
"3334 Delay fc port discovery for %d secs\n",
phba->fc_ratov);
mod_timer(&vport->delayed_disc_tmo,
- jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
+ jiffies + secs_to_jiffies(phba->fc_ratov));
return;
}
@@ -11149,7 +11153,7 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba)
if (!ndlp)
return;
- mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
+ mod_timer(&ndlp->nlp_delayfunc, jiffies + secs_to_jiffies(1));
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
phba->pport->port_state = LPFC_FLOGI;
@@ -11498,15 +11502,13 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_can_disctmo(vport);
}
- if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
+ if (test_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags)) {
/* Wake up lpfc_vport_delete if waiting...*/
if (ndlp->logo_waitq)
wake_up(ndlp->logo_waitq);
clear_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag);
clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag);
- spin_lock_irq(&ndlp->lock);
- ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
- spin_unlock_irq(&ndlp->lock);
+ clear_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags);
}
/* Safe to release resources now. */
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 4036a9838bb5..c256c3edd663 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -161,7 +161,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
struct lpfc_hba *phba;
struct lpfc_work_evt *evtp;
unsigned long iflags;
- bool nvme_reg = false;
+ bool drop_initial_node_ref = false;
ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode;
if (!ndlp)
@@ -188,8 +188,13 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->rport = NULL;
- if (ndlp->fc4_xpt_flags & NVME_XPT_REGD)
- nvme_reg = true;
+ /* Only 1 thread can drop the initial node reference.
+ * If not registered for NVME and NLP_DROPPED flag is
+ * clear, remove the initial reference.
+ */
+ if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
+ if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
+ drop_initial_node_ref = true;
/* The scsi_transport is done with the rport so lpfc cannot
* call to unregister.
@@ -200,13 +205,16 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
/* If NLP_XPT_REGD was cleared in lpfc_nlp_unreg_node,
* unregister calls were made to the scsi and nvme
* transports and refcnt was already decremented. Clear
- * the NLP_XPT_REGD flag only if the NVME Rport is
+ * the NLP_XPT_REGD flag only if the NVME nrport is
* confirmed unregistered.
*/
- if (!nvme_reg && ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
- ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
+ if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
+ if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
+ ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
spin_unlock_irqrestore(&ndlp->lock, iflags);
- lpfc_nlp_put(ndlp); /* may free ndlp */
+
+ /* Release scsi transport reference */
+ lpfc_nlp_put(ndlp);
} else {
spin_unlock_irqrestore(&ndlp->lock, iflags);
}
@@ -214,24 +222,24 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
spin_unlock_irqrestore(&ndlp->lock, iflags);
}
- /* Only 1 thread can drop the initial node reference. If
- * another thread has set NLP_DROPPED, this thread is done.
- */
- if (nvme_reg || test_bit(NLP_DROPPED, &ndlp->nlp_flag))
- return;
-
- set_bit(NLP_DROPPED, &ndlp->nlp_flag);
- lpfc_nlp_put(ndlp);
+ if (drop_initial_node_ref)
+ lpfc_nlp_put(ndlp);
return;
}
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
return;
- /* check for recovered fabric node */
- if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
- ndlp->nlp_DID == Fabric_DID)
+ /* Ignore callback for a mismatched (stale) rport */
+ if (ndlp->rport != rport) {
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_NODE,
+ "6788 fc rport mismatch: d_id x%06x ndlp x%px "
+ "fc rport x%px node rport x%px state x%x "
+ "refcnt %u\n",
+ ndlp->nlp_DID, ndlp, rport, ndlp->rport,
+ ndlp->nlp_state, kref_read(&ndlp->kref));
return;
+ }
if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -414,12 +422,7 @@ void
lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp)
{
- unsigned long iflags;
-
- spin_lock_irqsave(&ndlp->lock, iflags);
- if (ndlp->save_flags & NLP_IN_RECOV_POST_DEV_LOSS) {
- ndlp->save_flags &= ~NLP_IN_RECOV_POST_DEV_LOSS;
- spin_unlock_irqrestore(&ndlp->lock, iflags);
+ if (test_and_clear_bit(NLP_IN_RECOV_POST_DEV_LOSS, &ndlp->save_flags)) {
lpfc_nlp_get(ndlp);
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE,
"8438 Devloss timeout reversed on DID x%x "
@@ -427,9 +430,7 @@ lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
"port_state = x%x\n",
ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp,
ndlp->nlp_flag, vport->port_state);
- return;
}
- spin_unlock_irqrestore(&ndlp->lock, iflags);
}
/**
@@ -546,9 +547,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
ndlp->nlp_DID, kref_read(&ndlp->kref),
ndlp, ndlp->nlp_flag,
vport->port_state);
- spin_lock_irqsave(&ndlp->lock, iflags);
- ndlp->save_flags |= NLP_IN_RECOV_POST_DEV_LOSS;
- spin_unlock_irqrestore(&ndlp->lock, iflags);
+ set_bit(NLP_IN_RECOV_POST_DEV_LOSS, &ndlp->save_flags);
return fcf_inuse;
} else if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
/* Fabric node fully recovered before this dev_loss_tmo
@@ -1231,7 +1230,7 @@ lpfc_linkdown_port(struct lpfc_vport *vport)
/* Stop delayed Nport discovery */
clear_bit(FC_DISC_DELAYED, &vport->fc_flag);
- del_timer_sync(&vport->delayed_disc_tmo);
+ timer_delete_sync(&vport->delayed_disc_tmo);
if (phba->sli_rev == LPFC_SLI_REV4 &&
vport->port_type == LPFC_PHYSICAL_PORT &&
@@ -1421,7 +1420,7 @@ lpfc_linkup(struct lpfc_hba *phba)
/* Unblock fabric iocbs if they are blocked */
clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
- del_timer_sync(&phba->fabric_block_timer);
+ timer_delete_sync(&phba->fabric_block_timer);
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
@@ -3527,7 +3526,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
if (phba->fc_topology &&
phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "3314 Toplogy changed was 0x%x is 0x%x\n",
+ "3314 Topology changed was 0x%x is 0x%x\n",
phba->fc_topology,
bf_get(lpfc_mbx_read_top_topology, la));
phba->fc_topology_changed = 1;
@@ -4982,7 +4981,7 @@ lpfc_set_disctmo(struct lpfc_vport *vport)
tmo, vport->port_state, vport->fc_flag);
}
- mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
+ mod_timer(&vport->fc_disctmo, jiffies + secs_to_jiffies(tmo));
set_bit(FC_DISC_TMO, &vport->fc_flag);
/* Start Discovery Timer state <hba_state> */
@@ -5013,7 +5012,7 @@ lpfc_can_disctmo(struct lpfc_vport *vport)
if (test_bit(FC_DISC_TMO, &vport->fc_flag) ||
timer_pending(&vport->fc_disctmo)) {
clear_bit(FC_DISC_TMO, &vport->fc_flag);
- del_timer_sync(&vport->fc_disctmo);
+ timer_delete_sync(&vport->fc_disctmo);
spin_lock_irqsave(&vport->work_port_lock, iflags);
vport->work_port_events &= ~WORKER_DISC_TMO;
spin_unlock_irqrestore(&vport->work_port_lock, iflags);
@@ -5056,7 +5055,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
case CMD_GEN_REQUEST64_CR:
if (iocb->ndlp == ndlp)
return 1;
- fallthrough;
+ break;
case CMD_ELS_REQUEST64_CR:
if (remote_id == ndlp->nlp_DID)
return 1;
@@ -5504,7 +5503,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
clear_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = 0;
- del_timer_sync(&ndlp->nlp_delayfunc);
+ timer_delete_sync(&ndlp->nlp_delayfunc);
list_del_init(&ndlp->els_retry_evt.evt_listp);
list_del_init(&ndlp->dev_loss_evt.evt_listp);
@@ -5573,6 +5572,7 @@ static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *np = NULL;
uint32_t data1;
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
@@ -5587,14 +5587,20 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1, ndlp->nlp_rpi,
ndlp->active_rrqs_xri_bitmap);
- return ndlp;
+
+ /* Check for new or potentially stale node */
+ if (ndlp->nlp_state != NLP_STE_UNUSED_NODE)
+ return ndlp;
+ np = ndlp;
}
}
- /* FIND node did <did> NOT FOUND */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "0932 FIND node did x%x NOT FOUND.\n", did);
- return NULL;
+ if (!np)
+ /* FIND node did <did> NOT FOUND */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0932 FIND node did x%x NOT FOUND.\n", did);
+
+ return np;
}
struct lpfc_nodelist *
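Note: the node-flag changes above replace spinlock-protected read/modify/write of nlp_flag and save_flags with atomic bitops, so transitions such as NLP_DROPPED become single-owner without taking ndlp->lock. The snippet below is a minimal illustrative sketch of that pattern, not part of the patch; the bit index and helper name are placeholders for the driver's real NLP_* definitions.

/* Illustrative sketch only -- not part of the patch above. */
#include <linux/bitops.h>
#include <linux/types.h>

#define MY_DROPPED_BIT	0	/* stands in for NLP_DROPPED */

/* Returns true for exactly one caller: only that caller may drop the
 * initial node reference, and no spinlock is required.
 */
static bool my_try_drop_initial_ref(unsigned long *flags)
{
	return !test_and_set_bit(MY_DROPPED_BIT, flags);
}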
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index d5c15742f7f2..32298285ea5e 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -724,6 +724,7 @@ struct ls_rjt { /* Structure is in Big Endian format */
#define LSEXP_OUT_OF_RESOURCE 0x29
#define LSEXP_CANT_GIVE_DATA 0x2A
#define LSEXP_REQ_UNSUPPORTED 0x2C
+#define LSEXP_AUTH_REQ 0x48
#define LSEXP_NO_RSRC_ASSIGN 0x52
uint8_t vendorUnique; /* FC Word 0, bit 0: 7 */
} b;
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 26e1313ebb21..2dedb273b091 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1907,22 +1907,22 @@ struct lpfc_mbx_query_fw_config {
uint32_t asic_revision;
uint32_t physical_port;
uint32_t function_mode;
-#define LPFC_FCOE_INI_MODE 0x00000040
-#define LPFC_FCOE_TGT_MODE 0x00000080
+#define LPFC_FC_INI_MODE 0x00000040
+#define LPFC_FC_TGT_MODE 0x00000080
#define LPFC_DUA_MODE 0x00000800
- uint32_t ulp0_mode;
-#define LPFC_ULP_FCOE_INIT_MODE 0x00000040
-#define LPFC_ULP_FCOE_TGT_MODE 0x00000080
- uint32_t ulp0_nap_words[12];
- uint32_t ulp1_mode;
- uint32_t ulp1_nap_words[12];
+ uint32_t oper_mode;
+ uint32_t rsvd9[2];
+ uint32_t wqid_base;
+ uint32_t wqid_tot;
+ uint32_t rqid_base;
+ uint32_t rqid_tot;
+ uint32_t rsvd15[19];
uint32_t function_capabilities;
uint32_t cqid_base;
uint32_t cqid_tot;
uint32_t eqid_base;
uint32_t eqid_tot;
- uint32_t ulp0_nap2_words[2];
- uint32_t ulp1_nap2_words[2];
+ uint32_t rsvd39[4];
} rsp;
};
@@ -3778,25 +3778,22 @@ struct lpfc_mbx_get_prof_cfg {
struct lpfc_controller_attribute {
uint32_t version_string[8];
uint32_t manufacturer_name[8];
- uint32_t supported_modes;
+ uint32_t rsvd16;
uint32_t word17;
-#define lpfc_cntl_attr_eprom_ver_lo_SHIFT 0
-#define lpfc_cntl_attr_eprom_ver_lo_MASK 0x000000ff
-#define lpfc_cntl_attr_eprom_ver_lo_WORD word17
-#define lpfc_cntl_attr_eprom_ver_hi_SHIFT 8
-#define lpfc_cntl_attr_eprom_ver_hi_MASK 0x000000ff
-#define lpfc_cntl_attr_eprom_ver_hi_WORD word17
#define lpfc_cntl_attr_flash_id_SHIFT 16
#define lpfc_cntl_attr_flash_id_MASK 0x000000ff
#define lpfc_cntl_attr_flash_id_WORD word17
- uint32_t mbx_da_struct_ver;
- uint32_t ep_fw_da_struct_ver;
+#define lpfc_cntl_attr_boot_enable_SHIFT 24
+#define lpfc_cntl_attr_boot_enable_MASK 0x00000001
+#define lpfc_cntl_attr_boot_enable_WORD word17
+ uint32_t rsvd18[2];
uint32_t ncsi_ver_str[3];
- uint32_t dflt_ext_timeout;
+ uint32_t rsvd23;
uint32_t model_number[8];
uint32_t description[16];
uint32_t serial_number[8];
- uint32_t ip_ver_str[8];
+ uint32_t ipl_name[5];
+ uint32_t rsvd61[3];
uint32_t fw_ver_str[8];
uint32_t bios_ver_str[8];
uint32_t redboot_ver_str[8];
@@ -3804,53 +3801,31 @@ struct lpfc_controller_attribute {
uint32_t flash_fw_ver_str[8];
uint32_t functionality;
uint32_t word105;
-#define lpfc_cntl_attr_max_cbd_len_SHIFT 0
-#define lpfc_cntl_attr_max_cbd_len_MASK 0x0000ffff
-#define lpfc_cntl_attr_max_cbd_len_WORD word105
#define lpfc_cntl_attr_asic_rev_SHIFT 16
#define lpfc_cntl_attr_asic_rev_MASK 0x000000ff
#define lpfc_cntl_attr_asic_rev_WORD word105
-#define lpfc_cntl_attr_gen_guid0_SHIFT 24
-#define lpfc_cntl_attr_gen_guid0_MASK 0x000000ff
-#define lpfc_cntl_attr_gen_guid0_WORD word105
- uint32_t gen_guid1_12[3];
+ uint32_t rsvd106[3];
uint32_t word109;
-#define lpfc_cntl_attr_gen_guid13_14_SHIFT 0
-#define lpfc_cntl_attr_gen_guid13_14_MASK 0x0000ffff
-#define lpfc_cntl_attr_gen_guid13_14_WORD word109
-#define lpfc_cntl_attr_gen_guid15_SHIFT 16
-#define lpfc_cntl_attr_gen_guid15_MASK 0x000000ff
-#define lpfc_cntl_attr_gen_guid15_WORD word109
#define lpfc_cntl_attr_hba_port_cnt_SHIFT 24
#define lpfc_cntl_attr_hba_port_cnt_MASK 0x000000ff
#define lpfc_cntl_attr_hba_port_cnt_WORD word109
- uint32_t word110;
-#define lpfc_cntl_attr_dflt_lnk_tmo_SHIFT 0
-#define lpfc_cntl_attr_dflt_lnk_tmo_MASK 0x0000ffff
-#define lpfc_cntl_attr_dflt_lnk_tmo_WORD word110
-#define lpfc_cntl_attr_multi_func_dev_SHIFT 24
-#define lpfc_cntl_attr_multi_func_dev_MASK 0x000000ff
-#define lpfc_cntl_attr_multi_func_dev_WORD word110
+ uint32_t rsvd110;
uint32_t word111;
-#define lpfc_cntl_attr_cache_valid_SHIFT 0
-#define lpfc_cntl_attr_cache_valid_MASK 0x000000ff
-#define lpfc_cntl_attr_cache_valid_WORD word111
#define lpfc_cntl_attr_hba_status_SHIFT 8
#define lpfc_cntl_attr_hba_status_MASK 0x000000ff
#define lpfc_cntl_attr_hba_status_WORD word111
-#define lpfc_cntl_attr_max_domain_SHIFT 16
-#define lpfc_cntl_attr_max_domain_MASK 0x000000ff
-#define lpfc_cntl_attr_max_domain_WORD word111
#define lpfc_cntl_attr_lnk_numb_SHIFT 24
#define lpfc_cntl_attr_lnk_numb_MASK 0x0000003f
#define lpfc_cntl_attr_lnk_numb_WORD word111
#define lpfc_cntl_attr_lnk_type_SHIFT 30
#define lpfc_cntl_attr_lnk_type_MASK 0x00000003
#define lpfc_cntl_attr_lnk_type_WORD word111
- uint32_t fw_post_status;
- uint32_t hba_mtu[8];
+ uint32_t rsvd112[9];
uint32_t word121;
- uint32_t reserved1[3];
+#define lpfc_cntl_attr_asic_gen_SHIFT 8
+#define lpfc_cntl_attr_asic_gen_MASK 0x000000ff
+#define lpfc_cntl_attr_asic_gen_WORD word121
+ uint32_t rsvd122[3];
uint32_t word125;
#define lpfc_cntl_attr_pci_vendor_id_SHIFT 0
#define lpfc_cntl_attr_pci_vendor_id_MASK 0x0000ffff
@@ -3875,15 +3850,7 @@ struct lpfc_controller_attribute {
#define lpfc_cntl_attr_pci_fnc_num_SHIFT 16
#define lpfc_cntl_attr_pci_fnc_num_MASK 0x000000ff
#define lpfc_cntl_attr_pci_fnc_num_WORD word127
-#define lpfc_cntl_attr_inf_type_SHIFT 24
-#define lpfc_cntl_attr_inf_type_MASK 0x000000ff
-#define lpfc_cntl_attr_inf_type_WORD word127
- uint32_t unique_id[2];
- uint32_t word130;
-#define lpfc_cntl_attr_num_netfil_SHIFT 0
-#define lpfc_cntl_attr_num_netfil_MASK 0x000000ff
-#define lpfc_cntl_attr_num_netfil_WORD word130
- uint32_t reserved2[4];
+ uint32_t rsvd128[7];
};
struct lpfc_mbx_get_cntl_attributes {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 7f57397d91a9..90021653e59e 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -595,16 +595,16 @@ lpfc_config_port_post(struct lpfc_hba *phba)
/* Set up ring-0 (ELS) timer */
timeout = phba->fc_ratov * 2;
mod_timer(&vport->els_tmofunc,
- jiffies + msecs_to_jiffies(1000 * timeout));
+ jiffies + secs_to_jiffies(timeout));
/* Set up heart beat (HB) timer */
mod_timer(&phba->hb_tmofunc,
- jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
+ jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL));
clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
phba->last_completion_time = jiffies;
/* Set up error attention (ERATT) polling timer */
mod_timer(&phba->eratt_poll,
- jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
+ jiffies + secs_to_jiffies(phba->eratt_poll_interval));
if (test_bit(LINK_DISABLED, &phba->hba_flag)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -1267,7 +1267,7 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
!test_bit(FC_UNLOADING, &phba->pport->load_flag))
mod_timer(&phba->hb_tmofunc,
jiffies +
- msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
+ secs_to_jiffies(LPFC_HB_MBOX_INTERVAL));
return;
}
@@ -1555,7 +1555,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
/* If IOs are completing, no need to issue a MBX_HEARTBEAT */
spin_lock_irq(&phba->pport->work_port_lock);
if (time_after(phba->last_completion_time +
- msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
+ secs_to_jiffies(LPFC_HB_MBOX_INTERVAL),
jiffies)) {
spin_unlock_irq(&phba->pport->work_port_lock);
if (test_bit(HBA_HBEAT_INP, &phba->hba_flag))
@@ -3120,8 +3120,8 @@ lpfc_cleanup(struct lpfc_vport *vport)
void
lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
- del_timer_sync(&vport->els_tmofunc);
- del_timer_sync(&vport->delayed_disc_tmo);
+ timer_delete_sync(&vport->els_tmofunc);
+ timer_delete_sync(&vport->delayed_disc_tmo);
lpfc_can_disctmo(vport);
return;
}
@@ -3140,7 +3140,7 @@ __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
/* Now, try to stop the timer */
- del_timer(&phba->fcf.redisc_wait);
+ timer_delete(&phba->fcf.redisc_wait);
}
/**
@@ -3302,12 +3302,12 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
lpfc_stop_vport_timers(phba->pport);
cancel_delayed_work_sync(&phba->eq_delay_work);
cancel_delayed_work_sync(&phba->idle_stat_delay_work);
- del_timer_sync(&phba->sli.mbox_tmo);
- del_timer_sync(&phba->fabric_block_timer);
- del_timer_sync(&phba->eratt_poll);
- del_timer_sync(&phba->hb_tmofunc);
+ timer_delete_sync(&phba->sli.mbox_tmo);
+ timer_delete_sync(&phba->fabric_block_timer);
+ timer_delete_sync(&phba->eratt_poll);
+ timer_delete_sync(&phba->hb_tmofunc);
if (phba->sli_rev == LPFC_SLI_REV4) {
- del_timer_sync(&phba->rrq_tmr);
+ timer_delete_sync(&phba->rrq_tmr);
clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
}
clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
@@ -3316,7 +3316,7 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
switch (phba->pci_dev_grp) {
case LPFC_PCI_DEV_LP:
/* Stop any LightPulse device specific driver timers */
- del_timer_sync(&phba->fcp_poll_timer);
+ timer_delete_sync(&phba->fcp_poll_timer);
break;
case LPFC_PCI_DEV_OC:
/* Stop any OneConnect device specific driver timers */
@@ -3354,15 +3354,15 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
spin_unlock_irqrestore(&phba->hbalock, iflag);
if (mbx_action == LPFC_MBX_NO_WAIT)
return;
- timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
+ timeout = secs_to_jiffies(LPFC_MBOX_TMO) + jiffies;
spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->sli.mbox_active) {
actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
/* Determine how long we might wait for the active mailbox
* command to be gracefully completed by firmware.
*/
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
- phba->sli.mbox_active) * 1000) + jiffies;
+ timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba,
+ phba->sli.mbox_active)) + jiffies;
}
spin_unlock_irqrestore(&phba->hbalock, iflag);
@@ -3847,8 +3847,8 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
* Otherwise, let dev_loss take care of
* the node.
*/
- if (!(ndlp->save_flags &
- NLP_IN_RECOV_POST_DEV_LOSS) &&
+ if (!test_bit(NLP_IN_RECOV_POST_DEV_LOSS,
+ &ndlp->save_flags) &&
!(ndlp->fc4_xpt_flags &
(NVME_XPT_REGD | SCSI_XPT_REGD)))
lpfc_disc_state_machine
@@ -4924,14 +4924,14 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
stat = 1;
goto finished;
}
- if (time >= msecs_to_jiffies(30 * 1000)) {
+ if (time >= secs_to_jiffies(30)) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0461 Scanning longer than 30 "
"seconds. Continuing initialization\n");
stat = 1;
goto finished;
}
- if (time >= msecs_to_jiffies(15 * 1000) &&
+ if (time >= secs_to_jiffies(15) &&
phba->link_state <= LPFC_LINK_DOWN) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0465 Link down longer than 15 "
@@ -4945,7 +4945,7 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
if (vport->num_disc_nodes || vport->fc_prli_sent)
goto finished;
if (!atomic_read(&vport->fc_map_cnt) &&
- time < msecs_to_jiffies(2 * 1000))
+ time < secs_to_jiffies(2))
goto finished;
if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
goto finished;
@@ -5179,8 +5179,8 @@ lpfc_vmid_poll(struct timer_list *t)
lpfc_worker_wake_up(phba);
/* restart the timer for the next iteration */
- mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
- LPFC_VMID_TIMER));
+ mod_timer(&phba->inactive_vmid_poll,
+ jiffies + secs_to_jiffies(LPFC_VMID_TIMER));
}
/**
@@ -6909,7 +6909,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
* re-instantiate the Vlink using FDISC.
*/
mod_timer(&ndlp->nlp_delayfunc,
- jiffies + msecs_to_jiffies(1000));
+ jiffies + secs_to_jiffies(1));
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
vport->port_state = LPFC_FDISC;
@@ -7952,11 +7952,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
/* CMF congestion timer */
- hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- phba->cmf_timer.function = lpfc_cmf_timer;
+ hrtimer_setup(&phba->cmf_timer, lpfc_cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
/* CMF 1 minute stats collection timer */
- hrtimer_init(&phba->cmf_stats_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- phba->cmf_stats_timer.function = lpfc_cmf_stats_timer;
+ hrtimer_setup(&phba->cmf_stats_timer, lpfc_cmf_stats_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/*
* Control structure for handling external multi-buffer mailbox
@@ -11109,14 +11108,11 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.fw_func_mode =
mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
- phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
- phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
phba->sli4_hba.physical_port =
mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
- "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
- phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
+ "3251 QUERY_FW_CFG: func_mode:x%x\n",
+ phba->sli4_hba.fw_func_mode);
mempool_free(mboxq, phba->mbox_mem_pool);
@@ -12765,7 +12761,7 @@ static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
* timer. Wait for the poll timer to retire.
*/
synchronize_rcu();
- del_timer_sync(&phba->cpuhp_poll_timer);
+ timer_delete_sync(&phba->cpuhp_poll_timer);
}
static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
@@ -12876,7 +12872,7 @@ lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
if (offline) {
/* Find next online CPU on original mask */
- cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
+ cpu_next = cpumask_next_wrap(cpu, orig_mask);
cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
/* Found a valid CPU */
@@ -13173,6 +13169,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
eqhdl = lpfc_get_eq_hdl(0);
rc = pci_irq_vector(phba->pcidev, 0);
if (rc < 0) {
+ free_irq(phba->pcidev->irq, phba);
pci_free_irq_vectors(phba->pcidev);
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0496 MSI pci_irq_vec failed (%d)\n", rc);
@@ -13253,6 +13250,7 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
eqhdl = lpfc_get_eq_hdl(0);
retval = pci_irq_vector(phba->pcidev, 0);
if (retval < 0) {
+ free_irq(phba->pcidev->irq, phba);
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0502 INTR pci_irq_vec failed (%d)\n",
retval);
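Note: most of the timer updates in lpfc_init.c are two mechanical conversions: msecs_to_jiffies(1000 * n) becomes secs_to_jiffies(n), and hrtimer_init() plus a manual .function assignment becomes hrtimer_setup(). Below is a hedged sketch of both conversions outside the patch, using placeholder my_* names.

/* Illustrative sketch only -- not part of the patch above. */
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>

static enum hrtimer_restart my_hrtimer_fn(struct hrtimer *t)
{
	return HRTIMER_NORESTART;
}

static void my_arm_timers(struct timer_list *tmo, struct hrtimer *hrt,
			  unsigned int secs)
{
	/* was: mod_timer(tmo, jiffies + msecs_to_jiffies(1000 * secs)); */
	mod_timer(tmo, jiffies + secs_to_jiffies(secs));

	/* was: hrtimer_init(hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	 *      hrt->function = my_hrtimer_fn;
	 */
	hrtimer_setup(hrt, my_hrtimer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}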
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index e98f1c2b2220..fb6dbcb86c09 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2524,8 +2524,10 @@ lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
/* addr mode is bit wise inverted value of fcf addr_mode */
- bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
- (~phba->fcf.addr_mode) & 0x3);
+ if (test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
+ bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
+ (~phba->fcf.addr_mode) & 0x3);
+ }
} else {
/* This is ONLY for NVMET MRQ == 1 */
if (phba->cfg_nvmet_mrq != 1)
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 4d88cfe71cae..a596b80d03d4 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -64,9 +64,6 @@ static int
lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_name *nn, struct lpfc_name *pn)
{
- /* First, we MUST have a RPI registered */
- if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag))
- return 0;
/* Compare the ADISC rsp WWNN / WWPN matches our internal node
* table entry for that node.
@@ -735,6 +732,7 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ADISC *ap;
uint32_t *lp;
uint32_t cmd;
+ int rc;
pcmd = cmdiocb->cmd_dmabuf;
lp = (uint32_t *) pcmd->virt;
@@ -759,21 +757,29 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* resume the RPI before the ACC goes out.
*/
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
- elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
- GFP_KERNEL);
- if (elsiocb) {
- /* Save info from cmd IOCB used in rsp */
- memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
- sizeof(struct lpfc_iocbq));
-
- /* Save the ELS cmd */
- elsiocb->drvrTimeout = cmd;
-
- if (lpfc_sli4_resume_rpi(ndlp,
- lpfc_mbx_cmpl_resume_rpi,
- elsiocb))
- kfree(elsiocb);
- goto out;
+ /* Don't resume an unregistered RPI - unnecessary
+ * mailbox. Just send the ACC when the RPI is not
+ * registered.
+ */
+ if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) {
+ elsiocb = kmalloc(sizeof(*elsiocb), GFP_KERNEL);
+ if (elsiocb) {
+ /* Save info from cmd IOCB used in
+ * rsp
+ */
+ memcpy(elsiocb, cmdiocb,
+ sizeof(*elsiocb));
+
+ elsiocb->drvrTimeout = cmd;
+
+ rc = lpfc_sli4_resume_rpi(ndlp,
+ lpfc_mbx_cmpl_resume_rpi,
+ elsiocb);
+ if (rc)
+ kfree(elsiocb);
+
+ goto out;
+ }
}
}
@@ -815,7 +821,6 @@ out:
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
ndlp->nlp_prev_state = ndlp->nlp_state;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
return 0;
}
@@ -906,7 +911,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
(ndlp->nlp_state >= NLP_STE_ADISC_ISSUE ||
ndlp->nlp_state <= NLP_STE_PRLI_ISSUE)) {
mod_timer(&ndlp->nlp_delayfunc,
- jiffies + msecs_to_jiffies(1000 * 1));
+ jiffies + secs_to_jiffies(1));
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
lpfc_printf_vlog(vport, KERN_INFO,
@@ -1332,7 +1337,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
/* Put ndlp in npr state set plogi timer for 1 sec */
- mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
+ mod_timer(&ndlp->nlp_delayfunc, jiffies + secs_to_jiffies(1));
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
@@ -1936,7 +1941,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
/* Put ndlp in npr state set plogi timer for 1 sec */
mod_timer(&ndlp->nlp_delayfunc,
- jiffies + msecs_to_jiffies(1000 * 1));
+ jiffies + secs_to_jiffies(1));
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
@@ -2255,11 +2260,13 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
(vport->port_type == LPFC_NPIV_PORT) &&
vport->cfg_restrict_login) {
out:
- set_bit(NLP_TARGET_REMOVE, &ndlp->nlp_flag);
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_DISCOVERY | LOG_NODE,
+ "6228 Sending LOGO, determined nlp_type "
+ "0x%x nlp_flag x%lx refcnt %u\n",
+ ndlp->nlp_type, ndlp->nlp_flag,
+ kref_read(&ndlp->kref));
lpfc_issue_els_logo(vport, ndlp, 0);
-
- ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
return ndlp->nlp_state;
}
@@ -2743,7 +2750,7 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) {
mod_timer(&ndlp->nlp_delayfunc,
- jiffies + msecs_to_jiffies(1000 * 1));
+ jiffies + secs_to_jiffies(1));
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 43dc1da4a156..b1adb9f59097 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -2237,7 +2237,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
* wait. Print a message if a 10 second wait expires and renew the
* wait. This is unexpected.
*/
- wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
+ wait_tmo = secs_to_jiffies(LPFC_NVME_WAIT_TMO);
while (true) {
ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
if (unlikely(!ret)) {
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 905026a4782c..9edf80b14b1a 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5136,6 +5136,12 @@ lpfc_info(struct Scsi_Host *host)
goto buffer_done;
}
+ /* Support for BSG ioctls */
+ scnprintf(tmp, sizeof(tmp), " BSG");
+ if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
+ sizeof(lpfcinfobuf))
+ goto buffer_done;
+
/* PCI resettable */
if (!lpfc_check_pci_resettable(phba)) {
scnprintf(tmp, sizeof(tmp), " PCI resettable");
@@ -5482,7 +5488,7 @@ void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport)
struct lpfc_vmid *cur;
if (vport->port_type == LPFC_PHYSICAL_PORT)
- del_timer_sync(&vport->phba->inactive_vmid_poll);
+ timer_delete_sync(&vport->phba->inactive_vmid_poll);
kfree(vport->qfpa_res);
kfree(vport->vmid_priority.vmid_range);
@@ -5639,9 +5645,8 @@ wait_for_cmpl:
* cmd_flag is set to LPFC_DRIVER_ABORTED before we wait
* for abort to complete.
*/
- wait_event_timeout(waitq,
- (lpfc_cmd->pCmd != cmnd),
- msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
+ wait_event_timeout(waitq, (lpfc_cmd->pCmd != cmnd),
+ secs_to_jiffies(2*vport->cfg_devloss_tmo));
spin_lock(&lpfc_cmd->buf_lock);
@@ -5905,7 +5910,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct fc_rport *rport)
* If target is not in a MAPPED state, delay until
* target is rediscovered or devloss timeout expires.
*/
- later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+ later = secs_to_jiffies(2 * vport->cfg_devloss_tmo) + jiffies;
while (time_after(later, jiffies)) {
if (!pnode)
return FAILED;
@@ -5951,7 +5956,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
lpfc_sli_abort_taskmgmt(vport,
&phba->sli.sli3_ring[LPFC_FCP_RING],
tgt_id, lun_id, context);
- later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+ later = secs_to_jiffies(2 * vport->cfg_devloss_tmo) + jiffies;
while (time_after(later, jiffies) && cnt) {
schedule_timeout_uninterruptible(msecs_to_jiffies(20));
cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
@@ -6120,31 +6125,27 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
/* Issue LOGO, if no LOGO is outstanding */
spin_lock_irqsave(&pnode->lock, flags);
- if (!(pnode->save_flags & NLP_WAIT_FOR_LOGO) &&
+ if (!test_bit(NLP_WAIT_FOR_LOGO, &pnode->save_flags) &&
!pnode->logo_waitq) {
pnode->logo_waitq = &waitq;
pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
- set_bit(NLP_ISSUE_LOGO, &pnode->nlp_flag);
- pnode->save_flags |= NLP_WAIT_FOR_LOGO;
spin_unlock_irqrestore(&pnode->lock, flags);
+ set_bit(NLP_ISSUE_LOGO, &pnode->nlp_flag);
+ set_bit(NLP_WAIT_FOR_LOGO, &pnode->save_flags);
lpfc_unreg_rpi(vport, pnode);
wait_event_timeout(waitq,
- (!(pnode->save_flags &
- NLP_WAIT_FOR_LOGO)),
- msecs_to_jiffies(dev_loss_tmo *
- 1000));
+ !test_bit(NLP_WAIT_FOR_LOGO,
+ &pnode->save_flags),
+ secs_to_jiffies(dev_loss_tmo));
- if (pnode->save_flags & NLP_WAIT_FOR_LOGO) {
+ if (test_and_clear_bit(NLP_WAIT_FOR_LOGO,
+ &pnode->save_flags))
lpfc_printf_vlog(vport, KERN_ERR, logit,
"0725 SCSI layer TGTRST "
"failed & LOGO TMO (%d, %llu) "
"return x%x\n",
tgt_id, lun_id, status);
- spin_lock_irqsave(&pnode->lock, flags);
- pnode->save_flags &= ~NLP_WAIT_FOR_LOGO;
- } else {
- spin_lock_irqsave(&pnode->lock, flags);
- }
+ spin_lock_irqsave(&pnode->lock, flags);
pnode->logo_waitq = NULL;
spin_unlock_irqrestore(&pnode->lock, flags);
status = SUCCESS;
@@ -6226,7 +6227,7 @@ error:
}
/**
- * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
+ * lpfc_sdev_init - scsi_host_template sdev_init entry point
* @sdev: Pointer to scsi_device.
*
* This routine populates the cmds_per_lun count + 2 scsi_bufs into this host's
@@ -6239,7 +6240,7 @@ error:
* 0 - Success
**/
static int
-lpfc_slave_alloc(struct scsi_device *sdev)
+lpfc_sdev_init(struct scsi_device *sdev)
{
struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
struct lpfc_hba *phba = vport->phba;
@@ -6342,8 +6343,9 @@ lpfc_slave_alloc(struct scsi_device *sdev)
}
/**
- * lpfc_slave_configure - scsi_host_template slave_configure entry point
+ * lpfc_sdev_configure - scsi_host_template sdev_configure entry point
* @sdev: Pointer to scsi_device.
+ * @lim: Request queue limits.
*
 * This routine configures the following items:
* - Tag command queuing support for @sdev if supported.
@@ -6353,7 +6355,7 @@ lpfc_slave_alloc(struct scsi_device *sdev)
* 0 - Success
**/
static int
-lpfc_slave_configure(struct scsi_device *sdev)
+lpfc_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
struct lpfc_hba *phba = vport->phba;
@@ -6371,13 +6373,13 @@ lpfc_slave_configure(struct scsi_device *sdev)
}
/**
- * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
+ * lpfc_sdev_destroy - sdev_destroy entry point of SHT data structure
* @sdev: Pointer to scsi_device.
*
 * This routine sets @sdev hostdata field to null.
**/
static void
-lpfc_slave_destroy(struct scsi_device *sdev)
+lpfc_sdev_destroy(struct scsi_device *sdev)
{
struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
struct lpfc_hba *phba = vport->phba;
@@ -6422,7 +6424,7 @@ lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
{
struct lpfc_device_data *lun_info;
- int memory_flags;
+ gfp_t memory_flags;
if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
!(phba->cfg_fof))
@@ -6737,7 +6739,13 @@ lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
}
static int
-lpfc_no_slave(struct scsi_device *sdev)
+lpfc_init_no_sdev(struct scsi_device *sdev)
+{
+ return -ENODEV;
+}
+
+static int
+lpfc_config_no_sdev(struct scsi_device *sdev, struct queue_limits *lim)
{
return -ENODEV;
}
@@ -6748,8 +6756,8 @@ struct scsi_host_template lpfc_template_nvme = {
.proc_name = LPFC_DRIVER_NAME,
.info = lpfc_info,
.queuecommand = lpfc_no_command,
- .slave_alloc = lpfc_no_slave,
- .slave_configure = lpfc_no_slave,
+ .sdev_init = lpfc_init_no_sdev,
+ .sdev_configure = lpfc_config_no_sdev,
.scan_finished = lpfc_scan_finished,
.this_id = -1,
.sg_tablesize = 1,
@@ -6772,9 +6780,9 @@ struct scsi_host_template lpfc_template = {
.eh_device_reset_handler = lpfc_device_reset_handler,
.eh_target_reset_handler = lpfc_target_reset_handler,
.eh_host_reset_handler = lpfc_host_reset_handler,
- .slave_alloc = lpfc_slave_alloc,
- .slave_configure = lpfc_slave_configure,
- .slave_destroy = lpfc_slave_destroy,
+ .sdev_init = lpfc_sdev_init,
+ .sdev_configure = lpfc_sdev_configure,
+ .sdev_destroy = lpfc_sdev_destroy,
.scan_finished = lpfc_scan_finished,
.this_id = -1,
.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
@@ -6799,9 +6807,9 @@ struct scsi_host_template lpfc_vport_template = {
.eh_target_reset_handler = lpfc_target_reset_handler,
.eh_bus_reset_handler = NULL,
.eh_host_reset_handler = NULL,
- .slave_alloc = lpfc_slave_alloc,
- .slave_configure = lpfc_slave_configure,
- .slave_destroy = lpfc_slave_destroy,
+ .sdev_init = lpfc_sdev_init,
+ .sdev_configure = lpfc_sdev_configure,
+ .sdev_destroy = lpfc_sdev_destroy,
.scan_finished = lpfc_scan_finished,
.this_id = -1,
.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 874644b31a3e..a335d34070d3 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1025,7 +1025,7 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
LIST_HEAD(send_rrq);
clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
- next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
+ next_time = jiffies + secs_to_jiffies(phba->fc_ratov + 1);
spin_lock_irqsave(&phba->rrq_list_lock, iflags);
list_for_each_entry_safe(rrq, nextrrq,
&phba->active_rrq_list, list) {
@@ -1208,8 +1208,7 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
else
rrq->send_rrq = 0;
rrq->xritag = xritag;
- rrq->rrq_stop_time = jiffies +
- msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
+ rrq->rrq_stop_time = jiffies + secs_to_jiffies(phba->fc_ratov + 1);
rrq->nlp_DID = ndlp->nlp_DID;
rrq->vport = ndlp->vport;
rrq->rxid = rxid;
@@ -1736,8 +1735,7 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
BUG_ON(!piocb->vport);
if (!test_bit(FC_UNLOADING, &piocb->vport->load_flag))
mod_timer(&piocb->vport->els_tmofunc,
- jiffies +
- msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
+ jiffies + secs_to_jiffies(phba->fc_ratov << 1));
}
return 0;
@@ -2923,6 +2921,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+ } else {
+ clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
}
/* The unreg_login mailbox is complete and had a
@@ -3956,8 +3956,7 @@ void lpfc_poll_eratt(struct timer_list *t)
else
/* Restart the timer for next eratt poll */
mod_timer(&phba->eratt_poll,
- jiffies +
- msecs_to_jiffies(1000 * phba->eratt_poll_interval));
+ jiffies + secs_to_jiffies(phba->eratt_poll_interval));
return;
}
@@ -5042,7 +5041,7 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
return 1;
}
- del_timer_sync(&psli->mbox_tmo);
+ timer_delete_sync(&psli->mbox_tmo);
if (ha_copy & HA_ERATT) {
writel(HA_ERATT, phba->HAregaddr);
phba->pport->stopped = 1;
@@ -6004,9 +6003,9 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
- memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
- strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
+ memcpy(phba->BIOSVersion, cntl_attr->bios_ver_str,
sizeof(phba->BIOSVersion));
+ phba->BIOSVersion[sizeof(phba->BIOSVersion) - 1] = '\0';
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
@@ -9008,11 +9007,11 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Start the ELS watchdog timer */
mod_timer(&vport->els_tmofunc,
- jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
+ jiffies + secs_to_jiffies(phba->fc_ratov * 2));
/* Start heart beat timer */
mod_timer(&phba->hb_tmofunc,
- jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
+ jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL));
clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
phba->last_completion_time = jiffies;
@@ -9027,7 +9026,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Start error attention (ERATT) polling timer */
mod_timer(&phba->eratt_poll,
- jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
+ jiffies + secs_to_jiffies(phba->eratt_poll_interval));
/*
* The port is ready, set the host's link state to LINK_DOWN
@@ -9504,8 +9503,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
goto out_not_finished;
}
/* timeout active mbox command */
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
- 1000);
+ timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox));
mod_timer(&psli->mbox_tmo, jiffies + timeout);
}
@@ -9629,8 +9627,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
drvr_flag);
goto out_not_finished;
}
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
- 1000) + jiffies;
+ timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox)) + jiffies;
i = 0;
/* Wait for command to complete */
while (((word0 & OWN_CHIP) == OWN_CHIP) ||
@@ -9756,9 +9753,8 @@ lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
* command to be gracefully completed by firmware.
*/
if (phba->sli.mbox_active)
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
- phba->sli.mbox_active) *
- 1000) + jiffies;
+ timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba,
+ phba->sli.mbox_active)) + jiffies;
spin_unlock_irq(&phba->hbalock);
/* Make sure the mailbox is really active */
@@ -9881,8 +9877,7 @@ lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
}
}
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
- * 1000) + jiffies;
+ timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)) + jiffies;
do {
bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
@@ -10230,7 +10225,7 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
/* Start timer for the mbox_tmo and log some mailbox post messages */
mod_timer(&psli->mbox_tmo, (jiffies +
- msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
+ secs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq))));
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
@@ -12081,7 +12076,7 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
local_bh_enable();
/* Return any active mbox cmds */
- del_timer_sync(&psli->mbox_tmo);
+ timer_delete_sync(&psli->mbox_tmo);
spin_lock_irqsave(&phba->pport->work_port_lock, flags);
phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
@@ -13159,7 +13154,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
SLI_IOCB_RET_IOCB);
if (retval == IOCB_SUCCESS) {
- timeout_req = msecs_to_jiffies(timeout * 1000);
+ timeout_req = secs_to_jiffies(timeout);
timeleft = wait_event_timeout(done_q,
lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
timeout_req);
@@ -13275,8 +13270,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
/* now issue the command */
retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
- wait_for_completion_timeout(&mbox_done,
- msecs_to_jiffies(timeout * 1000));
+ wait_for_completion_timeout(&mbox_done, secs_to_jiffies(timeout));
spin_lock_irqsave(&phba->hbalock, flag);
pmboxq->ctx_u.mbox_wait = NULL;
@@ -13323,7 +13317,7 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
lpfc_sli_mbox_sys_flush(phba);
return;
}
- timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
+ timeout = secs_to_jiffies(LPFC_MBOX_TMO) + jiffies;
/* Disable softirqs, including timers from obtaining phba->hbalock */
local_bh_disable();
@@ -13336,9 +13330,8 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
* command to be gracefully completed by firmware.
*/
if (phba->sli.mbox_active)
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
- phba->sli.mbox_active) *
- 1000) + jiffies;
+ timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba,
+ phba->sli.mbox_active)) + jiffies;
spin_unlock_irq(&phba->hbalock);
/* Enable softirqs again, done with phba->hbalock */
@@ -13809,7 +13802,7 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
phba->sli.mbox_active = NULL;
spin_unlock_irqrestore(&phba->hbalock, iflag);
phba->last_completion_time = jiffies;
- del_timer(&phba->sli.mbox_tmo);
+ timer_delete(&phba->sli.mbox_tmo);
if (pmb->mbox_cmpl) {
lpfc_sli_pcimem_bcopy(mbox, pmbox,
MAILBOX_CMD_SIZE);
@@ -14309,7 +14302,7 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
/* Reset heartbeat timer */
phba->last_completion_time = jiffies;
- del_timer(&phba->sli.mbox_tmo);
+ timer_delete(&phba->sli.mbox_tmo);
/* Move mbox data to caller's mailbox region, do endian swapping */
if (pmb->mbox_cmpl && mbox)
@@ -15696,7 +15689,7 @@ static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
synchronize_rcu();
if (list_empty(&phba->poll_list))
- del_timer_sync(&phba->cpuhp_poll_timer);
+ timer_delete_sync(&phba->cpuhp_poll_timer);
}
void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index c1e9ec0243ba..9be3da91c923 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -865,8 +865,6 @@ struct lpfc_sli4_hba {
struct lpfc_name wwpn;
uint32_t fw_func_mode; /* FW function protocol mode */
- uint32_t ulp0_mode; /* ULP0 protocol mode */
- uint32_t ulp1_mode; /* ULP1 protocol mode */
/* Optimized Access Storage specific queues/structures */
uint64_t oas_next_lun;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 61fe1220f8ad..638b50f35287 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.4.0.6"
+#define LPFC_DRIVER_VERSION "14.4.0.8"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
@@ -32,6 +32,6 @@
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright (C) 2017-2024 Broadcom. All Rights " \
+#define LPFC_COPYRIGHT "Copyright (C) 2017-2025 Broadcom. All Rights " \
"Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \
"and/or its subsidiaries."
diff --git a/drivers/scsi/lpfc/lpfc_vmid.c b/drivers/scsi/lpfc/lpfc_vmid.c
index cc3e4736f2fe..14dbfe954e42 100644
--- a/drivers/scsi/lpfc/lpfc_vmid.c
+++ b/drivers/scsi/lpfc/lpfc_vmid.c
@@ -278,7 +278,7 @@ int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid,
if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) {
mod_timer(&vport->phba->inactive_vmid_poll,
jiffies +
- msecs_to_jiffies(1000 * LPFC_VMID_TIMER));
+ secs_to_jiffies(LPFC_VMID_TIMER));
vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD;
}
}
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 9e0e35763377..cc56a7334319 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -246,7 +246,7 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
* fabric RA_TOV value and dev_loss tmo. The driver's
* devloss_tmo is 10 giving this loop a 3x multiplier minimally.
*/
- wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000);
+ wait_time_max = secs_to_jiffies((phba->fc_ratov * 3) + 3);
wait_time_max += jiffies;
start_time = jiffies;
while (time_before(jiffies, wait_time_max)) {
@@ -492,21 +492,22 @@ lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
spin_lock_irq(&ndlp->lock);
- if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO) &&
+ if (!test_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags) &&
!ndlp->logo_waitq) {
ndlp->logo_waitq = &waitq;
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
set_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag);
- ndlp->save_flags |= NLP_WAIT_FOR_LOGO;
+ set_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags);
}
spin_unlock_irq(&ndlp->lock);
rc = lpfc_issue_els_npiv_logo(vport, ndlp);
if (!rc) {
wait_event_timeout(waitq,
- (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO)),
+ !test_bit(NLP_WAIT_FOR_LOGO,
+ &ndlp->save_flags),
msecs_to_jiffies(phba->fc_ratov * 2000));
- if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO))
+ if (!test_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags))
goto logo_cmpl;
/* LOGO wait failed. Correct status. */
rc = -EINTR;
@@ -516,9 +517,7 @@ lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* Error - clean up node flags. */
clear_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag);
- spin_lock_irq(&ndlp->lock);
- ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
- spin_unlock_irq(&ndlp->lock);
+ clear_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags);
logo_cmpl:
lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
@@ -696,19 +695,20 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
spin_lock_irq(&ndlp->lock);
ndlp->da_id_waitq = &waitq;
- ndlp->save_flags |= NLP_WAIT_FOR_DA_ID;
spin_unlock_irq(&ndlp->lock);
+ set_bit(NLP_WAIT_FOR_DA_ID, &ndlp->save_flags);
rc = lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0);
if (!rc) {
wait_event_timeout(waitq,
- !(ndlp->save_flags & NLP_WAIT_FOR_DA_ID),
+ !test_bit(NLP_WAIT_FOR_DA_ID,
+ &ndlp->save_flags),
msecs_to_jiffies(phba->fc_ratov * 2000));
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT | LOG_ELS,
"1829 DA_ID issue status %d. "
- "SFlag x%x NState x%x, NFlag x%lx "
+ "SFlag x%lx NState x%x, NFlag x%lx "
"Rpi x%x\n",
rc, ndlp->save_flags, ndlp->nlp_state,
ndlp->nlp_flag, ndlp->nlp_rpi);
@@ -718,8 +718,8 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
*/
spin_lock_irq(&ndlp->lock);
ndlp->da_id_waitq = NULL;
- ndlp->save_flags &= ~NLP_WAIT_FOR_DA_ID;
spin_unlock_irq(&ndlp->lock);
+ clear_bit(NLP_WAIT_FOR_DA_ID, &ndlp->save_flags);
}
issue_logo:
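Note: the LOGO and DA_ID paths above share one shape: set a wait bit in save_flags, issue the ELS/CT request, then wait_event_timeout() until the completion handler clears the bit and wakes the waiter. The sketch below is illustrative only; the my_* names, the bit index, and the whole-seconds timeout are placeholders, not driver API.

/* Illustrative sketch only -- not part of the patch above. */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

#define MY_WAIT_BIT	0	/* stands in for NLP_WAIT_FOR_LOGO / _DA_ID */

static int my_issue_and_wait(unsigned long *flags, wait_queue_head_t *waitq,
			     unsigned int tmo_secs)
{
	set_bit(MY_WAIT_BIT, flags);

	/* ... issue the request here; its completion handler does:
	 *	clear_bit(MY_WAIT_BIT, flags);
	 *	wake_up(waitq);
	 */

	wait_event_timeout(*waitq, !test_bit(MY_WAIT_BIT, flags),
			   secs_to_jiffies(tmo_secs));

	/* If the bit is still set, the wait timed out; clear it ourselves. */
	return test_and_clear_bit(MY_WAIT_BIT, flags) ? -ETIMEDOUT : 0;
}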
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 38976f94453e..2006094af418 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -855,8 +855,8 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
return scb;
#if MEGA_HAVE_CLUSTERING
- case RESERVE:
- case RELEASE:
+ case RESERVE_6:
+ case RELEASE_6:
/*
* Do we support clustering and is the support enabled
@@ -875,7 +875,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
}
scb->raw_mbox[0] = MEGA_CLUSTER_CMD;
- scb->raw_mbox[2] = ( *cmd->cmnd == RESERVE ) ?
+ scb->raw_mbox[2] = *cmd->cmnd == RESERVE_6 ?
MEGA_RESERVE_LD : MEGA_RELEASE_LD;
scb->raw_mbox[3] = ldrv_num;
@@ -1618,8 +1618,8 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
* failed or the input parameter is invalid
*/
if( status == 1 &&
- (cmd->cmnd[0] == RESERVE ||
- cmd->cmnd[0] == RELEASE) ) {
+ (cmd->cmnd[0] == RESERVE_6 ||
+ cmd->cmnd[0] == RELEASE_6) ) {
cmd->result |= (DID_ERROR << 16) |
SAM_STAT_RESERVATION_CONFLICT;
@@ -4551,7 +4551,7 @@ megaraid_shutdown(struct pci_dev *pdev)
__megaraid_shutdown(adapter);
}
-static struct pci_device_id megaraid_pci_tbl[] = {
+static const struct pci_device_id megaraid_pci_tbl[] = {
{PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID2,
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index bc867da650b6..d533a8aa72cc 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -199,7 +199,7 @@ MODULE_PARM_DESC(debug_level, "Debug level for driver (default=0)");
/*
* PCI table for all supported controllers.
*/
-static struct pci_device_id pci_id_table_g[] = {
+static const struct pci_device_id pci_id_table_g[] = {
{
PCI_VENDOR_ID_DELL,
PCI_DEVICE_ID_PERC4_DI_DISCOVERY,
@@ -621,7 +621,7 @@ megaraid_io_attach(adapter_t *adapter)
host = scsi_host_alloc(&megaraid_template_g, 8);
if (!host) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid mbox: scsi_register failed\n"));
+ "megaraid mbox: scsi_host_alloc failed\n"));
return -1;
}
@@ -1725,8 +1725,8 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
return scb;
- case RESERVE:
- case RELEASE:
+ case RESERVE_6:
+ case RELEASE_6:
/*
* Do we support clustering and is the support enabled
*/
@@ -1748,7 +1748,7 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
scb->dev_channel = 0xFF;
scb->dev_target = target;
ccb->raw_mbox[0] = CLUSTER_CMD;
- ccb->raw_mbox[2] = (scp->cmnd[0] == RESERVE) ?
+ ccb->raw_mbox[2] = scp->cmnd[0] == RESERVE_6 ?
RESERVE_LD : RELEASE_LD;
ccb->raw_mbox[3] = target;
@@ -2334,8 +2334,8 @@ megaraid_mbox_dpc(unsigned long devp)
* Error code returned is 1 if Reserve or Release
* failed or the input parameter is invalid
*/
- if (status == 1 && (scp->cmnd[0] == RESERVE ||
- scp->cmnd[0] == RELEASE)) {
+ if (status == 1 && (scp->cmnd[0] == RESERVE_6 ||
+ scp->cmnd[0] == RELEASE_6)) {
scp->result = DID_ERROR << 16 |
SAM_STAT_RESERVATION_CONFLICT;
@@ -3951,7 +3951,7 @@ megaraid_sysfs_get_ldmap(adapter_t *adapter)
}
- del_timer_sync(&timeout.timer);
+ timer_delete_sync(&timeout.timer);
destroy_timer_on_stack(&timeout.timer);
mutex_unlock(&raid_dev->sysfs_mtx);
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index c509440bd161..1f2cd15e3361 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -703,7 +703,7 @@ lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc)
*/
wait_event(wait_q, (kioc->status != -ENODATA));
if (timeout.timer.function) {
- del_timer_sync(&timeout.timer);
+ timer_delete_sync(&timeout.timer);
destroy_timer_on_stack(&timeout.timer);
}
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 088cc40ae866..8ee2bfe47571 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -23,8 +23,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.727.03.00-rc1"
-#define MEGASAS_RELDATE "Oct 03, 2023"
+#define MEGASAS_VERSION "07.734.00.00-rc1"
+#define MEGASAS_RELDATE "Apr 03, 2025"
#define MEGASAS_MSIX_NAME_LEN 32
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 50f1dcb6d584..6d4de06082d1 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -37,7 +37,6 @@
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/irq_poll.h>
-#include <linux/blk-mq-pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -94,7 +93,7 @@ static unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
module_param(scmd_timeout, int, 0444);
MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
-int perf_mode = -1;
+static int perf_mode = -1;
module_param(perf_mode, int, 0444);
MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t"
"0 - balanced: High iops and low latency queues are allocated &\n\t\t"
@@ -106,15 +105,15 @@ MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:
"default mode is 'balanced'"
);
-int event_log_level = MFI_EVT_CLASS_CRITICAL;
+static int event_log_level = MFI_EVT_CLASS_CRITICAL;
module_param(event_log_level, int, 0644);
MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level- range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)");
-unsigned int enable_sdev_max_qd;
+static unsigned int enable_sdev_max_qd;
module_param(enable_sdev_max_qd, int, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
-int poll_queues;
+static int poll_queues;
module_param(poll_queues, int, 0444);
 MODULE_PARM_DESC(poll_queues, "Number of queues to be used for io_uring poll mode.\n\t\t"
"This parameter is effective only if host_tagset_enable=1 &\n\t\t"
@@ -123,7 +122,7 @@ MODULE_PARM_DESC(poll_queues, "Number of queues to be use for io_uring poll mode
"High iops queues are not allocated &\n\t\t"
);
-int host_tagset_enable = 1;
+static int host_tagset_enable = 1;
module_param(host_tagset_enable, int, 0444);
MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)");
@@ -147,7 +146,7 @@ megasas_set_ld_removed_by_fw(struct megasas_instance *instance);
/*
* PCI ID table for all supported controllers
*/
-static struct pci_device_id megasas_pci_table[] = {
+static const struct pci_device_id megasas_pci_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
/* xscale IOP */
@@ -2068,8 +2067,8 @@ static void megasas_set_static_target_properties(struct scsi_device *sdev,
}
-static int megasas_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int megasas_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
u16 pd_index = 0;
struct megasas_instance *instance;
@@ -2104,12 +2103,15 @@ static int megasas_device_configure(struct scsi_device *sdev,
/* This sdev property may change post OCR */
megasas_set_dynamic_target_properties(sdev, lim, is_target_prop);
+ if (!MEGASAS_IS_LOGICAL(sdev))
+ sdev->no_vpd_size = 1;
+
mutex_unlock(&instance->reset_mutex);
return 0;
}
-static int megasas_slave_alloc(struct scsi_device *sdev)
+static int megasas_sdev_init(struct scsi_device *sdev)
{
u16 pd_index = 0, ld_tgt_id;
struct megasas_instance *instance ;
@@ -2154,7 +2156,7 @@ scan_target:
return 0;
}
-static void megasas_slave_destroy(struct scsi_device *sdev)
+static void megasas_sdev_destroy(struct scsi_device *sdev)
{
u16 ld_tgt_id;
struct megasas_instance *instance;
@@ -3193,7 +3195,7 @@ static void megasas_map_queues(struct Scsi_Host *shost)
map = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
map->nr_queues = instance->msix_vectors - offset;
map->queue_offset = 0;
- blk_mq_pci_map_queues(map, instance->pdev, offset);
+ blk_mq_map_hw_queues(map, &instance->pdev->dev, offset);
qoff += map->nr_queues;
offset += map->nr_queues;
@@ -3510,9 +3512,9 @@ static const struct scsi_host_template megasas_template = {
.module = THIS_MODULE,
.name = "Avago SAS based MegaRAID driver",
.proc_name = "megaraid_sas",
- .device_configure = megasas_device_configure,
- .slave_alloc = megasas_slave_alloc,
- .slave_destroy = megasas_slave_destroy,
+ .sdev_configure = megasas_sdev_configure,
+ .sdev_init = megasas_sdev_init,
+ .sdev_destroy = megasas_sdev_destroy,
.queuecommand = megasas_queue_command,
.eh_target_reset_handler = megasas_reset_target,
.eh_abort_handler = megasas_task_abort,
@@ -3663,8 +3665,10 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
case MFI_STAT_SCSI_IO_FAILED:
case MFI_STAT_LD_INIT_IN_PROGRESS:
- cmd->scmd->result =
- (DID_ERROR << 16) | hdr->scsi_status;
+ if (hdr->scsi_status == 0xf0)
+ cmd->scmd->result = (DID_ERROR << 16) | SAM_STAT_CHECK_CONDITION;
+ else
+ cmd->scmd->result = (DID_ERROR << 16) | hdr->scsi_status;
break;
case MFI_STAT_SCSI_DONE_WITH_ERROR:
@@ -5906,7 +5910,11 @@ megasas_set_high_iops_queue_affinity_and_hint(struct megasas_instance *instance)
const struct cpumask *mask;
if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
- mask = cpumask_of_node(dev_to_node(&instance->pdev->dev));
+ int nid = dev_to_node(&instance->pdev->dev);
+
+ if (nid == NUMA_NO_NODE)
+ nid = 0;
+ mask = cpumask_of_node(nid);
for (i = 0; i < instance->low_latency_index_start; i++) {
irq = pci_irq_vector(instance->pdev, i);
@@ -6522,7 +6530,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
fail_start_watchdog:
if (instance->requestorId && !instance->skip_heartbeat_timer_del)
- del_timer_sync(&instance->sriov_heartbeat_timer);
+ timer_delete_sync(&instance->sriov_heartbeat_timer);
fail_get_ld_pd_list:
instance->instancet->disable_intr(instance);
megasas_destroy_irqs(instance);
@@ -7604,7 +7612,7 @@ fail_io_attach:
megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
if (instance->requestorId && !instance->skip_heartbeat_timer_del)
- del_timer_sync(&instance->sriov_heartbeat_timer);
+ timer_delete_sync(&instance->sriov_heartbeat_timer);
instance->instancet->disable_intr(instance);
megasas_destroy_irqs(instance);
@@ -7744,7 +7752,7 @@ megasas_suspend(struct device *dev)
/* Shutdown SR-IOV heartbeat timer */
if (instance->requestorId && !instance->skip_heartbeat_timer_del)
- del_timer_sync(&instance->sriov_heartbeat_timer);
+ timer_delete_sync(&instance->sriov_heartbeat_timer);
/* Stop the FW fault detection watchdog */
if (instance->adapter_type != MFI_SERIES)
@@ -7908,7 +7916,7 @@ megasas_resume(struct device *dev)
fail_start_watchdog:
if (instance->requestorId && !instance->skip_heartbeat_timer_del)
- del_timer_sync(&instance->sriov_heartbeat_timer);
+ timer_delete_sync(&instance->sriov_heartbeat_timer);
fail_init_mfi:
megasas_free_ctrl_dma_buffers(instance);
megasas_free_ctrl_mem(instance);
@@ -7972,7 +7980,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
/* Shutdown SR-IOV heartbeat timer */
if (instance->requestorId && !instance->skip_heartbeat_timer_del)
- del_timer_sync(&instance->sriov_heartbeat_timer);
+ timer_delete_sync(&instance->sriov_heartbeat_timer);
/* Stop the FW fault detection watchdog */
if (instance->adapter_type != MFI_SERIES)
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 1eec23da28e2..a6794f49e9fa 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -2043,7 +2043,10 @@ map_cmd_status(struct fusion_context *fusion,
case MFI_STAT_SCSI_IO_FAILED:
case MFI_STAT_LD_INIT_IN_PROGRESS:
- scmd->result = (DID_ERROR << 16) | ext_status;
+ if (ext_status == 0xf0)
+ scmd->result = (DID_ERROR << 16) | SAM_STAT_CHECK_CONDITION;
+ else
+ scmd->result = (DID_ERROR << 16) | ext_status;
break;
case MFI_STAT_SCSI_DONE_WITH_ERROR:
@@ -4969,7 +4972,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
}
if (instance->requestorId && !instance->skip_heartbeat_timer_del)
- del_timer_sync(&instance->sriov_heartbeat_timer);
+ timer_delete_sync(&instance->sriov_heartbeat_timer);
set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
set_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags);
atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING);
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
index 00cd18edfad6..96401eb7e231 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
@@ -19,6 +19,7 @@
#define MPI3_CONFIG_PAGETYPE_PCIE_SWITCH (0x31)
#define MPI3_CONFIG_PAGETYPE_PCIE_LINK (0x33)
#define MPI3_CONFIG_PAGEATTR_MASK (0xf0)
+#define MPI3_CONFIG_PAGEATTR_SHIFT (4)
#define MPI3_CONFIG_PAGEATTR_READ_ONLY (0x00)
#define MPI3_CONFIG_PAGEATTR_CHANGEABLE (0x10)
#define MPI3_CONFIG_PAGEATTR_PERSISTENT (0x20)
@@ -29,10 +30,13 @@
#define MPI3_CONFIG_ACTION_READ_PERSISTENT (0x04)
#define MPI3_CONFIG_ACTION_WRITE_PERSISTENT (0x05)
#define MPI3_DEVICE_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_DEVICE_PGAD_FORM_SHIFT (28)
#define MPI3_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
#define MPI3_DEVICE_PGAD_FORM_HANDLE (0x20000000)
#define MPI3_DEVICE_PGAD_HANDLE_MASK (0x0000ffff)
+#define MPI3_DEVICE_PGAD_HANDLE_SHIFT (0)
#define MPI3_SAS_EXPAND_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_SAS_EXPAND_PGAD_FORM_SHIFT (28)
#define MPI3_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
#define MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM (0x10000000)
#define MPI3_SAS_EXPAND_PGAD_FORM_HANDLE (0x20000000)
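Each new *_SHIFT definition pairs with an existing *_MASK so field extraction can be written uniformly as mask-then-shift. Assuming that convention, a device page address built from the FORM/HANDLE fields decodes as follows (dev_handle is a placeholder value, not from the patch):

	u32 pgad = MPI3_DEVICE_PGAD_FORM_HANDLE | dev_handle;
	u8 form = (pgad & MPI3_DEVICE_PGAD_FORM_MASK) >>
		  MPI3_DEVICE_PGAD_FORM_SHIFT;		/* 0x2: handle form */
	u16 handle = (pgad & MPI3_DEVICE_PGAD_HANDLE_MASK) >>
		     MPI3_DEVICE_PGAD_HANDLE_SHIFT;	/* recovers dev_handle */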
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_image.h b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
index 2c6e548cbd0f..8d824107a678 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_image.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
@@ -66,7 +66,12 @@ struct mpi3_component_image_header {
#define MPI3_IMAGE_HEADER_SIGNATURE1_SMM (0x204d4d53)
#define MPI3_IMAGE_HEADER_SIGNATURE1_PSW (0x20575350)
#define MPI3_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546)
+#define MPI3_IMAGE_HEADER_FLAGS_SIGNED_UEFI_MASK (0x00000300)
+#define MPI3_IMAGE_HEADER_FLAGS_SIGNED_UEFI_SHIFT (8)
+#define MPI3_IMAGE_HEADER_FLAGS_CERT_CHAIN_FORMAT_MASK (0x000000c0)
+#define MPI3_IMAGE_HEADER_FLAGS_CERT_CHAIN_FORMAT_SHIFT (6)
#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_MASK (0x00000030)
+#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_SHIFT (4)
#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_CDI (0x00000000)
#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_DI (0x00000010)
#define MPI3_IMAGE_HEADER_FLAGS_SIGNED_NVDATA (0x00000008)
@@ -214,11 +219,13 @@ struct mpi3_encrypted_hash_entry {
#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH_1_OF_2 (0x04)
#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH_2_OF_2 (0x05)
#define MPI3_HASH_ALGORITHM_VERSION_MASK (0xe0)
+#define MPI3_HASH_ALGORITHM_VERSION_SHIFT (5)
#define MPI3_HASH_ALGORITHM_VERSION_NONE (0x00)
#define MPI3_HASH_ALGORITHM_VERSION_SHA1 (0x20)
#define MPI3_HASH_ALGORITHM_VERSION_SHA2 (0x40)
#define MPI3_HASH_ALGORITHM_VERSION_SHA3 (0x60)
#define MPI3_HASH_ALGORITHM_SIZE_MASK (0x1f)
+#define MPI3_HASH_ALGORITHM_SIZE_SHIFT (0)
#define MPI3_HASH_ALGORITHM_SIZE_UNUSED (0x00)
#define MPI3_HASH_ALGORITHM_SIZE_SHA256 (0x01)
#define MPI3_HASH_ALGORITHM_SIZE_SHA512 (0x02)
@@ -236,6 +243,7 @@ struct mpi3_encrypted_hash_entry {
#define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_65 (0x0c)
#define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_44 (0x0d)
#define MPI3_ENCRYPTED_HASH_ENTRY_FLAGS_PAIRED_KEY_MASK (0x0f)
+#define MPI3_ENCRYPTED_HASH_ENTRY_FLAGS_PAIRED_KEY_SHIFT (0)
#ifndef MPI3_ENCRYPTED_HASH_ENTRY_MAX
#define MPI3_ENCRYPTED_HASH_ENTRY_MAX (1)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_init.h b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
index af86d12c8e49..bbef5bac92ed 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_init.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
@@ -38,23 +38,31 @@ struct mpi3_scsi_io_request {
#define MPI3_SCSIIO_MSGFLAGS_METASGL_VALID (0x80)
#define MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE (0x40)
#define MPI3_SCSIIO_FLAGS_LARGE_CDB (0x60000000)
+#define MPI3_SCSIIO_FLAGS_LARGE_CDB_MASK (0x60000000)
+#define MPI3_SCSIIO_FLAGS_LARGE_CDB_SHIFT (29)
+#define MPI3_SCSIIO_FLAGS_IOC_USE_ONLY_27_MASK (0x18000000)
+#define MPI3_SCSIIO_FLAGS_IOC_USE_ONLY_27_SHIFT (27)
#define MPI3_SCSIIO_FLAGS_CDB_16_OR_LESS (0x00000000)
#define MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16 (0x20000000)
#define MPI3_SCSIIO_FLAGS_CDB_IN_SEPARATE_BUFFER (0x40000000)
#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_MASK (0x07000000)
+#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SHIFT (24)
+#define MPI3_SCSIIO_FLAGS_DATADIRECTION_MASK (0x000c0000)
+#define MPI3_SCSIIO_FLAGS_DATADIRECTION_SHIFT (18)
#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ (0x00000000)
#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_HEADOFQ (0x01000000)
#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ORDEREDQ (0x02000000)
#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ACAQ (0x04000000)
#define MPI3_SCSIIO_FLAGS_CMDPRI_MASK (0x00f00000)
#define MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT (20)
-#define MPI3_SCSIIO_FLAGS_DATADIRECTION_MASK (0x000c0000)
#define MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER (0x00000000)
#define MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE (0x00040000)
#define MPI3_SCSIIO_FLAGS_DATADIRECTION_READ (0x00080000)
#define MPI3_SCSIIO_FLAGS_DMAOPERATION_MASK (0x00030000)
+#define MPI3_SCSIIO_FLAGS_DMAOPERATION_SHIFT (16)
#define MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI (0x00010000)
#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_MASK (0x000000f0)
+#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_SHIFT (4)
#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING (0x00000010)
#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE (0x00000020)
#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_PROD_SPECIFIC (0x00000080)
@@ -99,6 +107,7 @@ struct mpi3_scsi_io_reply {
#define MPI3_SCSI_STATUS_ACA_ACTIVE (0x30)
#define MPI3_SCSI_STATUS_TASK_ABORTED (0x40)
#define MPI3_SCSI_STATE_SENSE_MASK (0x03)
+#define MPI3_SCSI_STATE_SENSE_SHIFT (0)
#define MPI3_SCSI_STATE_SENSE_VALID (0x00)
#define MPI3_SCSI_STATE_SENSE_FAILED (0x01)
#define MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY (0x02)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
index c374867f9ba0..b42933fcd423 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
@@ -30,6 +30,7 @@ struct mpi3_ioc_init_request {
#define MPI3_IOCINIT_MSGFLAGS_WRITESAMEDIVERT_SUPPORTED (0x08)
#define MPI3_IOCINIT_MSGFLAGS_SCSIIOSTATUSREPLY_SUPPORTED (0x04)
#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_MASK (0x03)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_SHIFT (0)
#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_NOT_USED (0x00)
#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_SEPARATED (0x01)
#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_INLINE (0x02)
@@ -40,6 +41,7 @@ struct mpi3_ioc_init_request {
#define MPI3_WHOINIT_MANUFACTURER (0x04)
#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_MASK (0x00000003)
+#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_SHIFT (0)
#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_NO_GUIDANCE (0x00000000)
#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_NO_SPECIAL (0x00000001)
#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_REPORT_AS_HDD (0x00000002)
@@ -111,9 +113,11 @@ struct mpi3_ioc_facts_data {
__le32 diag_tty_size;
};
#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_MASK (0x80000000)
+#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_SHIFT (31)
#define MPI3_IOCFACTS_CAPABILITY_SUPERVISOR_IOC (0x00000000)
#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC (0x80000000)
#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_MASK (0x00000600)
+#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_SHIFT (9)
#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_FIXED_THRESHOLD (0x00000000)
#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_OUTSTANDING_IO (0x00000200)
#define MPI3_IOCFACTS_CAPABILITY_COMPLETE_RESET_SUPPORTED (0x00000100)
@@ -134,6 +138,7 @@ struct mpi3_ioc_facts_data {
#define MPI3_IOCFACTS_EXCEPT_SAS_DISABLED (0x1000)
#define MPI3_IOCFACTS_EXCEPT_SAFE_MODE (0x0800)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_MASK (0x0700)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_SHIFT (8)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_NONE (0x0000)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_MGMT (0x0100)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_MGMT (0x0200)
@@ -149,6 +154,7 @@ struct mpi3_ioc_facts_data {
#define MPI3_IOCFACTS_EXCEPT_BLOCKING_BOOT_EVENT (0x0004)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_SELFTEST_FAILURE (0x0002)
#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x0001)
+#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_SHIFT (0)
#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_PRIMARY (0x0000)
#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_SECONDARY (0x0001)
#define MPI3_IOCFACTS_PROTOCOL_SAS (0x0010)
@@ -161,10 +167,12 @@ struct mpi3_ioc_facts_data {
#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK (0x0000ff00)
#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT (8)
#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK (0x00000030)
+#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_SHIFT (4)
#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_NOT_STARTED (0x00000000)
#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_IN_PROGRESS (0x00000010)
#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_COMPLETE (0x00000020)
#define MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK (0x0000000f)
+#define MPI3_IOCFACTS_FLAGS_PERSONALITY_SHIFT (0)
#define MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA (0x00000000)
#define MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR (0x00000002)
#define MPI3_IOCFACTS_IO_THROTTLE_DATA_LENGTH_NOT_REQUIRED (0x0000)
@@ -204,6 +212,7 @@ struct mpi3_create_request_queue_request {
};
#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_MASK (0x80)
+#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SHIFT (7)
#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80)
#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00)
#define MPI3_CREATE_REQUEST_QUEUE_SIZE_MINIMUM (2)
@@ -237,10 +246,12 @@ struct mpi3_create_reply_queue_request {
};
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_MASK (0x80)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_SHIFT (7)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_COALESCE_DISABLE (0x02)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_MASK (0x01)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_SHIFT (0)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_DISABLE (0x00)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE (0x01)
#define MPI3_CREATE_REPLY_QUEUE_SIZE_MINIMUM (2)
@@ -326,9 +337,11 @@ struct mpi3_event_notification_reply {
};
#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK (0x01)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_SHIFT (0)
#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED (0x01)
#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_NOT_REQUIRED (0x00)
#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_MASK (0x02)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_SHIFT (1)
#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_ORIGINAL (0x00)
#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_REPLAY (0x02)
struct mpi3_event_data_gpio_interrupt {
@@ -487,6 +500,7 @@ struct mpi3_event_sas_topo_phy_entry {
#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_NO_EXIST (0x40)
#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_VACANT (0x80)
#define MPI3_EVENT_SAS_TOPO_PHY_RC_MASK (0x0f)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_SHIFT (0)
#define MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING (0x02)
#define MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED (0x03)
#define MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE (0x04)
@@ -566,6 +580,7 @@ struct mpi3_event_pcie_topo_port_entry {
#define MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING (0x05)
#define MPI3_EVENT_PCIE_TOPO_PS_RESPONDING (0x06)
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_MASK (0xf0)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_SHIFT (4)
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_UNKNOWN (0x00)
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_1 (0x10)
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_2 (0x20)
@@ -573,6 +588,7 @@ struct mpi3_event_pcie_topo_port_entry {
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_8 (0x40)
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_16 (0x50)
#define MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK (0x0f)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_SHIFT (0)
#define MPI3_EVENT_PCIE_TOPO_PI_RATE_UNKNOWN (0x00)
#define MPI3_EVENT_PCIE_TOPO_PI_RATE_DISABLED (0x01)
#define MPI3_EVENT_PCIE_TOPO_PI_RATE_2_5 (0x02)
@@ -881,6 +897,7 @@ struct mpi3_pel_req_action_acknowledge {
};
#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_MASK (0x03)
+#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_SHIFT (0)
#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_NO_GUIDANCE (0x00)
#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_CONTINUE_OP (0x01)
#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_TRANSITION_TO_FAULT (0x02)
@@ -924,6 +941,7 @@ struct mpi3_ci_download_request {
#define MPI3_CI_DOWNLOAD_MSGFLAGS_FORCE_FMC_ENABLE (0x40)
#define MPI3_CI_DOWNLOAD_MSGFLAGS_SIGNED_NVDATA (0x20)
#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_MASK (0x03)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_SHIFT (0)
#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_FAST (0x00)
#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_MEDIUM (0x01)
#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_SLOW (0x02)
@@ -953,6 +971,7 @@ struct mpi3_ci_download_reply {
#define MPI3_CI_DOWNLOAD_FLAGS_OFFLINE_ACTIVATION_REQUIRED (0x20)
#define MPI3_CI_DOWNLOAD_FLAGS_KEY_UPDATE_PENDING (0x10)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_MASK (0x0e)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_SHIFT (1)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_NOT_NEEDED (0x00)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_AWAITING (0x02)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_ONLINE_PENDING (0x04)
@@ -976,9 +995,11 @@ struct mpi3_ci_upload_request {
};
#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_MASK (0x01)
+#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_SHIFT (0)
#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY (0x00)
#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_SECONDARY (0x01)
#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_MASK (0x02)
+#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_SHIFT (1)
#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_FLASH (0x00)
#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_EXECUTABLE (0x02)
#define MPI3_CTRL_OP_FORCE_FULL_DISCOVERY (0x01)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_tool.h b/drivers/scsi/mpi3mr/mpi/mpi30_tool.h
index 3b960893870f..50a65b16a818 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_tool.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_tool.h
@@ -9,6 +9,7 @@
#define MPI3_DIAG_BUFFER_TYPE_FW (0x02)
#define MPI3_DIAG_BUFFER_ACTION_RELEASE (0x01)
+#define MPI3_DIAG_BUFFER_POST_MSGFLAGS_SEGMENTED (0x01)
struct mpi3_diag_buffer_post_request {
__le16 host_tag;
u8 ioc_use_only02;
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
index b2ab25a1cfeb..5c522e2531c3 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
@@ -18,7 +18,7 @@ union mpi3_version_union {
#define MPI3_VERSION_MAJOR (3)
#define MPI3_VERSION_MINOR (0)
-#define MPI3_VERSION_UNIT (34)
+#define MPI3_VERSION_UNIT (35)
#define MPI3_VERSION_DEV (0)
#define MPI3_DEVHANDLE_INVALID (0xffff)
struct mpi3_sysif_oper_queue_indexes {
@@ -80,6 +80,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT (20)
#define MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ (0x000f0000)
#define MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT (16)
+#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_SHIFT (14)
#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_MASK (0x0000c000)
#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NO (0x00000000)
#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL (0x00004000)
@@ -97,6 +98,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_IOC_STATUS_READY (0x00000001)
#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_OFFSET (0x00000024)
#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REQ_MASK (0x0fff)
+#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REQ_SHIFT (0)
#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_OFFSET (0x00000026)
#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_MASK (0x0fff0000)
#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_SHIFT (16)
@@ -106,6 +108,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_ADMIN_REPLY_Q_ADDR_HIGH_OFFSET (0x00000034)
#define MPI3_SYSIF_COALESCE_CONTROL_OFFSET (0x00000040)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_MASK (0xc0000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_SHIFT (30)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_NO_CHANGE (0x00000000)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_DISABLE (0x40000000)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_ENABLE (0xc0000000)
@@ -124,6 +127,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(N) (MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET + (((N) - 1) * 8))
#define MPI3_SYSIF_WRITE_SEQUENCE_OFFSET (0x00001c04)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_MASK (0x0000000f)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_SHIFT (0)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH (0x0)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST (0xf)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND (0x4)
@@ -133,6 +137,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH (0xd)
#define MPI3_SYSIF_HOST_DIAG_OFFSET (0x00001c08)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_MASK (0x00000700)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SHIFT (8)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_NO_RESET (0x00000000)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET (0x00000100)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_HOST_CONTROL_BOOT_RESET (0x00000200)
@@ -151,6 +156,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_FAULT_FUNC_AREA_SHIFT (24)
#define MPI3_SYSIF_FAULT_FUNC_AREA_MPI_DEFINED (0x00000000)
#define MPI3_SYSIF_FAULT_CODE_MASK (0x0000ffff)
+#define MPI3_SYSIF_FAULT_CODE_SHIFT (0)
#define MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET (0x0000f000)
#define MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET (0x0000f001)
#define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS (0x0000f002)
@@ -176,17 +182,20 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_DIAG_RW_ADDRESS_HIGH_OFFSET (0x00001c5c)
#define MPI3_SYSIF_DIAG_RW_CONTROL_OFFSET (0x00001c60)
#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_MASK (0x00000030)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_SHIFT (4)
#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_1BYTE (0x00000000)
#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_2BYTES (0x00000010)
#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_4BYTES (0x00000020)
#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_8BYTES (0x00000030)
#define MPI3_SYSIF_DIAG_RW_CONTROL_RESET (0x00000004)
#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_MASK (0x00000002)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_SHIFT (1)
#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_READ (0x00000000)
#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_WRITE (0x00000002)
#define MPI3_SYSIF_DIAG_RW_CONTROL_START (0x00000001)
#define MPI3_SYSIF_DIAG_RW_STATUS_OFFSET (0x00001c62)
#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_MASK (0x0000000e)
+#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_SHIFT (1)
#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_SUCCESS (0x00000000)
#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_INV_ADDR (0x00000002)
#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_ACC_ERR (0x00000004)
@@ -207,7 +216,9 @@ struct mpi3_default_reply_descriptor {
};
#define MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK (0x0001)
+#define MPI3_REPLY_DESCRIPT_FLAGS_PHASE_SHIFT (0)
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK (0xf000)
+#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SHIFT (12)
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY (0x0000)
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS (0x1000)
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_TARGET_COMMAND_BUFFER (0x2000)
@@ -301,6 +312,7 @@ union mpi3_sge_union {
};
#define MPI3_SGE_FLAGS_ELEMENT_TYPE_MASK (0xf0)
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_SHIFT (4)
#define MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE (0x00)
#define MPI3_SGE_FLAGS_ELEMENT_TYPE_BIT_BUCKET (0x10)
#define MPI3_SGE_FLAGS_ELEMENT_TYPE_CHAIN (0x20)
@@ -309,6 +321,7 @@ union mpi3_sge_union {
#define MPI3_SGE_FLAGS_END_OF_LIST (0x08)
#define MPI3_SGE_FLAGS_END_OF_BUFFER (0x04)
#define MPI3_SGE_FLAGS_DLAS_MASK (0x03)
+#define MPI3_SGE_FLAGS_DLAS_SHIFT (0)
#define MPI3_SGE_FLAGS_DLAS_SYSTEM (0x00)
#define MPI3_SGE_FLAGS_DLAS_IOC_UDP (0x01)
#define MPI3_SGE_FLAGS_DLAS_IOC_CTL (0x02)
@@ -322,15 +335,18 @@ union mpi3_sge_union {
#define MPI3_EEDPFLAGS_CHK_APP_TAG (0x0200)
#define MPI3_EEDPFLAGS_CHK_GUARD (0x0100)
#define MPI3_EEDPFLAGS_ESC_MODE_MASK (0x00c0)
+#define MPI3_EEDPFLAGS_ESC_MODE_SHIFT (6)
#define MPI3_EEDPFLAGS_ESC_MODE_DO_NOT_DISABLE (0x0040)
#define MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE (0x0080)
#define MPI3_EEDPFLAGS_ESC_MODE_APPTAG_REFTAG_DISABLE (0x00c0)
#define MPI3_EEDPFLAGS_HOST_GUARD_MASK (0x0030)
+#define MPI3_EEDPFLAGS_HOST_GUARD_SHIFT (4)
#define MPI3_EEDPFLAGS_HOST_GUARD_T10_CRC (0x0000)
#define MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM (0x0010)
#define MPI3_EEDPFLAGS_HOST_GUARD_OEM_SPECIFIC (0x0020)
#define MPI3_EEDPFLAGS_PT_REF_TAG (0x0008)
#define MPI3_EEDPFLAGS_EEDP_OP_MASK (0x0007)
+#define MPI3_EEDPFLAGS_EEDP_OP_SHIFT (0)
#define MPI3_EEDPFLAGS_EEDP_OP_CHECK (0x0001)
#define MPI3_EEDPFLAGS_EEDP_OP_STRIP (0x0002)
#define MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE (0x0003)
@@ -403,6 +419,7 @@ struct mpi3_default_reply {
#define MPI3_IOCSTATUS_LOG_INFO_AVAIL_MASK (0x8000)
#define MPI3_IOCSTATUS_LOG_INFO_AVAILABLE (0x8000)
#define MPI3_IOCSTATUS_STATUS_MASK (0x7fff)
+#define MPI3_IOCSTATUS_STATUS_SHIFT (0)
#define MPI3_IOCSTATUS_SUCCESS (0x0000)
#define MPI3_IOCSTATUS_INVALID_FUNCTION (0x0001)
#define MPI3_IOCSTATUS_BUSY (0x0002)
@@ -469,4 +486,5 @@ struct mpi3_default_reply {
#define MPI3_IOCLOGINFO_TYPE_NONE (0x0)
#define MPI3_IOCLOGINFO_TYPE_SAS (0x3)
#define MPI3_IOCLOGINFO_LOG_DATA_MASK (0x0fffffff)
+#define MPI3_IOCLOGINFO_LOG_DATA_SHIFT (0)
#endif
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 0c3e1ac076b5..9bbc7cb98ca3 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -12,7 +12,6 @@
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-pci.h>
#include <linux/delay.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
@@ -57,8 +56,8 @@ extern struct list_head mrioc_list;
extern int prot_mask;
extern atomic64_t event_counter;
-#define MPI3MR_DRIVER_VERSION "8.12.0.3.50"
-#define MPI3MR_DRIVER_RELDATE "11-November-2024"
+#define MPI3MR_DRIVER_VERSION "8.13.0.5.50"
+#define MPI3MR_DRIVER_RELDATE "20-February-2025"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
@@ -81,13 +80,14 @@ extern atomic64_t event_counter;
/* Admin queue management definitions */
#define MPI3MR_ADMIN_REQ_Q_SIZE (2 * MPI3MR_PAGE_SIZE_4K)
-#define MPI3MR_ADMIN_REPLY_Q_SIZE (4 * MPI3MR_PAGE_SIZE_4K)
+#define MPI3MR_ADMIN_REPLY_Q_SIZE (8 * MPI3MR_PAGE_SIZE_4K)
#define MPI3MR_ADMIN_REQ_FRAME_SZ 128
#define MPI3MR_ADMIN_REPLY_FRAME_SZ 16
/* Operational queue management definitions */
#define MPI3MR_OP_REQ_Q_QD 512
#define MPI3MR_OP_REP_Q_QD 1024
+#define MPI3MR_OP_REP_Q_QD2K 2048
#define MPI3MR_OP_REP_Q_QD4K 4096
#define MPI3MR_OP_REQ_Q_SEG_SIZE 4096
#define MPI3MR_OP_REP_Q_SEG_SIZE 4096
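A quick sizing check on the admin reply queue growth: 8 pages * 4096 B = 32768 B of reply space, and at MPI3MR_ADMIN_REPLY_FRAME_SZ (16 B) per descriptor that is 2048 admin reply entries, double the 1024 held by the previous 4-page queue. MPI3MR_OP_REP_Q_QD2K likewise introduces a 2048-entry depth option for operational reply queues.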
@@ -329,6 +329,7 @@ enum mpi3mr_reset_reason {
#define MPI3MR_RESET_REASON_OSTYPE_SHIFT 28
#define MPI3MR_RESET_REASON_IOCNUM_SHIFT 20
+
/* Queue type definitions */
enum queue_type {
MPI3MR_DEFAULT_QUEUE = 0,
@@ -388,6 +389,7 @@ struct mpi3mr_ioc_facts {
u16 max_msix_vectors;
u8 personality;
u8 dma_mask;
+ bool max_req_limit;
u8 protocol_flags;
u8 sge_mod_mask;
u8 sge_mod_value;
@@ -457,6 +459,8 @@ struct op_req_qinfo {
* @enable_irq_poll: Flag to indicate polling is enabled
* @in_use: Queue is handled by poll/ISR
* @qtype: Type of queue (types defined in enum queue_type)
+ * @qfull_watermark: Watermark at which the reply queue is treated as
+ *                   nearly full, used to avoid a reply queue full condition
*/
struct op_reply_qinfo {
u16 ci;
@@ -472,6 +476,7 @@ struct op_reply_qinfo {
bool enable_irq_poll;
atomic_t in_use;
enum queue_type qtype;
+ u16 qfull_watermark;
};
/**
@@ -929,6 +934,8 @@ struct trigger_event_data {
* @size: Buffer size
* @addr: Virtual address
* @dma_addr: Buffer DMA address
+ * @is_segmented: Whether the buffer is segmented
+ * @disabled_after_reset: Whether the buffer is disabled after reset
*/
struct diag_buffer_desc {
u8 type;
@@ -938,6 +945,8 @@ struct diag_buffer_desc {
u32 size;
void *addr;
dma_addr_t dma_addr;
+ bool is_segmented;
+ bool disabled_after_reset;
};
/**
@@ -1023,6 +1032,8 @@ struct scmd_priv {
* @admin_reply_base: Admin reply queue base virtual address
* @admin_reply_dma: Admin reply queue base dma address
* @admin_reply_q_in_use: Queue is handled by poll/ISR
+ * @admin_pend_isr: Count of unprocessed admin ISR/poll calls
+ * due to another thread processing replies
* @ready_timeout: Controller ready timeout
* @intr_info: Interrupt cookie pointer
* @intr_info_count: Number of interrupt cookies
@@ -1091,6 +1102,7 @@ struct scmd_priv {
* @ts_update_interval: Timestamp update interval
* @reset_in_progress: Reset in progress flag
* @unrecoverable: Controller unrecoverable flag
+ * @io_admin_reset_sync: Manage state of I/O ops during an admin reset process
* @prev_reset_result: Result of previous reset
* @reset_mutex: Controller reset mutex
* @reset_waitq: Controller reset wait queue
@@ -1154,6 +1166,12 @@ struct scmd_priv {
* @snapdump_trigger_active: Snapdump trigger active flag
* @pci_err_recovery: PCI error recovery in progress
* @block_on_pci_err: Block IO during PCI error recovery
+ * @reply_qfull_count: Number of times reply queue full avoidance kicked in
+ * @prevent_reply_qfull: Enable reply queue full prevention
+ * @seg_tb_support: Segmented trace buffer support
+ * @num_tb_segs: Number of segments in the trace buffer
+ * @trace_buf_pool: DMA pool for segmented trace buffer segments
+ * @trace_buf: Trace buffer segments memory descriptor
*/
struct mpi3mr_ioc {
struct list_head list;
@@ -1190,6 +1208,7 @@ struct mpi3mr_ioc {
void *admin_reply_base;
dma_addr_t admin_reply_dma;
atomic_t admin_reply_q_in_use;
+ atomic_t admin_pend_isr;
u32 ready_timeout;
@@ -1277,6 +1296,7 @@ struct mpi3mr_ioc {
u16 ts_update_interval;
u8 reset_in_progress;
u8 unrecoverable;
+ u8 io_admin_reset_sync;
int prev_reset_result;
struct mutex reset_mutex;
wait_queue_head_t reset_waitq;
@@ -1352,6 +1372,13 @@ struct mpi3mr_ioc {
bool fw_release_trigger_active;
bool pci_err_recovery;
bool block_on_pci_err;
+ atomic_t reply_qfull_count;
+ bool prevent_reply_qfull;
+ bool seg_tb_support;
+ u32 num_tb_segs;
+ struct dma_pool *trace_buf_pool;
+ struct segments *trace_buf;
+
};
/**
diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
index 7589f48aebc8..f36663613950 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
@@ -12,23 +12,98 @@
#include <uapi/scsi/scsi_bsg_mpi3mr.h>
/**
- * mpi3mr_alloc_trace_buffer: Allocate trace buffer
+ * mpi3mr_alloc_trace_buffer: Allocate segmented trace buffer
* @mrioc: Adapter instance reference
* @trace_size: Trace buffer size
*
- * Allocate trace buffer
+ * Allocate either segmented memory pools or contiguous buffer
+ * based on the controller capability for the host trace
+ * buffer.
+ *
* Return: 0 on success, non-zero on failure.
*/
static int mpi3mr_alloc_trace_buffer(struct mpi3mr_ioc *mrioc, u32 trace_size)
{
struct diag_buffer_desc *diag_buffer = &mrioc->diag_buffers[0];
+ int i, sz;
+ u64 *diag_buffer_list = NULL;
+ dma_addr_t diag_buffer_list_dma;
+ u32 seg_count;
+
+ if (mrioc->seg_tb_support) {
+ seg_count = (trace_size) / MPI3MR_PAGE_SIZE_4K;
+ trace_size = seg_count * MPI3MR_PAGE_SIZE_4K;
+
+ diag_buffer_list = dma_alloc_coherent(&mrioc->pdev->dev,
+ sizeof(u64) * seg_count,
+ &diag_buffer_list_dma, GFP_KERNEL);
+ if (!diag_buffer_list)
+ return -1;
+
+ mrioc->num_tb_segs = seg_count;
+
+ sz = sizeof(struct segments) * seg_count;
+ mrioc->trace_buf = kzalloc(sz, GFP_KERNEL);
+ if (!mrioc->trace_buf)
+ goto trace_buf_failed;
+
+ mrioc->trace_buf_pool = dma_pool_create("trace_buf pool",
+ &mrioc->pdev->dev, MPI3MR_PAGE_SIZE_4K, MPI3MR_PAGE_SIZE_4K,
+ 0);
+ if (!mrioc->trace_buf_pool) {
+ ioc_err(mrioc, "trace buf pool: dma_pool_create failed\n");
+ goto trace_buf_pool_failed;
+ }
- diag_buffer->addr = dma_alloc_coherent(&mrioc->pdev->dev,
- trace_size, &diag_buffer->dma_addr, GFP_KERNEL);
- if (diag_buffer->addr) {
- dprint_init(mrioc, "trace diag buffer is allocated successfully\n");
+ for (i = 0; i < seg_count; i++) {
+ mrioc->trace_buf[i].segment =
+ dma_pool_zalloc(mrioc->trace_buf_pool, GFP_KERNEL,
+ &mrioc->trace_buf[i].segment_dma);
+ diag_buffer_list[i] =
+ (u64) mrioc->trace_buf[i].segment_dma;
+ if (!diag_buffer_list[i])
+ goto tb_seg_alloc_failed;
+ }
+
+ diag_buffer->addr = diag_buffer_list;
+ diag_buffer->dma_addr = diag_buffer_list_dma;
+ diag_buffer->is_segmented = true;
+
+ dprint_init(mrioc, "segmented trace diag buffer\n"
+ "is allocated successfully seg_count:%d\n", seg_count);
return 0;
+ } else {
+ diag_buffer->addr = dma_alloc_coherent(&mrioc->pdev->dev,
+ trace_size, &diag_buffer->dma_addr, GFP_KERNEL);
+ if (diag_buffer->addr) {
+ dprint_init(mrioc, "trace diag buffer is allocated successfully\n");
+ return 0;
+ }
+ return -1;
}
+
+tb_seg_alloc_failed:
+ if (mrioc->trace_buf_pool) {
+ for (i = 0; i < mrioc->num_tb_segs; i++) {
+ if (mrioc->trace_buf[i].segment) {
+ dma_pool_free(mrioc->trace_buf_pool,
+ mrioc->trace_buf[i].segment,
+ mrioc->trace_buf[i].segment_dma);
+ mrioc->trace_buf[i].segment = NULL;
+ }
+ mrioc->trace_buf[i].segment = NULL;
+ }
+ dma_pool_destroy(mrioc->trace_buf_pool);
+ mrioc->trace_buf_pool = NULL;
+ }
+trace_buf_pool_failed:
+ kfree(mrioc->trace_buf);
+ mrioc->trace_buf = NULL;
+trace_buf_failed:
+ if (diag_buffer_list)
+ dma_free_coherent(&mrioc->pdev->dev,
+ sizeof(u64) * mrioc->num_tb_segs,
+ diag_buffer_list, diag_buffer_list_dma);
return -1;
}
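One way to read the segmented path above: what is handed to the firmware is not the trace data itself but a table of per-segment DMA addresses. A rough sketch of that layout, using the names from the function (the struct is purely illustrative):

struct seg_trace_layout {			/* illustrative only */
	u64	   *seg_dma_table;	/* diag_buffer_list: one u64 per 4 KB segment */
	dma_addr_t  seg_dma_table_dma;	/* diag_buffer_list_dma: address posted to the firmware */
	u32	    seg_count;		/* trace_size / MPI3MR_PAGE_SIZE_4K */
};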
@@ -100,8 +175,9 @@ retry_trace:
dprint_init(mrioc,
"trying to allocate trace diag buffer of size = %dKB\n",
trace_size / 1024);
- if (get_order(trace_size) > MAX_PAGE_ORDER ||
+ if ((!mrioc->seg_tb_support && (get_order(trace_size) > MAX_PAGE_ORDER)) ||
mpi3mr_alloc_trace_buffer(mrioc, trace_size)) {
+
retry = true;
trace_size -= trace_dec_size;
dprint_init(mrioc, "trace diag buffer allocation failed\n"
@@ -161,6 +237,12 @@ int mpi3mr_issue_diag_buf_post(struct mpi3mr_ioc *mrioc,
u8 prev_status;
int retval = 0;
+ if (diag_buffer->disabled_after_reset) {
+ dprint_bsg_err(mrioc, "%s: skipping diag buffer posting\n"
+ "as it is disabled after reset\n", __func__);
+ return -1;
+ }
+
memset(&diag_buf_post_req, 0, sizeof(diag_buf_post_req));
mutex_lock(&mrioc->init_cmds.mutex);
if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
@@ -177,8 +259,12 @@ int mpi3mr_issue_diag_buf_post(struct mpi3mr_ioc *mrioc,
diag_buf_post_req.address = le64_to_cpu(diag_buffer->dma_addr);
diag_buf_post_req.length = le32_to_cpu(diag_buffer->size);
- dprint_bsg_info(mrioc, "%s: posting diag buffer type %d\n", __func__,
- diag_buffer->type);
+ if (diag_buffer->is_segmented)
+ diag_buf_post_req.msg_flags |= MPI3_DIAG_BUFFER_POST_MSGFLAGS_SEGMENTED;
+
+ dprint_bsg_info(mrioc, "%s: posting diag buffer type %d segmented:%d\n", __func__,
+ diag_buffer->type, diag_buffer->is_segmented);
+
prev_status = diag_buffer->status;
diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED;
init_completion(&mrioc->init_cmds.done);
@@ -2339,6 +2425,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
}
if (!mrioc->ioctl_sges_allocated) {
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
dprint_bsg_err(mrioc, "%s: DMA memory was not allocated\n",
__func__);
return -ENOMEM;
@@ -3061,6 +3148,29 @@ reply_queue_count_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(reply_queue_count);
/**
+ * reply_qfull_count_show - Show reply qfull count
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Retrieves the current value of the reply_qfull_count from the mrioc structure and
+ * formats it as a string for display.
+ *
+ * Return: sysfs_emit() return
+ */
+static ssize_t
+reply_qfull_count_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+
+ return sysfs_emit(buf, "%u\n", atomic_read(&mrioc->reply_qfull_count));
+}
+
+static DEVICE_ATTR_RO(reply_qfull_count);
+
+/**
* logging_level_show - Show controller debug level
* @dev: class device
* @attr: Device attributes
@@ -3152,6 +3262,7 @@ static struct attribute *mpi3mr_host_attrs[] = {
&dev_attr_fw_queue_depth.attr,
&dev_attr_op_req_q_count.attr,
&dev_attr_reply_queue_count.attr,
+ &dev_attr_reply_qfull_count.attr,
&dev_attr_logging_level.attr,
&dev_attr_adp_state.attr,
NULL,
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 5ed31fe57474..1d7901a8f0e4 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -17,7 +17,7 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
struct mpi3_ioc_facts_data *facts_data);
static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
struct mpi3mr_drv_cmd *drv_cmd);
-
+static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc);
static int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");
@@ -174,6 +174,9 @@ static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
char *desc = NULL;
u16 event;
+ if (!(mrioc->logging_level & MPI3_DEBUG_EVENT))
+ return;
+
event = event_reply->event;
switch (event) {
@@ -446,9 +449,12 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
u16 threshold_comps = 0;
struct mpi3_default_reply_descriptor *reply_desc;
- if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1))
+ if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1)) {
+ atomic_inc(&mrioc->admin_pend_isr);
return 0;
+ }
+ atomic_set(&mrioc->admin_pend_isr, 0);
reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
admin_reply_ci;
@@ -459,7 +465,7 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
}
do {
- if (mrioc->unrecoverable)
+ if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
break;
mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
@@ -554,7 +560,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
}
do {
- if (mrioc->unrecoverable)
+ if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
break;
req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
@@ -563,7 +569,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
reply_qidx);
- atomic_dec(&op_reply_q->pend_ios);
+
if (reply_dma)
mpi3mr_repost_reply_buf(mrioc, reply_dma);
num_op_reply++;
@@ -1302,7 +1308,7 @@ static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
(ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
retval = 0;
- ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
+ ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%08x)/(0x%08x)\n",
(!retval) ? "successful" : "failed", ioc_status, ioc_config);
return retval;
}
@@ -1355,6 +1361,19 @@ mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
"\tcontroller while sas transport support is enabled at the\n"
"\tdriver, please reboot the system or reload the driver\n");
+ if (mrioc->seg_tb_support) {
+ if (!(mrioc->facts.ioc_capabilities &
+ MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)) {
+ ioc_err(mrioc,
+ "critical error: previously enabled segmented trace\n"
+ " buffer capability is disabled after reset. Please\n"
+ " update the firmware or reboot the system or\n"
+ " reload the driver to enable trace diag buffer\n");
+ mrioc->diag_buffers[0].disabled_after_reset = true;
+ } else
+ mrioc->diag_buffers[0].disabled_after_reset = false;
+ }
+
if (mrioc->facts.max_devhandle > mrioc->dev_handle_bitmap_bits) {
removepend_bitmap = bitmap_zalloc(mrioc->facts.max_devhandle,
GFP_KERNEL);
@@ -1717,7 +1736,7 @@ static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
ioc_status = readl(&mrioc->sysif_regs->ioc_status);
ioc_info(mrioc,
- "ioc_status/ioc_onfig after %s reset is (0x%x)/(0x%x)\n",
+ "ioc_status/ioc_config after %s reset is (0x%08x)/(0x%08x)\n",
(!retval)?"successful":"failed", ioc_status,
ioc_config);
if (retval)
@@ -2104,15 +2123,22 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
}
reply_qid = qidx + 1;
- op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
- if ((mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
- !mrioc->pdev->revision)
- op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
+
+ if (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) {
+ if (mrioc->pdev->revision)
+ op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
+ else
+ op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
+ } else
+ op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD2K;
+
op_reply_q->ci = 0;
op_reply_q->ephase = 1;
atomic_set(&op_reply_q->pend_ios, 0);
atomic_set(&op_reply_q->in_use, 0);
op_reply_q->enable_irq_poll = false;
+ op_reply_q->qfull_watermark =
+ op_reply_q->num_replies - (MPI3MR_THRESHOLD_REPLY_COUNT * 2);
if (!op_reply_q->q_segments) {
retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
@@ -2416,8 +2442,10 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
void *segment_base_addr;
u16 req_sz = mrioc->facts.op_req_sz;
struct segments *segments = op_req_q->q_segments;
+ struct op_reply_qinfo *op_reply_q = NULL;
reply_qidx = op_req_q->reply_qid - 1;
+ op_reply_q = mrioc->op_reply_qinfo + reply_qidx;
if (mrioc->unrecoverable)
return -EFAULT;
@@ -2448,6 +2476,15 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
goto out;
}
+ /* Reply queue is nearing to get full, push back IOs to SML */
+ if ((mrioc->prevent_reply_qfull == true) &&
+ (atomic_read(&op_reply_q->pend_ios) >
+ (op_reply_q->qfull_watermark))) {
+ atomic_inc(&mrioc->reply_qfull_count);
+ retval = -EAGAIN;
+ goto out;
+ }
+
segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
req_entry = (u8 *)segment_base_addr +
((pi % op_req_q->segment_qd) * req_sz);
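The new check above rejects request posting with -EAGAIN once pend_ios climbs past qfull_watermark, which the earlier hunk sets to num_replies minus twice MPI3MR_THRESHOLD_REPLY_COUNT. A hedged sketch of how a queuecommand-style caller would push that back to the SCSI midlayer; the exact call site is not shown in this diff:

	ret = mpi3mr_op_request_post(mrioc, op_req_q, (u8 *)scsiio_req);
	if (ret == -EAGAIN)			/* reply queue nearing full */
		return SCSI_MLQUEUE_HOST_BUSY;	/* midlayer retries later */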
@@ -2726,7 +2763,16 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
return;
}
- if (mrioc->ts_update_counter++ >= mrioc->ts_update_interval) {
+ if (atomic_read(&mrioc->admin_pend_isr)) {
+ ioc_err(mrioc, "Unprocessed admin ISR instance found\n"
+ "flush admin replies\n");
+ mpi3mr_process_admin_reply_q(mrioc);
+ }
+
+ if (!(mrioc->facts.ioc_capabilities &
+ MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC) &&
+ (mrioc->ts_update_counter++ >= mrioc->ts_update_interval)) {
+
mrioc->ts_update_counter = 0;
mpi3mr_sync_timestamp(mrioc);
}
@@ -2883,6 +2929,7 @@ static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
mrioc->admin_reply_ci = 0;
mrioc->admin_reply_ephase = 1;
atomic_set(&mrioc->admin_reply_q_in_use, 0);
+ atomic_set(&mrioc->admin_pend_isr, 0);
if (!mrioc->admin_req_base) {
mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
@@ -3091,6 +3138,9 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
mrioc->facts.dma_mask = (facts_flags &
MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
+ mrioc->facts.dma_mask = (facts_flags &
+ MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
+ MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
mrioc->facts.protocol_flags = facts_data->protocol_flags;
mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
@@ -4214,6 +4264,13 @@ retry_init:
mrioc->shost->transportt = mpi3mr_transport_template;
}
+ if (mrioc->facts.max_req_limit)
+ mrioc->prevent_reply_qfull = true;
+
+ if (mrioc->facts.ioc_capabilities &
+ MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)
+ mrioc->seg_tb_support = true;
+
mrioc->reply_sz = mrioc->facts.reply_sz;
retval = mpi3mr_check_reset_dma_mask(mrioc);
@@ -4370,6 +4427,7 @@ retry_init:
goto out_failed_noretry;
}
+ mrioc->io_admin_reset_sync = 0;
if (is_resume || mrioc->block_on_pci_err) {
dprint_reset(mrioc, "setting up single ISR\n");
retval = mpi3mr_setup_isr(mrioc, 1);
@@ -4600,6 +4658,7 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
if (mrioc->admin_reply_base)
memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
atomic_set(&mrioc->admin_reply_q_in_use, 0);
+ atomic_set(&mrioc->admin_pend_isr, 0);
if (mrioc->init_cmds.reply) {
memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
@@ -4671,7 +4730,7 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
*/
void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
{
- u16 i;
+ u16 i, j;
struct mpi3mr_intr_info *intr_info;
struct diag_buffer_desc *diag_buffer;
@@ -4806,6 +4865,26 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
diag_buffer = &mrioc->diag_buffers[i];
+ if ((i == 0) && mrioc->seg_tb_support) {
+ if (mrioc->trace_buf_pool) {
+ for (j = 0; j < mrioc->num_tb_segs; j++) {
+ if (mrioc->trace_buf[j].segment) {
+ dma_pool_free(mrioc->trace_buf_pool,
+ mrioc->trace_buf[j].segment,
+ mrioc->trace_buf[j].segment_dma);
+ mrioc->trace_buf[j].segment = NULL;
+ }
+
+ mrioc->trace_buf[j].segment = NULL;
+ }
+ dma_pool_destroy(mrioc->trace_buf_pool);
+ mrioc->trace_buf_pool = NULL;
+ }
+
+ kfree(mrioc->trace_buf);
+ mrioc->trace_buf = NULL;
+ diag_buffer->size = sizeof(u64) * mrioc->num_tb_segs;
+ }
if (diag_buffer->addr) {
dma_free_coherent(&mrioc->pdev->dev,
diag_buffer->size, diag_buffer->addr,
@@ -4883,7 +4962,7 @@ static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
}
ioc_info(mrioc,
- "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
+ "Base IOC Sts/Config after %s shutdown is (0x%08x)/(0x%08x)\n",
(!retval) ? "successful" : "failed", ioc_status,
ioc_config);
}
@@ -5229,6 +5308,55 @@ cleanup_drv_cmd:
}
/**
+ * mpi3mr_check_op_admin_proc - Check for in-use reply queues
+ * @mrioc: Adapter instance reference
+ *
+ * Check if any of the operation reply queues
+ * or the admin reply queue is currently in use.
+ * If any queue is in use, this function waits for
+ * a maximum of 10 seconds for them to become available.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc)
+{
+
+ u16 timeout = 10 * 10;
+ u16 elapsed_time = 0;
+ bool op_admin_in_use = false;
+
+ do {
+ op_admin_in_use = false;
+
+ /* Check admin_reply queue first to exit early */
+ if (atomic_read(&mrioc->admin_reply_q_in_use) == 1)
+ op_admin_in_use = true;
+ else {
+ /* Check op_reply queues */
+ int i;
+
+ for (i = 0; i < mrioc->num_queues; i++) {
+ if (atomic_read(&mrioc->op_reply_qinfo[i].in_use) == 1) {
+ op_admin_in_use = true;
+ break;
+ }
+ }
+ }
+
+ if (!op_admin_in_use)
+ break;
+
+ msleep(100);
+
+ } while (++elapsed_time < timeout);
+
+ if (op_admin_in_use)
+ return 1;
+
+ return 0;
+}
+
+/**
* mpi3mr_soft_reset_handler - Reset the controller
* @mrioc: Adapter instance reference
* @reset_reason: Reset reason code
@@ -5308,6 +5436,7 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);
mpi3mr_ioc_disable_intr(mrioc);
+ mrioc->io_admin_reset_sync = 1;
if (snapdump) {
mpi3mr_set_diagsave(mrioc);
@@ -5335,6 +5464,16 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
goto out;
}
+
+ retval = mpi3mr_check_op_admin_proc(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Soft reset failed due to an Admin or I/O queue polling\n"
+ "thread still processing replies even after a 10 second\n"
+ "timeout. Marking the controller as unrecoverable!\n");
+
+ goto out;
+ }
+
if (mrioc->num_io_throttle_group !=
mrioc->facts.max_io_throttle_group) {
ioc_err(mrioc,
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index 1bef88130d0c..c186b892150f 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -3839,6 +3839,18 @@ int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
if (scmd) {
+ if (tm_type == MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
+ cmd_priv = scsi_cmd_priv(scmd);
+ if (!cmd_priv)
+ goto out_unlock;
+
+ struct op_req_qinfo *op_req_q;
+
+ op_req_q = &mrioc->req_qinfo[cmd_priv->req_q_idx];
+ tm_req.task_host_tag = cpu_to_le16(cmd_priv->host_tag);
+ tm_req.task_request_queue_id =
+ cpu_to_le16(op_req_q->qid);
+ }
sdev = scmd->device;
sdev_priv_data = sdev->hostdata;
scsi_tgt_priv_data = ((sdev_priv_data) ?
@@ -4042,7 +4054,7 @@ static void mpi3mr_map_queues(struct Scsi_Host *shost)
*/
map->queue_offset = qoff;
if (i != HCTX_TYPE_POLL)
- blk_mq_pci_map_queues(map, mrioc->pdev, offset);
+ blk_mq_map_hw_queues(map, &mrioc->pdev->dev, offset);
else
blk_mq_map_queues(map);
@@ -4388,6 +4400,92 @@ out:
}
/**
+ * mpi3mr_eh_abort - Callback function for abort error handling
+ * @scmd: SCSI command reference
+ *
+ * Issues Abort Task Management if the command is in LLD scope
+ * and verifies if it is aborted successfully, and return status
+ * accordingly.
+ *
+ * Return: SUCCESS if the abort was successful, otherwise FAILED
+ */
+static int mpi3mr_eh_abort(struct scsi_cmnd *scmd)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
+ struct mpi3mr_stgt_priv_data *stgt_priv_data;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ struct scmd_priv *cmd_priv;
+ u16 dev_handle, timeout = MPI3MR_ABORTTM_TIMEOUT;
+ u8 resp_code = 0;
+ int retval = FAILED, ret = 0;
+ struct request *rq = scsi_cmd_to_rq(scmd);
+ unsigned long scmd_age_ms = jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc);
+ unsigned long scmd_age_sec = scmd_age_ms / HZ;
+
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: attempting abort task for scmd(%p)\n", mrioc->name, scmd);
+
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: scmd(0x%p) is outstanding for %lus %lums, timeout %us, retries %d, allowed %d\n",
+ mrioc->name, scmd, scmd_age_sec, scmd_age_ms % HZ, rq->timeout / HZ,
+ scmd->retries, scmd->allowed);
+
+ scsi_print_command(scmd);
+
+ sdev_priv_data = scmd->device->hostdata;
+ if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: Device not available, Skip issuing abort task\n",
+ mrioc->name);
+ retval = SUCCESS;
+ goto out;
+ }
+
+ stgt_priv_data = sdev_priv_data->tgt_priv_data;
+ dev_handle = stgt_priv_data->dev_handle;
+
+ cmd_priv = scsi_cmd_priv(scmd);
+ if (!cmd_priv->in_lld_scope ||
+ cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: scmd (0x%p) not in LLD scope, Skip issuing Abort Task\n",
+ mrioc->name, scmd);
+ retval = SUCCESS;
+ goto out;
+ }
+
+ if (stgt_priv_data->dev_removed) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: Device (handle = 0x%04x) removed, Skip issuing Abort Task\n",
+ mrioc->name, dev_handle);
+ retval = FAILED;
+ goto out;
+ }
+
+ ret = mpi3mr_issue_tm(mrioc, MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+ dev_handle, sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
+ timeout, &mrioc->host_tm_cmds, &resp_code, scmd);
+
+ if (ret)
+ goto out;
+
+ if (cmd_priv->in_lld_scope) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: Abort task failed. scmd (0x%p) was not terminated\n",
+ mrioc->name, scmd);
+ goto out;
+ }
+
+ retval = SUCCESS;
+out:
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: Abort Task %s for scmd (0x%p)\n", mrioc->name,
+ ((retval == SUCCESS) ? "SUCCEEDED" : "FAILED"), scmd);
+
+ return retval;
+}
+
+/**
* mpi3mr_scan_start - Scan start callback handler
* @shost: SCSI host reference
*
@@ -4465,14 +4563,14 @@ static int mpi3mr_scan_finished(struct Scsi_Host *shost,
}
/**
- * mpi3mr_slave_destroy - Slave destroy callback handler
+ * mpi3mr_sdev_destroy - Slave destroy callback handler
* @sdev: SCSI device reference
*
* Cleanup and free per device(lun) private data.
*
* Return: Nothing.
*/
-static void mpi3mr_slave_destroy(struct scsi_device *sdev)
+static void mpi3mr_sdev_destroy(struct scsi_device *sdev)
{
struct Scsi_Host *shost;
struct mpi3mr_ioc *mrioc;
@@ -4552,7 +4650,7 @@ static void mpi3mr_target_destroy(struct scsi_target *starget)
}
/**
- * mpi3mr_device_configure - Slave configure callback handler
+ * mpi3mr_sdev_configure - Slave configure callback handler
* @sdev: SCSI device reference
* @lim: queue limits
*
@@ -4561,8 +4659,8 @@ static void mpi3mr_target_destroy(struct scsi_target *starget)
*
* Return: 0 always.
*/
-static int mpi3mr_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim)
+static int mpi3mr_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct scsi_target *starget;
struct Scsi_Host *shost;
@@ -4599,14 +4697,14 @@ static int mpi3mr_device_configure(struct scsi_device *sdev,
}
/**
- * mpi3mr_slave_alloc -Slave alloc callback handler
+ * mpi3mr_sdev_init -Slave alloc callback handler
* @sdev: SCSI device reference
*
* Allocate per device(lun) private data and initialize it.
*
* Return: 0 on success -ENOMEM on memory allocation failure.
*/
-static int mpi3mr_slave_alloc(struct scsi_device *sdev)
+static int mpi3mr_sdev_init(struct scsi_device *sdev)
{
struct Scsi_Host *shost;
struct mpi3mr_ioc *mrioc;
@@ -5062,13 +5160,14 @@ static const struct scsi_host_template mpi3mr_driver_template = {
.proc_name = MPI3MR_DRIVER_NAME,
.queuecommand = mpi3mr_qcmd,
.target_alloc = mpi3mr_target_alloc,
- .slave_alloc = mpi3mr_slave_alloc,
- .device_configure = mpi3mr_device_configure,
+ .sdev_init = mpi3mr_sdev_init,
+ .sdev_configure = mpi3mr_sdev_configure,
.target_destroy = mpi3mr_target_destroy,
- .slave_destroy = mpi3mr_slave_destroy,
+ .sdev_destroy = mpi3mr_sdev_destroy,
.scan_finished = mpi3mr_scan_finished,
.scan_start = mpi3mr_scan_start,
.change_queue_depth = mpi3mr_change_queue_depth,
+ .eh_abort_handler = mpi3mr_eh_abort,
.eh_device_reset_handler = mpi3mr_eh_dev_reset,
.eh_target_reset_handler = mpi3mr_eh_target_reset,
.eh_bus_reset_handler = mpi3mr_eh_bus_reset,
@@ -5803,7 +5902,7 @@ static const struct pci_device_id mpi3mr_pci_id_table[] = {
};
MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table);
-static struct pci_error_handlers mpi3mr_err_handler = {
+static const struct pci_error_handlers mpi3mr_err_handler = {
.error_detected = mpi3mr_pcierr_error_detected,
.mmio_enabled = mpi3mr_pcierr_mmio_enabled,
.slot_reset = mpi3mr_pcierr_slot_reset,
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
index 0ba9e6a6a13c..c8d6ced5640e 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -105,10 +105,10 @@ struct rep_manu_reply {
u8 reserved0[2];
u8 sas_format;
u8 reserved2[3];
- u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
- u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
- u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
- u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN] __nonstring;
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN] __nonstring;
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN] __nonstring;
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN] __nonstring;
u16 component_id;
u8 component_revision_id;
u8 reserved3;
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index 6de35b32223c..b181b113fc80 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -125,6 +125,12 @@
* 06-24-19 02.00.55 Bumped MPI2_HEADER_VERSION_UNIT
* 08-01-19 02.00.56 Bumped MPI2_HEADER_VERSION_UNIT
* 10-02-19 02.00.57 Bumped MPI2_HEADER_VERSION_UNIT
+ * 07-20-20 02.00.58 Bumped MPI2_HEADER_VERSION_UNIT
+ * 03-30-21 02.00.59 Bumped MPI2_HEADER_VERSION_UNIT
+ * 06-03-22 02.00.60 Bumped MPI2_HEADER_VERSION_UNIT
+ * 09-20-23 02.00.61 Bumped MPI2_HEADER_VERSION_UNIT
+ * 09-13-24 02.00.62 Bumped MPI2_HEADER_VERSION_UNIT
+ * Added MPI2_FUNCTION_MCTP_PASSTHROUGH
* --------------------------------------------------------------------------
*/
@@ -165,7 +171,7 @@
/* Unit and Dev versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x39)
+#define MPI2_HEADER_VERSION_UNIT (0x3E)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -669,6 +675,7 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION {
#define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30)
#define MPI2_FUNCTION_SEND_HOST_MESSAGE (0x31)
#define MPI2_FUNCTION_NVME_ENCAPSULATED (0x33)
+#define MPI2_FUNCTION_MCTP_PASSTHROUGH (0x34)
#define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0)
#define MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 587f7d248219..02bf26ca976e 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -251,6 +251,7 @@
* 12-17-18 02.00.47 Swap locations of Slotx2 and Slotx4 in ManPage 7.
* 08-01-19 02.00.49 Add MPI26_MANPAGE7_FLAG_X2_X4_SLOT_INFO_VALID
* Add MPI26_IOUNITPAGE1_NVME_WRCACHE_SHIFT
+ * 09-13-24 02.00.50 Added PCIe 32 GT/s link rate
*/
#ifndef MPI2_CNFG_H
@@ -606,7 +607,7 @@ typedef struct _MPI2_CONFIG_REPLY {
typedef struct _MPI2_CONFIG_PAGE_MAN_0 {
MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
- U8 ChipName[16]; /*0x04 */
+ U8 ChipName[16] __nonstring; /*0x04 */
U8 ChipRevision[8]; /*0x14 */
U8 BoardName[16]; /*0x1C */
U8 BoardAssembly[16]; /*0x2C */
@@ -1121,6 +1122,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
#define MPI2_IOUNITPAGE7_PCIE_SPEED_5_0_GBPS (0x01)
#define MPI2_IOUNITPAGE7_PCIE_SPEED_8_0_GBPS (0x02)
#define MPI2_IOUNITPAGE7_PCIE_SPEED_16_0_GBPS (0x03)
+#define MPI2_IOUNITPAGE7_PCIE_SPEED_32_0_GBPS (0x04)
/*defines for IO Unit Page 7 ProcessorState field */
#define MPI2_IOUNITPAGE7_PSTATE_MASK_SECOND (0x0000000F)
@@ -2301,6 +2303,7 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 {
#define MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION (0x0001)
/*values for SAS IO Unit Page 1 AdditionalControlFlags */
+#define MPI2_SASIOUNIT1_ACONTROL_PROD_SPECIFIC_1 (0x8000)
#define MPI2_SASIOUNIT1_ACONTROL_DA_PERSIST_CONNECT (0x0100)
#define MPI2_SASIOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080)
#define MPI2_SASIOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040)
@@ -3591,6 +3594,7 @@ typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS {
#define MPI26_PCIE_NEG_LINK_RATE_5_0 (0x03)
#define MPI26_PCIE_NEG_LINK_RATE_8_0 (0x04)
#define MPI26_PCIE_NEG_LINK_RATE_16_0 (0x05)
+#define MPI26_PCIE_NEG_LINK_RATE_32_0 (0x06)
/****************************************************************************
@@ -3700,6 +3704,7 @@ typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_1 {
#define MPI26_PCIEIOUNIT1_MAX_RATE_5_0 (0x30)
#define MPI26_PCIEIOUNIT1_MAX_RATE_8_0 (0x40)
#define MPI26_PCIEIOUNIT1_MAX_RATE_16_0 (0x50)
+#define MPI26_PCIEIOUNIT1_MAX_RATE_32_0 (0x60)
/*values for PCIe IO Unit Page 1 DMDReportPCIe */
#define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_MASK (0x80)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index d92852591134..1a279c6e1a9f 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -179,6 +179,7 @@
* Added MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED
* Added MPI2_FW_DOWNLOAD_ITYPE_COREDUMP
* Added MPI2_FW_UPLOAD_ITYPE_COREDUMP
+ * 09-13-24 02.00.39 Added MPI26_MCTP_PASSTHROUGH messages
* --------------------------------------------------------------------------
*/
@@ -382,6 +383,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
/*ProductID field uses MPI2_FW_HEADER_PID_ */
/*IOCCapabilities */
+#define MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU (0x00800000)
#define MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED (0x00200000)
#define MPI26_IOCFACTS_CAPABILITY_PCIE_SRIOV (0x00100000)
#define MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ (0x00080000)
@@ -1798,5 +1800,57 @@ typedef struct _MPI26_IOUNIT_CONTROL_REPLY {
Mpi26IoUnitControlReply_t,
*pMpi26IoUnitControlReply_t;
+/****************************************************************************
+ * MCTP Passthrough messages (MPI v2.6 and later only.)
+ ****************************************************************************/
+
+/* MCTP Passthrough Request Message */
+typedef struct _MPI26_MCTP_PASSTHROUGH_REQUEST {
+ U8 MsgContext; /* 0x00 */
+ U8 Reserved1[2]; /* 0x01 */
+ U8 Function; /* 0x03 */
+ U8 Reserved2[3]; /* 0x04 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U32 Reserved4; /* 0x0C */
+ U8 Flags; /* 0x10 */
+ U8 Reserved5[3]; /* 0x11 */
+ U32 Reserved6; /* 0x14 */
+ U32 H2DLength; /* 0x18 */
+ U32 D2HLength; /* 0x1C */
+ MPI25_SGE_IO_UNION H2DSGL; /* 0x20 */
+ MPI25_SGE_IO_UNION D2HSGL; /* 0x30 */
+} MPI26_MCTP_PASSTHROUGH_REQUEST,
+ *PTR_MPI26_MCTP_PASSTHROUGH_REQUEST,
+ Mpi26MctpPassthroughRequest_t,
+ *pMpi26MctpPassthroughRequest_t;
+
+/* values for the MsgContext field */
+#define MPI26_MCTP_MSG_CONEXT_UNUSED (0x00)
+
+/* values for the Flags field */
+#define MPI26_MCTP_FLAGS_MSG_FORMAT_MPT (0x01)
+
+/* MCTP Passthrough Reply Message */
+typedef struct _MPI26_MCTP_PASSTHROUGH_REPLY {
+ U8 MsgContext; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U8 Reserved2[3]; /* 0x04 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U16 Reserved4; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U32 ResponseDataLength; /* 0x14 */
+} MPI26_MCTP_PASSTHROUGH_REPLY,
+ *PTR_MPI26_MCTP_PASSTHROUGH_REPLY,
+ Mpi26MctpPassthroughReply_t,
+ *pMpi26MctpPassthroughReply_t;
#endif
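
The two structures above define the complete MPI frame layout for MCTP passthrough: the host supplies an H2D buffer and a D2H buffer, describes their lengths in the request, and reads the result back from IOCStatus and ResponseDataLength in the reply. As a rough sketch (not part of this patch), a request frame could be initialized as follows before the SGLs are built; the frame pointer and lengths are assumed to come from the submitting code, as they do in _ctl_send_mctp_passthru_req() further down in this patch.

/* Illustrative sketch only: prepare an MCTP passthrough request frame.
 * "frame", h2d_len and d2h_len are assumed to be provided by the caller;
 * the H2DSGL/D2HSGL entries are filled in separately by the driver's
 * build_sg() helper before the frame is posted to the IOC.
 */
static void example_fill_mctp_passthru_req(void *frame, u32 h2d_len, u32 d2h_len)
{
	Mpi26MctpPassthroughRequest_t *req = frame;

	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_MCTP_PASSTHROUGH;
	req->MsgContext = MPI26_MCTP_MSG_CONEXT_UNUSED;
	req->Flags = MPI26_MCTP_FLAGS_MSG_FORMAT_MPT;
	req->H2DLength = h2d_len;	/* host-to-device payload length */
	req->D2HLength = d2h_len;	/* device-to-host buffer length */
}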
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index c1d8f2c91a5e..bd3efa5b46c7 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -1202,6 +1202,11 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
ioc->sge_size;
func_str = "nvme_encapsulated";
break;
+ case MPI2_FUNCTION_MCTP_PASSTHROUGH:
+ frame_sz = sizeof(Mpi26MctpPassthroughRequest_t) +
+ ioc->sge_size;
+ func_str = "mctp_passthru";
+ break;
default:
frame_sz = 32;
func_str = "unknown";
@@ -4874,6 +4879,12 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
i++;
}
+ if (ioc->facts.IOCCapabilities &
+ MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU) {
+ pr_cont("%sMCTP Passthru", i ? "," : "");
+ i++;
+ }
+
iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
pr_cont("%sNCQ", i ? "," : "");
@@ -5627,7 +5638,7 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
if (rc)
return rc;
if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
- pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
+ pr_err("%s: overriding NVDATA EEDPTagMode setting from 0 to 1\n",
ioc->name);
ioc->manu_pg11.EEDPTagMode = 0x1;
mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
@@ -8018,7 +8029,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
mutex_lock(&ioc->hostdiag_unlock_mutex);
if (mpt3sas_base_unlock_and_get_host_diagnostic(ioc, &host_diagnostic))
- goto out;
+ goto unlock;
hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
@@ -8038,7 +8049,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_info(ioc,
"Invalid host diagnostic register value\n");
_base_dump_reg_set(ioc);
- goto out;
+ goto unlock;
}
if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
break;
@@ -8074,17 +8085,19 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
__func__, ioc_state);
_base_dump_reg_set(ioc);
- goto out;
+ goto fail;
}
pci_cfg_access_unlock(ioc->pdev);
ioc_info(ioc, "diag reset: SUCCESS\n");
return 0;
- out:
+unlock:
+ mutex_unlock(&ioc->hostdiag_unlock_mutex);
+
+fail:
pci_cfg_access_unlock(ioc->pdev);
ioc_err(ioc, "diag reset: FAILED\n");
- mutex_unlock(&ioc->hostdiag_unlock_mutex);
return -EFAULT;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index d8d1a64b4764..939141cde3ca 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -77,8 +77,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "51.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 51
+#define MPT3SAS_DRIVER_VERSION "52.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 52
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 00
#define MPT3SAS_RELEASE_VERSION 00
@@ -1858,9 +1858,6 @@ int mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc,
int mpt3sas_config_get_manufacturing_pg1(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage1_t *config_page);
-int mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc,
- Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page,
- u16 sz);
int mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply,
struct Mpi2ManufacturingPage10_t *config_page);
@@ -1887,9 +1884,6 @@ int mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
int mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage0_t *config_page,
u32 form, u32 handle);
-int mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
- Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page,
- u32 form, u32 handle);
int mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage0_t *config_page,
u32 form, u32 handle);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index 2e88f456fc34..45ac853e1289 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -577,44 +577,6 @@ mpt3sas_config_get_manufacturing_pg1(struct MPT3SAS_ADAPTER *ioc,
}
/**
- * mpt3sas_config_get_manufacturing_pg7 - obtain manufacturing page 7
- * @ioc: per adapter object
- * @mpi_reply: reply mf payload returned from firmware
- * @config_page: contents of the config page
- * @sz: size of buffer passed in config_page
- * Context: sleep.
- *
- * Return: 0 for success, non-zero for failure.
- */
-int
-mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc,
- Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page,
- u16 sz)
-{
- Mpi2ConfigRequest_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
- mpi_request.Function = MPI2_FUNCTION_CONFIG;
- mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
- mpi_request.Header.PageNumber = 7;
- mpi_request.Header.PageVersion = MPI2_MANUFACTURING7_PAGEVERSION;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = _config_request(ioc, &mpi_request, mpi_reply,
- MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
-
- mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = _config_request(ioc, &mpi_request, mpi_reply,
- MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sz);
- out:
- return r;
-}
-
-/**
* mpt3sas_config_get_manufacturing_pg10 - obtain manufacturing page 10
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
@@ -1214,47 +1176,6 @@ mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
}
/**
- * mpt3sas_config_get_sas_device_pg1 - obtain sas device page 1
- * @ioc: per adapter object
- * @mpi_reply: reply mf payload returned from firmware
- * @config_page: contents of the config page
- * @form: GET_NEXT_HANDLE or HANDLE
- * @handle: device handle
- * Context: sleep.
- *
- * Return: 0 for success, non-zero for failure.
- */
-int
-mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
- Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page,
- u32 form, u32 handle)
-{
- Mpi2ConfigRequest_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
- mpi_request.Function = MPI2_FUNCTION_CONFIG;
- mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
- mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE;
- mpi_request.Header.PageVersion = MPI2_SASDEVICE1_PAGEVERSION;
- mpi_request.Header.PageNumber = 1;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = _config_request(ioc, &mpi_request, mpi_reply,
- MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
-
- mpi_request.PageAddress = cpu_to_le32(form | handle);
- mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = _config_request(ioc, &mpi_request, mpi_reply,
- MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sizeof(*config_page));
- out:
- return r;
-}
-
-/**
* mpt3sas_config_get_pcie_device_pg0 - obtain pcie device page 0
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 87784c96249a..02fc204b9bf7 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -186,6 +186,9 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
case MPI2_FUNCTION_NVME_ENCAPSULATED:
desc = "nvme_encapsulated";
break;
+ case MPI2_FUNCTION_MCTP_PASSTHROUGH:
+ desc = "mctp_passthrough";
+ break;
}
if (!desc)
@@ -653,6 +656,40 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
}
/**
+ * _ctl_send_mctp_passthru_req - Send an MCTP passthru request
+ * @ioc: per adapter object
+ * @mctp_passthru_req: MPI MCTP passthru request from caller
+ * @psge: pointer to the H2DSGL
+ * @data_out_dma: DMA buffer for H2D SGL
+ * @data_out_sz: H2D length
+ * @data_in_dma: DMA buffer for D2H SGL
+ * @data_in_sz: D2H length
+ * @smid: SMID to submit the request
+ *
+ */
+static void
+_ctl_send_mctp_passthru_req(
+ struct MPT3SAS_ADAPTER *ioc,
+ Mpi26MctpPassthroughRequest_t *mctp_passthru_req, void *psge,
+ dma_addr_t data_out_dma, int data_out_sz,
+ dma_addr_t data_in_dma, int data_in_sz,
+ u16 smid)
+{
+ mctp_passthru_req->H2DLength = data_out_sz;
+ mctp_passthru_req->D2HLength = data_in_sz;
+
+ /* Build the H2D SGL from the data out buffer */
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, 0, 0);
+
+ psge += ioc->sge_size_ieee;
+
+ /* Build the D2H SGL for the data in buffer */
+ ioc->build_sg(ioc, psge, 0, 0, data_in_dma, data_in_sz);
+
+ ioc->put_smid_default(ioc, smid);
+}
+
+/**
* _ctl_do_mpt_command - main handler for MPT3COMMAND opcode
* @ioc: per adapter object
* @karg: (struct mpt3_ioctl_command)
@@ -679,6 +716,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
size_t data_in_sz = 0;
long ret;
u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
+ int tm_ret;
issue_reset = 0;
@@ -792,6 +830,23 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
init_completion(&ioc->ctl_cmds.done);
switch (mpi_request->Function) {
+ case MPI2_FUNCTION_MCTP_PASSTHROUGH:
+ {
+ Mpi26MctpPassthroughRequest_t *mctp_passthru_req =
+ (Mpi26MctpPassthroughRequest_t *)request;
+
+ if (!(ioc->facts.IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU)) {
+ ioc_err(ioc, "%s: MCTP Passthrough request not supported\n",
+ __func__);
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ _ctl_send_mctp_passthru_req(ioc, mctp_passthru_req, psge, data_out_dma,
+ data_out_sz, data_in_dma, data_in_sz, smid);
+ break;
+ }
case MPI2_FUNCTION_NVME_ENCAPSULATED:
{
nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request;
@@ -1120,18 +1175,25 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
if (pcie_device && (!ioc->tm_custom_handling) &&
(!(mpt3sas_scsih_is_pcie_scsi_device(
pcie_device->device_info))))
- mpt3sas_scsih_issue_locked_tm(ioc,
+ tm_ret = mpt3sas_scsih_issue_locked_tm(ioc,
le16_to_cpu(mpi_request->FunctionDependent1),
0, 0, 0,
MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
0, pcie_device->reset_timeout,
MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE);
else
- mpt3sas_scsih_issue_locked_tm(ioc,
+ tm_ret = mpt3sas_scsih_issue_locked_tm(ioc,
le16_to_cpu(mpi_request->FunctionDependent1),
0, 0, 0,
MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET);
+
+ if (tm_ret != SUCCESS) {
+ ioc_info(ioc,
+ "target reset failed, issue hard reset: handle (0x%04x)\n",
+ le16_to_cpu(mpi_request->FunctionDependent1));
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ }
} else
mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
}
@@ -1200,6 +1262,8 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
}
karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
+ karg.driver_capability |= MPT3_IOCTL_IOCINFO_DRIVER_CAP_MCTP_PASSTHRU;
+
if (copy_to_user(arg, &karg, sizeof(karg))) {
pr_err("failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
@@ -2787,6 +2851,218 @@ out_unlock_pciaccess:
}
/**
+ * _ctl_get_mpt_mctp_passthru_adapter - Traverse the IOC list and return the IOC at
+ * the dev_index position that supports MCTP passthru
+ * @dev_index: position in the mpt3sas_ioc_list to search for
+ *
+ * Return: pointer to the IOC on success, NULL if no matching device is found
+ */
+static struct MPT3SAS_ADAPTER *
+_ctl_get_mpt_mctp_passthru_adapter(int dev_index)
+{
+ struct MPT3SAS_ADAPTER *ioc = NULL;
+ int count = 0;
+
+ spin_lock(&gioc_lock);
+ /* Walk the ioc list and return the IOC at dev_index that supports MCTP passthru */
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
+ if (ioc->facts.IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU) {
+ if (count == dev_index) {
+ spin_unlock(&gioc_lock);
+ return ioc;
+ }
+ count++;
+ }
+ }
+ spin_unlock(&gioc_lock);
+
+ return NULL;
+}
+
+/**
+ * mpt3sas_get_device_count - Retrieve the count of MCTP passthrough
+ * capable devices managed by the driver.
+ *
+ * Returns number of devices that support MCTP passthrough.
+ */
+int
+mpt3sas_get_device_count(void)
+{
+ int count = 0;
+ struct MPT3SAS_ADAPTER *ioc = NULL;
+
+ spin_lock(&gioc_lock);
+ /* Traverse the ioc list and count the IOCs that support MCTP passthru */
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
+ if (ioc->facts.IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU)
+ count++;
+
+ spin_unlock(&gioc_lock);
+
+ return count;
+}
+EXPORT_SYMBOL(mpt3sas_get_device_count);
+
+/**
+ * mpt3sas_send_mctp_passthru_req - Send an MPI MCTP passthrough command to
+ * firmware
+ * @command: The MPI MCTP passthrough command to send to firmware
+ *
+ * Returns 0 on success, anything else is an error.
+ */
+int mpt3sas_send_mctp_passthru_req(struct mpt3_passthru_command *command)
+{
+ struct MPT3SAS_ADAPTER *ioc;
+ MPI2RequestHeader_t *mpi_request = NULL, *request;
+ MPI2DefaultReply_t *mpi_reply;
+ Mpi26MctpPassthroughRequest_t *mctp_passthru_req;
+ u16 smid;
+ unsigned long timeout;
+ u8 issue_reset = 0;
+ u32 sz;
+ void *psge;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma = 0;
+ size_t data_out_sz = 0;
+ void *data_in = NULL;
+ dma_addr_t data_in_dma = 0;
+ size_t data_in_sz = 0;
+ long ret;
+
+ /* Retrieve ioc from dev_index */
+ ioc = _ctl_get_mpt_mctp_passthru_adapter(command->dev_index);
+ if (!ioc)
+ return -ENODEV;
+
+ mutex_lock(&ioc->pci_access_mutex);
+ if (ioc->shost_recovery ||
+ ioc->pci_error_recovery || ioc->is_driver_loading ||
+ ioc->remove_host) {
+ ret = -EAGAIN;
+ goto unlock_pci_access;
+ }
+
+ /* Lock the ctl_cmds mutex to ensure a single ctl cmd is pending */
+ if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) {
+ ret = -ERESTARTSYS;
+ goto unlock_pci_access;
+ }
+
+ if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
+ ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
+ ret = -EAGAIN;
+ goto unlock_ctl_cmds;
+ }
+
+ ret = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
+ if (ret)
+ goto unlock_ctl_cmds;
+
+ mpi_request = (MPI2RequestHeader_t *)command->mpi_request;
+ if (mpi_request->Function != MPI2_FUNCTION_MCTP_PASSTHROUGH) {
+ ioc_err(ioc, "%s: Invalid request received, Function 0x%x\n",
+ __func__, mpi_request->Function);
+ ret = -EINVAL;
+ goto unlock_ctl_cmds;
+ }
+
+ /* Use first reserved smid for passthrough commands */
+ smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
+ ret = 0;
+ ioc->ctl_cmds.status = MPT3_CMD_PENDING;
+ memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
+ request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(request, 0, ioc->request_sz);
+ memcpy(request, command->mpi_request, sizeof(Mpi26MctpPassthroughRequest_t));
+ ioc->ctl_cmds.smid = smid;
+ data_out_sz = command->data_out_size;
+ data_in_sz = command->data_in_size;
+
+ /* obtain dma-able memory for data transfer */
+ if (data_out_sz) /* WRITE */ {
+ data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz,
+ &data_out_dma, GFP_ATOMIC);
+ if (!data_out) {
+ ret = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ memcpy(data_out, command->data_out_buf_ptr, data_out_sz);
+
+ }
+
+ if (data_in_sz) /* READ */ {
+ data_in = dma_alloc_coherent(&ioc->pdev->dev, data_in_sz,
+ &data_in_dma, GFP_ATOMIC);
+ if (!data_in) {
+ ret = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ }
+
+ psge = &((Mpi26MctpPassthroughRequest_t *)request)->H2DSGL;
+
+ init_completion(&ioc->ctl_cmds.done);
+
+ mctp_passthru_req = (Mpi26MctpPassthroughRequest_t *)request;
+
+ _ctl_send_mctp_passthru_req(ioc, mctp_passthru_req, psge, data_out_dma,
+ data_out_sz, data_in_dma, data_in_sz, smid);
+
+ timeout = command->timeout;
+ if (timeout < MPT3_IOCTL_DEFAULT_TIMEOUT)
+ timeout = MPT3_IOCTL_DEFAULT_TIMEOUT;
+
+ wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ);
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->ctl_cmds.status, mpi_request,
+ sizeof(Mpi26MctpPassthroughRequest_t) / 4, issue_reset);
+ goto issue_host_reset;
+ }
+
+ mpi_reply = ioc->ctl_cmds.reply;
+
+ /* copy out xdata to user */
+ if (data_in_sz)
+ memcpy(command->data_in_buf_ptr, data_in, data_in_sz);
+
+ /* copy out reply message frame to user */
+ if (command->max_reply_bytes) {
+ sz = min_t(u32, command->max_reply_bytes, ioc->reply_sz);
+ memcpy(command->reply_frame_buf_ptr, ioc->ctl_cmds.reply, sz);
+ }
+
+issue_host_reset:
+ if (issue_reset) {
+ ret = -ENODATA;
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ }
+
+out:
+ /* free memory associated with sg buffers */
+ if (data_in)
+ dma_free_coherent(&ioc->pdev->dev, data_in_sz, data_in,
+ data_in_dma);
+
+ if (data_out)
+ dma_free_coherent(&ioc->pdev->dev, data_out_sz, data_out,
+ data_out_dma);
+
+ ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
+
+unlock_ctl_cmds:
+ mutex_unlock(&ioc->ctl_cmds.mutex);
+
+unlock_pci_access:
+ mutex_unlock(&ioc->pci_access_mutex);
+ return ret;
+
+}
+EXPORT_SYMBOL(mpt3sas_send_mctp_passthru_req);
+
+/**
* _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked)
* @file: (struct file)
* @cmd: ioctl opcode
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
index 171709e91006..483e0549c02f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -160,6 +160,9 @@ struct mpt3_ioctl_pci_info {
#define MPT3_IOCTL_INTERFACE_SAS35 (0x07)
#define MPT2_IOCTL_VERSION_LENGTH (32)
+/* Bits set for mpt3_ioctl_iocinfo.driver_cap */
+#define MPT3_IOCTL_IOCINFO_DRIVER_CAP_MCTP_PASSTHRU 0x1
+
/**
* struct mpt3_ioctl_iocinfo - generic controller info
* @hdr - generic header
@@ -175,6 +178,7 @@ struct mpt3_ioctl_pci_info {
* @driver_version - driver version - 32 ASCII characters
* @rsvd1 - reserved
* @scsi_id - scsi id of adapter 0
+ * @driver_capability - driver capabilities
* @rsvd2 - reserved
* @pci_information - pci info (2nd revision)
*/
@@ -192,7 +196,8 @@ struct mpt3_ioctl_iocinfo {
uint8_t driver_version[MPT2_IOCTL_VERSION_LENGTH];
uint8_t rsvd1;
uint8_t scsi_id;
- uint16_t rsvd2;
+ uint8_t driver_capability;
+ uint8_t rsvd2;
struct mpt3_ioctl_pci_info pci_information;
};
@@ -458,4 +463,46 @@ struct mpt3_enable_diag_sbr_reload {
struct mpt3_ioctl_header hdr;
};
+/**
+ * struct mpt3_passthru_command - generic mpt firmware passthru command
+ * @dev_index - device index
+ * @timeout - command timeout in seconds (if zero, the driver default
+ * value is used).
+ * @reply_frame_buf_ptr - MPI reply location
+ * @data_in_buf_ptr - destination for read
+ * @data_out_buf_ptr - data source for write
+ * @max_reply_bytes - maximum number of reply bytes to be sent to app.
+ * @data_in_size - number of bytes for data transfer in (read)
+ * @data_out_size - number of bytes for data transfer out (write)
+ * @mpi_request - request frame
+ */
+struct mpt3_passthru_command {
+ u8 dev_index;
+ uint32_t timeout;
+ void *reply_frame_buf_ptr;
+ void *data_in_buf_ptr;
+ void *data_out_buf_ptr;
+ uint32_t max_reply_bytes;
+ uint32_t data_in_size;
+ uint32_t data_out_size;
+ Mpi26MctpPassthroughRequest_t *mpi_request;
+};
+
+/*
+ * mpt3sas_get_device_count - Retrieve the count of MCTP passthrough
+ * capable devices managed by the driver.
+ *
+ * Returns number of devices that support MCTP passthrough.
+ */
+int mpt3sas_get_device_count(void);
+
+/*
+ * mpt3sas_send_mctp_passthru_req - Send an MPI MCTP passthrough command to
+ * firmware
+ * @command: The MPI MCTP passthrough command to send to firmware
+ *
+ * Returns 0 on success, anything else is an error.
+ */
+int mpt3sas_send_mctp_passthru_req(struct mpt3_passthru_command *command);
+
#endif /* MPT3SAS_CTL_H_INCLUDED */
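
For in-kernel consumers, the two exported entry points declared above work together: mpt3sas_get_device_count() reports how many IOCs advertise MCTP passthrough, and dev_index in struct mpt3_passthru_command selects one of them by position. A hypothetical caller might look like the sketch below; the buffer names and the broadcast loop are illustrative, not taken from this patch.

/* Hypothetical caller: submit the same MCTP request to every capable IOC. */
static int example_mctp_broadcast(Mpi26MctpPassthroughRequest_t *mpi_req,
				  void *out_buf, u32 out_len,
				  void *in_buf, u32 in_len,
				  void *reply_buf, u32 reply_len)
{
	struct mpt3_passthru_command cmd = { };
	int i, rc, count = mpt3sas_get_device_count();

	for (i = 0; i < count; i++) {
		cmd.dev_index = i;		/* position among MCTP-capable IOCs */
		cmd.timeout = 0;		/* 0 selects the driver default */
		cmd.mpi_request = mpi_req;
		cmd.data_out_buf_ptr = out_buf;	/* H2D payload */
		cmd.data_out_size = out_len;
		cmd.data_in_buf_ptr = in_buf;	/* D2H response buffer */
		cmd.data_in_size = in_len;
		cmd.reply_frame_buf_ptr = reply_buf;
		cmd.max_reply_bytes = reply_len;

		rc = mpt3sas_send_mctp_passthru_req(&cmd);
		if (rc)
			return rc;
	}
	return 0;
}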
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index f2a55aa5fe65..508861e88d9f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -53,7 +53,6 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/raid_class.h>
-#include <linux/blk-mq-pci.h>
#include <linux/unaligned.h>
#include "mpt3sas_base.h"
@@ -2026,14 +2025,14 @@ scsih_target_destroy(struct scsi_target *starget)
}
/**
- * scsih_slave_alloc - device add routine
+ * scsih_sdev_init - device add routine
* @sdev: scsi device struct
*
* Return: 0 if ok. Any other return is assumed to be an error and
* the device is ignored.
*/
static int
-scsih_slave_alloc(struct scsi_device *sdev)
+scsih_sdev_init(struct scsi_device *sdev)
{
struct Scsi_Host *shost;
struct MPT3SAS_ADAPTER *ioc;
@@ -2108,11 +2107,11 @@ scsih_slave_alloc(struct scsi_device *sdev)
}
/**
- * scsih_slave_destroy - device destroy routine
+ * scsih_sdev_destroy - device destroy routine
* @sdev: scsi device struct
*/
static void
-scsih_slave_destroy(struct scsi_device *sdev)
+scsih_sdev_destroy(struct scsi_device *sdev)
{
struct MPT3SAS_TARGET *sas_target_priv_data;
struct scsi_target *starget;
@@ -2497,7 +2496,7 @@ _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
}
/**
- * scsih_device_configure - device configure routine.
+ * scsih_sdev_configure - device configure routine.
* @sdev: scsi device struct
* @lim: queue limits
*
@@ -2505,7 +2504,7 @@ _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
* the device is ignored.
*/
static int
-scsih_device_configure(struct scsi_device *sdev, struct queue_limits *lim)
+scsih_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct Scsi_Host *shost = sdev->host;
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
@@ -2704,7 +2703,7 @@ scsih_device_configure(struct scsi_device *sdev, struct queue_limits *lim)
ssp_target = 1;
if (sas_device->device_info &
MPI2_SAS_DEVICE_INFO_SEP) {
- sdev_printk(KERN_WARNING, sdev,
+ sdev_printk(KERN_INFO, sdev,
"set ignore_delay_remove for handle(0x%04x)\n",
sas_device_priv_data->sas_target->handle);
sas_device_priv_data->ignore_delay_remove = 1;
@@ -11890,7 +11889,7 @@ static void scsih_map_queues(struct Scsi_Host *shost)
*/
map->queue_offset = qoff;
if (i != HCTX_TYPE_POLL)
- blk_mq_pci_map_queues(map, ioc->pdev, offset);
+ blk_mq_map_hw_queues(map, &ioc->pdev->dev, offset);
else
blk_mq_map_queues(map);
@@ -11905,10 +11904,10 @@ static const struct scsi_host_template mpt2sas_driver_template = {
.proc_name = MPT2SAS_DRIVER_NAME,
.queuecommand = scsih_qcmd,
.target_alloc = scsih_target_alloc,
- .slave_alloc = scsih_slave_alloc,
- .device_configure = scsih_device_configure,
+ .sdev_init = scsih_sdev_init,
+ .sdev_configure = scsih_sdev_configure,
.target_destroy = scsih_target_destroy,
- .slave_destroy = scsih_slave_destroy,
+ .sdev_destroy = scsih_sdev_destroy,
.scan_finished = scsih_scan_finished,
.scan_start = scsih_scan_start,
.change_queue_depth = scsih_change_queue_depth,
@@ -11943,10 +11942,10 @@ static const struct scsi_host_template mpt3sas_driver_template = {
.proc_name = MPT3SAS_DRIVER_NAME,
.queuecommand = scsih_qcmd,
.target_alloc = scsih_target_alloc,
- .slave_alloc = scsih_slave_alloc,
- .device_configure = scsih_device_configure,
+ .sdev_init = scsih_sdev_init,
+ .sdev_configure = scsih_sdev_configure,
.target_destroy = scsih_target_destroy,
- .slave_destroy = scsih_slave_destroy,
+ .sdev_destroy = scsih_sdev_destroy,
.scan_finished = scsih_scan_finished,
.scan_start = scsih_scan_start,
.change_queue_depth = scsih_change_queue_depth,
@@ -12711,7 +12710,7 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
};
MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
-static struct pci_error_handlers _mpt3sas_err_handler = {
+static const struct pci_error_handlers _mpt3sas_err_handler = {
.error_detected = scsih_pci_error_detected,
.mmio_enabled = scsih_pci_mmio_enabled,
.slot_reset = scsih_pci_slot_reset,
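
The host-template changes in this file replace the old slave_alloc/slave_configure/slave_destroy callbacks with sdev_init/sdev_configure/sdev_destroy, and sdev_configure now receives the queue limits that will be applied to the device. A minimal sketch of the new callback shape, with an illustrative limit value rather than anything taken from this driver:

/* Sketch of the updated hook; the max_hw_sectors value is illustrative only. */
static int example_sdev_configure(struct scsi_device *sdev,
				  struct queue_limits *lim)
{
	lim->max_hw_sectors = 1024;		/* 512 KiB with 512-byte sectors */
	scsi_change_queue_depth(sdev, 32);	/* queue-depth tuning is unchanged */
	return 0;
}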
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index d84413b77d84..dc74ebc6405a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -328,10 +328,10 @@ struct rep_manu_reply {
u8 reserved0[2];
u8 sas_format;
u8 reserved2[3];
- u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
- u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
- u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
- u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN] __nonstring;
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN] __nonstring;
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN] __nonstring;
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN] __nonstring;
u16 component_id;
u8 component_revision_id;
u8 reserved3;
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 020037cbf0d9..2c72da6b8cf0 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -609,7 +609,7 @@ static void mvs_pci_remove(struct pci_dev *pdev)
return;
}
-static struct pci_device_id mvs_pci_table[] = {
+static const struct pci_device_id mvs_pci_table[] = {
{ PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
{ PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
{
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 1444b1f1c4c8..52ac10226cb0 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -151,16 +151,6 @@ static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
}
-void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
-{
- u32 no;
- for_each_phy(phy_mask, phy_mask, no) {
- if (!(phy_mask & 1))
- continue;
- MVS_CHIP_DISP->phy_reset(mvi, no, hard);
- }
-}
-
int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata)
{
@@ -986,7 +976,7 @@ static u32 mvs_is_sig_fis_received(u32 irq_status)
static void mvs_sig_remove_timer(struct mvs_phy *phy)
{
if (phy->timer.function)
- del_timer(&phy->timer);
+ timer_delete(&phy->timer);
phy->timer.function = NULL;
}
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 19b01f7c4767..09ce3f2241f2 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -425,7 +425,6 @@ struct mvs_task_exec_info {
void mvs_get_sas_addr(void *buf, u32 buflen);
void mvs_iounmap(void __iomem *regs);
int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex);
-void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard);
int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata);
void mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo,
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index d9d366ec17dc..96549e7f5705 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2000,7 +2000,8 @@ static struct mvumi_instance_template mvumi_instance_9580 = {
.reset_host = mvumi_reset_host_9580,
};
-static int mvumi_slave_configure(struct scsi_device *sdev)
+static int mvumi_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct mvumi_hba *mhba;
unsigned char bitcount = sizeof(unsigned char) * 8;
@@ -2172,7 +2173,7 @@ static const struct scsi_host_template mvumi_template = {
.module = THIS_MODULE,
.name = "Marvell Storage Controller",
- .slave_configure = mvumi_slave_configure,
+ .sdev_configure = mvumi_sdev_configure,
.queuecommand = mvumi_queue_command,
.eh_timed_out = mvumi_timed_out,
.eh_host_reset_handler = mvumi_host_reset,
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index a7e64b867c8e..486db5b2f05d 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -891,7 +891,7 @@ static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
status = mmio_init_fn(pdev, base, &mbox);
if (status != MYRB_STATUS_SUCCESS) {
dev_err(&pdev->dev,
- "Failed to enable mailbox, statux %02X\n",
+ "Failed to enable mailbox, status %02X\n",
status);
return false;
}
@@ -1619,7 +1619,7 @@ static int myrb_queuecommand(struct Scsi_Host *shost,
return myrb_pthru_queuecommand(shost, scmd);
}
-static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
+static int myrb_ldev_sdev_init(struct scsi_device *sdev)
{
struct myrb_hba *cb = shost_priv(sdev->host);
struct myrb_ldev_info *ldev_info;
@@ -1627,8 +1627,6 @@ static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
enum raid_level level;
ldev_info = cb->ldev_info_buf + ldev_num;
- if (!ldev_info)
- return -ENXIO;
sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
if (!sdev->hostdata)
@@ -1665,7 +1663,7 @@ static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
return 0;
}
-static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
+static int myrb_pdev_sdev_init(struct scsi_device *sdev)
{
struct myrb_hba *cb = shost_priv(sdev->host);
struct myrb_pdev_state *pdev_info;
@@ -1701,7 +1699,7 @@ static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
return 0;
}
-static int myrb_slave_alloc(struct scsi_device *sdev)
+static int myrb_sdev_init(struct scsi_device *sdev)
{
if (sdev->channel > myrb_logical_channel(sdev->host))
return -ENXIO;
@@ -1710,12 +1708,13 @@ static int myrb_slave_alloc(struct scsi_device *sdev)
return -ENXIO;
if (sdev->channel == myrb_logical_channel(sdev->host))
- return myrb_ldev_slave_alloc(sdev);
+ return myrb_ldev_sdev_init(sdev);
- return myrb_pdev_slave_alloc(sdev);
+ return myrb_pdev_sdev_init(sdev);
}
-static int myrb_slave_configure(struct scsi_device *sdev)
+static int myrb_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct myrb_ldev_info *ldev_info;
@@ -1741,7 +1740,7 @@ static int myrb_slave_configure(struct scsi_device *sdev)
return 0;
}
-static void myrb_slave_destroy(struct scsi_device *sdev)
+static void myrb_sdev_destroy(struct scsi_device *sdev)
{
kfree(sdev->hostdata);
}
@@ -2208,9 +2207,9 @@ static const struct scsi_host_template myrb_template = {
.proc_name = "myrb",
.queuecommand = myrb_queuecommand,
.eh_host_reset_handler = myrb_host_reset,
- .slave_alloc = myrb_slave_alloc,
- .slave_configure = myrb_slave_configure,
- .slave_destroy = myrb_slave_destroy,
+ .sdev_init = myrb_sdev_init,
+ .sdev_configure = myrb_sdev_configure,
+ .sdev_destroy = myrb_sdev_destroy,
.bios_param = myrb_biosparam,
.cmd_size = sizeof(struct myrb_cmdblk),
.shost_groups = myrb_shost_groups,
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index 1469d0c54e45..95af3bb03834 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -1786,7 +1786,7 @@ static unsigned short myrs_translate_ldev(struct myrs_hba *cs,
return ldev_num;
}
-static int myrs_slave_alloc(struct scsi_device *sdev)
+static int myrs_sdev_init(struct scsi_device *sdev)
{
struct myrs_hba *cs = shost_priv(sdev->host);
unsigned char status;
@@ -1882,7 +1882,8 @@ static int myrs_slave_alloc(struct scsi_device *sdev)
return 0;
}
-static int myrs_slave_configure(struct scsi_device *sdev)
+static int myrs_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct myrs_hba *cs = shost_priv(sdev->host);
struct myrs_ldev_info *ldev_info;
@@ -1910,7 +1911,7 @@ static int myrs_slave_configure(struct scsi_device *sdev)
return 0;
}
-static void myrs_slave_destroy(struct scsi_device *sdev)
+static void myrs_sdev_destroy(struct scsi_device *sdev)
{
kfree(sdev->hostdata);
}
@@ -1921,9 +1922,9 @@ static const struct scsi_host_template myrs_template = {
.proc_name = "myrs",
.queuecommand = myrs_queuecommand,
.eh_host_reset_handler = myrs_host_reset,
- .slave_alloc = myrs_slave_alloc,
- .slave_configure = myrs_slave_configure,
- .slave_destroy = myrs_slave_destroy,
+ .sdev_init = myrs_sdev_init,
+ .sdev_configure = myrs_sdev_configure,
+ .sdev_destroy = myrs_sdev_destroy,
.cmd_size = sizeof(struct myrs_cmdblk),
.shost_groups = myrs_shost_groups,
.sdev_groups = myrs_sdev_groups,
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 35869b4f9329..14ac81ec0aa0 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -7786,7 +7786,7 @@ static void __init ncr_getclock (struct ncb *np, int mult)
/*===================== LINUX ENTRY POINTS SECTION ==========================*/
-static int ncr53c8xx_slave_alloc(struct scsi_device *device)
+static int ncr53c8xx_sdev_init(struct scsi_device *device)
{
struct Scsi_Host *host = device->host;
struct ncb *np = ((struct host_data *) host->hostdata)->ncb;
@@ -7796,7 +7796,8 @@ static int ncr53c8xx_slave_alloc(struct scsi_device *device)
return 0;
}
-static int ncr53c8xx_slave_configure(struct scsi_device *device)
+static int ncr53c8xx_sdev_configure(struct scsi_device *device,
+ struct queue_limits *lim)
{
struct Scsi_Host *host = device->host;
struct ncb *np = ((struct host_data *) host->hostdata)->ncb;
@@ -8093,8 +8094,8 @@ struct Scsi_Host * __init ncr_attach(struct scsi_host_template *tpnt,
tpnt->shost_groups = ncr53c8xx_host_groups;
tpnt->queuecommand = ncr53c8xx_queue_command;
- tpnt->slave_configure = ncr53c8xx_slave_configure;
- tpnt->slave_alloc = ncr53c8xx_slave_alloc;
+ tpnt->sdev_configure = ncr53c8xx_sdev_configure;
+ tpnt->sdev_init = ncr53c8xx_sdev_init;
tpnt->eh_bus_reset_handler = ncr53c8xx_bus_reset;
tpnt->can_queue = SCSI_NCR_CAN_QUEUE;
tpnt->this_id = 7;
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index b7987019686e..abc4ce9eae74 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -66,7 +66,7 @@ static const char *nsp32_release_version = "1.2";
/****************************************************************************
* Supported hardware
*/
-static struct pci_device_id nsp32_pci_table[] = {
+static const struct pci_device_id nsp32_pci_table[] = {
{
.vendor = PCI_VENDOR_ID_IODATA,
.device = PCI_DEVICE_ID_NINJASCSI_32BI_CBSC_II,
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
index 7871e29a820a..4e19d61dffbb 100644
--- a/drivers/scsi/pm8001/pm8001_defs.h
+++ b/drivers/scsi/pm8001/pm8001_defs.h
@@ -90,7 +90,7 @@ enum port_type {
#define PM8001_MAX_PORTS 16 /* max. possible ports */
#define PM8001_MAX_DEVICES 2048 /* max supported device */
#define PM8001_MAX_MSIX_VEC 64 /* max msi-x int for spcv/ve */
-#define PM8001_RESERVE_SLOT 8
+#define PM8001_RESERVE_SLOT 128
#define PM8001_SECTOR_SIZE 512
#define PM8001_PAGE_SIZE_4K 4096
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index dec1e2d380f1..42a4eeac24c9 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3472,12 +3472,13 @@ int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
status, tag, scp);
switch (status) {
case IO_SUCCESS:
- pm8001_dbg(pm8001_ha, EH, "IO_SUCCESS\n");
+ pm8001_dbg(pm8001_ha, FAIL, "ABORT IO_SUCCESS for tag %#x\n",
+ tag);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_SAM_STAT_GOOD;
break;
case IO_NOT_VALID:
- pm8001_dbg(pm8001_ha, EH, "IO_NOT_VALID\n");
+ pm8001_dbg(pm8001_ha, FAIL, "IO_NOT_VALID for tag %#x\n", tag);
ts->resp = TMF_RESP_FUNC_FAILED;
break;
}
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index f8c81e53e93f..599410bcdfea 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -105,7 +105,7 @@ static void pm8001_map_queues(struct Scsi_Host *shost)
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
if (pm8001_ha->number_of_intr > 1) {
- blk_mq_pci_map_queues(qmap, pm8001_ha->pdev, 1);
+ blk_mq_map_hw_queues(qmap, &pm8001_ha->pdev->dev, 1);
return;
}
@@ -736,7 +736,7 @@ static int pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
return -EIO;
}
time_remaining = wait_for_completion_timeout(&completion,
- msecs_to_jiffies(60*1000)); // 1 min
+ secs_to_jiffies(60)); // 1 min
if (!time_remaining) {
kfree(payload.func_specific);
pm8001_dbg(pm8001_ha, FAIL, "get_nvmd_req timeout\n");
@@ -1435,7 +1435,7 @@ err_out_disable:
/* update of pci device, vendor id and driver data with
* unique value for each of the controller
*/
-static struct pci_device_id pm8001_pci_table[] = {
+static const struct pci_device_id pm8001_pci_table[] = {
{ PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 },
{ PCI_VDEVICE(PMC_Sierra, 0x8006), chip_8006 },
{ PCI_VDEVICE(ADAPTEC2, 0x8006), chip_8006 },
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index d80cffd25a6e..f7067878b34f 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -101,6 +101,63 @@ int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
return 0;
}
+static void pm80xx_get_tag_opcodes(struct sas_task *task, int *ata_op,
+ int *ata_tag, bool *task_aborted)
+{
+ unsigned long flags;
+ struct ata_queued_cmd *qc = NULL;
+
+ *ata_op = 0;
+ *ata_tag = -1;
+ *task_aborted = false;
+
+ if (!task)
+ return;
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED)))
+ *task_aborted = true;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ if (task->task_proto == SAS_PROTOCOL_STP) {
+ // sas_ata_qc_issue path uses SAS_PROTOCOL_STP.
+ // This only works for scsi + libsas + libata users.
+ qc = task->uldd_task;
+ if (qc) {
+ *ata_op = qc->tf.command;
+ *ata_tag = qc->tag;
+ }
+ }
+}
+
+void pm80xx_show_pending_commands(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_device *target_pm8001_dev)
+{
+ int i = 0, ata_op = 0, ata_tag = -1;
+ struct pm8001_ccb_info *ccb = NULL;
+ struct sas_task *task = NULL;
+ struct pm8001_device *pm8001_dev = NULL;
+ bool task_aborted;
+
+ for (i = 0; i < pm8001_ha->ccb_count; i++) {
+ ccb = &pm8001_ha->ccb_info[i];
+ if (ccb->ccb_tag == PM8001_INVALID_TAG)
+ continue;
+ pm8001_dev = ccb->device;
+ if (target_pm8001_dev && pm8001_dev &&
+ target_pm8001_dev != pm8001_dev)
+ continue;
+ task = ccb->task;
+ pm80xx_get_tag_opcodes(task, &ata_op, &ata_tag, &task_aborted);
+ pm8001_dbg(pm8001_ha, FAIL,
+ "tag %#x, device %#x task %p task aborted %d ata opcode %#x ata tag %d\n",
+ ccb->ccb_tag,
+ (pm8001_dev ? pm8001_dev->device_id : 0),
+ task, task_aborted,
+ ata_op, ata_tag);
+ }
+}
+
/**
* pm8001_mem_alloc - allocate memory for pm8001.
* @pdev: pci device.
@@ -374,23 +431,6 @@ static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha,
return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
}
- /* Find the local port id that's attached to this device */
-static int sas_find_local_port_id(struct domain_device *dev)
-{
- struct domain_device *pdev = dev->parent;
-
- /* Directly attached device */
- if (!pdev)
- return dev->port->id;
- while (pdev) {
- struct domain_device *pdev_p = pdev->parent;
- if (!pdev_p)
- return pdev->port->id;
- pdev = pdev->parent;
- }
- return 0;
-}
-
#define DEV_IS_GONE(pm8001_dev) \
((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
@@ -463,10 +503,10 @@ int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
spin_lock_irqsave(&pm8001_ha->lock, flags);
pm8001_dev = dev->lldd_dev;
- port = &pm8001_ha->port[sas_find_local_port_id(dev)];
+ port = pm8001_ha->phy[pm8001_dev->attached_phy].port;
if (!internal_abort &&
- (DEV_IS_GONE(pm8001_dev) || !port->port_attached)) {
+ (DEV_IS_GONE(pm8001_dev) || !port || !port->port_attached)) {
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_PHY_DOWN;
if (sas_protocol_ata(task_proto)) {
@@ -726,6 +766,7 @@ static void pm8001_dev_gone_notify(struct domain_device *dev)
spin_lock_irqsave(&pm8001_ha->lock, flags);
}
PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
+ pm8001_ha->phy[pm8001_dev->attached_phy].phy_attached = 0;
pm8001_free_dev(pm8001_dev);
} else {
pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 42c7b3f7afbf..315f6a7523f0 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -56,7 +56,6 @@
#include <scsi/sas_ata.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-pci.h>
#include "pm8001_defs.h"
#define DRV_NAME "pm80xx"
@@ -787,6 +786,8 @@ static inline void pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha,
}
void pm8001_setds_completion(struct domain_device *dev);
void pm8001_tmf_aborted(struct sas_task *task);
+void pm80xx_show_pending_commands(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_device *dev);
#endif
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index e65951dd2024..5b373c53c036 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -2246,7 +2246,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
u32 param;
u32 status;
u32 tag;
- int i, j;
+ int i, j, ata_tag = -1;
u8 sata_addr_low[4];
u32 temp_sata_addr_low, temp_sata_addr_hi;
u8 sata_addr_hi[4];
@@ -2256,6 +2256,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
u32 *sata_resp;
struct pm8001_device *pm8001_dev;
unsigned long flags;
+ struct ata_queued_cmd *qc;
psataPayload = (struct sata_completion_resp *)(piomb + 4);
status = le32_to_cpu(psataPayload->status);
@@ -2267,8 +2268,11 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
pm8001_dev = ccb->device;
if (t) {
- if (t->dev && (t->dev->lldd_dev))
+ if (t->dev && (t->dev->lldd_dev)) {
pm8001_dev = t->dev->lldd_dev;
+ qc = t->uldd_task;
+ ata_tag = qc ? qc->tag : -1;
+ }
} else {
pm8001_dbg(pm8001_ha, FAIL, "task null, freeing CCB tag %d\n",
ccb->ccb_tag);
@@ -2276,16 +2280,14 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
return;
}
-
if (pm8001_dev && unlikely(!t->lldd_task || !t->dev))
return;
ts = &t->task_status;
-
if (status != IO_SUCCESS) {
pm8001_dbg(pm8001_ha, FAIL,
- "IO failed device_id %u status 0x%x tag %d\n",
- pm8001_dev->device_id, status, tag);
+ "IO failed status %#x pm80xx tag %#x ata tag %d\n",
+ status, tag, ata_tag);
}
/* Print sas address of IO failed device */
@@ -2667,13 +2669,19 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
/* Check if this is NCQ error */
if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) {
+ /* tag value is invalid with this event */
+ pm8001_dbg(pm8001_ha, FAIL, "NCQ ERROR for device %#x tag %#x\n",
+ dev_id, tag);
+
/* find device using device id */
pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id);
/* send read log extension by aborting the link - libata does what we want */
- if (pm8001_dev)
+ if (pm8001_dev) {
+ pm80xx_show_pending_commands(pm8001_ha, pm8001_dev);
pm8001_handle_event(pm8001_ha,
pm8001_dev,
IO_XFER_ERROR_ABORTED_NCQ_MODE);
+ }
return;
}
@@ -3336,10 +3344,11 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 phy_id =
le32_to_cpu(pPayload->phyid) & 0xFF;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ u32 tag = le32_to_cpu(pPayload->tag);
pm8001_dbg(pm8001_ha, INIT,
- "phy start resp status:0x%x, phyid:0x%x\n",
- status, phy_id);
+ "phy start resp status:0x%x, phyid:0x%x, tag 0x%x\n",
+ status, phy_id, tag);
if (status == 0)
phy->phy_state = PHY_LINK_DOWN;
@@ -3348,6 +3357,8 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
complete(phy->enable_completion);
phy->enable_completion = NULL;
}
+
+ pm8001_tag_free(pm8001_ha, tag);
return 0;
}
@@ -3628,8 +3639,10 @@ static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 phyid =
le32_to_cpu(pPayload->phyid) & 0xFF;
struct pm8001_phy *phy = &pm8001_ha->phy[phyid];
- pm8001_dbg(pm8001_ha, MSG, "phy:0x%x status:0x%x\n",
- phyid, status);
+ u32 tag = le32_to_cpu(pPayload->tag);
+
+ pm8001_dbg(pm8001_ha, MSG, "phy:0x%x status:0x%x tag 0x%x\n", phyid,
+ status, tag);
if (status == PHY_STOP_SUCCESS ||
status == PHY_STOP_ERR_DEVICE_ATTACHED) {
phy->phy_state = PHY_LINK_DISABLE;
@@ -3637,6 +3650,7 @@ static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
phy->sas_phy.linkrate = SAS_PHY_DISABLED;
}
+ pm8001_tag_free(pm8001_ha, tag);
return 0;
}
@@ -3655,10 +3669,9 @@ static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
u32 tag = le32_to_cpu(pPayload->tag);
pm8001_dbg(pm8001_ha, MSG,
- "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n",
- status, err_qlfr_pgcd);
+ "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x tag 0x%x\n",
+ status, err_qlfr_pgcd, tag);
pm8001_tag_free(pm8001_ha, tag);
-
return 0;
}
@@ -4632,9 +4645,16 @@ static int
pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
{
struct phy_start_req payload;
- u32 tag = 0x01;
+ int ret;
+ u32 tag;
u32 opcode = OPC_INB_PHYSTART;
+ ret = pm8001_tag_alloc(pm8001_ha, &tag);
+ if (ret) {
+ pm8001_dbg(pm8001_ha, FAIL, "Tag allocation failed\n");
+ return ret;
+ }
+
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(tag);
@@ -4670,9 +4690,16 @@ static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
u8 phy_id)
{
struct phy_stop_req payload;
- u32 tag = 0x01;
+ int ret;
+ u32 tag;
u32 opcode = OPC_INB_PHYSTOP;
+ ret = pm8001_tag_alloc(pm8001_ha, &tag);
+ if (ret) {
+ pm8001_dbg(pm8001_ha, FAIL, "Tag allocation failed\n");
+ return ret;
+ }
+
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(tag);
payload.phy_id = cpu_to_le32(phy_id);
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 4c5881917d76..e0aeb206df8d 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -113,7 +113,7 @@ static struct pmcraid_chip_details pmcraid_chip_cfg[] = {
/*
* PCI device ids supported by pmcraid driver
*/
-static struct pci_device_id pmcraid_pci_table[] = {
+static const struct pci_device_id pmcraid_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_PMC, PCI_DEVICE_ID_PMC_MAXRAID),
0, 0, (kernel_ulong_t)&pmcraid_chip_cfg[0]
},
@@ -125,7 +125,7 @@ MODULE_DEVICE_TABLE(pci, pmcraid_pci_table);
/**
- * pmcraid_slave_alloc - Prepare for commands to a device
+ * pmcraid_sdev_init - Prepare for commands to a device
* @scsi_dev: scsi device struct
*
* This function is called by mid-layer prior to sending any command to the new
@@ -136,7 +136,7 @@ MODULE_DEVICE_TABLE(pci, pmcraid_pci_table);
* Return value:
* 0 on success / -ENXIO if device does not exist
*/
-static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
+static int pmcraid_sdev_init(struct scsi_device *scsi_dev)
{
struct pmcraid_resource_entry *temp, *res = NULL;
struct pmcraid_instance *pinstance;
@@ -197,7 +197,7 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
}
/**
- * pmcraid_device_configure - Configures a SCSI device
+ * pmcraid_sdev_configure - Configures a SCSI device
* @scsi_dev: scsi device struct
* @lim: queue limits
*
@@ -210,8 +210,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
* Return value:
* 0 on success
*/
-static int pmcraid_device_configure(struct scsi_device *scsi_dev,
- struct queue_limits *lim)
+static int pmcraid_sdev_configure(struct scsi_device *scsi_dev,
+ struct queue_limits *lim)
{
struct pmcraid_resource_entry *res = scsi_dev->hostdata;
@@ -248,17 +248,17 @@ static int pmcraid_device_configure(struct scsi_device *scsi_dev,
}
/**
- * pmcraid_slave_destroy - Unconfigure a SCSI device before removing it
+ * pmcraid_sdev_destroy - Unconfigure a SCSI device before removing it
*
* @scsi_dev: scsi device struct
*
* This is called by mid-layer before removing a device. Pointer assignments
- * done in pmcraid_slave_alloc will be reset to NULL here.
+ * done in pmcraid_sdev_init will be reset to NULL here.
*
* Return value
* none
*/
-static void pmcraid_slave_destroy(struct scsi_device *scsi_dev)
+static void pmcraid_sdev_destroy(struct scsi_device *scsi_dev)
{
struct pmcraid_resource_entry *res;
@@ -495,7 +495,7 @@ static void pmcraid_clr_trans_op(
}
if (pinstance->reset_cmd != NULL) {
- del_timer(&pinstance->reset_cmd->timer);
+ timer_delete(&pinstance->reset_cmd->timer);
spin_lock_irqsave(
pinstance->host->host_lock, lock_flags);
pinstance->reset_cmd->cmd_done(pinstance->reset_cmd);
@@ -1999,7 +1999,7 @@ static void pmcraid_fail_outstanding_cmds(struct pmcraid_instance *pinstance)
cpu_to_le32(PMCRAID_DRIVER_ILID);
/* In case the command timer is still running */
- del_timer(&cmd->timer);
+ timer_delete(&cmd->timer);
/* If this is an IO command, complete it by invoking scsi_done
* function. If this is one of the internal commands other
@@ -3668,9 +3668,9 @@ static const struct scsi_host_template pmcraid_host_template = {
.eh_device_reset_handler = pmcraid_eh_device_reset_handler,
.eh_host_reset_handler = pmcraid_eh_host_reset_handler,
- .slave_alloc = pmcraid_slave_alloc,
- .device_configure = pmcraid_device_configure,
- .slave_destroy = pmcraid_slave_destroy,
+ .sdev_init = pmcraid_sdev_init,
+ .sdev_configure = pmcraid_sdev_configure,
+ .sdev_destroy = pmcraid_sdev_destroy,
.change_queue_depth = pmcraid_change_queue_depth,
.can_queue = PMCRAID_MAX_IO_CMD,
.this_id = -1,
@@ -3982,7 +3982,7 @@ static void pmcraid_tasklet_function(unsigned long instance)
list_del(&cmd->free_list);
spin_unlock_irqrestore(&pinstance->pending_pool_lock,
pending_lock_flags);
- del_timer(&cmd->timer);
+ timer_delete(&cmd->timer);
atomic_dec(&pinstance->outstanding_cmds);
if (cmd->cmd_done == pmcraid_ioa_reset) {
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index 90495a832f34..92fe5c5c5bb0 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -61,7 +61,8 @@ enum lv1_atapi_in_out {
};
-static int ps3rom_slave_configure(struct scsi_device *scsi_dev)
+static int ps3rom_sdev_configure(struct scsi_device *scsi_dev,
+ struct queue_limits *lim)
{
struct ps3rom_private *priv = shost_priv(scsi_dev->host);
struct ps3_storage_device *dev = priv->dev;
@@ -325,7 +326,7 @@ done:
static const struct scsi_host_template ps3rom_host_template = {
.name = DEVICE_NAME,
- .slave_configure = ps3rom_slave_configure,
+ .sdev_configure = ps3rom_sdev_configure,
.queuecommand = ps3rom_queuecommand,
.can_queue = 1,
.this_id = 7,
diff --git a/drivers/scsi/qedf/qedf_attr.c b/drivers/scsi/qedf/qedf_attr.c
index 8d8c760eee43..769da92ee20d 100644
--- a/drivers/scsi/qedf/qedf_attr.c
+++ b/drivers/scsi/qedf/qedf_attr.c
@@ -104,7 +104,7 @@ void qedf_capture_grc_dump(struct qedf_ctx *qedf)
static ssize_t
qedf_sysfs_read_grcdump(struct file *filep, struct kobject *kobj,
- struct bin_attribute *ba, char *buf, loff_t off,
+ const struct bin_attribute *ba, char *buf, loff_t off,
size_t count)
{
ssize_t ret = 0;
@@ -124,7 +124,7 @@ qedf_sysfs_read_grcdump(struct file *filep, struct kobject *kobj,
static ssize_t
qedf_sysfs_write_grcdump(struct file *filep, struct kobject *kobj,
- struct bin_attribute *ba, char *buf, loff_t off,
+ const struct bin_attribute *ba, char *buf, loff_t off,
size_t count)
{
struct fc_lport *lport = NULL;
@@ -160,14 +160,14 @@ qedf_sysfs_write_grcdump(struct file *filep, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_grcdump_attr = {
+static const struct bin_attribute sysfs_grcdump_attr = {
.attr = {
.name = "grcdump",
.mode = S_IRUSR | S_IWUSR,
},
.size = 0,
- .read = qedf_sysfs_read_grcdump,
- .write = qedf_sysfs_write_grcdump,
+ .read_new = qedf_sysfs_read_grcdump,
+ .write_new = qedf_sysfs_write_grcdump,
};
static struct sysfs_bin_attrs bin_file_entries[] = {
diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h
index 5ec2b817c694..eeb6c841dacb 100644
--- a/drivers/scsi/qedf/qedf_dbg.h
+++ b/drivers/scsi/qedf/qedf_dbg.h
@@ -100,7 +100,7 @@ struct Scsi_Host;
struct sysfs_bin_attrs {
char *name;
- struct bin_attribute *attr;
+ const struct bin_attribute *attr;
};
extern int qedf_alloc_grc_dump_buf(uint8_t **buf, uint32_t len);
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index d2f47dc31dbf..6b1ebab36fa3 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -699,7 +699,7 @@ static u32 qedf_get_login_failures(void *cookie)
}
static struct qed_fcoe_cb_ops qedf_cb_ops = {
- {
+ .common = {
.link_update = qedf_link_update,
.bw_update = qedf_bw_update,
.schedule_recovery_handler = qedf_schedule_recovery_handler,
@@ -982,7 +982,8 @@ static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd)
return SUCCESS;
}
-static int qedf_slave_configure(struct scsi_device *sdev)
+static int qedf_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
if (qedf_queue_depth) {
scsi_change_queue_depth(sdev, qedf_queue_depth);
@@ -1003,7 +1004,7 @@ static const struct scsi_host_template qedf_host_template = {
.eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */
.eh_target_reset_handler = qedf_eh_target_reset, /* target reset */
.eh_host_reset_handler = qedf_eh_host_reset,
- .slave_configure = qedf_slave_configure,
+ .sdev_configure = qedf_sdev_configure,
.dma_boundary = QED_HW_DMA_BOUNDARY,
.sg_tablesize = QEDF_MAX_BDS_PER_CMD,
.can_queue = FCOE_PARAMS_NUM_TASKS,
diff --git a/drivers/scsi/qedi/qedi_dbg.h b/drivers/scsi/qedi/qedi_dbg.h
index fdda12ef13b0..5a1ec4542183 100644
--- a/drivers/scsi/qedi/qedi_dbg.h
+++ b/drivers/scsi/qedi/qedi_dbg.h
@@ -91,7 +91,7 @@ struct Scsi_Host;
struct sysfs_bin_attrs {
char *name;
- struct bin_attribute *attr;
+ const struct bin_attribute *attr;
};
int qedi_create_sysfs_attr(struct Scsi_Host *shost,
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 628d59dda20c..e87885cc701c 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1961,13 +1961,11 @@ static int qedi_cpu_online(unsigned int cpu)
struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
struct task_struct *thread;
- thread = kthread_create_on_node(qedi_percpu_io_thread, (void *)p,
- cpu_to_node(cpu),
- "qedi_thread/%d", cpu);
+ thread = kthread_create_on_cpu(qedi_percpu_io_thread, (void *)p,
+ cpu, "qedi_thread/%d");
if (IS_ERR(thread))
return PTR_ERR(thread);
- kthread_bind(thread, cpu);
p->iothread = thread;
wake_up_process(thread);
return 0;
@@ -2869,7 +2867,7 @@ static void qedi_remove(struct pci_dev *pdev)
__qedi_remove(pdev, QEDI_MODE_NORMAL);
}
-static struct pci_device_id qedi_pci_tbl[] = {
+static const struct pci_device_id qedi_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8084) },
{ 0 },
@@ -2878,7 +2876,7 @@ MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
static enum cpuhp_state qedi_cpuhp_state;
-static struct pci_error_handlers qedi_err_handler = {
+static const struct pci_error_handlers qedi_err_handler = {
.error_detected = qedi_io_error_detected,
};
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 8958547ac111..078a9c80bce2 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -501,7 +501,7 @@ struct qla_boards {
};
/* NOTE: the last argument in each entry is used to index ql1280_board_tbl */
-static struct pci_device_id qla1280_pci_tbl[] = {
+static const struct pci_device_id qla1280_pci_tbl[] = {
{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020,
@@ -1159,7 +1159,7 @@ qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
/**************************************************************************
- * qla1280_slave_configure
+ * qla1280_sdev_configure
*
* Description:
* Determines the queue depth for a given device. There are two ways
@@ -1170,7 +1170,7 @@ qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
* default queue depth (dependent on the number of hardware SCBs).
**************************************************************************/
static int
-qla1280_slave_configure(struct scsi_device *device)
+qla1280_sdev_configure(struct scsi_device *device, struct queue_limits *lim)
{
struct scsi_qla_host *ha;
int default_depth = 3;
@@ -2454,7 +2454,7 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
qla1280_debounce_register(&reg->istatus);
wait_for_completion(&wait);
- del_timer_sync(&ha->mailbox_timer);
+ timer_delete_sync(&ha->mailbox_timer);
spin_lock_irq(ha->host->host_lock);
@@ -2867,7 +2867,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
cpu_to_le32(upper_32_bits(dma_handle)),
cpu_to_le32(lower_32_bits(dma_handle)),
- cpu_to_le32(sg_dma_len(sg_next(s))));
+ cpu_to_le32(sg_dma_len(s)));
remseg--;
}
dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
@@ -4121,7 +4121,7 @@ static const struct scsi_host_template qla1280_driver_template = {
.proc_name = "qla1280",
.name = "Qlogic ISP 1280/12160",
.info = qla1280_info,
- .slave_configure = qla1280_slave_configure,
+ .sdev_configure = qla1280_sdev_configure,
.queuecommand = qla1280_queuecommand,
.eh_abort_handler = qla1280_eh_abort,
.eh_device_reset_handler= qla1280_eh_device_reset,
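For reference, a minimal sketch of the new hook's shape (hypothetical driver, illustrative values): ->sdev_configure() receives the queue limits that are about to be committed for the device, so a driver can edit them in place instead of poking the request queue after setup.

static int example_sdev_configure(struct scsi_device *sdev,
				  struct queue_limits *lim)
{
	lim->max_hw_sectors = 1024;		/* hypothetical cap */
	scsi_change_queue_depth(sdev, 32);	/* queue depth still set as before */
	return 0;
}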
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index e6ece30c4348..dcb0c2af1fa7 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -17,7 +17,7 @@ static int qla24xx_vport_disable(struct fc_vport *, bool);
static ssize_t
qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -58,7 +58,7 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
static ssize_t
qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -168,19 +168,19 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_fw_dump_attr = {
+static const struct bin_attribute sysfs_fw_dump_attr = {
.attr = {
.name = "fw_dump",
.mode = S_IRUSR | S_IWUSR,
},
.size = 0,
- .read = qla2x00_sysfs_read_fw_dump,
- .write = qla2x00_sysfs_write_fw_dump,
+ .read_new = qla2x00_sysfs_read_fw_dump,
+ .write_new = qla2x00_sysfs_write_fw_dump,
};
static ssize_t
qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -220,7 +220,7 @@ skip:
static ssize_t
qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -282,19 +282,19 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_nvram_attr = {
+static const struct bin_attribute sysfs_nvram_attr = {
.attr = {
.name = "nvram",
.mode = S_IRUSR | S_IWUSR,
},
.size = 512,
- .read = qla2x00_sysfs_read_nvram,
- .write = qla2x00_sysfs_write_nvram,
+ .read_new = qla2x00_sysfs_read_nvram,
+ .write_new = qla2x00_sysfs_write_nvram,
};
static ssize_t
qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -318,7 +318,7 @@ out:
static ssize_t
qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -344,19 +344,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_optrom_attr = {
+static const struct bin_attribute sysfs_optrom_attr = {
.attr = {
.name = "optrom",
.mode = S_IRUSR | S_IWUSR,
},
.size = 0,
- .read = qla2x00_sysfs_read_optrom,
- .write = qla2x00_sysfs_write_optrom,
+ .read_new = qla2x00_sysfs_read_optrom,
+ .write_new = qla2x00_sysfs_write_optrom,
};
static ssize_t
qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -529,18 +529,18 @@ out:
return rval;
}
-static struct bin_attribute sysfs_optrom_ctl_attr = {
+static const struct bin_attribute sysfs_optrom_ctl_attr = {
.attr = {
.name = "optrom_ctl",
.mode = S_IWUSR,
},
.size = 0,
- .write = qla2x00_sysfs_write_optrom_ctl,
+ .write_new = qla2x00_sysfs_write_optrom_ctl,
};
static ssize_t
qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -587,7 +587,7 @@ skip:
static ssize_t
qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -642,19 +642,19 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_vpd_attr = {
+static const struct bin_attribute sysfs_vpd_attr = {
.attr = {
.name = "vpd",
.mode = S_IRUSR | S_IWUSR,
},
.size = 0,
- .read = qla2x00_sysfs_read_vpd,
- .write = qla2x00_sysfs_write_vpd,
+ .read_new = qla2x00_sysfs_read_vpd,
+ .write_new = qla2x00_sysfs_write_vpd,
};
static ssize_t
qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -679,18 +679,18 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_sfp_attr = {
+static const struct bin_attribute sysfs_sfp_attr = {
.attr = {
.name = "sfp",
.mode = S_IRUSR | S_IWUSR,
},
.size = SFP_DEV_SIZE,
- .read = qla2x00_sysfs_read_sfp,
+ .read_new = qla2x00_sysfs_read_sfp,
};
static ssize_t
qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -823,19 +823,19 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_reset_attr = {
+static const struct bin_attribute sysfs_reset_attr = {
.attr = {
.name = "reset",
.mode = S_IWUSR,
},
.size = 0,
- .write = qla2x00_sysfs_write_reset,
+ .write_new = qla2x00_sysfs_write_reset,
};
static ssize_t
qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
@@ -866,18 +866,18 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_issue_logo_attr = {
+static const struct bin_attribute sysfs_issue_logo_attr = {
.attr = {
.name = "issue_logo",
.mode = S_IWUSR,
},
.size = 0,
- .write = qla2x00_issue_logo,
+ .write_new = qla2x00_issue_logo,
};
static ssize_t
qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -929,18 +929,18 @@ do_read:
return count;
}
-static struct bin_attribute sysfs_xgmac_stats_attr = {
+static const struct bin_attribute sysfs_xgmac_stats_attr = {
.attr = {
.name = "xgmac_stats",
.mode = S_IRUSR,
},
.size = 0,
- .read = qla2x00_sysfs_read_xgmac_stats,
+ .read_new = qla2x00_sysfs_read_xgmac_stats,
};
static ssize_t
qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
@@ -987,18 +987,18 @@ do_read:
return count;
}
-static struct bin_attribute sysfs_dcbx_tlv_attr = {
+static const struct bin_attribute sysfs_dcbx_tlv_attr = {
.attr = {
.name = "dcbx_tlv",
.mode = S_IRUSR,
},
.size = 0,
- .read = qla2x00_sysfs_read_dcbx_tlv,
+ .read_new = qla2x00_sysfs_read_dcbx_tlv,
};
static struct sysfs_entry {
char *name;
- struct bin_attribute *attr;
+ const struct bin_attribute *attr;
int type;
} bin_file_entries[] = {
{ "fw_dump", &sysfs_fw_dump_attr, },
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 79cdfec2bca3..0c2dd782b675 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -67,7 +67,7 @@ void qla2x00_sp_free(srb_t *sp)
{
struct srb_iocb *iocb = &sp->u.iocb_cmd;
- del_timer(&iocb->timer);
+ timer_delete(&iocb->timer);
qla2x00_rel_sp(sp);
}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 0b41e8a06602..3224044f1775 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2572,7 +2572,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
static void
qla2x00_async_done(struct srb *sp, int res)
{
- if (del_timer(&sp->u.iocb_cmd.timer)) {
+ if (timer_delete(&sp->u.iocb_cmd.timer)) {
/*
* Successfully cancelled the timeout handler
* ref: TMR
@@ -2645,7 +2645,7 @@ static void qla2x00_els_dcmd_sp_free(srb_t *sp)
elsio->u.els_logo.els_logo_pyld,
elsio->u.els_logo.els_logo_pyld_dma);
- del_timer(&elsio->timer);
+ timer_delete(&elsio->timer);
qla2x00_rel_sp(sp);
}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 0cd6f3e14882..13b6cb1b93ac 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -2147,7 +2147,7 @@ qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle,
pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb,
sizeof(*pdb), DMA_FROM_DEVICE);
- if (!pdb_dma) {
+ if (dma_mapping_error(&vha->hw->pdev->dev, pdb_dma)) {
ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
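The rule behind this fix, restated as a small sketch (hypothetical helper): dma_map_single() signals failure through a per-device error cookie, so the result must be tested with dma_mapping_error(); a plain !addr check is wrong because 0 can be a perfectly valid DMA address.

#include <linux/dma-mapping.h>

static int example_map_buffer(struct device *dev, void *buf, size_t len,
			      dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;	/* mapping failed; nothing to unmap */
	*out = addr;
	return 0;
}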
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 79879c4743e6..8b71ac0b1d99 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -20,7 +20,7 @@ void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
if (vha->vp_idx && vha->timer_active) {
- del_timer_sync(&vha->timer);
+ timer_delete_sync(&vha->timer);
vha->timer_active = 0;
}
}
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
index 4f63aff333db..3a2bd953a976 100644
--- a/drivers/scsi/qla2xxx/qla_mr.h
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -282,8 +282,8 @@ struct register_host_info {
#define QLAFX00_TGT_NODE_LIST_SIZE (sizeof(uint32_t) * 32)
struct config_info_data {
- uint8_t model_num[16];
- uint8_t model_description[80];
+ uint8_t model_num[16] __nonstring;
+ uint8_t model_description[80] __nonstring;
uint8_t reserved0[160];
uint8_t symbolic_name[64];
uint8_t serial_num[32];
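A one-struct sketch of what the annotation buys (hypothetical type): __nonstring marks a fixed-width byte field that need not be NUL-terminated, so the compiler suppresses string-handling warnings such as -Wunterminated-string-initialization for it.

struct example_ids {
	u8 model[16] __nonstring;	/* space padded, no terminating NUL */
	u8 serial[32] __nonstring;
};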
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 8f4cc136a9c9..8ee2e337c9e1 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -8,7 +8,6 @@
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
-#include <linux/blk-mq-pci.h>
#include <linux/blk-mq.h>
static struct nvme_fc_port_template qla_nvme_fc_transport;
@@ -841,7 +840,7 @@ static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
{
struct scsi_qla_host *vha = lport->private;
- blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
+ blk_mq_map_hw_queues(map, &vha->hw->pdev->dev, vha->irq_offset);
}
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 7ab717ed7232..b44d134e7105 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -13,7 +13,6 @@
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
-#include <linux/blk-mq-pci.h>
#include <linux/refcount.h>
#include <linux/crash_dump.h>
#include <linux/trace_events.h>
@@ -402,7 +401,7 @@ qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
static __inline__ void
qla2x00_stop_timer(scsi_qla_host_t *vha)
{
- del_timer_sync(&vha->timer);
+ timer_delete_sync(&vha->timer);
vha->timer_active = 0;
}
@@ -1934,7 +1933,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
}
static int
-qla2xxx_slave_alloc(struct scsi_device *sdev)
+qla2xxx_sdev_init(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
@@ -1947,7 +1946,7 @@ qla2xxx_slave_alloc(struct scsi_device *sdev)
}
static int
-qla2xxx_slave_configure(struct scsi_device *sdev)
+qla2xxx_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
scsi_qla_host_t *vha = shost_priv(sdev->host);
struct req_que *req = vha->req;
@@ -1957,7 +1956,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
}
static void
-qla2xxx_slave_destroy(struct scsi_device *sdev)
+qla2xxx_sdev_destroy(struct scsi_device *sdev)
{
sdev->hostdata = NULL;
}
@@ -8071,7 +8070,8 @@ static void qla2xxx_map_queues(struct Scsi_Host *shost)
if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
blk_mq_map_queues(qmap);
else
- blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
+ blk_mq_map_hw_queues(qmap, &vha->hw->pdev->dev,
+ vha->irq_offset);
}
struct scsi_host_template qla2xxx_driver_template = {
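A hedged sketch of the replacement helper (hypothetical host structure): blk_mq_map_hw_queues() takes a generic struct device instead of a struct pci_dev, so one call maps hardware queues from the parent device's IRQ affinity whether that parent is PCI or not.

static void example_map_queues(struct Scsi_Host *shost)
{
	struct example_hw *hw = shost_priv(shost);	/* hypothetical private data */
	struct blk_mq_queue_map *qmap =
		&shost->tag_set.map[HCTX_TYPE_DEFAULT];

	blk_mq_map_hw_queues(qmap, &hw->pdev->dev, hw->irq_offset);
}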
@@ -8087,10 +8087,10 @@ struct scsi_host_template qla2xxx_driver_template = {
.eh_bus_reset_handler = qla2xxx_eh_bus_reset,
.eh_host_reset_handler = qla2xxx_eh_host_reset,
- .slave_configure = qla2xxx_slave_configure,
+ .sdev_configure = qla2xxx_sdev_configure,
- .slave_alloc = qla2xxx_slave_alloc,
- .slave_destroy = qla2xxx_slave_destroy,
+ .sdev_init = qla2xxx_sdev_init,
+ .sdev_destroy = qla2xxx_sdev_destroy,
.scan_finished = qla2xxx_scan_finished,
.scan_start = qla2xxx_scan_start,
.change_queue_depth = scsi_change_queue_depth,
@@ -8116,7 +8116,7 @@ static const struct pci_error_handlers qla2xxx_err_handler = {
.reset_done = qla_pci_reset_done,
};
-static struct pci_device_id qla2xxx_pci_tbl[] = {
+static const struct pci_device_id qla2xxx_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) },
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 6d16546e1729..9e7a407ba1b9 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -2136,8 +2136,8 @@ qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data)
* @flash_id: Flash ID
*
* This function polls the device until bit 7 of what is read matches data
- * bit 7 or until data bit 5 becomes a 1. If that hapens, the flash ROM timed
- * out (a fatal error). The flash book recommeds reading bit 7 again after
+ * bit 7 or until data bit 5 becomes a 1. If that happens, the flash ROM timed
+ * out (a fatal error). The flash book recommends reading bit 7 again after
* reading bit 5 as a 1.
*
* Returns 0 on success, else non-zero.
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
index abfa6ef60480..e3f85d6ea0db 100644
--- a/drivers/scsi/qla4xxx/ql4_attr.c
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -10,7 +10,7 @@
static ssize_t
qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj,
- struct bin_attribute *ba, char *buf, loff_t off,
+ const struct bin_attribute *ba, char *buf, loff_t off,
size_t count)
{
struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
@@ -28,7 +28,7 @@ qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj,
static ssize_t
qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
- struct bin_attribute *ba, char *buf, loff_t off,
+ const struct bin_attribute *ba, char *buf, loff_t off,
size_t count)
{
struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
@@ -104,19 +104,19 @@ qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
return count;
}
-static struct bin_attribute sysfs_fw_dump_attr = {
+static const struct bin_attribute sysfs_fw_dump_attr = {
.attr = {
.name = "fw_dump",
.mode = S_IRUSR | S_IWUSR,
},
.size = 0,
- .read = qla4_8xxx_sysfs_read_fw_dump,
- .write = qla4_8xxx_sysfs_write_fw_dump,
+ .read_new = qla4_8xxx_sysfs_read_fw_dump,
+ .write_new = qla4_8xxx_sysfs_write_fw_dump,
};
static struct sysfs_entry {
char *name;
- struct bin_attribute *attr;
+ const struct bin_attribute *attr;
} bin_file_entries[] = {
{ "fw_dump", &sysfs_fw_dump_attr },
{ NULL },
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index d91f54a6e752..80c4b4e7526b 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -160,7 +160,7 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
-static int qla4xxx_slave_alloc(struct scsi_device *device);
+static int qla4xxx_sdev_init(struct scsi_device *device);
static umode_t qla4_attr_is_visible(int param_type, int param);
static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
@@ -234,7 +234,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
.eh_host_reset_handler = qla4xxx_eh_host_reset,
.eh_timed_out = qla4xxx_eh_cmd_timed_out,
- .slave_alloc = qla4xxx_slave_alloc,
+ .sdev_init = qla4xxx_sdev_init,
.change_queue_depth = scsi_change_queue_depth,
.this_id = -1,
@@ -3420,6 +3420,8 @@ static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
task->data_count,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&ha->pdev->dev, task_data->data_dma))
+ return -ENOMEM;
}
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
@@ -4021,7 +4023,7 @@ static void qla4xxx_start_timer(struct scsi_qla_host *ha,
static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
{
- del_timer_sync(&ha->timer);
+ timer_delete_sync(&ha->timer);
ha->timer_active = 0;
}
@@ -7189,7 +7191,8 @@ exit_new_nt_list:
* 1: if flashnode entry is non-persistent
* 0: if flashnode entry is persistent
**/
-static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data)
+static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev,
+ const void *data)
{
struct iscsi_bus_flash_session *fnode_sess;
@@ -9052,7 +9055,7 @@ static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
}
}
-static int qla4xxx_slave_alloc(struct scsi_device *sdev)
+static int qla4xxx_sdev_init(struct scsi_device *sdev)
{
struct iscsi_cls_session *cls_sess;
struct iscsi_session *sess;
@@ -9846,7 +9849,7 @@ static const struct pci_error_handlers qla4xxx_err_handler = {
.resume = qla4xxx_pci_resume,
};
-static struct pci_device_id qla4xxx_pci_tbl[] = {
+static const struct pci_device_id qla4xxx_pci_tbl[] = {
{
.vendor = PCI_VENDOR_ID_QLOGIC,
.device = PCI_DEVICE_ID_QLOGIC_ISP4010,
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 74866b9f2b14..c9984ef57f26 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -975,7 +975,8 @@ static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int
host->sg_tablesize = QLOGICPTI_MAX_SG(num_free);
}
-static int qlogicpti_slave_configure(struct scsi_device *sdev)
+static int qlogicpti_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct qlogicpti *qpti = shost_priv(sdev->host);
int tgt = sdev->id;
@@ -1292,7 +1293,7 @@ static const struct scsi_host_template qpti_template = {
.name = "qlogicpti",
.info = qlogicpti_info,
.queuecommand = qlogicpti_queuecommand,
- .slave_configure = qlogicpti_slave_configure,
+ .sdev_configure = qlogicpti_sdev_configure,
.eh_abort_handler = qlogicpti_abort,
.eh_host_reset_handler = qlogicpti_reset,
.can_queue = QLOGICPTI_REQ_QUEUE_LEN,
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index a77e0499b738..518a252eb6aa 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -510,22 +510,34 @@ void scsi_attach_vpd(struct scsi_device *sdev)
return;
for (i = 4; i < vpd_buf->len; i++) {
- if (vpd_buf->data[i] == 0x0)
+ switch (vpd_buf->data[i]) {
+ case 0x0:
scsi_update_vpd_page(sdev, 0x0, &sdev->vpd_pg0);
- if (vpd_buf->data[i] == 0x80)
+ break;
+ case 0x80:
scsi_update_vpd_page(sdev, 0x80, &sdev->vpd_pg80);
- if (vpd_buf->data[i] == 0x83)
+ break;
+ case 0x83:
scsi_update_vpd_page(sdev, 0x83, &sdev->vpd_pg83);
- if (vpd_buf->data[i] == 0x89)
+ break;
+ case 0x89:
scsi_update_vpd_page(sdev, 0x89, &sdev->vpd_pg89);
- if (vpd_buf->data[i] == 0xb0)
+ break;
+ case 0xb0:
scsi_update_vpd_page(sdev, 0xb0, &sdev->vpd_pgb0);
- if (vpd_buf->data[i] == 0xb1)
+ break;
+ case 0xb1:
scsi_update_vpd_page(sdev, 0xb1, &sdev->vpd_pgb1);
- if (vpd_buf->data[i] == 0xb2)
+ break;
+ case 0xb2:
scsi_update_vpd_page(sdev, 0xb2, &sdev->vpd_pgb2);
- if (vpd_buf->data[i] == 0xb7)
+ break;
+ case 0xb7:
scsi_update_vpd_page(sdev, 0xb7, &sdev->vpd_pgb7);
+ break;
+ default:
+ break;
+ }
}
kfree(vpd_buf);
}
@@ -695,26 +707,23 @@ void scsi_cdl_check(struct scsi_device *sdev)
*/
int scsi_cdl_enable(struct scsi_device *sdev, bool enable)
{
- struct scsi_mode_data data;
- struct scsi_sense_hdr sshdr;
- struct scsi_vpd *vpd;
- bool is_ata = false;
char buf[64];
+ bool is_ata;
int ret;
if (!sdev->cdl_supported)
return -EOPNOTSUPP;
rcu_read_lock();
- vpd = rcu_dereference(sdev->vpd_pg89);
- if (vpd)
- is_ata = true;
+ is_ata = rcu_dereference(sdev->vpd_pg89);
rcu_read_unlock();
/*
* For ATA devices, CDL needs to be enabled with a SET FEATURES command.
*/
if (is_ata) {
+ struct scsi_mode_data data;
+ struct scsi_sense_hdr sshdr;
char *buf_data;
int len;
@@ -723,16 +732,30 @@ int scsi_cdl_enable(struct scsi_device *sdev, bool enable)
if (ret)
return -EINVAL;
- /* Enable CDL using the ATA feature page */
+ /* Enable or disable CDL using the ATA feature page */
len = min_t(size_t, sizeof(buf),
data.length - data.header_length -
data.block_descriptor_length);
buf_data = buf + data.header_length +
data.block_descriptor_length;
- if (enable)
- buf_data[4] = 0x02;
- else
- buf_data[4] = 0;
+
+ /*
+ * If we want to enable CDL and CDL is already enabled on the
+ * device, do nothing. This avoids needlessly resetting the CDL
+ * statistics on the device as that is implied by the CDL enable
+ * action. Similarly, there is no need to do anything if
+ * we want to disable CDL and CDL is already disabled.
+ */
+ if (enable) {
+ if ((buf_data[4] & 0x03) == 0x02)
+ goto out;
+ buf_data[4] &= ~0x03;
+ buf_data[4] |= 0x02;
+ } else {
+ if ((buf_data[4] & 0x03) == 0x00)
+ goto out;
+ buf_data[4] &= ~0x03;
+ }
ret = scsi_mode_select(sdev, 1, 0, buf_data, len, 5 * HZ, 3,
&data, &sshdr);
@@ -744,6 +767,7 @@ int scsi_cdl_enable(struct scsi_device *sdev, bool enable)
}
}
+out:
sdev->cdl_enable = enable;
return 0;
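The bit handling above, isolated as a sketch (hypothetical helper): bits 1:0 of byte 4 of the ATA feature subpage hold the CDL state (0x02 enabled, 0x00 disabled), and resending the current state would needlessly reset the device's CDL statistics.

/* Returns true only when a MODE SELECT is actually needed. */
static bool example_cdl_update(u8 *buf_data, bool enable)
{
	u8 cur = buf_data[4] & 0x03;

	if (enable ? cur == 0x02 : cur == 0x00)
		return false;		/* already in the desired state */
	buf_data[4] &= ~0x03;
	if (enable)
		buf_data[4] |= 0x02;
	return true;
}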
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 680ba180a672..f0eec4708ddd 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -71,6 +71,10 @@ static const char *sdebug_version_date = "20210520";
#define NO_ADDITIONAL_SENSE 0x0
#define OVERLAP_ATOMIC_COMMAND_ASC 0x0
#define OVERLAP_ATOMIC_COMMAND_ASCQ 0x23
+#define FILEMARK_DETECTED_ASCQ 0x1
+#define EOP_EOM_DETECTED_ASCQ 0x2
+#define BEGINNING_OF_P_M_DETECTED_ASCQ 0x4
+#define EOD_DETECTED_ASCQ 0x5
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
@@ -80,8 +84,10 @@ static const char *sdebug_version_date = "20210520";
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
+#define UA_READY_ASC 0x28
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
+#define TOO_MANY_IN_PARTITION_ASC 0x3b
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
@@ -173,6 +179,37 @@ static const char *sdebug_version_date = "20210520";
#define DEF_ZBC_MAX_OPEN_ZONES 8
#define DEF_ZBC_NR_CONV_ZONES 1
+/* Default parameters for tape drives */
+#define TAPE_DEF_DENSITY 0x0
+#define TAPE_BAD_DENSITY 0x65
+#define TAPE_DEF_BLKSIZE 0
+#define TAPE_MIN_BLKSIZE 512
+#define TAPE_MAX_BLKSIZE 1048576
+#define TAPE_EW 20
+#define TAPE_MAX_PARTITIONS 2
+#define TAPE_UNITS 10000
+#define TAPE_PARTITION_1_UNITS 1000
+
+/* The tape block data definitions */
+#define TAPE_BLOCK_FM_FLAG ((u32)0x1 << 30)
+#define TAPE_BLOCK_EOD_FLAG ((u32)0x2 << 30)
+#define TAPE_BLOCK_MARK_MASK ((u32)0x3 << 30)
+#define TAPE_BLOCK_SIZE_MASK (~TAPE_BLOCK_MARK_MASK)
+#define TAPE_BLOCK_MARK(a) (a & TAPE_BLOCK_MARK_MASK)
+#define TAPE_BLOCK_SIZE(a) (a & TAPE_BLOCK_SIZE_MASK)
+#define IS_TAPE_BLOCK_FM(a) ((a & TAPE_BLOCK_FM_FLAG) != 0)
+#define IS_TAPE_BLOCK_EOD(a) ((a & TAPE_BLOCK_EOD_FLAG) != 0)
+
+struct tape_block {
+ u32 fl_size;
+ unsigned char data[4];
+};
+
+/* Flags for sense data */
+#define SENSE_FLAG_FILEMARK 0x80
+#define SENSE_FLAG_EOM 0x40
+#define SENSE_FLAG_ILI 0x20
+
#define SDEBUG_LUN_0_VAL 0
/* bit mask values for sdebug_opts */
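The tape-block encoding introduced above packs everything into one u32: bits 31:30 carry the filemark/EOD marks and bits 29:0 the block size. A minimal sketch using those macros (hypothetical helper):

static inline u32 example_pack_block(u32 size, bool filemark)
{
	u32 fl_size = size & TAPE_BLOCK_SIZE_MASK;

	if (filemark)
		fl_size |= TAPE_BLOCK_FM_FLAG;
	return fl_size;
}

/* e.g. IS_TAPE_BLOCK_FM(example_pack_block(512, true)) is true and
 * TAPE_BLOCK_SIZE(example_pack_block(512, true)) == 512.
 */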
@@ -216,7 +253,8 @@ static const char *sdebug_version_date = "20210520";
#define SDEBUG_UA_LUNS_CHANGED 5
#define SDEBUG_UA_MICROCODE_CHANGED 6 /* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
-#define SDEBUG_NUM_UAS 8
+#define SDEBUG_UA_NOT_READY_TO_READY 8
+#define SDEBUG_NUM_UAS 9
/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
* sector on read commands: */
@@ -262,11 +300,6 @@ static const char *sdebug_version_date = "20210520";
#define SDEB_XA_NOT_IN_USE XA_MARK_1
-static struct kmem_cache *queued_cmd_cache;
-
-#define TO_QUEUED_CMD(scmd) ((void *)(scmd)->host_scribble)
-#define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
-
/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
ZBC_ZTYPE_CNV = 0x1,
@@ -363,6 +396,19 @@ struct sdebug_dev_info {
ktime_t create_ts; /* time since bootup that this device was created */
struct sdeb_zone_state *zstate;
+ /* For tapes */
+ unsigned int tape_blksize;
+ unsigned int tape_density;
+ unsigned char tape_partition;
+ unsigned char tape_nbr_partitions;
+ unsigned char tape_pending_nbr_partitions;
+ unsigned int tape_pending_part_0_size;
+ unsigned int tape_pending_part_1_size;
+ unsigned char tape_dce;
+ unsigned int tape_location[TAPE_MAX_PARTITIONS];
+ unsigned int tape_eop[TAPE_MAX_PARTITIONS];
+ struct tape_block *tape_blocks[TAPE_MAX_PARTITIONS];
+
struct dentry *debugfs_entry;
struct spinlock list_lock;
struct list_head inject_err_list;
@@ -409,24 +455,9 @@ struct sdebug_defer {
enum sdeb_defer_type defer_t;
};
-struct sdebug_device_access_info {
- bool atomic_write;
- u64 lba;
- u32 num;
- struct scsi_cmnd *self;
-};
-
-struct sdebug_queued_cmd {
- /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
- * instance indicates this slot is in use.
- */
- struct sdebug_defer sd_dp;
- struct scsi_cmnd *scmd;
- struct sdebug_device_access_info *i;
-};
-
struct sdebug_scsi_cmd {
spinlock_t lock;
+ struct sdebug_defer sd_dp;
};
static atomic_t sdebug_cmnd_count; /* number of incoming commands */
@@ -483,22 +514,27 @@ enum sdeb_opcode_index {
SDEB_I_ZONE_OUT = 30, /* 0x94+SA; includes no data xfer */
SDEB_I_ZONE_IN = 31, /* 0x95+SA; all have data-in */
SDEB_I_ATOMIC_WRITE_16 = 32,
- SDEB_I_LAST_ELEM_P1 = 33, /* keep this last (previous + 1) */
+ SDEB_I_READ_BLOCK_LIMITS = 33,
+ SDEB_I_LOCATE = 34,
+ SDEB_I_WRITE_FILEMARKS = 35,
+ SDEB_I_SPACE = 36,
+ SDEB_I_FORMAT_MEDIUM = 37,
+ SDEB_I_LAST_ELEM_P1 = 38, /* keep this last (previous + 1) */
};
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
- 0, 0, 0, 0,
+ SDEB_I_FORMAT_MEDIUM, SDEB_I_READ_BLOCK_LIMITS, 0, 0,
SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
- 0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
- SDEB_I_RELEASE,
+ SDEB_I_WRITE_FILEMARKS, SDEB_I_SPACE, SDEB_I_INQUIRY, 0, 0,
+ SDEB_I_MODE_SELECT, SDEB_I_RESERVE, SDEB_I_RELEASE,
0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
- SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
+ SDEB_I_READ, 0, SDEB_I_WRITE, SDEB_I_LOCATE, 0, 0, 0, SDEB_I_VERIFY,
0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
@@ -573,6 +609,12 @@ static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_read_blklimits(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_locate(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_write_filemarks(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_space(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_rewind(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_format_medium(struct scsi_cmnd *, struct sdebug_dev_info *);
static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
@@ -581,8 +623,6 @@ static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);
-static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);
-
/*
* The following are overflow arrays for cdbs that "hit" the same index in
* the opcode_info_arr array. The most time sensitive (or commonly used) cdb
@@ -773,7 +813,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 20 */
{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
{6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
+ {0, 0x1, 0, 0, resp_rewind, NULL,
{6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
@@ -800,6 +840,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
resp_pre_fetch, pre_fetch_iarr,
{10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} }, /* PRE-FETCH (10) */
+ /* READ POSITION (10) */
/* 30 */
{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
@@ -810,11 +851,23 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
{16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
-/* 31 */
+/* 32 */
{0, 0x0, 0x0, F_D_OUT | FF_MEDIA_IO,
resp_atomic_write, NULL, /* ATOMIC WRITE 16 */
{16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} },
+ {0, 0x05, 0, F_D_IN, resp_read_blklimits, NULL, /* READ BLOCK LIMITS (6) */
+ {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {0, 0x2b, 0, F_D_UNKN, resp_locate, NULL, /* LOCATE (10) */
+ {10, 0x07, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xc7, 0, 0,
+ 0, 0, 0, 0} },
+ {0, 0x10, 0, F_D_IN, resp_write_filemarks, NULL, /* WRITE FILEMARKS (6) */
+ {6, 0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {0, 0x11, 0, F_D_IN, resp_space, NULL, /* SPACE (6) */
+ {6, 0x07, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {0, 0x4, 0, 0, resp_format_medium, NULL, /* FORMAT MEDIUM (6) */
+ {6, 0x3, 0x7, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+/* 38 */
/* sentinel */
{0xff, 0, 0, 0, NULL, NULL, /* terminating element */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
@@ -1331,6 +1384,30 @@ static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
my_name, key, asc, asq);
}
+/* Sense data that has information fields for tapes */
+static void mk_sense_info_tape(struct scsi_cmnd *scp, int key, int asc, int asq,
+ unsigned int information, unsigned char tape_flags)
+{
+ if (!scp->sense_buffer) {
+ sdev_printk(KERN_ERR, scp->device,
+ "%s: sense_buffer is NULL\n", __func__);
+ return;
+ }
+ memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+
+ scsi_build_sense(scp, /* sdebug_dsense */ 0, key, asc, asq);
+ /* only fixed format so far */
+
+ scp->sense_buffer[0] |= 0x80; /* valid */
+ scp->sense_buffer[2] |= tape_flags;
+ put_unaligned_be32(information, &scp->sense_buffer[3]);
+
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
+ my_name, key, asc, asq);
+}
+
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
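mk_sense_info_tape() above fills fixed-format sense data: byte 0 bit 7 is the VALID flag, byte 2 bits 7:5 carry FILEMARK/EOM/ILI, and bytes 3..6 hold the big-endian INFORMATION field (for tapes, a residue count). A hedged decoding sketch (hypothetical helper):

#include <linux/unaligned.h>

static u32 example_sense_information(const u8 *sb)
{
	if (!(sb[0] & 0x80))	/* VALID clear: INFORMATION undefined */
		return 0;
	return get_unaligned_be32(&sb[3]);
}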
@@ -1493,6 +1570,12 @@ static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
if (sdebug_verbose)
cp = "reported luns data has changed";
break;
+ case SDEBUG_UA_NOT_READY_TO_READY:
+ mk_sense_buffer(scp, UNIT_ATTENTION, UA_READY_ASC,
+ 0);
+ if (sdebug_verbose)
+ cp = "not ready to ready transition/media change";
+ break;
default:
pr_warn("unexpected unit attention code=%d\n", k);
if (sdebug_verbose)
@@ -2196,6 +2279,14 @@ static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
changing = (stopped_state != want_stop);
if (changing)
atomic_xchg(&devip->stopped, want_stop);
+ if (sdebug_ptype == TYPE_TAPE && !want_stop) {
+ int i;
+
+ set_bit(SDEBUG_UA_NOT_READY_TO_READY, devip->uas_bm); /* not legal! */
+ for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
+ devip->tape_location[i] = 0;
+ devip->tape_partition = 0;
+ }
if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
return SDEG_RES_IMMED_MASK;
else
@@ -2728,6 +2819,76 @@ static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
return sizeof(sas_sha_m_pg);
}
+static unsigned char partition_pg[] = {0x11, 12, 1, 0, 0x24, 3, 9, 0,
+ 0xff, 0xff, 0x00, 0x00};
+
+static int resp_partition_m_pg(unsigned char *p, int pcontrol, int target)
+{ /* Partition page for mode_sense (tape) */
+ memcpy(p, partition_pg, sizeof(partition_pg));
+ if (pcontrol == 1)
+ memset(p + 2, 0, sizeof(partition_pg) - 2);
+ return sizeof(partition_pg);
+}
+
+static int process_medium_part_m_pg(struct sdebug_dev_info *devip,
+ unsigned char *new, int pg_len)
+{
+ int new_nbr, p0_size, p1_size;
+
+ if ((new[4] & 0x80) != 0) { /* FDP */
+ partition_pg[4] |= 0x80;
+ devip->tape_pending_nbr_partitions = TAPE_MAX_PARTITIONS;
+ devip->tape_pending_part_0_size = TAPE_UNITS - TAPE_PARTITION_1_UNITS;
+ devip->tape_pending_part_1_size = TAPE_PARTITION_1_UNITS;
+ } else {
+ new_nbr = new[3] + 1;
+ if (new_nbr > TAPE_MAX_PARTITIONS)
+ return 3;
+ if ((new[4] & 0x40) != 0) { /* SDP */
+ p1_size = TAPE_PARTITION_1_UNITS;
+ p0_size = TAPE_UNITS - p1_size;
+ if (p0_size < 100)
+ return 4;
+ } else if ((new[4] & 0x20) != 0) {
+ if (new_nbr > 1) {
+ p0_size = get_unaligned_be16(new + 8);
+ p1_size = get_unaligned_be16(new + 10);
+ if (p1_size == 0xFFFF)
+ p1_size = TAPE_UNITS - p0_size;
+ else if (p0_size == 0xFFFF)
+ p0_size = TAPE_UNITS - p1_size;
+ if (p0_size < 100 || p1_size < 100)
+ return 8;
+ } else {
+ p0_size = TAPE_UNITS;
+ p1_size = 0;
+ }
+ } else
+ return 6;
+ devip->tape_pending_nbr_partitions = new_nbr;
+ devip->tape_pending_part_0_size = p0_size;
+ devip->tape_pending_part_1_size = p1_size;
+ partition_pg[3] = new_nbr;
+ devip->tape_pending_nbr_partitions = new_nbr;
+ }
+
+ return 0;
+}
+
+static int resp_compression_m_pg(unsigned char *p, int pcontrol, int target,
+ unsigned char dce)
+{ /* Compression page for mode_sense (tape) */
+ unsigned char compression_pg[] = {0x0f, 14, 0x40, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 00, 00};
+
+ memcpy(p, compression_pg, sizeof(compression_pg));
+ if (dce)
+ p[2] |= 0x80;
+ if (pcontrol == 1)
+ memset(p + 2, 0, sizeof(compression_pg) - 2);
+ return sizeof(compression_pg);
+}
+
/* PAGE_SIZE is more than necessary but provides room for future expansion. */
#define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
@@ -2742,7 +2903,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
unsigned char *ap;
unsigned char *arr __free(kfree);
unsigned char *cmd = scp->cmnd;
- bool dbd, llbaa, msense_6, is_disk, is_zbc;
+ bool dbd, llbaa, msense_6, is_disk, is_zbc, is_tape;
arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
if (!arr)
@@ -2755,7 +2916,8 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
is_disk = (sdebug_ptype == TYPE_DISK);
is_zbc = devip->zoned;
- if ((is_disk || is_zbc) && !dbd)
+ is_tape = (sdebug_ptype == TYPE_TAPE);
+ if ((is_disk || is_zbc || is_tape) && !dbd)
bd_len = llbaa ? 16 : 8;
else
bd_len = 0;
@@ -2793,15 +2955,25 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
put_unaligned_be32(0xffffffff, ap + 0);
else
put_unaligned_be32(sdebug_capacity, ap + 0);
- put_unaligned_be16(sdebug_sector_size, ap + 6);
+ if (is_tape) {
+ ap[0] = devip->tape_density;
+ put_unaligned_be16(devip->tape_blksize, ap + 6);
+ } else
+ put_unaligned_be16(sdebug_sector_size, ap + 6);
offset += bd_len;
ap = arr + offset;
} else if (16 == bd_len) {
+ if (is_tape) {
+ mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, 4);
+ return check_condition_result;
+ }
put_unaligned_be64((u64)sdebug_capacity, ap + 0);
put_unaligned_be32(sdebug_sector_size, ap + 12);
offset += bd_len;
ap = arr + offset;
}
+ if (cmd[2] == 0)
+ goto only_bd; /* Only block descriptor requested */
/*
* N.B. If len>0 before resp_*_pg() call, then form of that call should be:
@@ -2857,6 +3029,18 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
}
offset += len;
break;
+ case 0xf: /* Compression Mode Page (tape) */
+ if (!is_tape)
+ goto bad_pcode;
+ len = resp_compression_m_pg(ap, pcontrol, target, devip->tape_dce);
+ offset += len;
+ break;
+ case 0x11: /* Partition Mode Page (tape) */
+ if (!is_tape)
+ goto bad_pcode;
+ len = resp_partition_m_pg(ap, pcontrol, target);
+ offset += len;
+ break;
case 0x19: /* if spc==1 then sas phy, control+discover */
if (subpcode > 0x2 && subpcode < 0xff)
goto bad_subpcode;
@@ -2902,6 +3086,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
default:
goto bad_pcode;
}
+only_bd:
if (msense_6)
arr[0] = offset - 1;
else
@@ -2945,8 +3130,34 @@ static int resp_mode_select(struct scsi_cmnd *scp,
__func__, param_len, res);
md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
- off = bd_len + (mselect6 ? 4 : 8);
- if (md_len > 2 || off >= res) {
+ off = (mselect6 ? 4 : 8);
+ if (sdebug_ptype == TYPE_TAPE) {
+ int blksize;
+
+ if (bd_len != 8) {
+ mk_sense_invalid_fld(scp, SDEB_IN_DATA,
+ mselect6 ? 3 : 6, -1);
+ return check_condition_result;
+ }
+ if (arr[off] == TAPE_BAD_DENSITY) {
+ mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
+ return check_condition_result;
+ }
+ blksize = get_unaligned_be16(arr + off + 6);
+ if (blksize != 0 &&
+ (blksize < TAPE_MIN_BLKSIZE ||
+ blksize > TAPE_MAX_BLKSIZE ||
+ (blksize % 4) != 0)) {
+ mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, -1);
+ return check_condition_result;
+ }
+ devip->tape_density = arr[off];
+ devip->tape_blksize = blksize;
+ }
+ off += bd_len;
+ if (off >= res)
+ return 0; /* No page written, just descriptors */
+ if (md_len > 2) {
mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
return check_condition_result;
}
@@ -2984,6 +3195,25 @@ static int resp_mode_select(struct scsi_cmnd *scp,
goto set_mode_changed_ua;
}
break;
+ case 0xf: /* Compression mode page */
+ if (sdebug_ptype != TYPE_TAPE)
+ goto bad_pcode;
+ if ((arr[off + 2] & 0x40) != 0) {
+ devip->tape_dce = (arr[off + 2] & 0x80) != 0;
+ return 0;
+ }
+ break;
+ case 0x11: /* Medium Partition Mode Page (tape) */
+ if (sdebug_ptype == TYPE_TAPE) {
+ int fld;
+
+ fld = process_medium_part_m_pg(devip, &arr[off], pg_len);
+ if (fld == 0)
+ return 0;
+ mk_sense_invalid_fld(scp, SDEB_IN_DATA, fld, -1);
+ return check_condition_result;
+ }
+ break;
case 0x1c: /* Informational Exceptions Mode page */
if (iec_m_pg[1] == arr[off + 1]) {
memcpy(iec_m_pg + 2, arr + off + 2,
@@ -2999,6 +3229,10 @@ static int resp_mode_select(struct scsi_cmnd *scp,
set_mode_changed_ua:
set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
return 0;
+
+bad_pcode:
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
+ return check_condition_result;
}
static int resp_temp_l_pg(unsigned char *arr)
@@ -3138,6 +3372,265 @@ static int resp_log_sense(struct scsi_cmnd *scp,
min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
}
+enum {SDEBUG_READ_BLOCK_LIMITS_ARR_SZ = 6};
+static int resp_read_blklimits(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ unsigned char arr[SDEBUG_READ_BLOCK_LIMITS_ARR_SZ];
+
+ arr[0] = 4;
+ put_unaligned_be24(TAPE_MAX_BLKSIZE, arr + 1);
+ put_unaligned_be16(TAPE_MIN_BLKSIZE, arr + 4);
+ return fill_from_dev_buffer(scp, arr, SDEBUG_READ_BLOCK_LIMITS_ARR_SZ);
+}
+
+static int resp_locate(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ unsigned char *cmd = scp->cmnd;
+ unsigned int i, pos;
+ struct tape_block *blp;
+ int partition;
+
+ if ((cmd[1] & 0x02) != 0) {
+ if (cmd[8] >= devip->tape_nbr_partitions) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1);
+ return check_condition_result;
+ }
+ devip->tape_partition = cmd[8];
+ }
+ pos = get_unaligned_be32(cmd + 3);
+ partition = devip->tape_partition;
+
+ for (i = 0, blp = devip->tape_blocks[partition];
+ i < pos && i < devip->tape_eop[partition]; i++, blp++)
+ if (IS_TAPE_BLOCK_EOD(blp->fl_size))
+ break;
+ if (i < pos) {
+ devip->tape_location[partition] = i;
+ mk_sense_buffer(scp, BLANK_CHECK, 0x05, 0);
+ return check_condition_result;
+ }
+ devip->tape_location[partition] = pos;
+
+ return 0;
+}
+
+static int resp_write_filemarks(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ unsigned char *cmd = scp->cmnd;
+ unsigned int i, count, pos;
+ u32 data;
+ int partition = devip->tape_partition;
+
+ if ((cmd[1] & 0xfe) != 0) { /* probably write setmarks, not in >= SCSI-3 */
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
+ return check_condition_result;
+ }
+ count = get_unaligned_be24(cmd + 2);
+ data = TAPE_BLOCK_FM_FLAG;
+ for (i = 0, pos = devip->tape_location[partition]; i < count; i++, pos++) {
+ if (pos >= devip->tape_eop[partition] - 1) { /* don't overwrite EOD */
+ devip->tape_location[partition] = devip->tape_eop[partition] - 1;
+ mk_sense_info_tape(scp, VOLUME_OVERFLOW, NO_ADDITIONAL_SENSE,
+ EOP_EOM_DETECTED_ASCQ, count, SENSE_FLAG_EOM);
+ return check_condition_result;
+ }
+ (devip->tape_blocks[partition] + pos)->fl_size = data;
+ }
+ (devip->tape_blocks[partition] + pos)->fl_size =
+ TAPE_BLOCK_EOD_FLAG;
+ devip->tape_location[partition] = pos;
+
+ return 0;
+}
+
+static int resp_space(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ unsigned char *cmd = scp->cmnd, code;
+ int i = 0, pos, count;
+ struct tape_block *blp;
+ int partition = devip->tape_partition;
+
+ count = get_unaligned_be24(cmd + 2);
+ if ((count & 0x800000) != 0) /* extend negative to 32-bit count */
+ count |= 0xff000000;
+ code = cmd[1] & 0x0f;
+
+ pos = devip->tape_location[partition];
+ if (code == 0) { /* blocks */
+ if (count < 0) {
+ count = (-count);
+ pos -= 1;
+ for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count;
+ i++) {
+ if (pos < 0)
+ goto is_bop;
+ else if (IS_TAPE_BLOCK_FM(blp->fl_size))
+ goto is_fm;
+ if (i > 0) {
+ pos--;
+ blp--;
+ }
+ }
+ } else if (count > 0) {
+ for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count;
+ i++, pos++, blp++) {
+ if (IS_TAPE_BLOCK_EOD(blp->fl_size))
+ goto is_eod;
+ if (IS_TAPE_BLOCK_FM(blp->fl_size)) {
+ pos += 1;
+ goto is_fm;
+ }
+ if (pos >= devip->tape_eop[partition])
+ goto is_eop;
+ }
+ }
+ } else if (code == 1) { /* filemarks */
+ if (count < 0) {
+ count = (-count);
+ if (pos == 0)
+ goto is_bop;
+ else {
+ for (i = 0, blp = devip->tape_blocks[partition] + pos;
+ i < count && pos >= 0; i++, pos--, blp--) {
+ for (pos--, blp-- ; !IS_TAPE_BLOCK_FM(blp->fl_size) &&
+ pos >= 0; pos--, blp--)
+ ; /* empty */
+ if (pos < 0)
+ goto is_bop;
+ }
+ }
+ pos += 1;
+ } else if (count > 0) {
+ for (i = 0, blp = devip->tape_blocks[partition] + pos;
+ i < count; i++, pos++, blp++) {
+ for ( ; !IS_TAPE_BLOCK_FM(blp->fl_size) &&
+ !IS_TAPE_BLOCK_EOD(blp->fl_size) &&
+ pos < devip->tape_eop[partition];
+ pos++, blp++)
+ ; /* empty */
+ if (IS_TAPE_BLOCK_EOD(blp->fl_size))
+ goto is_eod;
+ if (pos >= devip->tape_eop[partition])
+ goto is_eop;
+ }
+ }
+ } else if (code == 3) { /* EOD */
+ for (blp = devip->tape_blocks[partition] + pos;
+ !IS_TAPE_BLOCK_EOD(blp->fl_size) && pos < devip->tape_eop[partition];
+ pos++, blp++)
+ ; /* empty */
+ if (pos >= devip->tape_eop[partition])
+ goto is_eop;
+ } else {
+ /* sequential filemarks not supported */
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1);
+ return check_condition_result;
+ }
+ devip->tape_location[partition] = pos;
+ return 0;
+
+is_fm:
+ devip->tape_location[partition] = pos;
+ mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
+ FILEMARK_DETECTED_ASCQ, count - i,
+ SENSE_FLAG_FILEMARK);
+ return check_condition_result;
+
+is_eod:
+ devip->tape_location[partition] = pos;
+ mk_sense_info_tape(scp, BLANK_CHECK, NO_ADDITIONAL_SENSE,
+ EOD_DETECTED_ASCQ, count - i,
+ 0);
+ return check_condition_result;
+
+is_bop:
+ devip->tape_location[partition] = 0;
+ mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
+ BEGINNING_OF_P_M_DETECTED_ASCQ, count - i,
+ SENSE_FLAG_EOM);
+ devip->tape_location[partition] = 0;
+ return check_condition_result;
+
+is_eop:
+ devip->tape_location[partition] = devip->tape_eop[partition] - 1;
+ mk_sense_info_tape(scp, MEDIUM_ERROR, NO_ADDITIONAL_SENSE,
+ EOP_EOM_DETECTED_ASCQ, (unsigned int)i,
+ SENSE_FLAG_EOM);
+ return check_condition_result;
+}
+
+static int resp_rewind(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ devip->tape_location[devip->tape_partition] = 0;
+
+ return 0;
+}
+
+static int partition_tape(struct sdebug_dev_info *devip, int nbr_partitions,
+ int part_0_size, int part_1_size)
+{
+ int i;
+
+ if (part_0_size + part_1_size > TAPE_UNITS)
+ return -1;
+ devip->tape_eop[0] = part_0_size;
+ devip->tape_blocks[0]->fl_size = TAPE_BLOCK_EOD_FLAG;
+ devip->tape_eop[1] = part_1_size;
+ devip->tape_blocks[1] = devip->tape_blocks[0] +
+ devip->tape_eop[0];
+ devip->tape_blocks[1]->fl_size = TAPE_BLOCK_EOD_FLAG;
+
+ for (i = 0 ; i < TAPE_MAX_PARTITIONS; i++)
+ devip->tape_location[i] = 0;
+
+ devip->tape_nbr_partitions = nbr_partitions;
+ devip->tape_partition = 0;
+
+ partition_pg[3] = nbr_partitions - 1;
+ put_unaligned_be16(devip->tape_eop[0], partition_pg + 8);
+ put_unaligned_be16(devip->tape_eop[1], partition_pg + 10);
+
+ return nbr_partitions;
+}
+
+static int resp_format_medium(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ int res = 0;
+ unsigned char *cmd = scp->cmnd;
+
+ if (sdebug_ptype != TYPE_TAPE) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 0, -1);
+ return check_condition_result;
+ }
+ if (cmd[2] > 2) {
+ mk_sense_invalid_fld(scp, SDEB_IN_DATA, 2, -1);
+ return check_condition_result;
+ }
+ if (cmd[2] != 0) {
+ if (devip->tape_pending_nbr_partitions > 0) {
+ res = partition_tape(devip,
+ devip->tape_pending_nbr_partitions,
+ devip->tape_pending_part_0_size,
+ devip->tape_pending_part_1_size);
+ } else
+ res = partition_tape(devip, devip->tape_nbr_partitions,
+ devip->tape_eop[0], devip->tape_eop[1]);
+ } else
+ res = partition_tape(devip, 1, TAPE_UNITS, 0);
+ if (res < 0)
+ return -EINVAL;
+
+ devip->tape_pending_nbr_partitions = -1;
+
+ return 0;
+}
+
static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
{
return devip->nr_zones != 0;
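One subtlety in resp_space() above is that the SPACE(6) count is a signed 24-bit field, which the code widens by hand before use. The same widening as a standalone sketch (hypothetical helper):

#include <linux/unaligned.h>

static s32 example_space_count(const u8 *cdb)
{
	s32 count = get_unaligned_be24(&cdb[2]);

	if (count & 0x800000)		/* bit 23 set: negative count */
		count |= 0xff000000;	/* sign-extend to 32 bits */
	return count;
}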
@@ -3871,6 +4364,98 @@ static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
return ret;
}
+static int resp_read_tape(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ u32 i, num, transfer, size;
+ u8 *cmd = scp->cmnd;
+ struct scsi_data_buffer *sdb = &scp->sdb;
+ int partition = devip->tape_partition;
+ u32 pos = devip->tape_location[partition];
+ struct tape_block *blp;
+ bool fixed, sili;
+
+ if (cmd[0] != READ_6) { /* Only Read(6) supported */
+ mk_sense_invalid_opcode(scp);
+ return illegal_condition_result;
+ }
+ fixed = (cmd[1] & 0x1) != 0;
+ sili = (cmd[1] & 0x2) != 0;
+ if (fixed && sili) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
+ return check_condition_result;
+ }
+
+ transfer = get_unaligned_be24(cmd + 2);
+ if (fixed) {
+ num = transfer;
+ size = devip->tape_blksize;
+ } else {
+ if (transfer < TAPE_MIN_BLKSIZE ||
+ transfer > TAPE_MAX_BLKSIZE) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
+ return check_condition_result;
+ }
+ num = 1;
+ size = transfer;
+ }
+
+ for (i = 0, blp = devip->tape_blocks[partition] + pos;
+ i < num && pos < devip->tape_eop[partition];
+ i++, pos++, blp++) {
+ devip->tape_location[partition] = pos + 1;
+ if (IS_TAPE_BLOCK_FM(blp->fl_size)) {
+ mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
+ FILEMARK_DETECTED_ASCQ, fixed ? num - i : size,
+ SENSE_FLAG_FILEMARK);
+ scsi_set_resid(scp, (num - i) * size);
+ return check_condition_result;
+ }
+ /* Assume no REW */
+ if (IS_TAPE_BLOCK_EOD(blp->fl_size)) {
+ mk_sense_info_tape(scp, BLANK_CHECK, NO_ADDITIONAL_SENSE,
+ EOD_DETECTED_ASCQ, fixed ? num - i : size,
+ 0);
+ devip->tape_location[partition] = pos;
+ scsi_set_resid(scp, (num - i) * size);
+ return check_condition_result;
+ }
+ sg_zero_buffer(sdb->table.sgl, sdb->table.nents,
+ size, i * size);
+ sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
+ &(blp->data), 4, i * size, false);
+ if (fixed) {
+ if (blp->fl_size != devip->tape_blksize) {
+ scsi_set_resid(scp, (num - i) * size);
+ mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
+ 0, num - i,
+ SENSE_FLAG_ILI);
+ return check_condition_result;
+ }
+ } else {
+ if (blp->fl_size != size) {
+ if (blp->fl_size < size)
+ scsi_set_resid(scp, size - blp->fl_size);
+ if (!sili) {
+ mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
+ 0, size - blp->fl_size,
+ SENSE_FLAG_ILI);
+ return check_condition_result;
+ }
+ }
+ }
+ }
+ if (pos >= devip->tape_eop[partition]) {
+ mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
+ EOP_EOM_DETECTED_ASCQ, fixed ? num - i : size,
+ SENSE_FLAG_EOM);
+ devip->tape_location[partition] = pos - 1;
+ return check_condition_result;
+ }
+ devip->tape_location[partition] = pos;
+
+ return 0;
+}
+
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
bool check_prot;
@@ -3882,6 +4467,9 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
u8 *cmd = scp->cmnd;
bool meta_data_locked = false;
+ if (sdebug_ptype == TYPE_TAPE)
+ return resp_read_tape(scp, devip);
+
switch (cmd[0]) {
case READ_16:
ei_lba = 0;
@@ -4178,6 +4766,67 @@ static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
}
}
+static int resp_write_tape(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ u32 i, num, transfer, size, written = 0;
+ u8 *cmd = scp->cmnd;
+ struct scsi_data_buffer *sdb = &scp->sdb;
+ int partition = devip->tape_partition;
+ int pos = devip->tape_location[partition];
+ struct tape_block *blp;
+ bool fixed, ew;
+
+ if (cmd[0] != WRITE_6) { /* Only Write(6) supported */
+ mk_sense_invalid_opcode(scp);
+ return illegal_condition_result;
+ }
+
+ fixed = (cmd[1] & 1) != 0;
+ transfer = get_unaligned_be24(cmd + 2);
+ if (fixed) {
+ num = transfer;
+ size = devip->tape_blksize;
+ } else {
+ if (transfer < TAPE_MIN_BLKSIZE ||
+ transfer > TAPE_MAX_BLKSIZE) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
+ return check_condition_result;
+ }
+ num = 1;
+ size = transfer;
+ }
+
+ scsi_set_resid(scp, num * transfer);
+ for (i = 0, blp = devip->tape_blocks[partition] + pos, ew = false;
+ i < num && pos < devip->tape_eop[partition] - 1; i++, pos++, blp++) {
+ blp->fl_size = size;
+ sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
+ &(blp->data), 4, i * size, true);
+ written += size;
+ scsi_set_resid(scp, num * transfer - written);
+ ew |= (pos == devip->tape_eop[partition] - TAPE_EW);
+ }
+
+ devip->tape_location[partition] = pos;
+ blp->fl_size = TAPE_BLOCK_EOD_FLAG;
+ if (pos >= devip->tape_eop[partition] - 1) {
+ mk_sense_info_tape(scp, VOLUME_OVERFLOW,
+ NO_ADDITIONAL_SENSE, EOP_EOM_DETECTED_ASCQ,
+ fixed ? num - i : transfer,
+ SENSE_FLAG_EOM);
+ return check_condition_result;
+ }
+ if (ew) { /* early warning */
+ mk_sense_info_tape(scp, NO_SENSE,
+ NO_ADDITIONAL_SENSE, EOP_EOM_DETECTED_ASCQ,
+ fixed ? num - i : transfer,
+ SENSE_FLAG_EOM);
+ return check_condition_result;
+ }
+
+ return 0;
+}
+
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
bool check_prot;
@@ -4190,6 +4839,9 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
u8 *cmd = scp->cmnd;
bool meta_data_locked = false;
+ if (sdebug_ptype == TYPE_TAPE)
+ return resp_write_tape(scp, devip);
+
switch (cmd[0]) {
case WRITE_16:
ei_lba = 0;
@@ -4918,7 +5570,10 @@ static int resp_sync_cache(struct scsi_cmnd *scp,
* a GOOD status otherwise. Model a disk with a big cache and yield
* CONDITION MET. Actually tries to bring range in main memory into the
* cache associated with the CPU(s).
+ *
+ * The opcode 0x34 is also used for READ POSITION by tape devices.
*/
+enum {SDEBUG_READ_POSITION_ARR_SZ = 20};
static int resp_pre_fetch(struct scsi_cmnd *scp,
struct sdebug_dev_info *devip)
{
@@ -4930,6 +5585,31 @@ static int resp_pre_fetch(struct scsi_cmnd *scp,
struct sdeb_store_info *sip = devip2sip(devip, true);
u8 *fsp = sip->storep;
+ if (sdebug_ptype == TYPE_TAPE) {
+ if (cmd[0] == PRE_FETCH) { /* READ POSITION (10) */
+ int all_length;
+ unsigned char arr[20];
+ unsigned int pos;
+
+ all_length = get_unaligned_be16(cmd + 7);
+ if ((cmd[1] & 0xfe) != 0 ||
+ all_length != 0) { /* only short form */
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB,
+ all_length ? 7 : 1, 0);
+ return check_condition_result;
+ }
+ memset(arr, 0, SDEBUG_READ_POSITION_ARR_SZ);
+ arr[1] = devip->tape_partition;
+ pos = devip->tape_location[devip->tape_partition];
+ put_unaligned_be32(pos, arr + 4);
+ put_unaligned_be32(pos, arr + 8);
+ return fill_from_dev_buffer(scp, arr,
+ SDEBUG_READ_POSITION_ARR_SZ);
+ }
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+
if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
lba = get_unaligned_be32(cmd + 2);
nblks = get_unaligned_be16(cmd + 7);
@@ -5638,10 +6318,10 @@ static u32 get_tag(struct scsi_cmnd *cmnd)
/* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
- struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
+ struct sdebug_scsi_cmd *sdsc = container_of(sd_dp,
+ typeof(*sdsc), sd_dp);
+ struct scsi_cmnd *scp = (struct scsi_cmnd *)sdsc - 1;
unsigned long flags;
- struct scsi_cmnd *scp = sqcp->scmd;
- struct sdebug_scsi_cmd *sdsc;
bool aborted;
if (sdebug_statistics) {
@@ -5652,27 +6332,23 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
if (!scp) {
pr_err("scmd=NULL\n");
- goto out;
+ return;
}
- sdsc = scsi_cmd_priv(scp);
spin_lock_irqsave(&sdsc->lock, flags);
aborted = sd_dp->aborted;
if (unlikely(aborted))
sd_dp->aborted = false;
- ASSIGN_QUEUED_CMD(scp, NULL);
spin_unlock_irqrestore(&sdsc->lock, flags);
if (aborted) {
pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
blk_abort_request(scsi_cmd_to_rq(scp));
- goto out;
+ return;
}
scsi_done(scp); /* callback to mid level */
-out:
- sdebug_free_queued_cmd(sqcp);
}
/* When high resolution timer goes off this function is called. */
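The back-mapping above, (struct scsi_cmnd *)sdsc - 1, relies on a midlayer layout guarantee: scsi_cmd_priv() returns the memory placed immediately after the struct scsi_cmnd. A sketch of the round trip (hypothetical helper):

/* scsi_cmd_priv(scp) is (scp + 1), so one scsi_cmnd step backwards
 * from the private area recovers the owning command:
 * example_priv_to_cmnd(scsi_cmd_priv(scp)) == scp.
 */
static struct scsi_cmnd *example_priv_to_cmnd(void *priv)
{
	return (struct scsi_cmnd *)priv - 1;
}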
@@ -5835,6 +6511,10 @@ static struct sdebug_dev_info *sdebug_device_create(
} else {
devip->zoned = false;
}
+ if (sdebug_ptype == TYPE_TAPE) {
+ devip->tape_density = TAPE_DEF_DENSITY;
+ devip->tape_blksize = TAPE_DEF_BLKSIZE;
+ }
devip->create_ts = ktime_get_boottime();
atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
spin_lock_init(&devip->list_lock);
@@ -5879,23 +6559,24 @@ static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
return open_devip;
}
-static int scsi_debug_slave_alloc(struct scsi_device *sdp)
+static int scsi_debug_sdev_init(struct scsi_device *sdp)
{
if (sdebug_verbose)
- pr_info("slave_alloc <%u %u %u %llu>\n",
+ pr_info("sdev_init <%u %u %u %llu>\n",
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
return 0;
}
-static int scsi_debug_slave_configure(struct scsi_device *sdp)
+static int scsi_debug_sdev_configure(struct scsi_device *sdp,
+ struct queue_limits *lim)
{
struct sdebug_dev_info *devip =
(struct sdebug_dev_info *)sdp->hostdata;
struct dentry *dentry;
if (sdebug_verbose)
- pr_info("slave_configure <%u %u %u %llu>\n",
+ pr_info("sdev_configure <%u %u %u %llu>\n",
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
@@ -5904,6 +6585,21 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
if (devip == NULL)
return 1; /* no resources, will be marked offline */
}
+ if (sdebug_ptype == TYPE_TAPE) {
+ if (!devip->tape_blocks[0]) {
+ devip->tape_blocks[0] =
+ kcalloc(TAPE_UNITS, sizeof(struct tape_block),
+ GFP_KERNEL);
+ if (!devip->tape_blocks[0])
+ return 1;
+ }
+ devip->tape_pending_nbr_partitions = -1;
+ if (partition_tape(devip, 1, TAPE_UNITS, 0) < 0) {
+ kfree(devip->tape_blocks[0]);
+ devip->tape_blocks[0] = NULL;
+ return 1;
+ }
+ }
sdp->hostdata = devip;
if (sdebug_no_uld)
sdp->no_uld_attach = 1;
@@ -5927,14 +6623,14 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
return 0;
}
-static void scsi_debug_slave_destroy(struct scsi_device *sdp)
+static void scsi_debug_sdev_destroy(struct scsi_device *sdp)
{
struct sdebug_dev_info *devip =
(struct sdebug_dev_info *)sdp->hostdata;
struct sdebug_err_inject *err;
if (sdebug_verbose)
- pr_info("slave_destroy <%u %u %u %llu>\n",
+ pr_info("sdev_destroy <%u %u %u %llu>\n",
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
if (!devip)
@@ -5949,31 +6645,41 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp)
debugfs_remove(devip->debugfs_entry);
+ if (sdebug_ptype == TYPE_TAPE) {
+ kfree(devip->tape_blocks[0]);
+ devip->tape_blocks[0] = NULL;
+ }
+
/* make this slot available for re-use */
devip->used = false;
sdp->hostdata = NULL;
}
-/* Returns true if we require the queued memory to be freed by the caller. */
-static bool stop_qc_helper(struct sdebug_defer *sd_dp,
- enum sdeb_defer_type defer_t)
+/* Returns true if the command was cancelled or its callback is not running. */
+static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
{
+ struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
+ struct sdebug_defer *sd_dp = &sdsc->sd_dp;
+ enum sdeb_defer_type defer_t = READ_ONCE(sd_dp->defer_t);
+
+ lockdep_assert_held(&sdsc->lock);
+
if (defer_t == SDEB_DEFER_HRT) {
int res = hrtimer_try_to_cancel(&sd_dp->hrt);
switch (res) {
- case 0: /* Not active, it must have already run */
 case -1: /* It's executing the CB */
return false;
+ case 0: /* Not active, it must have already run */
case 1: /* Was active, we've now cancelled */
default:
return true;
}
} else if (defer_t == SDEB_DEFER_WQ) {
/* Cancel if pending */
- if (cancel_work_sync(&sd_dp->ew.work))
+ if (cancel_work(&sd_dp->ew.work))
return true;
- /* Was not pending, so it must have run */
+ /* callback may be running, so return false */
return false;
} else if (defer_t == SDEB_DEFER_POLL) {
return true;
@@ -5982,28 +6688,6 @@ static bool stop_qc_helper(struct sdebug_defer *sd_dp,
return false;
}
-
-static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
-{
- enum sdeb_defer_type l_defer_t;
- struct sdebug_defer *sd_dp;
- struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
- struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);
-
- lockdep_assert_held(&sdsc->lock);
-
- if (!sqcp)
- return false;
- sd_dp = &sqcp->sd_dp;
- l_defer_t = READ_ONCE(sd_dp->defer_t);
- ASSIGN_QUEUED_CMD(cmnd, NULL);
-
- if (stop_qc_helper(sd_dp, l_defer_t))
- sdebug_free_queued_cmd(sqcp);
-
- return true;
-}
-
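The rewritten helper above encodes two cancellation contracts. `hrtimer_try_to_cancel()` returns 0 when the timer was not queued (its callback already ran or it was never armed), 1 when a pending timer was cancelled, and -1 when the callback is executing right now; only the -1 case means the completion may still run. On the workqueue path, `cancel_work()` replaces `cancel_work_sync()` because the sync variant sleeps until a running handler finishes, and the handler (`sdebug_q_cmd_complete()`) takes the same `sdsc->lock` this function is called under, which would deadlock. A condensed sketch of the non-sleeping pattern (illustrative only):

    /*
     * Sketch: cancel deferred work while holding the lock the handler
     * also takes. cancel_work() never sleeps; it returns true only if
     * the work item had not started, so false means "may be running".
     */
    static bool try_cancel_deferred(struct work_struct *work, spinlock_t *lock)
    {
            lockdep_assert_held(lock);
            return cancel_work(work);
    }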
/*
* Called from scsi_debug_abort() only, which is for timed-out cmd.
*/
@@ -6075,7 +6759,7 @@ static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
{
- bool ok = scsi_debug_abort_cmnd(SCpnt);
+ bool aborted = scsi_debug_abort_cmnd(SCpnt);
u8 *cmd = SCpnt->cmnd;
u8 opcode = cmd[0];
@@ -6084,7 +6768,8 @@ static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, SCpnt->device,
"%s: command%s found\n", __func__,
- ok ? "" : " not");
+ aborted ? "" : " not");
+
if (sdebug_fail_abort(SCpnt)) {
scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
@@ -6092,6 +6777,9 @@ static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
return FAILED;
}
+ if (!aborted)
+ return FAILED;
+
return SUCCESS;
}
@@ -6143,6 +6831,22 @@ static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
return 0;
}
+static void scsi_tape_reset_clear(struct sdebug_dev_info *devip)
+{
+ if (sdebug_ptype == TYPE_TAPE) {
+ int i;
+
+ devip->tape_blksize = TAPE_DEF_BLKSIZE;
+ devip->tape_density = TAPE_DEF_DENSITY;
+ devip->tape_partition = 0;
+ devip->tape_dce = 0;
+ for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
+ devip->tape_location[i] = 0;
+ devip->tape_pending_nbr_partitions = -1;
+ /* Don't reset partitioning? */
+ }
+}
+
static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
{
struct scsi_device *sdp = SCpnt->device;
@@ -6156,8 +6860,10 @@ static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
scsi_debug_stop_all_queued(sdp);
- if (devip)
+ if (devip) {
set_bit(SDEBUG_UA_POR, devip->uas_bm);
+ scsi_tape_reset_clear(devip);
+ }
if (sdebug_fail_lun_reset(SCpnt)) {
scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
@@ -6195,6 +6901,7 @@ static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
if (devip->target == sdp->id) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
+ scsi_tape_reset_clear(devip);
++k;
}
}
@@ -6226,6 +6933,7 @@ static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
+ scsi_tape_reset_clear(devip);
++k;
}
@@ -6249,6 +6957,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
list_for_each_entry(devip, &sdbg_host->dev_info_list,
dev_list) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
+ scsi_tape_reset_clear(devip);
++k;
}
}
@@ -6365,33 +7074,6 @@ static bool inject_on_this_cmd(void)
#define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
-
-void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
-{
- if (sqcp)
- kmem_cache_free(queued_cmd_cache, sqcp);
-}
-
-static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
-{
- struct sdebug_queued_cmd *sqcp;
- struct sdebug_defer *sd_dp;
-
- sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
- if (!sqcp)
- return NULL;
-
- sd_dp = &sqcp->sd_dp;
-
- hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
- INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
-
- sqcp->scmd = scmd;
-
- return sqcp;
-}
-
/* Complete the processing of the thread that queued a SCSI command to this
* driver. It either completes the command by calling cmnd_done() or
* schedules a hr timer or work queue then returns 0. Returns
@@ -6408,7 +7090,6 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
unsigned long flags;
u64 ns_from_boot = 0;
- struct sdebug_queued_cmd *sqcp;
struct scsi_device *sdp;
struct sdebug_defer *sd_dp;
@@ -6440,12 +7121,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
}
}
- sqcp = sdebug_alloc_queued_cmd(cmnd);
- if (!sqcp) {
- pr_err("%s no alloc\n", __func__);
- return SCSI_MLQUEUE_HOST_BUSY;
- }
- sd_dp = &sqcp->sd_dp;
+ sd_dp = &sdsc->sd_dp;
if (polled || (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS))
ns_from_boot = ktime_get_boottime_ns();
@@ -6493,7 +7169,6 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (kt <= d) { /* elapsed duration >= kt */
/* call scsi_done() from this thread */
- sdebug_free_queued_cmd(sqcp);
scsi_done(cmnd);
return 0;
}
@@ -6506,13 +7181,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (polled) {
spin_lock_irqsave(&sdsc->lock, flags);
sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
- ASSIGN_QUEUED_CMD(cmnd, sqcp);
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
spin_unlock_irqrestore(&sdsc->lock, flags);
} else {
/* schedule the invocation of scsi_done() for a later time */
spin_lock_irqsave(&sdsc->lock, flags);
- ASSIGN_QUEUED_CMD(cmnd, sqcp);
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
/*
@@ -6536,13 +7209,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
sd_dp->issuing_cpu = raw_smp_processor_id();
if (polled) {
spin_lock_irqsave(&sdsc->lock, flags);
- ASSIGN_QUEUED_CMD(cmnd, sqcp);
sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
spin_unlock_irqrestore(&sdsc->lock, flags);
} else {
spin_lock_irqsave(&sdsc->lock, flags);
- ASSIGN_QUEUED_CMD(cmnd, sqcp);
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
schedule_work(&sd_dp->ew.work);
spin_unlock_irqrestore(&sdsc->lock, flags);
@@ -6834,7 +7505,7 @@ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
&data);
if (f >= 0) {
- seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
+ seq_printf(m, " BUSY: %s: %d,%d\n",
"first,last bits", f, l);
}
}
@@ -7909,12 +8580,6 @@ static int __init scsi_debug_init(void)
hosts_to_add = sdebug_add_host;
sdebug_add_host = 0;
- queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
- if (!queued_cmd_cache) {
- ret = -ENOMEM;
- goto driver_unreg;
- }
-
sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
if (IS_ERR_OR_NULL(sdebug_debugfs_root))
pr_info("%s: failed to create initial debugfs directory\n", __func__);
@@ -7941,8 +8606,6 @@ static int __init scsi_debug_init(void)
return 0;
-driver_unreg:
- driver_unregister(&sdebug_driverfs_driver);
bus_unreg:
bus_unregister(&pseudo_lld_bus);
dev_unreg:
@@ -7958,7 +8621,6 @@ static void __exit scsi_debug_exit(void)
for (; k; k--)
sdebug_do_remove_host(true);
- kmem_cache_destroy(queued_cmd_cache);
driver_unregister(&sdebug_driverfs_driver);
bus_unregister(&pseudo_lld_bus);
root_device_unregister(pseudo_primary);
@@ -8342,7 +9004,6 @@ static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
struct sdebug_defer *sd_dp;
u32 unique_tag = blk_mq_unique_tag(rq);
u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
- struct sdebug_queued_cmd *sqcp;
unsigned long flags;
int queue_num = data->queue_num;
ktime_t time;
@@ -8358,13 +9019,7 @@ static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
time = ktime_get_boottime();
spin_lock_irqsave(&sdsc->lock, flags);
- sqcp = TO_QUEUED_CMD(cmd);
- if (!sqcp) {
- spin_unlock_irqrestore(&sdsc->lock, flags);
- return true;
- }
-
- sd_dp = &sqcp->sd_dp;
+ sd_dp = &sdsc->sd_dp;
if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
spin_unlock_irqrestore(&sdsc->lock, flags);
return true;
@@ -8374,8 +9029,6 @@ static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
spin_unlock_irqrestore(&sdsc->lock, flags);
return true;
}
-
- ASSIGN_QUEUED_CMD(cmd, NULL);
spin_unlock_irqrestore(&sdsc->lock, flags);
if (sdebug_statistics) {
@@ -8384,8 +9037,6 @@ static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
atomic_inc(&sdebug_miss_cpus);
}
- sdebug_free_queued_cmd(sqcp);
-
scsi_done(cmd); /* callback to mid level */
(*data->num_entries)++;
return true;
@@ -8700,21 +9351,25 @@ err_out:
static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
+ struct sdebug_defer *sd_dp = &sdsc->sd_dp;
spin_lock_init(&sdsc->lock);
+ hrtimer_setup(&sd_dp->hrt, sdebug_q_cmd_hrt_complete, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
return 0;
}
-static struct scsi_host_template sdebug_driver_template = {
+static const struct scsi_host_template sdebug_driver_template = {
.show_info = scsi_debug_show_info,
.write_info = scsi_debug_write_info,
.proc_name = sdebug_proc_name,
.name = "SCSI DEBUG",
.info = scsi_debug_info,
- .slave_alloc = scsi_debug_slave_alloc,
- .slave_configure = scsi_debug_slave_configure,
- .slave_destroy = scsi_debug_slave_destroy,
+ .sdev_init = scsi_debug_sdev_init,
+ .sdev_configure = scsi_debug_sdev_configure,
+ .sdev_destroy = scsi_debug_sdev_destroy,
.ioctl = scsi_debug_ioctl,
.queuecommand = scsi_debug_queuecommand,
.change_queue_depth = sdebug_change_qdepth,
@@ -8732,6 +9387,7 @@ static struct scsi_host_template sdebug_driver_template = {
.max_sectors = -1U,
.max_segment_size = -1U,
.module = THIS_MODULE,
+ .skip_settle_delay = 1,
.track_queue_depth = 1,
.cmd_size = sizeof(struct sdebug_scsi_cmd),
.init_cmd_priv = sdebug_init_cmd_priv,
@@ -8748,17 +9404,17 @@ static int sdebug_driver_probe(struct device *dev)
sdbg_host = dev_to_sdebug_host(dev);
- sdebug_driver_template.can_queue = sdebug_max_queue;
- sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
- if (!sdebug_clustering)
- sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
-
hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
if (NULL == hpnt) {
pr_err("scsi_host_alloc failed\n");
error = -ENODEV;
return error;
}
+ hpnt->can_queue = sdebug_max_queue;
+ hpnt->cmd_per_lun = sdebug_max_queue;
+ if (!sdebug_clustering)
+ hpnt->dma_boundary = PAGE_SIZE - 1;
+
if (submit_queues > nr_cpu_ids) {
pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
my_name, submit_queues, nr_cpu_ids);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 10154d78e336..376b8897ab90 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -547,6 +547,18 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
scsi_report_sense(sdev, &sshdr);
+ if (sshdr.sense_key == UNIT_ATTENTION) {
+ /*
+ * Increment the counters for Power on/Reset or New Media so
+ * that all interested ULDs can see that these events have
+ * happened, even if another opener consumes the sense data.
+ */
+ if (sshdr.asc == 0x28)
+ scmd->device->ua_new_media_ctr++;
+ else if (sshdr.asc == 0x29)
+ scmd->device->ua_por_ctr++;
+ }
+
if (scsi_sense_is_deferred(&sshdr))
return NEEDS_RETRY;
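These counters exist because a unit attention is reported only once, to whichever command happens to trigger it; by bumping a per-device counter here, every ULD that cares can detect the event later by comparing against a privately saved value. The st changes further down in this diff do exactly that via `scsi_get_ua_por_ctr()` and `scsi_get_ua_new_media_ctr()`; a condensed sketch of the consumer side:

    /* Sketch: detect a power-on/reset UA since the last check. */
    static bool por_seen(struct scsi_device *sdev, unsigned int *saved)
    {
            unsigned int ctr = scsi_get_ua_por_ctr(sdev);

            if (ctr == *saved)
                    return false;
            *saved = ctr;   /* at least one POR unit attention occurred */
            return true;
    }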
@@ -711,6 +723,13 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
return SUCCESS;
case COMPLETED:
+ /*
+ * A command using command duration limits (CDL) with a
+ * descriptor set with policy 0xD may be completed with success
+ * and the sense data DATA CURRENTLY UNAVAILABLE, indicating
+ * that the command was in fact aborted because it exceeded its
+ * duration limit. Never retry these commands.
+ */
if (sshdr.asc == 0x55 && sshdr.ascq == 0x0a) {
set_scsi_ml_byte(scmd, SCSIML_STAT_DL_TIMEOUT);
req->cmd_flags |= REQ_FAILFAST_DEV;
@@ -2363,14 +2382,14 @@ int scsi_error_handler(void *data)
return 0;
}
-/*
- * Function: scsi_report_bus_reset()
+/**
+ * scsi_report_bus_reset() - report bus reset observed
*
- * Purpose: Utility function used by low-level drivers to report that
- * they have observed a bus reset on the bus being handled.
+ * Utility function used by low-level drivers to report that
+ * they have observed a bus reset on the bus being handled.
*
- * Arguments: shost - Host in question
- * channel - channel on which reset was observed.
+ * @shost: Host in question
+ * @channel: channel on which reset was observed.
*
* Returns: Nothing
*
@@ -2395,15 +2414,15 @@ void scsi_report_bus_reset(struct Scsi_Host *shost, int channel)
}
EXPORT_SYMBOL(scsi_report_bus_reset);
-/*
- * Function: scsi_report_device_reset()
+/**
+ * scsi_report_device_reset() - report device reset observed
*
- * Purpose: Utility function used by low-level drivers to report that
- * they have observed a device reset on the device being handled.
+ * Utility function used by low-level drivers to report that
+ * they have observed a device reset on the device being handled.
*
- * Arguments: shost - Host in question
- * channel - channel on which reset was observed
- * target - target on which reset was observed
+ * @shost: Host in question
+ * @channel: channel on which reset was observed
+ * @target: target on which reset was observed
*
* Returns: Nothing
*
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 6f6c5973c3ea..2fa45556e1ea 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -37,8 +37,10 @@
* @host: host to identify
* @buffer: userspace buffer for identification
*
- * Return an identifying string at @buffer, if @buffer is non-NULL, filling
- * to the length stored at * (int *) @buffer.
+ * Return:
+ * * if successful, %1 and an identifying string at @buffer, if @buffer
+ * is non-NULL, filling to the length stored at *(int *)@buffer.
+ * * <0 error code on failure.
*/
static int ioctl_probe(struct Scsi_Host *host, void __user *buffer)
{
@@ -121,6 +123,16 @@ out:
return result;
}
+/**
+ * scsi_set_medium_removal() - send command to allow or prevent medium removal
+ * @sdev: target scsi device
+ * @state: removal state to set (prevent or allow)
+ *
+ * Returns:
+ * * %0 if @sdev is not removable or not lockable or successful.
+ * * non-%0 is a SCSI result code if > 0 or kernel error code if < 0.
+ * * Sets @sdev->locked to the new state on success.
+ */
int scsi_set_medium_removal(struct scsi_device *sdev, char state)
{
char scsi_cmd[MAX_COMMAND_SIZE];
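A typical caller passes one of the removal constants from <scsi/scsi.h> (SCSI_REMOVAL_PREVENT / SCSI_REMOVAL_ALLOW). A usage sketch matching the return convention documented above:

    /* Sketch: lock the medium in the drive; sdev->locked is updated on success. */
    static int lock_medium(struct scsi_device *sdev)
    {
            int ret = scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);

            if (ret > 0)    /* positive: SCSI result code */
                    return -EIO;
            return ret;     /* 0 on success, negative kernel error otherwise */
    }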
@@ -242,11 +254,15 @@ static int scsi_send_start_stop(struct scsi_device *sdev, int data)
NORMAL_RETRIES);
}
-/*
- * Check if the given command is allowed.
+/**
+ * scsi_cmd_allowed() - Check if the given command is allowed.
+ * @cmd: SCSI command to check
+ * @open_for_write: is the file / block device opened for writing?
*
* Only a subset of commands are allowed for unprivileged users. Commands used
* to format the media, update the firmware, etc. are not permitted.
+ *
+ * Return: %true if the cmd is allowed, otherwise %false.
*/
bool scsi_cmd_allowed(unsigned char *cmd, bool open_for_write)
{
@@ -859,6 +875,8 @@ static int scsi_ioctl_sg_io(struct scsi_device *sdev, bool open_for_write,
* Description: The scsi_ioctl() function differs from most ioctls in that it
* does not take a major/minor number as the dev field. Rather, it takes
* a pointer to a &struct scsi_device.
+ *
+ * Return: varies depending on the @cmd
*/
int scsi_ioctl(struct scsi_device *sdev, bool open_for_write, int cmd,
void __user *arg)
@@ -941,8 +959,15 @@ int scsi_ioctl(struct scsi_device *sdev, bool open_for_write, int cmd,
}
EXPORT_SYMBOL(scsi_ioctl);
-/*
+/**
+ * scsi_ioctl_block_when_processing_errors - prevent commands from being queued
+ * @sdev: target scsi device
+ * @cmd: which ioctl is it
+ * @ndelay: no delay (non-blocking)
+ *
* We can process a reset even when a device isn't fully operable.
+ *
+ * Return: %0 on success, <0 error code.
*/
int scsi_ioctl_block_when_processing_errors(struct scsi_device *sdev, int cmd,
bool ndelay)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 43766589bfc6..1b43013d72c0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -184,6 +184,10 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
__scsi_queue_insert(cmd, reason, true);
}
+/**
+ * scsi_failures_reset_retries - reset all failures to zero
+ * @failures: &struct scsi_failures with specific failure modes set
+ */
void scsi_failures_reset_retries(struct scsi_failures *failures)
{
struct scsi_failure *failure;
@@ -1145,7 +1149,7 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
* Next, walk the list, and fill in the addresses and sizes of
* each segment.
*/
- count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg);
+ count = __blk_rq_map_sg(rq, cmd->sdb.table.sgl, &last_sg);
if (blk_rq_bytes(rq) & rq->q->limits.dma_pad_mask) {
unsigned int pad_len =
@@ -1222,6 +1226,15 @@ static void scsi_initialize_rq(struct request *rq)
cmd->retries = 0;
}
+/**
+ * scsi_alloc_request - allocate a block request and partially
+ * initialize its &scsi_cmnd
+ * @q: the device's request queue
+ * @opf: the request operation code
+ * @flags: block layer allocation flags
+ *
+ * Return: &struct request pointer on success or %NULL on failure
+ */
struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf,
blk_mq_req_flags_t flags)
{
@@ -1240,8 +1253,12 @@ EXPORT_SYMBOL_GPL(scsi_alloc_request);
*/
static void scsi_cleanup_rq(struct request *rq)
{
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+
+ cmd->flags = 0;
+
if (rq->rq_flags & RQF_DONTPREP) {
- scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
+ scsi_mq_uninit_cmd(cmd);
rq->rq_flags &= ~RQF_DONTPREP;
}
}
@@ -2073,9 +2090,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
tag_set->queue_depth = shost->can_queue;
tag_set->cmd_size = cmd_size;
tag_set->numa_node = dev_to_node(shost->dma_dev);
- tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
- tag_set->flags |=
- BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
+ if (shost->hostt->tag_alloc_policy_rr)
+ tag_set->flags |= BLK_MQ_F_TAG_RR;
if (shost->queuecommand_may_block)
tag_set->flags |= BLK_MQ_F_BLOCKING;
tag_set->driver_data = shost;
@@ -2729,6 +2745,7 @@ int
scsi_device_quiesce(struct scsi_device *sdev)
{
struct request_queue *q = sdev->request_queue;
+ unsigned int memflags;
int err;
/*
@@ -2743,7 +2760,7 @@ scsi_device_quiesce(struct scsi_device *sdev)
blk_set_pm_only(q);
- blk_mq_freeze_queue(q);
+ memflags = blk_mq_freeze_queue(q);
/*
* Ensure that the effect of blk_set_pm_only() will be visible
* for percpu_ref_tryget() callers that occur after the queue
@@ -2751,7 +2768,7 @@ scsi_device_quiesce(struct scsi_device *sdev)
* was called. See also https://lwn.net/Articles/573497/.
*/
synchronize_rcu();
- blk_mq_unfreeze_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
mutex_lock(&sdev->state_mutex);
err = scsi_device_set_state(sdev, SDEV_QUIESCE);
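Both freeze/unfreeze hunks in this file follow the updated block-layer API: `blk_mq_freeze_queue()` now returns a cookie of saved memory-allocation flags, and the matching `blk_mq_unfreeze_queue()` consumes it to restore them (keeping allocations made under the freeze from recursing into the frozen queue, judging by the API shape). The resulting pattern, as a sketch:

    unsigned int memflags;

    memflags = blk_mq_freeze_queue(q);
    /* ... changes that must not race with queued I/O ... */
    blk_mq_unfreeze_queue(q, memflags);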
@@ -3373,14 +3390,16 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
}
EXPORT_SYMBOL(scsi_vpd_lun_id);
-/*
+/**
* scsi_vpd_tpg_id - return a target port group identifier
* @sdev: SCSI device
+ * @rel_id: if not %NULL, location in which to return the relative target port
*
* Returns the Target Port Group identifier from the information
- * froom VPD page 0x83 of the device.
+ * from VPD page 0x83 of the device.
+ * Optionally sets @rel_id to the relative target port on success.
*
- * Returns the identifier or error on failure.
+ * Return: the identifier or error on failure.
*/
int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
{
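With the corrected kernel-doc above, a caller that also needs the relative target port passes a non-NULL @rel_id. A sketch:

    /* Sketch: fetch the ALUA target port group and relative port id. */
    static int report_tpg(struct scsi_device *sdev)
    {
            int rel_id = 0;
            int tpg = scsi_vpd_tpg_id(sdev, &rel_id);

            if (tpg < 0)
                    return tpg;     /* negative error code */
            sdev_printk(KERN_INFO, sdev, "tpg %d rel port %d\n", tpg, rel_id);
            return 0;
    }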
diff --git a/drivers/scsi/scsi_lib_test.c b/drivers/scsi/scsi_lib_test.c
index 99834426a100..ae8af0e0047a 100644
--- a/drivers/scsi/scsi_lib_test.c
+++ b/drivers/scsi/scsi_lib_test.c
@@ -67,6 +67,13 @@ static void scsi_lib_test_multiple_sense(struct kunit *test)
};
int i;
+ /* Success */
+ sc.result = 0;
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, &failures));
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, NULL));
+ /* Command failed but caller did not pass in a failures array */
+ scsi_build_sense(&sc, 0, ILLEGAL_REQUEST, 0x91, 0x36);
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, NULL));
/* Match end of array */
scsi_build_sense(&sc, 0, ILLEGAL_REQUEST, 0x91, 0x36);
KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc, &failures));
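The `failures` table these tests drive is the passthrough retry mechanism behind `scsi_check_passthrough()`: each `struct scsi_failure` entry matches a result or sense triple and bounds retries with `.allowed`, and the array ends with an empty terminator. A sketch of constructing one (the matcher constants are those used by existing callers such as sd.c; treat the exact names as assumptions):

    /* Sketch: retry any UNIT ATTENTION up to 3 times. */
    static struct scsi_failure failure_defs[] = {
            {
                    .sense = UNIT_ATTENTION,
                    .asc = SCMD_FAILURE_ASC_ANY,
                    .ascq = SCMD_FAILURE_ASCQ_ANY,
                    .allowed = 3,
                    .result = SAM_STAT_CHECK_CONDITION,
            },
            {}      /* terminator */
    };
    static struct scsi_failures failures = {
            .failure_definitions = failure_defs,
    };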
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 042329b74c6e..4833b8fe251b 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -151,8 +151,9 @@ int scsi_complete_async_scans(void)
struct async_scan_data *data;
do {
- if (list_empty(&scanning_hosts))
- return 0;
+ scoped_guard(spinlock, &async_scan_lock)
+ if (list_empty(&scanning_hosts))
+ return 0;
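`scoped_guard(spinlock, &async_scan_lock)` holds the lock for exactly the statement that follows and drops it on every exit path, including the early `return 0` above, so the emptiness check can no longer race with hosts joining or leaving the list. Open-coded, the guarded statement is roughly:

    spin_lock(&async_scan_lock);
    if (list_empty(&scanning_hosts)) {
            spin_unlock(&async_scan_lock);
            return 0;
    }
    spin_unlock(&async_scan_lock);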
/* If we can't get memory immediately, that's OK. Just
* sleep a little. Even if we never get memory, the async
* scans will finish eventually.
@@ -220,6 +221,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
int new_shift = sbitmap_calculate_shift(depth);
bool need_alloc = !sdev->budget_map.map;
bool need_free = false;
+ unsigned int memflags;
int ret;
struct sbitmap sb_backup;
@@ -227,7 +229,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
/*
* realloc if new shift is calculated, which is caused by setting
- * up one new default queue depth after calling ->device_configure
+ * up one new default queue depth after calling ->sdev_configure
*/
if (!need_alloc && new_shift != sdev->budget_map.shift)
need_alloc = need_free = true;
@@ -240,12 +242,12 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
* and here disk isn't added yet, so freezing is pretty fast
*/
if (need_free) {
- blk_mq_freeze_queue(sdev->request_queue);
+ memflags = blk_mq_freeze_queue(sdev->request_queue);
sb_backup = sdev->budget_map;
}
ret = sbitmap_init_node(&sdev->budget_map,
scsi_device_max_queue_depth(sdev),
- new_shift, GFP_KERNEL,
+ new_shift, GFP_NOIO,
sdev->request_queue->node, false, true);
if (!ret)
sbitmap_resize(&sdev->budget_map, depth);
@@ -256,7 +258,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
else
sbitmap_free(&sb_backup);
ret = 0;
- blk_mq_unfreeze_queue(sdev->request_queue);
+ blk_mq_unfreeze_queue(sdev->request_queue, memflags);
}
return ret;
}
@@ -265,7 +267,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
* scsi_alloc_sdev - allocate and setup a scsi_Device
* @starget: which target to allocate a &scsi_device for
* @lun: which lun
- * @hostdata: usually NULL and set by ->slave_alloc instead
+ * @hostdata: usually NULL and set by ->sdev_init instead
*
* Description:
* Allocate, initialize for io, and return a pointer to a scsi_Device.
@@ -312,11 +314,11 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
sdev->sdev_gendev.parent = get_device(&starget->dev);
sdev->sdev_target = starget;
- /* usually NULL and set by ->slave_alloc instead */
+ /* usually NULL and set by ->sdev_init instead */
sdev->hostdata = hostdata;
/* if the device needs this changing, it may do so in the
- * slave_configure function */
+ * sdev_configure function */
sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED;
/*
@@ -363,8 +365,8 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
scsi_sysfs_device_initialize(sdev);
- if (shost->hostt->slave_alloc) {
- ret = shost->hostt->slave_alloc(sdev);
+ if (shost->hostt->sdev_init) {
+ ret = shost->hostt->sdev_init(sdev);
if (ret) {
/*
* if LLDD reports slave not present, don't clutter
@@ -1074,10 +1076,8 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
else if (*bflags & BLIST_MAX_1024)
lim.max_hw_sectors = 1024;
- if (hostt->device_configure)
- ret = hostt->device_configure(sdev, &lim);
- else if (hostt->slave_configure)
- ret = hostt->slave_configure(sdev);
+ if (hostt->sdev_configure)
+ ret = hostt->sdev_configure(sdev, &lim);
if (ret) {
queue_limits_cancel_update(sdev->request_queue);
/*
@@ -1097,12 +1097,12 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
}
/*
- * The queue_depth is often changed in ->device_configure.
+ * The queue_depth is often changed in ->sdev_configure.
*
* Set up budget map again since memory consumption of the map depends
* on actual queue depth.
*/
- if (hostt->device_configure || hostt->slave_configure)
+ if (hostt->sdev_configure)
scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth);
if (sdev->scsi_level >= SCSI_3)
@@ -1636,6 +1636,24 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
}
EXPORT_SYMBOL(__scsi_add_device);
+/**
+ * scsi_add_device - creates a new SCSI (LU) instance
+ * @host: the &Scsi_Host instance where the device is located
+ * @channel: target channel number (rarely other than %0)
+ * @target: target id number
+ * @lun: LUN of target device
+ *
+ * Probe for a specific LUN and add it if found.
+ *
+ * Notes: This call is usually performed internally during a SCSI
+ * bus scan when an HBA is added (i.e. scsi_scan_host()). So it
+ * should only be called if the HBA becomes aware of a new SCSI
+ * device (LU) after scsi_scan_host() has completed. If successful
+ * this call can lead to sdev_init() and sdev_configure() callbacks
+ * into the LLD.
+ *
+ * Return: %0 on success or negative error code on failure
+ */
int scsi_add_device(struct Scsi_Host *host, uint channel,
uint target, u64 lun)
{
@@ -2027,6 +2045,8 @@ static void do_scan_async(void *_data, async_cookie_t c)
/**
* scsi_scan_host - scan the given adapter
* @shost: adapter to scan
+ *
+ * Notes: Should be called after scsi_add_host()
**/
void scsi_scan_host(struct Scsi_Host *shost)
{
diff --git a/drivers/scsi/scsi_sysctl.c b/drivers/scsi/scsi_sysctl.c
index 093774d77534..055a03a83ad6 100644
--- a/drivers/scsi/scsi_sysctl.c
+++ b/drivers/scsi/scsi_sysctl.c
@@ -12,12 +12,14 @@
#include "scsi_priv.h"
-static struct ctl_table scsi_table[] = {
+static const struct ctl_table scsi_table[] = {
{ .procname = "logging_level",
.data = &scsi_logging_level,
.maxlen = sizeof(scsi_logging_level),
.mode = 0644,
- .proc_handler = proc_dointvec },
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_INT_MAX },
};
static struct ctl_table_header *scsi_table_header;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index f3a1ecb42128..d772258e29ad 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -898,7 +898,7 @@ static DEVICE_ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field,
#define sdev_vpd_pg_attr(_page) \
static ssize_t \
show_vpd_##_page(struct file *filp, struct kobject *kobj, \
- struct bin_attribute *bin_attr, \
+ const struct bin_attribute *bin_attr, \
char *buf, loff_t off, size_t count) \
{ \
struct device *dev = kobj_to_dev(kobj); \
@@ -914,10 +914,10 @@ show_vpd_##_page(struct file *filp, struct kobject *kobj, \
rcu_read_unlock(); \
return ret; \
} \
-static struct bin_attribute dev_attr_vpd_##_page = { \
+static const struct bin_attribute dev_attr_vpd_##_page = { \
.attr = {.name = __stringify(vpd_##_page), .mode = S_IRUGO }, \
.size = 0, \
- .read = show_vpd_##_page, \
+ .read_new = show_vpd_##_page, \
};
sdev_vpd_pg_attr(pg83);
@@ -930,7 +930,7 @@ sdev_vpd_pg_attr(pgb7);
sdev_vpd_pg_attr(pg0);
static ssize_t show_inquiry(struct file *filep, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
@@ -943,13 +943,13 @@ static ssize_t show_inquiry(struct file *filep, struct kobject *kobj,
sdev->inquiry_len);
}
-static struct bin_attribute dev_attr_inquiry = {
+static const struct bin_attribute dev_attr_inquiry = {
.attr = {
.name = "inquiry",
.mode = S_IRUGO,
},
.size = 0,
- .read = show_inquiry,
+ .read_new = show_inquiry,
};
static ssize_t
@@ -1348,7 +1348,7 @@ static struct attribute *scsi_sdev_attrs[] = {
NULL
};
-static struct bin_attribute *scsi_sdev_bin_attrs[] = {
+static const struct bin_attribute *const scsi_sdev_bin_attrs[] = {
&dev_attr_vpd_pg0,
&dev_attr_vpd_pg83,
&dev_attr_vpd_pg80,
@@ -1362,7 +1362,7 @@ static struct bin_attribute *scsi_sdev_bin_attrs[] = {
};
static struct attribute_group scsi_sdev_attr_group = {
.attrs = scsi_sdev_attrs,
- .bin_attrs = scsi_sdev_bin_attrs,
+ .bin_attrs_new = scsi_sdev_bin_attrs,
.is_visible = scsi_sdev_attr_is_visible,
.is_bin_visible = scsi_sdev_bin_attr_is_visible,
};
@@ -1513,8 +1513,8 @@ void __scsi_remove_device(struct scsi_device *sdev)
kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags);
cancel_work_sync(&sdev->requeue_work);
- if (sdev->host->hostt->slave_destroy)
- sdev->host->hostt->slave_destroy(sdev);
+ if (sdev->host->hostt->sdev_destroy)
+ sdev->host->hostt->sdev_destroy(sdev);
transport_destroy_device(dev);
/*
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 9b47f91c5b97..c75a806496d6 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1324,7 +1324,7 @@ EXPORT_SYMBOL_GPL(iscsi_create_flashnode_conn);
* 1 on success
* 0 on failure
*/
-static int iscsi_is_flashnode_conn_dev(struct device *dev, void *data)
+static int iscsi_is_flashnode_conn_dev(struct device *dev, const void *data)
{
return dev->bus == &iscsi_flashnode_bus;
}
@@ -1335,7 +1335,7 @@ static int iscsi_destroy_flashnode_conn(struct iscsi_bus_flash_conn *fnode_conn)
return 0;
}
-static int flashnode_match_index(struct device *dev, void *data)
+static int flashnode_match_index(struct device *dev, const void *data)
{
struct iscsi_bus_flash_session *fnode_sess = NULL;
int ret = 0;
@@ -1344,7 +1344,7 @@ static int flashnode_match_index(struct device *dev, void *data)
goto exit_match_index;
fnode_sess = iscsi_dev_to_flash_session(dev);
- ret = (fnode_sess->target_id == *((int *)data)) ? 1 : 0;
+ ret = (fnode_sess->target_id == *((const int *)data)) ? 1 : 0;
exit_match_index:
return ret;
@@ -1389,8 +1389,8 @@ iscsi_get_flashnode_by_index(struct Scsi_Host *shost, uint32_t idx)
* %NULL on failure
*/
struct device *
-iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
- int (*fn)(struct device *dev, void *data))
+iscsi_find_flashnode_sess(struct Scsi_Host *shost, const void *data,
+ device_match_t fn)
{
return device_find_child(&shost->shost_gendev, data, fn);
}
@@ -2122,33 +2122,6 @@ destroy_wq:
}
EXPORT_SYMBOL_GPL(iscsi_add_session);
-/**
- * iscsi_create_session - create iscsi class session
- * @shost: scsi host
- * @transport: iscsi transport
- * @dd_size: private driver data size
- * @target_id: which target
- *
- * This can be called from a LLD or iscsi_transport.
- */
-struct iscsi_cls_session *
-iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
- int dd_size, unsigned int target_id)
-{
- struct iscsi_cls_session *session;
-
- session = iscsi_alloc_session(shost, transport, dd_size);
- if (!session)
- return NULL;
-
- if (iscsi_add_session(session, target_id)) {
- iscsi_free_session(session);
- return NULL;
- }
- return session;
-}
-EXPORT_SYMBOL_GPL(iscsi_create_session);
-
static void iscsi_conn_release(struct device *dev)
{
struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev);
@@ -3209,11 +3182,14 @@ iscsi_set_host_param(struct iscsi_transport *transport,
}
/* see similar check in iscsi_if_set_param() */
- if (strlen(data) > ev->u.set_host_param.len)
- return -EINVAL;
+ if (strlen(data) > ev->u.set_host_param.len) {
+ err = -EINVAL;
+ goto out;
+ }
err = transport->set_host_param(shost, ev->u.set_host_param.param,
data, ev->u.set_host_param.len);
+out:
scsi_host_put(shost);
return err;
}
@@ -3523,7 +3499,7 @@ static int iscsi_new_flashnode(struct iscsi_transport *transport,
pr_err("%s could not find host no %u\n",
__func__, ev->u.new_flashnode.host_no);
err = -ENODEV;
- goto put_host;
+ goto exit_new_fnode;
}
index = transport->new_flashnode(shost, data, len);
@@ -3533,7 +3509,6 @@ static int iscsi_new_flashnode(struct iscsi_transport *transport,
else
err = -EIO;
-put_host:
scsi_host_put(shost);
exit_new_fnode:
@@ -3558,7 +3533,7 @@ static int iscsi_del_flashnode(struct iscsi_transport *transport,
pr_err("%s could not find host no %u\n",
__func__, ev->u.del_flashnode.host_no);
err = -ENODEV;
- goto put_host;
+ goto exit_del_fnode;
}
idx = ev->u.del_flashnode.flashnode_idx;
@@ -3600,7 +3575,7 @@ static int iscsi_login_flashnode(struct iscsi_transport *transport,
pr_err("%s could not find host no %u\n",
__func__, ev->u.login_flashnode.host_no);
err = -ENODEV;
- goto put_host;
+ goto exit_login_fnode;
}
idx = ev->u.login_flashnode.flashnode_idx;
@@ -3652,7 +3627,7 @@ static int iscsi_logout_flashnode(struct iscsi_transport *transport,
pr_err("%s could not find host no %u\n",
__func__, ev->u.logout_flashnode.host_no);
err = -ENODEV;
- goto put_host;
+ goto exit_logout_fnode;
}
idx = ev->u.logout_flashnode.flashnode_idx;
@@ -3702,7 +3677,7 @@ static int iscsi_logout_flashnode_sid(struct iscsi_transport *transport,
pr_err("%s could not find host no %u\n",
__func__, ev->u.logout_flashnode.host_no);
err = -ENODEV;
- goto put_host;
+ goto exit_logout_sid;
}
session = iscsi_session_lookup(ev->u.logout_flashnode_sid.sid);
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 4e33f1661e4c..351b028ef893 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -888,7 +888,8 @@ static void sas_port_delete_link(struct sas_port *port,
sysfs_remove_link(&phy->dev.kobj, "port");
}
-/** sas_port_alloc - allocate and initialize a SAS port structure
+/**
+ * sas_port_alloc - allocate and initialize a SAS port structure
*
* @parent: parent device
* @port_id: port number
@@ -897,7 +898,7 @@ static void sas_port_delete_link(struct sas_port *port,
* below the device specified by @parent which must be either a Scsi_Host
* or a sas_expander_device.
*
- * Returns %NULL on error
+ * Returns: %NULL on error
*/
struct sas_port *sas_port_alloc(struct device *parent, int port_id)
{
@@ -932,7 +933,8 @@ struct sas_port *sas_port_alloc(struct device *parent, int port_id)
}
EXPORT_SYMBOL(sas_port_alloc);
-/** sas_port_alloc_num - allocate and initialize a SAS port structure
+/**
+ * sas_port_alloc_num - allocate and initialize a SAS port structure
*
* @parent: parent device
*
@@ -942,7 +944,7 @@ EXPORT_SYMBOL(sas_port_alloc);
* the device tree below the device specified by @parent which must be
* either a Scsi_Host or a sas_expander_device.
*
- * Returns %NULL on error
+ * Returns: %NULL on error
*/
struct sas_port *sas_port_alloc_num(struct device *parent)
{
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 64852e6df3e3..fe47850a8258 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -985,7 +985,8 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
}
-/** spi_dv_device - Do Domain Validation on the device
+/**
+ * spi_dv_device - Do Domain Validation on the device
* @sdev: scsi device to validate
*
* Performs the domain validation on the given device in the
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index 64f6b22e8cc0..aeb58a9e6b7f 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -388,7 +388,7 @@ static void srp_reconnect_work(struct work_struct *work)
"reconnect attempt %d failed (%d)\n",
++rport->failed_reconnects, res);
delay = rport->reconnect_delay *
- min(100, max(1, rport->failed_reconnects - 10));
+ clamp(rport->failed_reconnects - 10, 1, 100);
if (delay > 0)
queue_delayed_work(system_long_wq,
&rport->reconnect_work, delay * HZ);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index af62a8ed8620..89d5c4b17bc4 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -809,14 +809,14 @@ static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM))
scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM;
- if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
+ if (bio_integrity_flagged(bio, BIP_CHECK_GUARD))
scmd->prot_flags |= SCSI_PROT_GUARD_CHECK;
}
if (dif != T10_PI_TYPE3_PROTECTION) { /* DIX/DIF Type 0, 1, 2 */
scmd->prot_flags |= SCSI_PROT_REF_INCREMENT;
- if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
+ if (bio_integrity_flagged(bio, BIP_CHECK_REFTAG))
scmd->prot_flags |= SCSI_PROT_REF_CHECK;
}
@@ -991,6 +991,7 @@ static void sd_config_atomic(struct scsi_disk *sdkp, struct queue_limits *lim)
lim->atomic_write_hw_boundary = 0;
lim->atomic_write_hw_unit_min = unit_min * logical_block_size;
lim->atomic_write_hw_unit_max = unit_max * logical_block_size;
+ lim->features |= BLK_FEAT_ATOMIC_WRITES;
}
static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
@@ -3383,7 +3384,7 @@ static void sd_read_block_limits_ext(struct scsi_disk *sdkp)
rcu_read_lock();
vpd = rcu_dereference(sdkp->device->vpd_pgb7);
- if (vpd && vpd->len >= 2)
+ if (vpd && vpd->len >= 6)
sdkp->rscs = vpd->data[5] & 1;
rcu_read_unlock();
}
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 7a447ff600d2..a8db66428f80 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -169,6 +169,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
unsigned int nr_zones, size_t *buflen)
{
struct request_queue *q = sdkp->disk->queue;
+ unsigned int max_segments;
size_t bufsize;
void *buf;
@@ -180,12 +181,15 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
* Furthermore, since the report zone command cannot be split, make
* sure that the allocated buffer can always be mapped by limiting the
* number of pages allocated to the HBA max segments limit.
+ * Since max segments can be larger than the max inline bio vectors,
+ * further limit the allocated buffer to BIO_MAX_INLINE_VECS.
*/
nr_zones = min(nr_zones, sdkp->zone_info.nr_zones);
bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE);
bufsize = min_t(size_t, bufsize,
queue_max_hw_sectors(q) << SECTOR_SHIFT);
- bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);
+ max_segments = min(BIO_MAX_INLINE_VECS, queue_max_segments(q));
+ bufsize = min_t(size_t, bufsize, max_segments << PAGE_SHIFT);
while (bufsize >= SECTOR_SIZE) {
buf = kvzalloc(bufsize, GFP_KERNEL | __GFP_NORETRY);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 94127868bedf..effb7e768165 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1639,7 +1639,7 @@ MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
-static struct ctl_table sg_sysctls[] = {
+static const struct ctl_table sg_sysctls[] = {
{
.procname = "sg-big-buff",
.data = &sg_big_buff,
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 870f37b70546..66c9d1a2c94d 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -19,7 +19,7 @@
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
-#include <linux/blk-mq-pci.h>
+#include <linux/crash_dump.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -3854,7 +3854,7 @@ static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
{
- del_timer_sync(&ctrl_info->heartbeat_timer);
+ timer_delete_sync(&ctrl_info->heartbeat_timer);
}
static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
@@ -5247,7 +5247,7 @@ static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
ctrl_info->error_buffer_length =
ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
- if (reset_devices)
+ if (is_kdump_kernel())
max_transfer_size = min(ctrl_info->max_transfer_size,
PQI_MAX_TRANSFER_SIZE_KDUMP);
else
@@ -5276,7 +5276,7 @@ static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
u16 num_elements_per_iq;
u16 num_elements_per_oq;
- if (reset_devices) {
+ if (is_kdump_kernel()) {
num_queue_groups = 1;
} else {
int num_cpus;
@@ -5990,7 +5990,7 @@ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
pqi_stream_data->next_lba = rmd.first_block +
rmd.block_cnt;
pqi_stream_data->last_accessed = jiffies;
- per_cpu_ptr(device->raid_io_stats, smp_processor_id())->write_stream_cnt++;
+ per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->write_stream_cnt++;
return true;
}
@@ -6069,7 +6069,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
raid_bypassed = true;
- per_cpu_ptr(device->raid_io_stats, smp_processor_id())->raid_bypass_cnt++;
+ per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->raid_bypass_cnt++;
}
}
if (!raid_bypassed)
@@ -6490,7 +6490,7 @@ out:
return SUCCESS;
}
-static int pqi_slave_alloc(struct scsi_device *sdev)
+static int pqi_sdev_init(struct scsi_device *sdev)
{
struct pqi_scsi_dev *device;
unsigned long flags;
@@ -6547,10 +6547,10 @@ static void pqi_map_queues(struct Scsi_Host *shost)
struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
if (!ctrl_info->disable_managed_interrupts)
- return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
- ctrl_info->pci_dev, 0);
+ blk_mq_map_hw_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+ &ctrl_info->pci_dev->dev, 0);
else
- return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
+ blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
}
static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
@@ -6558,7 +6558,8 @@ static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER;
}
-static int pqi_slave_configure(struct scsi_device *sdev)
+static int pqi_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
int rc = 0;
struct pqi_scsi_dev *device;
@@ -6574,7 +6575,7 @@ static int pqi_slave_configure(struct scsi_device *sdev)
return rc;
}
-static void pqi_slave_destroy(struct scsi_device *sdev)
+static void pqi_sdev_destroy(struct scsi_device *sdev)
{
struct pqi_ctrl_info *ctrl_info;
struct pqi_scsi_dev *device;
@@ -7549,9 +7550,9 @@ static const struct scsi_host_template pqi_driver_template = {
.eh_device_reset_handler = pqi_eh_device_reset_handler,
.eh_abort_handler = pqi_eh_abort_handler,
.ioctl = pqi_ioctl,
- .slave_alloc = pqi_slave_alloc,
- .slave_configure = pqi_slave_configure,
- .slave_destroy = pqi_slave_destroy,
+ .sdev_init = pqi_sdev_init,
+ .sdev_configure = pqi_sdev_configure,
+ .sdev_destroy = pqi_sdev_destroy,
.map_queues = pqi_map_queues,
.sdev_groups = pqi_sdev_groups,
.shost_groups = pqi_shost_groups,
@@ -8288,12 +8289,12 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
u32 product_id;
if (reset_devices) {
- if (pqi_is_fw_triage_supported(ctrl_info)) {
+ if (is_kdump_kernel() && pqi_is_fw_triage_supported(ctrl_info)) {
rc = sis_wait_for_fw_triage_completion(ctrl_info);
if (rc)
return rc;
}
- if (sis_is_ctrl_logging_supported(ctrl_info)) {
+ if (is_kdump_kernel() && sis_is_ctrl_logging_supported(ctrl_info)) {
sis_notify_kdump(ctrl_info);
rc = sis_wait_for_ctrl_logging_completion(ctrl_info);
if (rc)
@@ -8344,7 +8345,7 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
ctrl_info->product_id = (u8)product_id;
ctrl_info->product_revision = (u8)(product_id >> 8);
- if (reset_devices) {
+ if (is_kdump_kernel()) {
if (ctrl_info->max_outstanding_requests >
PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
ctrl_info->max_outstanding_requests =
@@ -8480,7 +8481,7 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
if (rc)
return rc;
- if (ctrl_info->ctrl_logging_supported && !reset_devices) {
+ if (ctrl_info->ctrl_logging_supported && !is_kdump_kernel()) {
pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE);
pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
}
@@ -9710,6 +9711,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x00a3)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1ff9, 0x00a1)
},
{
@@ -10046,6 +10051,30 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4044)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4054)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4084)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4094)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4140)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x207d, 0x4240)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADVANTECH, 0x8312)
},
{
@@ -10262,6 +10291,14 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1018, 0x8238)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f3f, 0x0610)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_LENOVO, 0x0220)
},
{
@@ -10270,10 +10307,30 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0222)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0223)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0224)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0225)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_LENOVO, 0x0520)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0521)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_LENOVO, 0x0522)
},
{
@@ -10294,6 +10351,26 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0624)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0625)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0626)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0627)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0628)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1014, 0x0718)
},
{
@@ -10322,6 +10399,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1ded, 0x3301)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1ff9, 0x0045)
},
{
@@ -10470,6 +10551,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x100b)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1f51, 0x100e)
},
{
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index 9be3f0193145..1c24517e4e65 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -21,7 +21,7 @@
#define PCI_DEVICE_ID_CISCO_SNIC 0x0046
/* Supported devices by snic module */
-static struct pci_device_id snic_id_table[] = {
+static const struct pci_device_id snic_id_table[] = {
{PCI_DEVICE(0x1137, PCI_DEVICE_ID_CISCO_SNIC) },
{ 0, } /* end of table */
};
@@ -42,11 +42,11 @@ module_param(snic_max_qdepth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(snic_max_qdepth, "Queue depth to report for each LUN");
/*
- * snic_slave_alloc : callback function to SCSI Mid Layer, called on
+ * snic_sdev_init : callback function to SCSI Mid Layer, called on
* scsi device initialization.
*/
static int
-snic_slave_alloc(struct scsi_device *sdev)
+snic_sdev_init(struct scsi_device *sdev)
{
struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev));
@@ -57,11 +57,11 @@ snic_slave_alloc(struct scsi_device *sdev)
}
/*
- * snic_slave_configure : callback function to SCSI Mid Layer, called on
+ * snic_sdev_configure : callback function to SCSI Mid Layer, called on
* scsi device initialization.
*/
static int
-snic_slave_configure(struct scsi_device *sdev)
+snic_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
struct snic *snic = shost_priv(sdev->host);
u32 qdepth = 0, max_ios = 0;
@@ -107,8 +107,8 @@ static const struct scsi_host_template snic_host_template = {
.eh_abort_handler = snic_abort_cmd,
.eh_device_reset_handler = snic_device_reset,
.eh_host_reset_handler = snic_host_reset,
- .slave_alloc = snic_slave_alloc,
- .slave_configure = snic_slave_configure,
+ .sdev_init = snic_sdev_init,
+ .sdev_configure = snic_sdev_configure,
.change_queue_depth = snic_change_queue_depth,
.this_id = -1,
.cmd_per_lun = SNIC_DFLT_QUEUE_DEPTH,
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index ebbd50ec0cda..74a6830b7ed8 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -163,9 +163,11 @@ static const char *st_formats[] = {
static int debugging = DEBUG;
+/* Setting these non-zero may cause resets to go unrecognized */
#define MAX_RETRIES 0
#define MAX_WRITE_RETRIES 0
#define MAX_READY_RETRIES 0
+
#define NO_TAPE NOT_READY
#define ST_TIMEOUT (900 * HZ)
@@ -357,10 +359,18 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
{
int result = SRpnt->result;
u8 scode;
+ unsigned int ctr;
DEB(const char *stp;)
char *name = STp->name;
struct st_cmdstatus *cmdstatp;
+ ctr = scsi_get_ua_por_ctr(STp->device);
+ if (ctr != STp->por_ctr) {
+ STp->por_ctr = ctr;
+ STp->pos_unknown = 1; /* ASC => power on / reset */
+ st_printk(KERN_WARNING, STp, "Power on/reset recognized.\n");
+ }
+
if (!result)
return 0;
@@ -413,10 +423,11 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
if (cmdstatp->have_sense &&
cmdstatp->sense_hdr.asc == 0 && cmdstatp->sense_hdr.ascq == 0x17)
STp->cleaning_req = 1; /* ASC and ASCQ => cleaning requested */
- if (cmdstatp->have_sense && scode == UNIT_ATTENTION && cmdstatp->sense_hdr.asc == 0x29)
+ if (cmdstatp->have_sense && scode == UNIT_ATTENTION &&
+ cmdstatp->sense_hdr.asc == 0x29 && !STp->pos_unknown) {
STp->pos_unknown = 1; /* ASC => power on / reset */
-
- STp->pos_unknown |= STp->device->was_reset;
+ st_printk(KERN_WARNING, STp, "Power on/reset recognized.\n");
+ }
if (cmdstatp->have_sense &&
scode == RECOVERED_ERROR
@@ -952,7 +963,6 @@ static void reset_state(struct scsi_tape *STp)
STp->partition = find_partition(STp);
if (STp->partition < 0)
STp->partition = 0;
- STp->new_partition = STp->partition;
}
}
@@ -969,6 +979,7 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
{
int attentions, waits, max_wait, scode;
int retval = CHKRES_READY, new_session = 0;
+ unsigned int ctr;
unsigned char cmd[MAX_COMMAND_SIZE];
struct st_request *SRpnt = NULL;
struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
@@ -1025,6 +1036,13 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
}
}
+ ctr = scsi_get_ua_new_media_ctr(STp->device);
+ if (ctr != STp->new_media_ctr) {
+ STp->new_media_ctr = ctr;
+ new_session = 1;
+ DEBC_printk(STp, "New tape session.\n");
+ }
+
retval = (STp->buffer)->syscall_result;
if (!retval)
retval = new_session ? CHKRES_NEW_SESSION : CHKRES_READY;
@@ -2897,7 +2915,6 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
timeout = STp->long_timeout * 8;
DEBC_printk(STp, "Erasing tape.\n");
- fileno = blkno = at_sm = 0;
break;
case MTSETBLK: /* Set block length */
case MTSETDENSITY: /* Set tape density */
@@ -2930,14 +2947,17 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
if (cmd_in == MTSETDENSITY) {
(STp->buffer)->b_data[4] = arg;
STp->density_changed = 1; /* At least we tried ;-) */
+ STp->changed_density = arg;
} else if (cmd_in == SET_DENS_AND_BLK)
(STp->buffer)->b_data[4] = arg >> 24;
else
(STp->buffer)->b_data[4] = STp->density;
if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) {
ltmp = arg & MT_ST_BLKSIZE_MASK;
- if (cmd_in == MTSETBLK)
+ if (cmd_in == MTSETBLK) {
STp->blksize_changed = 1; /* At least we tried ;-) */
+ STp->changed_blksize = arg;
+ }
} else
ltmp = STp->block_size;
(STp->buffer)->b_data[9] = (ltmp >> 16);
@@ -3084,7 +3104,9 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
cmd_in == MTSETDRVBUFFER ||
cmd_in == SET_DENS_AND_BLK) {
if (cmdstatp->sense_hdr.sense_key == ILLEGAL_REQUEST &&
- !(STp->use_pf & PF_TESTED)) {
+ cmdstatp->sense_hdr.asc == 0x24 &&
+ (STp->device)->scsi_level <= SCSI_2 &&
+ !(STp->use_pf & PF_TESTED)) {
/* Try the other possible state of Page Format if not
already tried */
STp->use_pf = (STp->use_pf ^ USE_PF) | PF_TESTED;
@@ -3636,9 +3658,23 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
retval = (-EIO);
goto out;
}
- reset_state(STp);
- /* remove this when the midlevel properly clears was_reset */
- STp->device->was_reset = 0;
+ reset_state(STp); /* Clears pos_unknown */
+
+ /* Fix the device settings after reset, ignore errors */
+ if (mtc.mt_op == MTREW || mtc.mt_op == MTSEEK ||
+ mtc.mt_op == MTEOM) {
+ if (STp->can_partitions) {
+ /* STp->new_partition contains the
+ * latest partition set
+ */
+ STp->partition = 0;
+ switch_partition(STp);
+ }
+ if (STp->density_changed)
+ st_int_ioctl(STp, MTSETDENSITY, STp->changed_density);
+ if (STp->blksize_changed)
+ st_int_ioctl(STp, MTSETBLK, STp->changed_blksize);
+ }
}
if (mtc.mt_op != MTNOP && mtc.mt_op != MTSETBLK &&
@@ -4122,7 +4158,7 @@ static void validate_options(void)
*/
static int __init st_setup(char *str)
{
- int i, len, ints[5];
+ int i, len, ints[ARRAY_SIZE(parms) + 1];
char *stp;
stp = get_options(str, ARRAY_SIZE(ints), ints);
@@ -4384,6 +4420,9 @@ static int st_probe(struct device *dev)
goto out_idr_remove;
}
+ tpnt->new_media_ctr = scsi_get_ua_new_media_ctr(SDp);
+ tpnt->por_ctr = scsi_get_ua_por_ctr(SDp);
+
dev_set_drvdata(dev, tpnt);
@@ -4665,6 +4704,24 @@ options_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(options);
+/**
+ * position_lost_in_reset_show - Show whether the tape position is unknown
+ * @dev: struct device
+ * @attr: attribute structure
+ * @buf: buffer to return formatted data in
+ *
+ * A value of 1 indicates that reads, writes, etc. are blocked because a
+ * device reset has occurred and no command positioning the tape has been
+ * issued since.
+ */
+static ssize_t position_lost_in_reset_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct st_modedef *STm = dev_get_drvdata(dev);
+ struct scsi_tape *STp = STm->tape;
+
+ return sprintf(buf, "%d\n", STp->pos_unknown);
+}
+static DEVICE_ATTR_RO(position_lost_in_reset);
+
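
The new attribute is registered per tape device node via st_dev_attrs below, so user space can check it before issuing I/O. A userspace sketch; the sysfs path and the st0 node name are assumptions:

        #include <stdio.h>

        int main(void)
        {
                int lost = 0;
                FILE *f = fopen("/sys/class/scsi_tape/st0/position_lost_in_reset", "r");

                if (f) {
                        lost = fgetc(f) == '1';
                        fclose(f);
                }
                /* nonzero: reposition (e.g. MTREW) before reading or writing */
                return lost;
        }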
/* Support for tape stats */
/**
@@ -4849,6 +4906,7 @@ static struct attribute *st_dev_attrs[] = {
&dev_attr_default_density.attr,
&dev_attr_default_compression.attr,
&dev_attr_options.attr,
+ &dev_attr_position_lost_in_reset.attr,
NULL,
};
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index 1aaaf5369a40..0d7c4b8c2c8a 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -165,6 +165,7 @@ struct scsi_tape {
unsigned char compression_changed;
unsigned char drv_buffer;
unsigned char density;
+ unsigned char changed_density;
unsigned char door_locked;
unsigned char autorew_dev; /* auto-rewind device */
unsigned char rew_at_close; /* rewind necessary at close */
@@ -172,11 +173,16 @@ struct scsi_tape {
unsigned char cleaning_req; /* cleaning requested? */
unsigned char first_tur; /* first TEST UNIT READY */
int block_size;
+ int changed_blksize;
int min_block;
int max_block;
int recover_count; /* From tape opening */
int recover_reg; /* From last status call */
+ /* The saved values of midlevel counters */
+ unsigned int new_media_ctr;
+ unsigned int por_ctr;
+
#if DEBUG
unsigned char write_pending;
int nbr_finished;
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 0e81125df8c7..63ed7f9aaa93 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -584,7 +584,7 @@ static void return_abnormal_state(struct st_hba *hba, int status)
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static int
-stex_slave_config(struct scsi_device *sdev)
+stex_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
sdev->use_10_for_rw = 1;
sdev->use_10_for_ms = 1;
@@ -1481,14 +1481,14 @@ static const struct scsi_host_template driver_template = {
.proc_name = DRV_NAME,
.bios_param = stex_biosparam,
.queuecommand = stex_queuecommand,
- .slave_configure = stex_slave_config,
+ .sdev_configure = stex_sdev_configure,
.eh_abort_handler = stex_abort,
.eh_host_reset_handler = stex_reset,
.this_id = -1,
.dma_boundary = PAGE_SIZE - 1,
};
-static struct pci_device_id stex_pci_tbl[] = {
+static const struct pci_device_id stex_pci_tbl[] = {
/* st_shasta */
{ 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
st_shasta }, /* SuperTrak EX8350/8300/16350/16300 */
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index b8186feccdf5..d9e59204a9c3 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -362,7 +362,7 @@ MODULE_PARM_DESC(ring_avail_percent_lowater,
/*
* Timeout in seconds for all devices managed by this driver.
*/
-static int storvsc_timeout = 180;
+static const int storvsc_timeout = 180;
#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
static struct scsi_transport_template *fc_transport_template;
@@ -768,7 +768,7 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns)
return;
}
- t = wait_for_completion_timeout(&request->wait_event, 10*HZ);
+ t = wait_for_completion_timeout(&request->wait_event, storvsc_timeout * HZ);
if (t == 0) {
dev_err(dev, "Failed to create sub-channel: timed out\n");
return;
@@ -776,7 +776,7 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns)
if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
vstor_packet->status != 0) {
- dev_err(dev, "Failed to create sub-channel: op=%d, sts=%d\n",
+ dev_err(dev, "Failed to create sub-channel: op=%d, host=0x%x\n",
vstor_packet->operation, vstor_packet->status);
return;
}
@@ -833,7 +833,7 @@ static int storvsc_execute_vstor_op(struct hv_device *device,
if (ret != 0)
return ret;
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ t = wait_for_completion_timeout(&request->wait_event, storvsc_timeout * HZ);
if (t == 0)
return -ETIMEDOUT;
@@ -923,14 +923,13 @@ static int storvsc_channel_init(struct hv_device *device, bool is_fc)
/*
* Allocate state to manage the sub-channels.
- * We allocate an array based on the numbers of possible CPUs
- * (Hyper-V does not support cpu online/offline).
- * This Array will be sparseley populated with unique
- * channels - primary + sub-channels.
- * We will however populate all the slots to evenly distribute
- * the load.
+ * We allocate an array based on the number of CPU ids. This array
+ * is initially sparsely populated for the CPUs assigned to channels:
+ * primary + sub-channels. As I/Os are initiated by different CPUs,
+ * the slots for all online CPUs are populated to evenly distribute
+ * the load across all channels.
*/
- stor_device->stor_chns = kcalloc(num_possible_cpus(), sizeof(void *),
+ stor_device->stor_chns = kcalloc(nr_cpu_ids, sizeof(void *),
GFP_KERNEL);
if (stor_device->stor_chns == NULL)
return -ENOMEM;
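
nr_cpu_ids is the right size here because the table is indexed by raw CPU id, and possible CPU ids can be sparse: num_possible_cpus() counts set bits in cpu_possible_mask, while the largest valid index is nr_cpu_ids - 1. A sketch of the distinction; tbl and chan are hypothetical stand-ins for the storvsc structures:

        /* Indexed by raw CPU id, so size by nr_cpu_ids rather than the
         * number of possible CPUs, which can be smaller when ids are
         * sparse. */
        void *chan = NULL;      /* stand-in for a channel pointer */
        void **tbl = kcalloc(nr_cpu_ids, sizeof(*tbl), GFP_KERNEL);

        if (tbl)
                tbl[raw_smp_processor_id()] = chan;     /* id-indexed slot */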
@@ -1184,7 +1183,7 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device,
STORVSC_LOGGING_WARN : STORVSC_LOGGING_ERROR;
storvsc_log_ratelimited(device, loglevel,
- "tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x hv 0x%x\n",
+ "tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x host 0x%x\n",
scsi_cmd_to_rq(request->cmd)->tag,
stor_pkt->vm_srb.cdb[0],
vstor_packet->vm_srb.scsi_status,
@@ -1351,6 +1350,8 @@ static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size,
return ret;
ret = storvsc_channel_init(device, is_fc);
+ if (ret)
+ vmbus_close(device->channel);
return ret;
}
@@ -1585,7 +1586,8 @@ static int storvsc_device_alloc(struct scsi_device *sdevice)
return 0;
}
-static int storvsc_device_configure(struct scsi_device *sdevice)
+static int storvsc_sdev_configure(struct scsi_device *sdevice,
+ struct queue_limits *lim)
{
blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ));
@@ -1668,7 +1670,7 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
if (ret != 0)
return FAILED;
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ t = wait_for_completion_timeout(&request->wait_event, storvsc_timeout * HZ);
if (t == 0)
return TIMEOUT_ERROR;
@@ -1819,6 +1821,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
return SCSI_MLQUEUE_DEVICE_BUSY;
}
+ payload->rangecount = 1;
payload->range.len = length;
payload->range.offset = offset_in_hvpg;
@@ -1887,8 +1890,8 @@ static struct scsi_host_template scsi_driver = {
.eh_host_reset_handler = storvsc_host_reset_handler,
.proc_name = "storvsc_host",
.eh_timed_out = storvsc_eh_timed_out,
- .slave_alloc = storvsc_device_alloc,
- .slave_configure = storvsc_device_configure,
+ .sdev_init = storvsc_device_alloc,
+ .sdev_configure = storvsc_sdev_configure,
.cmd_per_lun = 2048,
.this_id = -1,
/* Ensure there are no gaps in presented sgls */
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index a2560cc807d3..1a6eb72ca281 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -765,7 +765,7 @@ static void sym_tune_dev_queuing(struct sym_tcb *tp, int lun, u_short reqtags)
}
}
-static int sym53c8xx_slave_alloc(struct scsi_device *sdev)
+static int sym53c8xx_sdev_init(struct scsi_device *sdev)
{
struct sym_hcb *np = sym_get_hcb(sdev->host);
struct sym_tcb *tp = &np->target[sdev->id];
@@ -825,7 +825,8 @@ out:
/*
* Linux entry point for device queue sizing.
*/
-static int sym53c8xx_slave_configure(struct scsi_device *sdev)
+static int sym53c8xx_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct sym_hcb *np = sym_get_hcb(sdev->host);
struct sym_tcb *tp = &np->target[sdev->id];
@@ -861,14 +862,14 @@ static int sym53c8xx_slave_configure(struct scsi_device *sdev)
return 0;
}
-static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
+static void sym53c8xx_sdev_destroy(struct scsi_device *sdev)
{
struct sym_hcb *np = sym_get_hcb(sdev->host);
struct sym_tcb *tp = &np->target[sdev->id];
struct sym_lcb *lp = sym_lp(tp, sdev->lun);
unsigned long flags;
- /* if slave_alloc returned before allocating a sym_lcb, return */
+ /* if sdev_init returned before allocating a sym_lcb, return */
if (!lp)
return;
@@ -1656,7 +1657,7 @@ static int sym_detach(struct Scsi_Host *shost, struct pci_dev *pdev)
struct sym_hcb *np = sym_get_hcb(shost);
printk("%s: detaching ...\n", sym_name(np));
- del_timer_sync(&np->s.timer);
+ timer_delete_sync(&np->s.timer);
/*
* Reset NCR chip.
@@ -1684,9 +1685,9 @@ static const struct scsi_host_template sym2_template = {
.info = sym53c8xx_info,
.cmd_size = sizeof(struct sym_ucmd),
.queuecommand = sym53c8xx_queue_command,
- .slave_alloc = sym53c8xx_slave_alloc,
- .slave_configure = sym53c8xx_slave_configure,
- .slave_destroy = sym53c8xx_slave_destroy,
+ .sdev_init = sym53c8xx_sdev_init,
+ .sdev_configure = sym53c8xx_sdev_configure,
+ .sdev_destroy = sym53c8xx_sdev_destroy,
.eh_abort_handler = sym53c8xx_eh_abort_handler,
.eh_target_reset_handler = sym53c8xx_eh_target_reset_handler,
.eh_bus_reset_handler = sym53c8xx_eh_bus_reset_handler,
@@ -2030,7 +2031,7 @@ static struct spi_function_template sym2_transport_functions = {
.get_signalling = sym2_get_signalling,
};
-static struct pci_device_id sym2_id_table[] = {
+static const struct pci_device_id sym2_id_table[] = {
{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C810,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C820,
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 8471f38b730e..21ce3e940192 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -29,7 +29,6 @@
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_devinfo.h>
#include <linux/seqlock.h>
-#include <linux/blk-mq-virtio.h>
#include "sd.h"
@@ -746,7 +745,7 @@ static void virtscsi_map_queues(struct Scsi_Host *shost)
if (i == HCTX_TYPE_POLL)
blk_mq_map_queues(map);
else
- blk_mq_virtio_map_queues(map, vscsi->vdev, 2);
+ blk_mq_map_hw_queues(map, &vscsi->vdev->dev, 2);
}
}
@@ -801,7 +800,7 @@ static const struct scsi_host_template virtscsi_host_template = {
.eh_abort_handler = virtscsi_abort,
.eh_device_reset_handler = virtscsi_device_reset,
.eh_timed_out = virtscsi_eh_timed_out,
- .slave_alloc = virtscsi_device_alloc,
+ .sdev_init = virtscsi_device_alloc,
.dma_boundary = UINT_MAX,
.map_queues = virtscsi_map_queues,
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 9ec55ddc1204..924025305753 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -735,7 +735,8 @@ static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
return scsifront_action_handler(sc, VSCSIIF_ACT_SCSI_RESET);
}
-static int scsifront_sdev_configure(struct scsi_device *sdev)
+static int scsifront_sdev_configure(struct scsi_device *sdev,
+ struct queue_limits *lim)
{
struct vscsifrnt_info *info = shost_priv(sdev->host);
int err;
@@ -776,8 +777,8 @@ static const struct scsi_host_template scsifront_sht = {
.queuecommand = scsifront_queuecommand,
.eh_abort_handler = scsifront_eh_abort_handler,
.eh_device_reset_handler = scsifront_dev_reset_handler,
- .slave_configure = scsifront_sdev_configure,
- .slave_destroy = scsifront_sdev_destroy,
+ .sdev_configure = scsifront_sdev_configure,
+ .sdev_destroy = scsifront_sdev_destroy,
.cmd_per_lun = VSCSIIF_DEFAULT_CMD_PER_LUN,
.can_queue = VSCSIIF_MAX_REQS,
.this_id = -1,
@@ -1074,8 +1075,8 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
continue;
/*
- * Front device state path, used in slave_configure called
- * on successfull scsi_add_device, and in slave_destroy called
+ * Front device state path, used in sdev_configure called
+ * on successful scsi_add_device, and in sdev_destroy called
* on remove of a device.
*/
snprintf(info->dev_state_path, sizeof(info->dev_state_path),