Diffstat (limited to 'drivers/scsi')
-rw-r--r-- drivers/scsi/Kconfig | 5
-rw-r--r-- drivers/scsi/Makefile | 1
-rw-r--r-- drivers/scsi/aacraid/aacraid.h | 11
-rw-r--r-- drivers/scsi/aacraid/commsup.c | 17
-rw-r--r-- drivers/scsi/aacraid/src.c | 2
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_alua.c | 38
-rw-r--r-- drivers/scsi/esas2r/esas2r_ioctl.c | 25
-rw-r--r-- drivers/scsi/hpsa.c | 54
-rw-r--r-- drivers/scsi/hpsa.h | 1
-rw-r--r-- drivers/scsi/hpsa_cmd.h | 2
-rw-r--r-- drivers/scsi/ipr.c | 7
-rw-r--r-- drivers/scsi/iscsi_tcp.c | 2
-rw-r--r-- drivers/scsi/libiscsi.c | 26
-rw-r--r-- drivers/scsi/libsas/sas_ata.c | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc.h | 9
-rw-r--r-- drivers/scsi/lpfc/lpfc_attr.c | 23
-rw-r--r-- drivers/scsi/lpfc/lpfc_bsg.c | 4
-rw-r--r-- drivers/scsi/lpfc/lpfc_crtn.h | 13
-rw-r--r-- drivers/scsi/lpfc/lpfc_ct.c | 70
-rw-r--r-- drivers/scsi/lpfc/lpfc_debugfs.c | 89
-rw-r--r-- drivers/scsi/lpfc/lpfc_debugfs.h | 22
-rw-r--r-- drivers/scsi/lpfc/lpfc_disc.h | 1
-rw-r--r-- drivers/scsi/lpfc/lpfc_els.c | 93
-rw-r--r-- drivers/scsi/lpfc/lpfc_hbadisc.c | 157
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw.h | 3
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw4.h | 8
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 337
-rw-r--r-- drivers/scsi/lpfc/lpfc_mbox.c | 7
-rw-r--r-- drivers/scsi/lpfc/lpfc_mem.c | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_nportdisc.c | 8
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvme.c | 262
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvme.h | 12
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvmet.c | 476
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvmet.h | 13
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c | 4
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.c | 90
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli4.h | 8
-rw-r--r-- drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_vport.c | 3
-rw-r--r-- drivers/scsi/mac_esp.c | 2
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas.h | 4
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_base.c | 17
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fusion.c | 4
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.h | 3
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 19
-rw-r--r-- drivers/scsi/osd/osd_initiator.c | 9
-rw-r--r-- drivers/scsi/osst.c | 4
-rw-r--r-- drivers/scsi/qedf/Makefile | 2
-rw-r--r-- drivers/scsi/qedf/drv_fcoe_fw_funcs.c | 190
-rw-r--r-- drivers/scsi/qedf/drv_fcoe_fw_funcs.h | 93
-rw-r--r-- drivers/scsi/qedf/drv_scsi_fw_funcs.c | 44
-rw-r--r-- drivers/scsi/qedf/drv_scsi_fw_funcs.h | 85
-rw-r--r-- drivers/scsi/qedf/qedf.h | 23
-rw-r--r-- drivers/scsi/qedf/qedf_dbg.h | 13
-rw-r--r-- drivers/scsi/qedf/qedf_els.c | 25
-rw-r--r-- drivers/scsi/qedf/qedf_fip.c | 5
-rw-r--r-- drivers/scsi/qedf/qedf_io.c | 674
-rw-r--r-- drivers/scsi/qedf/qedf_main.c | 5
-rw-r--r-- drivers/scsi/qedi/Makefile | 2
-rw-r--r-- drivers/scsi/qedi/qedi_debugfs.c | 16
-rw-r--r-- drivers/scsi/qedi/qedi_fw.c | 1072
-rw-r--r-- drivers/scsi/qedi/qedi_fw_api.c | 781
-rw-r--r-- drivers/scsi/qedi/qedi_fw_iscsi.h | 117
-rw-r--r-- drivers/scsi/qedi/qedi_fw_scsi.h | 55
-rw-r--r-- drivers/scsi/qedi/qedi_gbl.h | 8
-rw-r--r-- drivers/scsi/qedi/qedi_iscsi.c | 20
-rw-r--r-- drivers/scsi/qedi/qedi_iscsi.h | 2
-rw-r--r-- drivers/scsi/qedi/qedi_main.c | 3
-rw-r--r-- drivers/scsi/qedi/qedi_version.h | 4
-rw-r--r-- drivers/scsi/qla2xxx/Kconfig | 1
-rw-r--r-- drivers/scsi/qla2xxx/qla_attr.c | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_bsg.c | 6
-rw-r--r-- drivers/scsi/qla2xxx/qla_dbg.c | 12
-rw-r--r-- drivers/scsi/qla2xxx/qla_dbg.h | 1
-rw-r--r-- drivers/scsi/qla2xxx/qla_def.h | 56
-rw-r--r-- drivers/scsi/qla2xxx/qla_dfs.c | 107
-rw-r--r-- drivers/scsi/qla2xxx/qla_gbl.h | 18
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 85
-rw-r--r-- drivers/scsi/qla2xxx/qla_iocb.c | 13
-rw-r--r-- drivers/scsi/qla2xxx/qla_isr.c | 41
-rw-r--r-- drivers/scsi/qla2xxx/qla_mbx.c | 304
-rw-r--r-- drivers/scsi/qla2xxx/qla_mid.c | 14
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 33
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.c | 748
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.h | 39
-rw-r--r-- drivers/scsi/qla2xxx/qla_version.h | 6
-rw-r--r-- drivers/scsi/qla2xxx/tcm_qla2xxx.c | 49
-rw-r--r-- drivers/scsi/scsi_debugfs.c | 13
-rw-r--r-- drivers/scsi/scsi_debugfs.h | 4
-rw-r--r-- drivers/scsi/scsi_error.c | 2
-rw-r--r-- drivers/scsi/scsi_lib.c | 49
-rw-r--r-- drivers/scsi/scsi_netlink.c | 2
-rw-r--r-- drivers/scsi/scsi_priv.h | 3
-rw-r--r-- drivers/scsi/scsi_transport_sas.c | 4
-rw-r--r-- drivers/scsi/sd.c | 298
-rw-r--r-- drivers/scsi/sd.h | 8
-rw-r--r-- drivers/scsi/sd_zbc.c | 1
-rw-r--r-- drivers/scsi/sg.c | 8
-rw-r--r-- drivers/scsi/snic/snic_scsi.c | 2
-rw-r--r-- drivers/scsi/sr.c | 6
-rw-r--r-- drivers/scsi/st.c | 8
-rw-r--r-- drivers/scsi/storvsc_drv.c | 27
-rw-r--r-- drivers/scsi/ufs/ufs.h | 22
-rw-r--r-- drivers/scsi/ufs/ufshcd-pltfrm.c | 4
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 235
-rw-r--r-- drivers/scsi/ufs/ufshcd.h | 15
-rw-r--r-- drivers/scsi/vmw_pvscsi.c | 2
-rw-r--r-- drivers/scsi/xen-scsifront.c | 2
108 files changed, 5106 insertions(+), 2356 deletions(-)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 230043c1c90f..3c52867dfe28 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1241,16 +1241,15 @@ config SCSI_LPFC
tristate "Emulex LightPulse Fibre Channel Support"
depends on PCI && SCSI
depends on SCSI_FC_ATTRS
- depends on NVME_FC && NVME_TARGET_FC
select CRC_T10DIF
- help
+ ---help---
This lpfc driver supports the Emulex LightPulse
Family of Fibre Channel PCI host adapters.
config SCSI_LPFC_DEBUG_FS
bool "Emulex LightPulse Fibre Channel debugfs Support"
depends on SCSI_LPFC && DEBUG_FS
- help
+ ---help---
This makes debugging information from the lpfc driver
available via the debugfs filesystem.
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index fc2855565a51..93dbe58c47c8 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -166,6 +166,7 @@ scsi_mod-y += scsi_scan.o scsi_sysfs.o scsi_devinfo.o
scsi_mod-$(CONFIG_SCSI_NETLINK) += scsi_netlink.o
scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o
scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
+scsi_mod-$(CONFIG_BLK_DEBUG_FS) += scsi_debugfs.o
scsi_mod-y += scsi_trace.o scsi_logging.o
scsi_mod-$(CONFIG_PM) += scsi_pm.o
scsi_mod-$(CONFIG_SCSI_DH) += scsi_dh.o
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index d036a806f31c..d281492009fb 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1690,9 +1690,6 @@ struct aac_dev
#define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \
(dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4)
-#define aac_adapter_check_health(dev) \
- (dev)->a_ops.adapter_check_health(dev)
-
#define aac_adapter_restart(dev, bled, reset_type) \
((dev)->a_ops.adapter_restart(dev, bled, reset_type))
@@ -2615,6 +2612,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
return capacity;
}
+static inline int aac_adapter_check_health(struct aac_dev *dev)
+{
+ if (unlikely(pci_channel_offline(dev->pdev)))
+ return -1;
+
+ return (dev)->a_ops.adapter_check_health(dev);
+}
+
/* SCp.phase values */
#define AAC_OWNER_MIDLEVEL 0x101
#define AAC_OWNER_LOWLEVEL 0x102
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index d08920d4b92c..7a1b8a2ce658 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1874,7 +1874,8 @@ int aac_check_health(struct aac_dev * aac)
spin_unlock_irqrestore(&aac->fib_lock, flagv);
if (BlinkLED < 0) {
- printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
+ printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n",
+ aac->name, BlinkLED);
goto out;
}
@@ -2057,7 +2058,6 @@ static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
{
struct hw_fib **hw_fib_p;
struct fib **fib_p;
- int rcode = 1;
hw_fib_p = hw_fib_pool;
fib_p = fib_pool;
@@ -2075,11 +2075,11 @@ static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
}
}
+ /*
+ * Get the actual number of allocated fibs
+ */
num = hw_fib_p - hw_fib_pool;
- if (!num)
- rcode = 0;
-
- return rcode;
+ return num;
}
static void wakeup_fibctx_threads(struct aac_dev *dev,
@@ -2187,7 +2187,6 @@ static void aac_process_events(struct aac_dev *dev)
struct fib *fib;
unsigned long flags;
spinlock_t *t_lock;
- unsigned int rcode;
t_lock = dev->queues->queue[HostNormCmdQueue].lock;
spin_lock_irqsave(t_lock, flags);
@@ -2270,8 +2269,8 @@ static void aac_process_events(struct aac_dev *dev)
* Fill up fib pointer pools with actual fibs
* and hw_fibs
*/
- rcode = fillup_pools(dev, hw_fib_pool, fib_pool, num);
- if (!rcode)
+ num = fillup_pools(dev, hw_fib_pool, fib_pool, num);
+ if (!num)
goto free_mem;
/*
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 2e5338dec621..7b0410e0f569 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -468,7 +468,7 @@ err_out:
return -1;
err_blink:
- return (status > 16) & 0xFF;
+ return (status >> 16) & 0xFF;
}
static inline u32 aac_get_vector(struct aac_dev *dev)
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 48e200102221..c01b47e5b55a 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -113,7 +113,7 @@ struct alua_queue_data {
#define ALUA_POLICY_SWITCH_ALL 1
static void alua_rtpg_work(struct work_struct *work);
-static void alua_rtpg_queue(struct alua_port_group *pg,
+static bool alua_rtpg_queue(struct alua_port_group *pg,
struct scsi_device *sdev,
struct alua_queue_data *qdata, bool force);
static void alua_check(struct scsi_device *sdev, bool force);
@@ -862,7 +862,13 @@ static void alua_rtpg_work(struct work_struct *work)
kref_put(&pg->kref, release_port_group);
}
-static void alua_rtpg_queue(struct alua_port_group *pg,
+/**
+ * alua_rtpg_queue() - cause RTPG to be submitted asynchronously
+ *
+ * Returns true if and only if alua_rtpg_work() will be called asynchronously.
+ * That function is responsible for calling @qdata->fn().
+ */
+static bool alua_rtpg_queue(struct alua_port_group *pg,
struct scsi_device *sdev,
struct alua_queue_data *qdata, bool force)
{
@@ -870,8 +876,8 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
unsigned long flags;
struct workqueue_struct *alua_wq = kaluad_wq;
- if (!pg)
- return;
+ if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev))
+ return false;
spin_lock_irqsave(&pg->lock, flags);
if (qdata) {
@@ -884,14 +890,12 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
pg->flags |= ALUA_PG_RUN_RTPG;
kref_get(&pg->kref);
pg->rtpg_sdev = sdev;
- scsi_device_get(sdev);
start_queue = 1;
} else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) {
pg->flags |= ALUA_PG_RUN_RTPG;
/* Do not queue if the worker is already running */
if (!(pg->flags & ALUA_PG_RUNNING)) {
kref_get(&pg->kref);
- sdev = NULL;
start_queue = 1;
}
}
@@ -900,13 +904,17 @@ static void alua_rtpg_queue(struct alua_port_group *pg,
alua_wq = kaluad_sync_wq;
spin_unlock_irqrestore(&pg->lock, flags);
- if (start_queue &&
- !queue_delayed_work(alua_wq, &pg->rtpg_work,
- msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) {
- if (sdev)
- scsi_device_put(sdev);
- kref_put(&pg->kref, release_port_group);
+ if (start_queue) {
+ if (queue_delayed_work(alua_wq, &pg->rtpg_work,
+ msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS)))
+ sdev = NULL;
+ else
+ kref_put(&pg->kref, release_port_group);
}
+ if (sdev)
+ scsi_device_put(sdev);
+
+ return true;
}
/*
@@ -1007,11 +1015,13 @@ static int alua_activate(struct scsi_device *sdev,
mutex_unlock(&h->init_mutex);
goto out;
}
- fn = NULL;
rcu_read_unlock();
mutex_unlock(&h->init_mutex);
- alua_rtpg_queue(pg, sdev, qdata, true);
+ if (alua_rtpg_queue(pg, sdev, qdata, true))
+ fn = NULL;
+ else
+ err = SCSI_DH_DEV_OFFLINED;
kref_put(&pg->kref, release_port_group);
out:
if (fn)
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index b35ed3829421..2d4b7f049a68 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -1289,32 +1289,13 @@ int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg)
|| (cmd > EXPRESS_IOCTL_MAX))
return -ENOTSUPP;
- if (!access_ok(VERIFY_WRITE, arg, sizeof(struct atto_express_ioctl))) {
+ ioctl = memdup_user(arg, sizeof(struct atto_express_ioctl));
+ if (IS_ERR(ioctl)) {
esas2r_log(ESAS2R_LOG_WARN,
"ioctl_handler access_ok failed for cmd %d, "
"address %p", cmd,
arg);
- return -EFAULT;
- }
-
- /* allocate a kernel memory buffer for the IOCTL data */
- ioctl = kzalloc(sizeof(struct atto_express_ioctl), GFP_KERNEL);
- if (ioctl == NULL) {
- esas2r_log(ESAS2R_LOG_WARN,
- "ioctl_handler kzalloc failed for %zu bytes",
- sizeof(struct atto_express_ioctl));
- return -ENOMEM;
- }
-
- err = __copy_from_user(ioctl, arg, sizeof(struct atto_express_ioctl));
- if (err != 0) {
- esas2r_log(ESAS2R_LOG_WARN,
- "copy_from_user didn't copy everything (err %d, cmd %d)",
- err,
- cmd);
- kfree(ioctl);
-
- return -EFAULT;
+ return PTR_ERR(ioctl);
}
/* verify the signature */
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 206b45e4b465..73daace478cb 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2960,7 +2960,7 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
/* fill_cmd can't fail here, no data buffer to map. */
(void) fill_cmd(c, reset_type, h, NULL, 0, 0,
scsi3addr, TYPE_MSG);
- rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
if (rc) {
dev_warn(&h->pdev->dev, "Failed to send reset command\n");
goto out;
@@ -3718,7 +3718,7 @@ exit_failed:
* # (integer code indicating one of several NOT READY states
* describing why a volume is to be kept offline)
*/
-static int hpsa_volume_offline(struct ctlr_info *h,
+static unsigned char hpsa_volume_offline(struct ctlr_info *h,
unsigned char scsi3addr[])
{
struct CommandList *c;
@@ -3739,7 +3739,7 @@ static int hpsa_volume_offline(struct ctlr_info *h,
DEFAULT_TIMEOUT);
if (rc) {
cmd_free(h, c);
- return 0;
+ return HPSA_VPD_LV_STATUS_UNSUPPORTED;
}
sense = c->err_info->SenseInfo;
if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
@@ -3750,19 +3750,13 @@ static int hpsa_volume_offline(struct ctlr_info *h,
cmd_status = c->err_info->CommandStatus;
scsi_status = c->err_info->ScsiStatus;
cmd_free(h, c);
- /* Is the volume 'not ready'? */
- if (cmd_status != CMD_TARGET_STATUS ||
- scsi_status != SAM_STAT_CHECK_CONDITION ||
- sense_key != NOT_READY ||
- asc != ASC_LUN_NOT_READY) {
- return 0;
- }
/* Determine the reason for not ready state */
ldstat = hpsa_get_volume_status(h, scsi3addr);
/* Keep volume offline in certain cases: */
switch (ldstat) {
+ case HPSA_LV_FAILED:
case HPSA_LV_UNDERGOING_ERASE:
case HPSA_LV_NOT_AVAILABLE:
case HPSA_LV_UNDERGOING_RPI:
@@ -3784,7 +3778,7 @@ static int hpsa_volume_offline(struct ctlr_info *h,
default:
break;
}
- return 0;
+ return HPSA_LV_OK;
}
/*
@@ -3857,10 +3851,10 @@ static int hpsa_update_device_info(struct ctlr_info *h,
/* Do an inquiry to the device to see what it is. */
if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
- /* Inquiry failed (msg printed already) */
dev_err(&h->pdev->dev,
- "hpsa_update_device_info: inquiry failed\n");
- rc = -EIO;
+ "%s: inquiry failed, device will be skipped.\n",
+ __func__);
+ rc = HPSA_INQUIRY_FAILED;
goto bail_out;
}
@@ -3889,15 +3883,20 @@ static int hpsa_update_device_info(struct ctlr_info *h,
if ((this_device->devtype == TYPE_DISK ||
this_device->devtype == TYPE_ZBC) &&
is_logical_dev_addr_mode(scsi3addr)) {
- int volume_offline;
+ unsigned char volume_offline;
hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
hpsa_get_ioaccel_status(h, scsi3addr, this_device);
volume_offline = hpsa_volume_offline(h, scsi3addr);
- if (volume_offline < 0 || volume_offline > 0xff)
- volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
- this_device->volume_offline = volume_offline & 0xff;
+ this_device->volume_offline = volume_offline;
+ if (volume_offline == HPSA_LV_FAILED) {
+ rc = HPSA_LV_FAILED;
+ dev_err(&h->pdev->dev,
+ "%s: LV failed, device will be skipped.\n",
+ __func__);
+ goto bail_out;
+ }
} else {
this_device->raid_level = RAID_UNKNOWN;
this_device->offload_config = 0;
@@ -4383,8 +4382,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
goto out;
}
if (rc) {
- dev_warn(&h->pdev->dev,
- "Inquiry failed, skipping device.\n");
+ h->drv_req_rescan = 1;
continue;
}
@@ -5562,7 +5560,7 @@ static void hpsa_scan_complete(struct ctlr_info *h)
spin_lock_irqsave(&h->scan_lock, flags);
h->scan_finished = 1;
- wake_up_all(&h->scan_wait_queue);
+ wake_up(&h->scan_wait_queue);
spin_unlock_irqrestore(&h->scan_lock, flags);
}
@@ -5580,11 +5578,23 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
if (unlikely(lockup_detected(h)))
return hpsa_scan_complete(h);
+ /*
+ * If a scan is already waiting to run, no need to add another
+ */
+ spin_lock_irqsave(&h->scan_lock, flags);
+ if (h->scan_waiting) {
+ spin_unlock_irqrestore(&h->scan_lock, flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&h->scan_lock, flags);
+
/* wait until any scan already in progress is finished. */
while (1) {
spin_lock_irqsave(&h->scan_lock, flags);
if (h->scan_finished)
break;
+ h->scan_waiting = 1;
spin_unlock_irqrestore(&h->scan_lock, flags);
wait_event(h->scan_wait_queue, h->scan_finished);
/* Note: We don't need to worry about a race between this
@@ -5594,6 +5604,7 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
*/
}
h->scan_finished = 0; /* mark scan as in progress */
+ h->scan_waiting = 0;
spin_unlock_irqrestore(&h->scan_lock, flags);
if (unlikely(lockup_detected(h)))
@@ -8796,6 +8807,7 @@ reinit_after_soft_reset:
init_waitqueue_head(&h->event_sync_wait_queue);
mutex_init(&h->reset_mutex);
h->scan_finished = 1; /* no scan currently in progress */
+ h->scan_waiting = 0;
pci_set_drvdata(pdev, h);
h->ndevices = 0;
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index bf6cdc106654..6f04f2ad4125 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -201,6 +201,7 @@ struct ctlr_info {
dma_addr_t errinfo_pool_dhandle;
unsigned long *cmd_pool_bits;
int scan_finished;
+ u8 scan_waiting : 1;
spinlock_t scan_lock;
wait_queue_head_t scan_wait_queue;
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index a584cdf07058..5961705eef76 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -156,6 +156,7 @@
#define CFGTBL_BusType_Fibre2G 0x00000200l
/* VPD Inquiry types */
+#define HPSA_INQUIRY_FAILED 0x02
#define HPSA_VPD_SUPPORTED_PAGES 0x00
#define HPSA_VPD_LV_DEVICE_ID 0x83
#define HPSA_VPD_LV_DEVICE_GEOMETRY 0xC1
@@ -166,6 +167,7 @@
/* Logical volume states */
#define HPSA_VPD_LV_STATUS_UNSUPPORTED 0xff
#define HPSA_LV_OK 0x0
+#define HPSA_LV_FAILED 0x01
#define HPSA_LV_NOT_AVAILABLE 0x0b
#define HPSA_LV_UNDERGOING_ERASE 0x0F
#define HPSA_LV_UNDERGOING_RPI 0x12
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index b3d1e4426ecc..b0c68d24db01 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6426,7 +6426,12 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
break;
case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
case IPR_IOASA_IR_DUAL_IOA_DISABLED:
- scsi_cmd->result |= (DID_PASSTHROUGH << 16);
+ /*
+ * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
+ * so SCSI mid-layer and upper layers handle it accordingly.
+ */
+ if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
+ scsi_cmd->result |= (DID_PASSTHROUGH << 16);
break;
case IPR_IOASC_BUS_WAS_RESET:
case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 4228aba1f654..bbea8eac9abb 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -387,7 +387,7 @@ static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
rc = 0;
}
- tsk_restore_flags(current, pflags, PF_MEMALLOC);
+ current_restore_flags(pflags, PF_MEMALLOC);
return rc;
}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index ec38a18c7fab..dd6828f7f772 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -561,8 +561,12 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
task->state = state;
- if (!list_empty(&task->running))
+ spin_lock_bh(&conn->taskqueuelock);
+ if (!list_empty(&task->running)) {
+ pr_debug_once("%s while task on list", __func__);
list_del_init(&task->running);
+ }
+ spin_unlock_bh(&conn->taskqueuelock);
if (conn->task == task)
conn->task = NULL;
@@ -784,7 +788,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
if (session->tt->xmit_task(task))
goto free_task;
} else {
+ spin_lock_bh(&conn->taskqueuelock);
list_add_tail(&task->running, &conn->mgmtqueue);
+ spin_unlock_bh(&conn->taskqueuelock);
iscsi_conn_queue_work(conn);
}
@@ -1475,8 +1481,10 @@ void iscsi_requeue_task(struct iscsi_task *task)
* this may be on the requeue list already if the xmit_task callout
* is handling the r2ts while we are adding new ones
*/
+ spin_lock_bh(&conn->taskqueuelock);
if (list_empty(&task->running))
list_add_tail(&task->running, &conn->requeue);
+ spin_unlock_bh(&conn->taskqueuelock);
iscsi_conn_queue_work(conn);
}
EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1513,22 +1521,26 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
* only have one nop-out as a ping from us and targets should not
* overflow us with nop-ins
*/
+ spin_lock_bh(&conn->taskqueuelock);
check_mgmt:
while (!list_empty(&conn->mgmtqueue)) {
conn->task = list_entry(conn->mgmtqueue.next,
struct iscsi_task, running);
list_del_init(&conn->task->running);
+ spin_unlock_bh(&conn->taskqueuelock);
if (iscsi_prep_mgmt_task(conn, conn->task)) {
/* regular RX path uses back_lock */
spin_lock_bh(&conn->session->back_lock);
__iscsi_put_task(conn->task);
spin_unlock_bh(&conn->session->back_lock);
conn->task = NULL;
+ spin_lock_bh(&conn->taskqueuelock);
continue;
}
rc = iscsi_xmit_task(conn);
if (rc)
goto done;
+ spin_lock_bh(&conn->taskqueuelock);
}
/* process pending command queue */
@@ -1536,19 +1548,24 @@ check_mgmt:
conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
running);
list_del_init(&conn->task->running);
+ spin_unlock_bh(&conn->taskqueuelock);
if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
fail_scsi_task(conn->task, DID_IMM_RETRY);
+ spin_lock_bh(&conn->taskqueuelock);
continue;
}
rc = iscsi_prep_scsi_cmd_pdu(conn->task);
if (rc) {
if (rc == -ENOMEM || rc == -EACCES) {
+ spin_lock_bh(&conn->taskqueuelock);
list_add_tail(&conn->task->running,
&conn->cmdqueue);
conn->task = NULL;
+ spin_unlock_bh(&conn->taskqueuelock);
goto done;
} else
fail_scsi_task(conn->task, DID_ABORT);
+ spin_lock_bh(&conn->taskqueuelock);
continue;
}
rc = iscsi_xmit_task(conn);
@@ -1559,6 +1576,7 @@ check_mgmt:
* we need to check the mgmt queue for nops that need to
* be sent to avoid starvation
*/
+ spin_lock_bh(&conn->taskqueuelock);
if (!list_empty(&conn->mgmtqueue))
goto check_mgmt;
}
@@ -1578,12 +1596,15 @@ check_mgmt:
conn->task = task;
list_del_init(&conn->task->running);
conn->task->state = ISCSI_TASK_RUNNING;
+ spin_unlock_bh(&conn->taskqueuelock);
rc = iscsi_xmit_task(conn);
if (rc)
goto done;
+ spin_lock_bh(&conn->taskqueuelock);
if (!list_empty(&conn->mgmtqueue))
goto check_mgmt;
}
+ spin_unlock_bh(&conn->taskqueuelock);
spin_unlock_bh(&conn->session->frwd_lock);
return -ENODATA;
@@ -1739,7 +1760,9 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
goto prepd_reject;
}
} else {
+ spin_lock_bh(&conn->taskqueuelock);
list_add_tail(&task->running, &conn->cmdqueue);
+ spin_unlock_bh(&conn->taskqueuelock);
iscsi_conn_queue_work(conn);
}
@@ -2897,6 +2920,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
INIT_LIST_HEAD(&conn->mgmtqueue);
INIT_LIST_HEAD(&conn->cmdqueue);
INIT_LIST_HEAD(&conn->requeue);
+ spin_lock_init(&conn->taskqueuelock);
INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
/* allocate login_task used for the login/text sequences */
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 763f012fdeca..87f5e694dbed 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -221,7 +221,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
task->num_scatter = qc->n_elem;
} else {
for_each_sg(qc->sg, sg, qc->n_elem, si)
- xfer += sg->length;
+ xfer += sg_dma_len(sg);
task->total_xfer_len = xfer;
task->num_scatter = si;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 0bba2e30b4f0..6d7840b096e6 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -56,7 +56,7 @@ struct lpfc_sli2_slim;
#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
#define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */
#define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */
-#define LPFC_MIN_NVME_SEG_CNT 254
+#define LPFC_MAX_NVME_SEG_CNT 128 /* max SGL element cnt per NVME cmnd */
#define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
@@ -99,12 +99,13 @@ struct lpfc_sli2_slim;
#define FC_MAX_ADPTMSG 64
#define MAX_HBAEVT 32
+#define MAX_HBAS_NO_RESET 16
/* Number of MSI-X vectors the driver uses */
#define LPFC_MSIX_VECTORS 2
/* lpfc wait event data ready flag */
-#define LPFC_DATA_READY (1<<0)
+#define LPFC_DATA_READY 0 /* bit 0 */
/* queue dump line buffer size */
#define LPFC_LBUF_SZ 128
@@ -473,6 +474,8 @@ struct lpfc_vport {
unsigned long rcv_buffer_time_stamp;
uint32_t vport_flag;
#define STATIC_VPORT 1
+#define FAWWPN_SET 2
+#define FAWWPN_PARAM_CHG 4
uint16_t fdmi_num_disc;
uint32_t fdmi_hba_mask;
@@ -692,6 +695,7 @@ struct lpfc_hba {
* capability
*/
#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */
+#define NVME_XRI_ABORT_EVENT 0x100000
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
@@ -779,6 +783,7 @@ struct lpfc_hba {
uint32_t cfg_nvmet_fb_size;
uint32_t cfg_total_seg_cnt;
uint32_t cfg_sg_seg_cnt;
+ uint32_t cfg_nvme_seg_cnt;
uint32_t cfg_sg_dma_buf_size;
uint64_t cfg_soft_wwnn;
uint64_t cfg_soft_wwpn;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 7c2801b04e18..4830370bfab1 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -2292,6 +2292,8 @@ lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
unsigned int cnt = count;
+ uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
+ u32 *fawwpn_key = (uint32_t *)&vport->fc_sparam.un.vendorVersion[0];
/*
* We're doing a simple sanity check for soft_wwpn setting.
@@ -2305,6 +2307,12 @@ lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
* here. The intent is to protect against the random user or
* application that is just writing attributes.
*/
+ if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0051 "LPFC_DRIVER_NAME" soft wwpn can not"
+ " be enabled: fawwpn is enabled\n");
+ return -EINVAL;
+ }
/* count may include a LF at end of string */
if (buf[cnt-1] == '\n')
@@ -3010,6 +3018,12 @@ MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
lpfc_poll_show, lpfc_poll_store);
+int lpfc_no_hba_reset_cnt;
+unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444);
+MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset");
+
LPFC_ATTR(sli_mode, 0, 0, 3,
"SLI mode selector:"
" 0 - auto (SLI-3 if supported),"
@@ -3309,9 +3323,9 @@ LPFC_ATTR_R(nvmet_mrq_post, LPFC_DEF_MRQ_POST,
* lpfc_enable_fc4_type: Defines what FC4 types are supported.
* Supported Values: 1 - register just FCP
* 3 - register both FCP and NVME
- * Supported values are [1,3]. Default value is 3
+ * Supported values are [1,3]. Default value is 1
*/
-LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH,
+LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP,
LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
"Define fc4 type to register with fabric.");
@@ -3329,7 +3343,7 @@ LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH,
* percentage will go to NVME.
*/
LPFC_ATTR_R(xri_split, 50, 10, 90,
- "Division of XRI resources between SCSI and NVME");
+ "Division of XRI resources between SCSI and NVME");
/*
# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
@@ -4451,7 +4465,8 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
phba->cfg_fcp_imax = (uint32_t)val;
- for (i = 0; i < phba->io_channel_irqs; i++)
+
+ for (i = 0; i < phba->io_channel_irqs; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
lpfc_modify_hba_eq_delay(phba, i);
return strlen(buf);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 18157d2840a3..a1686c2d863c 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2486,6 +2486,10 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
mbox, *rpi);
else {
*rpi = lpfc_sli4_alloc_rpi(phba);
+ if (*rpi == LPFC_RPI_ALLOC_ERROR) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return -EBUSY;
+ }
status = lpfc_reg_rpi(phba, phba->pport->vpi,
phba->pport->fc_myDID,
(uint8_t *)&phba->pport->fc_sparam,
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 843dd73004da..944b32ca4931 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -24,6 +24,7 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *);
struct fc_rport;
struct fc_frame_header;
+struct lpfc_nvmet_rcv_ctx;
void lpfc_down_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli_read_link_ste(struct lpfc_hba *);
void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t);
@@ -99,7 +100,7 @@ void lpfc_issue_reg_vpi(struct lpfc_hba *, struct lpfc_vport *);
int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *, struct lpfc_nodelist *);
-void lpfc_nlp_init(struct lpfc_vport *, struct lpfc_nodelist *, uint32_t);
+struct lpfc_nodelist *lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did);
struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *);
int lpfc_nlp_put(struct lpfc_nodelist *);
int lpfc_nlp_not_used(struct lpfc_nodelist *ndlp);
@@ -245,6 +246,10 @@ struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
struct rqb_dmabuf *lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba);
void lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab);
+void lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp,
+ struct lpfc_dmabuf *mp);
+int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
+ struct fc_frame_header *fc_hdr);
void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
uint16_t);
int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
@@ -302,6 +307,8 @@ int lpfc_sli_check_eratt(struct lpfc_hba *);
void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
struct lpfc_sli_ring *, uint32_t);
void lpfc_sli4_handle_received_buffer(struct lpfc_hba *, struct hbq_dmabuf *);
+void lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
+ struct fc_frame_header *fc_hdr, bool aborted);
void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
@@ -384,7 +391,7 @@ void lpfc_free_sysfs_attr(struct lpfc_vport *);
extern struct device_attribute *lpfc_hba_attrs[];
extern struct device_attribute *lpfc_vport_attrs[];
extern struct scsi_host_template lpfc_template;
-extern struct scsi_host_template lpfc_template_s3;
+extern struct scsi_host_template lpfc_template_no_hr;
extern struct scsi_host_template lpfc_template_nvme;
extern struct scsi_host_template lpfc_vport_template;
extern struct fc_function_template lpfc_transport_functions;
@@ -554,3 +561,5 @@ void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
struct lpfc_wcqe_complete *abts_cmpl);
extern int lpfc_enable_nvmet_cnt;
extern unsigned long long lpfc_enable_nvmet[];
+extern int lpfc_no_hba_reset_cnt;
+extern unsigned long lpfc_no_hba_reset[];
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index c22bb3f887e1..1487406aea77 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -537,19 +537,53 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
}
}
+static void
+lpfc_ns_rsp_audit_did(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nodelist *ndlp = NULL;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ /*
+ * To conserve rpi's, filter out addresses for other
+ * vports on the same physical HBAs.
+ */
+ if (Did != vport->fc_myDID &&
+ (!lpfc_find_vport_by_did(phba, Did) ||
+ vport->cfg_peer_port_login)) {
+ if (!phba->nvmet_support) {
+ /* FCPI/NVMEI path. Process Did */
+ lpfc_prep_node_fc4type(vport, Did, fc4_type);
+ return;
+ }
+ /* NVMET path. NVMET only cares about NVMEI nodes. */
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ if (ndlp->nlp_type != NLP_NVME_INITIATOR ||
+ ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
+ continue;
+ spin_lock_irq(shost->host_lock);
+ if (ndlp->nlp_DID == Did)
+ ndlp->nlp_flag &= ~NLP_NVMET_RECOV;
+ else
+ ndlp->nlp_flag |= NLP_NVMET_RECOV;
+ spin_unlock_irq(shost->host_lock);
+ }
+ }
+}
+
static int
lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type,
uint32_t Size)
{
- struct lpfc_hba *phba = vport->phba;
struct lpfc_sli_ct_request *Response =
(struct lpfc_sli_ct_request *) mp->virt;
- struct lpfc_nodelist *ndlp = NULL;
struct lpfc_dmabuf *mlast, *next_mp;
uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType;
uint32_t Did, CTentry;
int Cnt;
struct list_head head;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp = NULL;
lpfc_set_disctmo(vport);
vport->num_disc_nodes = 0;
@@ -574,19 +608,7 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type,
/* Get next DID from NameServer List */
CTentry = *ctptr++;
Did = ((be32_to_cpu(CTentry)) & Mask_DID);
-
- ndlp = NULL;
-
- /*
- * Check for rscn processing or not
- * To conserve rpi's, filter out addresses for other
- * vports on the same physical HBAs.
- */
- if ((Did != vport->fc_myDID) &&
- ((lpfc_find_vport_by_did(phba, Did) == NULL) ||
- vport->cfg_peer_port_login))
- lpfc_prep_node_fc4type(vport, Did, fc4_type);
-
+ lpfc_ns_rsp_audit_did(vport, Did, fc4_type);
if (CTentry & (cpu_to_be32(SLI_CT_LAST_ENTRY)))
goto nsout1;
@@ -596,6 +618,22 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type,
}
+ /* All GID_FT entries processed. If the driver is running in
+ * target mode, put impacted nodes into recovery and drop
+ * the RPI to flush outstanding IO.
+ */
+ if (vport->phba->nvmet_support) {
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!(ndlp->nlp_flag & NLP_NVMET_RECOV))
+ continue;
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RECOVERY);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_NVMET_RECOV;
+ spin_unlock_irq(shost->host_lock);
+ }
+ }
+
nsout1:
list_del(&head);
return 0;
@@ -939,8 +977,8 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"FC4 x%08x, Data: x%08x x%08x\n",
ndlp, did, ndlp->nlp_fc4_type,
FC_TYPE_FCP, FC_TYPE_NVME);
+ ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
}
- ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
lpfc_issue_els_prli(vport, ndlp, 0);
} else
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 9f4798e9d938..fce549a91911 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -745,73 +745,102 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_nvmet_tgtport *tgtp;
+ struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
int len = 0;
+ int cnt;
if (phba->nvmet_support) {
if (!phba->targetport)
return len;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- len += snprintf(buf+len, size-len,
+ len += snprintf(buf + len, size - len,
"\nNVME Targetport Statistics\n");
- len += snprintf(buf+len, size-len,
+ len += snprintf(buf + len, size - len,
"LS: Rcv %08x Drop %08x Abort %08x\n",
atomic_read(&tgtp->rcv_ls_req_in),
atomic_read(&tgtp->rcv_ls_req_drop),
atomic_read(&tgtp->xmt_ls_abort));
if (atomic_read(&tgtp->rcv_ls_req_in) !=
atomic_read(&tgtp->rcv_ls_req_out)) {
- len += snprintf(buf+len, size-len,
+ len += snprintf(buf + len, size - len,
"Rcv LS: in %08x != out %08x\n",
atomic_read(&tgtp->rcv_ls_req_in),
atomic_read(&tgtp->rcv_ls_req_out));
}
- len += snprintf(buf+len, size-len,
+ len += snprintf(buf + len, size - len,
"LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n",
atomic_read(&tgtp->xmt_ls_rsp),
atomic_read(&tgtp->xmt_ls_drop),
atomic_read(&tgtp->xmt_ls_rsp_cmpl),
atomic_read(&tgtp->xmt_ls_rsp_error));
- len += snprintf(buf+len, size-len,
+ len += snprintf(buf + len, size - len,
"FCP: Rcv %08x Drop %08x\n",
atomic_read(&tgtp->rcv_fcp_cmd_in),
atomic_read(&tgtp->rcv_fcp_cmd_drop));
if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
atomic_read(&tgtp->rcv_fcp_cmd_out)) {
- len += snprintf(buf+len, size-len,
+ len += snprintf(buf + len, size - len,
"Rcv FCP: in %08x != out %08x\n",
atomic_read(&tgtp->rcv_fcp_cmd_in),
atomic_read(&tgtp->rcv_fcp_cmd_out));
}
- len += snprintf(buf+len, size-len,
- "FCP Rsp: read %08x readrsp %08x write %08x rsp %08x\n",
+ len += snprintf(buf + len, size - len,
+ "FCP Rsp: read %08x readrsp %08x "
+ "write %08x rsp %08x\n",
atomic_read(&tgtp->xmt_fcp_read),
atomic_read(&tgtp->xmt_fcp_read_rsp),
atomic_read(&tgtp->xmt_fcp_write),
atomic_read(&tgtp->xmt_fcp_rsp));
- len += snprintf(buf+len, size-len,
+ len += snprintf(buf + len, size - len,
"FCP Rsp: abort %08x drop %08x\n",
atomic_read(&tgtp->xmt_fcp_abort),
atomic_read(&tgtp->xmt_fcp_drop));
- len += snprintf(buf+len, size-len,
+ len += snprintf(buf + len, size - len,
"FCP Rsp Cmpl: %08x err %08x drop %08x\n",
atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
atomic_read(&tgtp->xmt_fcp_rsp_error),
atomic_read(&tgtp->xmt_fcp_rsp_drop));
- len += snprintf(buf+len, size-len,
+ len += snprintf(buf + len, size - len,
"ABORT: Xmt %08x Err %08x Cmpl %08x",
atomic_read(&tgtp->xmt_abort_rsp),
atomic_read(&tgtp->xmt_abort_rsp_error),
atomic_read(&tgtp->xmt_abort_cmpl));
- len += snprintf(buf+len, size-len, "\n");
+ len += snprintf(buf + len, size - len, "\n");
+
+ cnt = 0;
+ spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ list_for_each_entry_safe(ctxp, next_ctxp,
+ &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
+ list) {
+ cnt++;
+ }
+ spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ if (cnt) {
+ len += snprintf(buf + len, size - len,
+ "ABORT: %d ctx entries\n", cnt);
+ spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ list_for_each_entry_safe(ctxp, next_ctxp,
+ &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
+ list) {
+ if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ))
+ break;
+ len += snprintf(buf + len, size - len,
+ "Entry: oxid %x state %x "
+ "flag %x\n",
+ ctxp->oxid, ctxp->state,
+ ctxp->flag);
+ }
+ spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ }
} else {
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return len;
@@ -3128,8 +3157,6 @@ __lpfc_idiag_print_rqpair(struct lpfc_queue *qp, struct lpfc_queue *datqp,
datqp->queue_id, datqp->entry_count,
datqp->entry_size, datqp->host_index,
datqp->hba_index);
- len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
-
return len;
}
@@ -3653,17 +3680,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
idiag.ptr_private = phba->sli4_hba.nvmels_cq;
goto pass_check;
}
- /* NVME LS complete queue */
- if (phba->sli4_hba.nvmels_cq &&
- phba->sli4_hba.nvmels_cq->queue_id == queid) {
- /* Sanity check */
- rc = lpfc_idiag_que_param_check(
- phba->sli4_hba.nvmels_cq, index, count);
- if (rc)
- goto error_out;
- idiag.ptr_private = phba->sli4_hba.nvmels_cq;
- goto pass_check;
- }
/* FCP complete queue */
if (phba->sli4_hba.fcp_cq) {
for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
@@ -3738,17 +3754,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
idiag.ptr_private = phba->sli4_hba.nvmels_wq;
goto pass_check;
}
- /* NVME LS work queue */
- if (phba->sli4_hba.nvmels_wq &&
- phba->sli4_hba.nvmels_wq->queue_id == queid) {
- /* Sanity check */
- rc = lpfc_idiag_que_param_check(
- phba->sli4_hba.nvmels_wq, index, count);
- if (rc)
- goto error_out;
- idiag.ptr_private = phba->sli4_hba.nvmels_wq;
- goto pass_check;
- }
/* FCP work queue */
if (phba->sli4_hba.fcp_wq) {
for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
@@ -5722,10 +5727,8 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct lpfc_hba *phba = vport->phba;
- if (vport->disc_trc) {
- kfree(vport->disc_trc);
- vport->disc_trc = NULL;
- }
+ kfree(vport->disc_trc);
+ vport->disc_trc = NULL;
debugfs_remove(vport->debug_disc_trc); /* discovery_trace */
vport->debug_disc_trc = NULL;
@@ -5792,10 +5795,8 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(phba->debug_readRef); /* readRef */
phba->debug_readRef = NULL;
- if (phba->slow_ring_trc) {
- kfree(phba->slow_ring_trc);
- phba->slow_ring_trc = NULL;
- }
+ kfree(phba->slow_ring_trc);
+ phba->slow_ring_trc = NULL;
/* slow_ring_trace */
debugfs_remove(phba->debug_slow_ring_trc);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index c05f56c3023f..7b7d314af0e0 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -44,14 +44,6 @@
/* hbqinfo output buffer size */
#define LPFC_HBQINFO_SIZE 8192
-enum {
- DUMP_FCP,
- DUMP_NVME,
- DUMP_MBX,
- DUMP_ELS,
- DUMP_NVMELS,
-};
-
/* nvmestat output buffer size */
#define LPFC_NVMESTAT_SIZE 8192
#define LPFC_NVMEKTIME_SIZE 8192
@@ -283,8 +275,22 @@ struct lpfc_idiag {
struct lpfc_idiag_offset offset;
void *ptr_private;
};
+
+#else
+
+#define lpfc_nvmeio_data(phba, fmt, arg...) \
+ no_printk(fmt, ##arg)
+
#endif
+enum {
+ DUMP_FCP,
+ DUMP_NVME,
+ DUMP_MBX,
+ DUMP_ELS,
+ DUMP_NVMELS,
+};
+
/* Mask for discovery_trace */
#define LPFC_DISC_TRC_ELS_CMD 0x1 /* Trace ELS commands */
#define LPFC_DISC_TRC_ELS_RSP 0x2 /* Trace ELS response */
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index f4ff99d95db3..9d5a379f4b15 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -157,6 +157,7 @@ struct lpfc_node_rrq {
#define NLP_LOGO_SND 0x00000100 /* sent LOGO request for this entry */
#define NLP_RNID_SND 0x00000400 /* sent RNID request for this entry */
#define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */
+#define NLP_NVMET_RECOV 0x00001000 /* NVMET auditing node for recovery. */
#define NLP_DEFER_RM 0x00010000 /* Remove this ndlp if no longer used */
#define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */
#define NLP_NPR_2B_DISC 0x00040000 /* node is included in num_disc_nodes */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 2d26440e6f2f..67827e397431 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -603,9 +603,11 @@ lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
memcmp(&vport->fabric_portname, &sp->portName,
sizeof(struct lpfc_name)) ||
memcmp(&vport->fabric_nodename, &sp->nodeName,
- sizeof(struct lpfc_name)))
+ sizeof(struct lpfc_name)) ||
+ (vport->vport_flag & FAWWPN_PARAM_CHG)) {
fabric_param_changed = 1;
-
+ vport->vport_flag &= ~FAWWPN_PARAM_CHG;
+ }
/*
* Word 1 Bit 31 in common service parameter is overloaded.
* Word 1 Bit 31 in FLOGI request is multiple NPort request
@@ -895,10 +897,9 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* Cannot find existing Fabric ndlp, so allocate a
* new one
*/
- ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
if (!ndlp)
goto fail;
- lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
} else if (!NLP_CHK_NODE_ACT(ndlp)) {
ndlp = lpfc_enable_node(vport, ndlp,
NLP_STE_UNUSED_NODE);
@@ -1364,7 +1365,6 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
int
lpfc_initial_flogi(struct lpfc_vport *vport)
{
- struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp;
vport->port_state = LPFC_FLOGI;
@@ -1374,10 +1374,9 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (!ndlp) {
/* Cannot find existing Fabric ndlp, so allocate a new one */
- ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ ndlp = lpfc_nlp_init(vport, Fabric_DID);
if (!ndlp)
return 0;
- lpfc_nlp_init(vport, ndlp, Fabric_DID);
/* Set the node type */
ndlp->nlp_type |= NLP_FABRIC;
/* Put ndlp onto node list */
@@ -1418,17 +1417,15 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
int
lpfc_initial_fdisc(struct lpfc_vport *vport)
{
- struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp;
/* First look for the Fabric ndlp */
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (!ndlp) {
/* Cannot find existing Fabric ndlp, so allocate a new one */
- ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ ndlp = lpfc_nlp_init(vport, Fabric_DID);
if (!ndlp)
return 0;
- lpfc_nlp_init(vport, ndlp, Fabric_DID);
/* Put ndlp onto node list */
lpfc_enqueue_node(vport, ndlp);
} else if (!NLP_CHK_NODE_ACT(ndlp)) {
@@ -1564,14 +1561,13 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
phba->active_rrq_pool);
return ndlp;
}
- new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
+ new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID);
if (!new_ndlp) {
if (active_rrqs_xri_bitmap)
mempool_free(active_rrqs_xri_bitmap,
phba->active_rrq_pool);
return ndlp;
}
- lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
} else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
rc = memcmp(&ndlp->nlp_portname, name,
sizeof(struct lpfc_name));
@@ -2845,10 +2841,9 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
ndlp = lpfc_findnode_did(vport, nportid);
if (!ndlp) {
- ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ ndlp = lpfc_nlp_init(vport, nportid);
if (!ndlp)
return 1;
- lpfc_nlp_init(vport, ndlp, nportid);
lpfc_enqueue_node(vport, ndlp);
} else if (!NLP_CHK_NODE_ACT(ndlp)) {
ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
@@ -2938,10 +2933,9 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
ndlp = lpfc_findnode_did(vport, nportid);
if (!ndlp) {
- ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ ndlp = lpfc_nlp_init(vport, nportid);
if (!ndlp)
return 1;
- lpfc_nlp_init(vport, ndlp, nportid);
lpfc_enqueue_node(vport, ndlp);
} else if (!NLP_CHK_NODE_ACT(ndlp)) {
ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
@@ -4403,7 +4397,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
memset(pcmd, 0, cmdsize);
- *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
+ *((uint32_t *)(pcmd)) = elsrspcmd;
pcmd += sizeof(uint32_t);
/* For PRLI, remainder of payload is PRLI parameter page */
@@ -5177,15 +5171,15 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
static uint32_t
lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
- struct lpfc_hba *phba)
+ struct lpfc_vport *vport)
{
desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
- memcpy(desc->port_names.wwnn, phba->wwnn,
+ memcpy(desc->port_names.wwnn, &vport->fc_nodename,
sizeof(desc->port_names.wwnn));
- memcpy(desc->port_names.wwpn, phba->wwpn,
+ memcpy(desc->port_names.wwpn, &vport->fc_portname,
sizeof(desc->port_names.wwpn));
desc->length = cpu_to_be32(sizeof(desc->port_names));
@@ -5279,7 +5273,7 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *)
(len + pcmd), &rdp_context->link_stat);
len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *)
- (len + pcmd), phba);
+ (len + pcmd), vport);
len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *)
(len + pcmd), vport, ndlp);
len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd),
@@ -5867,8 +5861,11 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
(ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
!lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
continue;
+
+ /* NVME Target mode does not do RSCN Recovery. */
if (vport->phba->nvmet_support)
continue;
+
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY);
lpfc_cancel_retry_delay_tmo(vport, ndlp);
@@ -6133,7 +6130,6 @@ int
lpfc_els_handle_rscn(struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp;
- struct lpfc_hba *phba = vport->phba;
/* Ignore RSCN if the port is being torn down. */
if (vport->load_flag & FC_UNLOADING) {
@@ -6157,22 +6153,16 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
ndlp = lpfc_findnode_did(vport, NameServer_DID);
if (ndlp && NLP_CHK_NODE_ACT(ndlp)
&& ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
- /* Good ndlp, issue CT Request to NameServer */
+ /* Good ndlp, issue CT Request to NameServer. Need to
+ * know how many gidfts were issued. If none, then just
+ * flush the RSCN. Otherwise, the outstanding requests
+ * need to complete.
+ */
vport->gidft_inp = 0;
- if (lpfc_issue_gidft(vport) == 0)
- /* Wait for NameServer query cmpl before we can
- * continue
- */
+ if (lpfc_issue_gidft(vport) > 0)
return 1;
} else {
- /* If login to NameServer does not exist, issue one */
- /* Good status, issue PLOGI to NameServer */
- ndlp = lpfc_findnode_did(vport, NameServer_DID);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp))
- /* Wait for NameServer login cmpl before we can
- continue */
- return 1;
-
+ /* Nameserver login in question. Revalidate. */
if (ndlp) {
ndlp = lpfc_enable_node(vport, ndlp,
NLP_STE_PLOGI_ISSUE);
@@ -6182,12 +6172,11 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
}
ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
} else {
- ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ ndlp = lpfc_nlp_init(vport, NameServer_DID);
if (!ndlp) {
lpfc_els_flush_rscn(vport);
return 0;
}
- lpfc_nlp_init(vport, ndlp, NameServer_DID);
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
}
@@ -7746,11 +7735,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
ndlp = lpfc_findnode_did(vport, did);
if (!ndlp) {
/* Cannot find existing Fabric ndlp, so allocate a new one */
- ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ ndlp = lpfc_nlp_init(vport, did);
if (!ndlp)
goto dropit;
-
- lpfc_nlp_init(vport, ndlp, did);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
newnode = 1;
if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
@@ -7968,7 +7955,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
did, vport->port_state, ndlp->nlp_flag);
phba->fc_stat.elsRcvPRLI++;
- if (vport->port_state < LPFC_DISC_AUTH) {
+ if ((vport->port_state < LPFC_DISC_AUTH) &&
+ (vport->fc_flag & FC_FABRIC)) {
rjt_err = LSRJT_UNABLE_TPC;
rjt_exp = LSEXP_NOTHING_MORE;
break;
@@ -8192,7 +8180,6 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
static void
lpfc_start_fdmi(struct lpfc_vport *vport)
{
- struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp;
/* If this is the first time, allocate an ndlp and initialize
@@ -8201,9 +8188,8 @@ lpfc_start_fdmi(struct lpfc_vport *vport)
*/
ndlp = lpfc_findnode_did(vport, FDMI_DID);
if (!ndlp) {
- ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ ndlp = lpfc_nlp_init(vport, FDMI_DID);
if (ndlp) {
- lpfc_nlp_init(vport, ndlp, FDMI_DID);
ndlp->nlp_type |= NLP_FABRIC;
} else {
return;
@@ -8256,7 +8242,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
ndlp = lpfc_findnode_did(vport, NameServer_DID);
if (!ndlp) {
- ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ ndlp = lpfc_nlp_init(vport, NameServer_DID);
if (!ndlp) {
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
lpfc_disc_start(vport);
@@ -8267,7 +8253,6 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
"0251 NameServer login: no memory\n");
return;
}
- lpfc_nlp_init(vport, ndlp, NameServer_DID);
} else if (!NLP_CHK_NODE_ACT(ndlp)) {
ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
if (!ndlp) {
@@ -8371,11 +8356,17 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
- if (vport->port_type == LPFC_PHYSICAL_PORT
- && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
- lpfc_issue_init_vfi(vport);
- else
+ if (mb->mbxStatus == MBX_NOT_FINISHED)
+ break;
+ if ((vport->port_type == LPFC_PHYSICAL_PORT) &&
+ !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_issue_init_vfi(vport);
+ else
+ lpfc_initial_flogi(vport);
+ } else {
lpfc_initial_fdisc(vport);
+ }
break;
}
} else {
@@ -8764,7 +8755,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
pcmd += sizeof(uint32_t); /* Node Name */
pcmd += sizeof(uint32_t); /* Node Name */
memcpy(pcmd, &vport->fc_nodename, 8);
-
+ memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
lpfc_set_disctmo(vport);
phba->fc_stat.elsXmitFDISC++;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 194a14d5f8a9..0482c5580331 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -313,8 +313,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
ndlp->nlp_state, ndlp->nlp_rpi);
}
- if (!(vport->load_flag & FC_UNLOADING) &&
- !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
+ if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
!(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
(ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
@@ -641,6 +640,8 @@ lpfc_work_done(struct lpfc_hba *phba)
lpfc_handle_rrq_active(phba);
if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
lpfc_sli4_fcp_xri_abort_event_proc(phba);
+ if (phba->hba_flag & NVME_XRI_ABORT_EVENT)
+ lpfc_sli4_nvme_xri_abort_event_proc(phba);
if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
lpfc_sli4_els_xri_abort_event_proc(phba);
if (phba->hba_flag & ASYNC_EVENT)
@@ -2173,7 +2174,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
uint32_t boot_flag, addr_mode;
uint16_t fcf_index, next_fcf_index;
struct lpfc_fcf_rec *fcf_rec = NULL;
- uint16_t vlan_id;
+ uint16_t vlan_id = LPFC_FCOE_NULL_VID;
bool select_new_fcf;
int rc;
@@ -3001,6 +3002,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
MAILBOX_t *mb = &pmb->u.mb;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
struct lpfc_vport *vport = pmb->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct serv_parm *sp = &vport->fc_sparam;
uint32_t ed_tov;
@@ -3030,6 +3032,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
lpfc_update_vport_wwn(vport);
+ fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
if (vport->port_type == LPFC_PHYSICAL_PORT) {
memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
@@ -3308,6 +3311,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_sli_ring *pring;
MAILBOX_t *mb = &pmb->u.mb;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+ uint8_t attn_type;
/* Unblock ELS traffic */
pring = lpfc_phba_elsring(phba);
@@ -3324,6 +3328,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
+ attn_type = bf_get(lpfc_mbx_read_top_att_type, la);
memcpy(&phba->alpa_map[0], mp->virt, 128);
@@ -3336,7 +3341,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (phba->fc_eventTag <= la->eventTag) {
phba->fc_stat.LinkMultiEvent++;
- if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)
+ if (attn_type == LPFC_ATT_LINK_UP)
if (phba->fc_eventTag != 0)
lpfc_linkdown(phba);
}
@@ -3352,7 +3357,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
phba->link_events++;
- if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) &&
+ if ((attn_type == LPFC_ATT_LINK_UP) &&
!(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
phba->fc_stat.LinkUp++;
if (phba->link_flag & LS_LOOPBACK_MODE) {
@@ -3378,8 +3383,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
phba->wait_4_mlo_maint_flg);
}
lpfc_mbx_process_link_up(phba, la);
- } else if (bf_get(lpfc_mbx_read_top_att_type, la) ==
- LPFC_ATT_LINK_DOWN) {
+ } else if (attn_type == LPFC_ATT_LINK_DOWN ||
+ attn_type == LPFC_ATT_UNEXP_WWPN) {
phba->fc_stat.LinkDown++;
if (phba->link_flag & LS_LOOPBACK_MODE)
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -3388,6 +3393,14 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
"Data: x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag);
+ else if (attn_type == LPFC_ATT_UNEXP_WWPN)
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+ "1313 Link Down UNEXP WWPN Event x%x received "
+ "Data: x%x x%x x%x x%x x%x\n",
+ la->eventTag, phba->fc_eventTag,
+ phba->pport->port_state, vport->fc_flag,
+ bf_get(lpfc_mbx_read_top_mm, la),
+ bf_get(lpfc_mbx_read_top_fa, la));
else
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1305 Link Down Event x%x received "
@@ -3398,8 +3411,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
bf_get(lpfc_mbx_read_top_fa, la));
lpfc_mbx_issue_link_down(phba);
}
- if ((phba->sli.sli_flag & LPFC_MENLO_MAINT) &&
- ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP))) {
+ if (phba->sli.sli_flag & LPFC_MENLO_MAINT &&
+ attn_type == LPFC_ATT_LINK_UP) {
if (phba->link_state != LPFC_LINK_DOWN) {
phba->fc_stat.LinkDown++;
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -4020,9 +4033,11 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rdata = rport->dd_data;
/* break the link before dropping the ref */
ndlp->rport = NULL;
- if (rdata && rdata->pnode == ndlp)
- lpfc_nlp_put(ndlp);
- rdata->pnode = NULL;
+ if (rdata) {
+ if (rdata->pnode == ndlp)
+ lpfc_nlp_put(ndlp);
+ rdata->pnode = NULL;
+ }
/* drop reference for earlier registration */
put_device(&rport->dev);
}
@@ -4133,7 +4148,6 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int old_state, int new_state)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- struct lpfc_hba *phba = vport->phba;
if (new_state == NLP_STE_UNMAPPED_NODE) {
ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
@@ -4152,14 +4166,14 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_unregister_remote_port(ndlp);
}
- /* Notify the NVME transport of this rport's loss */
- if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
- (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) &&
- (vport->phba->nvmet_support == 0) &&
- ((ndlp->nlp_fc4_type & NLP_FC4_NVME) ||
- (ndlp->nlp_DID == Fabric_DID))) {
+ /* Notify the NVME transport of this rport's loss on the
+ * Initiator. For NVME Target, should upcall transport
+ * in the else clause when API available.
+ */
+ if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
vport->phba->nport_event_cnt++;
- lpfc_nvme_unregister_port(vport, ndlp);
+ if (vport->phba->nvmet_support == 0)
+ lpfc_nvme_unregister_port(vport, ndlp);
}
}
@@ -4344,9 +4358,8 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
- init_timer(&ndlp->nlp_delayfunc);
- ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
- ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
+ setup_timer(&ndlp->nlp_delayfunc, lpfc_els_retry_delay,
+ (unsigned long)ndlp);
ndlp->nlp_DID = did;
ndlp->vport = vport;
ndlp->phba = vport->phba;
@@ -4366,10 +4379,17 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint32_t did;
unsigned long flags;
unsigned long *active_rrqs_xri_bitmap = NULL;
+ int rpi = LPFC_RPI_ALLOC_ERROR;
if (!ndlp)
return NULL;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ rpi = lpfc_sli4_alloc_rpi(vport->phba);
+ if (rpi == LPFC_RPI_ALLOC_ERROR)
+ return NULL;
+ }
+
spin_lock_irqsave(&phba->ndlp_lock, flags);
/* The ndlp should not be in memory free mode */
if (NLP_CHK_FREE_REQ(ndlp)) {
@@ -4379,7 +4399,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"usgmap:x%x refcnt:%d\n",
(void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
- return NULL;
+ goto free_rpi;
}
/* The ndlp should not already be in active mode */
if (NLP_CHK_NODE_ACT(ndlp)) {
@@ -4389,7 +4409,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"usgmap:x%x refcnt:%d\n",
(void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
- return NULL;
+ goto free_rpi;
}
/* Keep the original DID */
@@ -4407,7 +4427,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
- ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
+ ndlp->nlp_rpi = rpi;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0008 rpi:%x DID:%x flg:%x refcnt:%d "
"map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
@@ -4424,6 +4444,11 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"node enable: did:x%x",
ndlp->nlp_DID, 0, 0);
return ndlp;
+
+free_rpi:
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_free_rpi(vport->phba, rpi);
+ return NULL;
}
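Note: the reworked lpfc_enable_node() allocates the RPI up front and unwinds it through the single free_rpi label on every early-exit path. A minimal, self-contained sketch of this allocate-early/goto-unwind idiom (plain C; res_alloc/res_free and the precheck are hypothetical stand-ins, not driver code):

    #include <stdio.h>
    #include <stdlib.h>

    #define RES_ALLOC_ERROR -1

    static int res_alloc(void) { return 42; }    /* stand-in for lpfc_sli4_alloc_rpi() */
    static void res_free(int r) { (void)r; }     /* stand-in for lpfc_sli4_free_rpi() */

    /* Return an object only if every later check passes; otherwise the
     * early-allocated resource is released on the single unwind path. */
    static void *enable_object(int precheck_ok)
    {
            int res = res_alloc();
            void *obj;

            if (res == RES_ALLOC_ERROR)
                    return NULL;

            if (!precheck_ok)
                    goto free_res;               /* early exit: unwind the resource */

            obj = malloc(16);
            if (!obj)
                    goto free_res;

            return obj;

    free_res:
            res_free(res);
            return NULL;
    }

    int main(void)
    {
            void *o = enable_object(1);

            printf("%s\n", o ? "enabled" : "failed");
            free(o);
            return 0;
    }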
void
@@ -4606,9 +4631,9 @@ lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba,
pring = qp->pring;
if (!pring)
continue;
- spin_lock_irq(&pring->ring_lock);
+ spin_lock(&pring->ring_lock);
__lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
- spin_unlock_irq(&pring->ring_lock);
+ spin_unlock(&pring->ring_lock);
}
spin_unlock_irq(&phba->hbalock);
}
@@ -5102,65 +5127,82 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
ndlp = lpfc_findnode_did(vport, did);
if (!ndlp) {
+ if (vport->phba->nvmet_support)
+ return NULL;
if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
lpfc_rscn_payload_check(vport, did) == 0)
return NULL;
- ndlp = (struct lpfc_nodelist *)
- mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
+ ndlp = lpfc_nlp_init(vport, did);
if (!ndlp)
return NULL;
- lpfc_nlp_init(vport, ndlp, did);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
- if (vport->phba->nvmet_support)
- return ndlp;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
return ndlp;
} else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ if (vport->phba->nvmet_support)
+ return NULL;
ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
if (!ndlp)
return NULL;
- if (vport->phba->nvmet_support)
- return ndlp;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
return ndlp;
}
+ /* The NVME Target does not want to actively manage an rport.
+ * The goal is to allow the target to reset its state and clear
+ * pending IO in preparation for the initiator to recover.
+ */
if ((vport->fc_flag & FC_RSCN_MODE) &&
!(vport->fc_flag & FC_NDISC_ACTIVE)) {
if (lpfc_rscn_payload_check(vport, did)) {
- /* If we've already received a PLOGI from this NPort
- * we don't need to try to discover it again.
- */
- if (ndlp->nlp_flag & NLP_RCV_PLOGI)
- return NULL;
/* Since this node is marked for discovery,
* delay timeout is not needed.
*/
lpfc_cancel_retry_delay_tmo(vport, ndlp);
+
+ /* NVME Target mode waits until rport is known to be
+ * impacted by the RSCN before it transitions. No
+ * active management - just go to NPR provided the
+ * node had a valid login.
+ */
if (vport->phba->nvmet_support)
return ndlp;
+
+ /* If we've already received a PLOGI from this NPort
+ * we don't need to try to discover it again.
+ */
+ if (ndlp->nlp_flag & NLP_RCV_PLOGI)
+ return NULL;
+
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
} else
ndlp = NULL;
} else {
- /* If we've already received a PLOGI from this NPort,
- * or we are already in the process of discovery on it,
- * we don't need to try to discover it again.
+ /* If the initiator received a PLOGI from this NPort or if the
+ * initiator is already in the process of discovery on it,
+ * there's no need to try to discover it again.
*/
if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
- ndlp->nlp_flag & NLP_RCV_PLOGI)
+ (!vport->phba->nvmet_support &&
+ ndlp->nlp_flag & NLP_RCV_PLOGI))
return NULL;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
if (vport->phba->nvmet_support)
return ndlp;
+
+ /* Moving to NPR state clears unsolicited flags and
+ * allows for rediscovery
+ */
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
@@ -5885,16 +5927,31 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
return NULL;
}
-void
-lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
- uint32_t did)
+struct lpfc_nodelist *
+lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
{
+ struct lpfc_nodelist *ndlp;
+ int rpi = LPFC_RPI_ALLOC_ERROR;
+
+ if (vport->phba->sli_rev == LPFC_SLI_REV4) {
+ rpi = lpfc_sli4_alloc_rpi(vport->phba);
+ if (rpi == LPFC_RPI_ALLOC_ERROR)
+ return NULL;
+ }
+
+ ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp) {
+ if (vport->phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_free_rpi(vport->phba, rpi);
+ return NULL;
+ }
+
memset(ndlp, 0, sizeof (struct lpfc_nodelist));
lpfc_initialize_node(vport, ndlp, did);
INIT_LIST_HEAD(&ndlp->nlp_listp);
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
- ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
+ ndlp->nlp_rpi = rpi;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0007 rpi:%x DID:%x flg:%x refcnt:%d "
"map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
@@ -5916,7 +5973,7 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"node init: did:x%x",
ndlp->nlp_DID, 0, 0);
- return;
+ return ndlp;
}
/* This routine releases all resources associated with a specific NPort's ndlp
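With this change lpfc_nlp_init() becomes the single constructor: it allocates the node from the mempool, pre-allocates the SLI4 RPI, and returns the node (or NULL), so callers no longer mempool_alloc() the node themselves. A sketch of the before/after calling convention as it appears at the call sites in this patch (error handling elided):

        /* before: caller allocates, then initializes in place */
        ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
        if (!ndlp)
                return NULL;
        lpfc_nlp_init(vport, ndlp, did);

        /* after: single constructor; the RPI is allocated inside and
         * released again if the node allocation fails */
        ndlp = lpfc_nlp_init(vport, did);
        if (!ndlp)
                return NULL;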
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 15ca21484150..26a5647e057e 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -509,6 +509,8 @@ struct class_parms {
uint8_t word3Reserved2; /* Fc Word 3, bit 0: 7 */
};
+#define FAPWWN_KEY_VENDOR 0x42524344 /* valid vendor version fawwpn key */
+
struct serv_parm { /* Structure is in Big Endian format */
struct csp cmn;
struct lpfc_name portName;
@@ -2885,6 +2887,7 @@ struct lpfc_mbx_read_top {
#define LPFC_ATT_RESERVED 0x00 /* Reserved - attType */
#define LPFC_ATT_LINK_UP 0x01 /* Link is up */
#define LPFC_ATT_LINK_DOWN 0x02 /* Link is down */
+#define LPFC_ATT_UNEXP_WWPN 0x06 /* Link is down Unexpected WWPN */
uint32_t word3;
#define lpfc_mbx_read_top_alpa_granted_SHIFT 24
#define lpfc_mbx_read_top_alpa_granted_MASK 0x000000FF
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index cfdb068a3bfc..1d12f2be36bc 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1001,7 +1001,7 @@ struct eq_delay_info {
uint32_t phase;
uint32_t delay_multi;
};
-#define LPFC_MAX_EQ_DELAY 8
+#define LPFC_MAX_EQ_DELAY_EQID_CNT 8
struct sgl_page_pairs {
uint32_t sgl_pg0_addr_lo;
@@ -1070,7 +1070,7 @@ struct lpfc_mbx_modify_eq_delay {
union {
struct {
uint32_t num_eq;
- struct eq_delay_info eq[LPFC_MAX_EQ_DELAY];
+ struct eq_delay_info eq[LPFC_MAX_EQ_DELAY_EQID_CNT];
} request;
struct {
uint32_t word0;
@@ -2720,6 +2720,9 @@ struct lpfc_mbx_request_features {
#define lpfc_mbx_rq_ftr_rq_ifip_SHIFT 7
#define lpfc_mbx_rq_ftr_rq_ifip_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rq_ifip_WORD word2
+#define lpfc_mbx_rq_ftr_rq_iaar_SHIFT 9
+#define lpfc_mbx_rq_ftr_rq_iaar_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rq_iaar_WORD word2
#define lpfc_mbx_rq_ftr_rq_perfh_SHIFT 11
#define lpfc_mbx_rq_ftr_rq_perfh_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rq_perfh_WORD word2
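The new lpfc_mbx_rq_ftr_rq_iaar_{SHIFT,MASK,WORD} triplet follows the existing lpfc bit-field convention: a field is described by its bit offset, width mask, and the structure word it lives in, and is accessed through the bf_set()/bf_get() helpers. A standalone sketch of how such SHIFT/MASK/WORD accessors work (simplified helpers in the style of the driver's macros, not the exact definitions):

    #include <stdio.h>
    #include <stdint.h>

    struct req_ftrs { uint32_t word2; };

    /* field descriptor triplet, same naming convention as the driver */
    #define rq_iaar_SHIFT 9
    #define rq_iaar_MASK  0x00000001
    #define rq_iaar_WORD  word2

    /* simplified set/get helpers in the style of bf_set()/bf_get() */
    #define bf_set(name, ptr, val)                                          \
            ((ptr)->name##_WORD = (((ptr)->name##_WORD &                    \
                ~(name##_MASK << name##_SHIFT)) |                           \
                (((val) & name##_MASK) << name##_SHIFT)))
    #define bf_get(name, ptr)                                               \
            (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

    int main(void)
    {
            struct req_ftrs f = { 0 };

            bf_set(rq_iaar, &f, 1);
            printf("iaar=%u word2=0x%08x\n", bf_get(rq_iaar, &f), f.word2);
            bf_set(rq_iaar, &f, 0);         /* as the patch does: explicitly clear */
            printf("iaar=%u word2=0x%08x\n", bf_get(rq_iaar, &f), f.word2);
            return 0;
    }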
@@ -3853,6 +3856,7 @@ struct lpfc_acqe_fc_la {
#define LPFC_FC_LA_TYPE_NO_HARD_ALPA 0x3
#define LPFC_FC_LA_TYPE_MDS_LINK_DOWN 0x4
#define LPFC_FC_LA_TYPE_MDS_LOOPBACK 0x5
+#define LPFC_FC_LA_TYPE_UNEXP_WWPN 0x6
#define lpfc_acqe_fc_la_port_type_SHIFT 6
#define lpfc_acqe_fc_la_port_type_MASK 0x00000003
#define lpfc_acqe_fc_la_port_type_WORD word0
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 0ee429d773f3..90ae354a9c45 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -42,6 +42,10 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/fc/fc_fs.h>
+
+#include <linux/nvme-fc-driver.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
@@ -52,6 +56,7 @@
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
+#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
@@ -335,6 +340,9 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
+ uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
+ u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];
+
/* If the soft name exists then update it using the service params */
if (vport->phba->cfg_soft_wwnn)
u64_to_wwn(vport->phba->cfg_soft_wwnn,
@@ -354,9 +362,25 @@ lpfc_update_vport_wwn(struct lpfc_vport *vport)
memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
sizeof(struct lpfc_name));
- if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
+ /*
+ * If the port name has changed, then set the Param changes flag
+ * to unreg the login
+ */
+ if (vport->fc_portname.u.wwn[0] != 0 &&
+ memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
+ sizeof(struct lpfc_name)))
+ vport->vport_flag |= FAWWPN_PARAM_CHG;
+
+ if (vport->fc_portname.u.wwn[0] == 0 ||
+ vport->phba->cfg_soft_wwpn ||
+ (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
+ vport->vport_flag & FAWWPN_SET) {
memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
sizeof(struct lpfc_name));
+ vport->vport_flag &= ~FAWWPN_SET;
+ if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
+ vport->vport_flag |= FAWWPN_SET;
+ }
else
memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
sizeof(struct lpfc_name));
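lpfc_update_vport_wwn() now treats the service-parameter port name as fabric-assigned (FA-WWPN) when the valid_vendor_ver_level bit is set and the first vendorVersion word carries the vendor key; the key is compared in big-endian byte order via cpu_to_be32(). A small userspace analogue of that endian-sensitive key check (htonl() stands in for cpu_to_be32(); the key value is the one added in lpfc_hw.h):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>          /* htonl(): userspace stand-in for cpu_to_be32() */

    #define FAPWWN_KEY_VENDOR 0x42524344    /* ASCII "BRCD", as added in lpfc_hw.h */

    int main(void)
    {
            /* vendorVersion[] bytes in wire (big-endian) order, as in the service params */
            uint8_t vendor_version[16] = { 0x42, 0x52, 0x43, 0x44 };
            uint8_t vvvl = 1;               /* valid_vendor_ver_level bit from the CSP */
            uint32_t fawwpn_key;

            memcpy(&fawwpn_key, &vendor_version[0], sizeof(fawwpn_key));

            if (vvvl == 1 && htonl(fawwpn_key) == FAPWWN_KEY_VENDOR)
                    printf("fabric-assigned WWPN key present\n");
            else
                    printf("no FA-WWPN key\n");
            return 0;
    }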
@@ -1003,8 +1027,10 @@ static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
struct lpfc_scsi_buf *psb, *psb_next;
+ struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
LIST_HEAD(aborts);
LIST_HEAD(nvme_aborts);
+ LIST_HEAD(nvmet_aborts);
unsigned long iflag = 0;
struct lpfc_sglq *sglq_entry = NULL;
@@ -1027,16 +1053,10 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
list_for_each_entry(sglq_entry,
&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
sglq_entry->state = SGL_FREED;
- list_for_each_entry(sglq_entry,
- &phba->sli4_hba.lpfc_abts_nvmet_sgl_list, list)
- sglq_entry->state = SGL_FREED;
list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
&phba->sli4_hba.lpfc_els_sgl_list);
- if (phba->sli4_hba.nvme_wq)
- list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list,
- &phba->sli4_hba.lpfc_nvmet_sgl_list);
spin_unlock(&phba->sli4_hba.sgl_list_lock);
/* abts_scsi_buf_list_lock required because worker thread uses this
@@ -1053,6 +1073,8 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
list_splice_init(&phba->sli4_hba.lpfc_abts_nvme_buf_list,
&nvme_aborts);
+ list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
+ &nvmet_aborts);
spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
}
@@ -1066,13 +1088,20 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
- list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
- psb->pCmd = NULL;
- psb->status = IOSTAT_SUCCESS;
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
+ psb->pCmd = NULL;
+ psb->status = IOSTAT_SUCCESS;
+ }
+ spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
+ list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
+ spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
+
+ list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
+ ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
+ lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+ }
}
- spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
- list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
- spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
lpfc_sli4_free_sp_events(phba);
return 0;
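lpfc_hba_down_post_s4() drains the aborted NVME buffer and NVMET context lists by splicing them onto local list heads while the abts lock is held, and only then walking the entries, so the lock is never held across the per-entry work. A hedged kernel-style sketch of that splice-then-process idiom (dev, abts_lock, XBUSY_FLAG and requeue() are placeholders, and the driver actually nests this under a second lock):

        LIST_HEAD(local);                               /* on-stack list head */

        spin_lock_irqsave(&dev->abts_lock, flags);
        list_splice_init(&dev->abts_list, &local);      /* steal the whole list */
        spin_unlock_irqrestore(&dev->abts_lock, flags);

        /* per-entry work runs with the lock dropped */
        list_for_each_entry_safe(buf, next, &local, list) {
                buf->flags &= ~XBUSY_FLAG;
                requeue(dev, buf);
        }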
@@ -2874,34 +2903,38 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba)
{
struct lpfc_nodelist *ndlp, *next_ndlp;
struct lpfc_vport **vports;
- int i;
+ int i, rpi;
+ unsigned long flags;
if (phba->sli_rev != LPFC_SLI_REV4)
return;
vports = lpfc_create_vport_work_array(phba);
- if (vports != NULL) {
- for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- if (vports[i]->load_flag & FC_UNLOADING)
- continue;
+ if (vports == NULL)
+ return;
- list_for_each_entry_safe(ndlp, next_ndlp,
- &vports[i]->fc_nodes,
- nlp_listp) {
- if (NLP_CHK_NODE_ACT(ndlp)) {
- ndlp->nlp_rpi =
- lpfc_sli4_alloc_rpi(phba);
- lpfc_printf_vlog(ndlp->vport, KERN_INFO,
- LOG_NODE,
- "0009 rpi:%x DID:%x "
- "flg:%x map:%x %p\n",
- ndlp->nlp_rpi,
- ndlp->nlp_DID,
- ndlp->nlp_flag,
- ndlp->nlp_usg_map,
- ndlp);
- }
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ if (vports[i]->load_flag & FC_UNLOADING)
+ continue;
+
+ list_for_each_entry_safe(ndlp, next_ndlp,
+ &vports[i]->fc_nodes,
+ nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
+ rpi = lpfc_sli4_alloc_rpi(phba);
+ if (rpi == LPFC_RPI_ALLOC_ERROR) {
+ spin_lock_irqsave(&phba->ndlp_lock, flags);
+ NLP_CLR_NODE_ACT(ndlp);
+ spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+ continue;
}
+ ndlp->nlp_rpi = rpi;
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+ "0009 rpi:%x DID:%x "
+ "flg:%x map:%x %p\n", ndlp->nlp_rpi,
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_usg_map, ndlp);
}
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -3508,6 +3541,12 @@ lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba)
spin_unlock(&phba->scsi_buf_list_put_lock);
spin_unlock_irq(&phba->scsi_buf_list_get_lock);
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "6060 Current allocated SCSI xri-sgl count:%d, "
+ "maximum SCSI xri count:%d (split:%d)\n",
+ phba->sli4_hba.scsi_xri_cnt,
+ phba->sli4_hba.scsi_xri_max, phba->cfg_xri_split);
+
if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
/* max scsi xri shrinked below the allocated scsi buffers */
scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
@@ -3555,6 +3594,44 @@ out_free_mem:
return rc;
}
+static uint64_t
+lpfc_get_wwpn(struct lpfc_hba *phba)
+{
+ uint64_t wwn;
+ int rc;
+ LPFC_MBOXQ_t *mboxq;
+ MAILBOX_t *mb;
+
+ mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!mboxq)
+ return (uint64_t)-1;
+
+ /* First get WWN of HBA instance */
+ lpfc_read_nv(phba, mboxq);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "6019 Mailbox failed , mbxCmd x%x "
+ "READ_NV, mbxStatus x%x\n",
+ bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+ bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return (uint64_t) -1;
+ }
+ mb = &mboxq->u.mb;
+ memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
+ /* wwn is WWPN of HBA instance */
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return be64_to_cpu(wwn);
+ else
+ return (((wwn & 0xffffffff00000000) >> 32) |
+ ((wwn & 0x00000000ffffffff) << 32));
+
+}
+
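lpfc_get_wwpn() returns the READ_NV port name as a host-order u64: be64_to_cpu() on SLI-4, and a swap of the two 32-bit halves on earlier revisions. A tiny standalone check of that half-swap expression (the sample value is arbitrary):

    #include <stdio.h>
    #include <stdint.h>

    /* Swap the upper and lower 32-bit halves of a 64-bit WWPN,
     * as the non-SLI-4 path of lpfc_get_wwpn() does. */
    static uint64_t wwn_swap_halves(uint64_t wwn)
    {
            return ((wwn & 0xffffffff00000000ULL) >> 32) |
                   ((wwn & 0x00000000ffffffffULL) << 32);
    }

    int main(void)
    {
            uint64_t wwn = 0x2000000011223344ULL;

            printf("%016llx -> %016llx\n",
                   (unsigned long long)wwn,
                   (unsigned long long)wwn_swap_halves(wwn));
            /* prints: 2000000011223344 -> 1122334420000000 */
            return 0;
    }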
/**
* lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping
* @phba: pointer to lpfc hba data structure.
@@ -3676,17 +3753,32 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
struct lpfc_vport *vport;
struct Scsi_Host *shost = NULL;
int error = 0;
+ int i;
+ uint64_t wwn;
+ bool use_no_reset_hba = false;
+
+ wwn = lpfc_get_wwpn(phba);
+
+ for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
+ if (wwn == lpfc_no_hba_reset[i]) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "6020 Setting use_no_reset port=%llx\n",
+ wwn);
+ use_no_reset_hba = true;
+ break;
+ }
+ }
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
if (dev != &phba->pcidev->dev) {
shost = scsi_host_alloc(&lpfc_vport_template,
sizeof(struct lpfc_vport));
} else {
- if (phba->sli_rev == LPFC_SLI_REV4)
+ if (!use_no_reset_hba)
shost = scsi_host_alloc(&lpfc_template,
sizeof(struct lpfc_vport));
else
- shost = scsi_host_alloc(&lpfc_template_s3,
+ shost = scsi_host_alloc(&lpfc_template_no_hr,
sizeof(struct lpfc_vport));
}
} else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
@@ -3734,17 +3826,14 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
INIT_LIST_HEAD(&vport->rcv_buffer_list);
spin_lock_init(&vport->work_port_lock);
- init_timer(&vport->fc_disctmo);
- vport->fc_disctmo.function = lpfc_disc_timeout;
- vport->fc_disctmo.data = (unsigned long)vport;
+ setup_timer(&vport->fc_disctmo, lpfc_disc_timeout,
+ (unsigned long)vport);
- init_timer(&vport->els_tmofunc);
- vport->els_tmofunc.function = lpfc_els_timeout;
- vport->els_tmofunc.data = (unsigned long)vport;
+ setup_timer(&vport->els_tmofunc, lpfc_els_timeout,
+ (unsigned long)vport);
- init_timer(&vport->delayed_disc_tmo);
- vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
- vport->delayed_disc_tmo.data = (unsigned long)vport;
+ setup_timer(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo,
+ (unsigned long)vport);
error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
if (error)
@@ -4458,9 +4547,15 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
/* Parse and translate link attention fields */
la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
la->eventTag = acqe_fc->event_tag;
- bf_set(lpfc_mbx_read_top_att_type, la,
- LPFC_FC_LA_TYPE_LINK_DOWN);
+ if (phba->sli4_hba.link_state.status ==
+ LPFC_FC_LA_TYPE_UNEXP_WWPN) {
+ bf_set(lpfc_mbx_read_top_att_type, la,
+ LPFC_FC_LA_TYPE_UNEXP_WWPN);
+ } else {
+ bf_set(lpfc_mbx_read_top_att_type, la,
+ LPFC_FC_LA_TYPE_LINK_DOWN);
+ }
/* Invoke the mailbox command callback function */
lpfc_mbx_cmpl_read_topology(phba, pmb);
@@ -4666,10 +4761,9 @@ lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (!ndlp) {
/* Cannot find existing Fabric ndlp, so allocate a new one */
- ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ ndlp = lpfc_nlp_init(vport, Fabric_DID);
if (!ndlp)
return 0;
- lpfc_nlp_init(vport, ndlp, Fabric_DID);
/* Set the node type */
ndlp->nlp_type |= NLP_FABRIC;
/* Put ndlp onto node list */
@@ -5406,21 +5500,15 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
INIT_LIST_HEAD(&phba->luns);
/* MBOX heartbeat timer */
- init_timer(&psli->mbox_tmo);
- psli->mbox_tmo.function = lpfc_mbox_timeout;
- psli->mbox_tmo.data = (unsigned long) phba;
+ setup_timer(&psli->mbox_tmo, lpfc_mbox_timeout, (unsigned long)phba);
/* Fabric block timer */
- init_timer(&phba->fabric_block_timer);
- phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
- phba->fabric_block_timer.data = (unsigned long) phba;
+ setup_timer(&phba->fabric_block_timer, lpfc_fabric_block_timeout,
+ (unsigned long)phba);
/* EA polling mode timer */
- init_timer(&phba->eratt_poll);
- phba->eratt_poll.function = lpfc_poll_eratt;
- phba->eratt_poll.data = (unsigned long) phba;
+ setup_timer(&phba->eratt_poll, lpfc_poll_eratt,
+ (unsigned long)phba);
/* Heartbeat timer */
- init_timer(&phba->hb_tmofunc);
- phba->hb_tmofunc.function = lpfc_hb_timeout;
- phba->hb_tmofunc.data = (unsigned long)phba;
+ setup_timer(&phba->hb_tmofunc, lpfc_hb_timeout, (unsigned long)phba);
return 0;
}
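Several hunks in this patch collapse the open-coded init_timer()/.function/.data triplets into setup_timer(), which performs the same three assignments in one call; the callback keeps its void fn(unsigned long) prototype and the same cookie. The equivalence, taken from the heartbeat timer hunk above:

        /* before: three statements per timer */
        init_timer(&phba->hb_tmofunc);
        phba->hb_tmofunc.function = lpfc_hb_timeout;
        phba->hb_tmofunc.data = (unsigned long)phba;

        /* after: one call, same callback and same (unsigned long) cookie */
        setup_timer(&phba->hb_tmofunc, lpfc_hb_timeout, (unsigned long)phba);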
@@ -5446,9 +5534,8 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
*/
/* FCP polling mode timer */
- init_timer(&phba->fcp_poll_timer);
- phba->fcp_poll_timer.function = lpfc_poll_timeout;
- phba->fcp_poll_timer.data = (unsigned long) phba;
+ setup_timer(&phba->fcp_poll_timer, lpfc_poll_timeout,
+ (unsigned long)phba);
/* Host attention work mask setup */
phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
@@ -5482,7 +5569,8 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
/* Initialize the host templates the configured values. */
lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
- lpfc_template_s3.sg_tablesize = phba->cfg_sg_seg_cnt;
+ lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
+ lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
if (phba->cfg_enable_bg) {
@@ -5617,14 +5705,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
* Initialize timers used by driver
*/
- init_timer(&phba->rrq_tmr);
- phba->rrq_tmr.function = lpfc_rrq_timeout;
- phba->rrq_tmr.data = (unsigned long)phba;
+ setup_timer(&phba->rrq_tmr, lpfc_rrq_timeout, (unsigned long)phba);
/* FCF rediscover timer */
- init_timer(&phba->fcf.redisc_wait);
- phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
- phba->fcf.redisc_wait.data = (unsigned long)phba;
+ setup_timer(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo,
+ (unsigned long)phba);
/*
* Control structure for handling external multi-buffer mailbox
@@ -5706,6 +5791,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Initialize the host templates with the updated values. */
lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+ lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
@@ -5736,6 +5822,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Initialize the Abort nvme buffer list used by driver */
spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
+ /* Fast-path XRI aborted CQ Event work queue list */
+ INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
}
/* This abort list used by worker thread */
@@ -5765,6 +5854,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
+ /* Initialize mboxq lists. If the early init routines fail
+ * these lists need to be correctly initialized.
+ */
+ INIT_LIST_HEAD(&phba->sli.mboxq);
+ INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
+
/* initialize optic_state to 0xFF */
phba->sli4_hba.lnk_info.optic_state = 0xff;
@@ -5830,6 +5925,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
"READ_NV, mbxStatus x%x\n",
bf_get(lpfc_mqe_command, &mboxq->u.mqe),
bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+ mempool_free(mboxq, phba->mbox_mem_pool);
rc = -EIO;
goto out_free_bsmbx;
}
@@ -5847,10 +5943,17 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Check to see if it matches any module parameter */
for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
if (wwn == lpfc_enable_nvmet[i]) {
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6017 NVME Target %016llx\n",
wwn);
phba->nvmet_support = 1; /* a match */
+#else
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6021 Can't enable NVME Target."
+ " NVME_TARGET_FC infrastructure"
+ " is not in kernel\n");
+#endif
}
}
}
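The nvmet_support match is now compiled conditionally with IS_ENABLED(CONFIG_NVME_TARGET_FC), which evaluates to 1 when the option is built-in or modular and to 0 otherwise, so when the NVMe target transport is absent only the error message remains. A short sketch of the pattern (the helper name is hypothetical; only the kconfig symbol and the nvmet_support assignment come from the patch):

    #include <linux/kconfig.h>      /* IS_ENABLED() */

            if (wwn_matches_enable_nvmet_param(wwn)) {      /* hypothetical helper */
    #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
                    phba->nvmet_support = 1;        /* transport present: enable target mode */
    #else
                    pr_err("NVME_TARGET_FC infrastructure is not in this kernel\n");
    #endif
            }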
@@ -6347,7 +6450,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
- INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
/* els xri-sgl book keeping */
phba->sli4_hba.els_xri_cnt = 0;
@@ -7748,7 +7851,7 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
/* Create Fast Path FCP WQs */
wqesize = (phba->fcp_embed_io) ?
- LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
+ LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
qdesc = lpfc_sli4_queue_alloc(phba, wqesize, phba->sli4_hba.wq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -7779,7 +7882,7 @@ int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
struct lpfc_queue *qdesc;
- int idx, io_channel, max;
+ int idx, io_channel;
/*
* Create HBA Record arrays.
@@ -7940,15 +8043,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
if (lpfc_alloc_nvme_wq_cq(phba, idx))
goto out_error;
- /* allocate MRQ CQs */
- max = phba->cfg_nvme_io_channel;
- if (max < phba->cfg_nvmet_mrq)
- max = phba->cfg_nvmet_mrq;
-
- for (idx = 0; idx < max; idx++)
- if (lpfc_alloc_nvme_wq_cq(phba, idx))
- goto out_error;
-
if (phba->nvmet_support) {
for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
qdesc = lpfc_sli4_queue_alloc(phba,
@@ -8170,11 +8264,11 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
/* Release FCP cqs */
lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq,
- phba->cfg_fcp_io_channel);
+ phba->cfg_fcp_io_channel);
/* Release FCP wqs */
lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq,
- phba->cfg_fcp_io_channel);
+ phba->cfg_fcp_io_channel);
/* Release FCP CQ mapping array */
lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map);
@@ -8520,15 +8614,15 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0528 %s not allocated\n",
phba->sli4_hba.mbx_cq ?
- "Mailbox WQ" : "Mailbox CQ");
+ "Mailbox WQ" : "Mailbox CQ");
rc = -ENOMEM;
goto out_destroy;
}
rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
- phba->sli4_hba.mbx_cq,
- phba->sli4_hba.mbx_wq,
- NULL, 0, LPFC_MBOX);
+ phba->sli4_hba.mbx_cq,
+ phba->sli4_hba.mbx_wq,
+ NULL, 0, LPFC_MBOX);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
@@ -8712,12 +8806,9 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
}
}
- /*
- * Configure EQ delay multipier for interrupt coalescing using
- * MODIFY_EQ_DELAY for all EQs created, LPFC_MAX_EQ_DELAY at a time.
- */
- for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY)
+ for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
lpfc_modify_hba_eq_delay(phba, qidx);
+
return 0;
out_destroy:
@@ -8973,6 +9064,11 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
/* Pending ELS XRI abort events */
list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
&cqelist);
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ /* Pending NVME XRI abort events */
+ list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
+ &cqelist);
+ }
/* Pending asynnc events */
list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
&cqelist);
@@ -9881,17 +9977,19 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
{
int wait_time = 0;
int nvme_xri_cmpl = 1;
+ int nvmet_xri_cmpl = 1;
int fcp_xri_cmpl = 1;
int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
- int nvmet_xri_cmpl =
- list_empty(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list);
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
fcp_xri_cmpl =
list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
nvme_xri_cmpl =
list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
+ nvmet_xri_cmpl =
+ list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
+ }
while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl ||
!nvmet_xri_cmpl) {
@@ -9917,9 +10015,12 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
}
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
nvme_xri_cmpl = list_empty(
&phba->sli4_hba.lpfc_abts_nvme_buf_list);
+ nvmet_xri_cmpl = list_empty(
+ &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
+ }
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
fcp_xri_cmpl = list_empty(
@@ -9928,8 +10029,6 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
els_xri_cmpl =
list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
- nvmet_xri_cmpl =
- list_empty(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list);
}
}
@@ -9995,9 +10094,14 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Stop kthread signal shall trigger work_done one more time */
kthread_stop(phba->worker_thread);
+ /* Unset the queues shared with the hardware then release all
+ * allocated resources.
+ */
+ lpfc_sli4_queue_unset(phba);
+ lpfc_sli4_queue_destroy(phba);
+
/* Reset SLI4 HBA FCoE function */
lpfc_pci_function_reset(phba);
- lpfc_sli4_queue_destroy(phba);
/* Stop the SLI4 device port */
phba->pport->work_port_events = 0;
@@ -10253,6 +10357,7 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
}
/* Initialize and populate the iocb list per host */
+
error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
if (error) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -10400,12 +10505,7 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
fc_remove_host(shost);
scsi_remove_host(shost);
- /* Perform ndlp cleanup on the physical port. The nvme and nvmet
- * localports are destroyed after to cleanup all transport memory.
- */
lpfc_cleanup(vport);
- lpfc_nvmet_destroy_targetport(phba);
- lpfc_nvme_destroy_localport(vport);
/*
* Bring down the SLI Layer. This step disable all interrupts,
@@ -11003,7 +11103,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
struct lpfc_hba *phba;
struct lpfc_vport *vport = NULL;
struct Scsi_Host *shost = NULL;
- int error;
+ int error, cnt;
uint32_t cfg_mode, intr_mode;
/* Allocate memory for HBA structure */
@@ -11037,12 +11137,15 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
goto out_unset_pci_mem_s4;
}
- /* Initialize and populate the iocb list per host */
+ cnt = phba->cfg_iocb_cnt * 1024;
+ if (phba->nvmet_support)
+ cnt += phba->cfg_nvmet_mrq_post * phba->cfg_nvmet_mrq;
+ /* Initialize and populate the iocb list per host */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2821 initialize iocb list %d.\n",
- phba->cfg_iocb_cnt*1024);
- error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
+ "2821 initialize iocb list %d total %d\n",
+ phba->cfg_iocb_cnt, cnt);
+ error = lpfc_init_iocb_list(phba, cnt);
if (error) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -11129,7 +11232,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
if ((phba->nvmet_support == 0) &&
(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
/* Create NVME binding with nvme_fc_transport. This
- * ensures the vport is initialized.
+ * ensures the vport is initialized. If the localport
+ * create fails, the driver is not unloaded; keeping it
+ * loaded aids debugging of field issues.
*/
error = lpfc_nvme_create_localport(vport);
if (error) {
@@ -11137,7 +11242,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
"6004 NVME registration failed, "
"error x%x\n",
error);
- goto out_disable_intr;
}
}
@@ -11936,6 +12040,7 @@ int
lpfc_fof_queue_create(struct lpfc_hba *phba)
{
struct lpfc_queue *qdesc;
+ uint32_t wqesize;
/* Create FOF EQ */
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
@@ -11956,8 +12061,11 @@ lpfc_fof_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.oas_cq = qdesc;
/* Create OAS WQ */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+ wqesize = (phba->fcp_embed_io) ?
+ LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
+ qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
phba->sli4_hba.wq_ecount);
+
if (!qdesc)
goto out_error;
@@ -12018,6 +12126,7 @@ static struct pci_driver lpfc_driver = {
.id_table = lpfc_id_table,
.probe = lpfc_pci_probe_one,
.remove = lpfc_pci_remove_one,
+ .shutdown = lpfc_pci_remove_one,
.suspend = lpfc_pci_suspend_one,
.resume = lpfc_pci_resume_one,
.err_handler = &lpfc_err_handler,
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index a928f5187fa4..ce25a18367b5 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2083,9 +2083,12 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
if (phba->max_vpi && phba->cfg_enable_npiv)
bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);
- if (phba->nvmet_support)
+ if (phba->nvmet_support) {
bf_set(lpfc_mbx_rq_ftr_rq_mrqp, &mboxq->u.mqe.un.req_ftrs, 1);
-
+ /* iaab/iaar NOT set for now */
+ bf_set(lpfc_mbx_rq_ftr_rq_iaab, &mboxq->u.mqe.un.req_ftrs, 0);
+ bf_set(lpfc_mbx_rq_ftr_rq_iaar, &mboxq->u.mqe.un.req_ftrs, 0);
+ }
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index c61d8d692ede..5986c7957199 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -646,7 +646,6 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
}
dma_buf->iocbq = lpfc_sli_get_iocbq(phba);
- dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
if (!dma_buf->iocbq) {
kfree(dma_buf->context);
pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
@@ -658,6 +657,7 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
"2621 Ran out of nvmet iocb/WQEs\n");
return NULL;
}
+ dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
nvmewqe = dma_buf->iocbq;
wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
/* Initialize WQE */
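The lpfc_mem.c hunk is a use-before-check fix: dma_buf->iocbq was dereferenced to set iocb_flag before the NULL test, so the assignment simply moves below the check. A minimal standalone illustration of the same ordering bug and its fix (generic names, not driver code):

    #include <stdio.h>
    #include <stdlib.h>

    struct iocb { int flags; };

    static struct iocb *get_iocb(int available)
    {
            return available ? calloc(1, sizeof(struct iocb)) : NULL;
    }

    static struct iocb *alloc_buf(int available)
    {
            struct iocb *q = get_iocb(available);

            /* Buggy order would be: q->flags = 1; if (!q) ...  -- NULL deref.
             * Fixed order: test first, touch the object only on success. */
            if (!q)
                    return NULL;
            q->flags = 1;
            return q;
    }

    int main(void)
    {
            struct iocb *a = alloc_buf(1);
            struct iocb *b = alloc_buf(0);

            printf("a=%p b=%p\n", (void *)a, (void *)b);
            free(a);
            return 0;
    }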
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 061626bdf701..8777c2d5f50d 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -361,8 +361,12 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
case NLP_STE_PRLI_ISSUE:
case NLP_STE_UNMAPPED_NODE:
case NLP_STE_MAPPED_NODE:
- /* lpfc_plogi_confirm_nport skips fabric did, handle it here */
- if (!(ndlp->nlp_type & NLP_FABRIC)) {
+ /* For initiators, lpfc_plogi_confirm_nport skips fabric did.
+ * For target mode, execute implicit logo.
+ * Fabric nodes go into NPR.
+ */
+ if (!(ndlp->nlp_type & NLP_FABRIC) &&
+ !(phba->nvmet_support)) {
lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
ndlp, NULL);
return 1;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 609a908ea9db..8008c8205fb6 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -316,7 +316,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
- bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL);
+ bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);
/* Word 6 */
@@ -401,6 +401,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nodelist *ndlp;
struct ulp_bde64 *bpl;
struct lpfc_dmabuf *bmp;
+ uint16_t ntype, nstate;
/* there are two dma bufs in the request; actually there is one, and
* the second one is just the start address + cmd size.
@@ -417,11 +418,26 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
vport = lport->vport;
ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
- if (!ndlp) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
- "6043 Could not find node for DID %x\n",
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
+ "6051 DID x%06x not an active rport.\n",
pnvme_rport->port_id);
- return 1;
+ return -ENODEV;
+ }
+
+ /* The remote node has to be a mapped nvme target or an
+ * unmapped nvme initiator or it's an error.
+ */
+ ntype = ndlp->nlp_type;
+ nstate = ndlp->nlp_state;
+ if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
+ (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
+ "6088 DID x%06x not ready for "
+ "IO. State x%x, Type x%x\n",
+ pnvme_rport->port_id,
+ ndlp->nlp_state, ndlp->nlp_type);
+ return -ENODEV;
}
bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!bmp) {
@@ -456,7 +472,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
/* Expand print to include key fields. */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6051 ENTER. lport %p, rport %p lsreq%p rqstlen:%d "
+ "6149 ENTER. lport %p, rport %p lsreq%p rqstlen:%d "
"rsplen:%d %pad %pad\n",
pnvme_lport, pnvme_rport,
pnvme_lsreq, pnvme_lsreq->rqstlen,
@@ -620,15 +636,15 @@ lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
* Embed the payload in the last half of the WQE
* WQE words 16-30 get the NVME CMD IU payload
*
- * WQE Word 16 is already setup with flags
- * WQE words 17-19 get payload Words 2-4
+ * WQE words 16-19 get payload Words 1-4
* WQE words 20-21 get payload Words 6-7
* WQE words 22-29 get payload Words 16-23
*/
- wptr = &wqe->words[17]; /* WQE ptr */
+ wptr = &wqe->words[16]; /* WQE ptr */
dptr = (uint32_t *)nCmd->cmdaddr; /* payload ptr */
- dptr += 2; /* Skip Words 0-1 in payload */
+ dptr++; /* Skip Word 0 in payload */
+ *wptr++ = *dptr++; /* Word 1 */
*wptr++ = *dptr++; /* Word 2 */
*wptr++ = *dptr++; /* Word 3 */
*wptr++ = *dptr++; /* Word 4 */
@@ -745,6 +761,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
struct nvme_fc_cmd_iu *cp;
struct lpfc_nvme_rport *rport;
struct lpfc_nodelist *ndlp;
+ struct lpfc_nvme_fcpreq_priv *freqpriv;
unsigned long flags;
uint32_t code;
uint16_t cid, sqhd, data;
@@ -772,9 +789,8 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
ndlp = rport->ndlp;
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
- "6061 rport %p, ndlp %p, DID x%06x ndlp "
- "not ready.\n",
- rport, ndlp, rport->remoteport->port_id);
+ "6061 rport %p, DID x%06x node not ready.\n",
+ rport, rport->remoteport->port_id);
ndlp = lpfc_findnode_did(vport, rport->remoteport->port_id);
if (!ndlp) {
@@ -853,15 +869,18 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
break;
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
"6081 NVME Completion Protocol Error: "
- "status x%x result x%x placed x%x\n",
+ "xri %x status x%x result x%x "
+ "placed x%x\n",
+ lpfc_ncmd->cur_iocbq.sli4_xritag,
lpfc_ncmd->status, lpfc_ncmd->result,
wcqe->total_data_placed);
break;
default:
out_err:
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
- "6072 NVME Completion Error: "
+ "6072 NVME Completion Error: xri %x "
"status x%x result x%x placed x%x\n",
+ lpfc_ncmd->cur_iocbq.sli4_xritag,
lpfc_ncmd->status, lpfc_ncmd->result,
wcqe->total_data_placed);
nCmd->transferred_length = 0;
@@ -900,6 +919,8 @@ out_err:
phba->cpucheck_cmpl_io[lpfc_ncmd->cpu]++;
}
#endif
+ freqpriv = nCmd->private;
+ freqpriv->nvme_buf = NULL;
nCmd->done(nCmd);
spin_lock_irqsave(&phba->hbalock, flags);
@@ -978,9 +999,6 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
NVME_WRITE_CMD);
- /* Word 16 */
- wqe->words[16] = LPFC_NVME_EMBED_WRITE;
-
phba->fc4NvmeOutputRequests++;
} else {
/* Word 7 */
@@ -1002,9 +1020,6 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
NVME_READ_CMD);
- /* Word 16 */
- wqe->words[16] = LPFC_NVME_EMBED_READ;
-
phba->fc4NvmeInputRequests++;
}
} else {
@@ -1026,9 +1041,6 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
/* Word 11 */
bf_set(wqe_cmd_type, &wqe->generic.wqe_com, NVME_READ_CMD);
- /* Word 16 */
- wqe->words[16] = LPFC_NVME_EMBED_CMD;
-
phba->fc4NvmeControlRequests++;
}
/*
@@ -1108,12 +1120,12 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
first_data_sgl = sgl;
lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
- if (lpfc_ncmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+ if (lpfc_ncmd->seg_cnt > phba->cfg_nvme_seg_cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6058 Too many sg segments from "
"NVME Transport. Max %d, "
"nvmeIO sg_cnt %d\n",
- phba->cfg_sg_seg_cnt,
+ phba->cfg_nvme_seg_cnt,
lpfc_ncmd->seg_cnt);
lpfc_ncmd->seg_cnt = 0;
return 1;
@@ -1205,6 +1217,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_buf *lpfc_ncmd;
struct lpfc_nvme_rport *rport;
struct lpfc_nvme_qhandle *lpfc_queue_info;
+ struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint64_t start = 0;
#endif
@@ -1283,9 +1296,10 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
* Do not let the IO hang out forever. There is no midlayer issuing
* an abort so inform the FW of the maximum IO pending time.
*/
- pnvme_fcreq->private = (void *)lpfc_ncmd;
+ freqpriv->nvme_buf = lpfc_ncmd;
lpfc_ncmd->nvmeCmd = pnvme_fcreq;
lpfc_ncmd->nrport = rport;
+ lpfc_ncmd->ndlp = ndlp;
lpfc_ncmd->start_time = jiffies;
lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp);
@@ -1319,7 +1333,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
"sid: x%x did: x%x oxid: x%x\n",
ret, vport->fc_myDID, ndlp->nlp_DID,
lpfc_ncmd->cur_iocbq.sli4_xritag);
- ret = -EINVAL;
+ ret = -EBUSY;
goto out_free_nvme_buf;
}
@@ -1412,6 +1426,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_buf *lpfc_nbuf;
struct lpfc_iocbq *abts_buf;
struct lpfc_iocbq *nvmereq_wqe;
+ struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;
union lpfc_wqe *abts_wqe;
unsigned long flags;
int ret_val;
@@ -1422,7 +1437,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
phba = vport->phba;
/* Announce entry to new IO submit field. */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
"6002 Abort Request to rport DID x%06x "
"for nvme_fc_req %p\n",
pnvme_rport->port_id,
@@ -1452,7 +1467,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* The remote node has to be ready to send an abort. */
if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) &&
!(ndlp->nlp_type & NLP_NVME_TARGET)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_ABTS,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6048 rport %p, DID x%06x not ready for "
"IO. State x%x, Type x%x\n",
rport, pnvme_rport->port_id,
@@ -1467,27 +1482,28 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* driver queued commands are in process of being flushed */
if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
spin_unlock_irqrestore(&phba->hbalock, flags);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6139 Driver in reset cleanup - flushing "
"NVME Req now. hba_flag x%x\n",
phba->hba_flag);
return;
}
- lpfc_nbuf = (struct lpfc_nvme_buf *)pnvme_fcreq->private;
+ lpfc_nbuf = freqpriv->nvme_buf;
if (!lpfc_nbuf) {
spin_unlock_irqrestore(&phba->hbalock, flags);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6140 NVME IO req has no matching lpfc nvme "
"io buffer. Skipping abort req.\n");
return;
} else if (!lpfc_nbuf->nvmeCmd) {
spin_unlock_irqrestore(&phba->hbalock, flags);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6141 lpfc NVME IO req has no nvme_fcreq "
"io buffer. Skipping abort req.\n");
return;
}
+ nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
/*
* The lpfc_nbuf and the mapped nvme_fcreq in the driver's
@@ -1498,23 +1514,22 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
*/
if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
spin_unlock_irqrestore(&phba->hbalock, flags);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6143 NVME req mismatch: "
"lpfc_nbuf %p nvmeCmd %p, "
- "pnvme_fcreq %p. Skipping Abort\n",
+ "pnvme_fcreq %p. Skipping Abort xri x%x\n",
lpfc_nbuf, lpfc_nbuf->nvmeCmd,
- pnvme_fcreq);
+ pnvme_fcreq, nvmereq_wqe->sli4_xritag);
return;
}
/* Don't abort IOs no longer on the pending queue. */
- nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
spin_unlock_irqrestore(&phba->hbalock, flags);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6142 NVME IO req %p not queued - skipping "
- "abort req\n",
- pnvme_fcreq);
+ "abort req xri x%x\n",
+ pnvme_fcreq, nvmereq_wqe->sli4_xritag);
return;
}
@@ -1525,21 +1540,22 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* Outstanding abort is in progress */
if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
spin_unlock_irqrestore(&phba->hbalock, flags);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6144 Outstanding NVME I/O Abort Request "
"still pending on nvme_fcreq %p, "
- "lpfc_ncmd %p\n",
- pnvme_fcreq, lpfc_nbuf);
+ "lpfc_ncmd %p xri x%x\n",
+ pnvme_fcreq, lpfc_nbuf,
+ nvmereq_wqe->sli4_xritag);
return;
}
abts_buf = __lpfc_sli_get_iocbq(phba);
if (!abts_buf) {
spin_unlock_irqrestore(&phba->hbalock, flags);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6136 No available abort wqes. Skipping "
- "Abts req for nvme_fcreq %p.\n",
- pnvme_fcreq);
+ "Abts req for nvme_fcreq %p xri x%x\n",
+ pnvme_fcreq, nvmereq_wqe->sli4_xritag);
return;
}
@@ -1588,7 +1604,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf);
spin_unlock_irqrestore(&phba->hbalock, flags);
if (ret_val == IOCB_ERROR) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6137 Failed abts issue_wqe with status x%x "
"for nvme_fcreq %p.\n",
ret_val, pnvme_fcreq);
@@ -1596,8 +1612,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
return;
}
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
- "6138 Transport Abort NVME Request Issued for\n"
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
+ "6138 Transport Abort NVME Request Issued for "
"ox_id x%x on reqtag x%x\n",
nvmereq_wqe->sli4_xritag,
abts_buf->iotag);
@@ -1626,7 +1642,7 @@ static struct nvme_fc_port_template lpfc_nvme_template = {
.local_priv_sz = sizeof(struct lpfc_nvme_lport),
.remote_priv_sz = sizeof(struct lpfc_nvme_rport),
.lsrqst_priv_sz = 0,
- .fcprqst_priv_sz = 0,
+ .fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
};
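Setting .fcprqst_priv_sz tells the nvme-fc transport to allocate a per-I/O private area of that size and hand it back in the request's private pointer; the driver then keeps its lpfc_nvme_buf pointer in that area instead of overwriting the pointer itself, as the freqpriv changes above show. A hedged sketch of the driver-side container once the template advertises the size (only the nvme_buf member is visible in this diff; the full struct layout is assumed):

    /* per-request private area, sized by .fcprqst_priv_sz in the port template */
    struct lpfc_nvme_fcpreq_priv {
            struct lpfc_nvme_buf *nvme_buf;
    };

            /* submit path: stash the driver buffer in the transport-owned area */
            struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;

            freqpriv->nvme_buf = lpfc_ncmd;

            /* completion path: drop the reference before completing upward */
            freqpriv = nCmd->private;
            freqpriv->nvme_buf = NULL;
            nCmd->done(nCmd);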
/**
@@ -1821,10 +1837,10 @@ lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
pdma_phys_sgl1, cur_xritag);
if (status) {
/* failure, put on abort nvme list */
- lpfc_ncmd->exch_busy = 1;
+ lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
} else {
/* success, put on NVME buffer list */
- lpfc_ncmd->exch_busy = 0;
+ lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
lpfc_ncmd->status = IOSTAT_SUCCESS;
num_posted++;
}
@@ -1854,10 +1870,10 @@ lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
struct lpfc_nvme_buf, list);
if (status) {
/* failure, put on abort nvme list */
- lpfc_ncmd->exch_busy = 1;
+ lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
} else {
/* success, put on NVME buffer list */
- lpfc_ncmd->exch_busy = 0;
+ lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
lpfc_ncmd->status = IOSTAT_SUCCESS;
num_posted++;
}
@@ -2057,7 +2073,7 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
if (lpfc_test_rrq_active(phba, ndlp,
lpfc_ncmd->cur_iocbq.sli4_lxritag))
continue;
- list_del(&lpfc_ncmd->list);
+ list_del_init(&lpfc_ncmd->list);
found = 1;
break;
}
@@ -2072,7 +2088,7 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
if (lpfc_test_rrq_active(
phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
continue;
- list_del(&lpfc_ncmd->list);
+ list_del_init(&lpfc_ncmd->list);
found = 1;
break;
}
@@ -2099,7 +2115,13 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
unsigned long iflag = 0;
lpfc_ncmd->nonsg_phys = 0;
- if (lpfc_ncmd->exch_busy) {
+ if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+ "6310 XB release deferred for "
+ "ox_id x%x on reqtag x%x\n",
+ lpfc_ncmd->cur_iocbq.sli4_xritag,
+ lpfc_ncmd->cur_iocbq.iotag);
+
spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
iflag);
lpfc_ncmd->nvmeCmd = NULL;
@@ -2135,11 +2157,12 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
int
lpfc_nvme_create_localport(struct lpfc_vport *vport)
{
+ int ret = 0;
struct lpfc_hba *phba = vport->phba;
struct nvme_fc_port_info nfcp_info;
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
- int len, ret = 0;
+ int len;
/* Initialize this localport instance. The vport wwn usage ensures
* that NPIV is accounted for.
@@ -2149,15 +2172,29 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
- /* For now need + 1 to get around NVME transport logic */
- lpfc_nvme_template.max_sgl_segments = phba->cfg_sg_seg_cnt + 1;
+ /* Limit to LPFC_MAX_NVME_SEG_CNT.
+ * For now need + 1 to get around NVME transport logic.
+ */
+ if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_INIT,
+ "6300 Reducing sg segment cnt to %d\n",
+ LPFC_MAX_NVME_SEG_CNT);
+ phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+ } else {
+ phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
+ }
+ lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;
/* localport is allocated from the stack, but the registration
* call allocates heap memory as well as the private area.
*/
+#if (IS_ENABLED(CONFIG_NVME_FC))
ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
&vport->phba->pcidev->dev, &localport);
+#else
+ ret = -ENOMEM;
+#endif
if (!ret) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
"6005 Successfully registered local "
@@ -2173,10 +2210,10 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
lport->vport = vport;
INIT_LIST_HEAD(&lport->rport_list);
vport->nvmei_support = 1;
+ len = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max);
+ vport->phba->total_nvme_bufs += len;
}
- len = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max);
- vport->phba->total_nvme_bufs += len;
return ret;
}
@@ -2193,6 +2230,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
void
lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
{
+#if (IS_ENABLED(CONFIG_NVME_FC))
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL;
@@ -2208,7 +2246,6 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
"6011 Destroying NVME localport %p\n",
localport);
-
list_for_each_entry_safe(rport, rport_next, &lport->rport_list, list) {
/* The last node ref has to get released now before the rport
* private memory area is released by the transport.
@@ -2222,6 +2259,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
"6008 rport fail destroy %x\n", ret);
wait_for_completion_timeout(&rport->rport_unreg_done, 5);
}
+
/* lport's rport list is clear. Unregister
* lport and release resources.
*/
@@ -2245,17 +2283,29 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
"Failed, status x%x\n",
ret);
}
+#endif
}
void
lpfc_nvme_update_localport(struct lpfc_vport *vport)
{
+#if (IS_ENABLED(CONFIG_NVME_FC))
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
localport = vport->localport;
+ if (!localport) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
+ "6710 Update NVME fail. No localport\n");
+ return;
+ }
lport = (struct lpfc_nvme_lport *)localport->private;
-
+ if (!lport) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
+ "6171 Update NVME fail. localP %p, No lport\n",
+ localport);
+ return;
+ }
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
"6012 Update NVME lport %p did x%x\n",
localport, vport->fc_myDID);
@@ -2269,12 +2319,13 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport)
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6030 bound lport %p to DID x%06x\n",
lport, localport->port_id);
-
+#endif
}
int
lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
+#if (IS_ENABLED(CONFIG_NVME_FC))
int ret = 0;
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
@@ -2348,7 +2399,6 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
-
ret = nvme_fc_register_remoteport(localport, &rpinfo,
&remote_port);
if (!ret) {
@@ -2384,6 +2434,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_type, ndlp->nlp_DID, ndlp);
}
return ret;
+#else
+ return 0;
+#endif
}
/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
@@ -2401,11 +2454,13 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
void
lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
+#if (IS_ENABLED(CONFIG_NVME_FC))
int ret;
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
struct lpfc_nvme_rport *rport;
struct nvme_fc_remote_port *remoteport;
+ unsigned long wait_tmo;
localport = vport->localport;
@@ -2448,17 +2503,82 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* before proceeding. This guarantees the transport and driver
* have completed the unreg process.
*/
- ret = wait_for_completion_timeout(&rport->rport_unreg_done, 5);
+ wait_tmo = msecs_to_jiffies(5000);
+ ret = wait_for_completion_timeout(&rport->rport_unreg_done,
+ wait_tmo);
if (ret == 0) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
- "6169 Unreg nvme wait failed %d\n",
- ret);
+ "6169 Unreg nvme wait timeout\n");
}
}
return;
input_err:
+#endif
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
- "6168: State error: lport %p, rport%p FCID x%06x\n",
+ "6168 State error: lport %p, rport%p FCID x%06x\n",
vport->localport, ndlp->rport, ndlp->nlp_DID);
}
+
+/**
+ * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the nvme xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 fast-path
+ * NVME aborted xri.
+ **/
+void
+lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
+ struct sli4_wcqe_xri_aborted *axri)
+{
+ uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+ uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
+ struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd;
+ struct lpfc_nodelist *ndlp;
+ unsigned long iflag = 0;
+ int rrq_empty = 0;
+
+ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
+ return;
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
+ &phba->sli4_hba.lpfc_abts_nvme_buf_list,
+ list) {
+ if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
+ list_del_init(&lpfc_ncmd->list);
+ lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
+ lpfc_ncmd->status = IOSTAT_SUCCESS;
+ spin_unlock(
+ &phba->sli4_hba.abts_nvme_buf_list_lock);
+
+ rrq_empty = list_empty(&phba->active_rrq_list);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ ndlp = lpfc_ncmd->ndlp;
+ if (ndlp) {
+ lpfc_set_rrq_active(
+ phba, ndlp,
+ lpfc_ncmd->cur_iocbq.sli4_lxritag,
+ rxid, 1);
+ lpfc_sli4_abts_err_handler(phba, ndlp, axri);
+ }
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+ "6311 XRI Aborted xri x%x tag x%x "
+ "released\n",
+ xri, lpfc_ncmd->cur_iocbq.iotag);
+
+ lpfc_release_nvme_buf(phba, lpfc_ncmd);
+ if (rrq_empty)
+ lpfc_worker_wake_up(phba);
+ return;
+ }
+ }
+ spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+ "6312 XRI Aborted xri x%x not found\n", xri);
+
+}
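The unregister path above now passes a real 5 second timeout: wait_for_completion_timeout() takes its timeout in jiffies and returns 0 on timeout, so the old literal 5 meant only five timer ticks. A minimal sketch of the corrected pattern (illustrative only, not part of the patch; the completion and function names are assumed):

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    /* Wait up to 5 seconds for the transport to signal that unregister is done. */
    static int example_wait_unreg_done(struct completion *done)
    {
            unsigned long tmo = msecs_to_jiffies(5000);

            if (!wait_for_completion_timeout(done, tmo))
                    return -ETIMEDOUT;      /* timed out */
            return 0;                       /* completed within 5 seconds */
    }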
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index b2fae5e813f8..ec32f45daa66 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -21,12 +21,7 @@
* included with this package. *
********************************************************************/
-#define LPFC_NVME_MIN_SEGS 16
-#define LPFC_NVME_DEFAULT_SEGS 66 /* 256K IOs - 64 + 2 */
-#define LPFC_NVME_MAX_SEGS 510
-#define LPFC_NVMET_MIN_POSTBUF 16
-#define LPFC_NVMET_DEFAULT_POSTBUF 1024
-#define LPFC_NVMET_MAX_POSTBUF 4096
+#define LPFC_NVME_DEFAULT_SEGS (64 + 1) /* 256K IOs */
#define LPFC_NVME_WQSIZE 256
#define LPFC_NVME_ERSP_LEN 0x20
@@ -57,6 +52,7 @@ struct lpfc_nvme_buf {
struct list_head list;
struct nvmefc_fcp_req *nvmeCmd;
struct lpfc_nvme_rport *nrport;
+ struct lpfc_nodelist *ndlp;
uint32_t timeout;
@@ -101,3 +97,7 @@ struct lpfc_nvme_buf {
uint64_t ts_data_nvme;
#endif
};
+
+struct lpfc_nvme_fcpreq_priv {
+ struct lpfc_nvme_buf *nvme_buf;
+};
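Rough arithmetic behind the new single default, assuming 4 KiB scatter/gather entries (the entry size is not stated in this hunk): 64 entries * 4 KiB = 256 KiB per I/O, and the extra +1 entry is the NVME transport workaround noted in lpfc_nvmet_create_targetport() later in this patch. As a sketch:

    /* illustrative only: why (64 + 1) corresponds to 256K I/Os */
    #define EXAMPLE_SGE_SIZE        4096                        /* assumed SGE size */
    #define EXAMPLE_MAX_IO_SIZE     (64 * EXAMPLE_SGE_SIZE)     /* 262144 bytes = 256 KiB */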
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index c421e1738ee9..94434e621c33 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -71,6 +71,26 @@ static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
struct lpfc_nvmet_rcv_ctx *,
uint32_t, uint16_t);
+void
+lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
+{
+ unsigned long iflag;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+ "6313 NVMET Defer ctx release xri x%x flg x%x\n",
+ ctxp->oxid, ctxp->flag);
+
+ spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
+ if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
+ spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
+ iflag);
+ return;
+ }
+ ctxp->flag |= LPFC_NVMET_CTX_RLS;
+ list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
+ spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
+}
+
/**
* lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
* @phba: Pointer to HBA context object.
@@ -139,6 +159,11 @@ lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp,
struct lpfc_dmabuf *mp)
{
if (ctxp) {
+ if (ctxp->flag)
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+ "6314 rq_post ctx xri x%x flag x%x\n",
+ ctxp->oxid, ctxp->flag);
+
if (ctxp->txrdy) {
pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
ctxp->txrdy_phys);
@@ -337,39 +362,55 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
#endif
ctxp = cmdwqe->context2;
+ ctxp->flag &= ~LPFC_NVMET_IO_INP;
+
rsp = &ctxp->ctx.fcp_req;
op = rsp->op;
- ctxp->flag &= ~LPFC_NVMET_IO_INP;
status = bf_get(lpfc_wcqe_c_status, wcqe);
result = wcqe->parameter;
- if (!phba->targetport)
- goto out;
+ if (phba->targetport)
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ else
+ tgtp = NULL;
lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
ctxp->oxid, op, status);
- tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
if (status) {
rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
rsp->transferred_length = 0;
- atomic_inc(&tgtp->xmt_fcp_rsp_error);
+ if (tgtp)
+ atomic_inc(&tgtp->xmt_fcp_rsp_error);
+
+ /* pick up SLI4 exchange busy condition */
+ if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
+ ctxp->flag |= LPFC_NVMET_XBUSY;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+ "6315 IO Cmpl XBUSY: xri x%x: %x/%x\n",
+ ctxp->oxid, status, result);
+ } else {
+ ctxp->flag &= ~LPFC_NVMET_XBUSY;
+ }
+
} else {
rsp->fcp_error = NVME_SC_SUCCESS;
if (op == NVMET_FCOP_RSP)
rsp->transferred_length = rsp->rsplen;
else
rsp->transferred_length = rsp->transfer_length;
- atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
+ if (tgtp)
+ atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
}
-out:
if ((op == NVMET_FCOP_READDATA_RSP) ||
(op == NVMET_FCOP_RSP)) {
/* Sanity check */
ctxp->state = LPFC_NVMET_STE_DONE;
ctxp->entry_cnt++;
+
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (phba->ktime_on) {
if (rsp->op == NVMET_FCOP_READDATA_RSP) {
@@ -408,9 +449,7 @@ out:
if (phba->ktime_on)
lpfc_nvmet_ktime(phba, ctxp);
#endif
- /* Let Abort cmpl repost the context */
- if (!(ctxp->flag & LPFC_NVMET_ABORT_OP))
- lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+ /* lpfc_nvmet_xmt_fcp_release() will recycle the context */
} else {
ctxp->entry_cnt++;
start_clean = offsetof(struct lpfc_iocbq, wqe);
@@ -519,8 +558,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
struct lpfc_hba *phba = ctxp->phba;
struct lpfc_iocbq *nvmewqeq;
- unsigned long iflags;
- int rc, id;
+ int rc;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (phba->ktime_on) {
@@ -530,7 +568,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
ctxp->ts_nvme_data = ktime_get_ns();
}
if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
- id = smp_processor_id();
+ int id = smp_processor_id();
ctxp->cpu = id;
if (id < LPFC_CHECK_CPU_CNT)
phba->cpucheck_xmt_io[id]++;
@@ -544,33 +582,14 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
}
#endif
- if (rsp->op == NVMET_FCOP_ABORT) {
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6103 Abort op: oxri x%x %d cnt %d\n",
- ctxp->oxid, ctxp->state, ctxp->entry_cnt);
-
- lpfc_nvmeio_data(phba, "NVMET FCP ABRT: "
- "xri x%x state x%x cnt x%x\n",
- ctxp->oxid, ctxp->state, ctxp->entry_cnt);
-
- atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
- ctxp->entry_cnt++;
- ctxp->flag |= LPFC_NVMET_ABORT_OP;
- if (ctxp->flag & LPFC_NVMET_IO_INP)
- lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
- ctxp->oxid);
- else
- lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
- ctxp->oxid);
- return 0;
- }
-
/* Sanity check */
- if (ctxp->state == LPFC_NVMET_STE_ABORT) {
+ if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
+ (ctxp->state == LPFC_NVMET_STE_ABORT)) {
atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6102 Bad state IO x%x aborted\n",
+ "6102 IO xri x%x aborted\n",
ctxp->oxid);
+ rc = -ENXIO;
goto aerr;
}
@@ -580,6 +599,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6152 FCP Drop IO x%x: Prep\n",
ctxp->oxid);
+ rc = -ENXIO;
goto aerr;
}
@@ -592,10 +612,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
ctxp->oxid, rsp->op, rsp->rsplen);
- /* For now we take hbalock */
- spin_lock_irqsave(&phba->hbalock, iflags);
rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
- spin_unlock_irqrestore(&phba->hbalock, iflags);
if (rc == WQE_SUCCESS) {
ctxp->flag |= LPFC_NVMET_IO_INP;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -618,8 +635,9 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
ctxp->wqeq->hba_wqidx = 0;
nvmewqeq->context2 = NULL;
nvmewqeq->context3 = NULL;
+ rc = -EBUSY;
aerr:
- return -ENXIO;
+ return rc;
}
static void
@@ -631,10 +649,79 @@ lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
complete(&tport->tport_unreg_done);
}
+static void
+lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *req)
+{
+ struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
+ struct lpfc_nvmet_rcv_ctx *ctxp =
+ container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+ struct lpfc_hba *phba = ctxp->phba;
+ unsigned long flags;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+ "6103 Abort op: oxri x%x flg x%x cnt %d\n",
+ ctxp->oxid, ctxp->flag, ctxp->entry_cnt);
+
+ lpfc_nvmeio_data(phba, "NVMET FCP ABRT: "
+ "xri x%x flg x%x cnt x%x\n",
+ ctxp->oxid, ctxp->flag, ctxp->entry_cnt);
+
+ atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
+ ctxp->entry_cnt++;
+ spin_lock_irqsave(&ctxp->ctxlock, flags);
+
+ /* Since iaab/iaar are NOT set, we need to check
+ * if the firmware is in the process of aborting the IO
+ */
+ if (ctxp->flag & LPFC_NVMET_XBUSY) {
+ spin_unlock_irqrestore(&ctxp->ctxlock, flags);
+ return;
+ }
+ ctxp->flag |= LPFC_NVMET_ABORT_OP;
+ if (ctxp->flag & LPFC_NVMET_IO_INP)
+ lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
+ ctxp->oxid);
+ else
+ lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
+ ctxp->oxid);
+ spin_unlock_irqrestore(&ctxp->ctxlock, flags);
+}
+
+static void
+lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *rsp)
+{
+ struct lpfc_nvmet_rcv_ctx *ctxp =
+ container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+ struct lpfc_hba *phba = ctxp->phba;
+ unsigned long flags;
+ bool aborting = false;
+
+ spin_lock_irqsave(&ctxp->ctxlock, flags);
+ if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
+ (ctxp->flag & LPFC_NVMET_XBUSY)) {
+ aborting = true;
+ /* let the abort path do the real release */
+ lpfc_nvmet_defer_release(phba, ctxp);
+ }
+ spin_unlock_irqrestore(&ctxp->ctxlock, flags);
+
+ lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid,
+ ctxp->state, 0);
+
+ if (aborting)
+ return;
+
+ lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+}
+
static struct nvmet_fc_target_template lpfc_tgttemplate = {
.targetport_delete = lpfc_nvmet_targetport_delete,
.xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
.fcp_op = lpfc_nvmet_xmt_fcp_op,
+ .fcp_abort = lpfc_nvmet_xmt_fcp_abort,
+ .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
.max_hw_queues = 1,
.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
@@ -663,14 +750,31 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
pinfo.port_id = vport->fc_myDID;
+ /* Limit to LPFC_MAX_NVME_SEG_CNT.
+ * For now, need the extra +1 to get around NVME transport logic.
+ */
+ if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
+ "6400 Reducing sg segment cnt to %d\n",
+ LPFC_MAX_NVME_SEG_CNT);
+ phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+ } else {
+ phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
+ }
+ lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
- lpfc_tgttemplate.max_sgl_segments = phba->cfg_sg_seg_cnt;
lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
- NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED;
+ NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED |
+ NVMET_FCTGTFEAT_CMD_IN_ISR |
+ NVMET_FCTGTFEAT_OPDONE_IN_ISR;
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
&phba->pcidev->dev,
&phba->targetport);
+#else
+ error = -ENOMEM;
+#endif
if (error) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
"6025 Cannot register NVME targetport "
@@ -731,9 +835,138 @@ lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
return 0;
}
+/**
+ * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the nvmet xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 fast-path
+ * NVMET aborted xri.
+ **/
+void
+lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
+ struct sli4_wcqe_xri_aborted *axri)
+{
+ uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+ uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
+ struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+ struct lpfc_nodelist *ndlp;
+ unsigned long iflag = 0;
+ int rrq_empty = 0;
+ bool released = false;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+ "6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
+
+ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
+ return;
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ list_for_each_entry_safe(ctxp, next_ctxp,
+ &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
+ list) {
+ if (ctxp->rqb_buffer->sglq->sli4_xritag != xri)
+ continue;
+
+ /* Check if we already received a free context call
+ * and we have completed processing an abort situation.
+ */
+ if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
+ !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
+ list_del(&ctxp->list);
+ released = true;
+ }
+ ctxp->flag &= ~LPFC_NVMET_XBUSY;
+ spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+
+ rrq_empty = list_empty(&phba->active_rrq_list);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+ (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
+ ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
+ lpfc_set_rrq_active(phba, ndlp,
+ ctxp->rqb_buffer->sglq->sli4_lxritag,
+ rxid, 1);
+ lpfc_sli4_abts_err_handler(phba, ndlp, axri);
+ }
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+ "6318 XB aborted %x flg x%x (%x)\n",
+ ctxp->oxid, ctxp->flag, released);
+ if (released)
+ lpfc_nvmet_rq_post(phba, ctxp,
+ &ctxp->rqb_buffer->hbuf);
+ if (rrq_empty)
+ lpfc_worker_wake_up(phba);
+ return;
+ }
+ spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+}
+
+int
+lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
+ struct fc_frame_header *fc_hdr)
+
+{
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+ struct nvmefc_tgt_fcp_req *rsp;
+ uint16_t xri;
+ unsigned long iflag = 0;
+
+ xri = be16_to_cpu(fc_hdr->fh_ox_id);
+
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ list_for_each_entry_safe(ctxp, next_ctxp,
+ &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
+ list) {
+ if (ctxp->rqb_buffer->sglq->sli4_xritag != xri)
+ continue;
+
+ spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+ spin_lock_irqsave(&ctxp->ctxlock, iflag);
+ ctxp->flag |= LPFC_NVMET_ABTS_RCV;
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
+
+ lpfc_nvmeio_data(phba,
+ "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
+ xri, smp_processor_id(), 0);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+ "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
+
+ rsp = &ctxp->ctx.fcp_req;
+ nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
+
+ /* Respond with BA_ACC accordingly */
+ lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
+ return 0;
+ }
+ spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+ lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
+ xri, smp_processor_id(), 1);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+ "6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
+
+ /* Respond with BA_RJT accordingly */
+ lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
+#endif
+ return 0;
+}
+
void
lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
{
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
struct lpfc_nvmet_tgtport *tgtp;
if (phba->nvmet_support == 0)
@@ -745,6 +978,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
}
phba->targetport = NULL;
+#endif
}
/**
@@ -764,6 +998,7 @@ static void
lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct hbq_dmabuf *nvmebuf)
{
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
struct lpfc_nvmet_tgtport *tgtp;
struct fc_frame_header *fc_hdr;
struct lpfc_nvmet_rcv_ctx *ctxp;
@@ -844,6 +1079,7 @@ dropit:
atomic_inc(&tgtp->xmt_ls_abort);
lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
+#endif
}
/**
@@ -865,6 +1101,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
struct rqb_dmabuf *nvmebuf,
uint64_t isr_timestamp)
{
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
struct lpfc_nvmet_rcv_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
struct fc_frame_header *fc_hdr;
@@ -913,6 +1150,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
ctxp->rqb_buffer = nvmebuf;
ctxp->entry_cnt = 1;
ctxp->flag = 0;
+ spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (phba->ktime_on) {
@@ -935,8 +1173,8 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
}
#endif
- lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d from %06x\n",
- oxid, size, sid);
+ lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
+ oxid, size, smp_processor_id());
atomic_inc(&tgtp->rcv_fcp_cmd_in);
/*
@@ -955,7 +1193,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6159 FCP Drop IO x%x: nvmet_fc_rcv_fcp_req x%x\n",
+ "6159 FCP Drop IO x%x: err x%x\n",
ctxp->oxid, rc);
dropit:
lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
@@ -970,6 +1208,7 @@ dropit:
/* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
}
+#endif
}
/**
@@ -1114,7 +1353,7 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
- bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_DD_SOL_CTL);
+ bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
/* Word 6 */
@@ -1209,11 +1448,11 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
return NULL;
}
- if (rsp->sg_cnt > phba->cfg_sg_seg_cnt) {
+ if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6109 lpfc_nvmet_prep_fcp_wqe: seg cnt err: "
- "NPORT x%x oxid:x%x\n",
- ctxp->sid, ctxp->oxid);
+ "NPORT x%x oxid:x%x cnt %d\n",
+ ctxp->sid, ctxp->oxid, phba->cfg_nvme_seg_cnt);
return NULL;
}
@@ -1445,7 +1684,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
case NVMET_FCOP_RSP:
/* Words 0 - 2 */
- sgel = &rsp->sg[0];
physaddr = rsp->rspdma;
wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
@@ -1566,6 +1804,8 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_nvmet_rcv_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
uint32_t status, result;
+ unsigned long flags;
+ bool released = false;
ctxp = cmdwqe->context2;
status = bf_get(lpfc_wcqe_c_status, wcqe);
@@ -1574,21 +1814,46 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
atomic_inc(&tgtp->xmt_abort_cmpl);
+ ctxp->state = LPFC_NVMET_STE_DONE;
+
+ /* Check if we already received a free context call
+ * and we have completed processing an abort situation.
+ */
+ spin_lock_irqsave(&ctxp->ctxlock, flags);
+ if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
+ !(ctxp->flag & LPFC_NVMET_XBUSY)) {
+ list_del(&ctxp->list);
+ released = true;
+ }
+ ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+ spin_unlock_irqrestore(&ctxp->ctxlock, flags);
+
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
- "6165 Abort cmpl: xri x%x WCQE: %08x %08x %08x %08x\n",
- ctxp->oxid, wcqe->word0, wcqe->total_data_placed,
+ "6165 ABORT cmpl: xri x%x flg x%x (%d) "
+ "WCQE: %08x %08x %08x %08x\n",
+ ctxp->oxid, ctxp->flag, released,
+ wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);
- ctxp->state = LPFC_NVMET_STE_DONE;
- lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+ /*
+ * if the transport has released the ctx, it can be reused now. Otherwise,
+ * it will be recycled by the transport release call.
+ */
+ if (released)
+ lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
cmdwqe->context2 = NULL;
cmdwqe->context3 = NULL;
lpfc_sli_release_iocbq(phba, cmdwqe);
+
+ /* Since iaab/iaar are NOT set, there is no work left.
+ * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
+ * should have been called already.
+ */
}
/**
- * lpfc_nvmet_xmt_fcp_abort_cmp - Completion handler for ABTS
+ * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
* @phba: Pointer to HBA context object.
* @cmdwqe: Pointer to driver command WQE object.
* @wcqe: Pointer to driver response CQE object.
@@ -1598,12 +1863,14 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
* The function frees memory resources used for the NVME commands.
**/
static void
-lpfc_nvmet_xmt_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+ struct lpfc_wcqe_complete *wcqe)
{
struct lpfc_nvmet_rcv_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
+ unsigned long flags;
uint32_t status, result;
+ bool released = false;
ctxp = cmdwqe->context2;
status = bf_get(lpfc_wcqe_c_status, wcqe);
@@ -1612,23 +1879,55 @@ lpfc_nvmet_xmt_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
atomic_inc(&tgtp->xmt_abort_cmpl);
+ if (!ctxp) {
+ /* if context is clear, the related IO has already completed */
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+ "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
+ wcqe->word0, wcqe->total_data_placed,
+ result, wcqe->word3);
+ return;
+ }
+
+ /* Sanity check */
+ if (ctxp->state != LPFC_NVMET_STE_ABORT) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+ "6112 ABTS Wrong state:%d oxid x%x\n",
+ ctxp->state, ctxp->oxid);
+ }
+
+ /* Check if we already received a free context call
+ * and we have completed processing an abort situation.
+ */
+ ctxp->state = LPFC_NVMET_STE_DONE;
+ spin_lock_irqsave(&ctxp->ctxlock, flags);
+ if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
+ !(ctxp->flag & LPFC_NVMET_XBUSY)) {
+ list_del(&ctxp->list);
+ released = true;
+ }
+ ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+ spin_unlock_irqrestore(&ctxp->ctxlock, flags);
+
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6070 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n",
- ctxp, wcqe->word0, wcqe->total_data_placed,
+ "6316 ABTS cmpl xri x%x flg x%x (%x) "
+ "WCQE: %08x %08x %08x %08x\n",
+ ctxp->oxid, ctxp->flag, released,
+ wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);
-
- if (ctxp) {
- /* Sanity check */
- if (ctxp->state != LPFC_NVMET_STE_ABORT) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
- "6112 ABORT Wrong state:%d oxid x%x\n",
- ctxp->state, ctxp->oxid);
- }
- ctxp->state = LPFC_NVMET_STE_DONE;
+ /*
+ * if the transport has released the ctx, it can be reused now. Otherwise,
+ * it will be recycled by the transport release call.
+ */
+ if (released)
lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
- cmdwqe->context2 = NULL;
- cmdwqe->context3 = NULL;
- }
+
+ cmdwqe->context2 = NULL;
+ cmdwqe->context3 = NULL;
+
+ /* Since iaab/iaar are NOT set, there is no work left.
+ * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
+ * should have been called already.
+ */
}
/**
@@ -1681,10 +1980,14 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
struct lpfc_nodelist *ndlp;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6067 %s: Entrypoint: sid %x xri %x\n", __func__,
- sid, xri);
+ "6067 ABTS: sid %x xri x%x/x%x\n",
+ sid, xri, ctxp->wqeq->sli4_xritag);
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ if (!ctxp->wqeq) {
+ ctxp->wqeq = ctxp->rqb_buffer->iocbq;
+ ctxp->wqeq->hba_wqidx = 0;
+ }
ndlp = lpfc_findnode_did(phba->pport, sid);
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
@@ -1693,7 +1996,7 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
"6134 Drop ABTS - wrong NDLP state x%x.\n",
- ndlp->nlp_state);
+ (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
/* No failure to an ABTS request. */
return 0;
@@ -1790,10 +2093,11 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
- "6160 Drop ABTS - wrong NDLP state x%x.\n",
- ndlp->nlp_state);
+ "6160 Drop ABORT - wrong NDLP state x%x.\n",
+ (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
/* No failure to an ABTS request. */
+ ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
return 0;
}
@@ -1801,9 +2105,10 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
if (!ctxp->abort_wqeq) {
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
- "6161 Abort failed: No wqeqs: "
+ "6161 ABORT failed: No wqeqs: "
"xri: x%x\n", ctxp->oxid);
/* No failure to an ABTS request. */
+ ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
return 0;
}
abts_wqeq = ctxp->abort_wqeq;
@@ -1811,8 +2116,8 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
ctxp->state = LPFC_NVMET_STE_ABORT;
/* Announce entry to new IO submit field. */
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
- "6162 Abort Request to rport DID x%06x "
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+ "6162 ABORT Request to rport DID x%06x "
"for xri x%x x%x\n",
ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
@@ -1828,6 +2133,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
"NVME Req now. hba_flag x%x oxid x%x\n",
phba->hba_flag, ctxp->oxid);
lpfc_sli_release_iocbq(phba, abts_wqeq);
+ ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
return 0;
}
@@ -1839,6 +2145,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
"still pending on oxid x%x\n",
ctxp->oxid);
lpfc_sli_release_iocbq(phba, abts_wqeq);
+ ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
return 0;
}
@@ -1886,9 +2193,10 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
if (rc == WQE_SUCCESS)
return 0;
+ ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
lpfc_sli_release_iocbq(phba, abts_wqeq);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
- "6166 Failed abts issue_wqe with status x%x "
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+ "6166 Failed ABORT issue_wqe with status x%x "
"for oxid x%x.\n",
rc, ctxp->oxid);
return 1;
@@ -1917,8 +2225,8 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
spin_lock_irqsave(&phba->hbalock, flags);
abts_wqeq = ctxp->wqeq;
- abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_abort_cmp;
- abts_wqeq->iocb_cmpl = 0;
+ abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
+ abts_wqeq->iocb_cmpl = NULL;
abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
spin_unlock_irqrestore(&phba->hbalock, flags);
@@ -1928,7 +2236,7 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
}
aerr:
- lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+ ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index ca96f05c1604..128759fe6650 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -21,9 +21,7 @@
* included with this package. *
********************************************************************/
-#define LPFC_NVMET_MIN_SEGS 16
-#define LPFC_NVMET_DEFAULT_SEGS 64 /* 256K IOs */
-#define LPFC_NVMET_MAX_SEGS 510
+#define LPFC_NVMET_DEFAULT_SEGS (64 + 1) /* 256K IOs */
#define LPFC_NVMET_SUCCESS_LEN 12
/* Used for NVME Target */
@@ -77,10 +75,12 @@ struct lpfc_nvmet_rcv_ctx {
struct nvmefc_tgt_ls_req ls_req;
struct nvmefc_tgt_fcp_req fcp_req;
} ctx;
+ struct list_head list;
struct lpfc_hba *phba;
struct lpfc_iocbq *wqeq;
struct lpfc_iocbq *abort_wqeq;
dma_addr_t txrdy_phys;
+ spinlock_t ctxlock; /* protect flag access */
uint32_t *txrdy;
uint32_t sid;
uint32_t offset;
@@ -97,8 +97,11 @@ struct lpfc_nvmet_rcv_ctx {
#define LPFC_NVMET_STE_RSP 4
#define LPFC_NVMET_STE_DONE 5
uint16_t flag;
-#define LPFC_NVMET_IO_INP 1
-#define LPFC_NVMET_ABORT_OP 2
+#define LPFC_NVMET_IO_INP 0x1 /* IO is in progress on exchange */
+#define LPFC_NVMET_ABORT_OP 0x2 /* Abort WQE issued on exchange */
+#define LPFC_NVMET_XBUSY 0x4 /* XB bit set on IO cmpl */
+#define LPFC_NVMET_CTX_RLS 0x8 /* ctx free requested */
+#define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */
struct rqb_dmabuf *rqb_buffer;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
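For readability, the protocol the new flag bits implement can be condensed from lpfc_nvmet_xmt_fcp_release() and the abort completion handlers earlier in this patch (a restatement for reference, not additional driver code):

    /* condensed restatement: decide which path recycles the rcv context */
    static bool example_defer_ctx_free(struct lpfc_hba *phba,
                                       struct lpfc_nvmet_rcv_ctx *ctxp)
    {
            unsigned long flags;
            bool aborting = false;

            spin_lock_irqsave(&ctxp->ctxlock, flags);
            if (ctxp->flag & (LPFC_NVMET_ABORT_OP | LPFC_NVMET_XBUSY)) {
                    /* abort or XB still outstanding: mark LPFC_NVMET_CTX_RLS
                     * and let the abort/xri-aborted completion repost it
                     */
                    lpfc_nvmet_defer_release(phba, ctxp);
                    aborting = true;
            }
            spin_unlock_irqrestore(&ctxp->ctxlock, flags);

            return aborting;        /* false: caller reposts the context now */
    }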
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 9d6384af9fce..54fd0c81ceaf 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5953,12 +5953,13 @@ struct scsi_host_template lpfc_template_nvme = {
.track_queue_depth = 0,
};
-struct scsi_host_template lpfc_template_s3 = {
+struct scsi_host_template lpfc_template_no_hr = {
.module = THIS_MODULE,
.name = LPFC_DRIVER_NAME,
.proc_name = LPFC_DRIVER_NAME,
.info = lpfc_info,
.queuecommand = lpfc_queuecommand,
+ .eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = lpfc_abort_handler,
.eh_device_reset_handler = lpfc_device_reset_handler,
.eh_target_reset_handler = lpfc_target_reset_handler,
@@ -6015,7 +6016,6 @@ struct scsi_host_template lpfc_vport_template = {
.eh_abort_handler = lpfc_abort_handler,
.eh_device_reset_handler = lpfc_device_reset_handler,
.eh_target_reset_handler = lpfc_target_reset_handler,
- .eh_bus_reset_handler = lpfc_bus_reset_handler,
.slave_alloc = lpfc_slave_alloc,
.slave_configure = lpfc_slave_configure,
.slave_destroy = lpfc_slave_destroy,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index e43e5e23c24b..cf19f4976f5f 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,3 +1,4 @@
+
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
@@ -952,7 +953,7 @@ __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
start_sglq = sglq;
while (!found) {
if (!sglq)
- return NULL;
+ break;
if (ndlp && ndlp->active_rrqs_xri_bitmap &&
test_bit(sglq->sli4_lxritag,
ndlp->active_rrqs_xri_bitmap)) {
@@ -6337,7 +6338,7 @@ lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
}
/**
- * lpfc_sli4_repost_sgl_list - Repsot the buffers sgl pages as block
+ * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
* @phba: pointer to lpfc hba data structure.
* @pring: Pointer to driver SLI ring object.
* @sgl_list: linked link of sgl buffers to post
@@ -12213,6 +12214,41 @@ void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_nvme_xri_abort_event_proc - Process nvme xri abort event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 NVME abort XRI events.
+ **/
+void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba)
+{
+ struct lpfc_cq_event *cq_event;
+
+ /* First, declare the nvme xri abort event has been handled */
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~NVME_XRI_ABORT_EVENT;
+ spin_unlock_irq(&phba->hbalock);
+ /* Now, handle all the nvme xri abort events */
+ while (!list_empty(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue)) {
+ /* Get the first event from the head of the event queue */
+ spin_lock_irq(&phba->hbalock);
+ list_remove_head(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
+ cq_event, struct lpfc_cq_event, list);
+ spin_unlock_irq(&phba->hbalock);
+ /* Notify aborted XRI for NVME work queue */
+ if (phba->nvmet_support) {
+ lpfc_sli4_nvmet_xri_aborted(phba,
+ &cq_event->cqe.wcqe_axri);
+ } else {
+ lpfc_sli4_nvme_xri_aborted(phba,
+ &cq_event->cqe.wcqe_axri);
+ }
+ /* Free the event processed back to the free pool */
+ lpfc_sli4_cq_event_release(phba, cq_event);
+ }
+}
+
+/**
* lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
* @phba: pointer to lpfc hba data structure.
*
@@ -12709,10 +12745,22 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
spin_unlock_irqrestore(&phba->hbalock, iflags);
workposted = true;
break;
+ case LPFC_NVME:
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ list_add_tail(&cq_event->list,
+ &phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
+ /* Set the nvme xri abort event flag */
+ phba->hba_flag |= NVME_XRI_ABORT_EVENT;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ workposted = true;
+ break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0603 Invalid work queue CQE subtype (x%x)\n",
- cq->subtype);
+ "0603 Invalid CQ subtype %d: "
+ "%08x %08x %08x %08x\n",
+ cq->subtype, wcqe->word0, wcqe->parameter,
+ wcqe->word2, wcqe->word3);
+ lpfc_sli4_cq_event_release(phba, cq_event);
workposted = false;
break;
}
@@ -13710,7 +13758,10 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
lpfc_free_rq_buffer(queue->phba, queue);
kfree(queue->rqbp);
}
- kfree(queue->pring);
+
+ if (!list_empty(&queue->wq_list))
+ list_del(&queue->wq_list);
+
kfree(queue);
return;
}
@@ -13827,6 +13878,8 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
* @startq: The starting FCP EQ to modify
*
* This function sends an MODIFY_EQ_DELAY mailbox command to the HBA.
+ * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ IDs to be
+ * updated in one mailbox command.
*
* The @phba struct is used to send mailbox command to HBA. The @startq
* is used to get the starting FCP EQ to change.
@@ -13879,7 +13932,7 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq)
eq_delay->u.request.eq[cnt].phase = 0;
eq_delay->u.request.eq[cnt].delay_multi = dmult;
cnt++;
- if (cnt >= LPFC_MAX_EQ_DELAY)
+ if (cnt >= LPFC_MAX_EQ_DELAY_EQID_CNT)
break;
}
eq_delay->u.request.num_eq = cnt;
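Worked example for the batching described above (LPFC_MAX_EQ_DELAY_EQID_CNT's value is not shown in this hunk; 8 is only an assumed figure): with 32 EQs and a batch limit of 8, updating every EQ would take ceil(32 / 8) = 4 MODIFY_EQ_DELAY mailbox commands, each carrying up to 8 EQ IDs starting from @startq.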
@@ -14688,6 +14741,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
case LPFC_Q_CREATE_VERSION_1:
bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
wq->entry_count);
+ bf_set(lpfc_mbox_hdr_version, &shdr->request,
+ LPFC_Q_CREATE_VERSION_1);
+
switch (wq->entry_size) {
default:
case 64:
@@ -15185,17 +15241,17 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
drq = drqp[idx];
cq = cqp[idx];
- if (hrq->entry_count != drq->entry_count) {
- status = -EINVAL;
- goto out;
- }
-
/* sanity check on queue memory */
if (!hrq || !drq || !cq) {
status = -ENODEV;
goto out;
}
+ if (hrq->entry_count != drq->entry_count) {
+ status = -EINVAL;
+ goto out;
+ }
+
if (idx == 0) {
bf_set(lpfc_mbx_rq_create_num_pages,
&rq_create->u.request,
@@ -15511,6 +15567,8 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
}
/* Remove wq from any list */
list_del_init(&wq->list);
+ kfree(wq->pring);
+ wq->pring = NULL;
mempool_free(mbox, wq->phba->mbox_mem_pool);
return status;
}
@@ -16463,7 +16521,7 @@ lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
* This function sends a basic response to a previous unsol sequence abort
* event after aborting the sequence handling.
**/
-static void
+void
lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
struct fc_frame_header *fc_hdr, bool aborted)
{
@@ -16484,14 +16542,13 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
ndlp = lpfc_findnode_did(vport, sid);
if (!ndlp) {
- ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ ndlp = lpfc_nlp_init(vport, sid);
if (!ndlp) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
"1268 Failed to allocate ndlp for "
"oxid:x%x SID:x%x\n", oxid, sid);
return;
}
- lpfc_nlp_init(vport, ndlp, sid);
/* Put ndlp onto pport node list */
lpfc_enqueue_node(vport, ndlp);
} else if (!NLP_CHK_NODE_ACT(ndlp)) {
@@ -16640,6 +16697,11 @@ lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
}
lpfc_in_buf_free(phba, &dmabuf->dbuf);
+ if (phba->nvmet_support) {
+ lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
+ return;
+ }
+
/* Respond with BA_ACC or BA_RJT accordingly */
lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
}
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 91153c9f6d18..da46471337c8 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -620,7 +620,7 @@ struct lpfc_sli4_hba {
struct list_head lpfc_els_sgl_list;
struct list_head lpfc_abts_els_sgl_list;
struct list_head lpfc_nvmet_sgl_list;
- struct list_head lpfc_abts_nvmet_sgl_list;
+ struct list_head lpfc_abts_nvmet_ctx_list;
struct list_head lpfc_abts_scsi_buf_list;
struct list_head lpfc_abts_nvme_buf_list;
struct lpfc_sglq **lpfc_sglq_active_list;
@@ -642,6 +642,7 @@ struct lpfc_sli4_hba {
struct list_head sp_asynce_work_queue;
struct list_head sp_fcp_xri_aborted_work_queue;
struct list_head sp_els_xri_aborted_work_queue;
+ struct list_head sp_nvme_xri_aborted_work_queue;
struct list_head sp_unsol_work_queue;
struct lpfc_sli4_link link_state;
struct lpfc_sli4_lnk_info lnk_info;
@@ -794,9 +795,14 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
+void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba);
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
struct sli4_wcqe_xri_aborted *);
+void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
+ struct sli4_wcqe_xri_aborted *axri);
+void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
+ struct sli4_wcqe_xri_aborted *axri);
void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
struct sli4_wcqe_xri_aborted *);
void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 86c6c9b26b82..1c26dc67151b 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "11.2.0.7"
+#define LPFC_DRIVER_VERSION "11.2.0.12"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 9a0339dbc024..c714482bf4c5 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -738,10 +738,9 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (!ndlp) {
/* Cannot find existing Fabric ndlp, allocate one */
- ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ ndlp = lpfc_nlp_init(vport, Fabric_DID);
if (!ndlp)
goto skip_logo;
- lpfc_nlp_init(vport, ndlp, Fabric_DID);
/* Indicate free memory when release */
NLP_SET_FREE_REQ(ndlp);
} else {
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 999699d45e14..cdb61eaa2d1f 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -644,7 +644,7 @@ static void __exit mac_esp_exit(void)
}
MODULE_DESCRIPTION("Mac ESP SCSI driver");
-MODULE_AUTHOR("Finn Thain <fthain@telegraphics.com.au>");
+MODULE_AUTHOR("Finn Thain");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_MODULE_NAME);
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index e7e5974e1a2c..2b209bbb4c91 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.701.16.00-rc1"
-#define MEGASAS_RELDATE "February 2, 2017"
+#define MEGASAS_VERSION "07.701.17.00-rc1"
+#define MEGASAS_RELDATE "March 2, 2017"
/*
* Device IDs
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 7ac9a9ee9bd4..0016f12cc563 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1963,6 +1963,9 @@ scan_target:
if (!mr_device_priv_data)
return -ENOMEM;
sdev->hostdata = mr_device_priv_data;
+
+ atomic_set(&mr_device_priv_data->r1_ldio_hint,
+ instance->r1_ldio_hint_default);
return 0;
}
@@ -5034,10 +5037,12 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
&instance->irq_context[j]);
/* Retry irq register for IO_APIC*/
instance->msix_vectors = 0;
- if (is_probe)
+ if (is_probe) {
+ pci_free_irq_vectors(instance->pdev);
return megasas_setup_irqs_ioapic(instance);
- else
+ } else {
return -1;
+ }
}
}
return 0;
@@ -5277,9 +5282,11 @@ static int megasas_init_fw(struct megasas_instance *instance)
MPI2_REPLY_POST_HOST_INDEX_OFFSET);
}
- i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
- if (i < 0)
- goto fail_setup_irqs;
+ if (!instance->msix_vectors) {
+ i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
+ if (i < 0)
+ goto fail_setup_irqs;
+ }
dev_info(&instance->pdev->dev,
"firmware supports msix\t: (%d)", fw_msix_count);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 29650ba669da..f990ab4d45e1 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -2159,7 +2159,7 @@ megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION *praid_context,
cpu_sel = MR_RAID_CTX_CPUSEL_1;
if (is_stream_detected(rctx_g35) &&
- (raid->level == 5) &&
+ ((raid->level == 5) || (raid->level == 6)) &&
(raid->writeMode == MR_RL_WRITE_THROUGH_MODE) &&
(cpu_sel == MR_RAID_CTX_CPUSEL_FCFS))
cpu_sel = MR_RAID_CTX_CPUSEL_0;
@@ -2338,7 +2338,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
fp_possible = false;
atomic_dec(&instance->fw_outstanding);
} else if ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) ||
- atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint)) {
+ (atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint) > 0)) {
fp_possible = false;
atomic_dec(&instance->fw_outstanding);
if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 7fe7e6ed595b..8981806fb13f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -1442,9 +1442,6 @@ void mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
u64 sas_address, u16 handle, u8 phy_number, u8 link_rate);
extern struct sas_function_template mpt3sas_transport_functions;
extern struct scsi_transport_template *mpt3sas_transport_template;
-extern int scsi_internal_device_block(struct scsi_device *sdev);
-extern int scsi_internal_device_unblock(struct scsi_device *sdev,
- enum scsi_device_state new_state);
/* trigger data externs */
void mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 1a268a685815..a5d872664257 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2859,7 +2859,7 @@ _scsih_internal_device_block(struct scsi_device *sdev,
sas_device_priv_data->sas_target->handle);
sas_device_priv_data->block = 1;
- r = scsi_internal_device_block(sdev);
+ r = scsi_internal_device_block(sdev, false);
if (r == -EINVAL)
sdev_printk(KERN_WARNING, sdev,
"device_block failed with return(%d) for handle(0x%04x)\n",
@@ -2895,7 +2895,7 @@ _scsih_internal_device_unblock(struct scsi_device *sdev,
"performing a block followed by an unblock\n",
r, sas_device_priv_data->sas_target->handle);
sas_device_priv_data->block = 1;
- r = scsi_internal_device_block(sdev);
+ r = scsi_internal_device_block(sdev, false);
if (r)
sdev_printk(KERN_WARNING, sdev, "retried device_block "
"failed with return(%d) for handle(0x%04x)\n",
@@ -4677,7 +4677,6 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
struct MPT3SAS_DEVICE *sas_device_priv_data;
u32 response_code = 0;
unsigned long flags;
- unsigned int sector_sz;
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
@@ -4742,20 +4741,6 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
}
xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
-
- /* In case of bogus fw or device, we could end up having
- * unaligned partial completion. We can force alignment here,
- * then scsi-ml does not need to handle this misbehavior.
- */
- sector_sz = scmd->device->sector_size;
- if (unlikely(!blk_rq_is_passthrough(scmd->request) && sector_sz &&
- xfer_cnt % sector_sz)) {
- sdev_printk(KERN_INFO, scmd->device,
- "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n",
- xfer_cnt, sector_sz);
- xfer_cnt = round_down(xfer_cnt, sector_sz);
- }
-
scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 6903f03c88af..8a1b94816419 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -477,7 +477,7 @@ static void _set_error_resid(struct osd_request *or, struct request *req,
int error)
{
or->async_error = error;
- or->req_errors = req->errors ? : error;
+ or->req_errors = scsi_req(req)->result ? : error;
or->sense_len = scsi_req(req)->sense_len;
if (or->sense_len)
memcpy(or->sense, scsi_req(req)->sense, or->sense_len);
@@ -489,7 +489,10 @@ static void _set_error_resid(struct osd_request *or, struct request *req,
int osd_execute_request(struct osd_request *or)
{
- int error = blk_execute_rq(or->request->q, NULL, or->request, 0);
+ int error;
+
+ blk_execute_rq(or->request->q, NULL, or->request, 0);
+ error = scsi_req(or->request)->result ? -EIO : 0;
_set_error_resid(or, or->request, error);
return error;
@@ -1602,7 +1605,7 @@ static int _init_blk_request(struct osd_request *or,
req->rq_flags |= RQF_QUIET;
req->timeout = or->timeout;
- req->retries = or->retries;
+ scsi_req(req)->retries = or->retries;
if (has_out) {
or->out.req = req;
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index c47f4b349bac..67cbed92f07d 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -327,7 +327,7 @@ static void osst_end_async(struct request *req, int update)
struct osst_tape *STp = SRpnt->stp;
struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
- STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
+ STp->buffer->cmdstat.midlevel_result = SRpnt->result = rq->result;
#if DEBUG
STp->write_pending = 0;
#endif
@@ -414,7 +414,7 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
memcpy(rq->cmd, cmd, rq->cmd_len);
req->timeout = timeout;
- req->retries = retries;
+ rq->retries = retries;
req->end_io_data = SRpnt;
blk_execute_rq_nowait(req->q, NULL, req, 1, osst_end_async);
diff --git a/drivers/scsi/qedf/Makefile b/drivers/scsi/qedf/Makefile
index 64e9f507ce32..414f2a772a5f 100644
--- a/drivers/scsi/qedf/Makefile
+++ b/drivers/scsi/qedf/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_QEDF) := qedf.o
qedf-y = qedf_dbg.o qedf_main.o qedf_io.o qedf_fip.o \
- qedf_attr.o qedf_els.o
+ qedf_attr.o qedf_els.o drv_scsi_fw_funcs.o drv_fcoe_fw_funcs.o
qedf-$(CONFIG_DEBUG_FS) += qedf_debugfs.o
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
new file mode 100644
index 000000000000..8c65e3b034dc
--- /dev/null
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
@@ -0,0 +1,190 @@
+/* QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#include "drv_fcoe_fw_funcs.h"
+#include "drv_scsi_fw_funcs.h"
+
+#define FCOE_RX_ID (0xFFFFu)
+
+static inline void init_common_sqe(struct fcoe_task_params *task_params,
+ enum fcoe_sqe_request_type request_type)
+{
+ memset(task_params->sqe, 0, sizeof(*(task_params->sqe)));
+ SET_FIELD(task_params->sqe->flags, FCOE_WQE_REQ_TYPE,
+ request_type);
+ task_params->sqe->task_id = task_params->itid;
+}
+
+int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
+ struct scsi_sgl_task_params *sgl_task_params,
+ struct regpair sense_data_buffer_phys_addr,
+ u32 task_retry_id,
+ u8 fcp_cmd_payload[32])
+{
+ struct fcoe_task_context *ctx = task_params->context;
+ struct ystorm_fcoe_task_st_ctx *y_st_ctx;
+ struct tstorm_fcoe_task_st_ctx *t_st_ctx;
+ struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
+ struct mstorm_fcoe_task_st_ctx *m_st_ctx;
+ u32 io_size, val;
+ bool slow_sgl;
+
+ memset(ctx, 0, sizeof(*(ctx)));
+ slow_sgl = scsi_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge);
+ io_size = (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR ?
+ task_params->tx_io_size : task_params->rx_io_size);
+
+ /* Ystorm ctx */
+ y_st_ctx = &ctx->ystorm_st_context;
+ y_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
+ y_st_ctx->task_rety_identifier = cpu_to_le32(task_retry_id);
+ y_st_ctx->task_type = task_params->task_type;
+ memcpy(&y_st_ctx->tx_info_union.fcp_cmd_payload,
+ fcp_cmd_payload, sizeof(struct fcoe_fcp_cmd_payload));
+
+ /* Tstorm ctx */
+ t_st_ctx = &ctx->tstorm_st_context;
+ t_st_ctx->read_only.dev_type = (task_params->is_tape_device == 1 ?
+ FCOE_TASK_DEV_TYPE_TAPE :
+ FCOE_TASK_DEV_TYPE_DISK);
+ t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
+ val = cpu_to_le32(task_params->cq_rss_number);
+ t_st_ctx->read_only.glbl_q_num = val;
+ t_st_ctx->read_only.fcp_cmd_trns_size = cpu_to_le32(io_size);
+ t_st_ctx->read_only.task_type = task_params->task_type;
+ SET_FIELD(t_st_ctx->read_write.flags,
+ FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
+ t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID);
+
+ /* Ustorm ctx */
+ u_ag_ctx = &ctx->ustorm_ag_context;
+ u_ag_ctx->global_cq_num = cpu_to_le32(task_params->cq_rss_number);
+
+ /* Mstorm buffer for sense/rsp data placement */
+ m_st_ctx = &ctx->mstorm_st_context;
+ val = cpu_to_le32(sense_data_buffer_phys_addr.hi);
+ m_st_ctx->rsp_buf_addr.hi = val;
+ val = cpu_to_le32(sense_data_buffer_phys_addr.lo);
+ m_st_ctx->rsp_buf_addr.lo = val;
+
+ if (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR) {
+ /* Ystorm ctx */
+ y_st_ctx->expect_first_xfer = 1;
+
+ /* Set the amount of super SGEs. Can be up to 4. */
+ SET_FIELD(y_st_ctx->sgl_mode,
+ YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
+ (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
+ init_scsi_sgl_context(&y_st_ctx->sgl_params,
+ &y_st_ctx->data_desc,
+ sgl_task_params);
+
+ /* Mstorm ctx */
+ SET_FIELD(m_st_ctx->flags,
+ MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
+ (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
+ } else {
+ /* Tstorm ctx */
+ SET_FIELD(t_st_ctx->read_write.flags,
+ FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE,
+ (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
+
+ /* Mstorm ctx */
+ m_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
+ init_scsi_sgl_context(&m_st_ctx->sgl_params,
+ &m_st_ctx->data_desc,
+ sgl_task_params);
+ }
+
+ init_common_sqe(task_params, SEND_FCOE_CMD);
+ return 0;
+}
+
+int init_initiator_midpath_unsolicited_fcoe_task(
+ struct fcoe_task_params *task_params,
+ struct fcoe_tx_mid_path_params *mid_path_fc_header,
+ struct scsi_sgl_task_params *tx_sgl_task_params,
+ struct scsi_sgl_task_params *rx_sgl_task_params,
+ u8 fw_to_place_fc_header)
+{
+ struct fcoe_task_context *ctx = task_params->context;
+ struct ystorm_fcoe_task_st_ctx *y_st_ctx;
+ struct tstorm_fcoe_task_st_ctx *t_st_ctx;
+ struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
+ struct mstorm_fcoe_task_st_ctx *m_st_ctx;
+ u32 val;
+
+ memset(ctx, 0, sizeof(*(ctx)));
+
+ /* Init Ystorm */
+ y_st_ctx = &ctx->ystorm_st_context;
+ init_scsi_sgl_context(&y_st_ctx->sgl_params,
+ &y_st_ctx->data_desc,
+ tx_sgl_task_params);
+ SET_FIELD(y_st_ctx->sgl_mode,
+ YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, SCSI_FAST_SGL);
+ y_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->tx_io_size);
+ y_st_ctx->task_type = task_params->task_type;
+ memcpy(&y_st_ctx->tx_info_union.tx_params.mid_path,
+ mid_path_fc_header, sizeof(struct fcoe_tx_mid_path_params));
+
+ /* Init Mstorm */
+ m_st_ctx = &ctx->mstorm_st_context;
+ init_scsi_sgl_context(&m_st_ctx->sgl_params,
+ &m_st_ctx->data_desc,
+ rx_sgl_task_params);
+ SET_FIELD(m_st_ctx->flags,
+ MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER,
+ fw_to_place_fc_header);
+ m_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->rx_io_size);
+
+ /* Init Tstorm */
+ t_st_ctx = &ctx->tstorm_st_context;
+ t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
+ val = cpu_to_le32(task_params->cq_rss_number);
+ t_st_ctx->read_only.glbl_q_num = val;
+ t_st_ctx->read_only.task_type = task_params->task_type;
+ SET_FIELD(t_st_ctx->read_write.flags,
+ FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
+ t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID);
+
+ /* Init Ustorm */
+ u_ag_ctx = &ctx->ustorm_ag_context;
+ u_ag_ctx->global_cq_num = cpu_to_le32(task_params->cq_rss_number);
+
+ /* Init SQE */
+ init_common_sqe(task_params, SEND_FCOE_MIDPATH);
+ task_params->sqe->additional_info_union.burst_length =
+ tx_sgl_task_params->total_buffer_size;
+ SET_FIELD(task_params->sqe->flags,
+ FCOE_WQE_NUM_SGES, tx_sgl_task_params->num_sges);
+ SET_FIELD(task_params->sqe->flags, FCOE_WQE_SGL_MODE,
+ SCSI_FAST_SGL);
+
+ return 0;
+}
+
+int init_initiator_abort_fcoe_task(struct fcoe_task_params *task_params)
+{
+ init_common_sqe(task_params, SEND_FCOE_ABTS_REQUEST);
+ return 0;
+}
+
+int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params)
+{
+ init_common_sqe(task_params, FCOE_EXCHANGE_CLEANUP);
+ return 0;
+}
+
+int init_initiator_sequence_recovery_fcoe_task(
+ struct fcoe_task_params *task_params, u32 off)
+{
+ init_common_sqe(task_params, FCOE_SEQUENCE_RECOVERY);
+ task_params->sqe->additional_info_union.seq_rec_updated_offset = off;
+ return 0;
+}
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
new file mode 100644
index 000000000000..617529b058f4
--- /dev/null
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
@@ -0,0 +1,93 @@
+/* QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#ifndef _FCOE_FW_FUNCS_H
+#define _FCOE_FW_FUNCS_H
+#include "drv_scsi_fw_funcs.h"
+#include "qedf_hsi.h"
+#include <linux/qed/qed_if.h>
+
+struct fcoe_task_params {
+ /* Output parameter [set/filled by the HSI function] */
+ struct fcoe_task_context *context;
+
+ /* Output parameter [set/filled by the HSI function] */
+ struct fcoe_wqe *sqe;
+ enum fcoe_task_type task_type;
+ u32 tx_io_size; /* in bytes */
+ u32 rx_io_size; /* in bytes */
+ u32 conn_cid;
+ u16 itid;
+ u8 cq_rss_number;
+
+ /* Whether it's Tape device or not (0=Disk, 1=Tape) */
+ u8 is_tape_device;
+};
+
+/**
+ * @brief init_initiator_rw_fcoe_task - Initializes FCoE task context for
+ * read/write task types and init fcoe_sqe
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param sgl_task_params - Pointer to SGL task params
+ * @param sense_data_buffer_phys_addr - Physical address of sense data buffer
+ * @param task_retry_id - Retry identifier - used only for tape devices
+ * @param fcp_cmd_payload - FCP CMD payload
+ */
+int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
+ struct scsi_sgl_task_params *sgl_task_params,
+ struct regpair sense_data_buffer_phys_addr,
+ u32 task_retry_id,
+ u8 fcp_cmd_payload[32]);
+
+/**
+ * @brief init_initiator_midpath_fcoe_task - Initializes FCoE task context for
+ * midpath/unsolicited task types and init fcoe_sqe
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param mid_path_fc_header - FC header
+ * @param tx_sgl_task_params - Pointer to Tx SGL task params
+ * @param rx_sgl_task_params - Pointer to Rx SGL task params
+ * @param fw_to_place_fc_header - Indication of whether the FW should place
+ * the FC header in addition to the arriving data.
+ */
+int init_initiator_midpath_unsolicited_fcoe_task(
+ struct fcoe_task_params *task_params,
+ struct fcoe_tx_mid_path_params *mid_path_fc_header,
+ struct scsi_sgl_task_params *tx_sgl_task_params,
+ struct scsi_sgl_task_params *rx_sgl_task_params,
+ u8 fw_to_place_fc_header);
+
+/**
+ * @brief init_initiator_abort_fcoe_task - Initializes FCoE task context for
+ * abort task types and init fcoe_sqe
+ *
+ * @param task_params - Pointer to task parameters struct
+ */
+int init_initiator_abort_fcoe_task(struct fcoe_task_params *task_params);
+
+/**
+ * @brief init_initiator_cleanup_fcoe_task - Initializes FCoE task context for
+ * cleanup task types and init fcoe_sqe
+ *
+ * @param task_params - Pointer to task parameters struct
+ */
+int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params);
+
+/**
+ * @brief init_initiator_sequence_recovery_fcoe_task - Initializes FCoE task
+ * context for sequence recovery task types and init fcoe_sqe
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param desired_offset - The desired offset the task will be re-sent from
+ */
+int init_initiator_sequence_recovery_fcoe_task(
+ struct fcoe_task_params *task_params,
+ u32 desired_offset);
+#endif
diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.c b/drivers/scsi/qedf/drv_scsi_fw_funcs.c
new file mode 100644
index 000000000000..11e0cc082ec0
--- /dev/null
+++ b/drivers/scsi/qedf/drv_scsi_fw_funcs.c
@@ -0,0 +1,44 @@
+/* QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#include "drv_scsi_fw_funcs.h"
+
+#define SCSI_NUM_SGES_IN_CACHE 0x4
+
+bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge)
+{
+ return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge);
+}
+
+void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
+ struct scsi_cached_sges *ctx_data_desc,
+ struct scsi_sgl_task_params *sgl_task_params)
+{
+ /* no need to check for sgl_task_params->sgl validity */
+ u8 num_sges_to_init = sgl_task_params->num_sges >
+ SCSI_NUM_SGES_IN_CACHE ? SCSI_NUM_SGES_IN_CACHE :
+ sgl_task_params->num_sges;
+ u8 sge_index;
+ u32 val;
+
+ val = cpu_to_le32(sgl_task_params->sgl_phys_addr.lo);
+ ctx_sgl_params->sgl_addr.lo = val;
+ val = cpu_to_le32(sgl_task_params->sgl_phys_addr.hi);
+ ctx_sgl_params->sgl_addr.hi = val;
+ val = cpu_to_le32(sgl_task_params->total_buffer_size);
+ ctx_sgl_params->sgl_total_length = val;
+ ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges);
+
+ for (sge_index = 0; sge_index < num_sges_to_init; sge_index++) {
+ val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.lo);
+ ctx_data_desc->sge[sge_index].sge_addr.lo = val;
+ val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.hi);
+ ctx_data_desc->sge[sge_index].sge_addr.hi = val;
+ val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_len);
+ ctx_data_desc->sge[sge_index].sge_len = val;
+ }
+}
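+
+/* Note: only the first SCSI_NUM_SGES_IN_CACHE (4) SGEs are copied into the
+ * cached descriptor above; the rest of the list stays described by
+ * sgl_addr/sgl_num_sges (assumption: the firmware fetches any SGEs beyond
+ * the cached ones from the SGL in host memory).
+ */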
diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.h b/drivers/scsi/qedf/drv_scsi_fw_funcs.h
new file mode 100644
index 000000000000..9cb45410bc45
--- /dev/null
+++ b/drivers/scsi/qedf/drv_scsi_fw_funcs.h
@@ -0,0 +1,85 @@
+/* QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#ifndef _SCSI_FW_FUNCS_H
+#define _SCSI_FW_FUNCS_H
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/storage_common.h>
+#include <linux/qed/fcoe_common.h>
+
+struct scsi_sgl_task_params {
+ struct scsi_sge *sgl;
+ struct regpair sgl_phys_addr;
+ u32 total_buffer_size;
+ u16 num_sges;
+
+ /* True if the SGL contains a small (< 4KB) SGE in the middle (not the
+ * 1st or last SGE) - relevant for Tx only
+ */
+ bool small_mid_sge;
+};
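+
+/* Illustrative sketch (not compiled): for regular read/write commands the
+ * qedf driver populates this struct from the command's BD table in
+ * qedf_init_task(), roughly as follows:
+ *
+ *	io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
+ *	io_req->sgl_task_params->sgl_phys_addr.lo = U64_LO(bd_tbl->bd_tbl_dma);
+ *	io_req->sgl_task_params->sgl_phys_addr.hi = U64_HI(bd_tbl->bd_tbl_dma);
+ *	io_req->sgl_task_params->num_sges = bd_tbl->bd_valid;
+ *	io_req->sgl_task_params->total_buffer_size =
+ *		scsi_bufflen(io_req->sc_cmd);
+ *	io_req->sgl_task_params->small_mid_sge = io_req->use_slowpath;
+ */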
+
+struct scsi_dif_task_params {
+ u32 initial_ref_tag;
+ bool initial_ref_tag_is_valid;
+ u16 application_tag;
+ u16 application_tag_mask;
+ u16 dif_block_size_log;
+ bool dif_on_network;
+ bool dif_on_host;
+ u8 host_guard_type;
+ u8 protection_type;
+ u8 ref_tag_mask;
+ bool crc_seed;
+
+ /* Enable Connection error upon DIF error (segments with DIF errors are
+ * dropped)
+ */
+ bool tx_dif_conn_err_en;
+ bool ignore_app_tag;
+ bool keep_ref_tag_const;
+ bool validate_guard;
+ bool validate_app_tag;
+ bool validate_ref_tag;
+ bool forward_guard;
+ bool forward_app_tag;
+ bool forward_ref_tag;
+ bool forward_app_tag_with_mask;
+ bool forward_ref_tag_with_mask;
+};
+
+struct scsi_initiator_cmd_params {
+ /* for cdb_size > default CDB size (extended CDB > 16 bytes) ->
+ * pointer to the CDB buffer SGE
+ */
+ struct scsi_sge extended_cdb_sge;
+
+ /* Physical address of sense data buffer for sense data - 256B buffer */
+ struct regpair sense_data_buffer_phys_addr;
+};
+
+/**
+ * @brief scsi_is_slow_sgl - checks for slow SGL
+ *
+ * @param num_sges - number of sges in SGL
+ * @param small_mid_sge - True if the SGL contains an SGE which is smaller than
+ * 4KB and it is not the 1st or last SGE in the SGL
+ */
+bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge);
+
+/**
+ * @brief init_scsi_sgl_context - initializes SGL task context
+ *
+ * @param sgl_params - SGL context parameters to initialize (output parameter)
+ * @param ctx_data_desc - context struct containing the SGE array to set
+ * (output parameter)
+ * @param sgl_task_params - SGL parameters (input)
+ */
+void init_scsi_sgl_context(struct scsi_sgl_params *sgl_params,
+ struct scsi_cached_sges *ctx_data_desc,
+ struct scsi_sgl_task_params *sgl_task_params);
+#endif
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 96346a1b1515..40aeb6bb96a2 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -26,6 +26,7 @@
#include <linux/qed/qed_ll2_if.h>
#include "qedf_version.h"
#include "qedf_dbg.h"
+#include "drv_fcoe_fw_funcs.h"
/* Helpers to extract upper and lower 32-bits of pointer */
#define U64_HI(val) ((u32)(((u64)(val)) >> 32))
@@ -59,19 +60,17 @@
#define UPSTREAM_KEEP 1
struct qedf_mp_req {
- uint8_t tm_flags;
-
uint32_t req_len;
void *req_buf;
dma_addr_t req_buf_dma;
- struct fcoe_sge *mp_req_bd;
+ struct scsi_sge *mp_req_bd;
dma_addr_t mp_req_bd_dma;
struct fc_frame_header req_fc_hdr;
uint32_t resp_len;
void *resp_buf;
dma_addr_t resp_buf_dma;
- struct fcoe_sge *mp_resp_bd;
+ struct scsi_sge *mp_resp_bd;
dma_addr_t mp_resp_bd_dma;
struct fc_frame_header resp_fc_hdr;
};
@@ -119,6 +118,7 @@ struct qedf_ioreq {
#define QEDF_CMD_IN_CLEANUP 0x2
#define QEDF_CMD_SRR_SENT 0x3
u8 io_req_flags;
+ uint8_t tm_flags;
struct qedf_rport *fcport;
unsigned long flags;
enum qedf_ioreq_event event;
@@ -130,6 +130,8 @@ struct qedf_ioreq {
struct completion tm_done;
struct completion abts_done;
struct fcoe_task_context *task;
+ struct fcoe_task_params *task_params;
+ struct scsi_sgl_task_params *sgl_task_params;
int idx;
/*
* Need to allocate enough room for both sense data and FCP response data
@@ -199,8 +201,8 @@ struct qedf_rport {
dma_addr_t sq_pbl_dma;
u32 sq_pbl_size;
u32 sid;
-#define QEDF_RPORT_TYPE_DISK 1
-#define QEDF_RPORT_TYPE_TAPE 2
+#define QEDF_RPORT_TYPE_DISK 0
+#define QEDF_RPORT_TYPE_TAPE 1
uint dev_type; /* Disk or tape */
struct list_head peers;
};
@@ -391,7 +393,7 @@ struct qedf_ctx {
struct io_bdt {
struct qedf_ioreq *io_req;
- struct fcoe_sge *bd_tbl;
+ struct scsi_sge *bd_tbl;
dma_addr_t bd_tbl_dma;
u16 bd_valid;
};
@@ -400,7 +402,7 @@ struct qedf_cmd_mgr {
struct qedf_ctx *qedf;
u16 idx;
struct io_bdt **io_bdt_pool;
-#define FCOE_PARAMS_NUM_TASKS 4096
+#define FCOE_PARAMS_NUM_TASKS 2048
struct qedf_ioreq cmds[FCOE_PARAMS_NUM_TASKS];
spinlock_t lock;
atomic_t free_list_cnt;
@@ -465,9 +467,8 @@ extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
unsigned int timer_msec);
extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
extern void qedf_init_mp_task(struct qedf_ioreq *io_req,
- struct fcoe_task_context *task_ctx);
-extern void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid,
- u32 ptu_invalidate, enum fcoe_task_type req_type, u32 offset);
+ struct fcoe_task_context *task_ctx, struct fcoe_wqe *wqe);
+extern u16 qedf_get_sqe_idx(struct qedf_rport *fcport);
extern void qedf_ring_doorbell(struct qedf_rport *fcport);
extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *els_req);
diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h
index 23bd70628a2f..7d173f48a81e 100644
--- a/drivers/scsi/qedf/qedf_dbg.h
+++ b/drivers/scsi/qedf/qedf_dbg.h
@@ -81,14 +81,17 @@ struct qedf_dbg_ctx {
#define QEDF_INFO(pdev, level, fmt, ...) \
qedf_dbg_info(pdev, __func__, __LINE__, level, fmt, \
## __VA_ARGS__)
-
-extern void qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
+__printf(4, 5)
+void qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
const char *fmt, ...);
-extern void qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
+__printf(4, 5)
+void qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
const char *, ...);
-extern void qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func,
+__printf(4, 5)
+void qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func,
u32 line, const char *, ...);
-extern void qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
+__printf(5, 6)
+void qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
u32 info, const char *fmt, ...);
/* GRC Dump related defines */
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index 59f3e5c73a13..c505d41f6dc8 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -25,6 +25,9 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
uint16_t xid;
uint32_t start_time = jiffies / HZ;
uint32_t current_time;
+ struct fcoe_wqe *sqe;
+ unsigned long flags;
+ u16 sqe_idx;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");
@@ -113,20 +116,25 @@ retry_els:
/* Obtain exchange id */
xid = els_req->xid;
+ spin_lock_irqsave(&fcport->rport_lock, flags);
+
+ sqe_idx = qedf_get_sqe_idx(fcport);
+ sqe = &fcport->sq[sqe_idx];
+ memset(sqe, 0, sizeof(struct fcoe_wqe));
+
/* Initialize task context for this IO request */
task = qedf_get_task_mem(&qedf->tasks, xid);
- qedf_init_mp_task(els_req, task);
+ qedf_init_mp_task(els_req, task, sqe);
/* Put timer on original I/O request */
if (timer_msec)
qedf_cmd_timer_set(qedf, els_req, timer_msec);
- qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0);
-
/* Ring doorbell */
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
"req\n");
qedf_ring_doorbell(fcport);
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
els_err:
return rc;
}
@@ -604,6 +612,8 @@ static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
struct qedf_rport *fcport;
unsigned long flags;
struct qedf_els_cb_arg *cb_arg;
+ struct fcoe_wqe *sqe;
+ u16 sqe_idx;
fcport = orig_io_req->fcport;
@@ -631,8 +641,13 @@ static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
spin_lock_irqsave(&fcport->rport_lock, flags);
- qedf_add_to_sq(fcport, orig_io_req->xid, 0,
- FCOE_TASK_TYPE_SEQUENCE_CLEANUP, offset);
+ sqe_idx = qedf_get_sqe_idx(fcport);
+ sqe = &fcport->sq[sqe_idx];
+ memset(sqe, 0, sizeof(struct fcoe_wqe));
+ orig_io_req->task_params->sqe = sqe;
+
+ init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params,
+ offset);
qedf_ring_doorbell(fcport);
spin_unlock_irqrestore(&fcport->rport_lock, flags);
diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c
index 868d423380d1..e10b91cc3c62 100644
--- a/drivers/scsi/qedf/qedf_fip.c
+++ b/drivers/scsi/qedf/qedf_fip.c
@@ -99,7 +99,8 @@ static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf,
qedf_set_vlan_id(qedf, vid);
/* Inform waiter that it's ok to call fcoe_ctlr_link up() */
- complete(&qedf->fipvlan_compl);
+ if (!completion_done(&qedf->fipvlan_compl))
+ complete(&qedf->fipvlan_compl);
}
}
@@ -203,7 +204,7 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
case FIP_DT_MAC:
mp = (struct fip_mac_desc *)desc;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
- "fd_mac=%pM.\n", __func__, mp->fd_mac);
+ "fd_mac=%pM\n", mp->fd_mac);
ether_addr_copy(cvl_mac, mp->fd_mac);
break;
case FIP_DT_NAME:
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index ee0dcf9d3aba..1d7f90d0adc1 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -96,7 +96,7 @@ void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
if (!cmgr->io_bdt_pool)
goto free_cmd_pool;
- bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge);
+ bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
for (i = 0; i < num_ios; i++) {
bdt_info = cmgr->io_bdt_pool[i];
if (bdt_info->bd_tbl) {
@@ -119,6 +119,8 @@ free_cmd_pool:
for (i = 0; i < num_ios; i++) {
io_req = &cmgr->cmds[i];
+ kfree(io_req->sgl_task_params);
+ kfree(io_req->task_params);
/* Make sure we free per command sense buffer */
if (io_req->sense_buffer)
dma_free_coherent(&qedf->pdev->dev,
@@ -178,7 +180,7 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
spin_lock_init(&cmgr->lock);
/*
- * Initialize list of qedf_ioreq.
+ * Initialize I/O request fields.
*/
xid = QEDF_MIN_XID;
@@ -196,6 +198,29 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
GFP_KERNEL);
if (!io_req->sense_buffer)
goto mem_err;
+
+ /* Allocate task parameters to pass to f/w init functions */
+ io_req->task_params = kzalloc(sizeof(*io_req->task_params),
+ GFP_KERNEL);
+ if (!io_req->task_params) {
+ QEDF_ERR(&(qedf->dbg_ctx),
+ "Failed to allocate task_params for xid=0x%x\n",
+ i);
+ goto mem_err;
+ }
+
+ /*
+ * Allocate scatter/gather list info to pass to f/w init
+ * functions.
+ */
+ io_req->sgl_task_params = kzalloc(
+ sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
+ if (!io_req->sgl_task_params) {
+ QEDF_ERR(&(qedf->dbg_ctx),
+ "Failed to allocate sgl_task_params for xid=0x%x\n",
+ i);
+ goto mem_err;
+ }
}
/* Allocate pool of io_bdts - one for each qedf_ioreq */
@@ -211,8 +236,8 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
GFP_KERNEL);
if (!cmgr->io_bdt_pool[i]) {
- QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc "
- "io_bdt_pool[%d].\n", i);
+ QEDF_WARN(&(qedf->dbg_ctx),
+ "Failed to alloc io_bdt_pool[%d].\n", i);
goto mem_err;
}
}
@@ -220,11 +245,11 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
for (i = 0; i < num_ios; i++) {
bdt_info = cmgr->io_bdt_pool[i];
bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
- QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge),
+ QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
&bdt_info->bd_tbl_dma, GFP_KERNEL);
if (!bdt_info->bd_tbl) {
- QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc "
- "bdt_tbl[%d].\n", i);
+ QEDF_WARN(&(qedf->dbg_ctx),
+ "Failed to alloc bdt_tbl[%d].\n", i);
goto mem_err;
}
}
@@ -318,6 +343,7 @@ struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
}
bd_tbl->io_req = io_req;
io_req->cmd_type = cmd_type;
+ io_req->tm_flags = 0;
/* Reset sequence offset data */
io_req->rx_buf_off = 0;
@@ -336,10 +362,9 @@ static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
{
struct qedf_mp_req *mp_req = &(io_req->mp_req);
struct qedf_ctx *qedf = io_req->fcport->qedf;
- uint64_t sz = sizeof(struct fcoe_sge);
+ uint64_t sz = sizeof(struct scsi_sge);
/* clear tm flags */
- mp_req->tm_flags = 0;
if (mp_req->mp_req_bd) {
dma_free_coherent(&qedf->pdev->dev, sz,
mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
@@ -387,7 +412,7 @@ void qedf_release_cmd(struct kref *ref)
static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
int bd_index)
{
- struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
+ struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
int frag_size, sg_frags;
sg_frags = 0;
@@ -398,7 +423,7 @@ static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
frag_size = sg_len;
bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr);
bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr);
- bd[bd_index + sg_frags].size = (uint16_t)frag_size;
+ bd[bd_index + sg_frags].sge_len = (uint16_t)frag_size;
addr += (u64)frag_size;
sg_frags++;
@@ -413,7 +438,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
struct Scsi_Host *host = sc->device->host;
struct fc_lport *lport = shost_priv(host);
struct qedf_ctx *qedf = lport_priv(lport);
- struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
+ struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
struct scatterlist *sg;
int byte_count = 0;
int sg_count = 0;
@@ -439,7 +464,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
bd[bd_count].sge_addr.hi = (addr >> 32);
- bd[bd_count].size = (u16)sg_len;
+ bd[bd_count].sge_len = (u16)sg_len;
return ++bd_count;
}
@@ -480,7 +505,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
sg_frags = 1;
bd[bd_count].sge_addr.lo = U64_LO(addr);
bd[bd_count].sge_addr.hi = U64_HI(addr);
- bd[bd_count].size = (uint16_t)sg_len;
+ bd[bd_count].sge_len = (uint16_t)sg_len;
}
bd_count += sg_frags;
@@ -498,7 +523,7 @@ static int qedf_map_sg(struct qedf_ioreq *io_req)
static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
{
struct scsi_cmnd *sc = io_req->sc_cmd;
- struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
+ struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
int bd_count;
if (scsi_sg_count(sc)) {
@@ -508,7 +533,7 @@ static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
} else {
bd_count = 0;
bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
- bd[0].size = 0;
+ bd[0].sge_len = 0;
}
io_req->bd_tbl->bd_valid = bd_count;
@@ -529,430 +554,223 @@ static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
/* 4 bytes: flag info */
fcp_cmnd->fc_pri_ta = 0;
- fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
+ fcp_cmnd->fc_tm_flags = io_req->tm_flags;
fcp_cmnd->fc_flags = io_req->io_req_flags;
fcp_cmnd->fc_cmdref = 0;
/* Populate data direction */
- if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
- fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
- else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
+ if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
+ } else {
+ if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
+ fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
+ else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
+ fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
+ }
fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
/* 16 bytes: CDB information */
- memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
+ if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
+ memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
/* 4 bytes: FCP data length */
fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
-
}
static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
- struct qedf_ioreq *io_req, u32 *ptu_invalidate,
- struct fcoe_task_context *task_ctx)
+ struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
+ struct fcoe_wqe *sqe)
{
enum fcoe_task_type task_type;
struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
struct io_bdt *bd_tbl = io_req->bd_tbl;
- union fcoe_data_desc_ctx *data_desc;
- u32 *fcp_cmnd;
+ u8 fcp_cmnd[32];
u32 tmp_fcp_cmnd[8];
- int cnt, i;
- int bd_count;
+ int bd_count = 0;
struct qedf_ctx *qedf = fcport->qedf;
uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
- u8 tmp_sgl_mode = 0;
- u8 mst_sgl_mode = 0;
+ struct regpair sense_data_buffer_phys_addr;
+ u32 tx_io_size = 0;
+ u32 rx_io_size = 0;
+ int i, cnt;
- memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+ /* Note init_initiator_rw_fcoe_task memsets the task context */
io_req->task = task_ctx;
+ memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+ memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
+ memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
- if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
- task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
- else
+ /* Set task type based on the DMA direction of the command */
+ if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
task_type = FCOE_TASK_TYPE_READ_INITIATOR;
-
- /* Y Storm context */
- task_ctx->ystorm_st_context.expect_first_xfer = 1;
- task_ctx->ystorm_st_context.data_2_trns_rem = io_req->data_xfer_len;
- /* Check if this is required */
- task_ctx->ystorm_st_context.ox_id = io_req->xid;
- task_ctx->ystorm_st_context.task_rety_identifier =
- io_req->task_retry_identifier;
-
- /* T Storm ag context */
- SET_FIELD(task_ctx->tstorm_ag_context.flags0,
- TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, PROTOCOLID_FCOE);
- task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid;
-
- /* T Storm st context */
- SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
- FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME,
- 1);
- task_ctx->tstorm_st_context.read_write.rx_id = 0xffff;
-
- task_ctx->tstorm_st_context.read_only.dev_type =
- FCOE_TASK_DEV_TYPE_DISK;
- task_ctx->tstorm_st_context.read_only.conf_supported = 0;
- task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid;
-
- /* Completion queue for response. */
- task_ctx->tstorm_st_context.read_only.glbl_q_num = cq_idx;
- task_ctx->tstorm_st_context.read_only.fcp_cmd_trns_size =
- io_req->data_xfer_len;
- task_ctx->tstorm_st_context.read_write.e_d_tov_exp_timeout_val =
- lport->e_d_tov;
-
- task_ctx->ustorm_ag_context.global_cq_num = cq_idx;
- io_req->fp_idx = cq_idx;
-
- bd_count = bd_tbl->bd_valid;
- if (task_type == FCOE_TASK_TYPE_WRITE_INITIATOR) {
- /* Setup WRITE task */
- struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl;
-
- task_ctx->ystorm_st_context.task_type =
- FCOE_TASK_TYPE_WRITE_INITIATOR;
- data_desc = &task_ctx->ystorm_st_context.data_desc;
-
- if (io_req->use_slowpath) {
- SET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
- YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
- FCOE_SLOW_SGL);
- data_desc->slow.base_sgl_addr.lo =
- U64_LO(bd_tbl->bd_tbl_dma);
- data_desc->slow.base_sgl_addr.hi =
- U64_HI(bd_tbl->bd_tbl_dma);
- data_desc->slow.remainder_num_sges = bd_count;
- data_desc->slow.curr_sge_off = 0;
- data_desc->slow.curr_sgl_index = 0;
- qedf->slow_sge_ios++;
- io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
- } else {
- SET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
- YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
- (bd_count <= 4) ? (enum fcoe_sgl_mode)bd_count :
- FCOE_MUL_FAST_SGES);
-
- if (bd_count == 1) {
- data_desc->single_sge.sge_addr.lo =
- fcoe_bd_tbl->sge_addr.lo;
- data_desc->single_sge.sge_addr.hi =
- fcoe_bd_tbl->sge_addr.hi;
- data_desc->single_sge.size =
- fcoe_bd_tbl->size;
- data_desc->single_sge.is_valid_sge = 0;
- qedf->single_sge_ios++;
- io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
- } else {
- data_desc->fast.sgl_start_addr.lo =
- U64_LO(bd_tbl->bd_tbl_dma);
- data_desc->fast.sgl_start_addr.hi =
- U64_HI(bd_tbl->bd_tbl_dma);
- data_desc->fast.sgl_byte_offset =
- data_desc->fast.sgl_start_addr.lo &
- (QEDF_PAGE_SIZE - 1);
- if (data_desc->fast.sgl_byte_offset > 0)
- QEDF_ERR(&(qedf->dbg_ctx),
- "byte_offset=%u for xid=0x%x.\n",
- io_req->xid,
- data_desc->fast.sgl_byte_offset);
- data_desc->fast.task_reuse_cnt =
- io_req->reuse_count;
- io_req->reuse_count++;
- if (io_req->reuse_count == QEDF_MAX_REUSE) {
- *ptu_invalidate = 1;
- io_req->reuse_count = 0;
- }
- qedf->fast_sge_ios++;
- io_req->sge_type = QEDF_IOREQ_FAST_SGE;
- }
- }
-
- /* T Storm context */
- task_ctx->tstorm_st_context.read_only.task_type =
- FCOE_TASK_TYPE_WRITE_INITIATOR;
-
- /* M Storm context */
- tmp_sgl_mode = GET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
- YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE);
- SET_FIELD(task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
- FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE,
- tmp_sgl_mode);
-
} else {
- /* Setup READ task */
-
- /* M Storm context */
- struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl;
-
- data_desc = &task_ctx->mstorm_st_context.fp.data_desc;
- task_ctx->mstorm_st_context.fp.data_2_trns_rem =
- io_req->data_xfer_len;
-
- if (io_req->use_slowpath) {
- SET_FIELD(
- task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
- FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE,
- FCOE_SLOW_SGL);
- data_desc->slow.base_sgl_addr.lo =
- U64_LO(bd_tbl->bd_tbl_dma);
- data_desc->slow.base_sgl_addr.hi =
- U64_HI(bd_tbl->bd_tbl_dma);
- data_desc->slow.remainder_num_sges =
- bd_count;
- data_desc->slow.curr_sge_off = 0;
- data_desc->slow.curr_sgl_index = 0;
- qedf->slow_sge_ios++;
- io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
+ if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
+ task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
+ tx_io_size = io_req->data_xfer_len;
} else {
- SET_FIELD(
- task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
- FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE,
- (bd_count <= 4) ? (enum fcoe_sgl_mode)bd_count :
- FCOE_MUL_FAST_SGES);
-
- if (bd_count == 1) {
- data_desc->single_sge.sge_addr.lo =
- fcoe_bd_tbl->sge_addr.lo;
- data_desc->single_sge.sge_addr.hi =
- fcoe_bd_tbl->sge_addr.hi;
- data_desc->single_sge.size =
- fcoe_bd_tbl->size;
- data_desc->single_sge.is_valid_sge = 0;
- qedf->single_sge_ios++;
- io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
- } else {
- data_desc->fast.sgl_start_addr.lo =
- U64_LO(bd_tbl->bd_tbl_dma);
- data_desc->fast.sgl_start_addr.hi =
- U64_HI(bd_tbl->bd_tbl_dma);
- data_desc->fast.sgl_byte_offset = 0;
- data_desc->fast.task_reuse_cnt =
- io_req->reuse_count;
- io_req->reuse_count++;
- if (io_req->reuse_count == QEDF_MAX_REUSE) {
- *ptu_invalidate = 1;
- io_req->reuse_count = 0;
- }
- qedf->fast_sge_ios++;
- io_req->sge_type = QEDF_IOREQ_FAST_SGE;
- }
+ task_type = FCOE_TASK_TYPE_READ_INITIATOR;
+ rx_io_size = io_req->data_xfer_len;
}
-
- /* Y Storm context */
- task_ctx->ystorm_st_context.expect_first_xfer = 0;
- task_ctx->ystorm_st_context.task_type =
- FCOE_TASK_TYPE_READ_INITIATOR;
-
- /* T Storm context */
- task_ctx->tstorm_st_context.read_only.task_type =
- FCOE_TASK_TYPE_READ_INITIATOR;
- mst_sgl_mode = GET_FIELD(
- task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
- FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE);
- SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
- FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE,
- mst_sgl_mode);
}
+ /* Setup the fields for fcoe_task_params */
+ io_req->task_params->context = task_ctx;
+ io_req->task_params->sqe = sqe;
+ io_req->task_params->task_type = task_type;
+ io_req->task_params->tx_io_size = tx_io_size;
+ io_req->task_params->rx_io_size = rx_io_size;
+ io_req->task_params->conn_cid = fcport->fw_cid;
+ io_req->task_params->itid = io_req->xid;
+ io_req->task_params->cq_rss_number = cq_idx;
+ io_req->task_params->is_tape_device = fcport->dev_type;
+
+ /* Fill in information for scatter/gather list */
+ if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
+ bd_count = bd_tbl->bd_valid;
+ io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
+ io_req->sgl_task_params->sgl_phys_addr.lo =
+ U64_LO(bd_tbl->bd_tbl_dma);
+ io_req->sgl_task_params->sgl_phys_addr.hi =
+ U64_HI(bd_tbl->bd_tbl_dma);
+ io_req->sgl_task_params->num_sges = bd_count;
+ io_req->sgl_task_params->total_buffer_size =
+ scsi_bufflen(io_req->sc_cmd);
+ io_req->sgl_task_params->small_mid_sge =
+ io_req->use_slowpath;
+ }
+
+ /* Fill in physical address of sense buffer */
+ sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
+ sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);
+
/* fill FCP_CMND IU */
- fcp_cmnd = (u32 *)task_ctx->ystorm_st_context.tx_info_union.fcp_cmd_payload.opaque;
- qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
+ qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);
/* Swap fcp_cmnd since FC is big endian */
cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
-
for (i = 0; i < cnt; i++) {
- *fcp_cmnd = cpu_to_be32(tmp_fcp_cmnd[i]);
- fcp_cmnd++;
+ tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
+ }
+ memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));
+
+ init_initiator_rw_fcoe_task(io_req->task_params,
+ io_req->sgl_task_params,
+ sense_data_buffer_phys_addr,
+ io_req->task_retry_identifier, fcp_cmnd);
+
+ /* Increment SGL type counters */
+ if (bd_count == 1) {
+ qedf->single_sge_ios++;
+ io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
+ } else if (io_req->use_slowpath) {
+ qedf->slow_sge_ios++;
+ io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
+ } else {
+ qedf->fast_sge_ios++;
+ io_req->sge_type = QEDF_IOREQ_FAST_SGE;
}
-
- /* M Storm context - Sense buffer */
- task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo =
- U64_LO(io_req->sense_buffer_dma);
- task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi =
- U64_HI(io_req->sense_buffer_dma);
}
void qedf_init_mp_task(struct qedf_ioreq *io_req,
- struct fcoe_task_context *task_ctx)
+ struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
{
struct qedf_mp_req *mp_req = &(io_req->mp_req);
struct qedf_rport *fcport = io_req->fcport;
struct qedf_ctx *qedf = io_req->fcport->qedf;
struct fc_frame_header *fc_hdr;
- enum fcoe_task_type task_type = 0;
- union fcoe_data_desc_ctx *data_desc;
+ struct fcoe_tx_mid_path_params task_fc_hdr;
+ struct scsi_sgl_task_params tx_sgl_task_params;
+ struct scsi_sgl_task_params rx_sgl_task_params;
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Initializing MP task "
- "for cmd_type = %d\n", io_req->cmd_type);
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "Initializing MP task for cmd_type=%d\n",
+ io_req->cmd_type);
qedf->control_requests++;
- /* Obtain task_type */
- if ((io_req->cmd_type == QEDF_TASK_MGMT_CMD) ||
- (io_req->cmd_type == QEDF_ELS)) {
- task_type = FCOE_TASK_TYPE_MIDPATH;
- } else if (io_req->cmd_type == QEDF_ABTS) {
- task_type = FCOE_TASK_TYPE_ABTS;
- }
-
+ memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
+ memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+ memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
/* Setup the task from io_req for easy reference */
io_req->task = task_ctx;
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "task type = %d\n",
- task_type);
-
- /* YSTORM only */
- {
- /* Initialize YSTORM task context */
- struct fcoe_tx_mid_path_params *task_fc_hdr =
- &task_ctx->ystorm_st_context.tx_info_union.tx_params.mid_path;
- memset(task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
- task_ctx->ystorm_st_context.task_rety_identifier =
- io_req->task_retry_identifier;
-
- /* Init SGL parameters */
- if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
- (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
- data_desc = &task_ctx->ystorm_st_context.data_desc;
- data_desc->slow.base_sgl_addr.lo =
- U64_LO(mp_req->mp_req_bd_dma);
- data_desc->slow.base_sgl_addr.hi =
- U64_HI(mp_req->mp_req_bd_dma);
- data_desc->slow.remainder_num_sges = 1;
- data_desc->slow.curr_sge_off = 0;
- data_desc->slow.curr_sgl_index = 0;
- }
-
- fc_hdr = &(mp_req->req_fc_hdr);
- if (task_type == FCOE_TASK_TYPE_MIDPATH) {
- fc_hdr->fh_ox_id = io_req->xid;
- fc_hdr->fh_rx_id = htons(0xffff);
- } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
- fc_hdr->fh_rx_id = io_req->xid;
- }
+ /* Setup the fields for fcoe_task_params */
+ io_req->task_params->context = task_ctx;
+ io_req->task_params->sqe = sqe;
+ io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
+ io_req->task_params->tx_io_size = io_req->data_xfer_len;
+ /* rx_io_size tells the f/w how large a response buffer we have */
+ io_req->task_params->rx_io_size = PAGE_SIZE;
+ io_req->task_params->conn_cid = fcport->fw_cid;
+ io_req->task_params->itid = io_req->xid;
+ /* Return middle path commands on CQ 0 */
+ io_req->task_params->cq_rss_number = 0;
+ io_req->task_params->is_tape_device = fcport->dev_type;
+
+ fc_hdr = &(mp_req->req_fc_hdr);
+ /* Set OX_ID and RX_ID based on driver task id */
+ fc_hdr->fh_ox_id = io_req->xid;
+ fc_hdr->fh_rx_id = htons(0xffff);
+
+ /* Set up FC header information */
+ task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
+ task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
+ task_fc_hdr.type = fc_hdr->fh_type;
+ task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
+ task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
+ task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
+ task_fc_hdr.ox_id = fc_hdr->fh_ox_id;
+
+ /* Set up s/g list parameters for request buffer */
+ tx_sgl_task_params.sgl = mp_req->mp_req_bd;
+ tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
+ tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
+ tx_sgl_task_params.num_sges = 1;
+ /* Total Tx buffer size is the length of the midpath request payload */
+ tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
+ tx_sgl_task_params.small_mid_sge = 0;
+
+ /* Set up s/g list parameters for response buffer */
+ rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
+ rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
+ rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
+ rx_sgl_task_params.num_sges = 1;
+ /* Response buffer is a single PAGE_SIZE sg element */
+ rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
+ rx_sgl_task_params.small_mid_sge = 0;
- /* Fill FC Header into middle path buffer */
- task_fc_hdr->parameter = fc_hdr->fh_parm_offset;
- task_fc_hdr->r_ctl = fc_hdr->fh_r_ctl;
- task_fc_hdr->type = fc_hdr->fh_type;
- task_fc_hdr->cs_ctl = fc_hdr->fh_cs_ctl;
- task_fc_hdr->df_ctl = fc_hdr->fh_df_ctl;
- task_fc_hdr->rx_id = fc_hdr->fh_rx_id;
- task_fc_hdr->ox_id = fc_hdr->fh_ox_id;
-
- task_ctx->ystorm_st_context.data_2_trns_rem =
- io_req->data_xfer_len;
- task_ctx->ystorm_st_context.task_type = task_type;
- }
-
- /* TSTORM ONLY */
- {
- task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid;
- task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid;
- /* Always send middle-path repsonses on CQ #0 */
- task_ctx->tstorm_st_context.read_only.glbl_q_num = 0;
- io_req->fp_idx = 0;
- SET_FIELD(task_ctx->tstorm_ag_context.flags0,
- TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE,
- PROTOCOLID_FCOE);
- task_ctx->tstorm_st_context.read_only.task_type = task_type;
- SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
- FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME,
- 1);
- task_ctx->tstorm_st_context.read_write.rx_id = 0xffff;
- }
-
- /* MSTORM only */
- {
- if (task_type == FCOE_TASK_TYPE_MIDPATH) {
- /* Initialize task context */
- data_desc = &task_ctx->mstorm_st_context.fp.data_desc;
-
- /* Set cache sges address and length */
- data_desc->slow.base_sgl_addr.lo =
- U64_LO(mp_req->mp_resp_bd_dma);
- data_desc->slow.base_sgl_addr.hi =
- U64_HI(mp_req->mp_resp_bd_dma);
- data_desc->slow.remainder_num_sges = 1;
- data_desc->slow.curr_sge_off = 0;
- data_desc->slow.curr_sgl_index = 0;
- /*
- * Also need to fil in non-fastpath response address
- * for middle path commands.
- */
- task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo =
- U64_LO(mp_req->mp_resp_bd_dma);
- task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi =
- U64_HI(mp_req->mp_resp_bd_dma);
- }
- }
-
- /* USTORM ONLY */
- {
- task_ctx->ustorm_ag_context.global_cq_num = 0;
- }
+ /*
+ * Last arg is 0 because the previous code did not request that the
+ * FW place the FC header.
+ */
+ init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
+ &task_fc_hdr,
+ &tx_sgl_task_params,
+ &rx_sgl_task_params, 0);
- /* I/O stats. Middle path commands always use slow SGEs */
- qedf->slow_sge_ios++;
- io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
+ /* Midpath requests always consume 1 SGE */
+ qedf->single_sge_ios++;
}
-void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid, u32 ptu_invalidate,
- enum fcoe_task_type req_type, u32 offset)
+/* Presumed that fcport->rport_lock is held */
+u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
{
- struct fcoe_wqe *sqe;
uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
+ u16 rval;
- sqe = &fcport->sq[fcport->sq_prod_idx];
+ rval = fcport->sq_prod_idx;
+ /* Adjust ring index */
fcport->sq_prod_idx++;
fcport->fw_sq_prod_idx++;
if (fcport->sq_prod_idx == total_sqe)
fcport->sq_prod_idx = 0;
- switch (req_type) {
- case FCOE_TASK_TYPE_WRITE_INITIATOR:
- case FCOE_TASK_TYPE_READ_INITIATOR:
- SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_CMD);
- if (ptu_invalidate)
- SET_FIELD(sqe->flags, FCOE_WQE_INVALIDATE_PTU, 1);
- break;
- case FCOE_TASK_TYPE_MIDPATH:
- SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_MIDPATH);
- break;
- case FCOE_TASK_TYPE_ABTS:
- SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
- SEND_FCOE_ABTS_REQUEST);
- break;
- case FCOE_TASK_TYPE_EXCHANGE_CLEANUP:
- SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
- FCOE_EXCHANGE_CLEANUP);
- break;
- case FCOE_TASK_TYPE_SEQUENCE_CLEANUP:
- SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
- FCOE_SEQUENCE_RECOVERY);
- /* NOTE: offset param only used for sequence recovery */
- sqe->additional_info_union.seq_rec_updated_offset = offset;
- break;
- case FCOE_TASK_TYPE_UNSOLICITED:
- break;
- default:
- break;
- }
-
- sqe->task_id = xid;
-
- /* Make sure SQ data is coherent */
- wmb();
-
+ return rval;
}
void qedf_ring_doorbell(struct qedf_rport *fcport)
@@ -1029,7 +847,8 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
struct fcoe_task_context *task_ctx;
u16 xid;
enum fcoe_task_type req_type = 0;
- u32 ptu_invalidate = 0;
+ struct fcoe_wqe *sqe;
+ u16 sqe_idx;
/* Initialize rest of io_req fileds */
io_req->data_xfer_len = scsi_bufflen(sc_cmd);
@@ -1061,6 +880,16 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
return -EAGAIN;
}
+ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
+ kref_put(&io_req->refcount, qedf_release_cmd);
+ }
+
+ /* Obtain free SQE */
+ sqe_idx = qedf_get_sqe_idx(fcport);
+ sqe = &fcport->sq[sqe_idx];
+ memset(sqe, 0, sizeof(struct fcoe_wqe));
+
/* Get the task context */
task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
if (!task_ctx) {
@@ -1070,15 +899,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
return -EINVAL;
}
- qedf_init_task(fcport, lport, io_req, &ptu_invalidate, task_ctx);
-
- if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
- QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
- kref_put(&io_req->refcount, qedf_release_cmd);
- }
-
- /* Obtain free SQ entry */
- qedf_add_to_sq(fcport, xid, ptu_invalidate, req_type, 0);
+ qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
/* Ring doorbell */
qedf_ring_doorbell(fcport);
@@ -1342,7 +1163,7 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
} else {
refcount = kref_read(&io_req->refcount);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
- "%d:0:%d:%d xid=0x%0x op=0x%02x "
+ "%d:0:%d:%lld xid=0x%0x op=0x%02x "
"lba=%02x%02x%02x%02x cdb_status=%d "
"fcp_resid=0x%x refcount=%d.\n",
qedf->lport->host->host_no, sc_cmd->device->id,
@@ -1426,7 +1247,7 @@ void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
sc_cmd->result = result << 16;
refcount = kref_read(&io_req->refcount);
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%d: Completing "
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
"sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
"allowed=%d retries=%d refcount=%d.\n",
qedf->lport->host->host_no, sc_cmd->device->id,
@@ -1661,6 +1482,8 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
u32 r_a_tov = 0;
int rc = 0;
unsigned long flags;
+ struct fcoe_wqe *sqe;
+ u16 sqe_idx;
r_a_tov = rdata->r_a_tov;
lport = qedf->lport;
@@ -1712,10 +1535,12 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
spin_lock_irqsave(&fcport->rport_lock, flags);
- /* Add ABTS to send queue */
- qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_ABTS, 0);
+ sqe_idx = qedf_get_sqe_idx(fcport);
+ sqe = &fcport->sq[sqe_idx];
+ memset(sqe, 0, sizeof(struct fcoe_wqe));
+ io_req->task_params->sqe = sqe;
- /* Ring doorbell */
+ init_initiator_abort_fcoe_task(io_req->task_params);
qedf_ring_doorbell(fcport);
spin_unlock_irqrestore(&fcport->rport_lock, flags);
@@ -1784,8 +1609,8 @@ void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
int qedf_init_mp_req(struct qedf_ioreq *io_req)
{
struct qedf_mp_req *mp_req;
- struct fcoe_sge *mp_req_bd;
- struct fcoe_sge *mp_resp_bd;
+ struct scsi_sge *mp_req_bd;
+ struct scsi_sge *mp_resp_bd;
struct qedf_ctx *qedf = io_req->fcport->qedf;
dma_addr_t addr;
uint64_t sz;
@@ -1819,7 +1644,7 @@ int qedf_init_mp_req(struct qedf_ioreq *io_req)
}
/* Allocate and map mp_req_bd and mp_resp_bd */
- sz = sizeof(struct fcoe_sge);
+ sz = sizeof(struct scsi_sge);
mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
&mp_req->mp_req_bd_dma, GFP_KERNEL);
if (!mp_req->mp_req_bd) {
@@ -1841,7 +1666,7 @@ int qedf_init_mp_req(struct qedf_ioreq *io_req)
mp_req_bd = mp_req->mp_req_bd;
mp_req_bd->sge_addr.lo = U64_LO(addr);
mp_req_bd->sge_addr.hi = U64_HI(addr);
- mp_req_bd->size = QEDF_PAGE_SIZE;
+ mp_req_bd->sge_len = QEDF_PAGE_SIZE;
/*
* MP buffer is either a task mgmt command or an ELS.
@@ -1852,7 +1677,7 @@ int qedf_init_mp_req(struct qedf_ioreq *io_req)
addr = mp_req->resp_buf_dma;
mp_resp_bd->sge_addr.lo = U64_LO(addr);
mp_resp_bd->sge_addr.hi = U64_HI(addr);
- mp_resp_bd->size = QEDF_PAGE_SIZE;
+ mp_resp_bd->sge_len = QEDF_PAGE_SIZE;
return 0;
}
@@ -1895,6 +1720,8 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
int tmo = 0;
int rc = SUCCESS;
unsigned long flags;
+ struct fcoe_wqe *sqe;
+ u16 sqe_idx;
fcport = io_req->fcport;
if (!fcport) {
@@ -1940,12 +1767,16 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
init_completion(&io_req->tm_done);
- /* Obtain free SQ entry */
spin_lock_irqsave(&fcport->rport_lock, flags);
- qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_EXCHANGE_CLEANUP, 0);
- /* Ring doorbell */
+ sqe_idx = qedf_get_sqe_idx(fcport);
+ sqe = &fcport->sq[sqe_idx];
+ memset(sqe, 0, sizeof(struct fcoe_wqe));
+ io_req->task_params->sqe = sqe;
+
+ init_initiator_cleanup_fcoe_task(io_req->task_params);
qedf_ring_doorbell(fcport);
+
spin_unlock_irqrestore(&fcport->rport_lock, flags);
tmo = wait_for_completion_timeout(&io_req->tm_done,
@@ -1991,16 +1822,15 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
uint8_t tm_flags)
{
struct qedf_ioreq *io_req;
- struct qedf_mp_req *tm_req;
struct fcoe_task_context *task;
- struct fc_frame_header *fc_hdr;
- struct fcp_cmnd *fcp_cmnd;
struct qedf_ctx *qedf = fcport->qedf;
+ struct fc_lport *lport = qedf->lport;
int rc = 0;
uint16_t xid;
- uint32_t sid, did;
int tmo = 0;
unsigned long flags;
+ struct fcoe_wqe *sqe;
+ u16 sqe_idx;
if (!sc_cmd) {
QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
@@ -2031,36 +1861,14 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
/* Set the return CPU to be the same as the request one */
io_req->cpu = smp_processor_id();
- tm_req = (struct qedf_mp_req *)&(io_req->mp_req);
-
- rc = qedf_init_mp_req(io_req);
- if (rc == FAILED) {
- QEDF_ERR(&(qedf->dbg_ctx), "Task mgmt MP request init "
- "failed\n");
- kref_put(&io_req->refcount, qedf_release_cmd);
- goto reset_tmf_err;
- }
-
/* Set TM flags */
- io_req->io_req_flags = 0;
- tm_req->tm_flags = tm_flags;
+ io_req->io_req_flags = QEDF_READ;
+ io_req->data_xfer_len = 0;
+ io_req->tm_flags = tm_flags;
/* Default is to return a SCSI command when an error occurs */
io_req->return_scsi_cmd_on_abts = true;
- /* Fill FCP_CMND */
- qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
- fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
- memset(fcp_cmnd->fc_cdb, 0, FCP_CMND_LEN);
- fcp_cmnd->fc_dl = 0;
-
- /* Fill FC header */
- fc_hdr = &(tm_req->req_fc_hdr);
- sid = fcport->sid;
- did = fcport->rdata->ids.port_id;
- __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, sid, did,
- FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
- FC_FC_SEQ_INIT, 0);
/* Obtain exchange id */
xid = io_req->xid;
@@ -2069,16 +1877,18 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
/* Initialize task context for this IO request */
task = qedf_get_task_mem(&qedf->tasks, xid);
- qedf_init_mp_task(io_req, task);
init_completion(&io_req->tm_done);
- /* Obtain free SQ entry */
spin_lock_irqsave(&fcport->rport_lock, flags);
- qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0);
- /* Ring doorbell */
+ sqe_idx = qedf_get_sqe_idx(fcport);
+ sqe = &fcport->sq[sqe_idx];
+ memset(sqe, 0, sizeof(struct fcoe_wqe));
+
+ qedf_init_task(fcport, lport, io_req, task, sqe);
qedf_ring_doorbell(fcport);
+
spin_unlock_irqrestore(&fcport->rport_lock, flags);
tmo = wait_for_completion_timeout(&io_req->tm_done,
@@ -2162,14 +1972,6 @@ void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *io_req)
{
struct fcoe_cqe_rsp_info *fcp_rsp;
- struct fcoe_cqe_midpath_info *mp_info;
-
-
- /* Get TMF response length from CQE */
- mp_info = &cqe->cqe_info.midpath_info;
- io_req->mp_req.resp_len = mp_info->data_placement_size;
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
- "Response len is %d.\n", io_req->mp_req.resp_len);
fcp_rsp = &cqe->cqe_info.rsp_info;
qedf_parse_fcp_rsp(io_req, fcp_rsp);
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index d9d7a86b5f8b..cceddd995a4b 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2456,8 +2456,8 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf)
}
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
- "BDQ PBL addr=0x%p dma=0x%llx.\n", qedf->bdq_pbl,
- qedf->bdq_pbl_dma);
+ "BDQ PBL addr=0x%p dma=%pad\n",
+ qedf->bdq_pbl, &qedf->bdq_pbl_dma);
/*
* Populate BDQ PBL with physical and virtual address of individual
@@ -2803,6 +2803,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
atomic_set(&qedf->num_offloads, 0);
qedf->stop_io_on_error = false;
pci_set_drvdata(pdev, qedf);
+ init_completion(&qedf->fipvlan_compl);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
"QLogic FastLinQ FCoE Module qedf %s, "
diff --git a/drivers/scsi/qedi/Makefile b/drivers/scsi/qedi/Makefile
index 2b3e16b24299..90a6925577cc 100644
--- a/drivers/scsi/qedi/Makefile
+++ b/drivers/scsi/qedi/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_QEDI) := qedi.o
qedi-y := qedi_main.o qedi_iscsi.o qedi_fw.o qedi_sysfs.o \
- qedi_dbg.o
+ qedi_dbg.o qedi_fw_api.o
qedi-$(CONFIG_DEBUG_FS) += qedi_debugfs.o
diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
index b064c882411c..39d77818a677 100644
--- a/drivers/scsi/qedi/qedi_debugfs.c
+++ b/drivers/scsi/qedi/qedi_debugfs.c
@@ -14,7 +14,7 @@
#include <linux/debugfs.h>
#include <linux/module.h>
-int do_not_recover;
+int qedi_do_not_recover;
static struct dentry *qedi_dbg_root;
void
@@ -74,22 +74,22 @@ qedi_dbg_exit(void)
static ssize_t
qedi_dbg_do_not_recover_enable(struct qedi_dbg_ctx *qedi_dbg)
{
- if (!do_not_recover)
- do_not_recover = 1;
+ if (!qedi_do_not_recover)
+ qedi_do_not_recover = 1;
QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
- do_not_recover);
+ qedi_do_not_recover);
return 0;
}
static ssize_t
qedi_dbg_do_not_recover_disable(struct qedi_dbg_ctx *qedi_dbg)
{
- if (do_not_recover)
- do_not_recover = 0;
+ if (qedi_do_not_recover)
+ qedi_do_not_recover = 0;
QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
- do_not_recover);
+ qedi_do_not_recover);
return 0;
}
@@ -141,7 +141,7 @@ qedi_dbg_do_not_recover_cmd_read(struct file *filp, char __user *buffer,
if (*ppos)
return 0;
- cnt = sprintf(buffer, "do_not_recover=%d\n", do_not_recover);
+ cnt = sprintf(buffer, "do_not_recover=%d\n", qedi_do_not_recover);
cnt = min_t(int, count, cnt - *ppos);
*ppos += cnt;
return cnt;
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index c9f0ef4e11b3..d6978cbc56f0 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -14,6 +14,8 @@
#include "qedi.h"
#include "qedi_iscsi.h"
#include "qedi_gbl.h"
+#include "qedi_fw_iscsi.h"
+#include "qedi_fw_scsi.h"
static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
struct iscsi_task *mtask);
@@ -53,8 +55,8 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
resp_hdr->exp_cmdsn = cpu_to_be32(cqe_logout_response->exp_cmd_sn);
resp_hdr->max_cmdsn = cpu_to_be32(cqe_logout_response->max_cmd_sn);
- resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time2wait);
- resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time2retain);
+ resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time_2_wait);
+ resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time_2_retain);
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
"Freeing tid=0x%x for cid=0x%x\n",
@@ -975,81 +977,6 @@ exit_fp_process:
return;
}
-static void qedi_add_to_sq(struct qedi_conn *qedi_conn, struct iscsi_task *task,
- u16 tid, uint16_t ptu_invalidate, int is_cleanup)
-{
- struct iscsi_wqe *wqe;
- struct iscsi_wqe_field *cont_field;
- struct qedi_endpoint *ep;
- struct scsi_cmnd *sc = task->sc;
- struct iscsi_login_req *login_hdr;
- struct qedi_cmd *cmd = task->dd_data;
-
- login_hdr = (struct iscsi_login_req *)task->hdr;
- ep = qedi_conn->ep;
- wqe = &ep->sq[ep->sq_prod_idx];
-
- memset(wqe, 0, sizeof(*wqe));
-
- ep->sq_prod_idx++;
- ep->fw_sq_prod_idx++;
- if (ep->sq_prod_idx == QEDI_SQ_SIZE)
- ep->sq_prod_idx = 0;
-
- if (is_cleanup) {
- SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
- ISCSI_WQE_TYPE_TASK_CLEANUP);
- wqe->task_id = tid;
- return;
- }
-
- if (ptu_invalidate) {
- SET_FIELD(wqe->flags, ISCSI_WQE_PTU_INVALIDATE,
- ISCSI_WQE_SET_PTU_INVALIDATE);
- }
-
- cont_field = &wqe->cont_prevtid_union.cont_field;
-
- switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
- case ISCSI_OP_LOGIN:
- case ISCSI_OP_TEXT:
- SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
- ISCSI_WQE_TYPE_MIDDLE_PATH);
- SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
- 1);
- cont_field->contlen_cdbsize_field = ntoh24(login_hdr->dlength);
- break;
- case ISCSI_OP_LOGOUT:
- case ISCSI_OP_NOOP_OUT:
- case ISCSI_OP_SCSI_TMFUNC:
- SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
- ISCSI_WQE_TYPE_NORMAL);
- break;
- default:
- if (!sc)
- break;
-
- SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
- ISCSI_WQE_TYPE_NORMAL);
- cont_field->contlen_cdbsize_field =
- (sc->sc_data_direction == DMA_TO_DEVICE) ?
- scsi_bufflen(sc) : 0;
- if (cmd->use_slowpath)
- SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES, 0);
- else
- SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
- (sc->sc_data_direction ==
- DMA_TO_DEVICE) ?
- min((u16)QEDI_FAST_SGE_COUNT,
- (u16)cmd->io_tbl.sge_valid) : 0);
- break;
- }
-
- wqe->task_id = tid;
- /* Make sure SQ data is coherent */
- wmb();
-}
-
static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
{
struct iscsi_db_data dbell = { 0 };
@@ -1076,96 +1003,116 @@ static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
qedi_conn->iscsi_conn_id);
}
+static u16 qedi_get_wqe_idx(struct qedi_conn *qedi_conn)
+{
+ struct qedi_endpoint *ep;
+ u16 rval;
+
+ ep = qedi_conn->ep;
+ rval = ep->sq_prod_idx;
+
+ /* Increment SQ index */
+ ep->sq_prod_idx++;
+ ep->fw_sq_prod_idx++;
+ if (ep->sq_prod_idx == QEDI_SQ_SIZE)
+ ep->sq_prod_idx = 0;
+
+ return rval;
+}
+
int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
struct iscsi_task *task)
{
- struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_login_req_hdr login_req_pdu_header;
+ struct scsi_sgl_task_params tx_sgl_task_params;
+ struct scsi_sgl_task_params rx_sgl_task_params;
+ struct iscsi_task_params task_params;
struct iscsi_task_context *fw_task_ctx;
+ struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_login_req *login_hdr;
- struct iscsi_login_req_hdr *fw_login_req = NULL;
- struct iscsi_cached_sge_ctx *cached_sge = NULL;
- struct iscsi_sge *single_sge = NULL;
- struct iscsi_sge *req_sge = NULL;
- struct iscsi_sge *resp_sge = NULL;
+ struct scsi_sge *req_sge = NULL;
+ struct scsi_sge *resp_sge = NULL;
struct qedi_cmd *qedi_cmd;
- s16 ptu_invalidate = 0;
+ struct qedi_endpoint *ep;
s16 tid = 0;
+ u16 sq_idx = 0;
+ int rval = 0;
- req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
- resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
qedi_cmd = (struct qedi_cmd *)task->dd_data;
+ ep = qedi_conn->ep;
login_hdr = (struct iscsi_login_req *)task->hdr;
tid = qedi_get_task_idx(qedi);
if (tid == -1)
return -ENOMEM;
- fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+ fw_task_ctx =
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
qedi_cmd->task_id = tid;
- /* Ystorm context */
- fw_login_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.login_req;
- fw_login_req->opcode = login_hdr->opcode;
- fw_login_req->version_min = login_hdr->min_version;
- fw_login_req->version_max = login_hdr->max_version;
- fw_login_req->flags_attr = login_hdr->flags;
- fw_login_req->isid_tabc = *((u16 *)login_hdr->isid + 2);
- fw_login_req->isid_d = *((u32 *)login_hdr->isid);
- fw_login_req->tsih = login_hdr->tsih;
- qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
- fw_login_req->itt = qedi_set_itt(tid, get_itt(task->itt));
- fw_login_req->cid = qedi_conn->iscsi_conn_id;
- fw_login_req->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
- fw_login_req->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
- fw_login_req->exp_stat_sn = 0;
-
- if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
- ptu_invalidate = 1;
- qedi->tid_reuse_count[tid] = 0;
- }
+ memset(&task_params, 0, sizeof(task_params));
+ memset(&login_req_pdu_header, 0, sizeof(login_req_pdu_header));
+ memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
+ memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
+ /* Update header info */
+ login_req_pdu_header.opcode = login_hdr->opcode;
+ login_req_pdu_header.version_min = login_hdr->min_version;
+ login_req_pdu_header.version_max = login_hdr->max_version;
+ login_req_pdu_header.flags_attr = login_hdr->flags;
+ login_req_pdu_header.isid_tabc = swab32p((u32 *)login_hdr->isid);
+ login_req_pdu_header.isid_d = swab16p((u16 *)&login_hdr->isid[4]);
+
+ login_req_pdu_header.tsih = login_hdr->tsih;
+ login_req_pdu_header.hdr_second_dword = ntoh24(login_hdr->dlength);
- fw_task_ctx->ystorm_st_context.state.reuse_count =
- qedi->tid_reuse_count[tid];
- fw_task_ctx->mstorm_st_context.reuse_count =
- qedi->tid_reuse_count[tid]++;
- cached_sge =
- &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
- cached_sge->sge.sge_len = req_sge->sge_len;
- cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
- cached_sge->sge.sge_addr.hi =
- (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
-
- /* Mstorm context */
- single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
- fw_task_ctx->mstorm_st_context.task_type = 0x2;
- fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
- single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
- single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
- single_sge->sge_len = resp_sge->sge_len;
-
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SINGLE_SGE, 1);
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SLOW_IO, 0);
- fw_task_ctx->mstorm_st_context.sgl_size = 1;
- fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
-
- /* Ustorm context */
- fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
- fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
- ntoh24(login_hdr->dlength);
- fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
- fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
- fw_task_ctx->ustorm_st_context.task_type = 0x2;
- fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
- fw_task_ctx->ustorm_ag_context.exp_data_acked =
- ntoh24(login_hdr->dlength);
- SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
- USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
- SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
- USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+ qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+ login_req_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
+ login_req_pdu_header.cid = qedi_conn->iscsi_conn_id;
+ login_req_pdu_header.cmd_sn = be32_to_cpu(login_hdr->cmdsn);
+ login_req_pdu_header.exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
+ login_req_pdu_header.exp_stat_sn = 0;
+
+ /* Fill tx AHS and rx buffer */
+ tx_sgl_task_params.sgl =
+ (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ tx_sgl_task_params.sgl_phys_addr.lo =
+ (u32)(qedi_conn->gen_pdu.req_dma_addr);
+ tx_sgl_task_params.sgl_phys_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+ tx_sgl_task_params.total_buffer_size = ntoh24(login_hdr->dlength);
+ tx_sgl_task_params.num_sges = 1;
+
+ rx_sgl_task_params.sgl =
+ (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ rx_sgl_task_params.sgl_phys_addr.lo =
+ (u32)(qedi_conn->gen_pdu.resp_dma_addr);
+ rx_sgl_task_params.sgl_phys_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
+ rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
+ rx_sgl_task_params.num_sges = 1;
+
+ /* Fill fw input params */
+ task_params.context = fw_task_ctx;
+ task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+ task_params.itid = tid;
+ task_params.cq_rss_number = 0;
+ task_params.tx_io_size = ntoh24(login_hdr->dlength);
+ task_params.rx_io_size = resp_sge->sge_len;
+
+ sq_idx = qedi_get_wqe_idx(qedi_conn);
+ task_params.sqe = &ep->sq[sq_idx];
+
+ memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+ rval = init_initiator_login_request_task(&task_params,
+ &login_req_pdu_header,
+ &tx_sgl_task_params,
+ &rx_sgl_task_params);
+ if (rval)
+ return -1;
spin_lock(&qedi_conn->list_lock);
list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
@@ -1173,7 +1120,6 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
qedi_conn->active_cmd_count++;
spin_unlock(&qedi_conn->list_lock);
- qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
qedi_ring_doorbell(qedi_conn);
return 0;
}
@@ -1181,65 +1127,64 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
struct iscsi_task *task)
{
- struct qedi_ctx *qedi = qedi_conn->qedi;
- struct iscsi_logout_req_hdr *fw_logout_req = NULL;
- struct iscsi_task_context *fw_task_ctx = NULL;
+ struct iscsi_logout_req_hdr logout_pdu_header;
+ struct scsi_sgl_task_params tx_sgl_task_params;
+ struct scsi_sgl_task_params rx_sgl_task_params;
+ struct iscsi_task_params task_params;
+ struct iscsi_task_context *fw_task_ctx;
struct iscsi_logout *logout_hdr = NULL;
- struct qedi_cmd *qedi_cmd = NULL;
- s16 tid = 0;
- s16 ptu_invalidate = 0;
+ struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct qedi_cmd *qedi_cmd;
+ struct qedi_endpoint *ep;
+ s16 tid = 0;
+ u16 sq_idx = 0;
+ int rval = 0;
qedi_cmd = (struct qedi_cmd *)task->dd_data;
logout_hdr = (struct iscsi_logout *)task->hdr;
+ ep = qedi_conn->ep;
tid = qedi_get_task_idx(qedi);
if (tid == -1)
return -ENOMEM;
- fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
-
+ fw_task_ctx =
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
qedi_cmd->task_id = tid;
- /* Ystorm context */
- fw_logout_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.logout_req;
- fw_logout_req->opcode = ISCSI_OPCODE_LOGOUT_REQUEST;
- fw_logout_req->reason_code = 0x80 | logout_hdr->flags;
- qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
- fw_logout_req->itt = qedi_set_itt(tid, get_itt(task->itt));
- fw_logout_req->exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn);
- fw_logout_req->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
+ memset(&task_params, 0, sizeof(task_params));
+ memset(&logout_pdu_header, 0, sizeof(logout_pdu_header));
+ memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
+ memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
- if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
- ptu_invalidate = 1;
- qedi->tid_reuse_count[tid] = 0;
- }
- fw_task_ctx->ystorm_st_context.state.reuse_count =
- qedi->tid_reuse_count[tid];
- fw_task_ctx->mstorm_st_context.reuse_count =
- qedi->tid_reuse_count[tid]++;
- fw_logout_req->cid = qedi_conn->iscsi_conn_id;
- fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
-
- /* Mstorm context */
- fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
- fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
-
- /* Ustorm context */
- fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
- fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
- fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
- fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
- fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
-
- SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
- USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
- SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
- ISCSI_REG1_NUM_FAST_SGES, 0);
-
- fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
- SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
- USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+ /* Update header info */
+ logout_pdu_header.opcode = logout_hdr->opcode;
+ logout_pdu_header.reason_code = 0x80 | logout_hdr->flags;
+ qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+ logout_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
+ logout_pdu_header.exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn);
+ logout_pdu_header.cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
+ logout_pdu_header.cid = qedi_conn->iscsi_conn_id;
+
+ /* Fill fw input params */
+ task_params.context = fw_task_ctx;
+ task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+ task_params.itid = tid;
+ task_params.cq_rss_number = 0;
+ task_params.tx_io_size = 0;
+ task_params.rx_io_size = 0;
+
+ sq_idx = qedi_get_wqe_idx(qedi_conn);
+ task_params.sqe = &ep->sq[sq_idx];
+ memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+
+ rval = init_initiator_logout_request_task(&task_params,
+ &logout_pdu_header,
+ NULL, NULL);
+ if (rval)
+ return -1;
spin_lock(&qedi_conn->list_lock);
list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
@@ -1247,9 +1192,7 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
qedi_conn->active_cmd_count++;
spin_unlock(&qedi_conn->list_lock);
- qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
qedi_ring_doorbell(qedi_conn);
-
return 0;
}
@@ -1461,9 +1404,9 @@ static void qedi_tmf_work(struct work_struct *work)
get_itt(tmf_hdr->rtt), get_itt(ctask->itt), cmd->task_id,
qedi_conn->iscsi_conn_id);
- if (do_not_recover) {
+ if (qedi_do_not_recover) {
QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n",
- do_not_recover);
+ qedi_do_not_recover);
goto abort_ret;
}
@@ -1533,47 +1476,46 @@ ldel_exit:
static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
struct iscsi_task *mtask)
{
- struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+ struct iscsi_tmf_request_hdr tmf_pdu_header;
+ struct iscsi_task_params task_params;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_task_context *fw_task_ctx;
- struct iscsi_tmf_request_hdr *fw_tmf_request;
- struct iscsi_sge *single_sge;
- struct qedi_cmd *qedi_cmd;
- struct qedi_cmd *cmd;
+ struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
struct iscsi_task *ctask;
struct iscsi_tm *tmf_hdr;
- struct iscsi_sge *req_sge;
- struct iscsi_sge *resp_sge;
- u32 lun[2];
- s16 tid = 0, ptu_invalidate = 0;
+ struct qedi_cmd *qedi_cmd;
+ struct qedi_cmd *cmd;
+ struct qedi_endpoint *ep;
+ u32 scsi_lun[2];
+ s16 tid = 0;
+ u16 sq_idx = 0;
+ int rval = 0;
- req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
- resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
- qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+ qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
+ ep = qedi_conn->ep;
- tid = qedi_cmd->task_id;
- qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd);
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1)
+ return -ENOMEM;
- fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+ fw_task_ctx =
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
- fw_tmf_request = &fw_task_ctx->ystorm_st_context.pdu_hdr.tmf_request;
- fw_tmf_request->itt = qedi_set_itt(tid, get_itt(mtask->itt));
- fw_tmf_request->cmd_sn = be32_to_cpu(tmf_hdr->cmdsn);
+ qedi_cmd->task_id = tid;
- memcpy(lun, &tmf_hdr->lun, sizeof(struct scsi_lun));
- fw_tmf_request->lun.lo = be32_to_cpu(lun[0]);
- fw_tmf_request->lun.hi = be32_to_cpu(lun[1]);
+ memset(&task_params, 0, sizeof(task_params));
+ memset(&tmf_pdu_header, 0, sizeof(tmf_pdu_header));
- if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
- ptu_invalidate = 1;
- qedi->tid_reuse_count[tid] = 0;
- }
- fw_task_ctx->ystorm_st_context.state.reuse_count =
- qedi->tid_reuse_count[tid];
- fw_task_ctx->mstorm_st_context.reuse_count =
- qedi->tid_reuse_count[tid]++;
+ /* Update header info */
+ qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd);
+ tmf_pdu_header.itt = qedi_set_itt(tid, get_itt(mtask->itt));
+ tmf_pdu_header.cmd_sn = be32_to_cpu(tmf_hdr->cmdsn);
+
+ memcpy(scsi_lun, &tmf_hdr->lun, sizeof(struct scsi_lun));
+ tmf_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
+ tmf_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
ISCSI_TM_FUNC_ABORT_TASK) {
@@ -1584,53 +1526,34 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
return 0;
}
cmd = (struct qedi_cmd *)ctask->dd_data;
- fw_tmf_request->rtt =
+ tmf_pdu_header.rtt =
qedi_set_itt(cmd->task_id,
get_itt(tmf_hdr->rtt));
} else {
- fw_tmf_request->rtt = ISCSI_RESERVED_TAG;
+ tmf_pdu_header.rtt = ISCSI_RESERVED_TAG;
}
- fw_tmf_request->opcode = tmf_hdr->opcode;
- fw_tmf_request->function = tmf_hdr->flags;
- fw_tmf_request->hdr_second_dword = ntoh24(tmf_hdr->dlength);
- fw_tmf_request->ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn);
-
- single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
- fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
- fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
- single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
- single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
- single_sge->sge_len = resp_sge->sge_len;
-
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SINGLE_SGE, 1);
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SLOW_IO, 0);
- fw_task_ctx->mstorm_st_context.sgl_size = 1;
- fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
-
- /* Ustorm context */
- fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
- fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
- fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
- fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
- fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
-
- SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
- USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
- SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
- ISCSI_REG1_NUM_FAST_SGES, 0);
-
- fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
- SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
- USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
- fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
- fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
+ tmf_pdu_header.opcode = tmf_hdr->opcode;
+ tmf_pdu_header.function = tmf_hdr->flags;
+ tmf_pdu_header.hdr_second_dword = ntoh24(tmf_hdr->dlength);
+ tmf_pdu_header.ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn);
- QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
- "Add TMF to SQ, tmf tid=0x%x, itt=0x%x, cid=0x%x\n",
- tid, mtask->itt, qedi_conn->iscsi_conn_id);
+ /* Fill fw input params */
+ task_params.context = fw_task_ctx;
+ task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+ task_params.itid = tid;
+ task_params.cq_rss_number = 0;
+ task_params.tx_io_size = 0;
+ task_params.rx_io_size = 0;
+
+ sq_idx = qedi_get_wqe_idx(qedi_conn);
+ task_params.sqe = &ep->sq[sq_idx];
+
+ memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+ rval = init_initiator_tmf_request_task(&task_params,
+ &tmf_pdu_header);
+ if (rval)
+ return -1;
spin_lock(&qedi_conn->list_lock);
list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
@@ -1638,7 +1561,6 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
qedi_conn->active_cmd_count++;
spin_unlock(&qedi_conn->list_lock);
- qedi_add_to_sq(qedi_conn, mtask, tid, ptu_invalidate, false);
qedi_ring_doorbell(qedi_conn);
return 0;
}
@@ -1689,101 +1611,98 @@ int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
struct iscsi_task *task)
{
- struct qedi_ctx *qedi = qedi_conn->qedi;
+ struct iscsi_text_request_hdr text_request_pdu_header;
+ struct scsi_sgl_task_params tx_sgl_task_params;
+ struct scsi_sgl_task_params rx_sgl_task_params;
+ struct iscsi_task_params task_params;
struct iscsi_task_context *fw_task_ctx;
- struct iscsi_text_request_hdr *fw_text_request;
- struct iscsi_cached_sge_ctx *cached_sge;
- struct iscsi_sge *single_sge;
- struct qedi_cmd *qedi_cmd;
- /* For 6.5 hdr iscsi_hdr */
+ struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_text *text_hdr;
- struct iscsi_sge *req_sge;
- struct iscsi_sge *resp_sge;
- s16 ptu_invalidate = 0;
+ struct scsi_sge *req_sge = NULL;
+ struct scsi_sge *resp_sge = NULL;
+ struct qedi_cmd *qedi_cmd;
+ struct qedi_endpoint *ep;
s16 tid = 0;
+ u16 sq_idx = 0;
+ int rval = 0;
- req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
- resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
qedi_cmd = (struct qedi_cmd *)task->dd_data;
text_hdr = (struct iscsi_text *)task->hdr;
+ ep = qedi_conn->ep;
tid = qedi_get_task_idx(qedi);
if (tid == -1)
return -ENOMEM;
- fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+ fw_task_ctx =
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
qedi_cmd->task_id = tid;
- /* Ystorm context */
- fw_text_request =
- &fw_task_ctx->ystorm_st_context.pdu_hdr.text_request;
- fw_text_request->opcode = text_hdr->opcode;
- fw_text_request->flags_attr = text_hdr->flags;
+ memset(&task_params, 0, sizeof(task_params));
+ memset(&text_request_pdu_header, 0, sizeof(text_request_pdu_header));
+ memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
+ memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
+
+ /* Update header info */
+ text_request_pdu_header.opcode = text_hdr->opcode;
+ text_request_pdu_header.flags_attr = text_hdr->flags;
qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
- fw_text_request->itt = qedi_set_itt(tid, get_itt(task->itt));
- fw_text_request->ttt = text_hdr->ttt;
- fw_text_request->cmd_sn = be32_to_cpu(text_hdr->cmdsn);
- fw_text_request->exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn);
- fw_text_request->hdr_second_dword = ntoh24(text_hdr->dlength);
-
- if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
- ptu_invalidate = 1;
- qedi->tid_reuse_count[tid] = 0;
- }
- fw_task_ctx->ystorm_st_context.state.reuse_count =
- qedi->tid_reuse_count[tid];
- fw_task_ctx->mstorm_st_context.reuse_count =
- qedi->tid_reuse_count[tid]++;
-
- cached_sge =
- &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
- cached_sge->sge.sge_len = req_sge->sge_len;
- cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
- cached_sge->sge.sge_addr.hi =
+ text_request_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
+ text_request_pdu_header.ttt = text_hdr->ttt;
+ text_request_pdu_header.cmd_sn = be32_to_cpu(text_hdr->cmdsn);
+ text_request_pdu_header.exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn);
+ text_request_pdu_header.hdr_second_dword = ntoh24(text_hdr->dlength);
+
+ /* Fill tx AHS and rx buffer */
+ tx_sgl_task_params.sgl =
+ (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ tx_sgl_task_params.sgl_phys_addr.lo =
+ (u32)(qedi_conn->gen_pdu.req_dma_addr);
+ tx_sgl_task_params.sgl_phys_addr.hi =
(u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+ tx_sgl_task_params.total_buffer_size = req_sge->sge_len;
+ tx_sgl_task_params.num_sges = 1;
+
+ rx_sgl_task_params.sgl =
+ (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ rx_sgl_task_params.sgl_phys_addr.lo =
+ (u32)(qedi_conn->gen_pdu.resp_dma_addr);
+ rx_sgl_task_params.sgl_phys_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
+ rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
+ rx_sgl_task_params.num_sges = 1;
+
+ /* Fill fw input params */
+ task_params.context = fw_task_ctx;
+ task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+ task_params.itid = tid;
+ task_params.cq_rss_number = 0;
+ task_params.tx_io_size = ntoh24(text_hdr->dlength);
+ task_params.rx_io_size = resp_sge->sge_len;
+
+ sq_idx = qedi_get_wqe_idx(qedi_conn);
+ task_params.sqe = &ep->sq[sq_idx];
+
+ memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+ rval = init_initiator_text_request_task(&task_params,
+ &text_request_pdu_header,
+ &tx_sgl_task_params,
+ &rx_sgl_task_params);
+ if (rval)
+ return -1;
- /* Mstorm context */
- single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
- fw_task_ctx->mstorm_st_context.task_type = 0x2;
- fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
- single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
- single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
- single_sge->sge_len = resp_sge->sge_len;
-
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SINGLE_SGE, 1);
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SLOW_IO, 0);
- fw_task_ctx->mstorm_st_context.sgl_size = 1;
- fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
-
- /* Ustorm context */
- fw_task_ctx->ustorm_ag_context.exp_data_acked =
- ntoh24(text_hdr->dlength);
- fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
- fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
- ntoh24(text_hdr->dlength);
- fw_task_ctx->ustorm_st_context.exp_data_sn =
- be32_to_cpu(text_hdr->exp_statsn);
- fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
- fw_task_ctx->ustorm_st_context.task_type = 0x2;
- fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
- SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
- USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
-
- /* Add command in active command list */
spin_lock(&qedi_conn->list_lock);
list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
qedi_cmd->io_cmd_in_list = true;
qedi_conn->active_cmd_count++;
spin_unlock(&qedi_conn->list_lock);
- qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
qedi_ring_doorbell(qedi_conn);
-
return 0;
}
@@ -1791,58 +1710,62 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
struct iscsi_task *task,
char *datap, int data_len, int unsol)
{
+ struct iscsi_nop_out_hdr nop_out_pdu_header;
+ struct scsi_sgl_task_params tx_sgl_task_params;
+ struct scsi_sgl_task_params rx_sgl_task_params;
+ struct iscsi_task_params task_params;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_task_context *fw_task_ctx;
- struct iscsi_nop_out_hdr *fw_nop_out;
- struct qedi_cmd *qedi_cmd;
- /* For 6.5 hdr iscsi_hdr */
struct iscsi_nopout *nopout_hdr;
- struct iscsi_cached_sge_ctx *cached_sge;
- struct iscsi_sge *single_sge;
- struct iscsi_sge *req_sge;
- struct iscsi_sge *resp_sge;
- u32 lun[2];
- s16 ptu_invalidate = 0;
+ struct scsi_sge *req_sge = NULL;
+ struct scsi_sge *resp_sge = NULL;
+ struct qedi_cmd *qedi_cmd;
+ struct qedi_endpoint *ep;
+ u32 scsi_lun[2];
s16 tid = 0;
+ u16 sq_idx = 0;
+ int rval = 0;
- req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
- resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
qedi_cmd = (struct qedi_cmd *)task->dd_data;
nopout_hdr = (struct iscsi_nopout *)task->hdr;
+ ep = qedi_conn->ep;
tid = qedi_get_task_idx(qedi);
- if (tid == -1) {
- QEDI_WARN(&qedi->dbg_ctx, "Invalid tid\n");
+ if (tid == -1)
return -ENOMEM;
- }
-
- fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+ fw_task_ctx =
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
qedi_cmd->task_id = tid;
- /* Ystorm context */
- fw_nop_out = &fw_task_ctx->ystorm_st_context.pdu_hdr.nop_out;
- SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1);
- SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0);
+ memset(&task_params, 0, sizeof(task_params));
+ memset(&nop_out_pdu_header, 0, sizeof(nop_out_pdu_header));
+ memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
+ memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
+
+ /* Update header info */
+ nop_out_pdu_header.opcode = nopout_hdr->opcode;
+ SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1);
+ SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0);
- memcpy(lun, &nopout_hdr->lun, sizeof(struct scsi_lun));
- fw_nop_out->lun.lo = be32_to_cpu(lun[0]);
- fw_nop_out->lun.hi = be32_to_cpu(lun[1]);
+ memcpy(scsi_lun, &nopout_hdr->lun, sizeof(struct scsi_lun));
+ nop_out_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
+ nop_out_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
+ nop_out_pdu_header.cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
+ nop_out_pdu_header.exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn);
qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
if (nopout_hdr->ttt != ISCSI_TTT_ALL_ONES) {
- fw_nop_out->itt = be32_to_cpu(nopout_hdr->itt);
- fw_nop_out->ttt = be32_to_cpu(nopout_hdr->ttt);
- fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
- fw_task_ctx->ystorm_st_context.state.local_comp = 1;
- SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
- USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1);
+ nop_out_pdu_header.itt = be32_to_cpu(nopout_hdr->itt);
+ nop_out_pdu_header.ttt = be32_to_cpu(nopout_hdr->ttt);
} else {
- fw_nop_out->itt = qedi_set_itt(tid, get_itt(task->itt));
- fw_nop_out->ttt = ISCSI_TTT_ALL_ONES;
- fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
+ nop_out_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
+ nop_out_pdu_header.ttt = ISCSI_TTT_ALL_ONES;
spin_lock(&qedi_conn->list_lock);
list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
@@ -1851,53 +1774,46 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
spin_unlock(&qedi_conn->list_lock);
}
- fw_nop_out->opcode = ISCSI_OPCODE_NOP_OUT;
- fw_nop_out->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
- fw_nop_out->exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn);
-
- cached_sge =
- &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
- cached_sge->sge.sge_len = req_sge->sge_len;
- cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
- cached_sge->sge.sge_addr.hi =
- (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
-
- /* Mstorm context */
- fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
- fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
-
- single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
- single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
- single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
- single_sge->sge_len = resp_sge->sge_len;
- fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
-
- if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
- ptu_invalidate = 1;
- qedi->tid_reuse_count[tid] = 0;
- }
- fw_task_ctx->ystorm_st_context.state.reuse_count =
- qedi->tid_reuse_count[tid];
- fw_task_ctx->mstorm_st_context.reuse_count =
- qedi->tid_reuse_count[tid]++;
- /* Ustorm context */
- fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
- fw_task_ctx->ustorm_st_context.exp_data_transfer_len = data_len;
- fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
- fw_task_ctx->ustorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
- fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
-
- SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
- ISCSI_REG1_NUM_FAST_SGES, 0);
-
- fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
- SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
- USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
-
- fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
- fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
-
- qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+ /* Fill tx AHS and rx buffer */
+ if (data_len) {
+ tx_sgl_task_params.sgl =
+ (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ tx_sgl_task_params.sgl_phys_addr.lo =
+ (u32)(qedi_conn->gen_pdu.req_dma_addr);
+ tx_sgl_task_params.sgl_phys_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+ tx_sgl_task_params.total_buffer_size = data_len;
+ tx_sgl_task_params.num_sges = 1;
+
+ rx_sgl_task_params.sgl =
+ (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ rx_sgl_task_params.sgl_phys_addr.lo =
+ (u32)(qedi_conn->gen_pdu.resp_dma_addr);
+ rx_sgl_task_params.sgl_phys_addr.hi =
+ (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
+ rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
+ rx_sgl_task_params.num_sges = 1;
+ }
+
+ /* Fill fw input params */
+ task_params.context = fw_task_ctx;
+ task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+ task_params.itid = tid;
+ task_params.cq_rss_number = 0;
+ task_params.tx_io_size = data_len;
+ task_params.rx_io_size = resp_sge->sge_len;
+
+ sq_idx = qedi_get_wqe_idx(qedi_conn);
+ task_params.sqe = &ep->sq[sq_idx];
+
+ memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+ rval = init_initiator_nop_out_task(&task_params,
+ &nop_out_pdu_header,
+ &tx_sgl_task_params,
+ &rx_sgl_task_params);
+ if (rval)
+ return -1;
+
qedi_ring_doorbell(qedi_conn);
return 0;
}
@@ -1905,7 +1821,7 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len,
int bd_index)
{
- struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+ struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
int frag_size, sg_frags;
sg_frags = 0;
@@ -1938,7 +1854,7 @@ static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len,
static int qedi_map_scsi_sg(struct qedi_ctx *qedi, struct qedi_cmd *cmd)
{
struct scsi_cmnd *sc = cmd->scsi_cmd;
- struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+ struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
struct scatterlist *sg;
int byte_count = 0;
int bd_count = 0;
@@ -2040,7 +1956,7 @@ static void qedi_iscsi_map_sg_list(struct qedi_cmd *cmd)
if (bd_count == 0)
return;
} else {
- struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+ struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
bd[0].sge_addr.lo = 0;
bd[0].sge_addr.hi = 0;
@@ -2136,244 +2052,182 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
struct qedi_conn *qedi_conn = conn->dd_data;
struct qedi_cmd *cmd = task->dd_data;
struct scsi_cmnd *sc = task->sc;
+ struct iscsi_cmd_hdr cmd_pdu_header;
+ struct scsi_sgl_task_params tx_sgl_task_params;
+ struct scsi_sgl_task_params rx_sgl_task_params;
+ struct scsi_sgl_task_params *prx_sgl = NULL;
+ struct scsi_sgl_task_params *ptx_sgl = NULL;
+ struct iscsi_task_params task_params;
+ struct iscsi_conn_params conn_params;
+ struct scsi_initiator_cmd_params cmd_params;
struct iscsi_task_context *fw_task_ctx;
- struct iscsi_cached_sge_ctx *cached_sge;
- struct iscsi_phys_sgl_ctx *phys_sgl;
- struct iscsi_virt_sgl_ctx *virt_sgl;
- struct ystorm_iscsi_task_st_ctx *yst_cxt;
- struct mstorm_iscsi_task_st_ctx *mst_cxt;
- struct iscsi_sgl *sgl_struct;
- struct iscsi_sge *single_sge;
+ struct iscsi_cls_conn *cls_conn;
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
- struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
- enum iscsi_task_type task_type;
- struct iscsi_cmd_hdr *fw_cmd;
- u32 lun[2];
- u32 exp_data;
- u16 cq_idx = smp_processor_id() % qedi->num_queues;
- s16 ptu_invalidate = 0;
+ enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE;
+ struct qedi_endpoint *ep;
+ u32 scsi_lun[2];
s16 tid = 0;
- u8 num_fast_sgs;
+ u16 sq_idx = 0;
+ u16 cq_idx;
+ int rval = 0;
- tid = qedi_get_task_idx(qedi);
- if (tid == -1)
- return -ENOMEM;
+ ep = qedi_conn->ep;
+ cls_conn = qedi_conn->cls_conn;
+ conn = cls_conn->dd_data;
qedi_iscsi_map_sg_list(cmd);
+ int_to_scsilun(sc->device->lun, (struct scsi_lun *)scsi_lun);
- int_to_scsilun(sc->device->lun, (struct scsi_lun *)lun);
- fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+ tid = qedi_get_task_idx(qedi);
+ if (tid == -1)
+ return -ENOMEM;
+ fw_task_ctx =
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
- cmd->task_id = tid;
- /* Ystorm context */
- fw_cmd = &fw_task_ctx->ystorm_st_context.pdu_hdr.cmd;
- SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_ATTR, ISCSI_ATTR_SIMPLE);
+ cmd->task_id = tid;
+ memset(&task_params, 0, sizeof(task_params));
+ memset(&cmd_pdu_header, 0, sizeof(cmd_pdu_header));
+ memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
+ memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
+ memset(&conn_params, 0, sizeof(conn_params));
+ memset(&cmd_params, 0, sizeof(cmd_params));
+
+ cq_idx = smp_processor_id() % qedi->num_queues;
+ /* Update header info */
+ SET_FIELD(cmd_pdu_header.flags_attr, ISCSI_CMD_HDR_ATTR,
+ ISCSI_ATTR_SIMPLE);
if (sc->sc_data_direction == DMA_TO_DEVICE) {
- if (conn->session->initial_r2t_en) {
- exp_data = min((conn->session->imm_data_en *
- conn->max_xmit_dlength),
- conn->session->first_burst);
- exp_data = min(exp_data, scsi_bufflen(sc));
- fw_task_ctx->ustorm_ag_context.exp_data_acked =
- cpu_to_le32(exp_data);
- } else {
- fw_task_ctx->ustorm_ag_context.exp_data_acked =
- min(conn->session->first_burst, scsi_bufflen(sc));
- }
-
- SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_WRITE, 1);
+ SET_FIELD(cmd_pdu_header.flags_attr,
+ ISCSI_CMD_HDR_WRITE, 1);
task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE;
} else {
- if (scsi_bufflen(sc))
- SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_READ, 1);
+ SET_FIELD(cmd_pdu_header.flags_attr,
+ ISCSI_CMD_HDR_READ, 1);
task_type = ISCSI_TASK_TYPE_INITIATOR_READ;
}
- fw_cmd->lun.lo = be32_to_cpu(lun[0]);
- fw_cmd->lun.hi = be32_to_cpu(lun[1]);
+ cmd_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
+ cmd_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
qedi_update_itt_map(qedi, tid, task->itt, cmd);
- fw_cmd->itt = qedi_set_itt(tid, get_itt(task->itt));
- fw_cmd->expected_transfer_length = scsi_bufflen(sc);
- fw_cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
- fw_cmd->opcode = hdr->opcode;
- qedi_cpy_scsi_cdb(sc, (u32 *)fw_cmd->cdb);
-
- /* Mstorm context */
- fw_task_ctx->mstorm_st_context.sense_db.lo = (u32)cmd->sense_buffer_dma;
- fw_task_ctx->mstorm_st_context.sense_db.hi =
- (u32)((u64)cmd->sense_buffer_dma >> 32);
- fw_task_ctx->mstorm_ag_context.task_cid = qedi_conn->iscsi_conn_id;
- fw_task_ctx->mstorm_st_context.task_type = task_type;
-
- if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
- ptu_invalidate = 1;
- qedi->tid_reuse_count[tid] = 0;
- }
- fw_task_ctx->ystorm_st_context.state.reuse_count =
- qedi->tid_reuse_count[tid];
- fw_task_ctx->mstorm_st_context.reuse_count =
- qedi->tid_reuse_count[tid]++;
-
- /* Ustorm context */
- fw_task_ctx->ustorm_st_context.rem_rcv_len = scsi_bufflen(sc);
- fw_task_ctx->ustorm_st_context.exp_data_transfer_len = scsi_bufflen(sc);
- fw_task_ctx->ustorm_st_context.exp_data_sn =
- be32_to_cpu(hdr->exp_statsn);
- fw_task_ctx->ustorm_st_context.task_type = task_type;
- fw_task_ctx->ustorm_st_context.cq_rss_number = cq_idx;
- fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
-
- SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
- USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
- SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
- USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
-
- num_fast_sgs = (cmd->io_tbl.sge_valid ?
- min((u16)QEDI_FAST_SGE_COUNT,
- (u16)cmd->io_tbl.sge_valid) : 0);
- SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
- ISCSI_REG1_NUM_FAST_SGES, num_fast_sgs);
-
- fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
- fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
-
- QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "Total sge count [%d]\n",
- cmd->io_tbl.sge_valid);
-
- yst_cxt = &fw_task_ctx->ystorm_st_context;
- mst_cxt = &fw_task_ctx->mstorm_st_context;
- /* Tx path */
+ cmd_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
+ cmd_pdu_header.expected_transfer_length = cpu_to_be32(hdr->data_length);
+ cmd_pdu_header.hdr_second_dword = ntoh24(hdr->dlength);
+ cmd_pdu_header.cmd_sn = be32_to_cpu(hdr->cmdsn);
+ cmd_pdu_header.opcode = hdr->opcode;
+ qedi_cpy_scsi_cdb(sc, (u32 *)cmd_pdu_header.cdb);
+
+ /* Fill tx AHS and rx buffer */
if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
- /* not considering superIO or FastIO */
- if (cmd->io_tbl.sge_valid == 1) {
- cached_sge = &yst_cxt->state.sgl_ctx_union.cached_sge;
- cached_sge->sge.sge_addr.lo = bd[0].sge_addr.lo;
- cached_sge->sge.sge_addr.hi = bd[0].sge_addr.hi;
- cached_sge->sge.sge_len = bd[0].sge_len;
- qedi->cached_sgls++;
- } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SLOW_IO, 1);
- SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
- ISCSI_REG1_NUM_FAST_SGES, 0);
- phys_sgl = &yst_cxt->state.sgl_ctx_union.phys_sgl;
- phys_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
- phys_sgl->sgl_base.hi =
- (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
- phys_sgl->sgl_size = cmd->io_tbl.sge_valid;
- qedi->slow_sgls++;
- } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SLOW_IO, 0);
- SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
- ISCSI_REG1_NUM_FAST_SGES,
- min((u16)QEDI_FAST_SGE_COUNT,
- (u16)cmd->io_tbl.sge_valid));
- virt_sgl = &yst_cxt->state.sgl_ctx_union.virt_sgl;
- virt_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
- virt_sgl->sgl_base.hi =
+ tx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl;
+ tx_sgl_task_params.sgl_phys_addr.lo =
+ (u32)(cmd->io_tbl.sge_tbl_dma);
+ tx_sgl_task_params.sgl_phys_addr.hi =
(u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
- virt_sgl->sgl_initial_offset =
- (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
- qedi->fast_sgls++;
- }
- fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
- fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
- } else {
- /* Rx path */
- if (cmd->io_tbl.sge_valid == 1) {
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SLOW_IO, 0);
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SINGLE_SGE, 1);
- single_sge = &mst_cxt->sgl_union.single_sge;
- single_sge->sge_addr.lo = bd[0].sge_addr.lo;
- single_sge->sge_addr.hi = bd[0].sge_addr.hi;
- single_sge->sge_len = bd[0].sge_len;
- qedi->cached_sgls++;
- } else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
- sgl_struct = &mst_cxt->sgl_union.sgl_struct;
- sgl_struct->sgl_addr.lo =
- (u32)(cmd->io_tbl.sge_tbl_dma);
- sgl_struct->sgl_addr.hi =
- (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SLOW_IO, 1);
- SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
- ISCSI_REG1_NUM_FAST_SGES, 0);
- sgl_struct->updated_sge_size = 0;
- sgl_struct->updated_sge_offset = 0;
- qedi->slow_sgls++;
- } else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
- sgl_struct = &mst_cxt->sgl_union.sgl_struct;
- sgl_struct->sgl_addr.lo =
- (u32)(cmd->io_tbl.sge_tbl_dma);
- sgl_struct->sgl_addr.hi =
- (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
- sgl_struct->byte_offset =
- (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
- SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
- ISCSI_MFLAGS_SLOW_IO, 0);
- SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
- ISCSI_REG1_NUM_FAST_SGES, 0);
- sgl_struct->updated_sge_size = 0;
- sgl_struct->updated_sge_offset = 0;
- qedi->fast_sgls++;
- }
- fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
- fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
- }
-
- if (cmd->io_tbl.sge_valid == 1)
- /* Singel-SGL */
- qedi->use_cached_sge = true;
- else {
+ tx_sgl_task_params.total_buffer_size = scsi_bufflen(sc);
+ tx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid;
if (cmd->use_slowpath)
- qedi->use_slow_sge = true;
- else
- qedi->use_fast_sge = true;
- }
+ tx_sgl_task_params.small_mid_sge = true;
+ } else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ) {
+ rx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl;
+ rx_sgl_task_params.sgl_phys_addr.lo =
+ (u32)(cmd->io_tbl.sge_tbl_dma);
+ rx_sgl_task_params.sgl_phys_addr.hi =
+ (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+ rx_sgl_task_params.total_buffer_size = scsi_bufflen(sc);
+ rx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid;
+ }
+
+ /* Add conn param */
+ conn_params.first_burst_length = conn->session->first_burst;
+ conn_params.max_send_pdu_length = conn->max_xmit_dlength;
+ conn_params.max_burst_length = conn->session->max_burst;
+ if (conn->session->initial_r2t_en)
+ conn_params.initial_r2t = true;
+ if (conn->session->imm_data_en)
+ conn_params.immediate_data = true;
+
+ /* Add cmd params */
+ cmd_params.sense_data_buffer_phys_addr.lo = (u32)cmd->sense_buffer_dma;
+ cmd_params.sense_data_buffer_phys_addr.hi =
+ (u32)((u64)cmd->sense_buffer_dma >> 32);
+ /* Fill fw input params */
+ task_params.context = fw_task_ctx;
+ task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+ task_params.itid = tid;
+ task_params.cq_rss_number = cq_idx;
+ if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE)
+ task_params.tx_io_size = scsi_bufflen(sc);
+ else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ)
+ task_params.rx_io_size = scsi_bufflen(sc);
+
+ sq_idx = qedi_get_wqe_idx(qedi_conn);
+ task_params.sqe = &ep->sq[sq_idx];
+
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
- "%s: %s-SGL: num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x",
+ "%s: %s-SGL: sg_len=0x%x num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x\n",
(task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) ?
"Write " : "Read ", (cmd->io_tbl.sge_valid == 1) ?
"Single" : (cmd->use_slowpath ? "SLOW" : "FAST"),
- (u16)cmd->io_tbl.sge_valid, (u32)(cmd->io_tbl.sge_tbl_dma),
+ (u16)cmd->io_tbl.sge_valid, scsi_bufflen(sc),
+ (u32)(cmd->io_tbl.sge_tbl_dma),
(u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32));
- /* Add command in active command list */
+ memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+
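+ /* Hand the firmware only the SGL for the direction that moves data;
+ * the unused direction is passed as NULL.
+ */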
+ if (task_params.tx_io_size != 0)
+ ptx_sgl = &tx_sgl_task_params;
+ if (task_params.rx_io_size != 0)
+ prx_sgl = &rx_sgl_task_params;
+
+ rval = init_initiator_rw_iscsi_task(&task_params, &conn_params,
+ &cmd_params, &cmd_pdu_header,
+ ptx_sgl, prx_sgl,
+ NULL);
+ if (rval)
+ return -1;
+
spin_lock(&qedi_conn->list_lock);
list_add_tail(&cmd->io_cmd, &qedi_conn->active_cmd_list);
cmd->io_cmd_in_list = true;
qedi_conn->active_cmd_count++;
spin_unlock(&qedi_conn->list_lock);
- qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
qedi_ring_doorbell(qedi_conn);
- if (qedi_io_tracing)
- qedi_trace_io(qedi, task, tid, QEDI_IO_TRACE_REQ);
-
return 0;
}
int qedi_iscsi_cleanup_task(struct iscsi_task *task, bool mark_cmd_node_deleted)
{
+ struct iscsi_task_params task_params;
+ struct qedi_endpoint *ep;
struct iscsi_conn *conn = task->conn;
struct qedi_conn *qedi_conn = conn->dd_data;
struct qedi_cmd *cmd = task->dd_data;
- s16 ptu_invalidate = 0;
+ u16 sq_idx = 0;
+ int rval = 0;
QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
"issue cleanup tid=0x%x itt=0x%x task_state=%d cmd_state=0%x cid=0x%x\n",
cmd->task_id, get_itt(task->itt), task->state,
cmd->state, qedi_conn->iscsi_conn_id);
- qedi_add_to_sq(qedi_conn, task, cmd->task_id, ptu_invalidate, true);
- qedi_ring_doorbell(qedi_conn);
+ memset(&task_params, 0, sizeof(task_params));
+ ep = qedi_conn->ep;
+
+ sq_idx = qedi_get_wqe_idx(qedi_conn);
+
+ task_params.sqe = &ep->sq[sq_idx];
+ memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+ task_params.itid = cmd->task_id;
+ rval = init_cleanup_task(&task_params);
+ if (rval)
+ return rval;
+
+ qedi_ring_doorbell(qedi_conn);
return 0;
}
diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c
new file mode 100644
index 000000000000..fd354d4e03eb
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_fw_api.c
@@ -0,0 +1,781 @@
+/* QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include "qedi_hsi.h"
+#include <linux/qed/qed_if.h>
+
+#include "qedi_fw_iscsi.h"
+#include "qedi_fw_scsi.h"
+
+#define SCSI_NUM_SGES_IN_CACHE 0x4
+
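+/* SGLs with more than SCSI_NUM_SGES_SLOW_SGL_THR entries that also
+ * contain a small middle SGE are handled on the firmware slow I/O path.
+ */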
+static bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge)
+{
+ return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge);
+}
+
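+/* Copy the SGL base address, total length and SGE count into the task
+ * context and cache up to SCSI_NUM_SGES_IN_CACHE leading SGEs in it.
+ */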
+static
+void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
+ struct scsi_cached_sges *ctx_data_desc,
+ struct scsi_sgl_task_params *sgl_task_params)
+{
+ u8 sge_index;
+ u8 num_sges;
+ u32 val;
+
+ num_sges = (sgl_task_params->num_sges > SCSI_NUM_SGES_IN_CACHE) ?
+ SCSI_NUM_SGES_IN_CACHE : sgl_task_params->num_sges;
+
+ /* sgl params */
+ val = cpu_to_le32(sgl_task_params->sgl_phys_addr.lo);
+ ctx_sgl_params->sgl_addr.lo = val;
+ val = cpu_to_le32(sgl_task_params->sgl_phys_addr.hi);
+ ctx_sgl_params->sgl_addr.hi = val;
+ val = cpu_to_le32(sgl_task_params->total_buffer_size);
+ ctx_sgl_params->sgl_total_length = val;
+ ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges);
+
+ for (sge_index = 0; sge_index < num_sges; sge_index++) {
+ val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.lo);
+ ctx_data_desc->sge[sge_index].sge_addr.lo = val;
+ val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.hi);
+ ctx_data_desc->sge[sge_index].sge_addr.hi = val;
+ val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_len);
+ ctx_data_desc->sge[sge_index].sge_len = val;
+ }
+}
+
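+/* The task size is the transmit size for writes (and target reads) and
+ * the receive size otherwise; when DIF is carried on the wire the full
+ * buffer size, including protection bytes, is used instead.
+ */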
+static u32 calc_rw_task_size(struct iscsi_task_params *task_params,
+ enum iscsi_task_type task_type,
+ struct scsi_sgl_task_params *sgl_task_params,
+ struct scsi_dif_task_params *dif_task_params)
+{
+ u32 io_size;
+
+ if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE ||
+ task_type == ISCSI_TASK_TYPE_TARGET_READ)
+ io_size = task_params->tx_io_size;
+ else
+ io_size = task_params->rx_io_size;
+
+ if (!io_size)
+ return 0;
+
+ if (!dif_task_params)
+ return io_size;
+
+ return !dif_task_params->dif_on_network ?
+ io_size : sgl_task_params->total_buffer_size;
+}
+
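+/* Translate the generic DIF parameters into the per-task DIF flag word. */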
+static void
+init_dif_context_flags(struct iscsi_dif_flags *ctx_dif_flags,
+ struct scsi_dif_task_params *dif_task_params)
+{
+ if (!dif_task_params)
+ return;
+
+ SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG,
+ dif_task_params->dif_block_size_log);
+ SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_DIF_TO_PEER,
+ dif_task_params->dif_on_network ? 1 : 0);
+ SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_HOST_INTERFACE,
+ dif_task_params->dif_on_host ? 1 : 0);
+}
+
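+/* Build the SQ work-queue element for the task: cleanup WQEs carry only
+ * the task id, while normal, login and middle-path WQEs also encode the
+ * data length and the number of SGEs.
+ */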
+static void init_sqe(struct iscsi_task_params *task_params,
+ struct scsi_sgl_task_params *sgl_task_params,
+ struct scsi_dif_task_params *dif_task_params,
+ struct iscsi_common_hdr *pdu_header,
+ struct scsi_initiator_cmd_params *cmd_params,
+ enum iscsi_task_type task_type,
+ bool is_cleanup)
+{
+ if (!task_params->sqe)
+ return;
+
+ memset(task_params->sqe, 0, sizeof(*task_params->sqe));
+ task_params->sqe->task_id = cpu_to_le16(task_params->itid);
+ if (is_cleanup) {
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_TASK_CLEANUP);
+ return;
+ }
+
+ switch (task_type) {
+ case ISCSI_TASK_TYPE_INITIATOR_WRITE:
+ {
+ u32 buf_size = 0;
+ u32 num_sges = 0;
+
+ init_dif_context_flags(&task_params->sqe->prot_flags,
+ dif_task_params);
+
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_NORMAL);
+
+ if (task_params->tx_io_size) {
+ buf_size = calc_rw_task_size(task_params, task_type,
+ sgl_task_params,
+ dif_task_params);
+
+ if (scsi_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge))
+ num_sges = ISCSI_WQE_NUM_SGES_SLOWIO;
+ else
+ num_sges = min(sgl_task_params->num_sges,
+ (u16)SCSI_NUM_SGES_SLOW_SGL_THR);
+ }
+
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES, num_sges);
+ SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN,
+ buf_size);
+
+ if (GET_FIELD(pdu_header->hdr_second_dword,
+ ISCSI_CMD_HDR_TOTAL_AHS_LEN))
+ SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CDB_SIZE,
+ cmd_params->extended_cdb_sge.sge_len);
+ }
+ break;
+ case ISCSI_TASK_TYPE_INITIATOR_READ:
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_NORMAL);
+
+ if (GET_FIELD(pdu_header->hdr_second_dword,
+ ISCSI_CMD_HDR_TOTAL_AHS_LEN))
+ SET_FIELD(task_params->sqe->contlen_cdbsize,
+ ISCSI_WQE_CDB_SIZE,
+ cmd_params->extended_cdb_sge.sge_len);
+ break;
+ case ISCSI_TASK_TYPE_LOGIN_RESPONSE:
+ case ISCSI_TASK_TYPE_MIDPATH:
+ {
+ bool advance_statsn = true;
+
+ if (task_type == ISCSI_TASK_TYPE_LOGIN_RESPONSE)
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_LOGIN);
+ else
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+ ISCSI_WQE_TYPE_MIDDLE_PATH);
+
+ if (task_type == ISCSI_TASK_TYPE_MIDPATH) {
+ u8 opcode = GET_FIELD(pdu_header->hdr_first_byte,
+ ISCSI_COMMON_HDR_OPCODE);
+
+ if (opcode != ISCSI_OPCODE_TEXT_RESPONSE &&
+ (opcode != ISCSI_OPCODE_NOP_IN ||
+ pdu_header->itt == ISCSI_TTT_ALL_ONES))
+ advance_statsn = false;
+ }
+
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_RESPONSE,
+ advance_statsn ? 1 : 0);
+
+ if (task_params->tx_io_size) {
+ SET_FIELD(task_params->sqe->contlen_cdbsize,
+ ISCSI_WQE_CONT_LEN, task_params->tx_io_size);
+
+ if (scsi_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge))
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
+ ISCSI_WQE_NUM_SGES_SLOWIO);
+ else
+ SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
+ min(sgl_task_params->num_sges,
+ (u16)SCSI_NUM_SGES_SLOW_SGL_THR));
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
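+/* Zero the task context, copy the PDU header dwords into the Ystorm
+ * section and set the fields common to every task type.
+ */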
+static void init_default_iscsi_task(struct iscsi_task_params *task_params,
+ struct data_hdr *pdu_header,
+ enum iscsi_task_type task_type)
+{
+ struct iscsi_task_context *context;
+ u16 index;
+ u32 val;
+
+ context = task_params->context;
+ memset(context, 0, sizeof(*context));
+
+ for (index = 0; index <
+ ARRAY_SIZE(context->ystorm_st_context.pdu_hdr.data.data);
+ index++) {
+ val = cpu_to_le32(pdu_header->data[index]);
+ context->ystorm_st_context.pdu_hdr.data.data[index] = val;
+ }
+
+ context->mstorm_st_context.task_type = task_type;
+ context->mstorm_ag_context.task_cid =
+ cpu_to_le16(task_params->conn_icid);
+
+ SET_FIELD(context->ustorm_ag_context.flags1,
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+
+ context->ustorm_st_context.task_type = task_type;
+ context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
+ context->ustorm_ag_context.icid = cpu_to_le16(task_params->conn_icid);
+}
+
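+/* For extended CDBs, point the Ystorm header at the SGE that holds the
+ * CDB and record its size; plain CDBs need no extra setup here.
+ */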
+static
+void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc,
+ struct scsi_initiator_cmd_params *cmd)
+{
+ union iscsi_task_hdr *ctx_pdu_hdr = &ystc->pdu_hdr;
+ u32 val;
+
+ if (!cmd->extended_cdb_sge.sge_len)
+ return;
+
+ SET_FIELD(ctx_pdu_hdr->ext_cdb_cmd.hdr_second_dword,
+ ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE,
+ cmd->extended_cdb_sge.sge_len);
+ val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.lo);
+ ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.lo = val;
+ val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.hi);
+ ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.hi = val;
+ val = cpu_to_le32(cmd->extended_cdb_sge.sge_len);
+ ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_len = val;
+}
+
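+/* Program the Ustorm contexts with the remaining receive length, the
+ * expected transfer length, the SGE count and the DIF-error enable bit.
+ */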
+static
+void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
+ struct ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
+ u32 remaining_recv_len,
+ u32 expected_data_transfer_len,
+ u8 num_sges, bool tx_dif_conn_err_en)
+{
+ u32 val;
+
+ ustorm_st_cxt->rem_rcv_len = cpu_to_le32(remaining_recv_len);
+ ustorm_ag_cxt->exp_data_acked = cpu_to_le32(expected_data_transfer_len);
+ val = cpu_to_le32(expected_data_transfer_len);
+ ustorm_st_cxt->exp_data_transfer_len = val;
+ SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges);
+ SET_FIELD(ustorm_ag_cxt->flags2,
+ USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
+ tx_dif_conn_err_en ? 1 : 0);
+}
+
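+/* exp_data_acked for an initiator write is the unsolicited data allowed
+ * before the first R2T: up to the first burst when InitialR2T is off,
+ * or one immediate-data PDU when immediate data is enabled.
+ */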
+static
+void set_rw_exp_data_acked_and_cont_len(struct iscsi_task_context *context,
+ struct iscsi_conn_params *conn_params,
+ enum iscsi_task_type task_type,
+ u32 task_size,
+ u32 exp_data_transfer_len,
+ u8 total_ahs_length)
+{
+ u32 max_unsolicited_data = 0, val;
+
+ if (total_ahs_length &&
+ (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE ||
+ task_type == ISCSI_TASK_TYPE_INITIATOR_READ))
+ SET_FIELD(context->ustorm_st_context.flags2,
+ USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST, 1);
+
+ switch (task_type) {
+ case ISCSI_TASK_TYPE_INITIATOR_WRITE:
+ if (!conn_params->initial_r2t)
+ max_unsolicited_data = conn_params->first_burst_length;
+ else if (conn_params->immediate_data)
+ max_unsolicited_data =
+ min(conn_params->first_burst_length,
+ conn_params->max_send_pdu_length);
+
+ context->ustorm_ag_context.exp_data_acked =
+ cpu_to_le32(total_ahs_length == 0 ?
+ min(exp_data_transfer_len,
+ max_unsolicited_data) :
+ ((u32)(total_ahs_length +
+ ISCSI_AHS_CNTL_SIZE)));
+ break;
+ case ISCSI_TASK_TYPE_TARGET_READ:
+ val = cpu_to_le32(exp_data_transfer_len);
+ context->ustorm_ag_context.exp_data_acked = val;
+ break;
+ case ISCSI_TASK_TYPE_INITIATOR_READ:
+ context->ustorm_ag_context.exp_data_acked =
+ cpu_to_le32((total_ahs_length == 0 ? 0 :
+ total_ahs_length +
+ ISCSI_AHS_CNTL_SIZE));
+ break;
+ case ISCSI_TASK_TYPE_TARGET_WRITE:
+ val = cpu_to_le32(task_size);
+ context->ustorm_ag_context.exp_cont_len = val;
+ break;
+ default:
+ break;
+ }
+}
+
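+/* Program the receive (RDIF) and transmit (TDIF) protection-information
+ * contexts; initiator reads use the RDIF side, initiator writes TDIF.
+ */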
+static
+void init_rtdif_task_context(struct rdif_task_context *rdif_context,
+ struct tdif_task_context *tdif_context,
+ struct scsi_dif_task_params *dif_task_params,
+ enum iscsi_task_type task_type)
+{
+ u32 val;
+
+ if (!dif_task_params->dif_on_network || !dif_task_params->dif_on_host)
+ return;
+
+ if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE ||
+ task_type == ISCSI_TASK_TYPE_INITIATOR_READ) {
+ rdif_context->app_tag_value =
+ cpu_to_le16(dif_task_params->application_tag);
+ rdif_context->partial_crc_value = cpu_to_le16(0xffff);
+ val = cpu_to_le32(dif_task_params->initial_ref_tag);
+ rdif_context->initial_ref_tag = val;
+ rdif_context->app_tag_mask =
+ cpu_to_le16(dif_task_params->application_tag_mask);
+ SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_CRC_SEED,
+ dif_task_params->crc_seed ? 1 : 0);
+ SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_HOSTGUARDTYPE,
+ dif_task_params->host_guard_type);
+ SET_FIELD(rdif_context->flags0,
+ RDIF_TASK_CONTEXT_PROTECTIONTYPE,
+ dif_task_params->protection_type);
+ SET_FIELD(rdif_context->flags0,
+ RDIF_TASK_CONTEXT_INITIALREFTAGVALID, 1);
+ SET_FIELD(rdif_context->flags0,
+ RDIF_TASK_CONTEXT_KEEPREFTAGCONST,
+ dif_task_params->keep_ref_tag_const ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_VALIDATEAPPTAG,
+ (dif_task_params->validate_app_tag &&
+ dif_task_params->dif_on_network) ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_VALIDATEGUARD,
+ (dif_task_params->validate_guard &&
+ dif_task_params->dif_on_network) ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_VALIDATEREFTAG,
+ (dif_task_params->validate_ref_tag &&
+ dif_task_params->dif_on_network) ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_HOSTINTERFACE,
+ dif_task_params->dif_on_host ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_NETWORKINTERFACE,
+ dif_task_params->dif_on_network ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_FORWARDGUARD,
+ dif_task_params->forward_guard ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_FORWARDAPPTAG,
+ dif_task_params->forward_app_tag ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_FORWARDREFTAG,
+ dif_task_params->forward_ref_tag ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
+ dif_task_params->forward_app_tag_with_mask ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
+ dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
+ SET_FIELD(rdif_context->flags1,
+ RDIF_TASK_CONTEXT_INTERVALSIZE,
+ dif_task_params->dif_block_size_log - 9);
+ SET_FIELD(rdif_context->state,
+ RDIF_TASK_CONTEXT_REFTAGMASK,
+ dif_task_params->ref_tag_mask);
+ SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNOREAPPTAG,
+ dif_task_params->ignore_app_tag);
+ }
+
+ if (task_type == ISCSI_TASK_TYPE_TARGET_READ ||
+ task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
+ tdif_context->app_tag_value =
+ cpu_to_le16(dif_task_params->application_tag);
+ tdif_context->partial_crc_valueB =
+ cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
+ tdif_context->partial_crc_value_a =
+ cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
+ SET_FIELD(tdif_context->flags0, TDIF_TASK_CONTEXT_CRC_SEED,
+ dif_task_params->crc_seed ? 1 : 0);
+
+ SET_FIELD(tdif_context->flags0,
+ TDIF_TASK_CONTEXT_SETERRORWITHEOP,
+ dif_task_params->tx_dif_conn_err_en ? 1 : 0);
+ SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDGUARD,
+ dif_task_params->forward_guard ? 1 : 0);
+ SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDAPPTAG,
+ dif_task_params->forward_app_tag ? 1 : 0);
+ SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDREFTAG,
+ dif_task_params->forward_ref_tag ? 1 : 0);
+ SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVALSIZE,
+ dif_task_params->dif_block_size_log - 9);
+ SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_HOSTINTERFACE,
+ dif_task_params->dif_on_host ? 1 : 0);
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_NETWORKINTERFACE,
+ dif_task_params->dif_on_network ? 1 : 0);
+ val = cpu_to_le32(dif_task_params->initial_ref_tag);
+ tdif_context->initial_ref_tag = val;
+ tdif_context->app_tag_mask =
+ cpu_to_le16(dif_task_params->application_tag_mask);
+ SET_FIELD(tdif_context->flags0,
+ TDIF_TASK_CONTEXT_HOSTGUARDTYPE,
+ dif_task_params->host_guard_type);
+ SET_FIELD(tdif_context->flags0,
+ TDIF_TASK_CONTEXT_PROTECTIONTYPE,
+ dif_task_params->protection_type);
+ SET_FIELD(tdif_context->flags0,
+ TDIF_TASK_CONTEXT_INITIALREFTAGVALID,
+ dif_task_params->initial_ref_tag_is_valid ? 1 : 0);
+ SET_FIELD(tdif_context->flags0,
+ TDIF_TASK_CONTEXT_KEEPREFTAGCONST,
+ dif_task_params->keep_ref_tag_const ? 1 : 0);
+ SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_VALIDATEGUARD,
+ (dif_task_params->validate_guard &&
+ dif_task_params->dif_on_host) ? 1 : 0);
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_VALIDATEAPPTAG,
+ (dif_task_params->validate_app_tag &&
+ dif_task_params->dif_on_host) ? 1 : 0);
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_VALIDATEREFTAG,
+ (dif_task_params->validate_ref_tag &&
+ dif_task_params->dif_on_host) ? 1 : 0);
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
+ dif_task_params->forward_app_tag_with_mask ? 1 : 0);
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
+ dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
+ SET_FIELD(tdif_context->flags1,
+ TDIF_TASK_CONTEXT_REFTAGMASK,
+ dif_task_params->ref_tag_mask);
+ SET_FIELD(tdif_context->flags0,
+ TDIF_TASK_CONTEXT_IGNOREAPPTAG,
+ dif_task_params->ignore_app_tag ? 1 : 0);
+ }
+}
+
+static void set_local_completion_context(struct iscsi_task_context *context)
+{
+ SET_FIELD(context->ystorm_st_context.state.flags,
+ YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1);
+ SET_FIELD(context->ustorm_st_context.flags,
+ USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1);
+}
+
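+/* Common setup for read/write tasks: fill the storm contexts, the
+ * optional DIF contexts and the SQ element from the supplied params.
+ */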
+static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
+ enum iscsi_task_type task_type,
+ struct iscsi_conn_params *conn_params,
+ struct iscsi_common_hdr *pdu_header,
+ struct scsi_sgl_task_params *sgl_task_params,
+ struct scsi_initiator_cmd_params *cmd_params,
+ struct scsi_dif_task_params *dif_task_params)
+{
+ u32 exp_data_transfer_len = conn_params->max_burst_length;
+ struct iscsi_task_context *cxt;
+ bool slow_io = false;
+ u32 task_size, val;
+ u8 num_sges = 0;
+
+ task_size = calc_rw_task_size(task_params, task_type, sgl_task_params,
+ dif_task_params);
+
+ init_default_iscsi_task(task_params, (struct data_hdr *)pdu_header,
+ task_type);
+
+ cxt = task_params->context;
+
+ val = cpu_to_le32(task_size);
+ cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length = val;
+ init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
+ cmd_params);
+ val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
+ cxt->mstorm_st_context.sense_db.lo = val;
+
+ val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
+ cxt->mstorm_st_context.sense_db.hi = val;
+
+ if (task_params->tx_io_size) {
+ init_dif_context_flags(&cxt->ystorm_st_context.state.dif_flags,
+ dif_task_params);
+ init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+ &cxt->ystorm_st_context.state.data_desc,
+ sgl_task_params);
+
+ slow_io = scsi_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge);
+
+ num_sges = !slow_io ? min_t(u16, sgl_task_params->num_sges,
+ (u16)SCSI_NUM_SGES_SLOW_SGL_THR) :
+ ISCSI_WQE_NUM_SGES_SLOWIO;
+
+ if (slow_io) {
+ SET_FIELD(cxt->ystorm_st_context.state.flags,
+ YSTORM_ISCSI_TASK_STATE_SLOW_IO, 1);
+ }
+ } else if (task_params->rx_io_size) {
+ init_dif_context_flags(&cxt->mstorm_st_context.dif_flags,
+ dif_task_params);
+ init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+ &cxt->mstorm_st_context.data_desc,
+ sgl_task_params);
+ num_sges = !scsi_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge) ?
+ min_t(u16, sgl_task_params->num_sges,
+ (u16)SCSI_NUM_SGES_SLOW_SGL_THR) :
+ ISCSI_WQE_NUM_SGES_SLOWIO;
+ cxt->mstorm_st_context.rem_task_size = cpu_to_le32(task_size);
+ }
+
+ if (exp_data_transfer_len > task_size ||
+ task_type != ISCSI_TASK_TYPE_TARGET_WRITE)
+ exp_data_transfer_len = task_size;
+
+ init_ustorm_task_contexts(&task_params->context->ustorm_st_context,
+ &task_params->context->ustorm_ag_context,
+ task_size, exp_data_transfer_len, num_sges,
+ dif_task_params ?
+ dif_task_params->tx_dif_conn_err_en : false);
+
+ set_rw_exp_data_acked_and_cont_len(task_params->context, conn_params,
+ task_type, task_size,
+ exp_data_transfer_len,
+ GET_FIELD(pdu_header->hdr_second_dword,
+ ISCSI_CMD_HDR_TOTAL_AHS_LEN));
+
+ if (dif_task_params)
+ init_rtdif_task_context(&task_params->context->rdif_context,
+ &task_params->context->tdif_context,
+ dif_task_params, task_type);
+
+ init_sqe(task_params, sgl_task_params, dif_task_params, pdu_header,
+ cmd_params, task_type, false);
+
+ return 0;
+}
+
+int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params,
+ struct iscsi_conn_params *conn_params,
+ struct scsi_initiator_cmd_params *cmd_params,
+ struct iscsi_cmd_hdr *cmd_header,
+ struct scsi_sgl_task_params *tx_sgl_params,
+ struct scsi_sgl_task_params *rx_sgl_params,
+ struct scsi_dif_task_params *dif_task_params)
+{
+ if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_WRITE))
+ return init_rw_iscsi_task(task_params,
+ ISCSI_TASK_TYPE_INITIATOR_WRITE,
+ conn_params,
+ (struct iscsi_common_hdr *)cmd_header,
+ tx_sgl_params, cmd_params,
+ dif_task_params);
+ else if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_READ))
+ return init_rw_iscsi_task(task_params,
+ ISCSI_TASK_TYPE_INITIATOR_READ,
+ conn_params,
+ (struct iscsi_common_hdr *)cmd_header,
+ rx_sgl_params, cmd_params,
+ dif_task_params);
+ else
+ return -1;
+}
+
+int init_initiator_login_request_task(struct iscsi_task_params *task_params,
+ struct iscsi_login_req_hdr *login_header,
+ struct scsi_sgl_task_params *tx_params,
+ struct scsi_sgl_task_params *rx_params)
+{
+ struct iscsi_task_context *cxt;
+
+ cxt = task_params->context;
+
+ init_default_iscsi_task(task_params,
+ (struct data_hdr *)login_header,
+ ISCSI_TASK_TYPE_MIDPATH);
+
+ init_ustorm_task_contexts(&cxt->ustorm_st_context,
+ &cxt->ustorm_ag_context,
+ task_params->rx_io_size ?
+ rx_params->total_buffer_size : 0,
+ task_params->tx_io_size ?
+ tx_params->total_buffer_size : 0, 0,
+ 0);
+
+ if (task_params->tx_io_size)
+ init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+ &cxt->ystorm_st_context.state.data_desc,
+ tx_params);
+
+ if (task_params->rx_io_size)
+ init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+ &cxt->mstorm_st_context.data_desc,
+ rx_params);
+
+ cxt->mstorm_st_context.rem_task_size =
+ cpu_to_le32(task_params->rx_io_size ?
+ rx_params->total_buffer_size : 0);
+
+ init_sqe(task_params, tx_params, NULL,
+ (struct iscsi_common_hdr *)login_header, NULL,
+ ISCSI_TASK_TYPE_MIDPATH, false);
+
+ return 0;
+}
+
+int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
+ struct iscsi_nop_out_hdr *nop_out_pdu_header,
+ struct scsi_sgl_task_params *tx_sgl_task_params,
+ struct scsi_sgl_task_params *rx_sgl_task_params)
+{
+ struct iscsi_task_context *cxt;
+
+ cxt = task_params->context;
+
+ init_default_iscsi_task(task_params,
+ (struct data_hdr *)nop_out_pdu_header,
+ ISCSI_TASK_TYPE_MIDPATH);
+
+ if (nop_out_pdu_header->itt == ISCSI_ITT_ALL_ONES)
+ set_local_completion_context(task_params->context);
+
+ if (task_params->tx_io_size)
+ init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+ &cxt->ystorm_st_context.state.data_desc,
+ tx_sgl_task_params);
+
+ if (task_params->rx_io_size)
+ init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+ &cxt->mstorm_st_context.data_desc,
+ rx_sgl_task_params);
+
+ init_ustorm_task_contexts(&cxt->ustorm_st_context,
+ &cxt->ustorm_ag_context,
+ task_params->rx_io_size ?
+ rx_sgl_task_params->total_buffer_size : 0,
+ task_params->tx_io_size ?
+ tx_sgl_task_params->total_buffer_size : 0,
+ 0, 0);
+
+ cxt->mstorm_st_context.rem_task_size =
+ cpu_to_le32(task_params->rx_io_size ?
+ rx_sgl_task_params->total_buffer_size :
+ 0);
+
+ init_sqe(task_params, tx_sgl_task_params, NULL,
+ (struct iscsi_common_hdr *)nop_out_pdu_header, NULL,
+ ISCSI_TASK_TYPE_MIDPATH, false);
+
+ return 0;
+}
+
+int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
+ struct iscsi_logout_req_hdr *logout_hdr,
+ struct scsi_sgl_task_params *tx_params,
+ struct scsi_sgl_task_params *rx_params)
+{
+ struct iscsi_task_context *cxt;
+
+ cxt = task_params->context;
+
+ init_default_iscsi_task(task_params,
+ (struct data_hdr *)logout_hdr,
+ ISCSI_TASK_TYPE_MIDPATH);
+
+ if (task_params->tx_io_size)
+ init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+ &cxt->ystorm_st_context.state.data_desc,
+ tx_params);
+
+ if (task_params->rx_io_size)
+ init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+ &cxt->mstorm_st_context.data_desc,
+ rx_params);
+
+ init_ustorm_task_contexts(&cxt->ustorm_st_context,
+ &cxt->ustorm_ag_context,
+ task_params->rx_io_size ?
+ rx_params->total_buffer_size : 0,
+ task_params->tx_io_size ?
+ tx_params->total_buffer_size : 0,
+ 0, 0);
+
+ cxt->mstorm_st_context.rem_task_size =
+ cpu_to_le32(task_params->rx_io_size ?
+ rx_params->total_buffer_size : 0);
+
+ init_sqe(task_params, tx_params, NULL,
+ (struct iscsi_common_hdr *)logout_hdr, NULL,
+ ISCSI_TASK_TYPE_MIDPATH, false);
+
+ return 0;
+}
+
+int init_initiator_tmf_request_task(struct iscsi_task_params *task_params,
+ struct iscsi_tmf_request_hdr *tmf_header)
+{
+ init_default_iscsi_task(task_params, (struct data_hdr *)tmf_header,
+ ISCSI_TASK_TYPE_MIDPATH);
+
+ init_sqe(task_params, NULL, NULL,
+ (struct iscsi_common_hdr *)tmf_header, NULL,
+ ISCSI_TASK_TYPE_MIDPATH, false);
+
+ return 0;
+}
+
+int init_initiator_text_request_task(struct iscsi_task_params *task_params,
+ struct iscsi_text_request_hdr *text_header,
+ struct scsi_sgl_task_params *tx_params,
+ struct scsi_sgl_task_params *rx_params)
+{
+ struct iscsi_task_context *cxt;
+
+ cxt = task_params->context;
+
+ init_default_iscsi_task(task_params,
+ (struct data_hdr *)text_header,
+ ISCSI_TASK_TYPE_MIDPATH);
+
+ if (task_params->tx_io_size)
+ init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+ &cxt->ystorm_st_context.state.data_desc,
+ tx_params);
+
+ if (task_params->rx_io_size)
+ init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+ &cxt->mstorm_st_context.data_desc,
+ rx_params);
+
+ cxt->mstorm_st_context.rem_task_size =
+ cpu_to_le32(task_params->rx_io_size ?
+ rx_params->total_buffer_size : 0);
+
+ init_ustorm_task_contexts(&cxt->ustorm_st_context,
+ &cxt->ustorm_ag_context,
+ task_params->rx_io_size ?
+ rx_params->total_buffer_size : 0,
+ task_params->tx_io_size ?
+ tx_params->total_buffer_size : 0, 0, 0);
+
+ init_sqe(task_params, tx_params, NULL,
+ (struct iscsi_common_hdr *)text_header, NULL,
+ ISCSI_TASK_TYPE_MIDPATH, false);
+
+ return 0;
+}
+
+int init_cleanup_task(struct iscsi_task_params *task_params)
+{
+ init_sqe(task_params, NULL, NULL, NULL, NULL, ISCSI_TASK_TYPE_MIDPATH,
+ true);
+ return 0;
+}
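
The TDIF/RDIF context setup above encodes the DIF interval size as a base-2 log relative to 512 bytes (the INTERVALSIZE field is dif_task_params->dif_block_size_log - 9). A minimal sketch of that arithmetic, assuming dif_block_size_log is simply the log2 of the logical block size; the helper name is illustrative only:

	#include <linux/log2.h>

	/* Sketch only: 512-byte blocks -> 0, 4096-byte blocks -> 3. */
	static inline u8 dif_interval_size_field(u32 block_size)
	{
		return (u8)(ilog2(block_size) - 9);
	}
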
diff --git a/drivers/scsi/qedi/qedi_fw_iscsi.h b/drivers/scsi/qedi/qedi_fw_iscsi.h
new file mode 100644
index 000000000000..b6f24f91849d
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_fw_iscsi.h
@@ -0,0 +1,117 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_FW_ISCSI_H_
+#define _QEDI_FW_ISCSI_H_
+
+#include "qedi_fw_scsi.h"
+
+struct iscsi_task_params {
+ struct iscsi_task_context *context;
+ struct iscsi_wqe *sqe;
+ u32 tx_io_size;
+ u32 rx_io_size;
+ u16 conn_icid;
+ u16 itid;
+ u8 cq_rss_number;
+};
+
+struct iscsi_conn_params {
+ u32 first_burst_length;
+ u32 max_send_pdu_length;
+ u32 max_burst_length;
+ bool initial_r2t;
+ bool immediate_data;
+};
+
+/* @brief init_initiator_rw_iscsi_task - initializes iSCSI Initiator
+ *	Read/Write task context.
+ * @param task_params	  - Pointer to task parameters struct
+ * @param conn_params	  - Connection Parameters
+ * @param cmd_params	  - command specific parameters
+ * @param cmd_pdu_header  - PDU Header Parameters
+ * @param tx_sgl_params	  - Pointer to Tx SGL task params
+ * @param rx_sgl_params	  - Pointer to Rx SGL task params
+ * @param dif_task_params - Pointer to DIF parameters struct
+ */
+int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params,
+ struct iscsi_conn_params *conn_params,
+ struct scsi_initiator_cmd_params *cmd_params,
+ struct iscsi_cmd_hdr *cmd_pdu_header,
+ struct scsi_sgl_task_params *tx_sgl_params,
+ struct scsi_sgl_task_params *rx_sgl_params,
+ struct scsi_dif_task_params *dif_task_params);
+
+/* @brief init_initiator_login_request_task - initializes iSCSI Initiator Login
+ * Request task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param login_header	  - PDU Header Parameters
+ * @param tx_params	  - Pointer to Tx SGL task params
+ * @param rx_params	  - Pointer to Rx SGL task params
+ */
+int init_initiator_login_request_task(struct iscsi_task_params *task_params,
+ struct iscsi_login_req_hdr *login_header,
+ struct scsi_sgl_task_params *tx_params,
+ struct scsi_sgl_task_params *rx_params);
+
+/* @brief init_initiator_nop_out_task - initializes iSCSI Initiator NOP Out
+ * task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param nop_out_pdu_header - PDU Header Parameters
+ * @param tx_sgl_params	  - Pointer to Tx SGL task params
+ * @param rx_sgl_params	  - Pointer to Rx SGL task params
+ */
+int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
+ struct iscsi_nop_out_hdr *nop_out_pdu_header,
+ struct scsi_sgl_task_params *tx_sgl_params,
+ struct scsi_sgl_task_params *rx_sgl_params);
+
+/* @brief init_initiator_logout_request_task - initializes iSCSI Initiator
+ * Logout Request task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param logout_hdr	  - PDU Header Parameters
+ * @param tx_params	  - Pointer to Tx SGL task params
+ * @param rx_params	  - Pointer to Rx SGL task params
+ */
+int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
+ struct iscsi_logout_req_hdr *logout_hdr,
+ struct scsi_sgl_task_params *tx_params,
+ struct scsi_sgl_task_params *rx_params);
+
+/* @brief init_initiator_tmf_request_task - initializes iSCSI Initiator TMF
+ * task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param tmf_header	  - PDU Header Parameters
+ */
+int init_initiator_tmf_request_task(struct iscsi_task_params *task_params,
+ struct iscsi_tmf_request_hdr *tmf_header);
+
+/* @brief init_initiator_text_request_task - initializes iSCSI Initiator Text
+ * Request task context.
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param text_header	  - PDU Header Parameters
+ * @param tx_params	  - Pointer to Tx SGL task params
+ * @param rx_params	  - Pointer to Rx SGL task params
+ */
+int init_initiator_text_request_task(struct iscsi_task_params *task_params,
+ struct iscsi_text_request_hdr *text_header,
+ struct scsi_sgl_task_params *tx_params,
+ struct scsi_sgl_task_params *rx_params);
+
+/* @brief init_cleanup_task - initializes a Cleanup task (SQE)
+ *
+ * @param task_params - Pointer to task parameters struct
+ */
+int init_cleanup_task(struct iscsi_task_params *task_params);
+#endif
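
Every entry point above expects a pre-filled struct iscsi_task_params pointing at the firmware task context and SQE slot reserved for the command. A minimal caller sketch for the simplest case, the TMF request; the qedi_cmd/qedi_conn field names used here are assumptions for illustration, not taken from this patch:

	static int qedi_send_tmf_sketch(struct qedi_conn *conn, struct qedi_cmd *cmd,
					struct iscsi_tmf_request_hdr *tmf_hdr)
	{
		struct iscsi_task_params task_params = {};

		task_params.context = cmd->task_ctx;		/* assumed field */
		task_params.sqe = cmd->sqe;			/* assumed field */
		task_params.conn_icid = conn->iscsi_conn_id;	/* assumed field */
		task_params.itid = cmd->task_id;		/* assumed field */
		task_params.cq_rss_number = 0;
		task_params.tx_io_size = 0;			/* TMF carries no data */
		task_params.rx_io_size = 0;

		return init_initiator_tmf_request_task(&task_params, tmf_hdr);
	}
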
diff --git a/drivers/scsi/qedi/qedi_fw_scsi.h b/drivers/scsi/qedi/qedi_fw_scsi.h
new file mode 100644
index 000000000000..cdaf918f1019
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_fw_scsi.h
@@ -0,0 +1,55 @@
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_FW_SCSI_H_
+#define _QEDI_FW_SCSI_H_
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include "qedi_hsi.h"
+#include <linux/qed/qed_if.h>
+
+struct scsi_sgl_task_params {
+ struct scsi_sge *sgl;
+ struct regpair sgl_phys_addr;
+ u32 total_buffer_size;
+ u16 num_sges;
+ bool small_mid_sge;
+};
+
+struct scsi_dif_task_params {
+ u32 initial_ref_tag;
+ bool initial_ref_tag_is_valid;
+ u16 application_tag;
+ u16 application_tag_mask;
+ u16 dif_block_size_log;
+ bool dif_on_network;
+ bool dif_on_host;
+ u8 host_guard_type;
+ u8 protection_type;
+ u8 ref_tag_mask;
+ bool crc_seed;
+ bool tx_dif_conn_err_en;
+ bool ignore_app_tag;
+ bool keep_ref_tag_const;
+ bool validate_guard;
+ bool validate_app_tag;
+ bool validate_ref_tag;
+ bool forward_guard;
+ bool forward_app_tag;
+ bool forward_ref_tag;
+ bool forward_app_tag_with_mask;
+ bool forward_ref_tag_with_mask;
+};
+
+struct scsi_initiator_cmd_params {
+ struct scsi_sge extended_cdb_sge;
+ struct regpair sense_data_buffer_phys_addr;
+};
+#endif
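
struct scsi_sgl_task_params describes a DMA-mapped table of struct scsi_sge entries together with its bus address and total payload length; small_mid_sge tells the init helpers that some SGE other than the last one is short enough to force the slow-I/O path. A minimal sketch of filling it, with the hi/lo split mirroring the bd_tbl setup elsewhere in this patch; the helper name and the decision of who computes small_mid_sge are assumptions:

	static void fill_sgl_params_sketch(struct scsi_sgl_task_params *p,
					   struct scsi_sge *sge_tbl,
					   dma_addr_t sge_tbl_dma,
					   u16 nsge, u32 total_len,
					   bool has_small_mid_sge)
	{
		p->sgl = sge_tbl;
		p->sgl_phys_addr.lo = (u32)sge_tbl_dma;
		p->sgl_phys_addr.hi = (u32)((u64)sge_tbl_dma >> 32);
		p->total_buffer_size = total_len;
		p->num_sges = nsge;
		p->small_mid_sge = has_small_mid_sge;
	}
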
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
index 8e488de88ece..63d793f46064 100644
--- a/drivers/scsi/qedi/qedi_gbl.h
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -12,8 +12,14 @@
#include "qedi_iscsi.h"
+#ifdef CONFIG_DEBUG_FS
+extern int qedi_do_not_recover;
+#else
+#define qedi_do_not_recover (0)
+#endif
+
extern uint qedi_io_tracing;
-extern int do_not_recover;
+
extern struct scsi_host_template qedi_host_template;
extern struct iscsi_transport qedi_iscsi_transport;
extern const struct qed_iscsi_ops *qedi_ops;
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 3895bd555746..3548d46f9b27 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -175,7 +175,7 @@ static void qedi_destroy_cmd_pool(struct qedi_ctx *qedi,
if (cmd->io_tbl.sge_tbl)
dma_free_coherent(&qedi->pdev->dev,
QEDI_ISCSI_MAX_BDS_PER_CMD *
- sizeof(struct iscsi_sge),
+ sizeof(struct scsi_sge),
cmd->io_tbl.sge_tbl,
cmd->io_tbl.sge_tbl_dma);
@@ -191,7 +191,7 @@ static int qedi_alloc_sget(struct qedi_ctx *qedi, struct iscsi_session *session,
struct qedi_cmd *cmd)
{
struct qedi_io_bdt *io = &cmd->io_tbl;
- struct iscsi_sge *sge;
+ struct scsi_sge *sge;
io->sge_tbl = dma_alloc_coherent(&qedi->pdev->dev,
QEDI_ISCSI_MAX_BDS_PER_CMD *
@@ -708,22 +708,20 @@ static void qedi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
static void qedi_iscsi_prep_generic_pdu_bd(struct qedi_conn *qedi_conn)
{
- struct iscsi_sge *bd_tbl;
+ struct scsi_sge *bd_tbl;
- bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+ bd_tbl = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
bd_tbl->sge_addr.hi =
(u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.req_dma_addr;
bd_tbl->sge_len = qedi_conn->gen_pdu.req_wr_ptr -
qedi_conn->gen_pdu.req_buf;
- bd_tbl->reserved0 = 0;
- bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+ bd_tbl = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
bd_tbl->sge_addr.hi =
(u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.resp_dma_addr;
bd_tbl->sge_len = ISCSI_DEF_MAX_RECV_SEG_LEN;
- bd_tbl->reserved0 = 0;
}
static int qedi_iscsi_send_generic_request(struct iscsi_task *task)
@@ -833,7 +831,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
return ERR_PTR(ret);
}
- if (do_not_recover) {
+ if (qedi_do_not_recover) {
ret = -ENOMEM;
return ERR_PTR(ret);
}
@@ -957,7 +955,7 @@ static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
struct qedi_endpoint *qedi_ep;
int ret = 0;
- if (do_not_recover)
+ if (qedi_do_not_recover)
return 1;
qedi_ep = ep->dd_data;
@@ -1025,7 +1023,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
}
if (test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
- if (do_not_recover) {
+ if (qedi_do_not_recover) {
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"Do not recover cid=0x%x\n",
qedi_ep->iscsi_cid);
@@ -1039,7 +1037,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
}
}
- if (do_not_recover)
+ if (qedi_do_not_recover)
goto ep_exit_recover;
switch (qedi_ep->state) {
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index d3c06bbddb4e..3247287cb0e7 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -102,7 +102,7 @@ struct qedi_endpoint {
#define QEDI_SQ_WQES_MIN 16
struct qedi_io_bdt {
- struct iscsi_sge *sge_tbl;
+ struct scsi_sge *sge_tbl;
dma_addr_t sge_tbl_dma;
u16 sge_valid;
};
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 5eda21d903e9..92775a8b74b1 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1805,7 +1805,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
*/
qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);
- qedi_setup_int(qedi);
+ rc = qedi_setup_int(qedi);
if (rc)
goto stop_iscsi_func;
@@ -2007,6 +2007,7 @@ static void qedi_remove(struct pci_dev *pdev)
static struct pci_device_id qedi_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8084) },
{ 0 },
};
MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
diff --git a/drivers/scsi/qedi/qedi_version.h b/drivers/scsi/qedi/qedi_version.h
index 9543a1b139d4..d61e3ac22e67 100644
--- a/drivers/scsi/qedi/qedi_version.h
+++ b/drivers/scsi/qedi/qedi_version.h
@@ -7,8 +7,8 @@
* this source tree.
*/
-#define QEDI_MODULE_VERSION "8.10.3.0"
+#define QEDI_MODULE_VERSION "8.10.4.0"
#define QEDI_DRIVER_MAJOR_VER 8
#define QEDI_DRIVER_MINOR_VER 10
-#define QEDI_DRIVER_REV_VER 3
+#define QEDI_DRIVER_REV_VER 4
#define QEDI_DRIVER_ENG_VER 0
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 67c0d5aa3212..de952935b5d2 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -3,6 +3,7 @@ config SCSI_QLA_FC
depends on PCI && SCSI
depends on SCSI_FC_ATTRS
select FW_LOADER
+ select BTREE
---help---
This qla2xxx driver supports all QLogic Fibre Channel
PCI and PCIe host adapters.
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 4000ba3df1c0..7c8d6c54ab70 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2154,8 +2154,6 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
"Timer for the VP[%d] has stopped\n", vha->vp_idx);
}
- BUG_ON(atomic_read(&vha->vref_count));
-
qla2x00_free_fcports(vha);
mutex_lock(&ha->vport_lock);
@@ -2166,7 +2164,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
vha->gnl.ldma);
- if (vha->qpair->vp_idx == vha->vp_idx) {
+ if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
ql_log(ql_log_warn, vha, 0x7087,
"Queue Pair delete failed.\n");
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 069104fed267..16d1cd50feed 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -2553,13 +2553,13 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
ql_log(ql_log_warn, vha, 0x7089,
"mbx abort_command "
"failed.\n");
- bsg_job->req->errors =
+ scsi_req(bsg_job->req)->result =
bsg_reply->result = -EIO;
} else {
ql_dbg(ql_dbg_user, vha, 0x708a,
"mbx abort_command "
"success.\n");
- bsg_job->req->errors =
+ scsi_req(bsg_job->req)->result =
bsg_reply->result = 0;
}
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -2570,7 +2570,7 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
- bsg_job->req->errors = bsg_reply->result = -ENXIO;
+ scsi_req(bsg_job->req)->result = bsg_reply->result = -ENXIO;
return 0;
done:
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 21d9fb7fc887..51b4179469d1 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -2707,13 +2707,9 @@ ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
"%-+5d 0 1 2 3 4 5 6 7 8 9 A B C D E F\n", size);
ql_dbg(level, vha, id,
"----- -----------------------------------------------\n");
- for (cnt = 0; cnt < size; cnt++, buf++) {
- if (cnt % 16 == 0)
- ql_dbg(level, vha, id, "%04x:", cnt & ~0xFU);
- printk(" %02x", *buf);
- if (cnt % 16 == 15)
- printk("\n");
+ for (cnt = 0; cnt < size; cnt += 16) {
+ ql_dbg(level, vha, id, "%04x: ", cnt);
+ print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1,
+ buf + cnt, min(16U, size - cnt), false);
}
- if (cnt % 16 != 0)
- printk("\n");
}
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index e1fc4e66966a..c6bffe929fe7 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -348,6 +348,7 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
#define ql_dbg_tgt 0x00004000 /* Target mode */
#define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */
#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
+#define ql_dbg_tgt_dif 0x00000800 /* Target mode dif */
extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
uint32_t, void **);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 625d438e3cce..ae119018dfaa 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -25,6 +25,7 @@
#include <linux/firmware.h>
#include <linux/aer.h>
#include <linux/mutex.h>
+#include <linux/btree.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -395,11 +396,15 @@ struct srb_iocb {
struct completion comp;
} abt;
struct ct_arg ctarg;
+#define MAX_IOCB_MB_REG 28
+#define SIZEOF_IOCB_MB_REG (MAX_IOCB_MB_REG * sizeof(uint16_t))
struct {
- __le16 in_mb[28]; /* fr fw */
- __le16 out_mb[28]; /* to fw */
+ __le16 in_mb[MAX_IOCB_MB_REG]; /* from FW */
+ __le16 out_mb[MAX_IOCB_MB_REG]; /* to FW */
void *out, *in;
dma_addr_t out_dma, in_dma;
+ struct completion comp;
+ int rc;
} mbx;
struct {
struct imm_ntfy_from_isp *ntfy;
@@ -437,7 +442,7 @@ typedef struct srb {
uint32_t handle;
uint16_t flags;
uint16_t type;
- char *name;
+ const char *name;
int iocbs;
struct qla_qpair *qpair;
u32 gen1; /* scratch */
@@ -2300,6 +2305,8 @@ typedef struct fc_port {
struct ct_sns_desc ct_desc;
enum discovery_state disc_state;
enum login_state fw_login_state;
+ unsigned long plogi_nack_done_deadline;
+
u32 login_gen, last_login_gen;
u32 rscn_gen, last_rscn_gen;
u32 chip_reset;
@@ -3106,6 +3113,16 @@ struct qla_chip_state_84xx {
uint32_t gold_fw_version;
};
+struct qla_dif_statistics {
+ uint64_t dif_input_bytes;
+ uint64_t dif_output_bytes;
+ uint64_t dif_input_requests;
+ uint64_t dif_output_requests;
+ uint32_t dif_guard_err;
+ uint32_t dif_ref_tag_err;
+ uint32_t dif_app_tag_err;
+};
+
struct qla_statistics {
uint32_t total_isp_aborts;
uint64_t input_bytes;
@@ -3118,6 +3135,8 @@ struct qla_statistics {
uint32_t stat_max_pend_cmds;
uint32_t stat_max_qfull_cmds_alloc;
uint32_t stat_max_qfull_cmds_dropped;
+
+ struct qla_dif_statistics qla_dif_stats;
};
struct bidi_statistics {
@@ -3125,6 +3144,16 @@ struct bidi_statistics {
unsigned long long transfer_bytes;
};
+struct qla_tc_param {
+ struct scsi_qla_host *vha;
+ uint32_t blk_sz;
+ uint32_t bufflen;
+ struct scatterlist *sg;
+ struct scatterlist *prot_sg;
+ struct crc_context *ctx;
+ uint8_t *ctx_dsd_alloced;
+};
+
/* Multi queue support */
#define MBC_INITIALIZE_MULTIQ 0x1f
#define QLA_QUE_PAGE 0X1000
@@ -3272,6 +3301,8 @@ struct qlt_hw_data {
uint8_t tgt_node_name[WWN_SIZE];
struct dentry *dfs_tgt_sess;
+ struct dentry *dfs_tgt_port_database;
+
struct list_head q_full_list;
uint32_t num_pend_cmds;
uint32_t num_qfull_cmds_alloc;
@@ -3281,6 +3312,7 @@ struct qlt_hw_data {
spinlock_t sess_lock;
int rspq_vector_cpuid;
spinlock_t atio_lock ____cacheline_aligned;
+ struct btree_head32 host_map;
};
#define MAX_QFULL_CMDS_ALLOC 8192
@@ -3290,6 +3322,10 @@ struct qlt_hw_data {
#define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */
+#define QLA_EARLY_LINKUP(_ha) \
+ ((_ha->flags.n2n_ae || _ha->flags.lip_ae) && \
+ _ha->flags.fw_started && !_ha->flags.fw_init_done)
+
/*
* Qlogic host adapter specific data structure.
*/
@@ -3339,7 +3375,11 @@ struct qla_hw_data {
uint32_t fawwpn_enabled:1;
uint32_t exlogins_enabled:1;
uint32_t exchoffld_enabled:1;
- /* 35 bits */
+
+ uint32_t lip_ae:1;
+ uint32_t n2n_ae:1;
+ uint32_t fw_started:1;
+ uint32_t fw_init_done:1;
} flags;
/* This spinlock is used to protect "io transactions", you must
@@ -3432,7 +3472,6 @@ struct qla_hw_data {
#define P2P_LOOP 3
uint8_t interrupts_on;
uint32_t isp_abort_cnt;
-
#define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532
#define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432
#define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001
@@ -3913,6 +3952,7 @@ typedef struct scsi_qla_host {
struct list_head vp_fcports; /* list of fcports */
struct list_head work_list;
spinlock_t work_lock;
+ struct work_struct iocb_work;
/* Commonly used flags and state information. */
struct Scsi_Host *host;
@@ -4076,6 +4116,7 @@ typedef struct scsi_qla_host {
/* Count of active session/fcport */
int fcport_count;
wait_queue_head_t fcport_waitQ;
+ wait_queue_head_t vref_waitq;
} scsi_qla_host_t;
struct qla27xx_image_status {
@@ -4131,14 +4172,17 @@ struct qla2_sgx {
mb(); \
if (__vha->flags.delete_progress) { \
atomic_dec(&__vha->vref_count); \
+ wake_up(&__vha->vref_waitq); \
__bail = 1; \
} else { \
__bail = 0; \
} \
} while (0)
-#define QLA_VHA_MARK_NOT_BUSY(__vha) \
+#define QLA_VHA_MARK_NOT_BUSY(__vha) do { \
atomic_dec(&__vha->vref_count); \
+ wake_up(&__vha->vref_waitq); \
+} while (0)
#define QLA_QPAIR_MARK_BUSY(__qpair, __bail) do { \
atomic_inc(&__qpair->ref_count); \
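
With wake_up() now paired with every vref_count decrement, teardown paths can sleep on vref_waitq until outstanding references drain instead of polling with msleep() (see the qla_mid.c change below). A minimal sketch of the two sides, assuming the waiter runs in process context:

	/* Sketch only: reference holder and teardown waiter. */
	static void vref_pairing_sketch(struct scsi_qla_host *vha)
	{
		int bail;

		QLA_VHA_MARK_BUSY(vha, bail);
		if (!bail) {
			/* ... safe to dereference vha here ... */
			QLA_VHA_MARK_NOT_BUSY(vha); /* atomic_dec + wake_up(&vha->vref_waitq) */
		}

		/* Teardown side: sleep until all references drain, capped at 10s. */
		wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
		    10 * HZ);
	}
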
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index b48cce696bac..989e17b0758c 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -19,11 +19,11 @@ qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
struct fc_port *sess = NULL;
- struct qla_tgt *tgt= vha->vha_tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
- seq_printf(s, "%s\n",vha->host_str);
+ seq_printf(s, "%s\n", vha->host_str);
if (tgt) {
- seq_printf(s, "Port ID Port Name Handle\n");
+ seq_puts(s, "Port ID Port Name Handle\n");
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
list_for_each_entry(sess, &vha->vp_fcports, list)
@@ -44,7 +44,6 @@ qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file)
return single_open(file, qla2x00_dfs_tgt_sess_show, vha);
}
-
static const struct file_operations dfs_tgt_sess_ops = {
.open = qla2x00_dfs_tgt_sess_open,
.read = seq_read,
@@ -53,6 +52,78 @@ static const struct file_operations dfs_tgt_sess_ops = {
};
static int
+qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
+{
+ scsi_qla_host_t *vha = s->private;
+ struct qla_hw_data *ha = vha->hw;
+ struct gid_list_info *gid_list;
+ dma_addr_t gid_list_dma;
+ fc_port_t fc_port;
+ char *id_iter;
+ int rc, i;
+ uint16_t entries, loop_id;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+
+ seq_printf(s, "%s\n", vha->host_str);
+ if (tgt) {
+ gid_list = dma_alloc_coherent(&ha->pdev->dev,
+ qla2x00_gid_list_size(ha),
+ &gid_list_dma, GFP_KERNEL);
+ if (!gid_list) {
+ ql_dbg(ql_dbg_user, vha, 0x705c,
+ "DMA allocation failed for %u\n",
+ qla2x00_gid_list_size(ha));
+ return 0;
+ }
+
+ rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
+ &entries);
+ if (rc != QLA_SUCCESS)
+ goto out_free_id_list;
+
+ id_iter = (char *)gid_list;
+
+ seq_puts(s, "Port Name Port ID Loop ID\n");
+
+ for (i = 0; i < entries; i++) {
+ struct gid_list_info *gid =
+ (struct gid_list_info *)id_iter;
+ loop_id = le16_to_cpu(gid->loop_id);
+ memset(&fc_port, 0, sizeof(fc_port_t));
+
+ fc_port.loop_id = loop_id;
+
+ rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
+ seq_printf(s, "%8phC %02x%02x%02x %d\n",
+ fc_port.port_name, fc_port.d_id.b.domain,
+ fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
+ fc_port.loop_id);
+ id_iter += ha->gid_list_info_size;
+ }
+out_free_id_list:
+ dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+ gid_list, gid_list_dma);
+ }
+
+ return 0;
+}
+
+static int
+qla2x00_dfs_tgt_port_database_open(struct inode *inode, struct file *file)
+{
+ scsi_qla_host_t *vha = inode->i_private;
+
+ return single_open(file, qla2x00_dfs_tgt_port_database_show, vha);
+}
+
+static const struct file_operations dfs_tgt_port_database_ops = {
+ .open = qla2x00_dfs_tgt_port_database_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int
qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
{
struct scsi_qla_host *vha = s->private;
@@ -114,6 +185,21 @@ qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
seq_printf(s, "num Q full sent = %lld\n",
vha->tgt_counters.num_q_full_sent);
+ /* DIF stats */
+ seq_printf(s, "DIF Inp Bytes = %lld\n",
+ vha->qla_stats.qla_dif_stats.dif_input_bytes);
+ seq_printf(s, "DIF Outp Bytes = %lld\n",
+ vha->qla_stats.qla_dif_stats.dif_output_bytes);
+ seq_printf(s, "DIF Inp Req = %lld\n",
+ vha->qla_stats.qla_dif_stats.dif_input_requests);
+ seq_printf(s, "DIF Outp Req = %lld\n",
+ vha->qla_stats.qla_dif_stats.dif_output_requests);
+ seq_printf(s, "DIF Guard err = %d\n",
+ vha->qla_stats.qla_dif_stats.dif_guard_err);
+ seq_printf(s, "DIF Ref tag err = %d\n",
+ vha->qla_stats.qla_dif_stats.dif_ref_tag_err);
+ seq_printf(s, "DIF App tag err = %d\n",
+ vha->qla_stats.qla_dif_stats.dif_app_tag_err);
return 0;
}
@@ -281,6 +367,14 @@ create_nodes:
goto out;
}
+ ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
+ S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_port_database_ops);
+ if (!ha->tgt.dfs_tgt_port_database) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Unable to create debugFS tgt_port_database node.\n");
+ goto out;
+ }
+
ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
&dfs_fce_ops);
if (!ha->dfs_fce) {
@@ -311,6 +405,11 @@ qla2x00_dfs_remove(scsi_qla_host_t *vha)
ha->tgt.dfs_tgt_sess = NULL;
}
+ if (ha->tgt.dfs_tgt_port_database) {
+ debugfs_remove(ha->tgt.dfs_tgt_port_database);
+ ha->tgt.dfs_tgt_port_database = NULL;
+ }
+
if (ha->dfs_fw_resource_cnt) {
debugfs_remove(ha->dfs_fw_resource_cnt);
ha->dfs_fw_resource_cnt = NULL;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index b3d6441d1d90..5b2451745e9f 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -193,6 +193,7 @@ extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *);
void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
+int qla24xx_async_abort_cmd(srb_t *);
/*
* Global Functions in qla_mid.c source file.
@@ -256,11 +257,11 @@ extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *,
- uint32_t *, uint16_t, struct qla_tgt_cmd *);
+ uint32_t *, uint16_t, struct qla_tc_param *);
extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
- uint32_t *, uint16_t, struct qla_tgt_cmd *);
+ uint32_t *, uint16_t, struct qla_tc_param *);
extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
- uint32_t *, uint16_t, struct qla_tgt_cmd *);
+ uint32_t *, uint16_t, struct qla_tc_param *);
extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *,
@@ -368,7 +369,7 @@ qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *,
extern int
qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
- dma_addr_t, uint);
+ dma_addr_t, uint16_t);
extern int qla24xx_abort_command(srb_t *);
extern int qla24xx_async_abort_command(srb_t *);
@@ -472,6 +473,13 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
extern int
qla26xx_dport_diagnostics(scsi_qla_host_t *, void *, uint, uint);
+int qla24xx_send_mb_cmd(struct scsi_qla_host *, mbx_cmd_t *);
+int qla24xx_gpdb_wait(struct scsi_qla_host *, fc_port_t *, u8);
+int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t,
+ uint16_t *);
+int __qla24xx_parse_gpdb(struct scsi_qla_host *, fc_port_t *,
+ struct port_database_24xx *);
+
/*
* Global Function Prototypes in qla_isr.c source file.
*/
@@ -846,5 +854,7 @@ extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *,
uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **);
void qla24xx_delete_sess_fn(struct work_struct *);
void qlt_unknown_atio_work_fn(struct work_struct *);
+void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
+void qlt_remove_target_resources(struct qla_hw_data *);
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 26b201e17ee5..034743309ada 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -629,7 +629,6 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
struct srb *sp = s;
struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
- uint64_t zero = 0;
struct port_database_24xx *pd;
fc_port_t *fcport = sp->fcport;
u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
@@ -649,48 +648,7 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
- /* Check for logged in state. */
- if (pd->current_login_state != PDS_PRLI_COMPLETE &&
- pd->last_login_state != PDS_PRLI_COMPLETE) {
- ql_dbg(ql_dbg_mbx, vha, 0xffff,
- "Unable to verify login-state (%x/%x) for "
- "loop_id %x.\n", pd->current_login_state,
- pd->last_login_state, fcport->loop_id);
- rval = QLA_FUNCTION_FAILED;
- goto gpd_error_out;
- }
-
- if (fcport->loop_id == FC_NO_LOOP_ID ||
- (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
- memcmp(fcport->port_name, pd->port_name, 8))) {
- /* We lost the device mid way. */
- rval = QLA_NOT_LOGGED_IN;
- goto gpd_error_out;
- }
-
- /* Names are little-endian. */
- memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
-
- /* Get port_id of device. */
- fcport->d_id.b.domain = pd->port_id[0];
- fcport->d_id.b.area = pd->port_id[1];
- fcport->d_id.b.al_pa = pd->port_id[2];
- fcport->d_id.b.rsvd_1 = 0;
-
- /* If not target must be initiator or unknown type. */
- if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
- fcport->port_type = FCT_INITIATOR;
- else
- fcport->port_type = FCT_TARGET;
-
- /* Passback COS information. */
- fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
- FC_COS_CLASS2 : FC_COS_CLASS3;
-
- if (pd->prli_svc_param_word_3[0] & BIT_7) {
- fcport->flags |= FCF_CONF_COMP_SUPPORTED;
- fcport->conf_compl_supported = 1;
- }
+ rval = __qla24xx_parse_gpdb(vha, fcport, pd);
gpd_error_out:
memset(&ea, 0, sizeof(ea));
@@ -876,10 +834,14 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
fcport->login_retry--;
if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
- (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
(fcport->fw_login_state == DSC_LS_PRLI_PEND))
return 0;
+ if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+ if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+ return 0;
+ }
+
/* for pure Target Mode. Login will not be initiated */
if (vha->host->active_mode == MODE_TARGET)
return 0;
@@ -1041,10 +1003,14 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
fcport->flags);
if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
- (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
(fcport->fw_login_state == DSC_LS_PRLI_PEND))
return;
+ if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+ if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+ return;
+ }
+
if (fcport->flags & FCF_ASYNC_SENT) {
fcport->login_retry++;
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
@@ -1258,7 +1224,7 @@ qla24xx_abort_sp_done(void *ptr, int res)
complete(&abt->u.abt.comp);
}
-static int
+int
qla24xx_async_abort_cmd(srb_t *cmd_sp)
{
scsi_qla_host_t *vha = cmd_sp->vha;
@@ -3212,6 +3178,7 @@ next_check:
} else {
ql_dbg(ql_dbg_init, vha, 0x00d3,
"Init Firmware -- success.\n");
+ ha->flags.fw_started = 1;
}
return (rval);
@@ -3374,8 +3341,8 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
uint8_t domain;
char connect_type[22];
struct qla_hw_data *ha = vha->hw;
- unsigned long flags;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+ port_id_t id;
/* Get host addresses. */
rval = qla2x00_get_adapter_id(vha,
@@ -3453,13 +3420,11 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
/* Save Host port and loop ID. */
/* byte order - Big Endian */
- vha->d_id.b.domain = domain;
- vha->d_id.b.area = area;
- vha->d_id.b.al_pa = al_pa;
-
- spin_lock_irqsave(&ha->vport_slock, flags);
- qlt_update_vp_map(vha, SET_AL_PA);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
+ id.b.domain = domain;
+ id.b.area = area;
+ id.b.al_pa = al_pa;
+ id.b.rsvd_1 = 0;
+ qlt_update_host_map(vha, id);
if (!vha->flags.init_done)
ql_log(ql_log_info, vha, 0x2010,
@@ -4036,6 +4001,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
atomic_set(&vha->loop_state, LOOP_READY);
ql_dbg(ql_dbg_disc, vha, 0x2069,
"LOOP READY.\n");
+ ha->flags.fw_init_done = 1;
/*
* Process any ATIO queue entries that came in
@@ -5148,6 +5114,7 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
}
}
atomic_dec(&vha->vref_count);
+ wake_up(&vha->vref_waitq);
}
spin_unlock_irqrestore(&ha->vport_slock, flags);
}
@@ -5526,6 +5493,11 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
if (!(IS_P3P_TYPE(ha)))
ha->isp_ops->reset_chip(vha);
+ ha->flags.n2n_ae = 0;
+ ha->flags.lip_ae = 0;
+ ha->current_topology = 0;
+ ha->flags.fw_started = 0;
+ ha->flags.fw_init_done = 0;
ha->chip_reset++;
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
@@ -6802,6 +6774,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
return;
if (!ha->fw_major_version)
return;
+ if (!ha->flags.fw_started)
+ return;
ret = qla2x00_stop_firmware(vha);
for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
@@ -6815,6 +6789,9 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
"Attempting retry of stop-firmware command.\n");
ret = qla2x00_stop_firmware(vha);
}
+
+ ha->flags.fw_started = 0;
+ ha->flags.fw_init_done = 0;
}
int
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 535079280288..ea027f6a7fd4 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -889,7 +889,7 @@ qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
- uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+ uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
void *next_dsd;
uint8_t avail_dsds = 0;
@@ -898,7 +898,6 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
struct scatterlist *sg_prot;
uint32_t *cur_dsd = dsd;
uint16_t used_dsds = tot_dsds;
-
uint32_t prot_int; /* protection interval */
uint32_t partial;
struct qla2_sgx sgx;
@@ -966,7 +965,7 @@ alloc_and_fill:
} else {
list_add_tail(&dsd_ptr->list,
&(tc->ctx->dsd_list));
- tc->ctx_dsd_alloced = 1;
+ *tc->ctx_dsd_alloced = 1;
}
@@ -1005,7 +1004,7 @@ alloc_and_fill:
int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
- uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+ uint16_t tot_dsds, struct qla_tc_param *tc)
{
void *next_dsd;
uint8_t avail_dsds = 0;
@@ -1066,7 +1065,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
} else {
list_add_tail(&dsd_ptr->list,
&(tc->ctx->dsd_list));
- tc->ctx_dsd_alloced = 1;
+ *tc->ctx_dsd_alloced = 1;
}
/* add new list to cmd iocb or last list */
@@ -1092,7 +1091,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
- uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+ uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
void *next_dsd;
uint8_t avail_dsds = 0;
@@ -1158,7 +1157,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
} else {
list_add_tail(&dsd_ptr->list,
&(tc->ctx->dsd_list));
- tc->ctx_dsd_alloced = 1;
+ *tc->ctx_dsd_alloced = 1;
}
/* add new list to cmd iocb or last list */
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 8d72a2d86229..aac03504d9a3 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -708,6 +708,8 @@ skip_rio:
"mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
ha->isp_ops->fw_dump(vha, 1);
+ ha->flags.fw_init_done = 0;
+ ha->flags.fw_started = 0;
if (IS_FWI2_CAPABLE(ha)) {
if (mb[1] == 0 && mb[2] == 0) {
@@ -761,6 +763,9 @@ skip_rio:
break;
case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
+ ha->flags.lip_ae = 1;
+ ha->flags.n2n_ae = 0;
+
ql_dbg(ql_dbg_async, vha, 0x5009,
"LIP occurred (%x).\n", mb[1]);
@@ -797,6 +802,10 @@ skip_rio:
break;
case MBA_LOOP_DOWN: /* Loop Down Event */
+ ha->flags.n2n_ae = 0;
+ ha->flags.lip_ae = 0;
+ ha->current_topology = 0;
+
mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
? RD_REG_WORD(&reg24->mailbox4) : 0;
mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
@@ -866,6 +875,9 @@ skip_rio:
/* case MBA_DCBX_COMPLETE: */
case MBA_POINT_TO_POINT: /* Point-to-Point */
+ ha->flags.lip_ae = 0;
+ ha->flags.n2n_ae = 1;
+
if (IS_QLA2100(ha))
break;
@@ -1620,9 +1632,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
QLA_LOGIO_LOGIN_RETRIED : 0;
if (logio->entry_status) {
ql_log(ql_log_warn, fcport->vha, 0x5034,
- "Async-%s error entry - hdl=%x"
+ "Async-%s error entry - %8phC hdl=%x"
"portid=%02x%02x%02x entry-status=%x.\n",
- type, sp->handle, fcport->d_id.b.domain,
+ type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
logio->entry_status);
ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
@@ -1633,8 +1645,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
- "Async-%s complete - hdl=%x portid=%02x%02x%02x "
- "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
+ "Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x "
+ "iop0=%x.\n", type, fcport->port_name, sp->handle,
+ fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
le32_to_cpu(logio->io_parameter[0]));
@@ -1674,6 +1687,17 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
case LSC_SCODE_NPORT_USED:
data[0] = MBS_LOOP_ID_USED;
break;
+ case LSC_SCODE_CMD_FAILED:
+ if (iop[1] == 0x0606) {
+ /*
+			 * PLOGI/PRLI completed: we must have received a
+			 * PLOGI/PRLI which the target side acked.
+ */
+ data[0] = MBS_COMMAND_COMPLETE;
+ goto logio_done;
+ }
+ data[0] = MBS_COMMAND_ERROR;
+ break;
case LSC_SCODE_NOXCB:
vha->hw->exch_starvation++;
if (vha->hw->exch_starvation > 5) {
@@ -1695,8 +1719,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
}
ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
- "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
- "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
+ "Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x "
+ "iop0=%x iop1=%x.\n", type, fcport->port_name,
+ sp->handle, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
le16_to_cpu(logio->comp_status),
le32_to_cpu(logio->io_parameter[0]),
@@ -2679,7 +2704,7 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
return;
abt = &sp->u.iocb_cmd;
- abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle);
+ abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
sp->done(sp, 0);
}
@@ -2693,7 +2718,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
struct sts_entry_24xx *pkt;
struct qla_hw_data *ha = vha->hw;
- if (!vha->flags.online)
+ if (!ha->flags.fw_started)
return;
while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 35079f417417..a113ab3592a7 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -10,6 +10,28 @@
#include <linux/delay.h>
#include <linux/gfp.h>
+static struct mb_cmd_name {
+ uint16_t cmd;
+ const char *str;
+} mb_str[] = {
+ {MBC_GET_PORT_DATABASE, "GPDB"},
+ {MBC_GET_ID_LIST, "GIDList"},
+ {MBC_GET_LINK_PRIV_STATS, "Stats"},
+};
+
+static const char *mb_to_str(uint16_t cmd)
+{
+ int i;
+ struct mb_cmd_name *e;
+
+ for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
+ e = mb_str + i;
+ if (cmd == e->cmd)
+ return e->str;
+ }
+ return "unknown";
+}
+
static struct rom_cmd {
uint16_t cmd;
} rom_cmds[] = {
@@ -2818,7 +2840,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
int
qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
- dma_addr_t stats_dma, uint options)
+ dma_addr_t stats_dma, uint16_t options)
{
int rval;
mbx_cmd_t mc;
@@ -2828,19 +2850,17 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
"Entered %s.\n", __func__);
- mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
- mcp->mb[2] = MSW(stats_dma);
- mcp->mb[3] = LSW(stats_dma);
- mcp->mb[6] = MSW(MSD(stats_dma));
- mcp->mb[7] = LSW(MSD(stats_dma));
- mcp->mb[8] = sizeof(struct link_statistics) / 4;
- mcp->mb[9] = vha->vp_idx;
- mcp->mb[10] = options;
- mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
- mcp->in_mb = MBX_2|MBX_1|MBX_0;
- mcp->tov = MBX_TOV_SECONDS;
- mcp->flags = IOCTL_CMD;
- rval = qla2x00_mailbox_command(vha, mcp);
+ memset(&mc, 0, sizeof(mc));
+ mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
+ mc.mb[2] = MSW(stats_dma);
+ mc.mb[3] = LSW(stats_dma);
+ mc.mb[6] = MSW(MSD(stats_dma));
+ mc.mb[7] = LSW(MSD(stats_dma));
+ mc.mb[8] = sizeof(struct link_statistics) / 4;
+ mc.mb[9] = cpu_to_le16(vha->vp_idx);
+ mc.mb[10] = cpu_to_le16(options);
+
+ rval = qla24xx_send_mb_cmd(vha, &mc);
if (rval == QLA_SUCCESS) {
if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
@@ -3603,6 +3623,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
scsi_qla_host_t *vp = NULL;
unsigned long flags;
int found;
+ port_id_t id;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
"Entered %s.\n", __func__);
@@ -3610,28 +3631,27 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
if (rptid_entry->entry_status != 0)
return;
+ id.b.domain = rptid_entry->port_id[2];
+ id.b.area = rptid_entry->port_id[1];
+ id.b.al_pa = rptid_entry->port_id[0];
+ id.b.rsvd_1 = 0;
+
if (rptid_entry->format == 0) {
/* loop */
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7,
+ ql_dbg(ql_dbg_async, vha, 0x10b7,
"Format 0 : Number of VPs setup %d, number of "
"VPs acquired %d.\n", rptid_entry->vp_setup,
rptid_entry->vp_acquired);
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8,
+ ql_dbg(ql_dbg_async, vha, 0x10b8,
"Primary port id %02x%02x%02x.\n",
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]);
- vha->d_id.b.domain = rptid_entry->port_id[2];
- vha->d_id.b.area = rptid_entry->port_id[1];
- vha->d_id.b.al_pa = rptid_entry->port_id[0];
-
- spin_lock_irqsave(&ha->vport_slock, flags);
- qlt_update_vp_map(vha, SET_AL_PA);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
+ qlt_update_host_map(vha, id);
} else if (rptid_entry->format == 1) {
/* fabric */
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9,
+ ql_dbg(ql_dbg_async, vha, 0x10b9,
"Format 1: VP[%d] enabled - status %d - with "
"port id %02x%02x%02x.\n", rptid_entry->vp_idx,
rptid_entry->vp_status,
@@ -3653,12 +3673,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
WWN_SIZE);
}
- vha->d_id.b.domain = rptid_entry->port_id[2];
- vha->d_id.b.area = rptid_entry->port_id[1];
- vha->d_id.b.al_pa = rptid_entry->port_id[0];
- spin_lock_irqsave(&ha->vport_slock, flags);
- qlt_update_vp_map(vha, SET_AL_PA);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
+ qlt_update_host_map(vha, id);
}
fc_host_port_name(vha->host) =
@@ -3694,12 +3709,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
if (!found)
return;
- vp->d_id.b.domain = rptid_entry->port_id[2];
- vp->d_id.b.area = rptid_entry->port_id[1];
- vp->d_id.b.al_pa = rptid_entry->port_id[0];
- spin_lock_irqsave(&ha->vport_slock, flags);
- qlt_update_vp_map(vp, SET_AL_PA);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
+ qlt_update_host_map(vp, id);
/*
* Cannot configure here as we are still sitting on the
@@ -5827,3 +5837,225 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
return rval;
}
+
+static void qla2x00_async_mb_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+
+ sp->u.iocb_cmd.u.mbx.rc = res;
+
+ complete(&sp->u.iocb_cmd.u.mbx.comp);
+ /* don't free sp here. Let the caller do the free */
+}
+
+/*
+ * This routine sends a mailbox command through the IOCB
+ * interface, which allows non-critical (non chip-setup)
+ * commands to go out in parallel with other I/O.
+ */
+int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ srb_t *sp;
+ struct srb_iocb *c;
+
+ if (!vha->hw->flags.fw_started)
+ goto done;
+
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_MB_IOCB;
+ sp->name = mb_to_str(mcp->mb[0]);
+
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
+
+ c = &sp->u.iocb_cmd;
+ c->timeout = qla2x00_async_iocb_timeout;
+ init_completion(&c->u.mbx.comp);
+
+ sp->done = qla2x00_async_mb_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "%s: %s Failed submission. %x.\n",
+ __func__, sp->name, rval);
+ goto done_free_sp;
+ }
+
+ ql_dbg(ql_dbg_mbx, vha, 0xffff, "MB:%s hndl %x submitted\n",
+ sp->name, sp->handle);
+
+ wait_for_completion(&c->u.mbx.comp);
+ memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
+
+ rval = c->u.mbx.rc;
+ switch (rval) {
+ case QLA_FUNCTION_TIMEOUT:
+ ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Timeout. %x.\n",
+ __func__, sp->name, rval);
+ break;
+ case QLA_SUCCESS:
+ ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s done.\n",
+ __func__, sp->name);
+ sp->free(sp);
+ break;
+ default:
+ ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Failed. %x.\n",
+ __func__, sp->name, rval);
+ sp->free(sp);
+ break;
+ }
+
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
+}
+
+/*
+ * qla24xx_gpdb_wait
+ * NOTE: Do not call this routine from DPC thread
+ */
+int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ dma_addr_t pd_dma;
+ struct port_database_24xx *pd;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+
+ if (!vha->hw->flags.fw_started)
+ goto done;
+
+ pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
+ if (pd == NULL) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Failed to allocate port database structure.\n");
+ goto done_free_sp;
+ }
+ memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
+
+ memset(&mc, 0, sizeof(mc));
+ mc.mb[0] = MBC_GET_PORT_DATABASE;
+ mc.mb[1] = cpu_to_le16(fcport->loop_id);
+ mc.mb[2] = MSW(pd_dma);
+ mc.mb[3] = LSW(pd_dma);
+ mc.mb[6] = MSW(MSD(pd_dma));
+ mc.mb[7] = LSW(MSD(pd_dma));
+ mc.mb[9] = cpu_to_le16(vha->vp_idx);
+ mc.mb[10] = cpu_to_le16((uint16_t)opt);
+
+ rval = qla24xx_send_mb_cmd(vha, &mc);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "%s: %8phC fail\n", __func__, fcport->port_name);
+ goto done_free_sp;
+ }
+
+ rval = __qla24xx_parse_gpdb(vha, fcport, pd);
+
+ ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %8phC done\n",
+ __func__, fcport->port_name);
+
+done_free_sp:
+ if (pd)
+ dma_pool_free(ha->s_dma_pool, pd, pd_dma);
+done:
+ return rval;
+}
+
+int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
+ struct port_database_24xx *pd)
+{
+ int rval = QLA_SUCCESS;
+ uint64_t zero = 0;
+
+ /* Check for logged in state. */
+ if (pd->current_login_state != PDS_PRLI_COMPLETE &&
+ pd->last_login_state != PDS_PRLI_COMPLETE) {
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "Unable to verify login-state (%x/%x) for "
+ "loop_id %x.\n", pd->current_login_state,
+ pd->last_login_state, fcport->loop_id);
+ rval = QLA_FUNCTION_FAILED;
+ goto gpd_error_out;
+ }
+
+ if (fcport->loop_id == FC_NO_LOOP_ID ||
+ (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
+ memcmp(fcport->port_name, pd->port_name, 8))) {
+ /* We lost the device mid way. */
+ rval = QLA_NOT_LOGGED_IN;
+ goto gpd_error_out;
+ }
+
+ /* Names are little-endian. */
+ memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
+ memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
+
+ /* Get port_id of device. */
+ fcport->d_id.b.domain = pd->port_id[0];
+ fcport->d_id.b.area = pd->port_id[1];
+ fcport->d_id.b.al_pa = pd->port_id[2];
+ fcport->d_id.b.rsvd_1 = 0;
+
+ /* If not target must be initiator or unknown type. */
+ if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
+ fcport->port_type = FCT_INITIATOR;
+ else
+ fcport->port_type = FCT_TARGET;
+
+ /* Passback COS information. */
+ fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
+ FC_COS_CLASS2 : FC_COS_CLASS3;
+
+ if (pd->prli_svc_param_word_3[0] & BIT_7) {
+ fcport->flags |= FCF_CONF_COMP_SUPPORTED;
+ fcport->conf_compl_supported = 1;
+ }
+
+gpd_error_out:
+ return rval;
+}
+
+/*
+ * qla24xx_gidlist_wait
+ * NOTE: don't call this routine from DPC thread.
+ */
+int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
+ void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ mbx_cmd_t mc;
+
+ if (!vha->hw->flags.fw_started)
+ goto done;
+
+ memset(&mc, 0, sizeof(mc));
+ mc.mb[0] = MBC_GET_ID_LIST;
+ mc.mb[2] = MSW(id_list_dma);
+ mc.mb[3] = LSW(id_list_dma);
+ mc.mb[6] = MSW(MSD(id_list_dma));
+ mc.mb[7] = LSW(MSD(id_list_dma));
+ mc.mb[8] = 0;
+ mc.mb[9] = cpu_to_le16(vha->vp_idx);
+
+ rval = qla24xx_send_mb_cmd(vha, &mc);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "%s: fail\n", __func__);
+ } else {
+ *entries = mc.mb[1];
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "%s: done\n", __func__);
+ }
+done:
+ return rval;
+}
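
Because these helpers block on a completion they are only safe in process context (the new tgt_port_database debugfs handler is one caller) and, as noted above, must not be called from the DPC thread. A minimal usage sketch; the helper name and the loop ID are placeholders:

	static void refresh_port_sketch(struct scsi_qla_host *vha, uint16_t loop_id)
	{
		fc_port_t fcport;

		memset(&fcport, 0, sizeof(fcport));
		fcport.loop_id = loop_id;

		if (qla24xx_gpdb_wait(vha, &fcport, 0) == QLA_SUCCESS)
			ql_dbg(ql_dbg_mbx, vha, 0xffff,
			    "%8phC maps to %02x%02x%02x\n",
			    fcport.port_name, fcport.d_id.b.domain,
			    fcport.d_id.b.area, fcport.d_id.b.al_pa);
	}
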
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index c6d6f0d912ff..09a490c98763 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -74,13 +74,14 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
* ensures no active vp_list traversal while the vport is removed
* from the queue)
*/
- spin_lock_irqsave(&ha->vport_slock, flags);
- while (atomic_read(&vha->vref_count)) {
- spin_unlock_irqrestore(&ha->vport_slock, flags);
-
- msleep(500);
+	wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
+	    10 * HZ);
- spin_lock_irqsave(&ha->vport_slock, flags);
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ if (atomic_read(&vha->vref_count)) {
+ ql_dbg(ql_dbg_vport, vha, 0xfffa,
+ "vha->vref_count=%u timeout\n", vha->vref_count.counter);
+ vha->vref_count = (atomic_t)ATOMIC_INIT(0);
}
list_del(&vha->list);
qlt_update_vp_map(vha, RESET_VP_IDX);
@@ -269,6 +270,7 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
spin_lock_irqsave(&ha->vport_slock, flags);
atomic_dec(&vha->vref_count);
+ wake_up(&vha->vref_waitq);
}
i++;
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 9bddbcff6d8a..1c7957903283 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1157,8 +1157,13 @@ static inline
uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
{
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
- return ((RD_REG_DWORD(&reg->host_status)) == ISP_REG_DISCONNECT);
+ if (IS_P3P_TYPE(ha))
+ return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT);
+ else
+ return ((RD_REG_DWORD(&reg->host_status)) ==
+ ISP_REG_DISCONNECT);
}
/**************************************************************************
@@ -1648,7 +1653,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
/* Don't abort commands in adapter during EEH
* recovery as it's not accessible/responding.
*/
- if (GET_CMD_SP(sp) && !ha->flags.eeh_busy) {
+ if (GET_CMD_SP(sp) && !ha->flags.eeh_busy &&
+ (sp->type == SRB_SCSI_CMD)) {
/* Get a reference to the sp and drop the lock.
* The reference ensures this sp->done() call
* - and not the call in qla2xxx_eh_abort() -
@@ -2557,6 +2563,20 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
return atomic_read(&vha->loop_state) == LOOP_READY;
}
+static void qla2x00_iocb_work_fn(struct work_struct *work)
+{
+ struct scsi_qla_host *vha = container_of(work,
+ struct scsi_qla_host, iocb_work);
+ int cnt = 0;
+
+ while (!list_empty(&vha->work_list)) {
+ qla2x00_do_work(vha);
+ cnt++;
+ if (cnt > 10)
+ break;
+ }
+}
+
/*
* PCI driver interface
*/
@@ -3075,6 +3095,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
*/
qla2xxx_wake_dpc(base_vha);
+ INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);
if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
@@ -3465,6 +3486,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
qla2x00_free_sysfs_attr(base_vha, true);
fc_remove_host(base_vha->host);
+ qlt_remove_target_resources(ha);
scsi_remove_host(base_vha->host);
@@ -4262,6 +4284,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);
init_waitqueue_head(&vha->fcport_waitQ);
+ init_waitqueue_head(&vha->vref_waitq);
vha->gnl.size = sizeof(struct get_name_list_extended) *
(ha->max_loop_id + 1);
@@ -4313,7 +4336,11 @@ qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
spin_lock_irqsave(&vha->work_lock, flags);
list_add_tail(&e->list, &vha->work_list);
spin_unlock_irqrestore(&vha->work_lock, flags);
- qla2xxx_wake_dpc(vha);
+
+ if (QLA_EARLY_LINKUP(vha->hw))
+ schedule_work(&vha->iocb_work);
+ else
+ qla2xxx_wake_dpc(vha);
return QLA_SUCCESS;
}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 45f5077684f0..0e03ca2ab3e5 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -130,6 +130,9 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
fc_port_t *fcport, bool local);
void qlt_unreg_sess(struct fc_port *sess);
+static void qlt_24xx_handle_abts(struct scsi_qla_host *,
+ struct abts_recv_from_24xx *);
+
/*
* Global Variables
*/
@@ -140,6 +143,20 @@ static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);
+static const char *prot_op_str(u32 prot_op)
+{
+ switch (prot_op) {
+ case TARGET_PROT_NORMAL: return "NORMAL";
+ case TARGET_PROT_DIN_INSERT: return "DIN_INSERT";
+ case TARGET_PROT_DOUT_INSERT: return "DOUT_INSERT";
+ case TARGET_PROT_DIN_STRIP: return "DIN_STRIP";
+ case TARGET_PROT_DOUT_STRIP: return "DOUT_STRIP";
+ case TARGET_PROT_DIN_PASS: return "DIN_PASS";
+ case TARGET_PROT_DOUT_PASS: return "DOUT_PASS";
+ default: return "UNKNOWN";
+ }
+}
+
/* This API intentionally takes dest as a parameter, rather than returning
* int value to avoid caller forgetting to issue wmb() after the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
@@ -170,21 +187,23 @@ static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
uint8_t *d_id)
{
- struct qla_hw_data *ha = vha->hw;
- uint8_t vp_idx;
-
- if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
- return NULL;
+ struct scsi_qla_host *host;
+ uint32_t key = 0;
- if (vha->d_id.b.al_pa == d_id[2])
+ if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) &&
+ (vha->d_id.b.al_pa == d_id[2]))
return vha;
- BUG_ON(ha->tgt.tgt_vp_map == NULL);
- vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
- if (likely(test_bit(vp_idx, ha->vp_idx_map)))
- return ha->tgt.tgt_vp_map[vp_idx].vha;
+ key = (uint32_t)d_id[0] << 16;
+ key |= (uint32_t)d_id[1] << 8;
+ key |= (uint32_t)d_id[2];
- return NULL;
+ host = btree_lookup32(&vha->hw->tgt.host_map, key);
+ if (!host)
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ "Unable to find host %06x\n", key);
+
+ return host;
}
static inline
@@ -389,6 +408,8 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
(struct abts_recv_from_24xx *)atio;
struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
entry->vp_index);
+ unsigned long flags;
+
if (unlikely(!host)) {
ql_dbg(ql_dbg_tgt, vha, 0xffff,
"qla_target(%d): Response pkt (ABTS_RECV_24XX) "
@@ -396,9 +417,12 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
vha->vp_idx, entry->vp_index);
break;
}
- qlt_response_pkt(host, (response_t *)atio);
+ if (!ha_locked)
+ spin_lock_irqsave(&host->hw->hardware_lock, flags);
+ qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
+ if (!ha_locked)
+ spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
break;
-
}
/* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */
@@ -554,6 +578,7 @@ void qla2x00_async_nack_sp_done(void *s, int res)
sp->fcport->login_gen++;
sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
sp->fcport->logout_on_delete = 1;
+ sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
break;
case SRB_NACK_PRLI:
@@ -613,6 +638,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
break;
case SRB_NACK_PRLI:
fcport->fw_login_state = DSC_LS_PRLI_PEND;
+ fcport->deleted = 0;
c = "PRLI";
break;
case SRB_NACK_LOGO:
@@ -1215,7 +1241,7 @@ static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
}
/* Get list of logged in devices */
- rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
+ rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
if (rc != QLA_SUCCESS) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
"qla_target(%d): get_id_list() failed: %x\n",
@@ -1551,6 +1577,9 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
request_t *pkt;
struct nack_to_isp *nack;
+ if (!ha->flags.fw_started)
+ return;
+
ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
/* Send marker if required */
@@ -2013,6 +2042,70 @@ void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
}
EXPORT_SYMBOL(qlt_free_mcmd);
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then
+ * reacquire
+ */
+void qlt_send_resp_ctio(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
+ uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
+{
+ struct atio_from_isp *atio = &cmd->atio;
+ struct ctio7_to_24xx *ctio;
+ uint16_t temp;
+
+ ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
+ "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
+ "sense_key=%02x, asc=%02x, ascq=%02x",
+ vha, atio, scsi_status, sense_key, asc, ascq);
+
+ ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+ if (!ctio) {
+ ql_dbg(ql_dbg_async, vha, 0x3067,
+ "qla2x00t(%ld): %s failed: unable to allocate request packet",
+ vha->host_no, __func__);
+ goto out;
+ }
+
+ ctio->entry_type = CTIO_TYPE7;
+ ctio->entry_count = 1;
+ ctio->handle = QLA_TGT_SKIP_HANDLE;
+ ctio->nport_handle = cmd->sess->loop_id;
+ ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio->vp_index = vha->vp_idx;
+ ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+ ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+ ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ ctio->exchange_addr = atio->u.isp24.exchange_addr;
+ ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
+ cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
+ temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+ ctio->u.status1.ox_id = cpu_to_le16(temp);
+ ctio->u.status1.scsi_status =
+ cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
+ ctio->u.status1.response_len = cpu_to_le16(18);
+ ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));
+
+ if (ctio->u.status1.residual != 0)
+ ctio->u.status1.scsi_status |=
+ cpu_to_le16(SS_RESIDUAL_UNDER);
+
+ /* Response code and sense key */
+ put_unaligned_le32(((0x70 << 24) | (sense_key << 8)),
+ (&ctio->u.status1.sense_data)[0]);
+ /* Additional sense length */
+ put_unaligned_le32(0x0a, (&ctio->u.status1.sense_data)[1]);
+ /* ASC and ASCQ */
+ put_unaligned_le32(((asc << 24) | (ascq << 16)),
+ (&ctio->u.status1.sense_data)[3]);
+
+ /* Memory Barrier */
+ wmb();
+
+ qla2x00_start_iocbs(vha, vha->req);
+out:
+ return;
+}
+
/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
@@ -2261,7 +2354,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
*/
return -EAGAIN;
} else
- ha->tgt.cmds[h-1] = prm->cmd;
+ ha->tgt.cmds[h - 1] = prm->cmd;
pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
pkt->nport_handle = prm->cmd->loop_id;
@@ -2391,6 +2484,50 @@ static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
return cmd->bufflen > 0;
}
+static void qlt_print_dif_err(struct qla_tgt_prm *prm)
+{
+ struct qla_tgt_cmd *cmd;
+ struct scsi_qla_host *vha;
+
+ /* asc 0x10=dif error */
+ if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
+ cmd = prm->cmd;
+ vha = cmd->vha;
+ /* ASCQ */
+ switch (prm->sense_buffer[13]) {
+ case 1:
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
+ "se_cmd=%p tag[%x]",
+ cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+ cmd->atio.u.isp24.exchange_addr);
+ break;
+ case 2:
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
+ "se_cmd=%p tag[%x]",
+ cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+ cmd->atio.u.isp24.exchange_addr);
+ break;
+ case 3:
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
+ "se_cmd=%p tag[%x]",
+ cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+ cmd->atio.u.isp24.exchange_addr);
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
+ "se_cmd=%p tag[%x]",
+ cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+ cmd->atio.u.isp24.exchange_addr);
+ break;
+ }
+ ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xffff, cmd->cdb, 16);
+ }
+}
+
/*
* Called without ha->hardware_lock held
*/
@@ -2512,18 +2649,9 @@ skip_explict_conf:
for (i = 0; i < prm->sense_buffer_len/4; i++)
((uint32_t *)ctio->u.status1.sense_data)[i] =
cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
-#if 0
- if (unlikely((prm->sense_buffer_len % 4) != 0)) {
- static int q;
- if (q < 10) {
- ql_dbg(ql_dbg_tgt, vha, 0xe04f,
- "qla_target(%d): %d bytes of sense "
- "lost", prm->tgt->ha->vp_idx,
- prm->sense_buffer_len % 4);
- q++;
- }
- }
-#endif
+
+ qlt_print_dif_err(prm);
+
} else {
ctio->u.status1.flags &=
~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
@@ -2537,19 +2665,9 @@ skip_explict_conf:
/* Sense with len > 24, is it possible ??? */
}
-
-
-/* diff */
static inline int
qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
{
- /*
- * Uncomment when corresponding SCSI changes are done.
- *
- if (!sp->cmd->prot_chk)
- return 0;
- *
- */
switch (se_cmd->prot_op) {
case TARGET_PROT_DOUT_INSERT:
case TARGET_PROT_DIN_STRIP:
@@ -2570,16 +2688,38 @@ qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
return 0;
}
+static inline int
+qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
+{
+ switch (se_cmd->prot_op) {
+ case TARGET_PROT_DIN_INSERT:
+ case TARGET_PROT_DOUT_INSERT:
+ case TARGET_PROT_DIN_STRIP:
+ case TARGET_PROT_DOUT_STRIP:
+ case TARGET_PROT_DIN_PASS:
+ case TARGET_PROT_DOUT_PASS:
+ return 1;
+ default:
+ return 0;
+ }
+ return 0;
+}
+
/*
- * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
- *
+ * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
*/
-static inline void
-qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
+static void
+qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
+ uint16_t *pfw_prot_opts)
{
+ struct se_cmd *se_cmd = &cmd->se_cmd;
uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
+ scsi_qla_host_t *vha = cmd->tgt->vha;
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t t32 = 0;
- /* wait til Mode Sense/Select cmd, modepage Ah, subpage 2
+ /*
+ * wait till Mode Sense/Select cmd, modepage Ah, subpage 2
 * have been implemented by TCM, before AppTag is avail.
* Look for modesense_handlers[]
*/
@@ -2587,65 +2727,73 @@ qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
ctx->app_tag_mask[0] = 0x0;
ctx->app_tag_mask[1] = 0x0;
+ if (IS_PI_UNINIT_CAPABLE(ha)) {
+ if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
+ (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
+ *pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
+ else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
+ *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
+ }
+
+ t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);
+
switch (se_cmd->prot_type) {
case TARGET_DIF_TYPE0_PROT:
/*
- * No check for ql2xenablehba_err_chk, as it would be an
- * I/O error if hba tag generation is not done.
+ * No check for ql2xenablehba_err_chk, as it
+ * would be an I/O error if hba tag generation
+ * is not done.
*/
ctx->ref_tag = cpu_to_le32(lba);
-
- if (!qlt_hba_err_chk_enabled(se_cmd))
- break;
-
/* enable ALL bytes of the ref tag */
ctx->ref_tag_mask[0] = 0xff;
ctx->ref_tag_mask[1] = 0xff;
ctx->ref_tag_mask[2] = 0xff;
ctx->ref_tag_mask[3] = 0xff;
break;
- /*
- * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
- * 16 bit app tag.
- */
case TARGET_DIF_TYPE1_PROT:
- ctx->ref_tag = cpu_to_le32(lba);
-
- if (!qlt_hba_err_chk_enabled(se_cmd))
- break;
-
- /* enable ALL bytes of the ref tag */
- ctx->ref_tag_mask[0] = 0xff;
- ctx->ref_tag_mask[1] = 0xff;
- ctx->ref_tag_mask[2] = 0xff;
- ctx->ref_tag_mask[3] = 0xff;
- break;
- /*
- * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
- * match LBA in CDB + N
- */
+ /*
+ * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
+ * REF tag, and 16 bit app tag.
+ */
+ ctx->ref_tag = cpu_to_le32(lba);
+ if (!qla_tgt_ref_mask_check(se_cmd) ||
+ !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
+ *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
+ break;
+ }
+ /* enable ALL bytes of the ref tag */
+ ctx->ref_tag_mask[0] = 0xff;
+ ctx->ref_tag_mask[1] = 0xff;
+ ctx->ref_tag_mask[2] = 0xff;
+ ctx->ref_tag_mask[3] = 0xff;
+ break;
case TARGET_DIF_TYPE2_PROT:
- ctx->ref_tag = cpu_to_le32(lba);
-
- if (!qlt_hba_err_chk_enabled(se_cmd))
- break;
-
- /* enable ALL bytes of the ref tag */
- ctx->ref_tag_mask[0] = 0xff;
- ctx->ref_tag_mask[1] = 0xff;
- ctx->ref_tag_mask[2] = 0xff;
- ctx->ref_tag_mask[3] = 0xff;
- break;
-
- /* For Type 3 protection: 16 bit GUARD only */
+ /*
+ * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
+ * tag has to match LBA in CDB + N
+ */
+ ctx->ref_tag = cpu_to_le32(lba);
+ if (!qla_tgt_ref_mask_check(se_cmd) ||
+ !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
+ *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
+ break;
+ }
+ /* enable ALL bytes of the ref tag */
+ ctx->ref_tag_mask[0] = 0xff;
+ ctx->ref_tag_mask[1] = 0xff;
+ ctx->ref_tag_mask[2] = 0xff;
+ ctx->ref_tag_mask[3] = 0xff;
+ break;
case TARGET_DIF_TYPE3_PROT:
- ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
- ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
- break;
+ /* For TYPE 3 protection: 16 bit GUARD only */
+ *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
+ ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
+ ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
+ break;
}
}
-
static inline int
qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
{
@@ -2664,6 +2812,7 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
struct se_cmd *se_cmd = &cmd->se_cmd;
uint32_t h;
struct atio_from_isp *atio = &prm->cmd->atio;
+ struct qla_tc_param tc;
uint16_t t16;
ha = vha->hw;
@@ -2689,16 +2838,15 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
case TARGET_PROT_DIN_INSERT:
case TARGET_PROT_DOUT_STRIP:
transfer_length = data_bytes;
- data_bytes += dif_bytes;
+ if (cmd->prot_sg_cnt)
+ data_bytes += dif_bytes;
break;
-
case TARGET_PROT_DIN_STRIP:
case TARGET_PROT_DOUT_INSERT:
case TARGET_PROT_DIN_PASS:
case TARGET_PROT_DOUT_PASS:
transfer_length = data_bytes + dif_bytes;
break;
-
default:
BUG();
break;
@@ -2734,7 +2882,6 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
break;
}
-
/* ---- PKT ---- */
/* Update entry type to indicate Command Type CRC_2 IOCB */
pkt->entry_type = CTIO_CRC2;
@@ -2752,9 +2899,8 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
} else
ha->tgt.cmds[h-1] = prm->cmd;
-
pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
- pkt->nport_handle = prm->cmd->loop_id;
+ pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
@@ -2775,12 +2921,10 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
-
pkt->dseg_count = prm->tot_dsds;
/* Fibre channel byte count */
pkt->transfer_length = cpu_to_le32(transfer_length);
-
/* ----- CRC context -------- */
/* Allocate CRC context from global pool */
@@ -2800,13 +2944,12 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
/* Set handle */
crc_ctx_pkt->handle = pkt->handle;
- qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt);
+ qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);
pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
-
if (!bundling) {
cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
} else {
@@ -2827,16 +2970,24 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
crc_ctx_pkt->guard_seed = cpu_to_le16(0);
+ memset((uint8_t *)&tc, 0 , sizeof(tc));
+ tc.vha = vha;
+ tc.blk_sz = cmd->blk_sz;
+ tc.bufflen = cmd->bufflen;
+ tc.sg = cmd->sg;
+ tc.prot_sg = cmd->prot_sg;
+ tc.ctx = crc_ctx_pkt;
+ tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;
/* Walks data segments */
pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
if (!bundling && prm->prot_seg_cnt) {
if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
- prm->tot_dsds, cmd))
+ prm->tot_dsds, &tc))
goto crc_queuing_error;
} else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
- (prm->tot_dsds - prm->prot_seg_cnt), cmd))
+ (prm->tot_dsds - prm->prot_seg_cnt), &tc))
goto crc_queuing_error;
if (bundling && prm->prot_seg_cnt) {
@@ -2845,18 +2996,18 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
- prm->prot_seg_cnt, cmd))
+ prm->prot_seg_cnt, &tc))
goto crc_queuing_error;
}
return QLA_SUCCESS;
crc_queuing_error:
/* Cleanup will be performed by the caller */
+ vha->hw->tgt.cmds[h - 1] = NULL;
return QLA_FUNCTION_FAILED;
}
-
/*
* Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
* QLA_TGT_XMIT_STATUS for >= 24xx silicon
@@ -2906,7 +3057,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
else
vha->tgt_counters.core_qla_que_buf++;
- if (!vha->flags.online || cmd->reset_count != ha->chip_reset) {
+ if (!ha->flags.fw_started || cmd->reset_count != ha->chip_reset) {
/*
* Either the port is not online or this request was from
* previous life, just abort the processing.
@@ -3047,7 +3198,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
spin_lock_irqsave(&ha->hardware_lock, flags);
- if (!vha->flags.online || (cmd->reset_count != ha->chip_reset) ||
+ if (!ha->flags.fw_started || (cmd->reset_count != ha->chip_reset) ||
(cmd->sess && cmd->sess->deleted)) {
/*
* Either the port is not online or this request was from
@@ -3104,139 +3255,113 @@ EXPORT_SYMBOL(qlt_rdy_to_xfer);
/*
- * Checks the guard or meta-data for the type of error
- * detected by the HBA.
+ * it is assumed either hardware_lock or qpair lock is held.
*/
-static inline int
+static void
qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
- struct ctio_crc_from_fw *sts)
+ struct ctio_crc_from_fw *sts)
{
uint8_t *ap = &sts->actual_dif[0];
uint8_t *ep = &sts->expected_dif[0];
- uint32_t e_ref_tag, a_ref_tag;
- uint16_t e_app_tag, a_app_tag;
- uint16_t e_guard, a_guard;
uint64_t lba = cmd->se_cmd.t_task_lba;
+ uint8_t scsi_status, sense_key, asc, ascq;
+ unsigned long flags;
- a_guard = be16_to_cpu(*(uint16_t *)(ap + 0));
- a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
- a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
-
- e_guard = be16_to_cpu(*(uint16_t *)(ep + 0));
- e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
- e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
-
- ql_dbg(ql_dbg_tgt, vha, 0xe075,
- "iocb(s) %p Returned STATUS.\n", sts);
-
- ql_dbg(ql_dbg_tgt, vha, 0xf075,
- "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
- cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
- a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);
-
- /*
- * Ignore sector if:
- * For type 3: ref & app tag is all 'f's
- * For type 0,1,2: app tag is all 'f's
- */
- if ((a_app_tag == 0xffff) &&
- ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) ||
- (a_ref_tag == 0xffffffff))) {
- uint32_t blocks_done;
-
- /* 2TB boundary case covered automatically with this */
- blocks_done = e_ref_tag - (uint32_t)lba + 1;
- cmd->se_cmd.bad_sector = e_ref_tag;
- cmd->se_cmd.pi_err = 0;
- ql_dbg(ql_dbg_tgt, vha, 0xf074,
- "need to return scsi good\n");
-
- /* Update protection tag */
- if (cmd->prot_sg_cnt) {
- uint32_t i, k = 0, num_ent;
- struct scatterlist *sg, *sgl;
-
-
- sgl = cmd->prot_sg;
-
- /* Patch the corresponding protection tags */
- for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) {
- num_ent = sg_dma_len(sg) / 8;
- if (k + num_ent < blocks_done) {
- k += num_ent;
- continue;
- }
- k = blocks_done;
- break;
- }
+ cmd->trc_flags |= TRC_DIF_ERR;
- if (k != blocks_done) {
- ql_log(ql_log_warn, vha, 0xf076,
- "unexpected tag values tag:lba=%u:%llu)\n",
- e_ref_tag, (unsigned long long)lba);
- goto out;
- }
+ cmd->a_guard = be16_to_cpu(*(uint16_t *)(ap + 0));
+ cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
+ cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
-#if 0
- struct sd_dif_tuple *spt;
- /* TODO:
- * This section came from initiator. Is it valid here?
- * should ulp be override with actual val???
- */
- spt = page_address(sg_page(sg)) + sg->offset;
- spt += j;
+ cmd->e_guard = be16_to_cpu(*(uint16_t *)(ep + 0));
+ cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
+ cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
- spt->app_tag = 0xffff;
- if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3)
- spt->ref_tag = 0xffffffff;
-#endif
- }
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
+ "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);
- return 0;
- }
+ scsi_status = sense_key = asc = ascq = 0;
- /* check guard */
- if (e_guard != a_guard) {
- cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
- cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
-
- ql_log(ql_log_warn, vha, 0xe076,
- "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
- cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
- a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
- a_guard, e_guard, cmd);
- goto out;
+ /* check appl tag */
+ if (cmd->e_app_tag != cmd->a_app_tag) {
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
+ "Ref[%x|%x], App[%x|%x], "
+ "Guard [%x|%x] cmd=%p ox_id[%04x]",
+ cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+ cmd->a_ref_tag, cmd->e_ref_tag,
+ cmd->a_app_tag, cmd->e_app_tag,
+ cmd->a_guard, cmd->e_guard,
+ cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
+
+ cmd->dif_err_code = DIF_ERR_APP;
+ scsi_status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ABORTED_COMMAND;
+ asc = 0x10;
+ ascq = 0x2;
}
/* check ref tag */
- if (e_ref_tag != a_ref_tag) {
- cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
- cmd->se_cmd.bad_sector = e_ref_tag;
-
- ql_log(ql_log_warn, vha, 0xe077,
- "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
- cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
- a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
- a_guard, e_guard, cmd);
+ if (cmd->e_ref_tag != cmd->a_ref_tag) {
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
+ "Ref[%x|%x], App[%x|%x], "
+ "Guard[%x|%x] cmd=%p ox_id[%04x] ",
+ cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+ cmd->a_ref_tag, cmd->e_ref_tag,
+ cmd->a_app_tag, cmd->e_app_tag,
+ cmd->a_guard, cmd->e_guard,
+ cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
+
+ cmd->dif_err_code = DIF_ERR_REF;
+ scsi_status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ABORTED_COMMAND;
+ asc = 0x10;
+ ascq = 0x3;
goto out;
}
- /* check appl tag */
- if (e_app_tag != a_app_tag) {
- cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
- cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
-
- ql_log(ql_log_warn, vha, 0xe078,
- "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
- cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
- a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
- a_guard, e_guard, cmd);
- goto out;
+ /* check guard */
+ if (cmd->e_guard != cmd->a_guard) {
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
+ "Ref[%x|%x], App[%x|%x], "
+ "Guard [%x|%x] cmd=%p ox_id[%04x]",
+ cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+ cmd->a_ref_tag, cmd->e_ref_tag,
+ cmd->a_app_tag, cmd->e_app_tag,
+ cmd->a_guard, cmd->e_guard,
+ cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
+ cmd->dif_err_code = DIF_ERR_GRD;
+ scsi_status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ABORTED_COMMAND;
+ asc = 0x10;
+ ascq = 0x1;
}
out:
- return 1;
-}
+ switch (cmd->state) {
+ case QLA_TGT_STATE_NEED_DATA:
+ /* handle_data will load DIF error code */
+ cmd->state = QLA_TGT_STATE_DATA_IN;
+ vha->hw->tgt.tgt_ops->handle_data(cmd);
+ break;
+ default:
+ spin_lock_irqsave(&cmd->cmd_lock, flags);
+ if (cmd->aborted) {
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ break;
+ }
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ qlt_send_resp_ctio(vha, cmd, scsi_status, sense_key, asc, ascq);
+ /* assume scsi status gets out on the wire.
+ * Will not wait for completion.
+ */
+ vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ break;
+ }
+}
 /* If hardware_lock held on entry, might drop it, then reacquire */
/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
@@ -3251,7 +3376,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
"Sending TERM ELS CTIO (ha=%p)\n", ha);
- pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
+ pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
if (pkt == NULL) {
ql_dbg(ql_dbg_tgt, vha, 0xe080,
"qla_target(%d): %s failed: unable to allocate "
@@ -3543,6 +3668,16 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
{
int term = 0;
+ if (cmd->se_cmd.prot_op)
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
+ "se_cmd=%p tag[%x] op %#x/%s",
+ cmd->lba, cmd->lba,
+ cmd->num_blks, &cmd->se_cmd,
+ cmd->atio.u.isp24.exchange_addr,
+ cmd->se_cmd.prot_op,
+ prot_op_str(cmd->se_cmd.prot_op));
+
if (ctio != NULL) {
struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
term = !(c->flags &
@@ -3760,32 +3895,15 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
struct ctio_crc_from_fw *crc =
(struct ctio_crc_from_fw *)ctio;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
- "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n",
+ "qla_target(%d): CTIO with DIF_ERROR status %x "
+ "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
+ "expect_dif[0x%llx]\n",
vha->vp_idx, status, cmd->state, se_cmd,
*((u64 *)&crc->actual_dif[0]),
*((u64 *)&crc->expected_dif[0]));
- if (qlt_handle_dif_error(vha, cmd, ctio)) {
- if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
- /* scsi Write/xfer rdy complete */
- goto skip_term;
- } else {
- /* scsi read/xmit respond complete
- * call handle dif to send scsi status
- * rather than terminate exchange.
- */
- cmd->state = QLA_TGT_STATE_PROCESSED;
- ha->tgt.tgt_ops->handle_dif_err(cmd);
- return;
- }
- } else {
- /* Need to generate a SCSI good completion.
- * because FW did not send scsi status.
- */
- status = 0;
- goto skip_term;
- }
- break;
+ qlt_handle_dif_error(vha, cmd, ctio);
+ return;
}
default:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
@@ -3808,7 +3926,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
return;
}
}
-skip_term:
if (cmd->state == QLA_TGT_STATE_PROCESSED) {
cmd->trc_flags |= TRC_CTIO_DONE;
@@ -4584,7 +4701,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
}
if (sess != NULL) {
- if (sess->fw_login_state == DSC_LS_PLOGI_PEND) {
+ if (sess->fw_login_state != DSC_LS_PLOGI_PEND &&
+ sess->fw_login_state != DSC_LS_PLOGI_COMP) {
/*
* Impatient initiator sent PRLI before last
* PLOGI could finish. Will force him to re-try,
@@ -4623,15 +4741,23 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
/* Make session global (not used in fabric mode) */
if (ha->current_topology != ISP_CFG_F) {
- set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
+ if (sess) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post nack\n",
+ __func__, __LINE__, sess->port_name);
+ qla24xx_post_nack_work(vha, sess, iocb,
+ SRB_NACK_PRLI);
+ res = 0;
+ } else {
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
} else {
if (sess) {
ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post nack\n",
- __func__, __LINE__, sess->port_name);
-
+ "%s %d %8phC post nack\n",
+ __func__, __LINE__, sess->port_name);
qla24xx_post_nack_work(vha, sess, iocb,
SRB_NACK_PRLI);
res = 0;
@@ -4639,7 +4765,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
}
break;
-
case ELS_TPRLO:
if (le16_to_cpu(iocb->u.isp24.flags) &
NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
@@ -5079,16 +5204,22 @@ qlt_send_busy(struct scsi_qla_host *vha,
static int
qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
- struct atio_from_isp *atio)
+ struct atio_from_isp *atio, bool ha_locked)
{
struct qla_hw_data *ha = vha->hw;
uint16_t status;
+ unsigned long flags;
if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
return 0;
+ if (!ha_locked)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
status = temp_sam_status;
qlt_send_busy(vha, atio, status);
+ if (!ha_locked)
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
return 1;
}
@@ -5103,7 +5234,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
unsigned long flags;
if (unlikely(tgt == NULL)) {
- ql_dbg(ql_dbg_io, vha, 0x3064,
+ ql_dbg(ql_dbg_tgt, vha, 0x3064,
"ATIO pkt, but no tgt (ha %p)", ha);
return;
}
@@ -5133,7 +5264,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
- rc = qlt_chk_qfull_thresh_hold(vha, atio);
+ rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked);
if (rc != 0) {
tgt->atio_irq_cmd_count--;
return;
@@ -5256,7 +5387,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
break;
}
- rc = qlt_chk_qfull_thresh_hold(vha, atio);
+ rc = qlt_chk_qfull_thresh_hold(vha, atio, true);
if (rc != 0) {
tgt->irq_cmd_count--;
return;
@@ -5531,7 +5662,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
fcport->loop_id = loop_id;
- rc = qla2x00_get_port_database(vha, fcport, 0);
+ rc = qla24xx_gpdb_wait(vha, fcport, 0);
if (rc != QLA_SUCCESS) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
"qla_target(%d): Failed to retrieve fcport "
@@ -5713,30 +5844,23 @@ static void qlt_abort_work(struct qla_tgt *tgt,
}
}
- spin_lock_irqsave(&ha->hardware_lock, flags);
-
- if (tgt->tgt_stop)
- goto out_term;
-
rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
+ ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
+
if (rc != 0)
goto out_term;
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (sess)
- ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
return;
out_term2:
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (sess)
+ ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
out_term:
+ spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
- if (sess)
- ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
}
static void qlt_tmr_work(struct qla_tgt *tgt,
@@ -5756,7 +5880,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
if (tgt->tgt_stop)
- goto out_term;
+ goto out_term2;
s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
@@ -5768,11 +5892,11 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
if (!sess)
- goto out_term;
+ goto out_term2;
} else {
if (sess->deleted) {
sess = NULL;
- goto out_term;
+ goto out_term2;
}
if (!kref_get_unless_zero(&sess->sess_kref)) {
@@ -5780,7 +5904,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
"%s: kref_get fail %8phC\n",
__func__, sess->port_name);
sess = NULL;
- goto out_term;
+ goto out_term2;
}
}
@@ -5790,17 +5914,19 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
- if (rc != 0)
- goto out_term;
-
ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+ if (rc != 0)
+ goto out_term;
return;
+out_term2:
+ if (sess)
+ ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
out_term:
qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
- ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
static void qlt_sess_work_fn(struct work_struct *work)
@@ -5893,13 +6019,13 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
- if (base_vha->fc_vport)
- return 0;
-
mutex_lock(&qla_tgt_mutex);
list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
mutex_unlock(&qla_tgt_mutex);
+ if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
+ ha->tgt.tgt_ops->add_target(base_vha);
+
return 0;
}
@@ -5928,6 +6054,17 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
return 0;
}
+void qlt_remove_target_resources(struct qla_hw_data *ha)
+{
+ struct scsi_qla_host *node;
+ u32 key = 0;
+
+ btree_for_each_safe32(&ha->tgt.host_map, key, node)
+ btree_remove32(&ha->tgt.host_map, key);
+
+ btree_destroy32(&ha->tgt.host_map);
+}
+
static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
unsigned char *b)
{
@@ -6234,7 +6371,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
struct atio_from_isp *pkt;
int cnt, i;
- if (!vha->flags.online)
+ if (!ha->flags.fw_started)
return;
while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
@@ -6581,6 +6718,8 @@ qlt_modify_vp_config(struct scsi_qla_host *vha,
void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
+ int rc;
+
if (!QLA_TGT_MODE_ENABLED())
return;
@@ -6600,6 +6739,13 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
qlt_unknown_atio_work_fn);
qlt_clear_mode(base_vha);
+
+ rc = btree_init32(&ha->tgt.host_map);
+ if (rc)
+ ql_log(ql_log_info, base_vha, 0xffff,
+ "Unable to initialize ha->host_map btree\n");
+
+ qlt_update_vp_map(base_vha, SET_VP_IDX);
}
irqreturn_t
@@ -6642,6 +6788,8 @@ qlt_handle_abts_recv_work(struct work_struct *work)
spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_response_pkt_all_vps(vha, (response_t *)&op->atio);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ kfree(op);
}
void
@@ -6706,25 +6854,69 @@ qlt_mem_free(struct qla_hw_data *ha)
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
+ void *slot;
+ u32 key;
+ int rc;
+
if (!QLA_TGT_MODE_ENABLED())
return;
+ key = vha->d_id.b24;
+
switch (cmd) {
case SET_VP_IDX:
vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
break;
case SET_AL_PA:
- vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
+ slot = btree_lookup32(&vha->hw->tgt.host_map, key);
+ if (!slot) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ "Save vha in host_map %p %06x\n", vha, key);
+ rc = btree_insert32(&vha->hw->tgt.host_map,
+ key, vha, GFP_ATOMIC);
+ if (rc)
+ ql_log(ql_log_info, vha, 0xffff,
+ "Unable to insert s_id into host_map: %06x\n",
+ key);
+ return;
+ }
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ "replace existing vha in host_map %p %06x\n", vha, key);
+ btree_update32(&vha->hw->tgt.host_map, key, vha);
break;
case RESET_VP_IDX:
vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
break;
case RESET_AL_PA:
- vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ "clear vha in host_map %p %06x\n", vha, key);
+ slot = btree_lookup32(&vha->hw->tgt.host_map, key);
+ if (slot)
+ btree_remove32(&vha->hw->tgt.host_map, key);
+ vha->d_id.b24 = 0;
break;
}
}
+void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
+{
+ unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!vha->d_id.b24) {
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ vha->d_id = id;
+ qlt_update_vp_map(vha, SET_AL_PA);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+ } else if (vha->d_id.b24 != id.b24) {
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ qlt_update_vp_map(vha, RESET_AL_PA);
+ vha->d_id = id;
+ qlt_update_vp_map(vha, SET_AL_PA);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+ }
+}
+
static int __init qlt_parse_ini_mode(void)
{
if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index a7f90dcaae37..d64420251194 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -378,6 +378,14 @@ static inline void adjust_corrupted_atio(struct atio_from_isp *atio)
atio->u.isp24.fcp_cmnd.add_cdb_len = 0;
}
+static inline int get_datalen_for_atio(struct atio_from_isp *atio)
+{
+ int len = atio->u.isp24.fcp_cmnd.add_cdb_len;
+
+ return (be32_to_cpu(get_unaligned((uint32_t *)
+ &atio->u.isp24.fcp_cmnd.add_cdb[len * 4])));
+}
+
#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
/*
@@ -667,7 +675,6 @@ struct qla_tgt_func_tmpl {
int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
unsigned char *, uint32_t, int, int, int);
void (*handle_data)(struct qla_tgt_cmd *);
- void (*handle_dif_err)(struct qla_tgt_cmd *);
int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint16_t,
uint32_t);
void (*free_cmd)(struct qla_tgt_cmd *);
@@ -684,6 +691,9 @@ struct qla_tgt_func_tmpl {
void (*clear_nacl_from_fcport_map)(struct fc_port *);
void (*put_sess)(struct fc_port *);
void (*shutdown_sess)(struct fc_port *);
+ int (*get_dif_tags)(struct qla_tgt_cmd *cmd, uint16_t *pfw_prot_opts);
+ int (*chk_dif_tags)(uint32_t tag);
+ void (*add_target)(struct scsi_qla_host *);
};
int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
@@ -720,8 +730,8 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
#define QLA_TGT_ABORT_ALL 0xFFFE
#define QLA_TGT_NEXUS_LOSS_SESS 0xFFFD
#define QLA_TGT_NEXUS_LOSS 0xFFFC
-#define QLA_TGT_ABTS 0xFFFB
-#define QLA_TGT_2G_ABORT_TASK 0xFFFA
+#define QLA_TGT_ABTS 0xFFFB
+#define QLA_TGT_2G_ABORT_TASK 0xFFFA
/* Notify Acknowledge flags */
#define NOTIFY_ACK_RES_COUNT BIT_8
@@ -845,6 +855,7 @@ enum trace_flags {
TRC_CMD_FREE = BIT_17,
TRC_DATA_IN = BIT_18,
TRC_ABORT = BIT_19,
+ TRC_DIF_ERR = BIT_20,
};
struct qla_tgt_cmd {
@@ -862,7 +873,6 @@ struct qla_tgt_cmd {
unsigned int sg_mapped:1;
unsigned int free_sg:1;
unsigned int write_data_transferred:1;
- unsigned int ctx_dsd_alloced:1;
unsigned int q_full:1;
unsigned int term_exchg:1;
unsigned int cmd_sent_to_fw:1;
@@ -885,11 +895,25 @@ struct qla_tgt_cmd {
struct list_head cmd_list;
struct atio_from_isp atio;
- /* t10dif */
+
+ uint8_t ctx_dsd_alloced;
+
+ /* T10-DIF */
+#define DIF_ERR_NONE 0
+#define DIF_ERR_GRD 1
+#define DIF_ERR_REF 2
+#define DIF_ERR_APP 3
+ int8_t dif_err_code;
struct scatterlist *prot_sg;
uint32_t prot_sg_cnt;
- uint32_t blk_sz;
+ uint32_t blk_sz, num_blks;
+ uint8_t scsi_status, sense_key, asc, ascq;
+
struct crc_context *ctx;
+ uint8_t *cdb;
+ uint64_t lba;
+ uint16_t a_guard, e_guard, a_app_tag, e_app_tag;
+ uint32_t a_ref_tag, e_ref_tag;
uint64_t jiffies_at_alloc;
uint64_t jiffies_at_free;
@@ -1053,4 +1077,7 @@ extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
extern void qlt_logo_completion_handler(fc_port_t *, int);
extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
+void qlt_send_resp_ctio(scsi_qla_host_t *, struct qla_tgt_cmd *, uint8_t,
+ uint8_t, uint8_t, uint8_t);
+
#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 3cb1964b7786..45bc84e8e3bf 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.07.00.38-k"
+#define QLA2XXX_VERSION "9.00.00.00-k"
-#define QLA_DRIVER_MAJOR_VER 8
-#define QLA_DRIVER_MINOR_VER 7
+#define QLA_DRIVER_MAJOR_VER 9
+#define QLA_DRIVER_MINOR_VER 0
#define QLA_DRIVER_PATCH_VER 0
#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 8e8ab0fa9672..7443e4efa3ae 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -531,6 +531,24 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
return;
}
+ switch (cmd->dif_err_code) {
+ case DIF_ERR_GRD:
+ cmd->se_cmd.pi_err =
+ TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+ break;
+ case DIF_ERR_REF:
+ cmd->se_cmd.pi_err =
+ TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+ break;
+ case DIF_ERR_APP:
+ cmd->se_cmd.pi_err =
+ TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
+ break;
+ case DIF_ERR_NONE:
+ default:
+ break;
+ }
+
if (cmd->se_cmd.pi_err)
transport_generic_request_failure(&cmd->se_cmd,
cmd->se_cmd.pi_err);
@@ -555,25 +573,23 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
}
-static void tcm_qla2xxx_handle_dif_work(struct work_struct *work)
+static int tcm_qla2xxx_chk_dif_tags(uint32_t tag)
{
- struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
-
- /* take an extra kref to prevent cmd free too early.
- * need to wait for SCSI status/check condition to
- * finish responding generate by transport_generic_request_failure.
- */
- kref_get(&cmd->se_cmd.cmd_kref);
- transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err);
+ return 0;
}
-/*
- * Called from qla_target.c:qlt_do_ctio_completion()
- */
-static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd)
+static int tcm_qla2xxx_dif_tags(struct qla_tgt_cmd *cmd,
+ uint16_t *pfw_prot_opts)
{
- INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work);
- queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+ if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
+ *pfw_prot_opts |= PO_DISABLE_GUARD_CHECK;
+
+ if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG))
+ *pfw_prot_opts |= PO_DIS_APP_TAG_VALD;
+
+ return 0;
}
/*
@@ -1610,7 +1626,6 @@ static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id,
static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
.handle_cmd = tcm_qla2xxx_handle_cmd,
.handle_data = tcm_qla2xxx_handle_data,
- .handle_dif_err = tcm_qla2xxx_handle_dif_err,
.handle_tmr = tcm_qla2xxx_handle_tmr,
.free_cmd = tcm_qla2xxx_free_cmd,
.free_mcmd = tcm_qla2xxx_free_mcmd,
@@ -1622,6 +1637,8 @@ static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
.clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
.put_sess = tcm_qla2xxx_put_sess,
.shutdown_sess = tcm_qla2xxx_shutdown_sess,
+ .get_dif_tags = tcm_qla2xxx_dif_tags,
+ .chk_dif_tags = tcm_qla2xxx_chk_dif_tags,
};
static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c
new file mode 100644
index 000000000000..a97c9507103d
--- /dev/null
+++ b/drivers/scsi/scsi_debugfs.c
@@ -0,0 +1,13 @@
+#include <linux/seq_file.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include "scsi_debugfs.h"
+
+void scsi_show_rq(struct seq_file *m, struct request *rq)
+{
+ struct scsi_cmnd *cmd = container_of(scsi_req(rq), typeof(*cmd), req);
+ char buf[80];
+
+ __scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len);
+ seq_printf(m, ", .cmd=%s", buf);
+}
diff --git a/drivers/scsi/scsi_debugfs.h b/drivers/scsi/scsi_debugfs.h
new file mode 100644
index 000000000000..951b043e82d0
--- /dev/null
+++ b/drivers/scsi/scsi_debugfs.h
@@ -0,0 +1,4 @@
+struct request;
+struct seq_file;
+
+void scsi_show_rq(struct seq_file *m, struct request *rq);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index d70c67cf46ef..ecc07dab893d 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1915,7 +1915,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
req->rq_flags |= RQF_QUIET;
req->timeout = 10 * HZ;
- req->retries = 5;
+ rq->retries = 5;
blk_execute_rq_nowait(req->q, NULL, req, 1, eh_lock_door_done);
}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9822fdeed0ce..a1effcce67b3 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -34,6 +34,7 @@
#include <trace/events/scsi.h>
+#include "scsi_debugfs.h"
#include "scsi_priv.h"
#include "scsi_logging.h"
@@ -229,8 +230,8 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
* @rq_flags: flags for ->rq_flags
* @resid: optional residual length
*
- * returns the req->errors value which is the scsi_cmnd result
- * field.
+ * Returns the scsi_cmnd result field if a command was executed, or a negative
+ * Linux error code if we didn't get that far.
*/
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
@@ -256,7 +257,7 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
rq->cmd_len = COMMAND_SIZE(cmd[0]);
memcpy(rq->cmd, cmd, rq->cmd_len);
- req->retries = retries;
+ rq->retries = retries;
req->timeout = timeout;
req->cmd_flags |= flags;
req->rq_flags |= rq_flags | RQF_QUIET | RQF_PREEMPT;
@@ -281,7 +282,7 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
if (sshdr)
scsi_normalize_sense(rq->sense, rq->sense_len, sshdr);
- ret = req->errors;
+ ret = rq->result;
out:
blk_put_request(req);
@@ -496,7 +497,7 @@ static void scsi_run_queue(struct request_queue *q)
scsi_starved_list_run(sdev->host);
if (q->mq_ops)
- blk_mq_start_stopped_hw_queues(q, false);
+ blk_mq_run_hw_queues(q, false);
else
blk_run_queue(q);
}
@@ -667,7 +668,7 @@ static bool scsi_end_request(struct request *req, int error,
!list_empty(&sdev->host->starved_list))
kblockd_schedule_work(&sdev->requeue_work);
else
- blk_mq_start_stopped_hw_queues(q, true);
+ blk_mq_run_hw_queues(q, true);
} else {
unsigned long flags;
@@ -797,8 +798,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
/*
* __scsi_error_from_host_byte may have reset the host_byte
*/
- req->errors = cmd->result;
-
+ scsi_req(req)->result = cmd->result;
scsi_req(req)->resid_len = scsi_get_resid(cmd);
if (scsi_bidi_cmnd(cmd)) {
@@ -835,7 +835,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
/*
* Recovered errors need reporting, but they're always treated as
* success, so fiddle the result code here. For passthrough requests
- * we already took a copy of the original into rq->errors which
+ * we already took a copy of the original into sreq->result which
* is what gets returned to the user
*/
if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
@@ -1061,10 +1061,10 @@ int scsi_init_io(struct scsi_cmnd *cmd)
struct scsi_device *sdev = cmd->device;
struct request *rq = cmd->request;
bool is_mq = (rq->mq_ctx != NULL);
- int error;
+ int error = BLKPREP_KILL;
if (WARN_ON_ONCE(!blk_rq_nr_phys_segments(rq)))
- return -EINVAL;
+ goto err_exit;
error = scsi_init_sgtable(rq, &cmd->sdb);
if (error)
@@ -1177,7 +1177,7 @@ static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req)
cmd->cmd_len = scsi_req(req)->cmd_len;
cmd->cmnd = scsi_req(req)->cmd;
cmd->transfersize = blk_rq_bytes(req);
- cmd->allowed = req->retries;
+ cmd->allowed = scsi_req(req)->retries;
return BLKPREP_OK;
}
@@ -1281,7 +1281,7 @@ scsi_prep_return(struct request_queue *q, struct request *req, int ret)
switch (ret) {
case BLKPREP_KILL:
case BLKPREP_INVALID:
- req->errors = DID_NO_CONNECT << 16;
+ scsi_req(req)->result = DID_NO_CONNECT << 16;
/* release the command and kill it */
if (req->special) {
struct scsi_cmnd *cmd = req->special;
@@ -1905,7 +1905,7 @@ static int scsi_mq_prep_fn(struct request *req)
static void scsi_mq_done(struct scsi_cmnd *cmd)
{
trace_scsi_dispatch_cmd_done(cmd);
- blk_mq_complete_request(cmd->request, cmd->request->errors);
+ blk_mq_complete_request(cmd->request);
}
static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -1974,7 +1974,7 @@ out:
case BLK_MQ_RQ_QUEUE_BUSY:
if (atomic_read(&sdev->device_busy) == 0 &&
!scsi_device_blocked(sdev))
- blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY);
+ blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
break;
case BLK_MQ_RQ_QUEUE_ERROR:
/*
@@ -2154,10 +2154,13 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
return q;
}
-static struct blk_mq_ops scsi_mq_ops = {
+static const struct blk_mq_ops scsi_mq_ops = {
.queue_rq = scsi_queue_rq,
.complete = scsi_softirq_done,
.timeout = scsi_timeout,
+#ifdef CONFIG_BLK_DEBUG_FS
+ .show_rq = scsi_show_rq,
+#endif
.init_request = scsi_init_request,
.exit_request = scsi_exit_request,
.map_queues = scsi_map_queues,
@@ -2932,6 +2935,8 @@ EXPORT_SYMBOL(scsi_target_resume);
/**
* scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
* @sdev: device to block
+ * @wait: Whether or not to wait until ongoing .queuecommand() /
+ * .queue_rq() calls have finished.
*
* Block request made by scsi lld's to temporarily stop all
* scsi commands on the specified device. May sleep.
@@ -2949,7 +2954,7 @@ EXPORT_SYMBOL(scsi_target_resume);
* remove the rport mutex lock and unlock calls from srp_queuecommand().
*/
int
-scsi_internal_device_block(struct scsi_device *sdev)
+scsi_internal_device_block(struct scsi_device *sdev, bool wait)
{
struct request_queue *q = sdev->request_queue;
unsigned long flags;
@@ -2969,12 +2974,16 @@ scsi_internal_device_block(struct scsi_device *sdev)
* request queue.
*/
if (q->mq_ops) {
- blk_mq_quiesce_queue(q);
+ if (wait)
+ blk_mq_quiesce_queue(q);
+ else
+ blk_mq_stop_hw_queues(q);
} else {
spin_lock_irqsave(q->queue_lock, flags);
blk_stop_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
- scsi_wait_for_queuecommand(sdev);
+ if (wait)
+ scsi_wait_for_queuecommand(sdev);
}
return 0;
@@ -3036,7 +3045,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
static void
device_block(struct scsi_device *sdev, void *data)
{
- scsi_internal_device_block(sdev);
+ scsi_internal_device_block(sdev, true);
}
static int
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index 109802f776ed..50e624fb8307 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -111,7 +111,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
next_msg:
if ((err) || (nlh->nlmsg_flags & NLM_F_ACK))
- netlink_ack(skb, nlh, err);
+ netlink_ack(skb, nlh, err, NULL);
skb_pull(skb, rlen);
}
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index e20ab10623fb..59ebc1795bb3 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -187,8 +187,5 @@ static inline void scsi_dh_remove_device(struct scsi_device *sdev) { }
*/
#define SCSI_DEVICE_BLOCK_MAX_TIMEOUT 600 /* units in seconds */
-extern int scsi_internal_device_block(struct scsi_device *sdev);
-extern int scsi_internal_device_unblock(struct scsi_device *sdev,
- enum scsi_device_state new_state);
#endif /* _SCSI_PRIV_H */
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index ca0e5a9a17f8..0ebe2f1bb908 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -184,9 +184,9 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
blk_rq_bytes(req->next_rq);
handler = to_sas_internal(shost->transportt)->f->smp_handler;
ret = handler(shost, rphy, req);
- req->errors = ret;
+ scsi_req(req)->result = ret;
- blk_end_request_all(req, ret);
+ blk_end_request_all(req, 0);
spin_lock_irq(q->queue_lock);
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 4195b7c7f81e..f9d1432d7cc5 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -419,6 +419,46 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RW(provisioning_mode);
+static const char *zeroing_mode[] = {
+ [SD_ZERO_WRITE] = "write",
+ [SD_ZERO_WS] = "writesame",
+ [SD_ZERO_WS16_UNMAP] = "writesame_16_unmap",
+ [SD_ZERO_WS10_UNMAP] = "writesame_10_unmap",
+};
+
+static ssize_t
+zeroing_mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ return snprintf(buf, 20, "%s\n", zeroing_mode[sdkp->zeroing_mode]);
+}
+
+static ssize_t
+zeroing_mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (!strncmp(buf, zeroing_mode[SD_ZERO_WRITE], 20))
+ sdkp->zeroing_mode = SD_ZERO_WRITE;
+ else if (!strncmp(buf, zeroing_mode[SD_ZERO_WS], 20))
+ sdkp->zeroing_mode = SD_ZERO_WS;
+ else if (!strncmp(buf, zeroing_mode[SD_ZERO_WS16_UNMAP], 20))
+ sdkp->zeroing_mode = SD_ZERO_WS16_UNMAP;
+ else if (!strncmp(buf, zeroing_mode[SD_ZERO_WS10_UNMAP], 20))
+ sdkp->zeroing_mode = SD_ZERO_WS10_UNMAP;
+ else
+ return -EINVAL;
+
+ return count;
+}
+static DEVICE_ATTR_RW(zeroing_mode);
+
static ssize_t
max_medium_access_timeouts_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -497,6 +537,7 @@ static struct attribute *sd_disk_attrs[] = {
&dev_attr_app_tag_own.attr,
&dev_attr_thin_provisioning.attr,
&dev_attr_provisioning_mode.attr,
+ &dev_attr_zeroing_mode.attr,
&dev_attr_max_write_same_blocks.attr,
&dev_attr_max_medium_access_timeouts.attr,
NULL,
@@ -646,26 +687,11 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
unsigned int logical_block_size = sdkp->device->sector_size;
unsigned int max_blocks = 0;
- q->limits.discard_zeroes_data = 0;
-
- /*
- * When LBPRZ is reported, discard alignment and granularity
- * must be fixed to the logical block size. Otherwise the block
- * layer will drop misaligned portions of the request which can
- * lead to data corruption. If LBPRZ is not set, we honor the
- * device preference.
- */
- if (sdkp->lbprz) {
- q->limits.discard_alignment = 0;
- q->limits.discard_granularity = logical_block_size;
- } else {
- q->limits.discard_alignment = sdkp->unmap_alignment *
- logical_block_size;
- q->limits.discard_granularity =
- max(sdkp->physical_block_size,
- sdkp->unmap_granularity * logical_block_size);
- }
-
+ q->limits.discard_alignment =
+ sdkp->unmap_alignment * logical_block_size;
+ q->limits.discard_granularity =
+ max(sdkp->physical_block_size,
+ sdkp->unmap_granularity * logical_block_size);
sdkp->provisioning_mode = mode;
switch (mode) {
@@ -683,19 +709,16 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
case SD_LBP_WS16:
max_blocks = min_not_zero(sdkp->max_ws_blocks,
(u32)SD_MAX_WS16_BLOCKS);
- q->limits.discard_zeroes_data = sdkp->lbprz;
break;
case SD_LBP_WS10:
max_blocks = min_not_zero(sdkp->max_ws_blocks,
(u32)SD_MAX_WS10_BLOCKS);
- q->limits.discard_zeroes_data = sdkp->lbprz;
break;
case SD_LBP_ZERO:
max_blocks = min_not_zero(sdkp->max_ws_blocks,
(u32)SD_MAX_WS10_BLOCKS);
- q->limits.discard_zeroes_data = 1;
break;
}
@@ -703,92 +726,122 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}
-/**
- * sd_setup_discard_cmnd - unmap blocks on thinly provisioned device
- * @cmd: command to prepare
- *
- * Will set up either UNMAP, WRITE SAME(16) or WRITE SAME(10) depending
- * on the provisioning mode of the target device.
- **/
-static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd)
+static int sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
{
- struct request *rq = cmd->request;
struct scsi_device *sdp = cmd->device;
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
- sector_t sector = blk_rq_pos(rq);
- unsigned int nr_sectors = blk_rq_sectors(rq);
- unsigned int len;
- int ret;
+ struct request *rq = cmd->request;
+ u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9);
+ u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
+ unsigned int data_len = 24;
char *buf;
- struct page *page;
-
- sector >>= ilog2(sdp->sector_size) - 9;
- nr_sectors >>= ilog2(sdp->sector_size) - 9;
- page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
- if (!page)
+ rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
+ if (!rq->special_vec.bv_page)
return BLKPREP_DEFER;
+ rq->special_vec.bv_offset = 0;
+ rq->special_vec.bv_len = data_len;
+ rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
- switch (sdkp->provisioning_mode) {
- case SD_LBP_UNMAP:
- buf = page_address(page);
+ cmd->cmd_len = 10;
+ cmd->cmnd[0] = UNMAP;
+ cmd->cmnd[8] = 24;
- cmd->cmd_len = 10;
- cmd->cmnd[0] = UNMAP;
- cmd->cmnd[8] = 24;
+ buf = page_address(rq->special_vec.bv_page);
+ put_unaligned_be16(6 + 16, &buf[0]);
+ put_unaligned_be16(16, &buf[2]);
+ put_unaligned_be64(sector, &buf[8]);
+ put_unaligned_be32(nr_sectors, &buf[16]);
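+ /* The 24-byte payload built above is an UNMAP parameter list with a
+ * single block descriptor: bytes 0-1 carry the unmap data length (22),
+ * bytes 2-3 the block descriptor data length (16), and the descriptor
+ * holds the starting LBA at offset 8 and the block count at offset 16.
+ */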
- put_unaligned_be16(6 + 16, &buf[0]);
- put_unaligned_be16(16, &buf[2]);
- put_unaligned_be64(sector, &buf[8]);
- put_unaligned_be32(nr_sectors, &buf[16]);
+ cmd->allowed = SD_MAX_RETRIES;
+ cmd->transfersize = data_len;
+ rq->timeout = SD_TIMEOUT;
+ scsi_req(rq)->resid_len = data_len;
- len = 24;
- break;
+ return scsi_init_io(cmd);
+}
- case SD_LBP_WS16:
- cmd->cmd_len = 16;
- cmd->cmnd[0] = WRITE_SAME_16;
+static int sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, bool unmap)
+{
+ struct scsi_device *sdp = cmd->device;
+ struct request *rq = cmd->request;
+ u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9);
+ u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
+ u32 data_len = sdp->sector_size;
+
+ rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
+ if (!rq->special_vec.bv_page)
+ return BLKPREP_DEFER;
+ rq->special_vec.bv_offset = 0;
+ rq->special_vec.bv_len = data_len;
+ rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
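+ /* The zeroed page above supplies the single logical block of data that
+ * the device replicates across the WRITE SAME range. */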
+
+ cmd->cmd_len = 16;
+ cmd->cmnd[0] = WRITE_SAME_16;
+ if (unmap)
cmd->cmnd[1] = 0x8; /* UNMAP */
- put_unaligned_be64(sector, &cmd->cmnd[2]);
- put_unaligned_be32(nr_sectors, &cmd->cmnd[10]);
+ put_unaligned_be64(sector, &cmd->cmnd[2]);
+ put_unaligned_be32(nr_sectors, &cmd->cmnd[10]);
- len = sdkp->device->sector_size;
- break;
+ cmd->allowed = SD_MAX_RETRIES;
+ cmd->transfersize = data_len;
+ rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
+ scsi_req(rq)->resid_len = data_len;
- case SD_LBP_WS10:
- case SD_LBP_ZERO:
- cmd->cmd_len = 10;
- cmd->cmnd[0] = WRITE_SAME;
- if (sdkp->provisioning_mode == SD_LBP_WS10)
- cmd->cmnd[1] = 0x8; /* UNMAP */
- put_unaligned_be32(sector, &cmd->cmnd[2]);
- put_unaligned_be16(nr_sectors, &cmd->cmnd[7]);
+ return scsi_init_io(cmd);
+}
- len = sdkp->device->sector_size;
- break;
+static int sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd, bool unmap)
+{
+ struct scsi_device *sdp = cmd->device;
+ struct request *rq = cmd->request;
+ u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9);
+ u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
+ u32 data_len = sdp->sector_size;
- default:
- ret = BLKPREP_INVALID;
- goto out;
- }
+ rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
+ if (!rq->special_vec.bv_page)
+ return BLKPREP_DEFER;
+ rq->special_vec.bv_offset = 0;
+ rq->special_vec.bv_len = data_len;
+ rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
- rq->timeout = SD_TIMEOUT;
+ cmd->cmd_len = 10;
+ cmd->cmnd[0] = WRITE_SAME;
+ if (unmap)
+ cmd->cmnd[1] = 0x8; /* UNMAP */
+ put_unaligned_be32(sector, &cmd->cmnd[2]);
+ put_unaligned_be16(nr_sectors, &cmd->cmnd[7]);
- cmd->transfersize = len;
cmd->allowed = SD_MAX_RETRIES;
+ cmd->transfersize = data_len;
+ rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
+ scsi_req(rq)->resid_len = data_len;
- rq->special_vec.bv_page = page;
- rq->special_vec.bv_offset = 0;
- rq->special_vec.bv_len = len;
+ return scsi_init_io(cmd);
+}
- rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
- scsi_req(rq)->resid_len = len;
+static int sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
+{
+ struct request *rq = cmd->request;
+ struct scsi_device *sdp = cmd->device;
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9);
+ u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
+
+ if (!(rq->cmd_flags & REQ_NOUNMAP)) {
+ switch (sdkp->zeroing_mode) {
+ case SD_ZERO_WS16_UNMAP:
+ return sd_setup_write_same16_cmnd(cmd, true);
+ case SD_ZERO_WS10_UNMAP:
+ return sd_setup_write_same10_cmnd(cmd, true);
+ }
+ }
- ret = scsi_init_io(cmd);
-out:
- if (ret != BLKPREP_OK)
- __free_page(page);
- return ret;
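+ /*
+ * No deallocating WRITE SAME variant applies (or REQ_NOUNMAP was set),
+ * so fall back to a plain WRITE SAME, using the 16-byte CDB when the
+ * device prefers it or when the range exceeds WRITE SAME(10)'s 32-bit
+ * LBA / 16-bit block count fields.
+ */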
+ if (sdp->no_write_same)
+ return BLKPREP_INVALID;
+ if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff)
+ return sd_setup_write_same16_cmnd(cmd, false);
+ return sd_setup_write_same10_cmnd(cmd, false);
}
static void sd_config_write_same(struct scsi_disk *sdkp)
@@ -817,9 +870,20 @@ static void sd_config_write_same(struct scsi_disk *sdkp)
sdkp->max_ws_blocks = 0;
}
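+ /*
+ * Choose how REQ_OP_WRITE_ZEROES will be issued: prefer a deallocating
+ * WRITE SAME when the device guarantees zeroed unmapped blocks (LBPRZ),
+ * then a regular WRITE SAME, and finally plain zero-filled WRITEs.
+ */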
+ if (sdkp->lbprz && sdkp->lbpws)
+ sdkp->zeroing_mode = SD_ZERO_WS16_UNMAP;
+ else if (sdkp->lbprz && sdkp->lbpws10)
+ sdkp->zeroing_mode = SD_ZERO_WS10_UNMAP;
+ else if (sdkp->max_ws_blocks)
+ sdkp->zeroing_mode = SD_ZERO_WS;
+ else
+ sdkp->zeroing_mode = SD_ZERO_WRITE;
+
out:
blk_queue_max_write_same_sectors(q, sdkp->max_ws_blocks *
(logical_block_size >> 9));
+ blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks *
+ (logical_block_size >> 9));
}
/**
@@ -1156,7 +1220,20 @@ static int sd_init_command(struct scsi_cmnd *cmd)
switch (req_op(rq)) {
case REQ_OP_DISCARD:
- return sd_setup_discard_cmnd(cmd);
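+ /* build the discard according to the device's provisioning mode */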
+ switch (scsi_disk(rq->rq_disk)->provisioning_mode) {
+ case SD_LBP_UNMAP:
+ return sd_setup_unmap_cmnd(cmd);
+ case SD_LBP_WS16:
+ return sd_setup_write_same16_cmnd(cmd, true);
+ case SD_LBP_WS10:
+ return sd_setup_write_same10_cmnd(cmd, true);
+ case SD_LBP_ZERO:
+ return sd_setup_write_same10_cmnd(cmd, false);
+ default:
+ return BLKPREP_INVALID;
+ }
+ case REQ_OP_WRITE_ZEROES:
+ return sd_setup_write_zeroes_cmnd(cmd);
case REQ_OP_WRITE_SAME:
return sd_setup_write_same_cmnd(cmd);
case REQ_OP_FLUSH:
@@ -1810,6 +1887,8 @@ static int sd_done(struct scsi_cmnd *SCpnt)
{
int result = SCpnt->result;
unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
+ unsigned int sector_size = SCpnt->device->sector_size;
+ unsigned int resid;
struct scsi_sense_hdr sshdr;
struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);
struct request *req = SCpnt->request;
@@ -1818,6 +1897,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
switch (req_op(req)) {
case REQ_OP_DISCARD:
+ case REQ_OP_WRITE_ZEROES:
case REQ_OP_WRITE_SAME:
case REQ_OP_ZONE_RESET:
if (!result) {
@@ -1838,6 +1918,21 @@ static int sd_done(struct scsi_cmnd *SCpnt)
scsi_set_resid(SCpnt, blk_rq_bytes(req));
}
break;
+ default:
+ /*
+ * In case of bogus fw or device, we could end up having
+ * an unaligned partial completion. Check this here and force
+ * alignment.
+ */
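+ /* e.g. a resid of 2500 with 512-byte sectors is rounded up to 2560
+ * before being clamped to the transfer length */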
+ resid = scsi_get_resid(SCpnt);
+ if (resid & (sector_size - 1)) {
+ sd_printk(KERN_INFO, sdkp,
+ "Unaligned partial completion (resid=%u, sector_sz=%u)\n",
+ resid, sector_size);
+ resid = min(scsi_bufflen(SCpnt),
+ round_up(resid, sector_size));
+ scsi_set_resid(SCpnt, resid);
+ }
}
if (result) {
@@ -2111,6 +2206,22 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
#define READ_CAPACITY_RETRIES_ON_RESET 10
+/*
+ * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set
+ * and the reported logical block size is bigger than 512 bytes. Note
+ * that last_sector is a u64 and therefore logical_to_sectors() is not
+ * applicable.
+ */
+static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
+{
+ u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);
+
+ if (sizeof(sector_t) == 4 && last_sector > U32_MAX)
+ return false;
+
+ return true;
+}
+
static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
unsigned char *buffer)
{
@@ -2176,7 +2287,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
return -ENODEV;
}
- if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
+ if (!sd_addressable_capacity(lba, sector_size)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");
@@ -2265,7 +2376,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
return sector_size;
}
- if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
+ if (!sd_addressable_capacity(lba, sector_size)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");
@@ -2761,7 +2872,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
sd_config_discard(sdkp, SD_LBP_WS16);
} else { /* LBP VPD page tells us what to use */
- if (sdkp->lbpu && sdkp->max_unmap_blocks && !sdkp->lbprz)
+ if (sdkp->lbpu && sdkp->max_unmap_blocks)
sd_config_discard(sdkp, SD_LBP_UNMAP);
else if (sdkp->lbpws)
sd_config_discard(sdkp, SD_LBP_WS16);
@@ -2965,7 +3076,8 @@ static int sd_revalidate_disk(struct gendisk *disk)
q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
} else
- rw_max = BLK_DEF_MAX_SECTORS;
+ rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
+ (sector_t)BLK_DEF_MAX_SECTORS);
/* Combine with controller limits */
q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 952100232e24..61d02efd366c 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -59,6 +59,13 @@ enum {
SD_LBP_DISABLE, /* Discard disabled due to failed cmd */
};
+enum {
+ SD_ZERO_WRITE = 0, /* Use WRITE(10/16) command */
+ SD_ZERO_WS, /* Use WRITE SAME(10/16) command */
+ SD_ZERO_WS16_UNMAP, /* Use WRITE SAME(16) with UNMAP */
+ SD_ZERO_WS10_UNMAP, /* Use WRITE SAME(10) with UNMAP */
+};
+
struct scsi_disk {
struct scsi_driver *driver; /* always &sd_template */
struct scsi_device *device;
@@ -89,6 +96,7 @@ struct scsi_disk {
u8 write_prot;
u8 protection_type;/* Data Integrity Field */
u8 provisioning_mode;
+ u8 zeroing_mode;
unsigned ATO : 1; /* state of disk ATO bit */
unsigned cache_override : 1; /* temp override of WCE,RCD */
unsigned WCE : 1; /* state of disk WCE bit */
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index fcf0fad72dd9..96855df9f49d 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -332,6 +332,7 @@ void sd_zbc_complete(struct scsi_cmnd *cmd,
break;
case REQ_OP_WRITE:
+ case REQ_OP_WRITE_ZEROES:
case REQ_OP_WRITE_SAME:
/* Unlock the zone */
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 06503c10ea27..0a38ba01b7b4 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -582,7 +582,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
sg_io_hdr_t *hp;
unsigned char cmnd[SG_MAX_CDB_SIZE];
- if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
+ if (unlikely(uaccess_kernel()))
return -EINVAL;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
@@ -1020,6 +1020,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
result = get_user(val, ip);
if (result)
return result;
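+ /* reject user-supplied CDB lengths larger than SG_MAX_CDB_SIZE */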
+ if (val > SG_MAX_CDB_SIZE)
+ return -ENOMEM;
sfp->next_cmd_len = (val > 0) ? val : 0;
return 0;
case SG_GET_VERSION_NUM:
@@ -1321,7 +1323,7 @@ sg_rq_end_io(struct request *rq, int uptodate)
pr_info("%s: device detaching\n", __func__);
sense = req->sense;
- result = rq->errors;
+ result = req->result;
resid = req->resid_len;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
@@ -1739,7 +1741,7 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
srp->rq = rq;
rq->end_io_data = srp;
- rq->retries = SG_DEFAULT_RETRIES;
+ req->retries = SG_DEFAULT_RETRIES;
if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
return 0;
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
index abada16b375b..da979a73baa0 100644
--- a/drivers/scsi/snic/snic_scsi.c
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -1066,7 +1066,7 @@ ioctl_hba_rst:
if (!snic->remove_wait) {
spin_unlock_irqrestore(io_lock, flags);
SNIC_HOST_ERR(snic->shost,
- "reset_cmpl:host reset completed after timout\n");
+ "reset_cmpl:host reset completed after timeout\n");
ret = 1;
return ret;
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 0b29b9329b1c..a8f630213a1a 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -836,6 +836,7 @@ static void get_capabilities(struct scsi_cd *cd)
unsigned char *buffer;
struct scsi_mode_data data;
struct scsi_sense_hdr sshdr;
+ unsigned int ms_len = 128;
int rc, n;
static const char *loadmech[] =
@@ -862,10 +863,11 @@ static void get_capabilities(struct scsi_cd *cd)
scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
/* ask for mode page 0x2a */
- rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
+ rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len,
SR_TIMEOUT, 3, &data, NULL);
- if (!scsi_status_is_good(rc)) {
+ if (!scsi_status_is_good(rc) || data.length > ms_len ||
+ data.header_length + data.block_descriptor_length > data.length) {
/* failed, drive doesn't have capabilities mode page */
cd->cdi.speed = 1;
cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index e5ef78a6848e..1ea34d6f5437 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -480,7 +480,7 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
atomic64_add(ktime_to_ns(now), &STp->stats->tot_write_time);
atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time);
atomic64_inc(&STp->stats->write_cnt);
- if (req->errors) {
+ if (scsi_req(req)->result) {
atomic64_add(atomic_read(&STp->stats->last_write_size)
- STp->buffer->cmdstat.residual,
&STp->stats->write_byte_cnt);
@@ -494,7 +494,7 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
atomic64_add(ktime_to_ns(now), &STp->stats->tot_read_time);
atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time);
atomic64_inc(&STp->stats->read_cnt);
- if (req->errors) {
+ if (scsi_req(req)->result) {
atomic64_add(atomic_read(&STp->stats->last_read_size)
- STp->buffer->cmdstat.residual,
&STp->stats->read_byte_cnt);
@@ -518,7 +518,7 @@ static void st_scsi_execute_end(struct request *req, int uptodate)
struct scsi_tape *STp = SRpnt->stp;
struct bio *tmp;
- STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
+ STp->buffer->cmdstat.midlevel_result = SRpnt->result = rq->result;
STp->buffer->cmdstat.residual = rq->resid_len;
st_do_stats(STp, req);
@@ -579,7 +579,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
memset(rq->cmd, 0, BLK_MAX_CDB);
memcpy(rq->cmd, cmd, rq->cmd_len);
req->timeout = timeout;
- req->retries = retries;
+ rq->retries = retries;
req->end_io_data = SRpnt;
blk_execute_rq_nowait(req->q, NULL, req, 1, st_scsi_execute_end);
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index b2183415b9ee..ae966dc3bbc5 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -400,8 +400,6 @@ MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels")
*/
static int storvsc_timeout = 180;
-static int msft_blist_flags = BLIST_TRY_VPD_PAGES;
-
#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
static struct scsi_transport_template *fc_transport_template;
#endif
@@ -1384,6 +1382,22 @@ static int storvsc_do_io(struct hv_device *device,
return ret;
}
+static int storvsc_device_alloc(struct scsi_device *sdevice)
+{
+ /*
+ * Set blist flag to permit the reading of the VPD pages even when
+ * the target may claim SPC-2 compliance. MSFT targets currently
+ * claim SPC-2 compliance while they implement post SPC-2 features.
+ * With this flag we can correctly handle WRITE_SAME_16 issues.
+ *
+ * Hypervisor reports SCSI_UNKNOWN type for DVD ROM device but
+ * still supports REPORT LUN.
+ */
+ sdevice->sdev_bflags = BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES;
+
+ return 0;
+}
+
static int storvsc_device_configure(struct scsi_device *sdevice)
{
@@ -1397,14 +1411,6 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
sdevice->no_write_same = 1;
/*
- * Add blist flags to permit the reading of the VPD pages even when
- * the target may claim SPC-2 compliance. MSFT targets currently
- * claim SPC-2 compliance while they implement post SPC-2 features.
- * With this patch we can correctly handle WRITE_SAME_16 issues.
- */
- sdevice->sdev_bflags |= msft_blist_flags;
-
- /*
* If the host is WIN8 or WIN8 R2, claim conformance to SPC-3
* if the device is a MSFT virtual device. If the host is
* WIN10 or newer, allow write_same.
@@ -1662,6 +1668,7 @@ static struct scsi_host_template scsi_driver = {
.eh_host_reset_handler = storvsc_host_reset_handler,
.proc_name = "storvsc_host",
.eh_timed_out = storvsc_eh_timed_out,
+ .slave_alloc = storvsc_device_alloc,
.slave_configure = storvsc_device_configure,
.cmd_per_lun = 255,
.this_id = -1,
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 318e4a1f76c9..54deeb754db5 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -146,7 +146,7 @@ enum attr_idn {
/* Descriptor idn for Query requests */
enum desc_idn {
QUERY_DESC_IDN_DEVICE = 0x0,
- QUERY_DESC_IDN_CONFIGURAION = 0x1,
+ QUERY_DESC_IDN_CONFIGURATION = 0x1,
QUERY_DESC_IDN_UNIT = 0x2,
QUERY_DESC_IDN_RFU_0 = 0x3,
QUERY_DESC_IDN_INTERCONNECT = 0x4,
@@ -162,19 +162,13 @@ enum desc_header_offset {
QUERY_DESC_DESC_TYPE_OFFSET = 0x01,
};
-enum ufs_desc_max_size {
- QUERY_DESC_DEVICE_MAX_SIZE = 0x40,
- QUERY_DESC_CONFIGURAION_MAX_SIZE = 0x90,
- QUERY_DESC_UNIT_MAX_SIZE = 0x23,
- QUERY_DESC_INTERCONNECT_MAX_SIZE = 0x06,
- /*
- * Max. 126 UNICODE characters (2 bytes per character) plus 2 bytes
- * of descriptor header.
- */
- QUERY_DESC_STRING_MAX_SIZE = 0xFE,
- QUERY_DESC_GEOMETRY_MAX_SIZE = 0x44,
- QUERY_DESC_POWER_MAX_SIZE = 0x62,
- QUERY_DESC_RFU_MAX_SIZE = 0x00,
+enum ufs_desc_def_size {
+ QUERY_DESC_DEVICE_DEF_SIZE = 0x40,
+ QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
+ QUERY_DESC_UNIT_DEF_SIZE = 0x23,
+ QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
+ QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44,
+ QUERY_DESC_POWER_DEF_SIZE = 0x62,
};
/* Unit descriptor parameters offsets in bytes*/
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index a72a4ba78125..8e5e6c04c035 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -309,8 +309,8 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mmio_base = devm_ioremap_resource(dev, mem_res);
- if (IS_ERR(*(void **)&mmio_base)) {
- err = PTR_ERR(*(void **)&mmio_base);
+ if (IS_ERR(mmio_base)) {
+ err = PTR_ERR(mmio_base);
goto out;
}
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 5793cd7ad371..abc7e87937cc 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -100,19 +100,6 @@
#define ufshcd_hex_dump(prefix_str, buf, len) \
print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
-static u32 ufs_query_desc_max_size[] = {
- QUERY_DESC_DEVICE_MAX_SIZE,
- QUERY_DESC_CONFIGURAION_MAX_SIZE,
- QUERY_DESC_UNIT_MAX_SIZE,
- QUERY_DESC_RFU_MAX_SIZE,
- QUERY_DESC_INTERCONNECT_MAX_SIZE,
- QUERY_DESC_STRING_MAX_SIZE,
- QUERY_DESC_RFU_MAX_SIZE,
- QUERY_DESC_GEOMETRY_MAX_SIZE,
- QUERY_DESC_POWER_MAX_SIZE,
- QUERY_DESC_RFU_MAX_SIZE,
-};
-
enum {
UFSHCD_MAX_CHANNEL = 0,
UFSHCD_MAX_ID = 1,
@@ -2841,7 +2828,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
goto out;
}
- if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
+ if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
__func__, *buf_len);
err = -EINVAL;
@@ -2922,6 +2909,92 @@ static int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
}
/**
+ * ufshcd_read_desc_length - read the specified descriptor length from header
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_index: descriptor index
+ * @desc_length: pointer to variable to read the length of descriptor
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+static int ufshcd_read_desc_length(struct ufs_hba *hba,
+ enum desc_idn desc_id,
+ int desc_index,
+ int *desc_length)
+{
+ int ret;
+ u8 header[QUERY_DESC_HDR_SIZE];
+ int header_len = QUERY_DESC_HDR_SIZE;
+
+ if (desc_id >= QUERY_DESC_IDN_MAX)
+ return -EINVAL;
+
+ ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
+ desc_id, desc_index, 0, header,
+ &header_len);
+
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
+ __func__, desc_id);
+ return ret;
+ } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
+ dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
+ __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
+ desc_id);
+ ret = -EINVAL;
+ }
+
+ *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
+ return ret;
+}
+
+/**
+ * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_len: mapped desc length (out)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
+ enum desc_idn desc_id, int *desc_len)
+{
+ switch (desc_id) {
+ case QUERY_DESC_IDN_DEVICE:
+ *desc_len = hba->desc_size.dev_desc;
+ break;
+ case QUERY_DESC_IDN_POWER:
+ *desc_len = hba->desc_size.pwr_desc;
+ break;
+ case QUERY_DESC_IDN_GEOMETRY:
+ *desc_len = hba->desc_size.geom_desc;
+ break;
+ case QUERY_DESC_IDN_CONFIGURATION:
+ *desc_len = hba->desc_size.conf_desc;
+ break;
+ case QUERY_DESC_IDN_UNIT:
+ *desc_len = hba->desc_size.unit_desc;
+ break;
+ case QUERY_DESC_IDN_INTERCONNECT:
+ *desc_len = hba->desc_size.interc_desc;
+ break;
+ case QUERY_DESC_IDN_STRING:
+ *desc_len = QUERY_DESC_MAX_SIZE;
+ break;
+ case QUERY_DESC_IDN_RFU_0:
+ case QUERY_DESC_IDN_RFU_1:
+ *desc_len = 0;
+ break;
+ default:
+ *desc_len = 0;
+ return -EINVAL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
+
+/**
* ufshcd_read_desc_param - read the specified descriptor parameter
* @hba: Pointer to adapter instance
* @desc_id: descriptor idn value
@@ -2935,42 +3008,49 @@ static int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
static int ufshcd_read_desc_param(struct ufs_hba *hba,
enum desc_idn desc_id,
int desc_index,
- u32 param_offset,
+ u8 param_offset,
u8 *param_read_buf,
- u32 param_size)
+ u8 param_size)
{
int ret;
u8 *desc_buf;
- u32 buff_len;
+ int buff_len;
bool is_kmalloc = true;
- /* safety checks */
- if (desc_id >= QUERY_DESC_IDN_MAX)
+ /* Safety check */
+ if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
return -EINVAL;
- buff_len = ufs_query_desc_max_size[desc_id];
- if ((param_offset + param_size) > buff_len)
- return -EINVAL;
+ /* Get the max length of the descriptor from the structure filled in at
+ * probe time.
+ */
+ ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
- if (!param_offset && (param_size == buff_len)) {
- /* memory space already available to hold full descriptor */
- desc_buf = param_read_buf;
- is_kmalloc = false;
- } else {
- /* allocate memory to hold full descriptor */
+ /* Sanity checks */
+ if (ret || !buff_len) {
+ dev_err(hba->dev, "%s: Failed to get full descriptor length",
+ __func__);
+ return ret;
+ }
+
+ /* Check whether we need temp memory */
+ if (param_offset != 0 || param_size < buff_len) {
desc_buf = kmalloc(buff_len, GFP_KERNEL);
if (!desc_buf)
return -ENOMEM;
+ } else {
+ desc_buf = param_read_buf;
+ is_kmalloc = false;
}
+ /* Request for full descriptor */
ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
- desc_id, desc_index, 0, desc_buf,
- &buff_len);
+ desc_id, desc_index, 0,
+ desc_buf, &buff_len);
if (ret) {
dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
__func__, desc_id, desc_index, param_offset, ret);
-
goto out;
}
@@ -2982,25 +3062,9 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba,
goto out;
}
- /*
- * While reading variable size descriptors (like string descriptor),
- * some UFS devices may report the "LENGTH" (field in "Transaction
- * Specific fields" of Query Response UPIU) same as what was requested
- * in Query Request UPIU instead of reporting the actual size of the
- * variable size descriptor.
- * Although it's safe to ignore the "LENGTH" field for variable size
- * descriptors as we can always derive the length of the descriptor from
- * the descriptor header fields. Hence this change impose the length
- * match check only for fixed size descriptors (for which we always
- * request the correct size as part of Query Request UPIU).
- */
- if ((desc_id != QUERY_DESC_IDN_STRING) &&
- (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
- dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
- __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
- ret = -EINVAL;
- goto out;
- }
+ /* Check that we do not copy more data than is available */
+ if (is_kmalloc && param_size > buff_len)
+ param_size = buff_len;
if (is_kmalloc)
memcpy(param_read_buf, &desc_buf[param_offset], param_size);
@@ -4563,8 +4627,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
}
if (ufshcd_is_clkscaling_supported(hba))
hba->clk_scaling.active_reqs--;
- if (ufshcd_is_clkscaling_supported(hba))
- hba->clk_scaling.active_reqs--;
}
/* clear corresponding bits of completed commands */
@@ -5866,8 +5928,8 @@ out:
static void ufshcd_init_icc_levels(struct ufs_hba *hba)
{
int ret;
- int buff_len = QUERY_DESC_POWER_MAX_SIZE;
- u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
+ int buff_len = hba->desc_size.pwr_desc;
+ u8 desc_buf[hba->desc_size.pwr_desc];
ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
if (ret) {
@@ -5965,11 +6027,10 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
{
int err;
u8 model_index;
- u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1] = {0};
- u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
+ u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1] = {0};
+ u8 desc_buf[hba->desc_size.dev_desc];
- err = ufshcd_read_device_desc(hba, desc_buf,
- QUERY_DESC_DEVICE_MAX_SIZE);
+ err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
if (err) {
dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
__func__, err);
@@ -5986,14 +6047,14 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
- QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
+ QUERY_DESC_MAX_SIZE, ASCII_STD);
if (err) {
dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
__func__, err);
goto out;
}
- str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
+ str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
MAX_MODEL_LEN));
@@ -6199,6 +6260,51 @@ static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
hba->req_abort_count = 0;
}
+static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
+{
+ int err;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
+ &hba->desc_size.dev_desc);
+ if (err)
+ hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
+ &hba->desc_size.pwr_desc);
+ if (err)
+ hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
+ &hba->desc_size.interc_desc);
+ if (err)
+ hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
+ &hba->desc_size.conf_desc);
+ if (err)
+ hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
+ &hba->desc_size.unit_desc);
+ if (err)
+ hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
+ &hba->desc_size.geom_desc);
+ if (err)
+ hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+}
+
+static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
+{
+ hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+ hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
+ hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
+ hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
+ hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
+ hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+}
+
/**
* ufshcd_probe_hba - probe hba to detect device and initialize
* @hba: per-adapter instance
@@ -6233,6 +6339,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
if (ret)
goto out;
+ /* Init check for device descriptor sizes */
+ ufshcd_init_desc_sizes(hba);
+
ret = ufs_get_device_desc(hba, &card);
if (ret) {
dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
@@ -6268,6 +6377,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
/* set the state as operational after switching to desired gear */
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+
/*
* If we are in error handling context or in power management callbacks
* context, no need to scan the host
@@ -7478,7 +7588,7 @@ static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
if (kstrtoul(buf, 0, &value))
return -EINVAL;
- if ((value < UFS_PM_LVL_0) || (value >= UFS_PM_LVL_MAX))
+ if (value >= UFS_PM_LVL_MAX)
return -EINVAL;
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -7722,6 +7832,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->mmio_base = mmio_base;
hba->irq = irq;
+ /* Set descriptor lengths to specification defaults */
+ ufshcd_def_desc_sizes(hba);
+
err = ufshcd_hba_init(hba);
if (err)
goto out_error;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 7630600217a2..cdc8bd05f7df 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -220,6 +220,15 @@ struct ufs_dev_cmd {
struct ufs_query query;
};
+struct ufs_desc_size {
+ int dev_desc;
+ int pwr_desc;
+ int geom_desc;
+ int interc_desc;
+ int unit_desc;
+ int conf_desc;
+};
+
/**
* struct ufs_clk_info - UFS clock related info
* @list: list headed by hba->clk_list_head
@@ -483,6 +492,7 @@ struct ufs_stats {
* @clk_list_head: UFS host controller clocks list node head
* @pwr_info: holds current power mode
* @max_pwr_info: keeps the device max valid pwm
+ * @desc_size: descriptor sizes reported by device
* @urgent_bkops_lvl: keeps track of urgent bkops level for device
* @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
* device is known or not.
@@ -666,6 +676,7 @@ struct ufs_hba {
bool is_urgent_bkops_lvl_checked;
struct rw_semaphore clk_scaling_lock;
+ struct ufs_desc_size desc_size;
};
/* Returns true if clocks can be gated. Otherwise false */
@@ -832,6 +843,10 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, bool *flag_res);
int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);
+
+int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
+ int *desc_length);
+
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
/* Wrapper functions for safely calling variant operations */
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index ef474a748744..c374e3b5c678 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1487,7 +1487,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
irq_flag &= ~PCI_IRQ_MSI;
error = pci_alloc_irq_vectors(adapter->dev, 1, 1, irq_flag);
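+ /* pci_alloc_irq_vectors() returns the number of vectors allocated on
+ * success, so only a negative return indicates failure */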
- if (error)
+ if (error < 0)
goto out_reset_adapter;
adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true);
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 9aa1fe1fc939..a6a8b60d4902 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -434,7 +434,7 @@ static int map_data_for_request(struct vscsifrnt_info *info,
if (seg_grants) {
page = virt_to_page(seg);
- off = (unsigned long)seg & ~PAGE_MASK;
+ off = offset_in_page(seg);
len = sizeof(struct scsiif_request_segment) * data_grants;
while (len > 0) {
bytes = min_t(unsigned int, len, PAGE_SIZE - off);