author     Linus Torvalds <torvalds@linux-foundation.org>   2013-05-01 00:16:38 +0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-05-01 00:16:38 +0400
commit     5aa1c98862d3f365d9cf6d0833d5dc127d2a76e7 (patch)
tree       89cbf0b67634ecc43a863a6ca058ff749df3cce7 /drivers/scsi
parent     6da6dc2380c3cfe8d6b59d7c3c55fdd7a521fe6c (diff)
parent     9e45dd73234af9a59613dc2989dcc2df2dab847f (diff)
download   linux-5aa1c98862d3f365d9cf6d0833d5dc127d2a76e7.tar.xz
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull first round of SCSI updates from James "Jej B" Bottomley:
 "The patch set is mostly driver updates (qla4, qla2 [ISF support
  updates], lpfc, aacraid [dual firmware image support]) and a few bug
  fixes"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (47 commits)
  [SCSI] iscsi_tcp: support PF_MEMALLOC/__GFP_MEMALLOC
  [SCSI] libiscsi: avoid unnecessary multiple NULL assignments
  [SCSI] qla4xxx: Update driver version to 5.03.00-k8
  [SCSI] qla4xxx: Added print statements to display AENs
  [SCSI] qla4xxx: Use correct value for max flash node entries
  [SCSI] qla4xxx: Restrict logout from boot target session using session id
  [SCSI] qla4xxx: Use correct flash ddb offset for ISP40XX
  [SCSI] isci: add CONFIG_PM_SLEEP to suspend/resume functions
  [SCSI] scsi_dh_alua: Add module parameter to allow failover to non preferred path without STPG
  [SCSI] qla2xxx: Update the driver version to 8.05.00.03-k.
  [SCSI] qla2xxx: Obtain loopback iteration count from bsg request.
  [SCSI] qla2xxx: Add clarifying printk to thermal access fail cases.
  [SCSI] qla2xxx: Remove duplicated include form qla_isr.c
  [SCSI] qla2xxx: Enhancements to support ISPFx00.
  [SCSI] qla4xxx: Update driver version to 5.03.00-k7
  [SCSI] qla4xxx: Replace dev type macros with generic portal type macros
  [SCSI] scsi_transport_iscsi: Declare portal type string macros for generic use
  [SCSI] qla4xxx: Add flash node mgmt support
  [SCSI] libiscsi: export function iscsi_switch_str_param
  [SCSI] scsi_transport_iscsi: Add flash node mgmt support
  ...
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/aacraid/aacraid.h              |    6
-rw-r--r--  drivers/scsi/aacraid/comminit.c             |    2
-rw-r--r--  drivers/scsi/aacraid/src.c                  |   26
-rw-r--r--  drivers/scsi/csiostor/csio_hw.c             |    5
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c  |   17
-rw-r--r--  drivers/scsi/isci/init.c                    |    6
-rw-r--r--  drivers/scsi/iscsi_tcp.c                    |   18
-rw-r--r--  drivers/scsi/libiscsi.c                     |    4
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c               |   12
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c                | 1107
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h               |    1
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c                |   21
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c            |   68
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h                |    3
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c               |  106
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c               |   24
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c          |    1
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c               |    8
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c                |  115
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h            |    2
-rw-r--r--  drivers/scsi/qla2xxx/Makefile               |    2
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c             |   35
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c              |  154
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.h              |    1
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c              |   27
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h              |  236
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h              |   47
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c               |   16
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c             |   52
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h           |   41
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c             |   67
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c              |   20
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c              |   10
-rw-r--r--  drivers/scsi/qla2xxx/qla_mr.c               | 3476
-rw-r--r--  drivers/scsi/qla2xxx/qla_mr.h               |  510
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c               |  212
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h          |    4
-rw-r--r--  drivers/scsi/qla4xxx/ql4_83xx.c             |   28
-rw-r--r--  drivers/scsi/qla4xxx/ql4_83xx.h             |   10
-rw-r--r--  drivers/scsi/qla4xxx/ql4_dbg.h              |    7
-rw-r--r--  drivers/scsi/qla4xxx/ql4_def.h              |   19
-rw-r--r--  drivers/scsi/qla4xxx/ql4_fw.h               |   38
-rw-r--r--  drivers/scsi/qla4xxx/ql4_glbl.h             |    9
-rw-r--r--  drivers/scsi/qla4xxx/ql4_isr.c              |   47
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c              |  120
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.c               |   19
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c               | 1731
-rw-r--r--  drivers/scsi/qla4xxx/ql4_version.h          |    2
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c         | 1002
49 files changed, 8694 insertions, 800 deletions
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index a6f7190c09a4..9323d058706b 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
*----------------------------------------------------------------------------*/
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 30000
+# define AAC_DRIVER_BUILD 30200
# define AAC_DRIVER_BRANCH "-ms"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -1918,6 +1918,10 @@ extern struct aac_common aac_config;
#define MONITOR_PANIC 0x00000020
#define KERNEL_UP_AND_RUNNING 0x00000080
#define KERNEL_PANIC 0x00000100
+#define FLASH_UPD_PENDING 0x00002000
+#define FLASH_UPD_SUCCESS 0x00004000
+#define FLASH_UPD_FAILED 0x00008000
+#define FWUPD_TIMEOUT (5 * 60)
/*
* Doorbell bit defines
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 3f759957f4b4..177b094c7792 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -214,7 +214,7 @@ int aac_send_shutdown(struct aac_dev * dev)
cmd = (struct aac_close *) fib_data(fibctx);
cmd->command = cpu_to_le32(VM_CloseAll);
- cmd->cid = cpu_to_le32(0xffffffff);
+ cmd->cid = cpu_to_le32(0xfffffffe);
status = aac_fib_send(ContainerCommand,
fibctx,
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index e2e349204e7d..0f56d8d7524f 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -703,6 +703,28 @@ int aac_srcv_init(struct aac_dev *dev)
!aac_src_restart_adapter(dev, 0))
++restart;
/*
+ * Check to see if flash update is running.
+ * Wait for the adapter to be up and running. Wait up to 5 minutes
+ */
+ status = src_readl(dev, MUnit.OMR);
+ if (status & FLASH_UPD_PENDING) {
+ start = jiffies;
+ do {
+ status = src_readl(dev, MUnit.OMR);
+ if (time_after(jiffies, start+HZ*FWUPD_TIMEOUT)) {
+ printk(KERN_ERR "%s%d: adapter flash update failed.\n",
+ dev->name, instance);
+ goto error_iounmap;
+ }
+ } while (!(status & FLASH_UPD_SUCCESS) &&
+ !(status & FLASH_UPD_FAILED));
+ /* Delay 10 seconds.
+ * Because right now FW is doing a soft reset,
+ * do not read scratch pad register at this time
+ */
+ ssleep(10);
+ }
+ /*
* Check to see if the board panic'd while booting.
*/
status = src_readl(dev, MUnit.OMR);
@@ -730,7 +752,9 @@ int aac_srcv_init(struct aac_dev *dev)
/*
* Wait for the adapter to be up and running. Wait up to 3 minutes
*/
- while (!((status = src_readl(dev, MUnit.OMR)) & KERNEL_UP_AND_RUNNING)) {
+ while (!((status = src_readl(dev, MUnit.OMR)) &
+ KERNEL_UP_AND_RUNNING) ||
+ status == 0xffffffff) {
if ((restart &&
(status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
time_after(jiffies, start+HZ*startup_timeout)) {
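
The new aac_srcv_init() logic above is a bounded poll: read the outbound message register, exit when a success or failure bit appears, and give up after a jiffies-based deadline. A minimal standalone sketch of the same pattern follows; the helper name, the __iomem pointer, and the bit values are illustrative stand-ins for src_readl(dev, MUnit.OMR) and the FLASH_UPD_* bits added to aacraid.h above.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/types.h>

#define UPD_PENDING	0x00002000	/* assumed bits, cf. FLASH_UPD_* */
#define UPD_SUCCESS	0x00004000
#define UPD_FAILED	0x00008000
#define UPD_TIMEOUT	(5 * 60)	/* seconds, cf. FWUPD_TIMEOUT */

static int wait_for_flash_update(void __iomem *omr)
{
	unsigned long deadline = jiffies + UPD_TIMEOUT * HZ;
	u32 status = readl(omr);

	if (!(status & UPD_PENDING))
		return 0;		/* no update in progress */

	do {
		status = readl(omr);
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
	} while (!(status & (UPD_SUCCESS | UPD_FAILED)));

	/* Firmware soft-resets after the update; hold off before
	 * reading scratchpad registers again, as the patch does. */
	ssleep(10);

	return (status & UPD_FAILED) ? -EIO : 0;
}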
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index bdd78fb4fc70..7dbaf58fab9f 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -3892,7 +3892,6 @@ csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
struct csio_fl_dma_buf *flb, void *priv)
{
__u8 op;
- __be64 *data;
void *msg = NULL;
uint32_t msg_len = 0;
bool msg_sg = 0;
@@ -3908,8 +3907,6 @@ csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
msg = (void *) flb;
msg_len = flb->totlen;
msg_sg = 1;
-
- data = (__be64 *) msg;
} else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) {
CSIO_INC_STATS(hw, n_cpl_fw6_msg);
@@ -3917,8 +3914,6 @@ csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
msg = (void *)((uintptr_t)wr + sizeof(__be64));
msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) :
sizeof(struct cpl_fw4_msg);
-
- data = (__be64 *) msg;
} else {
csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op);
CSIO_INC_STATS(hw, n_cpl_unexp);
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 6f4d8e6f32f1..68adb8955d2d 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -232,13 +232,13 @@ static void stpg_endio(struct request *req, int error)
struct scsi_sense_hdr sense_hdr;
unsigned err = SCSI_DH_OK;
- if (error || host_byte(req->errors) != DID_OK ||
- msg_byte(req->errors) != COMMAND_COMPLETE) {
+ if (host_byte(req->errors) != DID_OK ||
+ msg_byte(req->errors) != COMMAND_COMPLETE) {
err = SCSI_DH_IO;
goto done;
}
- if (h->senselen > 0) {
+ if (req->sense_len > 0) {
err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
&sense_hdr);
if (!err) {
@@ -255,7 +255,9 @@ static void stpg_endio(struct request *req, int error)
ALUA_DH_NAME, sense_hdr.sense_key,
sense_hdr.asc, sense_hdr.ascq);
err = SCSI_DH_IO;
- }
+ } else if (error)
+ err = SCSI_DH_IO;
+
if (err == SCSI_DH_OK) {
h->state = TPGS_STATE_OPTIMIZED;
sdev_printk(KERN_INFO, h->sdev,
@@ -710,6 +712,10 @@ static int alua_set_params(struct scsi_device *sdev, const char *params)
return result;
}
+static uint optimize_stpg;
+module_param(optimize_stpg, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than sending a STPG, when implicit TPGS is supported (0=No,1=Yes). Default is 0.");
+
/*
* alua_activate - activate a path
* @sdev: device on the path to be activated
@@ -731,6 +737,9 @@ static int alua_activate(struct scsi_device *sdev,
if (err != SCSI_DH_OK)
goto out;
+ if (optimize_stpg)
+ h->flags |= ALUA_OPTIMIZE_STPG;
+
if (h->tpgs & TPGS_MODE_EXPLICIT) {
switch (h->state) {
case TPGS_STATE_NONOPTIMIZED:
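
The optimize_stpg parameter above is sampled once per path activation and latched into per-device flags, so flipping /sys/module/scsi_dh_alua/parameters/optimize_stpg at runtime only affects subsequently activated paths. A hedged sketch of that latch pattern; the struct here is a simplified stand-in for alua_dh_data and the flag value is assumed:

#include <linux/module.h>
#include <linux/stat.h>
#include <linux/types.h>

static uint optimize_stpg;
module_param(optimize_stpg, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(optimize_stpg, "Allow a non-optimized path instead of sending STPG (0=No,1=Yes)");

#define ALUA_OPTIMIZE_STPG	0x01	/* assumed flag bit */

struct alua_state { unsigned flags; };	/* stand-in for alua_dh_data */

static void alua_latch_params(struct alua_state *h)
{
	/* Sample the writable module parameter at activation time so
	 * in-flight paths keep the behavior they started with. */
	if (optimize_stpg)
		h->flags |= ALUA_OPTIMIZE_STPG;
}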
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 2839baa82a5a..d25d0d859f05 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -721,7 +721,7 @@ static void isci_pci_remove(struct pci_dev *pdev)
}
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int isci_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -770,18 +770,16 @@ static int isci_resume(struct device *dev)
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(isci_pm_ops, isci_suspend, isci_resume);
-#endif
static struct pci_driver isci_pci_driver = {
.name = DRV_NAME,
.id_table = isci_id_table,
.probe = isci_pci_probe,
.remove = isci_pci_remove,
-#ifdef CONFIG_PM
.driver.pm = &isci_pm_ops,
-#endif
};
static __init int isci_init(void)
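
The isci change works because SIMPLE_DEV_PM_OPS expands through SET_SYSTEM_SLEEP_PM_OPS, which only references the named callbacks when CONFIG_PM_SLEEP is set; the ops struct and the .driver.pm assignment can therefore stay unconditional while only the callbacks keep an #ifdef. A minimal sketch of the pattern (driver name and callbacks are hypothetical):

#include <linux/pci.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int demo_suspend(struct device *dev) { return 0; }
static int demo_resume(struct device *dev) { return 0; }
#endif

/* No #ifdef needed here: with CONFIG_PM_SLEEP disabled this macro
 * expands to an empty dev_pm_ops and never names the callbacks. */
static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

static struct pci_driver demo_pci_driver = {
	.name		= "demo",	/* hypothetical driver */
	.driver.pm	= &demo_pm_ops,
};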
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 1b91ca0dc1e3..9e2588a6881c 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -370,17 +370,24 @@ static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
{
struct iscsi_conn *conn = task->conn;
- int rc;
+ unsigned long pflags = current->flags;
+ int rc = 0;
+
+ current->flags |= PF_MEMALLOC;
while (iscsi_sw_tcp_xmit_qlen(conn)) {
rc = iscsi_sw_tcp_xmit(conn);
- if (rc == 0)
- return -EAGAIN;
+ if (rc == 0) {
+ rc = -EAGAIN;
+ break;
+ }
if (rc < 0)
- return rc;
+ break;
+ rc = 0;
}
- return 0;
+ tsk_restore_flags(current, pflags, PF_MEMALLOC);
+ return rc;
}
/*
@@ -665,6 +672,7 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
sk->sk_reuse = SK_CAN_REUSE;
sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
sk->sk_allocation = GFP_ATOMIC;
+ sk_set_memalloc(sk);
iscsi_sw_tcp_conn_set_callbacks(conn);
tcp_sw_conn->sendpage = tcp_sw_conn->sock->ops->sendpage;
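
Two pieces cooperate in the PF_MEMALLOC change: the xmit path temporarily marks the task so allocations made while flushing the queue may dip into memory reserves, and sk_set_memalloc() grants the socket the same right, which matters when iSCSI carries the swap path. tsk_restore_flags() restores only the PF_MEMALLOC bit, preserving it if it was already set on entry. A sketch of the save/set/restore idiom, with a hypothetical do_xmit() standing in for the driver's real transmit loop:

#include <linux/sched.h>

struct demo_conn;			/* opaque stand-in */

static int do_xmit(struct demo_conn *conn)
{
	(void)conn;
	return 0;			/* stand-in for the real xmit loop */
}

static int xmit_with_memalloc(struct demo_conn *conn)
{
	unsigned long pflags = current->flags;
	int rc;

	current->flags |= PF_MEMALLOC;	/* allow reserve allocations */
	rc = do_xmit(conn);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);
	return rc;
}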
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 82c3fd4bc938..5de946984500 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -507,7 +507,6 @@ static void iscsi_free_task(struct iscsi_task *task)
kfifo_in(&session->cmdpool.queue, (void*)&task, sizeof(void*));
if (sc) {
- task->sc = NULL;
/* SCSI eh reuses commands to verify us */
sc->SCp.ptr = NULL;
/*
@@ -3142,7 +3141,7 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
}
EXPORT_SYMBOL_GPL(iscsi_conn_bind);
-static int iscsi_switch_str_param(char **param, char *new_val_buf)
+int iscsi_switch_str_param(char **param, char *new_val_buf)
{
char *new_val;
@@ -3159,6 +3158,7 @@ static int iscsi_switch_str_param(char **param, char *new_val_buf)
*param = new_val;
return 0;
}
+EXPORT_SYMBOL_GPL(iscsi_switch_str_param);
int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf, int buflen)
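
iscsi_switch_str_param(), now exported for qla4xxx's flash-node code, follows the duplicate-first rule for replacing a kernel-owned string: kstrdup() the new value before freeing the old one, so an allocation failure leaves the parameter intact. A simplified equivalent (the real function also short-circuits when the new value matches the old):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

static int switch_str_param(char **param, const char *new_val_buf)
{
	char *new_val = kstrdup(new_val_buf, GFP_KERNEL);

	if (!new_val)
		return -ENOMEM;	/* old value left untouched */

	kfree(*param);		/* kfree(NULL) is a no-op */
	*param = new_val;
	return 0;
}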
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index a364cae9e984..9290713af253 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -692,7 +692,7 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
*/
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
- while (pring->txcmplq_cnt) {
+ while (!list_empty(&pring->txcmplq)) {
msleep(10);
if (cnt++ > 500) { /* 5 secs */
lpfc_printf_log(phba,
@@ -2302,11 +2302,17 @@ static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
"FCF Fast failover=1 Priority failover=2");
-int lpfc_enable_rrq;
+int lpfc_enable_rrq = 2;
module_param(lpfc_enable_rrq, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
lpfc_param_show(enable_rrq);
-lpfc_param_init(enable_rrq, 0, 0, 1);
+/*
+# lpfc_enable_rrq: Track XRI/OXID reuse after IO failures
+# 0x0 = disabled, XRI/OXID use not tracked.
+# 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent.
+# 0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent.
+*/
+lpfc_param_init(enable_rrq, 2, 0, 2);
static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL);
/*
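
lpfc_param_init() is a macro that expands to a range-checked initializer for the corresponding cfg_ field, which is how the new [0,2] bounds and default of 2 are enforced at module load. Roughly what that expansion does, with names simplified (the real macro writes phba->cfg_enable_rrq and logs through lpfc_printf_log):

#include <linux/errno.h>
#include <linux/printk.h>

static int cfg_enable_rrq = 2;		/* new default: timed reuse, no RRQ */

static int enable_rrq_init(int val)
{
	if (val >= 0 && val <= 2) {
		cfg_enable_rrq = val;
		return 0;
	}
	pr_err("lpfc: enable_rrq %d out of range [0, 2], using default 2\n",
	       val);
	cfg_enable_rrq = 2;
	return -EINVAL;
}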
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index f5d106456f1d..888666892004 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -64,18 +64,14 @@ struct lpfc_bsg_event {
struct list_head events_to_get;
struct list_head events_to_see;
- /* job waiting for this event to finish */
- struct fc_bsg_job *set_job;
+ /* driver data associated with the job */
+ void *dd_data;
};
struct lpfc_bsg_iocb {
struct lpfc_iocbq *cmdiocbq;
- struct lpfc_iocbq *rspiocbq;
- struct lpfc_dmabuf *bmp;
+ struct lpfc_dmabuf *rmp;
struct lpfc_nodelist *ndlp;
-
- /* job waiting for this iocb to finish */
- struct fc_bsg_job *set_job;
};
struct lpfc_bsg_mbox {
@@ -86,20 +82,13 @@ struct lpfc_bsg_mbox {
uint32_t mbOffset; /* from app */
uint32_t inExtWLen; /* from app */
uint32_t outExtWLen; /* from app */
-
- /* job waiting for this mbox command to finish */
- struct fc_bsg_job *set_job;
};
#define MENLO_DID 0x0000FC0E
struct lpfc_bsg_menlo {
struct lpfc_iocbq *cmdiocbq;
- struct lpfc_iocbq *rspiocbq;
- struct lpfc_dmabuf *bmp;
-
- /* job waiting for this iocb to finish */
- struct fc_bsg_job *set_job;
+ struct lpfc_dmabuf *rmp;
};
#define TYPE_EVT 1
@@ -108,6 +97,7 @@ struct lpfc_bsg_menlo {
#define TYPE_MENLO 4
struct bsg_job_data {
uint32_t type;
+ struct fc_bsg_job *set_job; /* job waiting for this iocb to finish */
union {
struct lpfc_bsg_event *evt;
struct lpfc_bsg_iocb iocb;
@@ -141,6 +131,138 @@ struct lpfc_dmabufext {
uint32_t flag;
};
+static void
+lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
+{
+ struct lpfc_dmabuf *mlast, *next_mlast;
+
+ if (mlist) {
+ list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
+ list) {
+ lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
+ list_del(&mlast->list);
+ kfree(mlast);
+ }
+ lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
+ kfree(mlist);
+ }
+ return;
+}
+
+static struct lpfc_dmabuf *
+lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
+ int outbound_buffers, struct ulp_bde64 *bpl,
+ int *bpl_entries)
+{
+ struct lpfc_dmabuf *mlist = NULL;
+ struct lpfc_dmabuf *mp;
+ unsigned int bytes_left = size;
+
+ /* Verify we can support the size specified */
+ if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE)))
+ return NULL;
+
+ /* Determine the number of dma buffers to allocate */
+ *bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 :
+ size/LPFC_BPL_SIZE);
+
+ /* Allocate dma buffer and place in BPL passed */
+ while (bytes_left) {
+ /* Allocate dma buffer */
+ mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!mp) {
+ if (mlist)
+ lpfc_free_bsg_buffers(phba, mlist);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&mp->list);
+ mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
+
+ if (!mp->virt) {
+ kfree(mp);
+ if (mlist)
+ lpfc_free_bsg_buffers(phba, mlist);
+ return NULL;
+ }
+
+ /* Queue it to a linked list */
+ if (!mlist)
+ mlist = mp;
+ else
+ list_add_tail(&mp->list, &mlist->list);
+
+ /* Add buffer to buffer pointer list */
+ if (outbound_buffers)
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ else
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+ bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
+ bpl->tus.f.bdeSize = (uint16_t)
+ (bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE :
+ bytes_left);
+ bytes_left -= bpl->tus.f.bdeSize;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ bpl++;
+ }
+ return mlist;
+}
+
+static unsigned int
+lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
+ struct fc_bsg_buffer *bsg_buffers,
+ unsigned int bytes_to_transfer, int to_buffers)
+{
+
+ struct lpfc_dmabuf *mp;
+ unsigned int transfer_bytes, bytes_copied = 0;
+ unsigned int sg_offset, dma_offset;
+ unsigned char *dma_address, *sg_address;
+ struct scatterlist *sgel;
+ LIST_HEAD(temp_list);
+
+
+ list_splice_init(&dma_buffers->list, &temp_list);
+ list_add(&dma_buffers->list, &temp_list);
+ sg_offset = 0;
+ sgel = bsg_buffers->sg_list;
+ list_for_each_entry(mp, &temp_list, list) {
+ dma_offset = 0;
+ while (bytes_to_transfer && sgel &&
+ (dma_offset < LPFC_BPL_SIZE)) {
+ dma_address = mp->virt + dma_offset;
+ if (sg_offset) {
+ /* Continue previous partial transfer of sg */
+ sg_address = sg_virt(sgel) + sg_offset;
+ transfer_bytes = sgel->length - sg_offset;
+ } else {
+ sg_address = sg_virt(sgel);
+ transfer_bytes = sgel->length;
+ }
+ if (bytes_to_transfer < transfer_bytes)
+ transfer_bytes = bytes_to_transfer;
+ if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset))
+ transfer_bytes = LPFC_BPL_SIZE - dma_offset;
+ if (to_buffers)
+ memcpy(dma_address, sg_address, transfer_bytes);
+ else
+ memcpy(sg_address, dma_address, transfer_bytes);
+ dma_offset += transfer_bytes;
+ sg_offset += transfer_bytes;
+ bytes_to_transfer -= transfer_bytes;
+ bytes_copied += transfer_bytes;
+ if (sg_offset >= sgel->length) {
+ sg_offset = 0;
+ sgel = sg_next(sgel);
+ }
+ }
+ }
+ list_del_init(&dma_buffers->list);
+ list_splice(&temp_list, &dma_buffers->list);
+ return bytes_copied;
+}
+
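
These two helpers are the heart of the lpfc_bsg.c rework: instead of pci_map_sg()-mapping the midlayer's scatterlist (whose pages vanish if the job times out), the driver now owns a chain of lpfc_dmabuf segments described by BPL entries, and lpfc_bsg_copy_data() stages bytes between the bsg payload and that chain in either direction. A hypothetical setup flow using the helpers, with error handling trimmed:

/* Hypothetical caller; bpl points into an already-allocated BPL page. */
static struct lpfc_dmabuf *demo_stage_request(struct lpfc_hba *phba,
					      struct fc_bsg_job *job,
					      struct ulp_bde64 *bpl)
{
	int entries = LPFC_BPL_SIZE / sizeof(struct ulp_bde64);
	struct lpfc_dmabuf *cmp;

	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1 /* outbound */, bpl, &entries);
	if (!cmp)
		return NULL;

	/* to_buffers=1: user payload -> driver-owned DMA chain; the
	 * completion handlers run the reverse copy (to_buffers=0). */
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	return cmp;	/* freed on all paths via lpfc_free_bsg_buffers() */
}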
/**
* lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
* @phba: Pointer to HBA context object.
@@ -166,62 +288,72 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
struct bsg_job_data *dd_data;
struct fc_bsg_job *job;
IOCB_t *rsp;
- struct lpfc_dmabuf *bmp;
+ struct lpfc_dmabuf *bmp, *cmp, *rmp;
struct lpfc_nodelist *ndlp;
struct lpfc_bsg_iocb *iocb;
unsigned long flags;
+ unsigned int rsp_size;
int rc = 0;
+ dd_data = cmdiocbq->context1;
+
+ /* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
- dd_data = cmdiocbq->context2;
- if (!dd_data) {
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- lpfc_sli_release_iocbq(phba, cmdiocbq);
- return;
+ job = dd_data->set_job;
+ if (job) {
+ /* Prevent timeout handling from trying to abort job */
+ job->dd_data = NULL;
}
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
iocb = &dd_data->context_un.iocb;
- job = iocb->set_job;
- job->dd_data = NULL; /* so timeout handler does not reply */
-
- bmp = iocb->bmp;
+ ndlp = iocb->ndlp;
+ rmp = iocb->rmp;
+ cmp = cmdiocbq->context2;
+ bmp = cmdiocbq->context3;
rsp = &rspiocbq->iocb;
- ndlp = cmdiocbq->context1;
- pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
- job->request_payload.sg_cnt, DMA_TO_DEVICE);
- pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
- job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ /* Copy the completed data or set the error status */
- if (rsp->ulpStatus) {
- if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
- case IOERR_SEQUENCE_TIMEOUT:
- rc = -ETIMEDOUT;
- break;
- case IOERR_INVALID_RPI:
- rc = -EFAULT;
- break;
- default:
+ if (job) {
+ if (rsp->ulpStatus) {
+ if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+ switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
+ case IOERR_SEQUENCE_TIMEOUT:
+ rc = -ETIMEDOUT;
+ break;
+ case IOERR_INVALID_RPI:
+ rc = -EFAULT;
+ break;
+ default:
+ rc = -EACCES;
+ break;
+ }
+ } else {
rc = -EACCES;
- break;
}
- } else
- rc = -EACCES;
- } else
- job->reply->reply_payload_rcv_len =
- rsp->un.genreq64.bdl.bdeSize;
+ } else {
+ rsp_size = rsp->un.genreq64.bdl.bdeSize;
+ job->reply->reply_payload_rcv_len =
+ lpfc_bsg_copy_data(rmp, &job->reply_payload,
+ rsp_size, 0);
+ }
+ }
+ lpfc_free_bsg_buffers(phba, cmp);
+ lpfc_free_bsg_buffers(phba, rmp);
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ kfree(bmp);
lpfc_sli_release_iocbq(phba, cmdiocbq);
lpfc_nlp_put(ndlp);
- kfree(bmp);
kfree(dd_data);
- /* make error code available to userspace */
- job->reply->result = rc;
- /* complete the job back to userspace */
- job->job_done(job);
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ /* Complete the job if the job is still active */
+
+ if (job) {
+ job->reply->result = rc;
+ job->job_done(job);
+ }
return;
}
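
The pattern above recurs in every reworked completion handler in this patch: take ct_ev_lock, read dd_data->set_job, and clear job->dd_data under the same lock, so the completion path and the bsg timeout handler race for the job exactly once and the loser backs off. A self-contained model of the handshake with simplified stand-in types:

#include <linux/spinlock.h>

struct demo_job { void *dd_data; };		/* stand-in for fc_bsg_job */
struct demo_dd { struct demo_job *set_job; };	/* stand-in for bsg_job_data */

static DEFINE_SPINLOCK(demo_ev_lock);		/* stand-in for ct_ev_lock */

static struct demo_job *demo_claim_job(struct demo_dd *dd)
{
	struct demo_job *job;
	unsigned long flags;

	spin_lock_irqsave(&demo_ev_lock, flags);
	job = dd->set_job;
	if (job)
		job->dd_data = NULL;	/* timeout handler now backs off */
	spin_unlock_irqrestore(&demo_ev_lock, flags);

	return job;	/* NULL means the job was already aborted */
}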
@@ -240,12 +372,9 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
uint32_t timeout;
struct lpfc_iocbq *cmdiocbq = NULL;
IOCB_t *cmd;
- struct lpfc_dmabuf *bmp = NULL;
+ struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
int request_nseg;
int reply_nseg;
- struct scatterlist *sgel = NULL;
- int numbde;
- dma_addr_t busaddr;
struct bsg_job_data *dd_data;
uint32_t creg_val;
int rc = 0;
@@ -268,54 +397,50 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
goto no_ndlp;
}
- bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!bmp) {
- rc = -ENOMEM;
- goto free_ndlp;
- }
-
if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
rc = -ENODEV;
- goto free_bmp;
+ goto free_ndlp;
}
cmdiocbq = lpfc_sli_get_iocbq(phba);
if (!cmdiocbq) {
rc = -ENOMEM;
- goto free_bmp;
+ goto free_ndlp;
}
cmd = &cmdiocbq->iocb;
+
+ bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!bmp) {
+ rc = -ENOMEM;
+ goto free_cmdiocbq;
+ }
bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
if (!bmp->virt) {
rc = -ENOMEM;
- goto free_cmdiocbq;
+ goto free_bmp;
}
INIT_LIST_HEAD(&bmp->list);
+
bpl = (struct ulp_bde64 *) bmp->virt;
- request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
- job->request_payload.sg_cnt, DMA_TO_DEVICE);
- for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
- busaddr = sg_dma_address(sgel);
- bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- bpl->tus.f.bdeSize = sg_dma_len(sgel);
- bpl->tus.w = cpu_to_le32(bpl->tus.w);
- bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
- bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
- bpl++;
+ request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
+ cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
+ 1, bpl, &request_nseg);
+ if (!cmp) {
+ rc = -ENOMEM;
+ goto free_bmp;
}
+ lpfc_bsg_copy_data(cmp, &job->request_payload,
+ job->request_payload.payload_len, 1);
- reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
- job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
- for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
- busaddr = sg_dma_address(sgel);
- bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
- bpl->tus.f.bdeSize = sg_dma_len(sgel);
- bpl->tus.w = cpu_to_le32(bpl->tus.w);
- bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
- bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
- bpl++;
+ bpl += request_nseg;
+ reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
+ rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
+ bpl, &reply_nseg);
+ if (!rmp) {
+ rc = -ENOMEM;
+ goto free_cmp;
}
cmd->un.genreq64.bdl.ulpIoTag32 = 0;
@@ -343,17 +468,20 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
cmd->ulpTimeout = timeout;
cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
- cmdiocbq->context1 = ndlp;
- cmdiocbq->context2 = dd_data;
+ cmdiocbq->context1 = dd_data;
+ cmdiocbq->context2 = cmp;
+ cmdiocbq->context3 = bmp;
dd_data->type = TYPE_IOCB;
+ dd_data->set_job = job;
dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
- dd_data->context_un.iocb.set_job = job;
- dd_data->context_un.iocb.bmp = bmp;
+ dd_data->context_un.iocb.ndlp = ndlp;
+ dd_data->context_un.iocb.rmp = rmp;
+ job->dd_data = dd_data;
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
if (lpfc_readl(phba->HCregaddr, &creg_val)) {
rc = -EIO ;
- goto free_cmdiocbq;
+ goto free_rmp;
}
creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
@@ -368,19 +496,18 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
else
rc = -EIO;
-
/* iocb failed so cleanup */
- pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
- job->request_payload.sg_cnt, DMA_TO_DEVICE);
- pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
- job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
-
-free_cmdiocbq:
- lpfc_sli_release_iocbq(phba, cmdiocbq);
+free_rmp:
+ lpfc_free_bsg_buffers(phba, rmp);
+free_cmp:
+ lpfc_free_bsg_buffers(phba, cmp);
free_bmp:
+ if (bmp->virt)
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
kfree(bmp);
+free_cmdiocbq:
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
free_ndlp:
lpfc_nlp_put(ndlp);
no_ndlp:
@@ -418,67 +545,68 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
struct fc_bsg_job *job;
IOCB_t *rsp;
struct lpfc_nodelist *ndlp;
- struct lpfc_dmabuf *pbuflist = NULL;
+ struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
struct fc_bsg_ctels_reply *els_reply;
uint8_t *rjt_data;
unsigned long flags;
+ unsigned int rsp_size;
int rc = 0;
- spin_lock_irqsave(&phba->ct_ev_lock, flags);
dd_data = cmdiocbq->context1;
- /* normal completion and timeout crossed paths, already done */
- if (!dd_data) {
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- return;
- }
+ ndlp = dd_data->context_un.iocb.ndlp;
+ cmdiocbq->context1 = ndlp;
- cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
- if (cmdiocbq->context2 && rspiocbq)
- memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
- &rspiocbq->iocb, sizeof(IOCB_t));
+ /* Determine if job has been aborted */
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ job = dd_data->set_job;
+ if (job) {
+ /* Prevent timeout handling from trying to abort job */
+ job->dd_data = NULL;
+ }
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- job = dd_data->context_un.iocb.set_job;
- cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
- rspiocbq = dd_data->context_un.iocb.rspiocbq;
rsp = &rspiocbq->iocb;
- ndlp = dd_data->context_un.iocb.ndlp;
+ pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
+ prsp = (struct lpfc_dmabuf *)pcmd->list.next;
- pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
- job->request_payload.sg_cnt, DMA_TO_DEVICE);
- pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
- job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ /* Copy the completed job data or determine the job status if job is
+ * still active
+ */
- if (job->reply->result == -EAGAIN)
- rc = -EAGAIN;
- else if (rsp->ulpStatus == IOSTAT_SUCCESS)
- job->reply->reply_payload_rcv_len =
- rsp->un.elsreq64.bdl.bdeSize;
- else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
- job->reply->reply_payload_rcv_len =
- sizeof(struct fc_bsg_ctels_reply);
- /* LS_RJT data returned in word 4 */
- rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
- els_reply = &job->reply->reply_data.ctels_reply;
- els_reply->status = FC_CTELS_STATUS_REJECT;
- els_reply->rjt_data.action = rjt_data[3];
- els_reply->rjt_data.reason_code = rjt_data[2];
- els_reply->rjt_data.reason_explanation = rjt_data[1];
- els_reply->rjt_data.vendor_unique = rjt_data[0];
- } else
- rc = -EIO;
+ if (job) {
+ if (rsp->ulpStatus == IOSTAT_SUCCESS) {
+ rsp_size = rsp->un.elsreq64.bdl.bdeSize;
+ job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ prsp->virt,
+ rsp_size);
+ } else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
+ job->reply->reply_payload_rcv_len =
+ sizeof(struct fc_bsg_ctels_reply);
+ /* LS_RJT data returned in word 4 */
+ rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
+ els_reply = &job->reply->reply_data.ctels_reply;
+ els_reply->status = FC_CTELS_STATUS_REJECT;
+ els_reply->rjt_data.action = rjt_data[3];
+ els_reply->rjt_data.reason_code = rjt_data[2];
+ els_reply->rjt_data.reason_explanation = rjt_data[1];
+ els_reply->rjt_data.vendor_unique = rjt_data[0];
+ } else {
+ rc = -EIO;
+ }
+ }
- pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
- lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
- lpfc_sli_release_iocbq(phba, rspiocbq);
- lpfc_sli_release_iocbq(phba, cmdiocbq);
lpfc_nlp_put(ndlp);
+ lpfc_els_free_iocb(phba, cmdiocbq);
kfree(dd_data);
- /* make error code available to userspace */
- job->reply->result = rc;
- job->dd_data = NULL;
- /* complete the job back to userspace */
- job->job_done(job);
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ /* Complete the job if the job is still active */
+
+ if (job) {
+ job->reply->result = rc;
+ job->job_done(job);
+ }
return;
}
@@ -496,19 +624,8 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
uint32_t elscmd;
uint32_t cmdsize;
uint32_t rspsize;
- struct lpfc_iocbq *rspiocbq;
struct lpfc_iocbq *cmdiocbq;
- IOCB_t *rsp;
uint16_t rpi = 0;
- struct lpfc_dmabuf *pcmd;
- struct lpfc_dmabuf *prsp;
- struct lpfc_dmabuf *pbuflist = NULL;
- struct ulp_bde64 *bpl;
- int request_nseg;
- int reply_nseg;
- struct scatterlist *sgel = NULL;
- int numbde;
- dma_addr_t busaddr;
struct bsg_job_data *dd_data;
uint32_t creg_val;
int rc = 0;
@@ -516,6 +633,15 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
/* in case no data is transferred */
job->reply->reply_payload_rcv_len = 0;
+ /* verify the els command is not greater than the
+ * maximum ELS transfer size.
+ */
+
+ if (job->request_payload.payload_len > FCELSSIZE) {
+ rc = -EINVAL;
+ goto no_dd_data;
+ }
+
/* allocate our bsg tracking structure */
dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
if (!dd_data) {
@@ -525,88 +651,51 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
goto no_dd_data;
}
- if (!lpfc_nlp_get(ndlp)) {
- rc = -ENODEV;
- goto free_dd_data;
- }
-
elscmd = job->request->rqst_data.r_els.els_code;
cmdsize = job->request_payload.payload_len;
rspsize = job->reply_payload.payload_len;
- rspiocbq = lpfc_sli_get_iocbq(phba);
- if (!rspiocbq) {
- lpfc_nlp_put(ndlp);
- rc = -ENOMEM;
+
+ if (!lpfc_nlp_get(ndlp)) {
+ rc = -ENODEV;
goto free_dd_data;
}
- rsp = &rspiocbq->iocb;
- rpi = ndlp->nlp_rpi;
+ /* Use the dma buffers allocated by lpfc_prep_els_iocb for the command
+ * and response, so that if the job times out and the request is freed
+ * we won't DMA into memory that is no longer allocated to the request.
+ */
cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
ndlp->nlp_DID, elscmd);
if (!cmdiocbq) {
rc = -EIO;
- goto free_rspiocbq;
+ goto release_ndlp;
}
- /* prep els iocb set context1 to the ndlp, context2 to the command
- * dmabuf, context3 holds the data dmabuf
- */
- pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
- prsp = (struct lpfc_dmabuf *) pcmd->list.next;
- lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
- kfree(pcmd);
- lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
- kfree(prsp);
- cmdiocbq->context2 = NULL;
-
- pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
- bpl = (struct ulp_bde64 *) pbuflist->virt;
-
- request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
- job->request_payload.sg_cnt, DMA_TO_DEVICE);
- for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
- busaddr = sg_dma_address(sgel);
- bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- bpl->tus.f.bdeSize = sg_dma_len(sgel);
- bpl->tus.w = cpu_to_le32(bpl->tus.w);
- bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
- bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
- bpl++;
- }
+ rpi = ndlp->nlp_rpi;
+
+ /* Transfer the request payload to allocated command dma buffer */
+
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
+ cmdsize);
- reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
- job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
- for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
- busaddr = sg_dma_address(sgel);
- bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
- bpl->tus.f.bdeSize = sg_dma_len(sgel);
- bpl->tus.w = cpu_to_le32(bpl->tus.w);
- bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
- bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
- bpl++;
- }
- cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
- (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
if (phba->sli_rev == LPFC_SLI_REV4)
cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
else
cmdiocbq->iocb.ulpContext = rpi;
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
- cmdiocbq->context1 = NULL;
- cmdiocbq->context2 = NULL;
-
- cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
cmdiocbq->context1 = dd_data;
cmdiocbq->context_un.ndlp = ndlp;
- cmdiocbq->context2 = rspiocbq;
+ cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
dd_data->type = TYPE_IOCB;
+ dd_data->set_job = job;
dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
- dd_data->context_un.iocb.rspiocbq = rspiocbq;
- dd_data->context_un.iocb.set_job = job;
- dd_data->context_un.iocb.bmp = NULL;
dd_data->context_un.iocb.ndlp = ndlp;
+ dd_data->context_un.iocb.rmp = NULL;
+ job->dd_data = dd_data;
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
if (lpfc_readl(phba->HCregaddr, &creg_val)) {
@@ -617,8 +706,9 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
writel(creg_val, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
}
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
- lpfc_nlp_put(ndlp);
+
if (rc == IOCB_SUCCESS)
return 0; /* done for now */
else if (rc == IOCB_BUSY)
@@ -627,17 +717,12 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
rc = -EIO;
linkdown_err:
- pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
- job->request_payload.sg_cnt, DMA_TO_DEVICE);
- pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
- job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
- lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
-
- lpfc_sli_release_iocbq(phba, cmdiocbq);
+ cmdiocbq->context1 = ndlp;
+ lpfc_els_free_iocb(phba, cmdiocbq);
-free_rspiocbq:
- lpfc_sli_release_iocbq(phba, rspiocbq);
+release_ndlp:
+ lpfc_nlp_put(ndlp);
free_dd_data:
kfree(dd_data);
@@ -680,6 +765,7 @@ lpfc_bsg_event_free(struct kref *kref)
kfree(ed);
}
+ kfree(evt->dd_data);
kfree(evt);
}
@@ -723,6 +809,7 @@ lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
evt->req_id = ev_req_id;
evt->reg_id = ev_reg_id;
evt->wait_time_stamp = jiffies;
+ evt->dd_data = NULL;
init_waitqueue_head(&evt->wq);
kref_init(&evt->kref);
return evt;
@@ -790,6 +877,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_hbq_entry *hbqe;
struct lpfc_sli_ct_request *ct_req;
struct fc_bsg_job *job = NULL;
+ struct bsg_job_data *dd_data = NULL;
unsigned long flags;
int size = 0;
@@ -986,10 +1074,11 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
list_move(evt->events_to_see.prev, &evt->events_to_get);
- lpfc_bsg_event_unref(evt);
- job = evt->set_job;
- evt->set_job = NULL;
+ dd_data = (struct bsg_job_data *)evt->dd_data;
+ job = dd_data->set_job;
+ dd_data->set_job = NULL;
+ lpfc_bsg_event_unref(evt);
if (job) {
job->reply->reply_payload_rcv_len = size;
/* make error code available to userspace */
@@ -1078,14 +1167,6 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
goto job_error;
}
- dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
- if (dd_data == NULL) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2734 Failed allocation of dd_data\n");
- rc = -ENOMEM;
- goto job_error;
- }
-
event_req = (struct set_ct_event *)
job->request->rqst_data.h_vendor.vendor_cmd;
ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
@@ -1095,6 +1176,7 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
if (evt->reg_id == event_req->ev_reg_id) {
lpfc_bsg_event_ref(evt);
evt->wait_time_stamp = jiffies;
+ dd_data = (struct bsg_job_data *)evt->dd_data;
break;
}
}
@@ -1102,6 +1184,13 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
if (&evt->node == &phba->ct_ev_waiters) {
/* no event waiting struct yet - first call */
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (dd_data == NULL) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2734 Failed allocation of dd_data\n");
+ rc = -ENOMEM;
+ goto job_error;
+ }
evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
event_req->ev_req_id);
if (!evt) {
@@ -1111,7 +1200,10 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
rc = -ENOMEM;
goto job_error;
}
-
+ dd_data->type = TYPE_EVT;
+ dd_data->set_job = NULL;
+ dd_data->context_un.evt = evt;
+ evt->dd_data = (void *)dd_data;
spin_lock_irqsave(&phba->ct_ev_lock, flags);
list_add(&evt->node, &phba->ct_ev_waiters);
lpfc_bsg_event_ref(evt);
@@ -1121,9 +1213,7 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
spin_lock_irqsave(&phba->ct_ev_lock, flags);
evt->waiting = 1;
- dd_data->type = TYPE_EVT;
- dd_data->context_un.evt = evt;
- evt->set_job = job; /* for unsolicited command */
+ dd_data->set_job = job; /* for unsolicited command */
job->dd_data = dd_data; /* for fc transport timeout callback*/
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
return 0; /* call job done later */
@@ -1252,57 +1342,64 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
struct bsg_job_data *dd_data;
struct fc_bsg_job *job;
IOCB_t *rsp;
- struct lpfc_dmabuf *bmp;
+ struct lpfc_dmabuf *bmp, *cmp;
struct lpfc_nodelist *ndlp;
unsigned long flags;
int rc = 0;
+ dd_data = cmdiocbq->context1;
+
+ /* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
- dd_data = cmdiocbq->context2;
- /* normal completion and timeout crossed paths, already done */
- if (!dd_data) {
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- return;
+ job = dd_data->set_job;
+ if (job) {
+ /* Prevent timeout handling from trying to abort job */
+ job->dd_data = NULL;
}
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- job = dd_data->context_un.iocb.set_job;
- bmp = dd_data->context_un.iocb.bmp;
- rsp = &rspiocbq->iocb;
ndlp = dd_data->context_un.iocb.ndlp;
+ cmp = cmdiocbq->context2;
+ bmp = cmdiocbq->context3;
+ rsp = &rspiocbq->iocb;
- pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
- job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ /* Copy the completed job data or set the error status */
- if (rsp->ulpStatus) {
- if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
- case IOERR_SEQUENCE_TIMEOUT:
- rc = -ETIMEDOUT;
- break;
- case IOERR_INVALID_RPI:
- rc = -EFAULT;
- break;
- default:
+ if (job) {
+ if (rsp->ulpStatus) {
+ if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+ switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
+ case IOERR_SEQUENCE_TIMEOUT:
+ rc = -ETIMEDOUT;
+ break;
+ case IOERR_INVALID_RPI:
+ rc = -EFAULT;
+ break;
+ default:
+ rc = -EACCES;
+ break;
+ }
+ } else {
rc = -EACCES;
- break;
}
- } else
- rc = -EACCES;
- } else
- job->reply->reply_payload_rcv_len =
- rsp->un.genreq64.bdl.bdeSize;
+ } else {
+ job->reply->reply_payload_rcv_len = 0;
+ }
+ }
+ lpfc_free_bsg_buffers(phba, cmp);
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ kfree(bmp);
lpfc_sli_release_iocbq(phba, cmdiocbq);
lpfc_nlp_put(ndlp);
- kfree(bmp);
kfree(dd_data);
- /* make error code available to userspace */
- job->reply->result = rc;
- job->dd_data = NULL;
- /* complete the job back to userspace */
- job->job_done(job);
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ /* Complete the job if the job is still active */
+
+ if (job) {
+ job->reply->result = rc;
+ job->job_done(job);
+ }
return;
}
@@ -1316,7 +1413,8 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
**/
static int
lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
- struct lpfc_dmabuf *bmp, int num_entry)
+ struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
+ int num_entry)
{
IOCB_t *icmd;
struct lpfc_iocbq *ctiocb = NULL;
@@ -1377,7 +1475,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
/* Check if the ndlp is active */
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- rc = -IOCB_ERROR;
+ rc = IOCB_ERROR;
goto issue_ct_rsp_exit;
}
@@ -1385,7 +1483,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
* we respond
*/
if (!lpfc_nlp_get(ndlp)) {
- rc = -IOCB_ERROR;
+ rc = IOCB_ERROR;
goto issue_ct_rsp_exit;
}
@@ -1407,17 +1505,17 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
ctiocb->iocb_cmpl = NULL;
ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
ctiocb->vport = phba->pport;
+ ctiocb->context1 = dd_data;
+ ctiocb->context2 = cmp;
ctiocb->context3 = bmp;
-
ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
- ctiocb->context2 = dd_data;
- ctiocb->context1 = ndlp;
+
dd_data->type = TYPE_IOCB;
+ dd_data->set_job = job;
dd_data->context_un.iocb.cmdiocbq = ctiocb;
- dd_data->context_un.iocb.rspiocbq = NULL;
- dd_data->context_un.iocb.set_job = job;
- dd_data->context_un.iocb.bmp = bmp;
dd_data->context_un.iocb.ndlp = ndlp;
+ dd_data->context_un.iocb.rmp = NULL;
+ job->dd_data = dd_data;
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
if (lpfc_readl(phba->HCregaddr, &creg_val)) {
@@ -1454,11 +1552,8 @@ lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
job->request->rqst_data.h_vendor.vendor_cmd;
struct ulp_bde64 *bpl;
- struct lpfc_dmabuf *bmp = NULL;
- struct scatterlist *sgel = NULL;
- int request_nseg;
- int numbde;
- dma_addr_t busaddr;
+ struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
+ int bpl_entries;
uint32_t tag = mgmt_resp->tag;
unsigned long reqbfrcnt =
(unsigned long)job->request_payload.payload_len;
@@ -1486,30 +1581,28 @@ lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
INIT_LIST_HEAD(&bmp->list);
bpl = (struct ulp_bde64 *) bmp->virt;
- request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
- job->request_payload.sg_cnt, DMA_TO_DEVICE);
- for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
- busaddr = sg_dma_address(sgel);
- bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- bpl->tus.f.bdeSize = sg_dma_len(sgel);
- bpl->tus.w = cpu_to_le32(bpl->tus.w);
- bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
- bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
- bpl++;
+ bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64));
+ cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
+ 1, bpl, &bpl_entries);
+ if (!cmp) {
+ rc = -ENOMEM;
+ goto send_mgmt_rsp_free_bmp;
}
+ lpfc_bsg_copy_data(cmp, &job->request_payload,
+ job->request_payload.payload_len, 1);
- rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg);
+ rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries);
if (rc == IOCB_SUCCESS)
return 0; /* done for now */
- /* TBD need to handle a timeout */
- pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
- job->request_payload.sg_cnt, DMA_TO_DEVICE);
rc = -EACCES;
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+
+ lpfc_free_bsg_buffers(phba, cmp);
send_mgmt_rsp_free_bmp:
+ if (bmp->virt)
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
kfree(bmp);
send_mgmt_rsp_exit:
/* make error code available to userspace */
@@ -1559,7 +1652,7 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
scsi_block_requests(shost);
}
- while (pring->txcmplq_cnt) {
+ while (!list_empty(&pring->txcmplq)) {
if (i++ > 500) /* wait up to 5 seconds */
break;
msleep(10);
@@ -3193,13 +3286,7 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
unsigned long flags;
uint8_t *pmb, *pmb_buf;
- spin_lock_irqsave(&phba->ct_ev_lock, flags);
dd_data = pmboxq->context1;
- /* job already timed out? */
- if (!dd_data) {
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- return;
- }
/*
* The outgoing buffer is readily referred from the dma buffer,
@@ -3209,29 +3296,33 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
- job = dd_data->context_un.mbox.set_job;
+ /* Determine if job has been aborted */
+
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ job = dd_data->set_job;
+ if (job) {
+ /* Prevent timeout handling from trying to abort job */
+ job->dd_data = NULL;
+ }
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ /* Copy the mailbox data to the job if it is still active */
+
if (job) {
size = job->reply_payload.payload_len;
job->reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
pmb_buf, size);
- /* need to hold the lock until we set job->dd_data to NULL
- * to hold off the timeout handler returning to the mid-layer
- * while we are still processing the job.
- */
- job->dd_data = NULL;
- dd_data->context_un.mbox.set_job = NULL;
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- } else {
- dd_data->context_un.mbox.set_job = NULL;
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
}
+ dd_data->set_job = NULL;
mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
kfree(dd_data);
+ /* Complete the job if the job is still active */
+
if (job) {
job->reply->result = 0;
job->job_done(job);
@@ -3377,19 +3468,22 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
struct lpfc_sli_config_mbox *sli_cfg_mbx;
uint8_t *pmbx;
- spin_lock_irqsave(&phba->ct_ev_lock, flags);
dd_data = pmboxq->context1;
- /* has the job already timed out? */
- if (!dd_data) {
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- job = NULL;
- goto job_done_out;
+
+ /* Determine if job has been aborted */
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ job = dd_data->set_job;
+ if (job) {
+ /* Prevent timeout handling from trying to abort job */
+ job->dd_data = NULL;
}
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
/*
* The outgoing buffer is readily referred from the dma buffer,
* just need to get header part from mailboxq structure.
*/
+
pmb = (uint8_t *)&pmboxq->u.mb;
pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
/* Copy the byte swapped response mailbox back to the user */
@@ -3406,21 +3500,18 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
}
- job = dd_data->context_un.mbox.set_job;
+ /* Complete the job if the job is still active */
+
if (job) {
size = job->reply_payload.payload_len;
job->reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
pmb_buf, size);
+
/* result for successful */
job->reply->result = 0;
- job->dd_data = NULL;
- /* need to hold the lock util we set job->dd_data to NULL
- * to hold off the timeout handler from midlayer to take
- * any action.
- */
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2937 SLI_CONFIG ext-buffer maibox command "
"(x%x/x%x) complete bsg job done, bsize:%d\n",
@@ -3431,20 +3522,18 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
phba->mbox_ext_buf_ctx.mboxType,
dma_ebuf, sta_pos_addr,
phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
- } else
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-
-job_done_out:
- if (!job)
+ } else {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2938 SLI_CONFIG ext-buffer maibox "
"command (x%x/x%x) failure, rc:x%x\n",
phba->mbox_ext_buf_ctx.nembType,
phba->mbox_ext_buf_ctx.mboxType, rc);
+ }
+
+
/* state change */
phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
kfree(dd_data);
-
return job;
}
@@ -3461,8 +3550,10 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct fc_bsg_job *job;
+ job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
+
/* handle the BSG job with mailbox command */
- if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
+ if (!job)
pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
@@ -3470,15 +3561,13 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
"complete, ctxState:x%x, mbxStatus:x%x\n",
phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
- job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
-
if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
lpfc_bsg_mbox_ext_session_reset(phba);
/* free base driver mailbox structure memory */
mempool_free(pmboxq, phba->mbox_mem_pool);
- /* complete the bsg job if we have it */
+ /* if the job is still active, call job done */
if (job)
job->job_done(job);
@@ -3498,8 +3587,10 @@ lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
struct fc_bsg_job *job;
+ job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
+
/* handle the BSG job with the mailbox command */
- if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
+ if (!job)
pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
@@ -3507,13 +3598,11 @@ lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
"complete, ctxState:x%x, mbxStatus:x%x\n",
phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
- job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
-
/* free all memory, including dma buffers */
mempool_free(pmboxq, phba->mbox_mem_pool);
lpfc_bsg_mbox_ext_session_reset(phba);
- /* complete the bsg job if we have it */
+ /* if the job is still active, call job done */
if (job)
job->job_done(job);
@@ -3759,9 +3848,9 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* context fields to callback function */
pmboxq->context1 = dd_data;
dd_data->type = TYPE_MBOX;
+ dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
- dd_data->context_un.mbox.set_job = job;
job->dd_data = dd_data;
/* state change */
@@ -3928,14 +4017,14 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* context fields to callback function */
pmboxq->context1 = dd_data;
dd_data->type = TYPE_MBOX;
+ dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
- dd_data->context_un.mbox.set_job = job;
job->dd_data = dd_data;
/* state change */
- phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
@@ -3951,6 +4040,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
}
/* wait for additoinal external buffers */
+
job->reply->result = 0;
job->job_done(job);
return SLI_CONFIG_HANDLED;
@@ -4268,9 +4358,9 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* context fields to callback function */
pmboxq->context1 = dd_data;
dd_data->type = TYPE_MBOX;
+ dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
- dd_data->context_un.mbox.set_job = job;
job->dd_data = dd_data;
/* state change */
@@ -4455,7 +4545,6 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
uint8_t *from;
uint32_t size;
-
/* in case no data is transferred */
job->reply->reply_payload_rcv_len = 0;
@@ -4681,9 +4770,9 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* setup context field to pass wait_queue pointer to wake function */
pmboxq->context1 = dd_data;
dd_data->type = TYPE_MBOX;
+ dd_data->set_job = job;
dd_data->context_un.mbox.pmboxq = pmboxq;
dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
- dd_data->context_un.mbox.set_job = job;
dd_data->context_un.mbox.ext = ext;
dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
@@ -4797,75 +4886,79 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
struct bsg_job_data *dd_data;
struct fc_bsg_job *job;
IOCB_t *rsp;
- struct lpfc_dmabuf *bmp;
+ struct lpfc_dmabuf *bmp, *cmp, *rmp;
struct lpfc_bsg_menlo *menlo;
unsigned long flags;
struct menlo_response *menlo_resp;
+ unsigned int rsp_size;
int rc = 0;
- spin_lock_irqsave(&phba->ct_ev_lock, flags);
dd_data = cmdiocbq->context1;
- if (!dd_data) {
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- return;
- }
-
+ cmp = cmdiocbq->context2;
+ bmp = cmdiocbq->context3;
menlo = &dd_data->context_un.menlo;
- job = menlo->set_job;
- job->dd_data = NULL; /* so timeout handler does not reply */
-
- spin_lock(&phba->hbalock);
- cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
- if (cmdiocbq->context2 && rspiocbq)
- memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
- &rspiocbq->iocb, sizeof(IOCB_t));
- spin_unlock(&phba->hbalock);
-
- bmp = menlo->bmp;
- rspiocbq = menlo->rspiocbq;
+ rmp = menlo->rmp;
rsp = &rspiocbq->iocb;
- pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
- job->request_payload.sg_cnt, DMA_TO_DEVICE);
- pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
- job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ /* Determine if job has been aborted */
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ job = dd_data->set_job;
+ if (job) {
+ /* Prevent timeout handling from trying to abort job */
+ job->dd_data = NULL;
+ }
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ /* Copy the job data or set the failing status for the job */
- /* always return the xri, this would be used in the case
- * of a menlo download to allow the data to be sent as a continuation
- * of the exchange.
- */
- menlo_resp = (struct menlo_response *)
- job->reply->reply_data.vendor_reply.vendor_rsp;
- menlo_resp->xri = rsp->ulpContext;
- if (rsp->ulpStatus) {
- if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
- case IOERR_SEQUENCE_TIMEOUT:
- rc = -ETIMEDOUT;
- break;
- case IOERR_INVALID_RPI:
- rc = -EFAULT;
- break;
- default:
+ if (job) {
+ /* always return the xri, this would be used in the case
+ * of a menlo download to allow the data to be sent as a
+ * continuation of the exchange.
+ */
+
+ menlo_resp = (struct menlo_response *)
+ job->reply->reply_data.vendor_reply.vendor_rsp;
+ menlo_resp->xri = rsp->ulpContext;
+ if (rsp->ulpStatus) {
+ if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+ switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
+ case IOERR_SEQUENCE_TIMEOUT:
+ rc = -ETIMEDOUT;
+ break;
+ case IOERR_INVALID_RPI:
+ rc = -EFAULT;
+ break;
+ default:
+ rc = -EACCES;
+ break;
+ }
+ } else {
rc = -EACCES;
- break;
}
- } else
- rc = -EACCES;
- } else
- job->reply->reply_payload_rcv_len =
- rsp->un.genreq64.bdl.bdeSize;
+ } else {
+ rsp_size = rsp->un.genreq64.bdl.bdeSize;
+ job->reply->reply_payload_rcv_len =
+ lpfc_bsg_copy_data(rmp, &job->reply_payload,
+ rsp_size, 0);
+ }
+
+ }
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
- lpfc_sli_release_iocbq(phba, rspiocbq);
lpfc_sli_release_iocbq(phba, cmdiocbq);
+ lpfc_free_bsg_buffers(phba, cmp);
+ lpfc_free_bsg_buffers(phba, rmp);
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
kfree(bmp);
kfree(dd_data);
- /* make error code available to userspace */
- job->reply->result = rc;
- /* complete the job back to userspace */
- job->job_done(job);
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ /* Complete the job if active */
+
+ if (job) {
+ job->reply->result = rc;
+ job->job_done(job);
+ }
+
return;
}
@@ -4883,17 +4976,14 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- struct lpfc_iocbq *cmdiocbq, *rspiocbq;
- IOCB_t *cmd, *rsp;
+ struct lpfc_iocbq *cmdiocbq;
+ IOCB_t *cmd;
int rc = 0;
struct menlo_command *menlo_cmd;
struct menlo_response *menlo_resp;
- struct lpfc_dmabuf *bmp = NULL;
+ struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
int request_nseg;
int reply_nseg;
- struct scatterlist *sgel = NULL;
- int numbde;
- dma_addr_t busaddr;
struct bsg_job_data *dd_data;
struct ulp_bde64 *bpl = NULL;
@@ -4948,50 +5038,38 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
goto free_dd;
}
- cmdiocbq = lpfc_sli_get_iocbq(phba);
- if (!cmdiocbq) {
+ bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
+ if (!bmp->virt) {
rc = -ENOMEM;
goto free_bmp;
}
- rspiocbq = lpfc_sli_get_iocbq(phba);
- if (!rspiocbq) {
- rc = -ENOMEM;
- goto free_cmdiocbq;
- }
-
- rsp = &rspiocbq->iocb;
+ INIT_LIST_HEAD(&bmp->list);
- bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
- if (!bmp->virt) {
+ bpl = (struct ulp_bde64 *)bmp->virt;
+ request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
+ cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
+ 1, bpl, &request_nseg);
+ if (!cmp) {
rc = -ENOMEM;
- goto free_rspiocbq;
+ goto free_bmp;
}
+ lpfc_bsg_copy_data(cmp, &job->request_payload,
+ job->request_payload.payload_len, 1);
- INIT_LIST_HEAD(&bmp->list);
- bpl = (struct ulp_bde64 *) bmp->virt;
- request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
- job->request_payload.sg_cnt, DMA_TO_DEVICE);
- for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
- busaddr = sg_dma_address(sgel);
- bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- bpl->tus.f.bdeSize = sg_dma_len(sgel);
- bpl->tus.w = cpu_to_le32(bpl->tus.w);
- bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
- bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
- bpl++;
+ bpl += request_nseg;
+ reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
+ rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
+ bpl, &reply_nseg);
+ if (!rmp) {
+ rc = -ENOMEM;
+ goto free_cmp;
}
- reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
- job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
- for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
- busaddr = sg_dma_address(sgel);
- bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
- bpl->tus.f.bdeSize = sg_dma_len(sgel);
- bpl->tus.w = cpu_to_le32(bpl->tus.w);
- bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
- bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
- bpl++;
+ cmdiocbq = lpfc_sli_get_iocbq(phba);
+ if (!cmdiocbq) {
+ rc = -ENOMEM;
+ goto free_rmp;
}
cmd = &cmdiocbq->iocb;
@@ -5013,11 +5091,10 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
cmdiocbq->vport = phba->pport;
/* We want the firmware to timeout before we do */
cmd->ulpTimeout = MENLO_TIMEOUT - 5;
- cmdiocbq->context3 = bmp;
- cmdiocbq->context2 = rspiocbq;
cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
cmdiocbq->context1 = dd_data;
- cmdiocbq->context2 = rspiocbq;
+ cmdiocbq->context2 = cmp;
+ cmdiocbq->context3 = bmp;
if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
cmd->ulpPU = MENLO_PU; /* 3 */
@@ -5031,29 +5108,25 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
}
dd_data->type = TYPE_MENLO;
+ dd_data->set_job = job;
dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
- dd_data->context_un.menlo.rspiocbq = rspiocbq;
- dd_data->context_un.menlo.set_job = job;
- dd_data->context_un.menlo.bmp = bmp;
+ dd_data->context_un.menlo.rmp = rmp;
+ job->dd_data = dd_data;
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
MENLO_TIMEOUT - 5);
if (rc == IOCB_SUCCESS)
return 0; /* done for now */
- /* iocb failed so cleanup */
- pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
- job->request_payload.sg_cnt, DMA_TO_DEVICE);
- pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
- job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
-
-free_rspiocbq:
- lpfc_sli_release_iocbq(phba, rspiocbq);
-free_cmdiocbq:
lpfc_sli_release_iocbq(phba, cmdiocbq);
+
+free_rmp:
+ lpfc_free_bsg_buffers(phba, rmp);
+free_cmp:
+ lpfc_free_bsg_buffers(phba, cmp);
free_bmp:
+ if (bmp->virt)
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
kfree(bmp);
free_dd:
kfree(dd_data);
@@ -5162,70 +5235,94 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb;
- struct lpfc_bsg_event *evt;
- struct lpfc_bsg_iocb *iocb;
- struct lpfc_bsg_mbox *mbox;
- struct lpfc_bsg_menlo *menlo;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
struct bsg_job_data *dd_data;
unsigned long flags;
+ int rc = 0;
+ LIST_HEAD(completions);
+ struct lpfc_iocbq *check_iocb, *next_iocb;
+
+ /* If the job's driver data is NULL, the command completed or is in
+ * the process of completing. In this case, return status to the
+ * request so the timeout is retried. This avoids double completion
+ * issues, and the request will be pulled off the timer queue when
+ * the command's completion handler executes. Otherwise, prevent the
+ * command's completion handler from executing the job done callback
+ * and continue processing to abort the outstanding command.
+ */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
dd_data = (struct bsg_job_data *)job->dd_data;
- /* timeout and completion crossed paths if no dd_data */
- if (!dd_data) {
+ if (dd_data) {
+ dd_data->set_job = NULL;
+ job->dd_data = NULL;
+ } else {
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- return 0;
+ return -EAGAIN;
}
switch (dd_data->type) {
case TYPE_IOCB:
- iocb = &dd_data->context_un.iocb;
- cmdiocb = iocb->cmdiocbq;
- /* hint to completion handler that the job timed out */
- job->reply->result = -EAGAIN;
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- /* this will call our completion handler */
+ /* Check whether the IOCB was issued to the port. If not, remove
+ * it from the txq and cancel it via lpfc_sli_cancel_iocbs.
+ * Otherwise, abort it via lpfc_sli_issue_abort_iotag.
+ */
+
+ cmdiocb = dd_data->context_un.iocb.cmdiocbq;
spin_lock_irq(&phba->hbalock);
- lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
+ list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
+ list) {
+ if (check_iocb == cmdiocb) {
+ list_move_tail(&check_iocb->list, &completions);
+ break;
+ }
+ }
+ if (list_empty(&completions))
+ lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ if (!list_empty(&completions)) {
+ lpfc_sli_cancel_iocbs(phba, &completions,
+ IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_ABORTED);
+ }
break;
+
case TYPE_EVT:
- evt = dd_data->context_un.evt;
- /* this event has no job anymore */
- evt->set_job = NULL;
- job->dd_data = NULL;
- job->reply->reply_payload_rcv_len = 0;
- /* Return -EAGAIN which is our way of signallying the
- * app to retry.
- */
- job->reply->result = -EAGAIN;
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- job->job_done(job);
break;
+
case TYPE_MBOX:
- mbox = &dd_data->context_un.mbox;
- /* this mbox has no job anymore */
- mbox->set_job = NULL;
- job->dd_data = NULL;
- job->reply->reply_payload_rcv_len = 0;
- job->reply->result = -EAGAIN;
- /* the mbox completion handler can now be run */
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- job->job_done(job);
+ /* Update the ext buf ctx state if needed */
+
if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
break;
case TYPE_MENLO:
- menlo = &dd_data->context_un.menlo;
- cmdiocb = menlo->cmdiocbq;
- /* hint to completion handler that the job timed out */
- job->reply->result = -EAGAIN;
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- /* this will call our completion handler */
+ /* Check whether the IOCB was issued to the port. If not, remove
+ * it from the txq and cancel it via lpfc_sli_cancel_iocbs.
+ * Otherwise, abort it via lpfc_sli_issue_abort_iotag.
+ */
+
+ cmdiocb = dd_data->context_un.menlo.cmdiocbq;
spin_lock_irq(&phba->hbalock);
- lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
+ list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
+ list) {
+ if (check_iocb == cmdiocb) {
+ list_move_tail(&check_iocb->list, &completions);
+ break;
+ }
+ }
+ if (list_empty(&completions))
+ lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ if (!list_empty(&completions)) {
+ lpfc_sli_cancel_iocbs(phba, &completions,
+ IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_ABORTED);
+ }
break;
default:
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
@@ -5236,5 +5333,5 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
* otherwise an error message will be displayed on the console
* so always return success (zero)
*/
- return 0;
+ return rc;
}
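
lpfc_bsg_timeout() above separates two cases: an IOCB still parked on the txq was never issued to the port and can simply be cancelled locally, while one already in flight needs lpfc_sli_issue_abort_iotag. A generic sketch of the txq test (steal_if_queued is a hypothetical helper name; the caller is assumed to hold the lock protecting the queue):

	#include <linux/list.h>
	#include <linux/types.h>

	static bool steal_if_queued(struct list_head *q, struct list_head *entry,
				    struct list_head *out)
	{
		struct list_head *pos, *n;

		list_for_each_safe(pos, n, q) {
			if (pos == entry) {
				list_move_tail(pos, out);
				return true;	/* never issued: cancel locally */
			}
		}
		return false;		/* in flight: abort at the port */
	}
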
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 76ca65dae781..7631893ae005 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -106,6 +106,7 @@ void lpfc_cleanup_discovery_resources(struct lpfc_vport *);
void lpfc_cleanup(struct lpfc_vport *);
void lpfc_disc_timeout(unsigned long);
+int lpfc_unregister_fcf_prep(struct lpfc_hba *);
struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
void lpfc_worker_wake_up(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 08d156a9094f..bbed8471bf0b 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -484,6 +484,7 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
vport->port_state = LPFC_FABRIC_CFG_LINK;
memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
+
mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
mboxq->vport = vport;
mboxq->context1 = dmabuf;
@@ -700,6 +701,20 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
}
+ /*
+ * For FC we need to do some special processing because of the SLI
+ * Port's default settings of the Common Service Parameters.
+ */
+ if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) {
+ /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
+ if ((phba->sli_rev == LPFC_SLI_REV4) && fabric_param_changed)
+ lpfc_unregister_fcf_prep(phba);
+
+ /* This should just update the VFI CSPs */
+ if (vport->fc_flag & FC_VFI_REGISTERED)
+ lpfc_issue_reg_vfi(vport);
+ }
+
if (fabric_param_changed &&
!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
@@ -6225,7 +6240,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
spin_unlock_irq(&phba->hbalock);
}
- if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
+ if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq))
mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
}
@@ -6279,7 +6294,6 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
continue;
list_move_tail(&piocb->list, &completions);
- pring->txq_cnt--;
}
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
@@ -6339,7 +6353,6 @@ lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
cmd->ulpCommand == CMD_ABORT_XRI_CN)
continue;
list_move_tail(&piocb->list, &completions);
- pring->txq_cnt--;
}
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
if (piocb->iocb_flag & LPFC_IO_LIBDFC)
@@ -8065,7 +8078,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
rxid, 1);
/* Check if TXQ queue needs to be serviced */
- if (pring->txq_cnt)
+ if (!(list_empty(&pring->txq)))
lpfc_worker_wake_up(phba);
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index bfda18467ee6..326e05a65a73 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -691,12 +691,15 @@ lpfc_work_done(struct lpfc_hba *phba)
/* Set the lpfc data pending flag */
set_bit(LPFC_DATA_READY, &phba->data_flags);
} else {
- pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
- lpfc_sli_handle_slow_ring_event(phba, pring,
- (status &
- HA_RXMASK));
+ if (phba->link_state >= LPFC_LINK_UP) {
+ pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
+ lpfc_sli_handle_slow_ring_event(phba, pring,
+ (status &
+ HA_RXMASK));
+ }
}
- if ((phba->sli_rev == LPFC_SLI_REV4) && pring->txq_cnt)
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ (!list_empty(&pring->txq)))
lpfc_drain_txq(phba);
/*
* Turn on Ring interrupts
@@ -1792,6 +1795,8 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
virt_addr = mboxq->sge_array->addr[0];
shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
+ lpfc_sli_pcimem_bcopy(shdr, shdr,
+ sizeof(union lpfc_sli4_cfg_shdr));
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status) {
@@ -2888,6 +2893,11 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
goto out_free_mem;
}
+
+ /* If the VFI is already registered, there is nothing else to do */
+ if (vport->fc_flag & FC_VFI_REGISTERED)
+ goto out_free_mem;
+
/* The VPI is implicitly registered when the VFI is registered */
spin_lock_irq(shost->host_lock);
vport->vpi_state |= LPFC_VPI_REGISTERED;
@@ -2980,6 +2990,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
struct lpfc_dmabuf *mp;
int rc;
struct fcf_record *fcf_record;
+ uint32_t fc_flags = 0;
spin_lock_irq(&phba->hbalock);
switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
@@ -3011,11 +3022,8 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
"1309 Link Up Event npiv not supported in loop "
"topology\n");
/* Get Loop Map information */
- if (bf_get(lpfc_mbx_read_top_il, la)) {
- spin_lock(shost->host_lock);
- vport->fc_flag |= FC_LBIT;
- spin_unlock(shost->host_lock);
- }
+ if (bf_get(lpfc_mbx_read_top_il, la))
+ fc_flags |= FC_LBIT;
vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
i = la->lilpBde64.tus.f.bdeSize;
@@ -3064,12 +3072,16 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
}
vport->fc_myDID = phba->fc_pref_DID;
- spin_lock(shost->host_lock);
- vport->fc_flag |= FC_LBIT;
- spin_unlock(shost->host_lock);
+ fc_flags |= FC_LBIT;
}
spin_unlock_irq(&phba->hbalock);
+ if (fc_flags) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= fc_flags;
+ spin_unlock_irq(shost->host_lock);
+ }
+
lpfc_linkup(phba);
sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!sparam_mbox)
@@ -3237,8 +3249,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport->fc_flag &= ~FC_BYPASSED_MODE;
spin_unlock_irq(shost->host_lock);
- if ((phba->fc_eventTag < la->eventTag) ||
- (phba->fc_eventTag == la->eventTag)) {
+ if (phba->fc_eventTag <= la->eventTag) {
phba->fc_stat.LinkMultiEvent++;
if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)
if (phba->fc_eventTag != 0)
@@ -3246,16 +3257,18 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
phba->fc_eventTag = la->eventTag;
- spin_lock_irq(&phba->hbalock);
- if (bf_get(lpfc_mbx_read_top_mm, la))
- phba->sli.sli_flag |= LPFC_MENLO_MAINT;
- else
- phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
- spin_unlock_irq(&phba->hbalock);
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ spin_lock_irq(&phba->hbalock);
+ if (bf_get(lpfc_mbx_read_top_mm, la))
+ phba->sli.sli_flag |= LPFC_MENLO_MAINT;
+ else
+ phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
+ spin_unlock_irq(&phba->hbalock);
+ }
phba->link_events++;
if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) &&
- (!bf_get(lpfc_mbx_read_top_mm, la))) {
+ !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
phba->fc_stat.LinkUp++;
if (phba->link_flag & LS_LOOPBACK_MODE) {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -3300,8 +3313,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
bf_get(lpfc_mbx_read_top_fa, la));
lpfc_mbx_issue_link_down(phba);
}
- if ((bf_get(lpfc_mbx_read_top_mm, la)) &&
- (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)) {
+ if ((phba->sli.sli_flag & LPFC_MENLO_MAINT) &&
+ (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)) {
if (phba->link_state != LPFC_LINK_DOWN) {
phba->fc_stat.LinkDown++;
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -3329,8 +3342,9 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
}
- if (bf_get(lpfc_mbx_read_top_fa, la)) {
- if (bf_get(lpfc_mbx_read_top_mm, la))
+ if ((phba->sli_rev < LPFC_SLI_REV4) &&
+ bf_get(lpfc_mbx_read_top_fa, la)) {
+ if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
lpfc_issue_clear_la(phba, vport);
lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
"1311 fa %d\n",
@@ -4354,7 +4368,6 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
with an error */
list_move_tail(&iocb->list,
&completions);
- pring->txq_cnt--;
}
}
spin_unlock_irq(&phba->hbalock);
@@ -5055,7 +5068,6 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
(icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
list_move_tail(&iocb->list, &completions);
- pring->txq_cnt--;
}
}
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 6e93b886cd4d..1dd2f6f0a127 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1958,6 +1958,9 @@ struct lpfc_mbx_init_vfi {
struct lpfc_mbx_reg_vfi {
uint32_t word1;
+#define lpfc_reg_vfi_upd_SHIFT 29
+#define lpfc_reg_vfi_upd_MASK 0x00000001
+#define lpfc_reg_vfi_upd_WORD word1
#define lpfc_reg_vfi_vp_SHIFT 28
#define lpfc_reg_vfi_vp_MASK 0x00000001
#define lpfc_reg_vfi_vp_WORD word1
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 314b4f61b9e3..5da297290262 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -839,7 +839,6 @@ lpfc_hba_down_post_s3(struct lpfc_hba *phba)
* way, nothing should be on txcmplq as it will NEVER complete.
*/
list_splice_init(&pring->txcmplq, &completions);
- pring->txcmplq_cnt = 0;
spin_unlock_irq(&phba->hbalock);
/* Cancel all the IOCBs from the completions list */
@@ -2915,9 +2914,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
sglq_entry->state = SGL_FREED;
list_add_tail(&sglq_entry->list, &els_sgl_list);
}
- spin_lock(&phba->hbalock);
+ spin_lock_irq(&phba->hbalock);
list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
- spin_unlock(&phba->hbalock);
+ spin_unlock_irq(&phba->hbalock);
} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
/* els xri-sgl shrunk */
xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
@@ -3015,9 +3014,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
psb->cur_iocbq.sli4_lxritag = lxri;
psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
}
- spin_lock(&phba->scsi_buf_list_lock);
+ spin_lock_irq(&phba->scsi_buf_list_lock);
list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list);
- spin_unlock(&phba->scsi_buf_list_lock);
+ spin_unlock_irq(&phba->scsi_buf_list_lock);
return 0;
@@ -4004,6 +4003,52 @@ lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_perform_inuse_fcf_recovery - Perform in-use FCF recovery
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_fip: pointer to the async FIP completion queue entry.
+ *
+ * This routine performs FCF recovery when the in-use FCF is either dead or
+ * has been modified.
+ **/
+static void
+lpfc_sli4_perform_inuse_fcf_recovery(struct lpfc_hba *phba,
+ struct lpfc_acqe_fip *acqe_fip)
+{
+ int rc;
+
+ spin_lock_irq(&phba->hbalock);
+ /* Mark the fast failover process in progress */
+ phba->fcf.fcf_flag |= FCF_DEAD_DISC;
+ spin_unlock_irq(&phba->hbalock);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+ "2771 Start FCF fast failover process due to in-use "
+ "FCF DEAD/MODIFIED event: evt_tag:x%x, index:x%x\n",
+ acqe_fip->event_tag, acqe_fip->index);
+ rc = lpfc_sli4_redisc_fcf_table(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+ "2772 Issue FCF rediscover mabilbox command "
+ "failed, fail through to FCF dead event\n");
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
+ spin_unlock_irq(&phba->hbalock);
+ /*
+ * Last resort will fail over by treating this as a link
+ * down to FCF registration.
+ */
+ lpfc_sli4_fcf_dead_failthrough(phba);
+ } else {
+ /* Reset FCF roundrobin bmask for new discovery */
+ lpfc_sli4_clear_fcf_rr_bmask(phba);
+ /*
+ * Handling fast FCF failover to a DEAD FCF event is
+ * considered equivalent to receiving CVL to all vports.
+ */
+ lpfc_sli4_perform_all_vport_cvl(phba);
+ }
+}
+
+/**
* lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
* @phba: pointer to lpfc hba data structure.
* @acqe_link: pointer to the async fcoe completion queue entry.
@@ -4068,9 +4113,22 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
break;
}
- /* If the FCF has been in discovered state, do nothing. */
- if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
+ /* If the FCF has been in the discovered state, perform rediscovery
+ * only if the FCF with the same index as the in-use FCF was
+ * modified during normal operation. Otherwise, do nothing.
+ */
+ if (phba->pport->port_state > LPFC_FLOGI) {
spin_unlock_irq(&phba->hbalock);
+ if (phba->fcf.current_rec.fcf_indx ==
+ acqe_fip->index) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ "3300 In-use FCF (%d) "
+ "modified, perform FCF "
+ "rediscovery\n",
+ acqe_fip->index);
+ lpfc_sli4_perform_inuse_fcf_recovery(phba,
+ acqe_fip);
+ }
break;
}
spin_unlock_irq(&phba->hbalock);
@@ -4123,39 +4181,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
* is no longer valid as we are not in the middle of FCF
* failover process already.
*/
- spin_lock_irq(&phba->hbalock);
- /* Mark the fast failover process in progress */
- phba->fcf.fcf_flag |= FCF_DEAD_DISC;
- spin_unlock_irq(&phba->hbalock);
-
- lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
- "2771 Start FCF fast failover process due to "
- "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
- "\n", acqe_fip->event_tag, acqe_fip->index);
- rc = lpfc_sli4_redisc_fcf_table(phba);
- if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
- LOG_DISCOVERY,
- "2772 Issue FCF rediscover mabilbox "
- "command failed, fail through to FCF "
- "dead event\n");
- spin_lock_irq(&phba->hbalock);
- phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
- spin_unlock_irq(&phba->hbalock);
- /*
- * Last resort will fail over by treating this
- * as a link down to FCF registration.
- */
- lpfc_sli4_fcf_dead_failthrough(phba);
- } else {
- /* Reset FCF roundrobin bmask for new discovery */
- lpfc_sli4_clear_fcf_rr_bmask(phba);
- /*
- * Handling fast FCF failover to a DEAD FCF event is
- * considered equalivant to receiving CVL to all vports.
- */
- lpfc_sli4_perform_all_vport_cvl(phba);
- }
+ lpfc_sli4_perform_inuse_fcf_recovery(phba, acqe_fip);
break;
case LPFC_FIP_EVENT_TYPE_CVL:
phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index efc9cd9def8b..a7a9fa468308 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2126,32 +2126,40 @@ void
lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
{
struct lpfc_mbx_reg_vfi *reg_vfi;
+ struct lpfc_hba *phba = vport->phba;
memset(mbox, 0, sizeof(*mbox));
reg_vfi = &mbox->u.mqe.un.reg_vfi;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
bf_set(lpfc_reg_vfi_vfi, reg_vfi,
- vport->phba->sli4_hba.vfi_ids[vport->vfi]);
- bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
- bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->phba->vpi_ids[vport->vpi]);
+ phba->sli4_hba.vfi_ids[vport->vfi]);
+ bf_set(lpfc_reg_vfi_fcfi, reg_vfi, phba->fcf.fcfi);
+ bf_set(lpfc_reg_vfi_vpi, reg_vfi, phba->vpi_ids[vport->vpi]);
memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
- reg_vfi->e_d_tov = vport->phba->fc_edtov;
- reg_vfi->r_a_tov = vport->phba->fc_ratov;
+ reg_vfi->e_d_tov = phba->fc_edtov;
+ reg_vfi->r_a_tov = phba->fc_ratov;
reg_vfi->bde.addrHigh = putPaddrHigh(phys);
reg_vfi->bde.addrLow = putPaddrLow(phys);
reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
+
+ /* Only FC supports upd bit */
+ if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) &&
+ (vport->fc_flag & FC_VFI_REGISTERED)) {
+ bf_set(lpfc_reg_vfi_vp, reg_vfi, 0);
+ bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);
+ }
lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
"3134 Register VFI, mydid:x%x, fcfi:%d, "
" vfi:%d, vpi:%d, fc_pname:%x%x\n",
vport->fc_myDID,
- vport->phba->fcf.fcfi,
- vport->phba->sli4_hba.vfi_ids[vport->vfi],
- vport->phba->vpi_ids[vport->vpi],
+ phba->fcf.fcfi,
+ phba->sli4_hba.vfi_ids[vport->vfi],
+ phba->vpi_ids[vport->vpi],
reg_vfi->wwn[0], reg_vfi->wwn[1]);
}
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 46128c679202..82f4d3542289 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -226,7 +226,6 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
/* It matches, so dequeue and call compl with an error */
list_move_tail(&iocb->list, &completions);
- pring->txq_cnt--;
}
}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 98af07c6e300..74b8710e1e90 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -732,7 +732,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
psb->exch_busy = 0;
spin_unlock_irqrestore(&phba->hbalock, iflag);
- if (pring->txq_cnt)
+ if (!list_empty(&pring->txq))
lpfc_worker_wake_up(phba);
return;
@@ -885,9 +885,9 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
int num_posted, rc = 0;
/* get all SCSI buffers need to repost to a local list */
- spin_lock(&phba->scsi_buf_list_lock);
+ spin_lock_irq(&phba->scsi_buf_list_lock);
list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist);
- spin_unlock(&phba->scsi_buf_list_lock);
+ spin_unlock_irq(&phba->scsi_buf_list_lock);
/* post the list of scsi buffer sgls to port if available */
if (!list_empty(&post_sblist)) {
@@ -4246,7 +4246,7 @@ static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
unsigned long poll_tmo_expires =
(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
- if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
+ if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
mod_timer(&phba->fcp_poll_timer,
poll_tmo_expires);
}
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index d43faf34c1e2..35dd17eb0f27 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -873,14 +873,16 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
xritag, rxid, ndlp->nlp_DID, send_rrq);
return -EINVAL;
}
- rrq->send_rrq = send_rrq;
+ if (phba->cfg_enable_rrq == 1)
+ rrq->send_rrq = send_rrq;
+ else
+ rrq->send_rrq = 0;
rrq->xritag = xritag;
rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
rrq->ndlp = ndlp;
rrq->nlp_DID = ndlp->nlp_DID;
rrq->vport = ndlp->vport;
rrq->rxid = rxid;
- rrq->send_rrq = send_rrq;
spin_lock_irqsave(&phba->hbalock, iflags);
empty = list_empty(&phba->active_rrq_list);
list_add_tail(&rrq->list, &phba->active_rrq_list);
@@ -1009,6 +1011,18 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
else
sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
+ /*
+ ** This should have been removed from the txcmplq before calling
+ ** iocbq_release. The normal completion path should have
+ ** already done the list_del_init.
+ */
+ if (unlikely(!list_empty(&iocbq->list))) {
+ if (iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)
+ iocbq->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ list_del_init(&iocbq->list);
+ }
+
+
if (sglq) {
if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
(sglq->state != SGL_XRI_ABORTED)) {
@@ -1025,7 +1039,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
&phba->sli4_hba.lpfc_sgl_list);
/* Check if TXQ queue needs to be serviced */
- if (pring->txq_cnt)
+ if (!list_empty(&pring->txq))
lpfc_worker_wake_up(phba);
}
}
@@ -1057,6 +1071,14 @@ __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
/*
+ ** This should have been removed from the txcmplq before calling
+ ** iocbq_release. The normal completion path should have
+ ** already done the list_del_init.
+ */
+ if (unlikely(!list_empty(&iocbq->list)))
+ list_del_init(&iocbq->list);
+
+ /*
* Clean all volatile data fields, preserve iotag and node struct.
*/
memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
@@ -1122,7 +1144,6 @@ lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
while (!list_empty(iocblist)) {
list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
-
if (!piocb->iocb_cmpl)
lpfc_sli_release_iocbq(phba, piocb);
else {
@@ -1310,9 +1331,6 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
{
list_add_tail(&piocb->list, &pring->txcmplq);
piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
- pring->txcmplq_cnt++;
- if (pring->txcmplq_cnt > pring->txcmplq_max)
- pring->txcmplq_max = pring->txcmplq_cnt;
if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
(piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
@@ -1344,8 +1362,6 @@ lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
struct lpfc_iocbq *cmd_iocb;
list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
- if (cmd_iocb != NULL)
- pring->txq_cnt--;
return cmd_iocb;
}
@@ -1614,8 +1630,9 @@ lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
* (c) link attention events can be processed (fcp ring only)
* (d) IOCB processing is not blocked by the outstanding mbox command.
*/
- if (pring->txq_cnt &&
- lpfc_is_link_up(phba) &&
+
+ if (lpfc_is_link_up(phba) &&
+ (!list_empty(&pring->txq)) &&
(pring->ringno != phba->sli.fcp_ring ||
phba->sli.sli_flag & LPFC_PROCESS_LA)) {
@@ -2612,7 +2629,6 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
cmd_iocb = phba->sli.iocbq_lookup[iotag];
list_del_init(&cmd_iocb->list);
if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
- pring->txcmplq_cnt--;
cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
}
return cmd_iocb;
@@ -2650,7 +2666,6 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
/* remove from txcmpl queue list */
list_del_init(&cmd_iocb->list);
cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
- pring->txcmplq_cnt--;
return cmd_iocb;
}
}
@@ -3499,7 +3514,6 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
*/
spin_lock_irq(&phba->hbalock);
list_splice_init(&pring->txq, &completions);
- pring->txq_cnt = 0;
/* Next issue ABTS for everything on the txcmplq */
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
@@ -3536,11 +3550,9 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
/* Retrieve everything on txq */
list_splice_init(&pring->txq, &txq);
- pring->txq_cnt = 0;
/* Retrieve everything on the txcmplq */
list_splice_init(&pring->txcmplq, &txcmplq);
- pring->txcmplq_cnt = 0;
/* Indicate the I/O queues are flushed */
phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
@@ -5988,9 +6000,9 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
LIST_HEAD(post_sgl_list);
LIST_HEAD(free_sgl_list);
- spin_lock(&phba->hbalock);
+ spin_lock_irq(&phba->hbalock);
list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
- spin_unlock(&phba->hbalock);
+ spin_unlock_irq(&phba->hbalock);
list_for_each_entry_safe(sglq_entry, sglq_entry_next,
&allc_sgl_list, list) {
@@ -6091,10 +6103,10 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
/* push els sgls posted to the available list */
if (!list_empty(&post_sgl_list)) {
- spin_lock(&phba->hbalock);
+ spin_lock_irq(&phba->hbalock);
list_splice_init(&post_sgl_list,
&phba->sli4_hba.lpfc_sgl_list);
- spin_unlock(&phba->hbalock);
+ spin_unlock_irq(&phba->hbalock);
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3161 Failure to post els sgl to port.\n");
@@ -7615,7 +7627,6 @@ __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
{
/* Insert the caller's iocb in the txq tail for later processing. */
list_add_tail(&piocb->list, &pring->txq);
- pring->txq_cnt++;
}
/**
@@ -8387,7 +8398,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
sglq = NULL;
else {
- if (pring->txq_cnt) {
+ if (!list_empty(&pring->txq)) {
if (!(flag & SLI_IOCB_RET_IOCB)) {
__lpfc_sli_ringtx_put(phba,
pring, piocb);
@@ -9055,7 +9066,6 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
if (iocb->vport != vport)
continue;
list_move_tail(&iocb->list, &completions);
- pring->txq_cnt--;
}
/* Next issue ABTS for everything on the txcmplq */
@@ -9124,8 +9134,6 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
* given to the FW yet.
*/
list_splice_init(&pring->txq, &completions);
- pring->txq_cnt = 0;
-
}
spin_unlock_irqrestore(&phba->hbalock, flags);
@@ -9966,6 +9974,9 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
long timeleft, timeout_req = 0;
int retval = IOCB_SUCCESS;
uint32_t creg_val;
+ struct lpfc_iocbq *iocb;
+ int txq_cnt = 0;
+ int txcmplq_cnt = 0;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
/*
* If the caller has provided a response iocbq buffer, then context2
@@ -10013,9 +10024,17 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
retval = IOCB_TIMEDOUT;
}
} else if (retval == IOCB_BUSY) {
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
- phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt);
+ if (phba->cfg_log_verbose & LOG_SLI) {
+ list_for_each_entry(iocb, &pring->txq, list) {
+ txq_cnt++;
+ }
+ list_for_each_entry(iocb, &pring->txcmplq, list) {
+ txcmplq_cnt++;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
+ phba->iocb_cnt, txq_cnt, txcmplq_cnt);
+ }
return retval;
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -11298,16 +11317,25 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
struct lpfc_iocbq *irspiocbq;
unsigned long iflags;
struct lpfc_sli_ring *pring = cq->pring;
+ int txq_cnt = 0;
+ int txcmplq_cnt = 0;
+ int fcp_txcmplq_cnt = 0;
/* Get an irspiocbq for later ELS response processing use */
irspiocbq = lpfc_sli_get_iocbq(phba);
if (!irspiocbq) {
+ if (!list_empty(&pring->txq))
+ txq_cnt++;
+ if (!list_empty(&pring->txcmplq))
+ txcmplq_cnt++;
+ if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
+ fcp_txcmplq_cnt++;
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
"fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
- pring->txq_cnt, phba->iocb_cnt,
- phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt,
- phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt);
+ txq_cnt, phba->iocb_cnt,
+ fcp_txcmplq_cnt,
+ txcmplq_cnt);
return false;
}
@@ -15482,11 +15510,18 @@ lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
LPFC_SLI4_FCF_TBL_INDX_MAX);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
"3060 Last IDX %d\n", last_index);
- if (list_empty(&phba->fcf.fcf_pri_list)) {
+
+ /* Verify the priority list has 2 or more entries */
+ spin_lock_irq(&phba->hbalock);
+ if (list_empty(&phba->fcf.fcf_pri_list) ||
+ list_is_singular(&phba->fcf.fcf_pri_list)) {
+ spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
"3061 Last IDX %d\n", last_index);
return 0; /* Empty rr list */
}
+ spin_unlock_irq(&phba->hbalock);
+
next_fcf_pri = 0;
/*
* Clear the rr_bmask and set all of the bits that are at this
@@ -16245,14 +16280,19 @@ lpfc_drain_txq(struct lpfc_hba *phba)
char *fail_msg = NULL;
struct lpfc_sglq *sglq;
union lpfc_wqe wqe;
+ int txq_cnt = 0;
spin_lock_irqsave(&phba->hbalock, iflags);
- if (pring->txq_cnt > pring->txq_max)
- pring->txq_max = pring->txq_cnt;
+ list_for_each_entry(piocbq, &pring->txq, list) {
+ txq_cnt++;
+ }
+
+ if (txq_cnt > pring->txq_max)
+ pring->txq_max = txq_cnt;
spin_unlock_irqrestore(&phba->hbalock, iflags);
- while (pring->txq_cnt) {
+ while (!list_empty(&pring->txq)) {
spin_lock_irqsave(&phba->hbalock, iflags);
piocbq = lpfc_sli_ringtx_get(phba, pring);
@@ -16260,7 +16300,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
spin_unlock_irqrestore(&phba->hbalock, iflags);
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2823 txq empty and txq_cnt is %d\n ",
- pring->txq_cnt);
+ txq_cnt);
break;
}
sglq = __lpfc_sli_get_sglq(phba, piocbq);
@@ -16269,6 +16309,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
spin_unlock_irqrestore(&phba->hbalock, iflags);
break;
}
+ txq_cnt--;
/* The xri and iocb resources secured,
* attempt to issue request
@@ -16300,5 +16341,5 @@ lpfc_drain_txq(struct lpfc_hba *phba)
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
IOERR_SLI_ABORTED);
- return pring->txq_cnt;
+ return txq_cnt;
}
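
A recurring change across the lpfc_sli.c hunks above is the removal of the cached txq_cnt/txcmplq_cnt counters: emptiness tests become list_empty(), and the few paths that still need a number walk the list on demand, so the count can never drift out of sync with the list itself. A sketch of the on-demand count, assuming only <linux/list.h>:

	#include <linux/list.h>

	/* Count a queue when (rarely) needed instead of caching a counter. */
	static int queue_depth(const struct list_head *q)
	{
		const struct list_head *pos;
		int cnt = 0;

		list_for_each(pos, q)
			cnt++;
		return cnt;
	}

The hot-path checks then reduce to !list_empty(&pring->txq), which stays O(1).
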
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index f3b7795a296b..664cd04f7cd8 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.37"
+#define LPFC_DRIVER_VERSION "8.3.38"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index dce7d788cdc9..c37b244cf8ae 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,6 +1,6 @@
qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
- qla_nx.o qla_target.o
+ qla_nx.o qla_mr.o qla_target.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index b3db9dcc2619..bf60c631abb5 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -888,7 +888,10 @@ qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
struct qla_hw_data *ha = vha->hw;
uint32_t sn;
- if (IS_FWI2_CAPABLE(ha)) {
+ if (IS_QLAFX00(vha->hw)) {
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ vha->hw->mr.serial_num);
+ } else if (IS_FWI2_CAPABLE(ha)) {
qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
return snprintf(buf, PAGE_SIZE, "%s\n", buf);
}
@@ -912,6 +915,11 @@ qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
+
+ if (IS_QLAFX00(vha->hw))
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ vha->hw->mr.hw_version);
+
return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
ha->product_id[0], ha->product_id[1], ha->product_id[2],
ha->product_id[3]);
@@ -922,6 +930,11 @@ qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+ if (IS_QLAFX00(vha->hw))
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ vha->hw->mr.product_name);
+
return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
}
@@ -1304,6 +1317,12 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
int rval = QLA_FUNCTION_FAILED;
uint16_t state[5];
+ uint32_t pstate;
+
+ if (IS_QLAFX00(vha->hw)) {
+ pstate = qlafx00_fw_state_show(dev, attr, buf);
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
+ }
if (qla2x00_reset_active(vha))
ql_log(ql_log_warn, vha, 0x707c,
@@ -1454,6 +1473,11 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
(shost_priv(shost)))->hw;
u32 speed = FC_PORTSPEED_UNKNOWN;
+ if (IS_QLAFX00(ha)) {
+ qlafx00_get_host_speed(shost);
+ return;
+ }
+
switch (ha->link_data_rate) {
case PORT_SPEED_1GB:
speed = FC_PORTSPEED_1GBIT;
@@ -1637,6 +1661,9 @@ qla2x00_issue_lip(struct Scsi_Host *shost)
{
scsi_qla_host_t *vha = shost_priv(shost);
+ if (IS_QLAFX00(vha->hw))
+ return 0;
+
qla2x00_loop_reset(vha);
return 0;
}
@@ -1655,6 +1682,9 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
pfc_host_stat = &vha->fc_host_stat;
memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
+ if (IS_QLAFX00(vha->hw))
+ goto done;
+
if (test_bit(UNLOADING, &vha->dpc_flags))
goto done;
@@ -2087,6 +2117,9 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
FC_PORTSPEED_1GBIT;
else if (IS_QLA23XX(ha))
speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
+ else if (IS_QLAFX00(ha))
+ speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
+ FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
else
speed = FC_PORTSPEED_1GBIT;
fc_host_supported_speeds(vha->host) = speed;
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index ad54099cb805..39719f892488 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -30,14 +30,31 @@ qla2x00_bsg_sp_free(void *data, void *ptr)
struct scsi_qla_host *vha = sp->fcport->vha;
struct fc_bsg_job *bsg_job = sp->u.bsg_job;
struct qla_hw_data *ha = vha->hw;
+ struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
- dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
- bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (sp->type == SRB_FXIOCB_BCMD) {
+ piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
+ &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
- dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+ if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ } else {
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ }
if (sp->type == SRB_CT_CMD ||
+ sp->type == SRB_FXIOCB_BCMD ||
sp->type == SRB_ELS_CMD_HST)
kfree(sp->fcport);
qla2x00_rel_sp(vha, sp);
@@ -751,6 +768,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
elreq.transfer_size = req_data_len;
elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+ elreq.iteration_count =
+ bsg_job->request->rqst_data.h_vendor.vendor_cmd[2];
if (atomic_read(&vha->loop_state) == LOOP_READY &&
(ha->current_topology == ISP_CFG_F ||
@@ -1883,6 +1902,128 @@ done:
}
static int
+qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = (DRIVER_ERROR << 16);
+ struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
+ srb_t *sp;
+ int req_sg_cnt = 0, rsp_sg_cnt = 0;
+ struct fc_port *fcport;
+ char *type = "FC_BSG_HST_FX_MGMT";
+
+ /* Copy the IOCB specific information */
+ piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
+ &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+
+ /* Dump the vendor information */
+ ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
+ (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00));
+
+ if (!vha->flags.online) {
+ ql_log(ql_log_warn, vha, 0x70d0,
+ "Host is not online.\n");
+ rval = -EIO;
+ goto done;
+ }
+
+ if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
+ req_sg_cnt = dma_map_sg(&ha->pdev->dev,
+ bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (!req_sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x70c7,
+ "dma_map_sg return %d for request\n", req_sg_cnt);
+ rval = -ENOMEM;
+ goto done;
+ }
+ }
+
+ if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
+ rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
+ bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ if (!rsp_sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x70c8,
+ "dma_map_sg return %d for reply\n", rsp_sg_cnt);
+ rval = -ENOMEM;
+ goto done_unmap_req_sg;
+ }
+ }
+
+ ql_dbg(ql_dbg_user, vha, 0x70c9,
+ "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
+ "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
+ req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
+
+ /* Allocate a dummy fcport structure, since the functions preparing
+ * the IOCB and mailbox command retrieve port-specific information
+ * from the fcport structure. For host-based ELS commands there will
+ * be no fcport structure allocated.
+ */
+ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+ if (!fcport) {
+ ql_log(ql_log_warn, vha, 0x70ca,
+ "Failed to allocate fcport.\n");
+ rval = -ENOMEM;
+ goto done_unmap_rsp_sg;
+ }
+
+ /* Alloc SRB structure */
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp) {
+ ql_log(ql_log_warn, vha, 0x70cb,
+ "qla2x00_get_sp failed.\n");
+ rval = -ENOMEM;
+ goto done_free_fcport;
+ }
+
+ /* Initialize all required fields of fcport */
+ fcport->vha = vha;
+ fcport->loop_id = piocb_rqst->dataword;
+
+ sp->type = SRB_FXIOCB_BCMD;
+ sp->name = "bsg_fx_mgmt";
+ sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
+ sp->u.bsg_job = bsg_job;
+ sp->free = qla2x00_bsg_sp_free;
+ sp->done = qla2x00_bsg_job_done;
+
+ ql_dbg(ql_dbg_user, vha, 0x70cc,
+ "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
+ type, piocb_rqst->func_type, fcport->loop_id);
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x70cd,
+ "qla2x00_start_sp failed=%d.\n", rval);
+ mempool_free(sp, ha->srb_mempool);
+ rval = -EIO;
+ goto done_free_fcport;
+ }
+ return rval;
+
+done_free_fcport:
+ kfree(fcport);
+
+done_unmap_rsp_sg:
+ if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+done_unmap_req_sg:
+ if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+done:
+ return rval;
+}
+
+static int
qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
{
switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
@@ -1928,6 +2069,8 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
case QL_VND_DIAG_IO_CMD:
return qla24xx_process_bidir_cmd(bsg_job);
+ case QL_VND_FX00_MGMT_CMD:
+ return qlafx00_mgmt_cmd(bsg_job);
default:
return -ENOSYS;
}
@@ -2007,7 +2150,8 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
sp = req->outstanding_cmds[cnt];
if (sp) {
if (((sp->type == SRB_CT_CMD) ||
- (sp->type == SRB_ELS_CMD_HST))
+ (sp->type == SRB_ELS_CMD_HST) ||
+ (sp->type == SRB_FXIOCB_BCMD))
&& (sp->u.bsg_job == bsg_job)) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (ha->isp_ops->abort_command(sp)) {
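
qlafx00_mgmt_cmd() above maps the request and reply scatterlists only when the corresponding SRB_FXDISC_*_DMA_VALID flag is set, and unwinds in reverse order through goto labels. A compressed sketch of that flag-gated map/unwind (fx_map_payloads is a hypothetical helper; the flag names come from the qla_def.h hunk further down):

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>
	#include <linux/types.h>

	static int fx_map_payloads(struct device *dev,
				   struct scatterlist *req_sg, int req_nents,
				   struct scatterlist *rsp_sg, int rsp_nents,
				   u8 flags)
	{
		if ((flags & SRB_FXDISC_REQ_DMA_VALID) &&
		    !dma_map_sg(dev, req_sg, req_nents, DMA_TO_DEVICE))
			return -ENOMEM;

		if ((flags & SRB_FXDISC_RESP_DMA_VALID) &&
		    !dma_map_sg(dev, rsp_sg, rsp_nents, DMA_FROM_DEVICE))
			goto unmap_req;

		return 0;

	unmap_req:
		if (flags & SRB_FXDISC_REQ_DMA_VALID)
			dma_unmap_sg(dev, req_sg, req_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}
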
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index e9f6b9bbf29a..04f770332c2b 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -22,6 +22,7 @@
#define QL_VND_DIAG_IO_CMD 0x0A
#define QL_VND_WRITE_I2C 0x10
#define QL_VND_READ_I2C 0x11
+#define QL_VND_FX00_MGMT_CMD 0x12
/* BSG Vendor specific subcode returns */
#define EXT_STATUS_OK 0
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index fbc305f1c87c..cfa2a20dee97 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,28 +11,31 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x0126 | 0x4b,0xba,0xfa |
- * | Mailbox commands | 0x115b | 0x111a-0x111b |
- * | | | 0x112c-0x112e |
- * | | | 0x113a |
+ * | Module Init and Probe | 0x014f | 0x4b,0xba,0xfa |
+ * | Mailbox commands | 0x1179 | 0x111a-0x111b |
* | | | 0x1155-0x1158 |
- * | Device Discovery | 0x2087 | 0x2020-0x2022, |
+ * | Device Discovery | 0x2095 | 0x2020-0x2022, |
* | | | 0x2016 |
- * | Queue Command and IO tracing | 0x3031 | 0x3006-0x300b |
+ * | Queue Command and IO tracing | 0x3058 | 0x3006-0x300b |
* | | | 0x3027-0x3028 |
- * | | | 0x302d-0x302e |
- * | DPC Thread | 0x401d | 0x4002,0x4013 |
- * | Async Events | 0x5071 | 0x502b-0x502f |
+ * | | | 0x303d-0x3041 |
+ * | | | 0x302d,0x3033 |
+ * | | | 0x3036,0x3038 |
+ * | | | 0x303a |
+ * | DPC Thread | 0x4022 | 0x4002,0x4013 |
+ * | Async Events | 0x5081 | 0x502b-0x502f |
* | | | 0x5047,0x5052 |
+ * | | | 0x5040,0x5075 |
* | Timer Routines | 0x6011 | |
- * | User Space Interactions | 0x70c4 | 0x7018,0x702e, |
+ * | User Space Interactions | 0x70dd | 0x7018,0x702e, |
* | | | 0x7020,0x7024, |
* | | | 0x7039,0x7045, |
* | | | 0x7073-0x7075, |
- * | | | 0x708c, |
+ * | | | 0x707b,0x708c, |
* | | | 0x70a5,0x70a6, |
* | | | 0x70a8,0x70ab, |
- * | | | 0x70ad-0x70ae |
+ * | | | 0x70ad-0x70ae, |
+ * | | | 0x70d1-0x70da |
* | Task Management | 0x803c | 0x8025-0x8026 |
* | | | 0x800b,0x8039 |
* | AER/EEH | 0x9011 | |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 65c5ff75936b..c32efc753229 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -245,7 +245,6 @@
#define MAX_CMDSZ 16 /* SCSI maximum CDB size. */
#include "qla_fw.h"
-
/*
* Timeout timer counts in seconds
*/
@@ -265,6 +264,7 @@
#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/
#define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */
+#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/
struct req_que;
@@ -284,6 +284,7 @@ struct sd_dif_tuple {
struct srb_cmd {
struct scsi_cmnd *cmd; /* Linux SCSI command pkt */
uint32_t request_sense_length;
+ uint32_t fw_sense_length;
uint8_t *request_sense_ptr;
void *ctx;
};
@@ -321,7 +322,39 @@ struct srb_iocb {
uint32_t flags;
uint32_t lun;
uint32_t data;
+ struct completion comp;
+ uint32_t comp_status;
} tmf;
+ struct {
+#define SRB_FXDISC_REQ_DMA_VALID BIT_0
+#define SRB_FXDISC_RESP_DMA_VALID BIT_1
+#define SRB_FXDISC_REQ_DWRD_VALID BIT_2
+#define SRB_FXDISC_RSP_DWRD_VALID BIT_3
+#define FXDISC_TIMEOUT 20
+ uint8_t flags;
+ uint32_t req_len;
+ uint32_t rsp_len;
+ void *req_addr;
+ void *rsp_addr;
+ dma_addr_t req_dma_handle;
+ dma_addr_t rsp_dma_handle;
+ uint32_t adapter_id;
+ uint32_t adapter_id_hi;
+ uint32_t req_func_type;
+ uint32_t req_data;
+ uint32_t req_data_extra;
+ uint32_t result;
+ uint32_t seq_number;
+ uint32_t fw_flags;
+ struct completion fxiocb_comp;
+ uint32_t reserved_0;
+ uint8_t reserved_1;
+ } fxiocb;
+ struct {
+ uint32_t cmd_hndl;
+ uint32_t comp_status;
+ struct completion comp;
+ } abt;
} u;
struct timer_list timer;
@@ -338,6 +371,10 @@ struct srb_iocb {
#define SRB_TM_CMD 7
#define SRB_SCSI_CMD 8
#define SRB_BIDI_CMD 9
+#define SRB_FXIOCB_DCMD 10
+#define SRB_FXIOCB_BCMD 11
+#define SRB_ABT_CMD 12
+
typedef struct srb {
atomic_t ref_count;
@@ -368,6 +405,10 @@ typedef struct srb {
(sp->u.scmd.request_sense_ptr)
#define SET_CMD_SENSE_PTR(sp, ptr) \
(sp->u.scmd.request_sense_ptr = ptr)
+#define GET_FW_SENSE_LEN(sp) \
+ (sp->u.scmd.fw_sense_length)
+#define SET_FW_SENSE_LEN(sp, len) \
+ (sp->u.scmd.fw_sense_length = len)
struct msg_echo_lb {
dma_addr_t send_dma;
@@ -376,6 +417,7 @@ struct msg_echo_lb {
uint16_t rsp_sg_cnt;
uint16_t options;
uint32_t transfer_size;
+ uint32_t iteration_count;
};
/*
@@ -542,11 +584,74 @@ struct device_reg_25xxmq {
uint32_t atio_q_out;
};
+
+struct device_reg_fx00 {
+ uint32_t mailbox0; /* 00 */
+ uint32_t mailbox1; /* 04 */
+ uint32_t mailbox2; /* 08 */
+ uint32_t mailbox3; /* 0C */
+ uint32_t mailbox4; /* 10 */
+ uint32_t mailbox5; /* 14 */
+ uint32_t mailbox6; /* 18 */
+ uint32_t mailbox7; /* 1C */
+ uint32_t mailbox8; /* 20 */
+ uint32_t mailbox9; /* 24 */
+ uint32_t mailbox10; /* 28 */
+ uint32_t mailbox11;
+ uint32_t mailbox12;
+ uint32_t mailbox13;
+ uint32_t mailbox14;
+ uint32_t mailbox15;
+ uint32_t mailbox16;
+ uint32_t mailbox17;
+ uint32_t mailbox18;
+ uint32_t mailbox19;
+ uint32_t mailbox20;
+ uint32_t mailbox21;
+ uint32_t mailbox22;
+ uint32_t mailbox23;
+ uint32_t mailbox24;
+ uint32_t mailbox25;
+ uint32_t mailbox26;
+ uint32_t mailbox27;
+ uint32_t mailbox28;
+ uint32_t mailbox29;
+ uint32_t mailbox30;
+ uint32_t mailbox31;
+ uint32_t aenmailbox0;
+ uint32_t aenmailbox1;
+ uint32_t aenmailbox2;
+ uint32_t aenmailbox3;
+ uint32_t aenmailbox4;
+ uint32_t aenmailbox5;
+ uint32_t aenmailbox6;
+ uint32_t aenmailbox7;
+ /* Request Queue. */
+ uint32_t req_q_in; /* A0 - Request Queue In-Pointer */
+ uint32_t req_q_out; /* A4 - Request Queue Out-Pointer */
+ /* Response Queue. */
+ uint32_t rsp_q_in; /* A8 - Response Queue In-Pointer */
+ uint32_t rsp_q_out; /* AC - Response Queue Out-Pointer */
+ /* Init values shadowed on FW Up Event */
+ uint32_t initval0; /* B0 */
+ uint32_t initval1; /* B4 */
+ uint32_t initval2; /* B8 */
+ uint32_t initval3; /* BC */
+ uint32_t initval4; /* C0 */
+ uint32_t initval5; /* C4 */
+ uint32_t initval6; /* C8 */
+ uint32_t initval7; /* CC */
+ uint32_t fwheartbeat; /* D0 */
+};
+
+
+
typedef union {
struct device_reg_2xxx isp;
struct device_reg_24xx isp24;
struct device_reg_25xxmq isp25mq;
struct device_reg_82xx isp82;
+ struct device_reg_fx00 ispfx00;
} device_reg_t;
#define ISP_REQ_Q_IN(ha, reg) \
@@ -602,6 +707,20 @@ typedef struct {
#define IOCTL_CMD BIT_2
} mbx_cmd_t;
+struct mbx_cmd_32 {
+ uint32_t out_mb; /* outbound from driver */
+ uint32_t in_mb; /* Incoming from RISC */
+ uint32_t mb[MAILBOX_REGISTER_COUNT];
+ long buf_size;
+ void *bufp;
+ uint32_t tov;
+ uint8_t flags;
+#define MBX_DMA_IN BIT_0
+#define MBX_DMA_OUT BIT_1
+#define IOCTL_CMD BIT_2
+};
+
+
#define MBX_TOV_SECONDS 30
/*
@@ -677,6 +796,15 @@ typedef struct {
#define MBA_BYPASS_NOTIFICATION 0x8043 /* Auto bypass notification. */
#define MBA_DISCARD_RND_FRAME 0x8048 /* discard RND frame due to error. */
#define MBA_REJECTED_FCP_CMD 0x8049 /* rejected FCP_CMD. */
+#define MBA_FW_NOT_STARTED 0x8050 /* Firmware not started */
+#define MBA_FW_STARTING 0x8051 /* Firmware starting */
+#define MBA_FW_RESTART_CMPLT 0x8060 /* Firmware restart complete */
+#define MBA_INIT_REQUIRED 0x8061 /* Initialization required */
+#define MBA_SHUTDOWN_REQUESTED 0x8062 /* Shutdown Requested */
+#define MBA_FW_INIT_FAILURE 0x8401 /* Firmware initialization failure */
+#define MBA_MIRROR_LUN_CHANGE 0x8402 /* Mirror LUN State Change
+ Notification */
+#define MBA_FW_POLL_STATE 0x8600 /* Firmware in poll diagnostic state */
/* 83XX FCoE specific */
#define MBA_IDC_AEN 0x8200 /* FCoE: NIC Core state change AEN */
@@ -798,6 +926,12 @@ typedef struct {
#define MBC_LUN_RESET 0x7E /* Send LUN reset */
/*
+ * all the Mt. Rainier mailbox command codes that clash with FC/FCoE ones
+ * should be defined with MBC_MR_*
+ */
+#define MBC_MR_DRV_SHUTDOWN 0x6A
+
+/*
* ISP24xx mailbox commands
*/
#define MBC_SERDES_PARAMS 0x10 /* Serdes Tx Parameters. */
@@ -1058,6 +1192,30 @@ typedef struct {
uint8_t reserved_3[26];
} init_cb_t;
+
+struct init_cb_fx {
+ uint16_t version;
+ uint16_t reserved_1[13];
+ uint16_t request_q_outpointer;
+ uint16_t response_q_inpointer;
+ uint16_t reserved_2[2];
+ uint16_t response_q_length;
+ uint16_t request_q_length;
+ uint16_t reserved_3[2];
+ uint32_t request_q_address[2];
+ uint32_t response_q_address[2];
+ uint16_t reserved_4[4];
+ uint8_t response_q_msivec;
+ uint8_t reserved_5[19];
+ uint16_t interrupt_delay_timer;
+ uint16_t reserved_6;
+ uint32_t fwoptions1;
+ uint32_t fwoptions2;
+ uint32_t fwoptions3;
+ uint8_t reserved_7[24];
+};
+
+
/*
* Get Link Status mailbox command return buffer.
*/
@@ -1831,6 +1989,9 @@ typedef struct fc_port {
uint16_t loop_id;
uint16_t old_loop_id;
+ uint16_t tgt_id;
+ uint16_t old_tgt_id;
+
uint8_t fcp_prio;
uint8_t fabric_port_name[WWN_SIZE];
@@ -1848,8 +2009,15 @@ typedef struct fc_port {
uint8_t fc4_type;
uint8_t scan_state;
+
+ unsigned long last_queue_full;
+ unsigned long last_ramp_up;
+
+ uint16_t port_id;
} fc_port_t;
+#include "qla_mr.h"
+
/*
* Fibre channel port/lun states.
*/
@@ -2391,6 +2559,7 @@ struct isp_operations {
int (*start_scsi) (srb_t *);
int (*abort_isp) (struct scsi_qla_host *);
int (*iospace_config)(struct qla_hw_data*);
+ int (*initialize_adapter)(struct scsi_qla_host *);
};
/* MSI-X Support *************************************************************/
@@ -2429,6 +2598,7 @@ enum qla_work_type {
QLA_EVT_ASYNC_ADISC,
QLA_EVT_ASYNC_ADISC_DONE,
QLA_EVT_UEVENT,
+ QLA_EVT_AENFX,
};
@@ -2456,7 +2626,15 @@ struct qla_work_evt {
u32 code;
#define QLA_UEVENT_CODE_FW_DUMP 0
} uevent;
- } u;
+ struct {
+ uint32_t evtcode;
+ uint32_t mbx[8];
+ uint32_t count;
+ } aenfx;
+ struct {
+ srb_t *sp;
+ } iosb;
+ } u;
};
struct qla_chip_state_84xx {
@@ -2520,6 +2698,11 @@ struct rsp_que {
struct req_que *req;
srb_t *status_srb; /* status continuation entry */
struct work_struct q_work;
+
+ dma_addr_t dma_fx00;
+ response_t *ring_fx00;
+ uint16_t length_fx00;
+ uint8_t rsp_pkt[REQUEST_ENTRY_SIZE];
};
/* Request queue data structure */
@@ -2544,6 +2727,11 @@ struct req_que {
uint16_t num_outstanding_cmds;
#define MAX_Q_DEPTH 32
int max_q_depth;
+
+ dma_addr_t dma_fx00;
+ request_t *ring_fx00;
+ uint16_t length_fx00;
+ uint8_t req_pkt[REQUEST_ENTRY_SIZE];
};
/* Place holder for FW buffer parameters */
@@ -2633,7 +2821,10 @@ struct qla_hw_data {
uint32_t isp82xx_no_md_cap:1;
uint32_t host_shutting_down:1;
uint32_t idc_compl_status:1;
- /* 32 bits */
+
+ uint32_t mr_reset_hdlr_active:1;
+ uint32_t mr_intr_valid:1;
+ /* 34 bits */
} flags;
/* This spinlock is used to protect "io transactions", you must
@@ -2650,7 +2841,21 @@ struct qla_hw_data {
resource_size_t pio_address;
#define MIN_IOBASE_LEN 0x100
-/* Multi queue data structs */
+ dma_addr_t bar0_hdl;
+
+ void __iomem *cregbase;
+ dma_addr_t bar2_hdl;
+#define BAR0_LEN_FX00 (1024 * 1024)
+#define BAR2_LEN_FX00 (128 * 1024)
+
+ uint32_t rqstq_intr_code;
+ uint32_t mbx_intr_code;
+ uint32_t req_que_len;
+ uint32_t rsp_que_len;
+ uint32_t req_que_off;
+ uint32_t rsp_que_off;
+
+ /* Multi queue data structs */
device_reg_t __iomem *mqiobase;
device_reg_t __iomem *msixbase;
uint16_t msix_count;
@@ -2729,7 +2934,8 @@ struct qla_hw_data {
#define DT_ISP8021 BIT_14
#define DT_ISP2031 BIT_15
#define DT_ISP8031 BIT_16
-#define DT_ISP_LAST (DT_ISP8031 << 1)
+#define DT_ISPFX00 BIT_17
+#define DT_ISP_LAST (DT_ISPFX00 << 1)
#define DT_T10_PI BIT_25
#define DT_IIDMA BIT_26
@@ -2757,6 +2963,7 @@ struct qla_hw_data {
#define IS_QLA82XX(ha) (DT_MASK(ha) & DT_ISP8021)
#define IS_QLA2031(ha) (DT_MASK(ha) & DT_ISP2031)
#define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031)
+#define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00)
#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
IS_QLA6312(ha) || IS_QLA6322(ha))
@@ -2821,6 +3028,7 @@ struct qla_hw_data {
uint16_t r_a_tov;
int port_down_retry_count;
uint8_t mbx_count;
+ uint8_t aen_mbx_count;
uint32_t login_retry_count;
/* SNS command interfaces. */
@@ -2868,9 +3076,13 @@ struct qla_hw_data {
void *swl;
/* These are used by mailbox operations. */
- volatile uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
+ uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
+ uint32_t mailbox_out32[MAILBOX_REGISTER_COUNT];
+ uint32_t aenmb[AEN_MAILBOX_REGISTER_COUNT_FX00];
mbx_cmd_t *mcp;
+ struct mbx_cmd_32 *mcp32;
+
unsigned long mbx_cmd_flags;
#define MBX_INTERRUPT 1
#define MBX_INTR_WAIT 2
@@ -3014,6 +3226,7 @@ struct qla_hw_data {
int cur_vport_count;
struct qla_chip_state_84xx *cs84xx;
+ struct qla_statistics qla_stats;
struct isp_operations *isp_ops;
struct workqueue_struct *wq;
struct qlfc_fw fw_buf;
@@ -3080,6 +3293,8 @@ struct qla_hw_data {
unsigned long host_last_rampup_time;
int cfg_lun_q_depth;
+ struct mr_data_fx00 mr;
+
struct qlt_hw_data tgt;
uint16_t thermal_support;
#define THERMAL_SUPPORT_I2C BIT_0
@@ -3109,6 +3324,8 @@ typedef struct scsi_qla_host {
uint32_t process_response_queue :1;
uint32_t difdix_supported:1;
uint32_t delete_progress:1;
+
+ uint32_t fw_tgt_reported:1;
} flags;
atomic_t loop_state;
@@ -3144,6 +3361,9 @@ typedef struct scsi_qla_host {
#define SCR_PENDING 21 /* SCR in target mode */
#define HOST_RAMP_DOWN_QUEUE_DEPTH 22
#define HOST_RAMP_UP_QUEUE_DEPTH 23
+#define PORT_UPDATE_NEEDED 24
+#define FX00_RESET_RECOVERY 25
+#define FX00_TARGET_SCAN 26
uint32_t device_flags;
#define SWITCH_FOUND BIT_0
@@ -3234,6 +3454,10 @@ struct qla_tgt_vp_map {
test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \
atomic_read(&ha->loop_state) == LOOP_DOWN)
+#define STATE_TRANSITION(ha) \
+ (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \
+ test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
+
#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \
atomic_inc(&__vha->vref_count); \
mb(); \
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index b310fa97b545..026bfde33e67 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -86,6 +86,7 @@ extern int qla2xxx_mctp_dump(scsi_qla_host_t *);
extern int
qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
+extern int qla2x00_init_rings(scsi_qla_host_t *);
/*
* Global Data in qla_os.c source file.
@@ -134,7 +135,6 @@ extern int qla2x00_post_async_adisc_work(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *,
fc_port_t *, uint16_t *);
-extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
@@ -158,6 +158,7 @@ extern int qla83xx_set_drv_presence(scsi_qla_host_t *vha);
extern int __qla83xx_set_drv_presence(scsi_qla_host_t *vha);
extern int qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
extern int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
+extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
/*
* Global Functions in qla_mid.c source file.
@@ -211,8 +212,6 @@ extern int qla24xx_start_scsi(srb_t *sp);
int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
uint16_t, uint16_t, uint8_t);
extern int qla2x00_start_sp(srb_t *);
-extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t);
-extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
extern int qla24xx_dif_start_scsi(srb_t *);
extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
@@ -424,6 +423,12 @@ extern void qla2x00_free_irqs(scsi_qla_host_t *);
extern int qla2x00_get_data_rate(scsi_qla_host_t *);
extern const char *qla2x00_get_link_speed_str(struct qla_hw_data *, uint16_t);
+extern srb_t *
+qla2x00_get_sp_from_handle(scsi_qla_host_t *, const char *, struct req_que *,
+ void *);
+extern void
+qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *,
+ uint32_t);
/*
* Global Function Prototypes in qla_sup.c source file.
@@ -561,6 +566,42 @@ extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
+/* qlafx00 related functions */
+extern int qlafx00_pci_config(struct scsi_qla_host *);
+extern int qlafx00_initialize_adapter(struct scsi_qla_host *);
+extern void qlafx00_soft_reset(scsi_qla_host_t *);
+extern int qlafx00_chip_diag(scsi_qla_host_t *);
+extern void qlafx00_config_rings(struct scsi_qla_host *);
+extern char *qlafx00_pci_info_str(struct scsi_qla_host *, char *);
+extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *);
+extern irqreturn_t qlafx00_intr_handler(int, void *);
+extern void qlafx00_enable_intrs(struct qla_hw_data *);
+extern void qlafx00_disable_intrs(struct qla_hw_data *);
+extern int qlafx00_abort_command(srb_t *);
+extern int qlafx00_abort_target(fc_port_t *, unsigned int, int);
+extern int qlafx00_lun_reset(fc_port_t *, unsigned int, int);
+extern int qlafx00_start_scsi(srb_t *);
+extern int qlafx00_abort_isp(scsi_qla_host_t *);
+extern int qlafx00_iospace_config(struct qla_hw_data *);
+extern int qlafx00_init_firmware(scsi_qla_host_t *, uint16_t);
+extern int qlafx00_fw_ready(scsi_qla_host_t *);
+extern int qlafx00_configure_devices(scsi_qla_host_t *);
+extern int qlafx00_reset_initialize(scsi_qla_host_t *);
+extern int qlafx00_fx_disc(scsi_qla_host_t *, fc_port_t *, uint8_t);
+extern int qlafx00_process_aen(struct scsi_qla_host *, struct qla_work_evt *);
+extern int qlafx00_post_aenfx_work(struct scsi_qla_host *, uint32_t,
+ uint32_t *, int);
+extern uint32_t qlafx00_fw_state_show(struct device *,
+ struct device_attribute *, char *);
+extern void qlafx00_get_host_speed(struct Scsi_Host *);
+extern void qlafx00_init_response_q_entries(struct rsp_que *);
+
+extern void qlafx00_tm_iocb(srb_t *, struct tsk_mgmt_entry_fx00 *);
+extern void qlafx00_abort_iocb(srb_t *, struct abort_iocb_entry_fx00 *);
+extern void qlafx00_fxdisc_iocb(srb_t *, struct fxdisc_entry_fx00 *);
+extern void qlafx00_timer_routine(scsi_qla_host_t *);
+extern int qlafx00_rescan_isp(scsi_qla_host_t *);
+
/* qla82xx related functions */
/* PCI related functions */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 9b455250c101..d0ea8b921177 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -639,9 +639,14 @@ void
qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn)
{
struct qla_hw_data *ha = vha->hw;
- sprintf(snn, "%s FW:v%d.%02d.%02d DVR:v%s",ha->model_number,
- ha->fw_major_version, ha->fw_minor_version,
- ha->fw_subminor_version, qla2x00_version_str);
+
+ if (IS_QLAFX00(ha))
+ sprintf(snn, "%s FW:v%s DVR:v%s", ha->model_number,
+ ha->mr.fw_version, qla2x00_version_str);
+ else
+ sprintf(snn, "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
+ ha->fw_major_version, ha->fw_minor_version,
+ ha->fw_subminor_version, qla2x00_version_str);
}
/**
@@ -923,7 +928,7 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
sns_cmd->p.gpn_data[9] != 0x02) {
ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
"GPN_ID failed, rejected request, gpn_rsp:\n");
- ql_dump_buffer(ql_dbg_disc, vha, 0x207f,
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f,
sns_cmd->p.gpn_data, 16);
rval = QLA_FUNCTION_FAILED;
} else {
@@ -1718,7 +1723,8 @@ qla2x00_fdmi_register(scsi_qla_host_t *vha)
int rval;
struct qla_hw_data *ha = vha->hw;
- if (IS_QLA2100(ha) || IS_QLA2200(ha))
+ if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
+ IS_QLAFX00(ha))
return QLA_FUNCTION_FAILED;
rval = qla2x00_mgmt_svr_login(vha);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index b59203393cb2..3565dfd8f370 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -25,7 +25,6 @@
*/
static int qla2x00_isp_firmware(scsi_qla_host_t *);
static int qla2x00_setup_chip(scsi_qla_host_t *);
-static int qla2x00_init_rings(scsi_qla_host_t *);
static int qla2x00_fw_ready(scsi_qla_host_t *);
static int qla2x00_configure_hba(scsi_qla_host_t *);
static int qla2x00_configure_loop(scsi_qla_host_t *);
@@ -83,7 +82,9 @@ qla2x00_get_async_timeout(struct scsi_qla_host *vha)
/* Firmware should use switch negotiated r_a_tov for timeout. */
tmo = ha->r_a_tov / 10 * 2;
- if (!IS_FWI2_CAPABLE(ha)) {
+ if (IS_QLAFX00(ha)) {
+ tmo = FX00_DEF_RATOV * 2;
+ } else if (!IS_FWI2_CAPABLE(ha)) {
/*
* Except for earlier ISPs where the timeout is seeded from the
* initialization control block.
@@ -1977,7 +1978,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
*
* Returns 0 on success.
*/
-static int
+int
qla2x00_init_rings(scsi_qla_host_t *vha)
{
int rval;
@@ -2012,7 +2013,10 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
if (!rsp)
continue;
/* Initialize response queue entries */
- qla2x00_init_response_q_entries(rsp);
+ if (IS_QLAFX00(ha))
+ qlafx00_init_response_q_entries(rsp);
+ else
+ qla2x00_init_response_q_entries(rsp);
}
ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
@@ -2024,11 +2028,16 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
+
+ if (IS_QLAFX00(ha)) {
+ rval = qlafx00_init_firmware(vha, ha->init_cb_size);
+ goto next_check;
+ }
+
/* Update any ISP specific firmware options before initialization. */
ha->isp_ops->update_fw_options(vha);
- ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
-
if (ha->flags.npiv_supported) {
if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
@@ -2042,6 +2051,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
}
rval = qla2x00_init_firmware(vha, ha->init_cb_size);
+next_check:
if (rval) {
ql_log(ql_log_fatal, vha, 0x00d2,
"Init Firmware **** FAILED ****.\n");
@@ -2069,6 +2079,9 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
uint16_t state[5];
struct qla_hw_data *ha = vha->hw;
+ if (IS_QLAFX00(vha->hw))
+ return qlafx00_fw_ready(vha);
+
rval = QLA_SUCCESS;
/* 20 seconds for loop down. */
@@ -3134,6 +3147,12 @@ void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
fcport->vha = vha;
+
+ if (IS_QLAFX00(vha->hw)) {
+ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
+ qla2x00_reg_remote_port(vha, fcport);
+ return;
+ }
fcport->login_retry = 0;
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
@@ -3894,15 +3913,24 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
/* Wait at most MAX_TARGET RSCNs for a stable link. */
wait_time = 256;
do {
- /* Issue a marker after FW becomes ready. */
- qla2x00_marker(vha, req, rsp, 0, 0,
- MK_SYNC_ALL);
- vha->marker_needed = 0;
+ if (!IS_QLAFX00(vha->hw)) {
+ /*
+ * Issue a marker after FW becomes
+ * ready.
+ */
+ qla2x00_marker(vha, req, rsp, 0, 0,
+ MK_SYNC_ALL);
+ vha->marker_needed = 0;
+ }
/* Remap devices on Loop. */
clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- qla2x00_configure_loop(vha);
+ if (IS_QLAFX00(vha->hw))
+ qlafx00_configure_devices(vha);
+ else
+ qla2x00_configure_loop(vha);
+
wait_time--;
} while (!atomic_read(&vha->loop_down_timer) &&
!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
@@ -3968,9 +3996,7 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
if (fcport->drport &&
atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
spin_unlock_irqrestore(&ha->vport_slock, flags);
-
qla2x00_rport_del(fcport);
-
spin_lock_irqsave(&ha->vport_slock, flags);
}
}
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 68e2c4afc134..98ab921070d2 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -5,6 +5,28 @@
* See LICENSE.qla2xxx for copyright and licensing details.
*/
+/**
+ * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
+ * Continuation Type 1 IOCBs to allocate.
+ *
+ * @vha: HA context
+ * @dsds: number of data segment descriptors needed
+ *
+ * Returns the number of IOCB entries needed to store @dsds.
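+ *
+ * For example, @dsds = 7 yields 3 IOCBs: one descriptor is embedded in
+ * the command IOCB and the remaining six span two continuation IOCBs.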
+ */
+static inline uint16_t
+qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
+{
+ uint16_t iocbs;
+
+ iocbs = 1;
+ if (dsds > 1) {
+ iocbs += (dsds - 1) / 5;
+ if ((dsds - 1) % 5)
+ iocbs++;
+ }
+ return iocbs;
+}
+
/*
* qla2x00_debounce_register
* Debounce register.
@@ -58,6 +80,17 @@ host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
}
static inline void
+host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
+{
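+ /* Copy bsize bytes as 32-bit words, converting each word to
+ * little-endian (adapter) byte order. */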
+ uint32_t *isrc = (uint32_t *) src;
+ uint32_t *odest = (uint32_t *) dst;
+ uint32_t iter = bsize >> 2;
+
+ for (; iter ; iter--)
+ *odest++ = cpu_to_le32(*isrc++);
+}
+
+static inline void
qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
{
int i;
@@ -213,12 +246,18 @@ qla2x00_init_timer(srb_t *sp, unsigned long tmo)
sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout;
add_timer(&sp->u.iocb_cmd.timer);
sp->free = qla2x00_sp_free;
+ if ((IS_QLAFX00(sp->fcport->vha->hw)) &&
+ (sp->type == SRB_FXIOCB_DCMD))
+ init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
}
static inline int
qla2x00_gid_list_size(struct qla_hw_data *ha)
{
- return sizeof(struct gid_list_info) * ha->max_fibre_devices;
+ if (IS_QLAFX00(ha))
+ return sizeof(uint32_t) * 32;
+ else
+ return sizeof(struct gid_list_info) * ha->max_fibre_devices;
}
static inline void
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index d2630317cce8..15e4080b347c 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -135,7 +135,8 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
/* Load packet defaults. */
- *((uint32_t *)(&cont_pkt->entry_type)) =
+ *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
+ __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
__constant_cpu_to_le32(CONTINUE_A64_TYPE);
return (cont_pkt);
@@ -486,6 +487,10 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
if (ha->mqenable || IS_QLA83XX(ha)) {
WRT_REG_DWORD(req->req_q_in, req->ring_index);
RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
+ } else if (IS_QLAFX00(ha)) {
+ WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
+ RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
+ QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
} else if (IS_FWI2_CAPABLE(ha)) {
WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
@@ -514,11 +519,12 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
uint16_t lun, uint8_t type)
{
mrk_entry_t *mrk;
- struct mrk_entry_24xx *mrk24;
+ struct mrk_entry_24xx *mrk24 = NULL;
+ struct mrk_entry_fx00 *mrkfx = NULL;
+
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
- mrk24 = NULL;
req = ha->req_q_map[0];
mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
if (mrk == NULL) {
@@ -531,7 +537,15 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
mrk->entry_type = MARKER_TYPE;
mrk->modifier = type;
if (type != MK_SYNC_ALL) {
- if (IS_FWI2_CAPABLE(ha)) {
+ if (IS_QLAFX00(ha)) {
+ mrkfx = (struct mrk_entry_fx00 *) mrk;
+ mrkfx->handle = MAKE_HANDLE(req->id, mrkfx->handle);
+ mrkfx->handle_hi = 0;
+ mrkfx->tgt_id = cpu_to_le16(loop_id);
+ mrkfx->lun[1] = LSB(lun);
+ mrkfx->lun[2] = MSB(lun);
+ host_to_fcp_swap(mrkfx->lun, sizeof(mrkfx->lun));
+ } else if (IS_FWI2_CAPABLE(ha)) {
mrk24 = (struct mrk_entry_24xx *) mrk;
mrk24->nport_handle = cpu_to_le16(loop_id);
mrk24->lun[1] = LSB(lun);
@@ -589,28 +603,6 @@ int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
return QLA_SUCCESS;
}
-/**
- * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
- * Continuation Type 1 IOCBs to allocate.
- *
- * @dsds: number of data segment decriptors needed
- *
- * Returns the number of IOCB entries needed to store @dsds.
- */
-inline uint16_t
-qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
-{
- uint16_t iocbs;
-
- iocbs = 1;
- if (dsds > 1) {
- iocbs += (dsds - 1) / 5;
- if ((dsds - 1) % 5)
- iocbs++;
- }
- return iocbs;
-}
-
static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
uint16_t tot_dsds)
@@ -1583,7 +1575,6 @@ queuing_error:
return QLA_FUNCTION_FAILED;
}
-
/**
* qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
* @sp: command to send to the ISP
@@ -1852,6 +1843,8 @@ skip_cmd_array:
cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
else if (IS_FWI2_CAPABLE(ha))
cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
+ else if (IS_QLAFX00(ha))
+ cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
else
cnt = qla2x00_debounce_register(
ISP_REQ_Q_OUT(ha, &reg->isp));
@@ -1869,8 +1862,13 @@ skip_cmd_array:
req->cnt -= req_cnt;
pkt = req->ring_ptr;
memset(pkt, 0, REQUEST_ENTRY_SIZE);
- pkt->entry_count = req_cnt;
- pkt->handle = handle;
+ if (IS_QLAFX00(ha)) {
+ WRT_REG_BYTE(&pkt->entry_count, req_cnt);
+ WRT_REG_WORD(&pkt->handle, handle);
+ } else {
+ pkt->entry_count = req_cnt;
+ pkt->handle = handle;
+ }
queuing_error:
return pkt;
@@ -2625,7 +2623,16 @@ qla2x00_start_sp(srb_t *sp)
qla2x00_adisc_iocb(sp, pkt);
break;
case SRB_TM_CMD:
- qla24xx_tm_iocb(sp, pkt);
+ if (IS_QLAFX00(ha))
+ qlafx00_tm_iocb(sp, pkt);
+ else
+ qla24xx_tm_iocb(sp, pkt);
+ break;
+ case SRB_FXIOCB_DCMD:
+ case SRB_FXIOCB_BCMD:
+ qlafx00_fxdisc_iocb(sp, pkt);
+ break;
+ case SRB_ABT_CMD:
+ qlafx00_abort_iocb(sp, pkt);
break;
default:
break;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index e9dbd74c20d3..259d9205d876 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -13,11 +13,7 @@
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
-#include "qla_target.h"
-
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
-static void qla2x00_process_completed_request(struct scsi_qla_host *,
- struct req_que *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
@@ -1065,9 +1061,9 @@ skip_rio:
* @ha: SCSI driver HA context
* @index: SRB index
*/
-static void
+void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
- struct req_que *req, uint32_t index)
+ struct req_que *req, uint32_t index)
{
srb_t *sp;
struct qla_hw_data *ha = vha->hw;
@@ -1101,7 +1097,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
}
}
-static srb_t *
+srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
struct req_que *req, void *iocb)
{
@@ -1994,7 +1990,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
return;
}
- lscsi_status = scsi_status & STATUS_MASK;
+ lscsi_status = scsi_status & STATUS_MASK;
fcport = sp->fcport;
@@ -2939,7 +2935,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
/* If possible, enable MSI-X. */
if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
- !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
+ !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha))
goto skip_msi;
if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
@@ -2972,7 +2968,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
skip_msix:
if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
- !IS_QLA8001(ha) && !IS_QLA82XX(ha))
+ !IS_QLA8001(ha) && !IS_QLA82XX(ha) && !IS_QLAFX00(ha))
goto skip_msi;
ret = pci_enable_msi(ha->pdev);
@@ -2998,9 +2994,11 @@ skip_msi:
"Failed to reserve interrupt %d already in use.\n",
ha->pdev->irq);
goto fail;
- } else if (!ha->flags.msi_enabled)
+ } else if (!ha->flags.msi_enabled) {
ql_dbg(ql_dbg_init, vha, 0x0125,
"INTa mode: Enabled.\n");
+ ha->flags.mr_intr_valid = 1;
+ }
clear_risc_ints:
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 43345af56431..9e5d89db7272 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -4113,7 +4113,6 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- uint32_t iter_cnt = 0x1;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
"Entered %s.\n", __func__);
@@ -4139,8 +4138,8 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
/* Iteration count */
- mcp->mb[18] = LSW(iter_cnt);
- mcp->mb[19] = MSW(iter_cnt);
+ mcp->mb[18] = LSW(mreq->iteration_count);
+ mcp->mb[19] = MSW(mreq->iteration_count);
mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
@@ -4518,7 +4517,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
goto done;
ql_log(ql_log_warn, vha, 0x10c9,
- "Thermal not supported by I2C.\n");
+ "Thermal not supported through I2C bus, trying alternate "
+ "method (ISP access).\n");
ha->thermal_support &= ~THERMAL_SUPPORT_I2C;
}
@@ -4528,7 +4528,7 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
goto done;
ql_log(ql_log_warn, vha, 0x1019,
- "Thermal not supported by ISP.\n");
+ "Thermal not supported through ISP.\n");
ha->thermal_support &= ~THERMAL_SUPPORT_ISP;
}
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
new file mode 100644
index 000000000000..729b74389f83
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -0,0 +1,3476 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/ratelimit.h>
+#include <linux/vmalloc.h>
+#include <scsi/scsi_tcq.h>
+#include <linux/utsname.h>
+
+/* QLAFX00 specific Mailbox implementation functions */
+
+/*
+ * qlafx00_mailbox_command
+ * Issue mailbox command and wait for completion.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * mcp = driver internal mbx struct pointer.
+ *
+ * Output:
+ * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
+ *
+ * Returns:
+ * 0 : QLA_SUCCESS = cmd performed successfully
+ * 1 : QLA_FUNCTION_FAILED (error encountered)
+ * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
+ *
+ * Context:
+ * Kernel context.
+ */
+static int
+qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
+{
+ int rval;
+ unsigned long flags = 0;
+ device_reg_t __iomem *reg;
+ uint8_t abort_active;
+ uint8_t io_lock_on;
+ uint16_t command = 0;
+ uint32_t *iptr;
+ uint32_t __iomem *optr;
+ uint32_t cnt;
+ uint32_t mboxes;
+ unsigned long wait_time;
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+ if (ha->pdev->error_state > pci_channel_io_frozen) {
+ ql_log(ql_log_warn, vha, 0x115c,
+ "error_state is greater than pci_channel_io_frozen, "
+ "exiting.\n");
+ return QLA_FUNCTION_TIMEOUT;
+ }
+
+ if (vha->device_flags & DFLG_DEV_FAILED) {
+ ql_log(ql_log_warn, vha, 0x115f,
+ "Device in failed state, exiting.\n");
+ return QLA_FUNCTION_TIMEOUT;
+ }
+
+ reg = ha->iobase;
+ io_lock_on = base_vha->flags.init_done;
+
+ rval = QLA_SUCCESS;
+ abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+
+ if (ha->flags.pci_channel_io_perm_failure) {
+ ql_log(ql_log_warn, vha, 0x1175,
+ "Perm failure on EEH timeout MBX, exiting.\n");
+ return QLA_FUNCTION_TIMEOUT;
+ }
+
+ if (ha->flags.isp82xx_fw_hung) {
+ /* Setting Link-Down error */
+ mcp->mb[0] = MBS_LINK_DOWN_ERROR;
+ ql_log(ql_log_warn, vha, 0x1176,
+ "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
+ rval = QLA_FUNCTION_FAILED;
+ goto premature_exit;
+ }
+
+ /*
+ * Wait for active mailbox commands to finish by waiting at most tov
+ * seconds. This is to serialize actual issuing of mailbox cmds during
+ * non ISP abort time.
+ */
+ if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
+ /* Timeout occurred. Return error. */
+ ql_log(ql_log_warn, vha, 0x1177,
+ "Cmd access timeout, cmd=0x%x, Exiting.\n",
+ mcp->mb[0]);
+ return QLA_FUNCTION_TIMEOUT;
+ }
+
+ ha->flags.mbox_busy = 1;
+ /* Save mailbox command for debug */
+ ha->mcp32 = mcp;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x1178,
+ "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Load mailbox registers. */
+ optr = (uint32_t __iomem *)&reg->ispfx00.mailbox0;
+
+ iptr = mcp->mb;
+ command = mcp->mb[0];
+ mboxes = mcp->out_mb;
+
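+ /* out_mb is a bitmask: bit N set means mailbox register N is written. */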
+ for (cnt = 0; cnt < ha->mbx_count; cnt++) {
+ if (mboxes & BIT_0)
+ WRT_REG_DWORD(optr, *iptr);
+
+ mboxes >>= 1;
+ optr++;
+ iptr++;
+ }
+
+ /* Issue set host interrupt command to send cmd out. */
+ ha->flags.mbox_int = 0;
+ clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+ ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1172,
+ (uint8_t *)mcp->mb, 16);
+ ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1173,
+ ((uint8_t *)mcp->mb + 0x10), 16);
+ ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1174,
+ ((uint8_t *)mcp->mb + 0x20), 8);
+
+ /* Unlock mbx registers and wait for interrupt */
+ ql_dbg(ql_dbg_mbx, vha, 0x1179,
+ "Going to unlock irq & waiting for interrupts. "
+ "jiffies=%lx.\n", jiffies);
+
+ /* Wait for mbx cmd completion until timeout */
+ if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
+ set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+
+ QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
+
+ clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+
+ } else {
+ ql_dbg(ql_dbg_mbx, vha, 0x112c,
+ "Cmd=%x Polling Mode.\n", command);
+
+ QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
+ while (!ha->flags.mbox_int) {
+ if (time_after(jiffies, wait_time))
+ break;
+
+ /* Check for pending interrupts. */
+ qla2x00_poll(ha->rsp_q_map[0]);
+
+ if (!ha->flags.mbox_int &&
+ !(IS_QLA2200(ha) &&
+ command == MBC_LOAD_RISC_RAM_EXTENDED))
+ usleep_range(10000, 11000);
+ } /* while */
+ ql_dbg(ql_dbg_mbx, vha, 0x112d,
+ "Waited %d sec.\n",
+ (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
+ }
+
+ /* Check whether we timed out */
+ if (ha->flags.mbox_int) {
+ uint32_t *iptr2;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x112e,
+ "Cmd=%x completed.\n", command);
+
+ /* Got interrupt. Clear the flag. */
+ ha->flags.mbox_int = 0;
+ clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
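+ /* mailbox_out32[0] carries the firmware completion status. */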
+ if (ha->mailbox_out32[0] != MBS_COMMAND_COMPLETE)
+ rval = QLA_FUNCTION_FAILED;
+
+ /* Load return mailbox registers. */
+ iptr2 = mcp->mb;
+ iptr = (uint32_t *)&ha->mailbox_out32[0];
+ mboxes = mcp->in_mb;
+ for (cnt = 0; cnt < ha->mbx_count; cnt++) {
+ if (mboxes & BIT_0)
+ *iptr2 = *iptr;
+
+ mboxes >>= 1;
+ iptr2++;
+ iptr++;
+ }
+ } else {
+
+ rval = QLA_FUNCTION_TIMEOUT;
+ }
+
+ ha->flags.mbox_busy = 0;
+
+ /* Clean up */
+ ha->mcp32 = NULL;
+
+ if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x113a,
+ "checking for additional resp interrupt.\n");
+
+ /* polling mode for non isp_abort commands. */
+ qla2x00_poll(ha->rsp_q_map[0]);
+ }
+
+ if (rval == QLA_FUNCTION_TIMEOUT &&
+ mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
+ if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
+ ha->flags.eeh_busy) {
+ /* not in dpc. schedule it for dpc to take over. */
+ ql_dbg(ql_dbg_mbx, vha, 0x115d,
+ "Timeout, schedule isp_abort_needed.\n");
+
+ if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
+ !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
+ !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+
+ ql_log(ql_log_info, base_vha, 0x115e,
+ "Mailbox cmd timeout occurred, cmd=0x%x, "
+ "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
+ "abort.\n", command, mcp->mb[0],
+ ha->flags.eeh_busy);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ } else if (!abort_active) {
+ /* call abort directly since we are in the DPC thread */
+ ql_dbg(ql_dbg_mbx, vha, 0x1160,
+ "Timeout, calling abort_isp.\n");
+
+ if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
+ !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
+ !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+
+ ql_log(ql_log_info, base_vha, 0x1161,
+ "Mailbox cmd timeout occurred, cmd=0x%x, "
+ "mb[0]=0x%x. Scheduling ISP abort ",
+ command, mcp->mb[0]);
+
+ set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
+ clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ if (ha->isp_ops->abort_isp(vha)) {
+ /* Failed. retry later. */
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ }
+ clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
+ ql_dbg(ql_dbg_mbx, vha, 0x1162,
+ "Finished abort_isp.\n");
+ }
+ }
+ }
+
+premature_exit:
+ /* Allow next mbx cmd to come in. */
+ complete(&ha->mbx_cmd_comp);
+
+ if (rval) {
+ ql_log(ql_log_warn, base_vha, 0x1163,
+ "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, "
+ "mb[3]=%x, cmd=%x ****.\n",
+ mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
+ } else {
+ ql_dbg(ql_dbg_mbx, base_vha, 0x1164, "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
+ * qlafx00_driver_shutdown
+ * Indicate a driver shutdown to firmware.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * tmo = timeout, in seconds.
+ *
+ * Returns:
+ * local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+static int
+qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo)
+{
+ int rval;
+ struct mbx_cmd_32 mc;
+ struct mbx_cmd_32 *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1166,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_MR_DRV_SHUTDOWN;
+ mcp->out_mb = MBX_0;
+ mcp->in_mb = MBX_0;
+ if (tmo)
+ mcp->tov = tmo;
+ else
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qlafx00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1167,
+ "Failed=%x.\n", rval);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1168,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
+ * qlafx00_get_firmware_state
+ * Get adapter firmware state.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * TARGET_QUEUE_LOCK must be released.
+ * ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ * qlafx00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+static int
+qlafx00_get_firmware_state(scsi_qla_host_t *vha, uint32_t *states)
+{
+ int rval;
+ struct mbx_cmd_32 mc;
+ struct mbx_cmd_32 *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1169,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
+ mcp->out_mb = MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qlafx00_mailbox_command(vha, mcp);
+
+ /* Return firmware states. */
+ states[0] = mcp->mb[1];
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x116a,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116b,
+ "Done %s.\n", __func__);
+ }
+ return rval;
+}
+
+/*
+ * qlafx00_init_firmware
+ * Initialize adapter firmware.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * dptr = Initialization control block pointer.
+ * size = size of initialization control block.
+ * TARGET_QUEUE_LOCK must be released.
+ * ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ * qlafx00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qlafx00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
+{
+ int rval;
+ struct mbx_cmd_32 mc;
+ struct mbx_cmd_32 *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116c,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
+
+ mcp->mb[1] = 0;
+ mcp->mb[2] = MSD(ha->init_cb_dma);
+ mcp->mb[3] = LSD(ha->init_cb_dma);
+
+ mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->buf_size = size;
+ mcp->flags = MBX_DMA_OUT;
+ mcp->tov = MBX_TOV_SECONDS;
+ rval = qlafx00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x116d,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116e,
+ "Done %s.\n", __func__);
+ }
+ return rval;
+}
+
+/*
+ * qlafx00_mbx_reg_test
+ */
+static int
+qlafx00_mbx_reg_test(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct mbx_cmd_32 mc;
+ struct mbx_cmd_32 *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116f,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
+ mcp->mb[1] = 0xAAAA;
+ mcp->mb[2] = 0x5555;
+ mcp->mb[3] = 0xAA55;
+ mcp->mb[4] = 0x55AA;
+ mcp->mb[5] = 0xA5A5;
+ mcp->mb[6] = 0x5A5A;
+ mcp->mb[7] = 0x2525;
+ mcp->mb[8] = 0xBBBB;
+ mcp->mb[9] = 0x6666;
+ mcp->mb[10] = 0xBB66;
+ mcp->mb[11] = 0x66BB;
+ mcp->mb[12] = 0xB6B6;
+ mcp->mb[13] = 0x6B6B;
+ mcp->mb[14] = 0x3636;
+ mcp->mb[15] = 0xCCCC;
+
+ mcp->out_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
+ MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
+ MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->buf_size = 0;
+ mcp->flags = MBX_DMA_OUT;
+ mcp->tov = MBX_TOV_SECONDS;
+ rval = qlafx00_mailbox_command(vha, mcp);
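+ /* The test expects the register patterns echoed back in mb[17..31]. */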
+ if (rval == QLA_SUCCESS) {
+ if (mcp->mb[17] != 0xAAAA || mcp->mb[18] != 0x5555 ||
+ mcp->mb[19] != 0xAA55 || mcp->mb[20] != 0x55AA)
+ rval = QLA_FUNCTION_FAILED;
+ if (mcp->mb[21] != 0xA5A5 || mcp->mb[22] != 0x5A5A ||
+ mcp->mb[23] != 0x2525 || mcp->mb[24] != 0xBBBB)
+ rval = QLA_FUNCTION_FAILED;
+ if (mcp->mb[25] != 0x6666 || mcp->mb[26] != 0xBB66 ||
+ mcp->mb[27] != 0x66BB || mcp->mb[28] != 0xB6B6)
+ rval = QLA_FUNCTION_FAILED;
+ if (mcp->mb[29] != 0x6B6B || mcp->mb[30] != 0x3636 ||
+ mcp->mb[31] != 0xCCCC)
+ rval = QLA_FUNCTION_FAILED;
+ }
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1170,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1171,
+ "Done %s.\n", __func__);
+ }
+ return rval;
+}
+
+/**
+ * qlafx00_pci_config() - Setup ISPFx00 PCI configuration registers.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qlafx00_pci_config(scsi_qla_host_t *vha)
+{
+ uint16_t w;
+ struct qla_hw_data *ha = vha->hw;
+
+ pci_set_master(ha->pdev);
+ pci_try_set_mwi(ha->pdev);
+
+ pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
+ w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
+ w &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(ha->pdev, PCI_COMMAND, w);
+
+ /* PCIe -- adjust Maximum Read Request Size (2048). */
+ if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
+ pcie_set_readrq(ha->pdev, 2048);
+
+ ha->chip_revision = ha->pdev->revision;
+
+ return QLA_SUCCESS;
+}
+
+/**
+ * qlafx00_soc_cpu_reset() - Perform warm reset of iSA (CPUs on the SoC are reset).
+ * @ha: HA context
+ */
+static inline void
+qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
+{
+ unsigned long flags = 0;
+ struct qla_hw_data *ha = vha->hw;
+ int i, core;
+ uint32_t cnt;
+
+ /* Set all 4 cores in reset */
+ for (i = 0; i < 4; i++) {
+ QLAFX00_SET_HBA_SOC_REG(ha,
+ (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01));
+ }
+
+ /* Set all 4 core Clock gating control */
+ for (i = 0; i < 4; i++) {
+ QLAFX00_SET_HBA_SOC_REG(ha,
+ (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101));
+ }
+
+ /* Reset all units in Fabric */
+ QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x11F0101));
+
+ /* Reset all interrupt control registers */
+ for (i = 0; i < 115; i++) {
+ QLAFX00_SET_HBA_SOC_REG(ha,
+ (SOC_INTERRUPT_SOURCE_I_CONTROL_REG + 4*i), (0x0));
+ }
+
+ /* Reset Timers control registers. per core */
+ for (core = 0; core < 4; core++)
+ for (i = 0; i < 8; i++)
+ QLAFX00_SET_HBA_SOC_REG(ha,
+ (SOC_CORE_TIMER_REG + 0x100*core + 4*i), (0x0));
+
+ /* Reset per core IRQ ack register */
+ for (core = 0; core < 4; core++)
+ QLAFX00_SET_HBA_SOC_REG(ha,
+ (SOC_IRQ_ACK_REG + 0x100*core), (0x3FF));
+
+ /* Set Fabric control and config to defaults */
+ QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2));
+ QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3));
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Kick in Fabric units */
+ QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0));
+
+ /* Kick in Core0 to start boot process */
+ QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00));
+
+ /* Wait 10 seconds for the soft reset to complete. */
+ for (cnt = 10; cnt; cnt--) {
+ msleep(1000);
+ barrier();
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+/**
+ * qlafx00_soft_reset() - Soft Reset ISPFx00.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+void
+qlafx00_soft_reset(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (unlikely(pci_channel_offline(ha->pdev) &&
+ ha->flags.pci_channel_io_perm_failure))
+ return;
+
+ ha->isp_ops->disable_intrs(ha);
+ qlafx00_soc_cpu_reset(vha);
+ ha->isp_ops->enable_intrs(ha);
+}
+
+/**
+ * qlafx00_chip_diag() - Test ISPFx00 for proper operation.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qlafx00_chip_diag(scsi_qla_host_t *vha)
+{
+ int rval = 0;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+
+ ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
+
+ rval = qlafx00_mbx_reg_test(vha);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x1165,
+ "Failed mailbox send register test\n");
+ } else {
+ /* Flag a successful rval */
+ rval = QLA_SUCCESS;
+ }
+ return rval;
+}
+
+void
+qlafx00_config_rings(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+ struct init_cb_fx *icb;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
+
+ /* Setup ring parameters in initialization control block. */
+ icb = (struct init_cb_fx *)ha->init_cb;
+ icb->request_q_outpointer = __constant_cpu_to_le16(0);
+ icb->response_q_inpointer = __constant_cpu_to_le16(0);
+ icb->request_q_length = cpu_to_le16(req->length);
+ icb->response_q_length = cpu_to_le16(rsp->length);
+ icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
+ icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
+ icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
+ icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
+
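+ /* Zero the hardware queue pointers to match the ICB in/out pointers. */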
+ WRT_REG_DWORD(&reg->req_q_in, 0);
+ WRT_REG_DWORD(&reg->req_q_out, 0);
+
+ WRT_REG_DWORD(&reg->rsp_q_in, 0);
+ WRT_REG_DWORD(&reg->rsp_q_out, 0);
+
+ /* PCI posting */
+ RD_REG_DWORD(&reg->rsp_q_out);
+}
+
+char *
+qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int pcie_reg;
+
+ pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
+ if (pcie_reg) {
+ strcpy(str, "PCIe iSA");
+ return str;
+ }
+ return str;
+}
+
+char *
+qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ sprintf(str, "%s", ha->mr.fw_version);
+ return str;
+}
+
+void
+qlafx00_enable_intrs(struct qla_hw_data *ha)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ha->interrupts_on = 1;
+ QLAFX00_ENABLE_ICNTRL_REG(ha);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+void
+qlafx00_disable_intrs(struct qla_hw_data *ha)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ha->interrupts_on = 0;
+ QLAFX00_DISABLE_ICNTRL_REG(ha);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void
+qlafx00_tmf_iocb_timeout(void *data)
+{
+ srb_t *sp = (srb_t *)data;
+ struct srb_iocb *tmf = &sp->u.iocb_cmd;
+
+ tmf->u.tmf.comp_status = CS_TIMEOUT;
+ complete(&tmf->u.tmf.comp);
+}
+
+static void
+qlafx00_tmf_sp_done(void *data, void *ptr, int res)
+{
+ srb_t *sp = (srb_t *)ptr;
+ struct srb_iocb *tmf = &sp->u.iocb_cmd;
+
+ complete(&tmf->u.tmf.comp);
+}
+
+static int
+qlafx00_async_tm_cmd(fc_port_t *fcport, uint32_t flags,
+ uint32_t lun, uint32_t tag)
+{
+ scsi_qla_host_t *vha = fcport->vha;
+ struct srb_iocb *tm_iocb;
+ srb_t *sp;
+ int rval = QLA_FUNCTION_FAILED;
+
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ tm_iocb = &sp->u.iocb_cmd;
+ sp->type = SRB_TM_CMD;
+ sp->name = "tmf";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
+ tm_iocb->u.tmf.flags = flags;
+ tm_iocb->u.tmf.lun = lun;
+ tm_iocb->u.tmf.data = tag;
+ sp->done = qlafx00_tmf_sp_done;
+ tm_iocb->timeout = qlafx00_tmf_iocb_timeout;
+ init_completion(&tm_iocb->u.tmf.comp);
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_async, vha, 0x507b,
+ "Task management command issued target_id=%x\n",
+ fcport->tgt_id);
+
+ wait_for_completion(&tm_iocb->u.tmf.comp);
+
+ rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
+ QLA_SUCCESS : QLA_FUNCTION_FAILED;
+
+done_free_sp:
+ sp->free(vha, sp);
+done:
+ return rval;
+}
+
+int
+qlafx00_abort_target(fc_port_t *fcport, unsigned int l, int tag)
+{
+ return qlafx00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
+}
+
+int
+qlafx00_lun_reset(fc_port_t *fcport, unsigned int l, int tag)
+{
+ return qlafx00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
+}
+
+int
+qlafx00_iospace_config(struct qla_hw_data *ha)
+{
+ if (pci_request_selected_regions(ha->pdev, ha->bars,
+ QLA2XXX_DRIVER_NAME)) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x014e,
+ "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ /* Use MMIO operations for all accesses. */
+ if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
+ ql_log_pci(ql_log_warn, ha->pdev, 0x014f,
+ "Invalid pci I/O region size (%s).\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+ if (pci_resource_len(ha->pdev, 0) < BAR0_LEN_FX00) {
+ ql_log_pci(ql_log_warn, ha->pdev, 0x0127,
+ "Invalid PCI mem BAR0 region size (%s), aborting\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ ha->cregbase =
+ ioremap_nocache(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
+ if (!ha->cregbase) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0128,
+ "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ if (!(pci_resource_flags(ha->pdev, 2) & IORESOURCE_MEM)) {
+ ql_log_pci(ql_log_warn, ha->pdev, 0x0129,
+ "region #2 not an MMIO resource (%s), aborting\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+ if (pci_resource_len(ha->pdev, 2) < BAR2_LEN_FX00) {
+ ql_log_pci(ql_log_warn, ha->pdev, 0x012a,
+ "Invalid PCI mem BAR2 region size (%s), aborting\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ ha->iobase =
+ ioremap_nocache(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
+ if (!ha->iobase) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x012b,
+ "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ /* Determine queue resources */
+ ha->max_req_queues = ha->max_rsp_queues = 1;
+
+ ql_log_pci(ql_log_info, ha->pdev, 0x012c,
+ "Bars 0x%x, iobase0 0x%p, iobase2 0x%p\n",
+ ha->bars, ha->cregbase, ha->iobase);
+
+ return 0;
+
+iospace_error_exit:
+ return -ENOMEM;
+}
+
+static void
+qlafx00_save_queue_ptrs(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
+
+ req->length_fx00 = req->length;
+ req->ring_fx00 = req->ring;
+ req->dma_fx00 = req->dma;
+
+ rsp->length_fx00 = rsp->length;
+ rsp->ring_fx00 = rsp->ring;
+ rsp->dma_fx00 = rsp->dma;
+
+ ql_dbg(ql_dbg_init, vha, 0x012d,
+ "req: %p, ring_fx00: %p, length_fx00: 0x%x,"
+ "req->dma_fx00: 0x%llx\n", req, req->ring_fx00,
+ req->length_fx00, (u64)req->dma_fx00);
+
+ ql_dbg(ql_dbg_init, vha, 0x012e,
+ "rsp: %p, ring_fx00: %p, length_fx00: 0x%x,"
+ "rsp->dma_fx00: 0x%llx\n", rsp, rsp->ring_fx00,
+ rsp->length_fx00, (u64)rsp->dma_fx00);
+}
+
+static int
+qlafx00_config_queues(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
+ dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2);
+
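+ /* The request/response rings live in BAR2 MMIO space at the
+ * firmware-supplied offsets, not in host-allocated DMA memory. */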
+ req->length = ha->req_que_len;
+ req->ring = (void *)ha->iobase + ha->req_que_off;
+ req->dma = bar2_hdl + ha->req_que_off;
+ if ((!req->ring) || (req->length == 0)) {
+ ql_log_pci(ql_log_info, ha->pdev, 0x012f,
+ "Unable to allocate memory for req_ring\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ ql_dbg(ql_dbg_init, vha, 0x0130,
+ "req: %p req_ring pointer %p req len 0x%x "
+ "req off 0x%x\n, req->dma: 0x%llx",
+ req, req->ring, req->length,
+ ha->req_que_off, (u64)req->dma);
+
+ rsp->length = ha->rsp_que_len;
+ rsp->ring = (void *)ha->iobase + ha->rsp_que_off;
+ rsp->dma = bar2_hdl + ha->rsp_que_off;
+ if ((!rsp->ring) || (rsp->length == 0)) {
+ ql_log_pci(ql_log_info, ha->pdev, 0x0131,
+ "Unable to allocate memory for rsp_ring\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ ql_dbg(ql_dbg_init, vha, 0x0132,
+ "rsp: %p rsp_ring pointer %p rsp len 0x%x "
+ "rsp off 0x%x, rsp->dma: 0x%llx\n",
+ rsp, rsp->ring, rsp->length,
+ ha->rsp_que_off, (u64)rsp->dma);
+
+ return QLA_SUCCESS;
+}
+
+static int
+qlafx00_init_fw_ready(scsi_qla_host_t *vha)
+{
+ int rval = 0;
+ unsigned long wtime;
+ uint16_t wait_time; /* Wait time, in seconds */
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+ uint32_t aenmbx, aenmbx7 = 0;
+ uint32_t state[5];
+ bool done = false;
+
+ /* 30 seconds wait - Adjust if required */
+ wait_time = 30;
+
+ /* wait time before firmware ready */
+ wtime = jiffies + (wait_time * HZ);
+ do {
+ aenmbx = RD_REG_DWORD(&reg->aenmailbox0);
+ barrier();
+ ql_dbg(ql_dbg_mbx, vha, 0x0133,
+ "aenmbx: 0x%x\n", aenmbx);
+
+ switch (aenmbx) {
+ case MBA_FW_NOT_STARTED:
+ case MBA_FW_STARTING:
+ break;
+
+ case MBA_SYSTEM_ERR:
+ case MBA_REQ_TRANSFER_ERR:
+ case MBA_RSP_TRANSFER_ERR:
+ case MBA_FW_INIT_FAILURE:
+ qlafx00_soft_reset(vha);
+ break;
+
+ case MBA_FW_RESTART_CMPLT:
+ /* Set the mbx and rqstq intr code */
+ aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
+ ha->mbx_intr_code = MSW(aenmbx7);
+ ha->rqstq_intr_code = LSW(aenmbx7);
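+ /* Queue offsets and lengths arrive in AEN mailboxes 1, 3, 5 and 6. */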
+ ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
+ ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
+ ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
+ ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
+ WRT_REG_DWORD(&reg->aenmailbox0, 0);
+ RD_REG_DWORD_RELAXED(&reg->aenmailbox0);
+ ql_dbg(ql_dbg_init, vha, 0x0134,
+ "f/w returned mbx_intr_code: 0x%x, "
+ "rqstq_intr_code: 0x%x\n",
+ ha->mbx_intr_code, ha->rqstq_intr_code);
+ QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
+ rval = QLA_SUCCESS;
+ done = true;
+ break;
+
+ default:
+ /* The fw is apparently not ready. In order to continue,
+ * we might need to issue a Mbox cmd, but the problem is
+ * that the DoorBell vector values that come with the
+ * 8060 AEN are most likely gone by now (and thus no
+ * bell would be rung on the fw side when the mbox cmd is
+ * issued). We therefore have to grab the 8060 AEN
+ * shadow regs (filled in by FW when the last 8060
+ * AEN was being posted).
+ * Do the following to determine what is needed in
+ * order to get the FW ready:
+ * 1. reload the 8060 AEN values from the shadow regs
+ * 2. clear int status to get rid of possible pending
+ * interrupts
+ * 3. issue Get FW State Mbox cmd to determine fw state
+ * Set the mbx and rqstq intr code from Shadow Regs
+ */
+ aenmbx7 = RD_REG_DWORD(&reg->initval7);
+ ha->mbx_intr_code = MSW(aenmbx7);
+ ha->rqstq_intr_code = LSW(aenmbx7);
+ ha->req_que_off = RD_REG_DWORD(&reg->initval1);
+ ha->rsp_que_off = RD_REG_DWORD(&reg->initval3);
+ ha->req_que_len = RD_REG_DWORD(&reg->initval5);
+ ha->rsp_que_len = RD_REG_DWORD(&reg->initval6);
+ ql_dbg(ql_dbg_init, vha, 0x0135,
+ "f/w returned mbx_intr_code: 0x%x, "
+ "rqstq_intr_code: 0x%x\n",
+ ha->mbx_intr_code, ha->rqstq_intr_code);
+ QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
+
+ /* Get the FW state */
+ rval = qlafx00_get_firmware_state(vha, state);
+ if (rval != QLA_SUCCESS) {
+ /* Retry if timer has not expired */
+ break;
+ }
+
+ if (state[0] == FSTATE_FX00_CONFIG_WAIT) {
+ /* Firmware is waiting to be
+ * initialized by driver
+ */
+ rval = QLA_SUCCESS;
+ done = true;
+ break;
+ }
+
+ /* Issue driver shutdown and wait until f/w recovers.
+ * Driver should continue to poll until 8060 AEN is
+ * received indicating firmware recovery.
+ */
+ ql_dbg(ql_dbg_init, vha, 0x0136,
+ "Sending Driver shutdown fw_state 0x%x\n",
+ state[0]);
+
+ rval = qlafx00_driver_shutdown(vha, 10);
+ if (rval != QLA_SUCCESS) {
+ rval = QLA_FUNCTION_FAILED;
+ break;
+ }
+ msleep(500);
+
+ wtime = jiffies + (wait_time * HZ);
+ break;
+ }
+
+ if (!done) {
+ if (time_after_eq(jiffies, wtime)) {
+ ql_dbg(ql_dbg_init, vha, 0x0137,
+ "Init f/w failed: aen[7]: 0x%x\n",
+ RD_REG_DWORD(&reg->aenmailbox7));
+ rval = QLA_FUNCTION_FAILED;
+ done = true;
+ break;
+ }
+ /* Delay for a while */
+ msleep(500);
+ }
+ } while (!done);
+
+ if (rval)
+ ql_dbg(ql_dbg_init, vha, 0x0138,
+ "%s **** FAILED ****.\n", __func__);
+ else
+ ql_dbg(ql_dbg_init, vha, 0x0139,
+ "%s **** SUCCESS ****.\n", __func__);
+
+ return rval;
+}
+
+/*
+ * qlafx00_fw_ready() - Waits for firmware ready.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qlafx00_fw_ready(scsi_qla_host_t *vha)
+{
+ int rval;
+ unsigned long wtime;
+ uint16_t wait_time; /* Wait time if loop is coming ready */
+ uint32_t state[5];
+
+ rval = QLA_SUCCESS;
+
+ wait_time = 10;
+
+ /* wait time before firmware ready */
+ wtime = jiffies + (wait_time * HZ);
+
+ /* Wait for ISP to finish init */
+ if (!vha->flags.init_done)
+ ql_dbg(ql_dbg_init, vha, 0x013a,
+ "Waiting for init to complete...\n");
+
+ do {
+ rval = qlafx00_get_firmware_state(vha, state);
+
+ if (rval == QLA_SUCCESS) {
+ if (state[0] == FSTATE_FX00_INITIALIZED) {
+ ql_dbg(ql_dbg_init, vha, 0x013b,
+ "fw_state=%x\n", state[0]);
+ rval = QLA_SUCCESS;
+ break;
+ }
+ }
+ rval = QLA_FUNCTION_FAILED;
+
+ if (time_after_eq(jiffies, wtime))
+ break;
+
+ /* Delay for a while */
+ msleep(500);
+
+ ql_dbg(ql_dbg_init, vha, 0x013c,
+ "fw_state=%x curr time=%lx.\n", state[0], jiffies);
+ } while (1);
+
+ if (rval)
+ ql_dbg(ql_dbg_init, vha, 0x013d,
+ "Firmware ready **** FAILED ****.\n");
+ else
+ ql_dbg(ql_dbg_init, vha, 0x013e,
+ "Firmware ready **** SUCCESS ****.\n");
+
+ return rval;
+}
+
+static int
+qlafx00_find_all_targets(scsi_qla_host_t *vha,
+ struct list_head *new_fcports)
+{
+ int rval;
+ uint16_t tgt_id;
+ fc_port_t *fcport, *new_fcport;
+ int found;
+ struct qla_hw_data *ha = vha->hw;
+
+ rval = QLA_SUCCESS;
+
+ if (!test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
+ return QLA_FUNCTION_FAILED;
+
+ if ((atomic_read(&vha->loop_down_timer) ||
+ STATE_TRANSITION(vha))) {
+ atomic_set(&vha->loop_down_timer, 0);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x2088,
+ "Listing Target bit map...\n");
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha,
+ 0x2089, (uint8_t *)ha->gid_list, 32);
+
+ /* Allocate temporary rmtport for any new rmtports discovered. */
+ new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+ if (new_fcport == NULL)
+ return QLA_MEMORY_ALLOC_FAILED;
+
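+ /* The gid_list buffer holds a bitmap of discovered target ids. */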
+ for_each_set_bit(tgt_id, (void *)ha->gid_list,
+ QLAFX00_TGT_NODE_LIST_SIZE) {
+
+ /* Send get target node info */
+ new_fcport->tgt_id = tgt_id;
+ rval = qlafx00_fx_disc(vha, new_fcport,
+ FXDISC_GET_TGT_NODE_INFO);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x208a,
+ "Target info scan failed -- assuming zero-entry "
+ "result...\n");
+ continue;
+ }
+
+ /* Locate matching device in database. */
+ found = 0;
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (memcmp(new_fcport->port_name,
+ fcport->port_name, WWN_SIZE))
+ continue;
+
+ found++;
+
+ /*
+ * If tgt_id is same and state FCS_ONLINE, nothing
+ * changed.
+ */
+ if (fcport->tgt_id == new_fcport->tgt_id &&
+ atomic_read(&fcport->state) == FCS_ONLINE)
+ break;
+
+ /*
+ * Tgt ID changed or device was marked to be updated.
+ */
+ ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x208b,
+ "TGT-ID Change(%s): Present tgt id: "
+ "0x%x state: 0x%x "
+ "wwnn = %llx wwpn = %llx.\n",
+ __func__, fcport->tgt_id,
+ atomic_read(&fcport->state),
+ (unsigned long long)wwn_to_u64(fcport->node_name),
+ (unsigned long long)wwn_to_u64(fcport->port_name));
+
+ ql_log(ql_log_info, vha, 0x208c,
+ "TGT-ID Announce(%s): Discovered tgt "
+ "id 0x%x wwnn = %llx "
+ "wwpn = %llx.\n", __func__, new_fcport->tgt_id,
+ (unsigned long long)
+ wwn_to_u64(new_fcport->node_name),
+ (unsigned long long)
+ wwn_to_u64(new_fcport->port_name));
+
+ if (atomic_read(&fcport->state) != FCS_ONLINE) {
+ fcport->old_tgt_id = fcport->tgt_id;
+ fcport->tgt_id = new_fcport->tgt_id;
+ ql_log(ql_log_info, vha, 0x208d,
+ "TGT-ID: New fcport Added: %p\n", fcport);
+ qla2x00_update_fcport(vha, fcport);
+ } else {
+ ql_log(ql_log_info, vha, 0x208e,
+ " Existing TGT-ID %x did not get "
+ " offline event from firmware.\n",
+ fcport->old_tgt_id);
+ qla2x00_mark_device_lost(vha, fcport, 0, 0);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ kfree(new_fcport);
+ return rval;
+ }
+ break;
+ }
+
+ if (found)
+ continue;
+
+ /* If device was not in our fcports list, then add it. */
+ list_add_tail(&new_fcport->list, new_fcports);
+
+ /* Allocate a new replacement fcport. */
+ new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+ if (new_fcport == NULL)
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ kfree(new_fcport);
+ return rval;
+}
+
+/*
+ * qlafx00_configure_all_targets
+ * Setup target devices with node ID's.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Returns:
+ * 0 = success.
+ * BIT_0 = error
+ */
+static int
+qlafx00_configure_all_targets(scsi_qla_host_t *vha)
+{
+ int rval;
+ fc_port_t *fcport, *rmptemp;
+ LIST_HEAD(new_fcports);
+
+ rval = qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
+ FXDISC_GET_TGT_NODE_LIST);
+ if (rval != QLA_SUCCESS) {
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ return rval;
+ }
+
+ rval = qlafx00_find_all_targets(vha, &new_fcports);
+ if (rval != QLA_SUCCESS) {
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ return rval;
+ }
+
+ /*
+ * Delete all previous devices marked lost.
+ */
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
+
+ if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
+ if (fcport->port_type != FCT_INITIATOR)
+ qla2x00_mark_device_lost(vha, fcport, 0, 0);
+ }
+ }
+
+ /*
+ * Add the new devices to our devices list.
+ */
+ list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
+
+ qla2x00_update_fcport(vha, fcport);
+ list_move_tail(&fcport->list, &vha->vp_fcports);
+ ql_log(ql_log_info, vha, 0x208f,
+ "Attach new target id 0x%x wwnn = %llx "
+ "wwpn = %llx.\n",
+ fcport->tgt_id,
+ (unsigned long long)wwn_to_u64(fcport->node_name),
+ (unsigned long long)wwn_to_u64(fcport->port_name));
+ }
+
+ /* Free all new device structures not processed. */
+ list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
+ list_del(&fcport->list);
+ kfree(fcport);
+ }
+
+ return rval;
+}
+
+/*
+ * qlafx00_configure_devices
+ * Updates Fibre Channel Device Database with what is actually on loop.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Returns:
+ * 0 = success.
+ * 1 = error.
+ * 2 = database was full and device was not configured.
+ */
+int
+qlafx00_configure_devices(scsi_qla_host_t *vha)
+{
+ int rval;
+ unsigned long flags, save_flags;
+
+ rval = QLA_SUCCESS;
+
+ save_flags = flags = vha->dpc_flags;
+
+ ql_dbg(ql_dbg_disc, vha, 0x2090,
+ "Configure devices -- dpc flags =0x%lx\n", flags);
+
+ rval = qlafx00_configure_all_targets(vha);
+
+ if (rval == QLA_SUCCESS) {
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ atomic_set(&vha->loop_state, LOOP_READY);
+ ql_log(ql_log_info, vha, 0x2091,
+ "Device Ready\n");
+ }
+ }
+
+ if (rval) {
+ ql_dbg(ql_dbg_disc, vha, 0x2092,
+ "%s *** FAILED ***.\n", __func__);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x2093,
+ "%s: exiting normally.\n", __func__);
+ }
+ return rval;
+}
+
+static void
+qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ fc_port_t *fcport;
+
+ vha->flags.online = 0;
+ ha->flags.chip_reset_done = 0;
+ ha->mr.fw_hbt_en = 0;
+ clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ vha->qla_stats.total_isp_aborts++;
+
+ ql_log(ql_log_info, vha, 0x013f,
+ "Performing ISP error recovery - ha = %p.\n", ha);
+
+ ha->isp_ops->reset_chip(vha);
+
+ if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer,
+ QLAFX00_LOOP_DOWN_TIME);
+ } else {
+ if (!atomic_read(&vha->loop_down_timer))
+ atomic_set(&vha->loop_down_timer,
+ QLAFX00_LOOP_DOWN_TIME);
+ }
+
+ /* Clear all async request states across all VPs. */
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->flags = 0;
+ if (atomic_read(&fcport->state) == FCS_ONLINE)
+ qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
+ }
+
+ if (!ha->flags.eeh_busy) {
+ /* Requeue all commands in outstanding command list. */
+ qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+ }
+
+ qla2x00_free_irqs(vha);
+ set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
+
+ /* Clear the Interrupts */
+ QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
+
+ ql_log(ql_log_info, vha, 0x0140,
+	    "%s Done - ha=%p.\n", __func__, ha);
+}
+
+/**
+ * qlafx00_init_response_q_entries() - Initializes response queue entries.
+ * @rsp: response queue
+ *
+ * Beginning of request ring has initialization control block already built
+ * by nvram config routine.
+ */
+void
+qlafx00_init_response_q_entries(struct rsp_que *rsp)
+{
+ uint16_t cnt;
+ response_t *pkt;
+
+ rsp->ring_ptr = rsp->ring;
+ rsp->ring_index = 0;
+ rsp->status_srb = NULL;
+ pkt = rsp->ring_ptr;
+ for (cnt = 0; cnt < rsp->length; cnt++) {
+		WRT_REG_DWORD(&pkt->signature, RESPONSE_PROCESSED);
+ pkt++;
+ }
+}
+
+int
+qlafx00_rescan_isp(scsi_qla_host_t *vha)
+{
+ uint32_t status = QLA_FUNCTION_FAILED;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+ uint32_t aenmbx7;
+
+ qla2x00_request_irqs(ha, ha->rsp_q_map[0]);
+
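+	/* The firmware publishes the new queue layout in the AEN mailboxes. */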
+ aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
+ ha->mbx_intr_code = MSW(aenmbx7);
+ ha->rqstq_intr_code = LSW(aenmbx7);
+ ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
+ ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
+ ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
+ ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
+
+ ql_dbg(ql_dbg_disc, vha, 0x2094,
+ "fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x "
+	    "Req que offset 0x%x Rsp que offset 0x%x\n",
+	    ha->mbx_intr_code, ha->rqstq_intr_code,
+	    ha->req_que_off, ha->rsp_que_off);
+
+ /* Clear the Interrupts */
+ QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
+
+ status = qla2x00_init_rings(vha);
+ if (!status) {
+ vha->flags.online = 1;
+
+ /* if no cable then assume it's good */
+ if ((vha->device_flags & DFLG_NO_CABLE))
+ status = 0;
+ /* Register system information */
+ if (qlafx00_fx_disc(vha,
+ &vha->hw->mr.fcport, FXDISC_REG_HOST_INFO))
+ ql_dbg(ql_dbg_disc, vha, 0x2095,
+ "failed to register host info\n");
+ }
+ scsi_unblock_requests(vha->host);
+ return status;
+}
+
+void
+qlafx00_timer_routine(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t fw_heart_beat;
+ uint32_t aenmbx0;
+ struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+
+ /* Check firmware health */
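+	/*
+	 * fw_hbt_cnt down-counts once per timer tick; when it reaches zero
+	 * the firmware heartbeat register is sampled and compared with the
+	 * last value read.  QLAFX00_HEARTBEAT_MISS_CNT consecutive stalls
+	 * schedule an ISP abort.
+	 */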
+ if (ha->mr.fw_hbt_cnt)
+ ha->mr.fw_hbt_cnt--;
+ else {
+ if ((!ha->flags.mr_reset_hdlr_active) &&
+ (!test_bit(UNLOADING, &vha->dpc_flags)) &&
+ (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
+ (ha->mr.fw_hbt_en)) {
+ fw_heart_beat = RD_REG_DWORD(&reg->fwheartbeat);
+ if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) {
+ ha->mr.old_fw_hbt_cnt = fw_heart_beat;
+ ha->mr.fw_hbt_miss_cnt = 0;
+ } else {
+ ha->mr.fw_hbt_miss_cnt++;
+ if (ha->mr.fw_hbt_miss_cnt ==
+ QLAFX00_HEARTBEAT_MISS_CNT) {
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ ha->mr.fw_hbt_miss_cnt = 0;
+ }
+ }
+ }
+ ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
+ }
+
+ if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) {
+ /* Reset recovery to be performed in timer routine */
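+		/*
+		 * aenmailbox0 reports firmware reset progress:
+		 * MBA_FW_RESTART_CMPLT wakes the DPC thread to rescan
+		 * targets, MBA_FW_STARTING re-arms the heartbeat monitor,
+		 * 0xFFFFFFFF means the PCI window registers need to be
+		 * reprogrammed, and an unchanged value after the reset
+		 * timer expires forces an ISP abort.
+		 */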
+ aenmbx0 = RD_REG_DWORD(&reg->aenmailbox0);
+ if (ha->mr.fw_reset_timer_exp) {
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ ha->mr.fw_reset_timer_exp = 0;
+ } else if (aenmbx0 == MBA_FW_RESTART_CMPLT) {
+ /* Wake up DPC to rescan the targets */
+ set_bit(FX00_TARGET_SCAN, &vha->dpc_flags);
+ clear_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
+ } else if ((aenmbx0 == MBA_FW_STARTING) &&
+ (!ha->mr.fw_hbt_en)) {
+ ha->mr.fw_hbt_en = 1;
+ } else if (!ha->mr.fw_reset_timer_tick) {
+ if (aenmbx0 == ha->mr.old_aenmbx0_state)
+ ha->mr.fw_reset_timer_exp = 1;
+ ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
+ } else if (aenmbx0 == 0xFFFFFFFF) {
+ uint32_t data0, data1;
+
+ data0 = QLAFX00_RD_REG(ha,
+ QLAFX00_BAR1_BASE_ADDR_REG);
+ data1 = QLAFX00_RD_REG(ha,
+ QLAFX00_PEX0_WIN0_BASE_ADDR_REG);
+
+ data0 &= 0xffff0000;
+ data1 &= 0x0000ffff;
+
+ QLAFX00_WR_REG(ha,
+ QLAFX00_PEX0_WIN0_BASE_ADDR_REG,
+ (data0 | data1));
+ } else if ((aenmbx0 & 0xFF00) == MBA_FW_POLL_STATE) {
+ ha->mr.fw_reset_timer_tick =
+ QLAFX00_MAX_RESET_INTERVAL;
+ }
+ ha->mr.old_aenmbx0_state = aenmbx0;
+ ha->mr.fw_reset_timer_tick--;
+ }
+}
+
+/*
+ * qlafx00_reset_initialize
+ *	Re-initialize after an ISP device reset.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Returns:
+ * 0 = success
+ */
+int
+qlafx00_reset_initialize(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (vha->device_flags & DFLG_DEV_FAILED) {
+ ql_dbg(ql_dbg_init, vha, 0x0142,
+ "Device in failed state\n");
+ return QLA_SUCCESS;
+ }
+
+ ha->flags.mr_reset_hdlr_active = 1;
+
+ if (vha->flags.online) {
+ scsi_block_requests(vha->host);
+ qlafx00_abort_isp_cleanup(vha);
+ }
+
+ ql_log(ql_log_info, vha, 0x0143,
+ "(%s): succeeded.\n", __func__);
+ ha->flags.mr_reset_hdlr_active = 0;
+ return QLA_SUCCESS;
+}
+
+/*
+ * qlafx00_abort_isp
+ * Resets ISP and aborts all outstanding commands.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Returns:
+ * 0 = success
+ */
+int
+qlafx00_abort_isp(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (vha->flags.online) {
+ if (unlikely(pci_channel_offline(ha->pdev) &&
+ ha->flags.pci_channel_io_perm_failure)) {
+ clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+ return QLA_SUCCESS;
+ }
+
+ scsi_block_requests(vha->host);
+ qlafx00_abort_isp_cleanup(vha);
+ }
+
+ ql_log(ql_log_info, vha, 0x0145,
+ "(%s): succeeded.\n", __func__);
+
+ return QLA_SUCCESS;
+}
+
+static inline fc_port_t*
+qlafx00_get_fcport(struct scsi_qla_host *vha, int tgt_id)
+{
+ fc_port_t *fcport;
+
+ /* Check for matching device in remote port list. */
+ fcport = NULL;
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->tgt_id == tgt_id) {
+ ql_dbg(ql_dbg_async, vha, 0x5072,
+ "Matching fcport(%p) found with TGT-ID: 0x%x "
+ "and Remote TGT_ID: 0x%x\n",
+ fcport, fcport->tgt_id, tgt_id);
+ break;
+ }
+ }
+ return fcport;
+}
+
+static void
+qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id)
+{
+ fc_port_t *fcport;
+
+ ql_log(ql_log_info, vha, 0x5073,
+ "Detach TGT-ID: 0x%x\n", tgt_id);
+
+ fcport = qlafx00_get_fcport(vha, tgt_id);
+ if (!fcport)
+ return;
+
+ qla2x00_mark_device_lost(vha, fcport, 0, 0);
+}
+
+int
+qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
+{
+ int rval = 0;
+ uint32_t aen_code, aen_data;
+
+ aen_code = FCH_EVT_VENDOR_UNIQUE;
+ aen_data = evt->u.aenfx.evtcode;
+
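+	/*
+	 * For port-database updates, mbx[1] selects the scope: 0 is a
+	 * per-target event (mbx[2] 1 = online, 2 = offline, mbx[3] holds
+	 * the target id) while 0xffff reports a global link change.
+	 */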
+ switch (evt->u.aenfx.evtcode) {
+ case QLAFX00_MBA_PORT_UPDATE: /* Port database update */
+ if (evt->u.aenfx.mbx[1] == 0) {
+ if (evt->u.aenfx.mbx[2] == 1) {
+ if (!vha->flags.fw_tgt_reported)
+ vha->flags.fw_tgt_reported = 1;
+ atomic_set(&vha->loop_down_timer, 0);
+ atomic_set(&vha->loop_state, LOOP_UP);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else if (evt->u.aenfx.mbx[2] == 2) {
+ qlafx00_tgt_detach(vha, evt->u.aenfx.mbx[3]);
+ }
+ } else if (evt->u.aenfx.mbx[1] == 0xffff) {
+ if (evt->u.aenfx.mbx[2] == 1) {
+ if (!vha->flags.fw_tgt_reported)
+ vha->flags.fw_tgt_reported = 1;
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ } else if (evt->u.aenfx.mbx[2] == 2) {
+ vha->device_flags |= DFLG_NO_CABLE;
+ qla2x00_mark_all_devices_lost(vha, 1);
+ }
+ }
+ break;
+ case QLAFX00_MBA_LINK_UP:
+ aen_code = FCH_EVT_LINKUP;
+ aen_data = 0;
+ break;
+ case QLAFX00_MBA_LINK_DOWN:
+ aen_code = FCH_EVT_LINKDOWN;
+ aen_data = 0;
+ break;
+ }
+
+ fc_host_post_event(vha->host, fc_get_event_number(),
+ aen_code, aen_data);
+
+ return rval;
+}
+
+static void
+qlafx00_update_host_attr(scsi_qla_host_t *vha, struct port_info_data *pinfo)
+{
+ u64 port_name = 0, node_name = 0;
+
+	port_name = wwn_to_u64(pinfo->port_name);
+	node_name = wwn_to_u64(pinfo->node_name);
+
+ fc_host_node_name(vha->host) = node_name;
+ fc_host_port_name(vha->host) = port_name;
+ if (!pinfo->port_type)
+ vha->hw->current_topology = ISP_CFG_F;
+ if (pinfo->link_status == QLAFX00_LINK_STATUS_UP)
+ atomic_set(&vha->loop_state, LOOP_READY);
+ else if (pinfo->link_status == QLAFX00_LINK_STATUS_DOWN)
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ vha->hw->link_data_rate = (uint16_t)pinfo->link_config;
+}
+
+static void
+qla2x00_fxdisc_iocb_timeout(void *data)
+{
+ srb_t *sp = (srb_t *)data;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+
+ complete(&lio->u.fxiocb.fxiocb_comp);
+}
+
+static void
+qla2x00_fxdisc_sp_done(void *data, void *ptr, int res)
+{
+ srb_t *sp = (srb_t *)ptr;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+
+ complete(&lio->u.fxiocb.fxiocb_comp);
+}
+
+int
+qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t fx_type)
+{
+ srb_t *sp;
+ struct srb_iocb *fdisc;
+ int rval = QLA_FUNCTION_FAILED;
+ struct qla_hw_data *ha = vha->hw;
+ struct host_system_info *phost_info;
+ struct register_host_info *preg_hsi;
+ struct new_utsname *p_sysid = NULL;
+ struct timeval tv;
+
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ fdisc = &sp->u.iocb_cmd;
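+	/*
+	 * Each FXDISC sub-command declares its DMA needs via the
+	 * SRB_FXDISC_REQ_DMA_VALID/SRB_FXDISC_RESP_DMA_VALID flags;
+	 * the matching request/response buffers are allocated below.
+	 */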
+ switch (fx_type) {
+ case FXDISC_GET_CONFIG_INFO:
+ fdisc->u.fxiocb.flags =
+ SRB_FXDISC_RESP_DMA_VALID;
+ fdisc->u.fxiocb.rsp_len = sizeof(struct config_info_data);
+ break;
+ case FXDISC_GET_PORT_INFO:
+ fdisc->u.fxiocb.flags =
+ SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
+ fdisc->u.fxiocb.rsp_len = QLAFX00_PORT_DATA_INFO;
+ fdisc->u.fxiocb.req_data = fcport->port_id;
+ break;
+ case FXDISC_GET_TGT_NODE_INFO:
+ fdisc->u.fxiocb.flags =
+ SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
+ fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_INFO;
+ fdisc->u.fxiocb.req_data = fcport->tgt_id;
+ break;
+ case FXDISC_GET_TGT_NODE_LIST:
+ fdisc->u.fxiocb.flags =
+ SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
+ fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_LIST_SIZE;
+ break;
+ case FXDISC_REG_HOST_INFO:
+ fdisc->u.fxiocb.flags = SRB_FXDISC_REQ_DMA_VALID;
+ fdisc->u.fxiocb.req_len = sizeof(struct register_host_info);
+ p_sysid = utsname();
+ if (!p_sysid) {
+ ql_log(ql_log_warn, vha, 0x303c,
+			    "Not able to get the system information\n");
+ goto done_free_sp;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (fdisc->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
+ fdisc->u.fxiocb.req_addr = dma_alloc_coherent(&ha->pdev->dev,
+ fdisc->u.fxiocb.req_len,
+ &fdisc->u.fxiocb.req_dma_handle, GFP_KERNEL);
+ if (!fdisc->u.fxiocb.req_addr)
+ goto done_free_sp;
+
+ if (fx_type == FXDISC_REG_HOST_INFO) {
+ preg_hsi = (struct register_host_info *)
+ fdisc->u.fxiocb.req_addr;
+ phost_info = &preg_hsi->hsi;
+ memset(preg_hsi, 0, sizeof(struct register_host_info));
+ phost_info->os_type = OS_TYPE_LINUX;
+ strncpy(phost_info->sysname,
+ p_sysid->sysname, SYSNAME_LENGTH);
+ strncpy(phost_info->nodename,
+ p_sysid->nodename, NODENAME_LENGTH);
+ strncpy(phost_info->release,
+ p_sysid->release, RELEASE_LENGTH);
+ strncpy(phost_info->version,
+ p_sysid->version, VERSION_LENGTH);
+ strncpy(phost_info->machine,
+ p_sysid->machine, MACHINE_LENGTH);
+ strncpy(phost_info->domainname,
+ p_sysid->domainname, DOMNAME_LENGTH);
+ strncpy(phost_info->hostdriver,
+ QLA2XXX_VERSION, VERSION_LENGTH);
+ do_gettimeofday(&tv);
+ preg_hsi->utc = (uint64_t)tv.tv_sec;
+ ql_dbg(ql_dbg_init, vha, 0x0149,
+ "ISP%04X: Host registration with firmware\n",
+ ha->pdev->device);
+ ql_dbg(ql_dbg_init, vha, 0x014a,
+			    "os_type = '%d', sysname = '%s', nodename = '%s'\n",
+ phost_info->os_type,
+ phost_info->sysname,
+ phost_info->nodename);
+ ql_dbg(ql_dbg_init, vha, 0x014b,
+ "release = '%s', version = '%s'\n",
+ phost_info->release,
+ phost_info->version);
+ ql_dbg(ql_dbg_init, vha, 0x014c,
+ "machine = '%s' "
+ "domainname = '%s', hostdriver = '%s'\n",
+ phost_info->machine,
+ phost_info->domainname,
+ phost_info->hostdriver);
+ ql_dump_buffer(ql_dbg_init + ql_dbg_disc, vha, 0x014d,
+ (uint8_t *)phost_info,
+ sizeof(struct host_system_info));
+ }
+ }
+
+ if (fdisc->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
+ fdisc->u.fxiocb.rsp_addr = dma_alloc_coherent(&ha->pdev->dev,
+ fdisc->u.fxiocb.rsp_len,
+ &fdisc->u.fxiocb.rsp_dma_handle, GFP_KERNEL);
+ if (!fdisc->u.fxiocb.rsp_addr)
+ goto done_unmap_req;
+ }
+
+ sp->type = SRB_FXIOCB_DCMD;
+ sp->name = "fxdisc";
+ qla2x00_init_timer(sp, FXDISC_TIMEOUT);
+ fdisc->timeout = qla2x00_fxdisc_iocb_timeout;
+ fdisc->u.fxiocb.req_func_type = fx_type;
+ sp->done = qla2x00_fxdisc_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_unmap_dma;
+
+ wait_for_completion(&fdisc->u.fxiocb.fxiocb_comp);
+
+ if (fx_type == FXDISC_GET_CONFIG_INFO) {
+ struct config_info_data *pinfo =
+ (struct config_info_data *) fdisc->u.fxiocb.rsp_addr;
+ memcpy(&vha->hw->mr.product_name, pinfo->product_name,
+ sizeof(vha->hw->mr.product_name));
+ memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name,
+ sizeof(vha->hw->mr.symbolic_name));
+ memcpy(&vha->hw->mr.serial_num, pinfo->serial_num,
+ sizeof(vha->hw->mr.serial_num));
+ memcpy(&vha->hw->mr.hw_version, pinfo->hw_version,
+ sizeof(vha->hw->mr.hw_version));
+ memcpy(&vha->hw->mr.fw_version, pinfo->fw_version,
+ sizeof(vha->hw->mr.fw_version));
+ strim(vha->hw->mr.fw_version);
+ memcpy(&vha->hw->mr.uboot_version, pinfo->uboot_version,
+ sizeof(vha->hw->mr.uboot_version));
+ memcpy(&vha->hw->mr.fru_serial_num, pinfo->fru_serial_num,
+ sizeof(vha->hw->mr.fru_serial_num));
+ } else if (fx_type == FXDISC_GET_PORT_INFO) {
+ struct port_info_data *pinfo =
+ (struct port_info_data *) fdisc->u.fxiocb.rsp_addr;
+ memcpy(vha->node_name, pinfo->node_name, WWN_SIZE);
+ memcpy(vha->port_name, pinfo->port_name, WWN_SIZE);
+ vha->d_id.b.domain = pinfo->port_id[0];
+ vha->d_id.b.area = pinfo->port_id[1];
+ vha->d_id.b.al_pa = pinfo->port_id[2];
+ qlafx00_update_host_attr(vha, pinfo);
+ ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0141,
+ (uint8_t *)pinfo, 16);
+ } else if (fx_type == FXDISC_GET_TGT_NODE_INFO) {
+ struct qlafx00_tgt_node_info *pinfo =
+ (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
+ memcpy(fcport->node_name, pinfo->tgt_node_wwnn, WWN_SIZE);
+ memcpy(fcport->port_name, pinfo->tgt_node_wwpn, WWN_SIZE);
+ fcport->port_type = FCT_TARGET;
+ ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0144,
+ (uint8_t *)pinfo, 16);
+ } else if (fx_type == FXDISC_GET_TGT_NODE_LIST) {
+ struct qlafx00_tgt_node_info *pinfo =
+ (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
+ ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0146,
+ (uint8_t *)pinfo, 16);
+ memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
+ }
+ rval = fdisc->u.fxiocb.result;
+
+done_unmap_dma:
+ if (fdisc->u.fxiocb.rsp_addr)
+ dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.rsp_len,
+ fdisc->u.fxiocb.rsp_addr, fdisc->u.fxiocb.rsp_dma_handle);
+
+done_unmap_req:
+ if (fdisc->u.fxiocb.req_addr)
+ dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len,
+ fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle);
+done_free_sp:
+ sp->free(vha, sp);
+done:
+ return rval;
+}
+
+static void
+qlafx00_abort_iocb_timeout(void *data)
+{
+ srb_t *sp = (srb_t *)data;
+ struct srb_iocb *abt = &sp->u.iocb_cmd;
+
+ abt->u.abt.comp_status = CS_TIMEOUT;
+ complete(&abt->u.abt.comp);
+}
+
+static void
+qlafx00_abort_sp_done(void *data, void *ptr, int res)
+{
+ srb_t *sp = (srb_t *)ptr;
+ struct srb_iocb *abt = &sp->u.iocb_cmd;
+
+ complete(&abt->u.abt.comp);
+}
+
+static int
+qlafx00_async_abt_cmd(srb_t *cmd_sp)
+{
+ scsi_qla_host_t *vha = cmd_sp->fcport->vha;
+ fc_port_t *fcport = cmd_sp->fcport;
+ struct srb_iocb *abt_iocb;
+ srb_t *sp;
+ int rval = QLA_FUNCTION_FAILED;
+
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ abt_iocb = &sp->u.iocb_cmd;
+ sp->type = SRB_ABT_CMD;
+ sp->name = "abort";
+ qla2x00_init_timer(sp, FXDISC_TIMEOUT);
+ abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
+ sp->done = qlafx00_abort_sp_done;
+ abt_iocb->timeout = qlafx00_abort_iocb_timeout;
+ init_completion(&abt_iocb->u.abt.comp);
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_async, vha, 0x507c,
+ "Abort command issued - hdl=%x, target_id=%x\n",
+ cmd_sp->handle, fcport->tgt_id);
+
+ wait_for_completion(&abt_iocb->u.abt.comp);
+
+ rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
+ QLA_SUCCESS : QLA_FUNCTION_FAILED;
+
+done_free_sp:
+ sp->free(vha, sp);
+done:
+ return rval;
+}
+
+int
+qlafx00_abort_command(srb_t *sp)
+{
+	unsigned long flags = 0;
+	uint32_t handle;
+ fc_port_t *fcport = sp->fcport;
+ struct scsi_qla_host *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = vha->req;
+
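+	/* Verify the command is still outstanding before issuing the abort. */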
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (handle = 1; handle < DEFAULT_OUTSTANDING_COMMANDS; handle++) {
+ if (req->outstanding_cmds[handle] == sp)
+ break;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (handle == DEFAULT_OUTSTANDING_COMMANDS) {
+ /* Command not found. */
+ return QLA_FUNCTION_FAILED;
+ }
+ return qlafx00_async_abt_cmd(sp);
+}
+
+/*
+ * qlafx00_initialize_adapter
+ * Initialize board.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ *
+ * Returns:
+ * 0 = success
+ */
+int
+qlafx00_initialize_adapter(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Clear adapter flags. */
+ vha->flags.online = 0;
+ ha->flags.chip_reset_done = 0;
+ vha->flags.reset_active = 0;
+ ha->flags.pci_channel_io_perm_failure = 0;
+ ha->flags.eeh_busy = 0;
+ ha->thermal_support = 0;
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ vha->device_flags = DFLG_NO_CABLE;
+ vha->dpc_flags = 0;
+ vha->flags.management_server_logged_in = 0;
+ vha->marker_needed = 0;
+ ha->isp_abort_cnt = 0;
+ ha->beacon_blink_led = 0;
+
+ set_bit(0, ha->req_qid_map);
+ set_bit(0, ha->rsp_qid_map);
+
+ ql_dbg(ql_dbg_init, vha, 0x0147,
+ "Configuring PCI space...\n");
+
+ rval = ha->isp_ops->pci_config(vha);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x0148,
+ "Unable to configure PCI space.\n");
+ return rval;
+ }
+
+ rval = qlafx00_init_fw_ready(vha);
+ if (rval != QLA_SUCCESS)
+ return rval;
+
+ qlafx00_save_queue_ptrs(vha);
+
+ rval = qlafx00_config_queues(vha);
+ if (rval != QLA_SUCCESS)
+ return rval;
+
+ /*
+ * Allocate the array of outstanding commands
+ * now that we know the firmware resources.
+ */
+ rval = qla2x00_alloc_outstanding_cmds(ha, vha->req);
+ if (rval != QLA_SUCCESS)
+ return rval;
+
+ rval = qla2x00_init_rings(vha);
+ ha->flags.chip_reset_done = 1;
+
+ return rval;
+}
+
+uint32_t
+qlafx00_fw_state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int rval = QLA_FUNCTION_FAILED;
+ uint32_t state[1];
+
+ if (qla2x00_reset_active(vha))
+ ql_log(ql_log_warn, vha, 0x70ce,
+ "ISP reset active.\n");
+	else if (!vha->hw->flags.eeh_busy)
+		rval = qlafx00_get_firmware_state(vha, state);
+ if (rval != QLA_SUCCESS)
+ memset(state, -1, sizeof(state));
+
+ return state[0];
+}
+
+void
+qlafx00_get_host_speed(struct Scsi_Host *shost)
+{
+ struct qla_hw_data *ha = ((struct scsi_qla_host *)
+ (shost_priv(shost)))->hw;
+ u32 speed = FC_PORTSPEED_UNKNOWN;
+
+ switch (ha->link_data_rate) {
+ case QLAFX00_PORT_SPEED_2G:
+ speed = FC_PORTSPEED_2GBIT;
+ break;
+ case QLAFX00_PORT_SPEED_4G:
+ speed = FC_PORTSPEED_4GBIT;
+ break;
+ case QLAFX00_PORT_SPEED_8G:
+ speed = FC_PORTSPEED_8GBIT;
+ break;
+ case QLAFX00_PORT_SPEED_10G:
+ speed = FC_PORTSPEED_10GBIT;
+ break;
+ }
+ fc_host_speed(shost) = speed;
+}
+
+/* QLAFX00 specific ISR implementation functions */
+
+static inline void
+qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
+ uint32_t sense_len, struct rsp_que *rsp, int res)
+{
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct scsi_cmnd *cp = GET_CMD_SP(sp);
+ uint32_t track_sense_len;
+
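+	/*
+	 * The firmware may return more sense data than one status entry can
+	 * carry.  FW_SENSE_LEN tracks the bytes still expected from the
+	 * firmware while CMD_SENSE_LEN tracks the room left in
+	 * cp->sense_buffer; status-continuation entries drain the rest.
+	 */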
+ SET_FW_SENSE_LEN(sp, sense_len);
+
+ if (sense_len >= SCSI_SENSE_BUFFERSIZE)
+ sense_len = SCSI_SENSE_BUFFERSIZE;
+
+ SET_CMD_SENSE_LEN(sp, sense_len);
+ SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
+ track_sense_len = sense_len;
+
+ if (sense_len > par_sense_len)
+ sense_len = par_sense_len;
+
+ memcpy(cp->sense_buffer, sense_data, sense_len);
+
+ SET_FW_SENSE_LEN(sp, GET_FW_SENSE_LEN(sp) - sense_len);
+
+ SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
+ track_sense_len -= sense_len;
+ SET_CMD_SENSE_LEN(sp, track_sense_len);
+
+ ql_dbg(ql_dbg_io, vha, 0x304d,
+ "sense_len=0x%x par_sense_len=0x%x track_sense_len=0x%x.\n",
+ sense_len, par_sense_len, track_sense_len);
+ if (GET_FW_SENSE_LEN(sp) > 0) {
+ rsp->status_srb = sp;
+ cp->result = res;
+ }
+
+ if (sense_len) {
+ ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039,
+ "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
+ sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
+ cp);
+ ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049,
+ cp->sense_buffer, sense_len);
+ }
+}
+
+static void
+qlafx00_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct tsk_mgmt_entry_fx00 *pkt, srb_t *sp,
+ uint16_t sstatus, uint16_t cpstatus)
+{
+ struct srb_iocb *tmf;
+
+ tmf = &sp->u.iocb_cmd;
+ if (cpstatus != CS_COMPLETE ||
+ (sstatus & SS_RESPONSE_INFO_LEN_VALID))
+ cpstatus = CS_INCOMPLETE;
+ tmf->u.tmf.comp_status = cpstatus;
+ sp->done(vha, sp, 0);
+}
+
+static void
+qlafx00_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct abort_iocb_entry_fx00 *pkt)
+{
+ const char func[] = "ABT_IOCB";
+ srb_t *sp;
+ struct srb_iocb *abt;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ if (!sp)
+ return;
+
+ abt = &sp->u.iocb_cmd;
+ abt->u.abt.comp_status = le32_to_cpu(pkt->tgt_id_sts);
+ sp->done(vha, sp, 0);
+}
+
+static void
+qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct ioctl_iocb_entry_fx00 *pkt)
+{
+ const char func[] = "IOSB_IOCB";
+ srb_t *sp;
+ struct fc_bsg_job *bsg_job;
+ struct srb_iocb *iocb_job;
+	int res = 0;
+ struct qla_mt_iocb_rsp_fx00 fstatus;
+ uint8_t *fw_sts_ptr;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ if (!sp)
+ return;
+
+ if (sp->type == SRB_FXIOCB_DCMD) {
+ iocb_job = &sp->u.iocb_cmd;
+ iocb_job->u.fxiocb.seq_number = le32_to_cpu(pkt->seq_no);
+ iocb_job->u.fxiocb.fw_flags = le32_to_cpu(pkt->fw_iotcl_flags);
+ iocb_job->u.fxiocb.result = le32_to_cpu(pkt->status);
+ if (iocb_job->u.fxiocb.flags & SRB_FXDISC_RSP_DWRD_VALID)
+ iocb_job->u.fxiocb.req_data =
+ le32_to_cpu(pkt->dataword_r);
+ } else {
+ bsg_job = sp->u.bsg_job;
+
+ memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00));
+
+ fstatus.reserved_1 = pkt->reserved_0;
+ fstatus.func_type = pkt->comp_func_num;
+ fstatus.ioctl_flags = pkt->fw_iotcl_flags;
+ fstatus.ioctl_data = pkt->dataword_r;
+ fstatus.adapid = pkt->adapid;
+ fstatus.adapid_hi = pkt->adapid_hi;
+ fstatus.reserved_2 = pkt->reserved_1;
+ fstatus.res_count = pkt->residuallen;
+ fstatus.status = pkt->status;
+ fstatus.seq_number = pkt->seq_no;
+ memcpy(fstatus.reserved_3,
+ pkt->reserved_2, 20 * sizeof(uint8_t));
+
+ fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
+ sizeof(struct fc_bsg_reply);
+
+ memcpy(fw_sts_ptr, (uint8_t *)&fstatus,
+ sizeof(struct qla_mt_iocb_rsp_fx00));
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
+ sizeof(struct qla_mt_iocb_rsp_fx00) + sizeof(uint8_t);
+
+ ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
+ sp->fcport->vha, 0x5080,
+ (uint8_t *)pkt, sizeof(struct ioctl_iocb_entry_fx00));
+
+ ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
+ sp->fcport->vha, 0x5074,
+ (uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00));
+
+ res = bsg_job->reply->result = DID_OK << 16;
+ bsg_job->reply->reply_payload_rcv_len =
+ bsg_job->reply_payload.payload_len;
+ }
+ sp->done(vha, sp, res);
+}
+
+/**
+ * qlafx00_status_entry() - Process a Status IOCB entry.
+ * @vha: SCSI driver HA context
+ * @rsp: response queue
+ * @pkt: Entry pointer
+ */
+static void
+qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
+{
+ srb_t *sp;
+ fc_port_t *fcport;
+ struct scsi_cmnd *cp;
+ struct sts_entry_fx00 *sts;
+ uint16_t comp_status;
+ uint16_t scsi_status;
+ uint16_t ox_id;
+ uint8_t lscsi_status;
+ int32_t resid;
+ uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
+ fw_resid_len;
+ uint8_t *rsp_info = NULL, *sense_data = NULL;
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t hindex, handle;
+ uint16_t que;
+ struct req_que *req;
+ int logit = 1;
+ int res = 0;
+
+ sts = (struct sts_entry_fx00 *) pkt;
+
+ comp_status = le16_to_cpu(sts->comp_status);
+ scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
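+	/*
+	 * The completion handle encodes the request queue number in its
+	 * MSW, e.g. 0x0001000a is handle 0xa on request queue 1.
+	 */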
+ hindex = sts->handle;
+ handle = LSW(hindex);
+
+ que = MSW(hindex);
+ req = ha->req_q_map[que];
+
+ /* Validate handle. */
+ if (handle < req->num_outstanding_cmds)
+ sp = req->outstanding_cmds[handle];
+ else
+ sp = NULL;
+
+ if (sp == NULL) {
+ ql_dbg(ql_dbg_io, vha, 0x3034,
+ "Invalid status handle (0x%x).\n", handle);
+
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ return;
+ }
+
+ if (sp->type == SRB_TM_CMD) {
+ req->outstanding_cmds[handle] = NULL;
+ qlafx00_tm_iocb_entry(vha, req, pkt, sp,
+ scsi_status, comp_status);
+ return;
+ }
+
+ /* Fast path completion. */
+ if (comp_status == CS_COMPLETE && scsi_status == 0) {
+ qla2x00_do_host_ramp_up(vha);
+ qla2x00_process_completed_request(vha, req, handle);
+ return;
+ }
+
+ req->outstanding_cmds[handle] = NULL;
+ cp = GET_CMD_SP(sp);
+ if (cp == NULL) {
+ ql_dbg(ql_dbg_io, vha, 0x3048,
+ "Command already returned (0x%x/%p).\n",
+ handle, sp);
+
+ return;
+ }
+
+ lscsi_status = scsi_status & STATUS_MASK;
+
+ fcport = sp->fcport;
+
+ ox_id = 0;
+ sense_len = par_sense_len = rsp_info_len = resid_len =
+ fw_resid_len = 0;
+ if (scsi_status & SS_SENSE_LEN_VALID)
+ sense_len = le32_to_cpu(sts->sense_len);
+ if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
+ resid_len = le32_to_cpu(sts->residual_len);
+ if (comp_status == CS_DATA_UNDERRUN)
+ fw_resid_len = le32_to_cpu(sts->residual_len);
+ rsp_info = sense_data = sts->data;
+ par_sense_len = sizeof(sts->data);
+
+ /* Check for overrun. */
+ if (comp_status == CS_COMPLETE &&
+ scsi_status & SS_RESIDUAL_OVER)
+ comp_status = CS_DATA_OVERRUN;
+
+ /*
+	 * Based on host and SCSI status, generate the Linux status code.
+ */
+ switch (comp_status) {
+ case CS_COMPLETE:
+ case CS_QUEUE_FULL:
+ if (scsi_status == 0) {
+ res = DID_OK << 16;
+ break;
+ }
+ if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
+ resid = resid_len;
+ scsi_set_resid(cp, resid);
+
+ if (!lscsi_status &&
+ ((unsigned)(scsi_bufflen(cp) - resid) <
+ cp->underflow)) {
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3050,
+ "Mid-layer underflow "
+ "detected (0x%x of 0x%x bytes).\n",
+ resid, scsi_bufflen(cp));
+
+ res = DID_ERROR << 16;
+ break;
+ }
+ }
+ res = DID_OK << 16 | lscsi_status;
+
+ if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3051,
+ "QUEUE FULL detected.\n");
+ break;
+ }
+ logit = 0;
+ if (lscsi_status != SS_CHECK_CONDITION)
+ break;
+
+ memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ if (!(scsi_status & SS_SENSE_LEN_VALID))
+ break;
+
+ qlafx00_handle_sense(sp, sense_data, par_sense_len, sense_len,
+ rsp, res);
+ break;
+
+ case CS_DATA_UNDERRUN:
+ /* Use F/W calculated residual length. */
+ if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
+ resid = fw_resid_len;
+ else
+ resid = resid_len;
+ scsi_set_resid(cp, resid);
+ if (scsi_status & SS_RESIDUAL_UNDER) {
+ if ((IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
+ && fw_resid_len != resid_len) {
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3052,
+ "Dropped frame(s) detected "
+ "(0x%x of 0x%x bytes).\n",
+ resid, scsi_bufflen(cp));
+
+ res = DID_ERROR << 16 | lscsi_status;
+ goto check_scsi_status;
+ }
+
+ if (!lscsi_status &&
+ ((unsigned)(scsi_bufflen(cp) - resid) <
+ cp->underflow)) {
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3053,
+ "Mid-layer underflow "
+ "detected (0x%x of 0x%x bytes, "
+ "cp->underflow: 0x%x).\n",
+ resid, scsi_bufflen(cp), cp->underflow);
+
+ res = DID_ERROR << 16;
+ break;
+ }
+ } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
+ lscsi_status != SAM_STAT_BUSY) {
+ /*
+			 * A SCSI status of TASK SET FULL or BUSY indicates
+			 * the task did not complete.
+ */
+
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3054,
+ "Dropped frame(s) detected (0x%x "
+ "of 0x%x bytes).\n", resid,
+ scsi_bufflen(cp));
+
+ res = DID_ERROR << 16 | lscsi_status;
+ goto check_scsi_status;
+ } else {
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3055,
+ "scsi_status: 0x%x, lscsi_status: 0x%x\n",
+ scsi_status, lscsi_status);
+ }
+
+ res = DID_OK << 16 | lscsi_status;
+ logit = 0;
+
+check_scsi_status:
+ /*
+ * Check to see if SCSI Status is non zero. If so report SCSI
+ * Status.
+ */
+ if (lscsi_status != 0) {
+ if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3056,
+ "QUEUE FULL detected.\n");
+ logit = 1;
+ break;
+ }
+ if (lscsi_status != SS_CHECK_CONDITION)
+ break;
+
+ memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ if (!(scsi_status & SS_SENSE_LEN_VALID))
+ break;
+
+ qlafx00_handle_sense(sp, sense_data, par_sense_len,
+ sense_len, rsp, res);
+ }
+ break;
+
+ case CS_PORT_LOGGED_OUT:
+ case CS_PORT_CONFIG_CHG:
+ case CS_PORT_BUSY:
+ case CS_INCOMPLETE:
+ case CS_PORT_UNAVAILABLE:
+ case CS_TIMEOUT:
+ case CS_RESET:
+
+ /*
+ * We are going to have the fc class block the rport
+ * while we try to recover so instruct the mid layer
+ * to requeue until the class decides how to handle this.
+ */
+ res = DID_TRANSPORT_DISRUPTED << 16;
+
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3057,
+ "Port down status: port-state=0x%x.\n",
+ atomic_read(&fcport->state));
+
+ if (atomic_read(&fcport->state) == FCS_ONLINE)
+ qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
+ break;
+
+ case CS_ABORTED:
+ res = DID_RESET << 16;
+ break;
+
+ default:
+ res = DID_ERROR << 16;
+ break;
+ }
+
+ if (logit)
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3058,
+ "FCP command status: 0x%x-0x%x (0x%x) "
+		    "nexus=%ld:%d:%d tgt_id: 0x%x lscsi_status: 0x%x "
+ "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
+ "rsp_info=0x%x resid=0x%x fw_resid=0x%x "
+ "sense_len=0x%x, par_sense_len=0x%x, rsp_info_len=0x%x\n",
+ comp_status, scsi_status, res, vha->host_no,
+ cp->device->id, cp->device->lun, fcport->tgt_id,
+ lscsi_status, cp->cmnd[0], cp->cmnd[1], cp->cmnd[2],
+ cp->cmnd[3], cp->cmnd[4], cp->cmnd[5], cp->cmnd[6],
+ cp->cmnd[7], cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp),
+ rsp_info_len, resid_len, fw_resid_len, sense_len,
+ par_sense_len, rsp_info_len);
+
+ if (!res)
+ qla2x00_do_host_ramp_up(vha);
+
+ if (rsp->status_srb == NULL)
+ sp->done(ha, sp, res);
+}
+
+/**
+ * qlafx00_status_cont_entry() - Process a Status Continuation entry.
+ * @rsp: response queue
+ * @pkt: Entry pointer
+ *
+ * Extended sense data.
+ */
+static void
+qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
+{
+ uint8_t sense_sz = 0;
+ struct qla_hw_data *ha = rsp->hw;
+ struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
+ srb_t *sp = rsp->status_srb;
+ struct scsi_cmnd *cp;
+ uint32_t sense_len;
+ uint8_t *sense_ptr;
+
+ if (!sp) {
+ ql_dbg(ql_dbg_io, vha, 0x3037,
+ "no SP, sp = %p\n", sp);
+ return;
+ }
+
+ if (!GET_FW_SENSE_LEN(sp)) {
+ ql_dbg(ql_dbg_io, vha, 0x304b,
+ "no fw sense data, sp = %p\n", sp);
+ return;
+ }
+ cp = GET_CMD_SP(sp);
+ if (cp == NULL) {
+ ql_log(ql_log_warn, vha, 0x303b,
+ "cmd is NULL: already returned to OS (sp=%p).\n", sp);
+
+ rsp->status_srb = NULL;
+ return;
+ }
+
+ if (!GET_CMD_SENSE_LEN(sp)) {
+ ql_dbg(ql_dbg_io, vha, 0x304c,
+ "no sense data, sp = %p\n", sp);
+ } else {
+ sense_len = GET_CMD_SENSE_LEN(sp);
+ sense_ptr = GET_CMD_SENSE_PTR(sp);
+ ql_dbg(ql_dbg_io, vha, 0x304f,
+ "sp=%p sense_len=0x%x sense_ptr=%p.\n",
+ sp, sense_len, sense_ptr);
+
+ if (sense_len > sizeof(pkt->data))
+ sense_sz = sizeof(pkt->data);
+ else
+ sense_sz = sense_len;
+
+ /* Move sense data. */
+ ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304e,
+ (uint8_t *)pkt, sizeof(sts_cont_entry_t));
+ memcpy(sense_ptr, pkt->data, sense_sz);
+ ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304a,
+ sense_ptr, sense_sz);
+
+ sense_len -= sense_sz;
+ sense_ptr += sense_sz;
+
+ SET_CMD_SENSE_PTR(sp, sense_ptr);
+ SET_CMD_SENSE_LEN(sp, sense_len);
+ }
+ sense_len = GET_FW_SENSE_LEN(sp);
+ sense_len = (sense_len > sizeof(pkt->data)) ?
+ (sense_len - sizeof(pkt->data)) : 0;
+ SET_FW_SENSE_LEN(sp, sense_len);
+
+ /* Place command on done queue. */
+ if (sense_len == 0) {
+ rsp->status_srb = NULL;
+ sp->done(ha, sp, cp->result);
+ }
+}
+
+/**
+ * qlafx00_multistatus_entry() - Process Multi response queue entries.
+ * @vha: SCSI driver HA context
+ * @rsp: response queue
+ * @pkt: Entry pointer
+ */
+static void
+qlafx00_multistatus_entry(struct scsi_qla_host *vha,
+ struct rsp_que *rsp, void *pkt)
+{
+ srb_t *sp;
+ struct multi_sts_entry_fx00 *stsmfx;
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t handle, hindex, handle_count, i;
+ uint16_t que;
+ struct req_que *req;
+ uint32_t *handle_ptr;
+
+ stsmfx = (struct multi_sts_entry_fx00 *) pkt;
+
+ handle_count = stsmfx->handle_count;
+
+ if (handle_count > MAX_HANDLE_COUNT) {
+ ql_dbg(ql_dbg_io, vha, 0x3035,
+ "Invalid handle count (0x%x).\n", handle_count);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ return;
+ }
+
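+	/*
+	 * A multi-status entry batches up to MAX_HANDLE_COUNT fast-path
+	 * completions; each handle is completed as a successful command.
+	 */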
+ handle_ptr = (uint32_t *) &stsmfx->handles[0];
+
+ for (i = 0; i < handle_count; i++) {
+ hindex = le32_to_cpu(*handle_ptr);
+ handle = LSW(hindex);
+ que = MSW(hindex);
+ req = ha->req_q_map[que];
+
+ /* Validate handle. */
+ if (handle < req->num_outstanding_cmds)
+ sp = req->outstanding_cmds[handle];
+ else
+ sp = NULL;
+
+ if (sp == NULL) {
+ ql_dbg(ql_dbg_io, vha, 0x3044,
+ "Invalid status handle (0x%x).\n", handle);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ return;
+ }
+ qla2x00_process_completed_request(vha, req, handle);
+ handle_ptr++;
+ }
+}
+
+/**
+ * qlafx00_error_entry() - Process an error entry.
+ * @vha: SCSI driver HA context
+ * @rsp: response queue
+ * @pkt: Entry pointer
+ * @estatus: firmware error status
+ * @etype: entry type of the errored IOCB
+ */
+static void
+qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
+ struct sts_entry_fx00 *pkt, uint8_t estatus, uint8_t etype)
+{
+ srb_t *sp;
+ struct qla_hw_data *ha = vha->hw;
+ const char func[] = "ERROR-IOCB";
+ uint16_t que = MSW(pkt->handle);
+ struct req_que *req = NULL;
+ int res = DID_ERROR << 16;
+
+ ql_dbg(ql_dbg_async, vha, 0x507f,
+ "type of error status in response: 0x%x\n", estatus);
+
+ req = ha->req_q_map[que];
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ if (sp) {
+ sp->done(ha, sp, res);
+ return;
+ }
+
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+}
+
+/**
+ * qlafx00_process_response_queue() - Process response queue entries.
+ * @vha: SCSI driver HA context
+ * @rsp: response queue
+ */
+static void
+qlafx00_process_response_queue(struct scsi_qla_host *vha,
+ struct rsp_que *rsp)
+{
+ struct sts_entry_fx00 *pkt;
+ response_t *lptr;
+
+ if (!vha->flags.online)
+ return;
+
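+	/*
+	 * Consume entries until a slot whose signature still reads
+	 * RESPONSE_PROCESSED is reached; each entry is copied out of the
+	 * I/O-mapped ring before dispatch and the slot is then re-stamped.
+	 */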
+ while (RD_REG_DWORD(&(rsp->ring_ptr->signature)) !=
+ RESPONSE_PROCESSED) {
+ lptr = rsp->ring_ptr;
+ memcpy_fromio(rsp->rsp_pkt, lptr, sizeof(rsp->rsp_pkt));
+ pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt;
+
+ rsp->ring_index++;
+ if (rsp->ring_index == rsp->length) {
+ rsp->ring_index = 0;
+ rsp->ring_ptr = rsp->ring;
+ } else {
+ rsp->ring_ptr++;
+ }
+
+ if (pkt->entry_status != 0 &&
+ pkt->entry_type != IOCTL_IOSB_TYPE_FX00) {
+ qlafx00_error_entry(vha, rsp,
+ (struct sts_entry_fx00 *)pkt, pkt->entry_status,
+ pkt->entry_type);
+			goto next_iter;
+ }
+
+ switch (pkt->entry_type) {
+ case STATUS_TYPE_FX00:
+ qlafx00_status_entry(vha, rsp, pkt);
+ break;
+
+ case STATUS_CONT_TYPE_FX00:
+ qlafx00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
+ break;
+
+ case MULTI_STATUS_TYPE_FX00:
+ qlafx00_multistatus_entry(vha, rsp, pkt);
+ break;
+
+ case ABORT_IOCB_TYPE_FX00:
+ qlafx00_abort_iocb_entry(vha, rsp->req,
+ (struct abort_iocb_entry_fx00 *)pkt);
+ break;
+
+ case IOCTL_IOSB_TYPE_FX00:
+ qlafx00_ioctl_iosb_entry(vha, rsp->req,
+ (struct ioctl_iocb_entry_fx00 *)pkt);
+ break;
+ default:
+ /* Type Not Supported. */
+ ql_dbg(ql_dbg_async, vha, 0x5081,
+ "Received unknown response pkt type %x "
+ "entry status=%x.\n",
+ pkt->entry_type, pkt->entry_status);
+ break;
+ }
+next_iter:
+ WRT_REG_DWORD(&lptr->signature, RESPONSE_PROCESSED);
+ wmb();
+ }
+
+ /* Adjust ring index */
+ WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
+}
+
+/**
+ * qlafx00_async_event() - Process asynchronous events.
+ * @vha: SCSI driver HA context
+ */
+static void
+qlafx00_async_event(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_fx00 __iomem *reg;
+ int data_size = 1;
+
+ reg = &ha->iobase->ispfx00;
+ /* Setup to process RIO completion. */
+ switch (ha->aenmb[0]) {
+ case QLAFX00_MBA_SYSTEM_ERR: /* System Error */
+ ql_log(ql_log_warn, vha, 0x5079,
+		    "ISP System Error - mbx0=%x\n", ha->aenmb[0]);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ break;
+
+ case QLAFX00_MBA_SHUTDOWN_RQSTD: /* Shutdown requested */
+ ql_dbg(ql_dbg_async, vha, 0x5076,
+ "Asynchronous FW shutdown requested.\n");
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ break;
+
+ case QLAFX00_MBA_PORT_UPDATE: /* Port database update */
+ ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
+ ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
+ ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
+ ql_dbg(ql_dbg_async, vha, 0x5077,
+		    "Asynchronous port update received "
+ "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
+ ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]);
+ data_size = 4;
+ break;
+ default:
+ ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
+ ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
+ ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
+ ha->aenmb[4] = RD_REG_WORD(&reg->aenmailbox4);
+ ha->aenmb[5] = RD_REG_WORD(&reg->aenmailbox5);
+ ha->aenmb[6] = RD_REG_WORD(&reg->aenmailbox6);
+ ha->aenmb[7] = RD_REG_WORD(&reg->aenmailbox7);
+ ql_dbg(ql_dbg_async, vha, 0x5078,
+ "AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n",
+ ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3],
+ ha->aenmb[4], ha->aenmb[5], ha->aenmb[6], ha->aenmb[7]);
+ break;
+ }
+ qlafx00_post_aenfx_work(vha, ha->aenmb[0],
+ (uint32_t *)ha->aenmb, data_size);
+}
+
+/**
+ * qlafx00_mbx_completion() - Process mailbox command completions.
+ * @vha: SCSI driver HA context
+ * @mb0: value of mailbox0 register
+ */
+static void
+qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
+{
+ uint16_t cnt;
+ uint16_t __iomem *wptr;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+
+ if (!ha->mcp32)
+ ql_dbg(ql_dbg_async, vha, 0x507e, "MBX pointer ERROR.\n");
+
+ /* Load return mailbox registers. */
+ ha->flags.mbox_int = 1;
+ ha->mailbox_out32[0] = mb0;
+ wptr = (uint16_t __iomem *)&reg->mailbox17;
+
+ for (cnt = 1; cnt < ha->mbx_count; cnt++) {
+ ha->mailbox_out32[cnt] = RD_REG_WORD(wptr);
+ wptr++;
+ }
+}
+
+/**
+ * qlafx00_intr_handler() - Process interrupts for the ISPFX00.
+ * @irq: interrupt number
+ * @dev_id: SCSI driver HA context
+ *
+ * Called by system whenever the host adapter generates an interrupt.
+ *
+ * Returns handled flag.
+ */
+irqreturn_t
+qlafx00_intr_handler(int irq, void *dev_id)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct device_reg_fx00 __iomem *reg;
+ int status;
+ unsigned long iter;
+ uint32_t stat;
+ uint32_t mb[8];
+ struct rsp_que *rsp;
+ unsigned long flags;
+ uint32_t clr_intr = 0;
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ ql_log(ql_log_info, NULL, 0x507d,
+ "%s: NULL response queue pointer.\n", __func__);
+ return IRQ_NONE;
+ }
+
+ ha = rsp->hw;
+ reg = &ha->iobase->ispfx00;
+ status = 0;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ vha = pci_get_drvdata(ha->pdev);
+ for (iter = 50; iter--; clr_intr = 0) {
+ stat = QLAFX00_RD_INTR_REG(ha);
+ if ((stat & QLAFX00_HST_INT_STS_BITS) == 0)
+ break;
+
+ switch (stat & QLAFX00_HST_INT_STS_BITS) {
+ case QLAFX00_INTR_MB_CMPLT:
+ case QLAFX00_INTR_MB_RSP_CMPLT:
+ case QLAFX00_INTR_MB_ASYNC_CMPLT:
+ case QLAFX00_INTR_ALL_CMPLT:
+ mb[0] = RD_REG_WORD(&reg->mailbox16);
+ qlafx00_mbx_completion(vha, mb[0]);
+ status |= MBX_INTERRUPT;
+ clr_intr |= QLAFX00_INTR_MB_CMPLT;
+ break;
+ case QLAFX00_INTR_ASYNC_CMPLT:
+ case QLAFX00_INTR_RSP_ASYNC_CMPLT:
+ ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0);
+ qlafx00_async_event(vha);
+ clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
+ break;
+ case QLAFX00_INTR_RSP_CMPLT:
+ qlafx00_process_response_queue(vha, rsp);
+ clr_intr |= QLAFX00_INTR_RSP_CMPLT;
+ break;
+ default:
+ ql_dbg(ql_dbg_async, vha, 0x507a,
+ "Unrecognized interrupt type (%d).\n", stat);
+ break;
+ }
+ QLAFX00_CLR_INTR_REG(ha, clr_intr);
+ QLAFX00_RD_INTR_REG(ha);
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
+ (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
+ set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+ complete(&ha->mbx_intr_comp);
+ }
+ return IRQ_HANDLED;
+}
+
+/* QLAFX00 specific IOCB implementation functions */
+
+static inline cont_a64_entry_t *
+qlafx00_prep_cont_type1_iocb(struct req_que *req,
+ cont_a64_entry_t *lcont_pkt)
+{
+ cont_a64_entry_t *cont_pkt;
+
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else {
+ req->ring_ptr++;
+ }
+
+ cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
+
+ /* Load packet defaults. */
+ *((uint32_t *)(&lcont_pkt->entry_type)) =
+ __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00);
+
+ return cont_pkt;
+}
+
+static inline void
+qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
+ uint16_t tot_dsds, struct cmd_type_7_fx00 *lcmd_pkt)
+{
+ uint16_t avail_dsds;
+ uint32_t *cur_dsd;
+ scsi_qla_host_t *vha;
+ struct scsi_cmnd *cmd;
+ struct scatterlist *sg;
+ int i, cont;
+ struct req_que *req;
+ cont_a64_entry_t lcont_pkt;
+ cont_a64_entry_t *cont_pkt;
+
+ vha = sp->fcport->vha;
+ req = vha->req;
+
+ cmd = GET_CMD_SP(sp);
+ cont = 0;
+ cont_pkt = NULL;
+
+	/* Update entry type to indicate Command Type 7 IOCB */
+ *((uint32_t *)(&lcmd_pkt->entry_type)) =
+ __constant_cpu_to_le32(FX00_COMMAND_TYPE_7);
+
+ /* No data transfer */
+ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+ lcmd_pkt->byte_count = __constant_cpu_to_le32(0);
+ return;
+ }
+
+ /* Set transfer direction */
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+ lcmd_pkt->cntrl_flags =
+ __constant_cpu_to_le16(TMF_WRITE_DATA);
+ vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+ } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ lcmd_pkt->cntrl_flags =
+ __constant_cpu_to_le16(TMF_READ_DATA);
+ vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+ }
+
+	/* One DSD is available in the Command Type 7 IOCB */
+ avail_dsds = 1;
+ cur_dsd = (uint32_t *)&lcmd_pkt->dseg_0_address;
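+	/* Each DSD is three little-endian words: address low/high and length. */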
+
+ /* Load data segments */
+ scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+ dma_addr_t sle_dma;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ /*
+ * Five DSDs are available in the Continuation
+ * Type 1 IOCB.
+ */
+ memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE);
+ cont_pkt =
+ qlafx00_prep_cont_type1_iocb(req, &lcont_pkt);
+ cur_dsd = (uint32_t *)lcont_pkt.dseg_0_address;
+ avail_dsds = 5;
+ cont = 1;
+ }
+
+ sle_dma = sg_dma_address(sg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
+ if (avail_dsds == 0 && cont == 1) {
+ cont = 0;
+ memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
+ REQUEST_ENTRY_SIZE);
+ }
+
+ }
+ if (avail_dsds != 0 && cont == 1) {
+ memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
+ REQUEST_ENTRY_SIZE);
+ }
+}
+
+/**
+ * qlafx00_start_scsi() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qlafx00_start_scsi(srb_t *sp)
+{
+ int ret, nseg;
+ unsigned long flags;
+ uint32_t index;
+ uint32_t handle;
+ uint16_t cnt;
+ uint16_t req_cnt;
+ uint16_t tot_dsds;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct cmd_type_7_fx00 *cmd_pkt;
+ struct cmd_type_7_fx00 lcmd_pkt;
+ struct scsi_lun llun;
+ char tag[2];
+
+ /* Setup device pointers. */
+ ret = 0;
+
+ rsp = ha->rsp_q_map[0];
+ req = vha->req;
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+	/* Markers are not required on ISPFx00; force the flag off for now. */
+ vha->marker_needed = 0;
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+ QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+ vha->marker_needed = 0;
+ }
+
+ /* Acquire ring specific lock */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
+ handle++;
+ if (handle == req->num_outstanding_cmds)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+ if (index == req->num_outstanding_cmds)
+ goto queuing_error;
+
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ } else
+ nseg = 0;
+
+ tot_dsds = nseg;
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
+
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+ }
+
+ /* Build command packet. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+
+ cmd_pkt = (struct cmd_type_7_fx00 *)req->ring_ptr;
+
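+	/*
+	 * Stage the IOCB in a local copy; it is written to the I/O-mapped
+	 * ring slot with a single memcpy_toio() once fully built.
+	 */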
+ memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE);
+
+ lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle);
+ lcmd_pkt.handle_hi = 0;
+ lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds);
+ lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id);
+
+ int_to_scsilun(cmd->device->lun, &llun);
+ host_to_adap((uint8_t *)&llun, (uint8_t *)&lcmd_pkt.lun,
+ sizeof(lcmd_pkt.lun));
+
+ /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
+ if (scsi_populate_tag_msg(cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ lcmd_pkt.task = TSK_HEAD_OF_QUEUE;
+ break;
+ case ORDERED_QUEUE_TAG:
+ lcmd_pkt.task = TSK_ORDERED;
+ break;
+ }
+ }
+
+ /* Load SCSI command packet. */
+ host_to_adap(cmd->cmnd, lcmd_pkt.fcp_cdb, sizeof(lcmd_pkt.fcp_cdb));
+ lcmd_pkt.byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+ /* Build IOCB segments */
+ qlafx00_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, &lcmd_pkt);
+
+ /* Set total data segment count. */
+ lcmd_pkt.entry_count = (uint8_t)req_cnt;
+
+ /* Specify response queue number where completion should happen */
+ lcmd_pkt.entry_status = (uint8_t) rsp->id;
+
+ ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
+ (uint8_t *)cmd->cmnd, cmd->cmd_len);
+ ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3032,
+ (uint8_t *)&lcmd_pkt, REQUEST_ENTRY_SIZE);
+
+ memcpy_toio((void __iomem *)cmd_pkt, &lcmd_pkt, REQUEST_ENTRY_SIZE);
+ wmb();
+
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ sp->flags |= SRB_DMA_VALID;
+
+ /* Set chip new ring index. */
+ WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return QLA_SUCCESS;
+
+queuing_error:
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return QLA_FUNCTION_FAILED;
+}
+
+void
+qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
+{
+ struct srb_iocb *fxio = &sp->u.iocb_cmd;
+ scsi_qla_host_t *vha = sp->fcport->vha;
+ struct req_que *req = vha->req;
+ struct tsk_mgmt_entry_fx00 tm_iocb;
+ struct scsi_lun llun;
+
+ memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00));
+ tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
+ tm_iocb.entry_count = 1;
+ tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+ tm_iocb.handle_hi = 0;
+ tm_iocb.timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
+ tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
+ tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
+	if (fxio->u.tmf.flags == TCF_LUN_RESET) {
+ int_to_scsilun(fxio->u.tmf.lun, &llun);
+ host_to_adap((uint8_t *)&llun, (uint8_t *)&tm_iocb.lun,
+ sizeof(struct scsi_lun));
+ }
+
+	memcpy_toio((void __iomem *)ptm_iocb, &tm_iocb,
+	    sizeof(struct tsk_mgmt_entry_fx00));
+ wmb();
+}
+
+void
+qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb)
+{
+ struct srb_iocb *fxio = &sp->u.iocb_cmd;
+ scsi_qla_host_t *vha = sp->fcport->vha;
+ struct req_que *req = vha->req;
+ struct abort_iocb_entry_fx00 abt_iocb;
+
+ memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00));
+ abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00;
+ abt_iocb.entry_count = 1;
+ abt_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+ abt_iocb.abort_handle =
+ cpu_to_le32(MAKE_HANDLE(req->id, fxio->u.abt.cmd_hndl));
+ abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id);
+ abt_iocb.req_que_no = cpu_to_le16(req->id);
+
+	memcpy_toio((void __iomem *)pabt_iocb, &abt_iocb,
+	    sizeof(struct abort_iocb_entry_fx00));
+ wmb();
+}
+
+void
+qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
+{
+ struct srb_iocb *fxio = &sp->u.iocb_cmd;
+ struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
+ struct fc_bsg_job *bsg_job;
+ struct fxdisc_entry_fx00 fx_iocb;
+ uint8_t entry_cnt = 1;
+
+ memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00));
+ fx_iocb.entry_type = FX00_IOCB_TYPE;
+ fx_iocb.handle = cpu_to_le32(sp->handle);
+ fx_iocb.entry_count = entry_cnt;
+
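+	/*
+	 * SRB_FXIOCB_DCMD requests originate inside the driver and carry a
+	 * single request/response buffer; all others are BSG pass-throughs
+	 * whose scatter/gather lists may spill into Continuation Type 1
+	 * IOCBs.
+	 */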
+ if (sp->type == SRB_FXIOCB_DCMD) {
+ fx_iocb.func_num =
+ cpu_to_le16(sp->u.iocb_cmd.u.fxiocb.req_func_type);
+ fx_iocb.adapid = cpu_to_le32(fxio->u.fxiocb.adapter_id);
+ fx_iocb.adapid_hi = cpu_to_le32(fxio->u.fxiocb.adapter_id_hi);
+ fx_iocb.reserved_0 = cpu_to_le32(fxio->u.fxiocb.reserved_0);
+ fx_iocb.reserved_1 = cpu_to_le32(fxio->u.fxiocb.reserved_1);
+ fx_iocb.dataword_extra =
+ cpu_to_le32(fxio->u.fxiocb.req_data_extra);
+
+ if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
+ fx_iocb.req_dsdcnt = cpu_to_le16(1);
+ fx_iocb.req_xfrcnt =
+ cpu_to_le16(fxio->u.fxiocb.req_len);
+ fx_iocb.dseg_rq_address[0] =
+ cpu_to_le32(LSD(fxio->u.fxiocb.req_dma_handle));
+ fx_iocb.dseg_rq_address[1] =
+ cpu_to_le32(MSD(fxio->u.fxiocb.req_dma_handle));
+ fx_iocb.dseg_rq_len =
+ cpu_to_le32(fxio->u.fxiocb.req_len);
+ }
+
+ if (fxio->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
+ fx_iocb.rsp_dsdcnt = cpu_to_le16(1);
+ fx_iocb.rsp_xfrcnt =
+ cpu_to_le16(fxio->u.fxiocb.rsp_len);
+ fx_iocb.dseg_rsp_address[0] =
+ cpu_to_le32(LSD(fxio->u.fxiocb.rsp_dma_handle));
+ fx_iocb.dseg_rsp_address[1] =
+ cpu_to_le32(MSD(fxio->u.fxiocb.rsp_dma_handle));
+ fx_iocb.dseg_rsp_len =
+ cpu_to_le32(fxio->u.fxiocb.rsp_len);
+ }
+
+ if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DWRD_VALID) {
+ fx_iocb.dataword =
+ cpu_to_le32(fxio->u.fxiocb.req_data);
+ }
+ fx_iocb.flags = fxio->u.fxiocb.flags;
+ } else {
+ struct scatterlist *sg;
+ bsg_job = sp->u.bsg_job;
+ piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
+ &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+
+ fx_iocb.func_num = piocb_rqst->func_type;
+ fx_iocb.adapid = piocb_rqst->adapid;
+ fx_iocb.adapid_hi = piocb_rqst->adapid_hi;
+ fx_iocb.reserved_0 = piocb_rqst->reserved_0;
+ fx_iocb.reserved_1 = piocb_rqst->reserved_1;
+ fx_iocb.dataword_extra = piocb_rqst->dataword_extra;
+ fx_iocb.dataword = piocb_rqst->dataword;
+ fx_iocb.req_xfrcnt = cpu_to_le16(piocb_rqst->req_len);
+ fx_iocb.rsp_xfrcnt = cpu_to_le16(piocb_rqst->rsp_len);
+
+ if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
+ int avail_dsds, tot_dsds;
+ cont_a64_entry_t lcont_pkt;
+ cont_a64_entry_t *cont_pkt = NULL;
+ uint32_t *cur_dsd;
+ int index = 0, cont = 0;
+
+ fx_iocb.req_dsdcnt =
+ cpu_to_le16(bsg_job->request_payload.sg_cnt);
+ tot_dsds =
+ cpu_to_le32(bsg_job->request_payload.sg_cnt);
+ cur_dsd = (uint32_t *)&fx_iocb.dseg_rq_address[0];
+ avail_dsds = 1;
+ for_each_sg(bsg_job->request_payload.sg_list, sg,
+ tot_dsds, index) {
+ dma_addr_t sle_dma;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ /*
+ * Five DSDs are available in the Cont.
+ * Type 1 IOCB.
+ */
+ memset(&lcont_pkt, 0,
+ REQUEST_ENTRY_SIZE);
+ cont_pkt =
+ qlafx00_prep_cont_type1_iocb(
+ sp->fcport->vha->req,
+ &lcont_pkt);
+ cur_dsd = (uint32_t *)
+ lcont_pkt.dseg_0_address;
+ avail_dsds = 5;
+ cont = 1;
+ entry_cnt++;
+ }
+
+ sle_dma = sg_dma_address(sg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
+
+ if (avail_dsds == 0 && cont == 1) {
+ cont = 0;
+ memcpy_toio(
+ (void __iomem *)cont_pkt,
+ &lcont_pkt, REQUEST_ENTRY_SIZE);
+ ql_dump_buffer(
+ ql_dbg_user + ql_dbg_verbose,
+ sp->fcport->vha, 0x3042,
+ (uint8_t *)&lcont_pkt,
+ REQUEST_ENTRY_SIZE);
+ }
+ }
+ if (avail_dsds != 0 && cont == 1) {
+ memcpy_toio((void __iomem *)cont_pkt,
+ &lcont_pkt, REQUEST_ENTRY_SIZE);
+ ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
+ sp->fcport->vha, 0x3043,
+ (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
+ }
+ }
+
+ if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
+ int avail_dsds, tot_dsds;
+ cont_a64_entry_t lcont_pkt;
+ cont_a64_entry_t *cont_pkt = NULL;
+ uint32_t *cur_dsd;
+ int index = 0, cont = 0;
+
+ fx_iocb.rsp_dsdcnt =
+ cpu_to_le16(bsg_job->reply_payload.sg_cnt);
+ tot_dsds = cpu_to_le32(bsg_job->reply_payload.sg_cnt);
+ cur_dsd = (uint32_t *)&fx_iocb.dseg_rsp_address[0];
+ avail_dsds = 1;
+
+ for_each_sg(bsg_job->reply_payload.sg_list, sg,
+ tot_dsds, index) {
+ dma_addr_t sle_dma;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ /*
+ * Five DSDs are available in the Cont.
+ * Type 1 IOCB.
+ */
+ memset(&lcont_pkt, 0,
+ REQUEST_ENTRY_SIZE);
+ cont_pkt =
+ qlafx00_prep_cont_type1_iocb(
+ sp->fcport->vha->req,
+ &lcont_pkt);
+ cur_dsd = (uint32_t *)
+ lcont_pkt.dseg_0_address;
+ avail_dsds = 5;
+ cont = 1;
+ entry_cnt++;
+ }
+
+ sle_dma = sg_dma_address(sg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
+
+ if (avail_dsds == 0 && cont == 1) {
+ cont = 0;
+ memcpy_toio((void __iomem *)cont_pkt,
+ &lcont_pkt,
+ REQUEST_ENTRY_SIZE);
+ ql_dump_buffer(
+ ql_dbg_user + ql_dbg_verbose,
+ sp->fcport->vha, 0x3045,
+ (uint8_t *)&lcont_pkt,
+ REQUEST_ENTRY_SIZE);
+ }
+ }
+ if (avail_dsds != 0 && cont == 1) {
+ memcpy_toio((void __iomem *)cont_pkt,
+ &lcont_pkt, REQUEST_ENTRY_SIZE);
+ ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
+ sp->fcport->vha, 0x3046,
+ (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
+ }
+ }
+
+ if (piocb_rqst->flags & SRB_FXDISC_REQ_DWRD_VALID)
+ fx_iocb.dataword = cpu_to_le32(piocb_rqst->dataword);
+ fx_iocb.flags = piocb_rqst->flags;
+ fx_iocb.entry_count = entry_cnt;
+ }
+
+ ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
+ sp->fcport->vha, 0x3047,
+ (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00));
+
+ memcpy((void __iomem *)pfxiocb, &fx_iocb,
+ sizeof(struct fxdisc_entry_fx00));
+ wmb();
+}
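
The DSD packing above follows a fixed budget: one data-segment descriptor
lives in the base fxdisc IOCB (avail_dsds starts at 1) and each Continuation
Type 1 IOCB adds five more, bumping entry_cnt once per overflow. A minimal
standalone sketch of that arithmetic (illustrative only, not driver code):

/* How many IOCB entries a request with sg_cnt scatter-gather
 * elements consumes: 1 DSD in the base IOCB, 5 per Continuation
 * Type 1 IOCB, matching the loop above. */
static int fx00_entry_count(int sg_cnt)
{
	int entries = 1;			/* base fxdisc IOCB */

	if (sg_cnt > 1)
		entries += (sg_cnt - 1 + 4) / 5;	/* round up */
	return entries;
}

With sg_cnt = 6 this yields 2 entries (one base slot plus one continuation
holding the remaining five descriptors), which is what the loop produces.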
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
new file mode 100644
index 000000000000..cc327dc2fd10
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -0,0 +1,510 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#ifndef __QLA_MR_H
+#define __QLA_MR_H
+
+/*
+ * The PCI VendorID and DeviceID for our board.
+ */
+#define PCI_DEVICE_ID_QLOGIC_ISPF001 0xF001
+
+/* FX00 specific definitions */
+
+#define FX00_COMMAND_TYPE_7 0x07 /* Command Type 7 entry for 7XXX */
+struct cmd_type_7_fx00 {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+ uint32_t handle_hi;
+
+ uint16_t tgt_idx; /* Target Idx. */
+ uint16_t timeout; /* Command timeout. */
+
+ uint16_t dseg_count; /* Data segment count. */
+ uint16_t scsi_rsp_dsd_len;
+
+ struct scsi_lun lun; /* LUN (LE). */
+
+ uint8_t cntrl_flags;
+
+ uint8_t task_mgmt_flags; /* Task management flags. */
+
+ uint8_t task;
+
+ uint8_t crn;
+
+ uint8_t fcp_cdb[MAX_CMDSZ]; /* SCSI command words. */
+ uint32_t byte_count; /* Total byte count. */
+
+ uint32_t dseg_0_address[2]; /* Data segment 0 address. */
+ uint32_t dseg_0_len; /* Data segment 0 length. */
+};
+
+/*
+ * ISP queue - marker entry structure definition.
+ */
+struct mrk_entry_fx00 {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t handle_count; /* Handle count. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+ uint32_t handle_hi; /* System handle. */
+
+ uint16_t tgt_id; /* Target ID. */
+
+ uint8_t modifier; /* Modifier (7-0). */
+ uint8_t reserved_1;
+
+ uint8_t reserved_2[5];
+
+ uint8_t lun[8]; /* FCP LUN (BE). */
+ uint8_t reserved_3[36];
+};
+
+
+#define STATUS_TYPE_FX00 0x01 /* Status entry. */
+struct sts_entry_fx00 {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+ uint32_t handle_hi; /* System handle. */
+
+ uint16_t comp_status; /* Completion status. */
+ uint16_t reserved_0; /* OX_ID used by the firmware. */
+
+ uint32_t residual_len; /* FW calc residual transfer length. */
+
+ uint16_t reserved_1;
+ uint16_t state_flags; /* State flags. */
+
+ uint16_t reserved_2;
+ uint16_t scsi_status; /* SCSI status. */
+
+ uint32_t sense_len; /* FCP SENSE length. */
+ uint8_t data[32]; /* FCP response/sense information. */
+};
+
+
+#define MAX_HANDLE_COUNT 15
+#define MULTI_STATUS_TYPE_FX00 0x0D
+
+struct multi_sts_entry_fx00 {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t handle_count;
+ uint8_t entry_status;
+
+ uint32_t handles[MAX_HANDLE_COUNT];
+};
+
+#define TSK_MGMT_IOCB_TYPE_FX00 0x05
+struct tsk_mgmt_entry_fx00 {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define;
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+
+ uint32_t handle_hi; /* System handle. */
+
+ uint16_t tgt_id; /* Target Idx. */
+
+ uint16_t reserved_1;
+
+ uint16_t delay; /* Activity delay in seconds. */
+
+ uint16_t timeout; /* Command timeout. */
+
+ struct scsi_lun lun; /* LUN (LE). */
+
+ uint32_t control_flags; /* Control Flags. */
+
+ uint8_t reserved_2[32];
+};
+
+
+#define ABORT_IOCB_TYPE_FX00 0x08 /* Abort IOCB status. */
+struct abort_iocb_entry_fx00 {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+ uint32_t handle_hi; /* System handle. */
+
+ uint16_t tgt_id_sts; /* Completion status. */
+ uint16_t options;
+
+ uint32_t abort_handle; /* System handle. */
+ uint32_t abort_handle_hi; /* System handle. */
+
+ uint16_t req_que_no;
+ uint8_t reserved_1[38];
+};
+
+#define IOCTL_IOSB_TYPE_FX00 0x0C
+struct ioctl_iocb_entry_fx00 {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+ uint32_t reserved_0; /* System handle. */
+
+ uint16_t comp_func_num;
+ uint16_t fw_iotcl_flags;
+
+ uint32_t dataword_r; /* Data word returned */
+ uint32_t adapid; /* Adapter ID */
+ uint32_t adapid_hi; /* Adapter ID high */
+ uint32_t reserved_1;
+
+ uint32_t seq_no;
+ uint8_t reserved_2[20];
+ uint32_t residuallen;
+ uint32_t status;
+};
+
+#define STATUS_CONT_TYPE_FX00 0x04
+
+#define FX00_IOCB_TYPE 0x0B
+struct fxdisc_entry_fx00 {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System Defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+ uint32_t reserved_0; /* System handle. */
+
+ uint16_t func_num;
+ uint16_t req_xfrcnt;
+ uint16_t req_dsdcnt;
+ uint16_t rsp_xfrcnt;
+ uint16_t rsp_dsdcnt;
+ uint8_t flags;
+ uint8_t reserved_1;
+
+ uint32_t dseg_rq_address[2]; /* Data segment 0 address. */
+ uint32_t dseg_rq_len; /* Data segment 0 length. */
+ uint32_t dseg_rsp_address[2]; /* Data segment 1 address. */
+ uint32_t dseg_rsp_len; /* Data segment 1 length. */
+
+ uint32_t dataword;
+ uint32_t adapid;
+ uint32_t adapid_hi;
+ uint32_t dataword_extra;
+};
+
+struct qlafx00_tgt_node_info {
+ uint8_t tgt_node_wwpn[WWN_SIZE];
+ uint8_t tgt_node_wwnn[WWN_SIZE];
+ uint32_t tgt_node_state;
+ uint8_t reserved[128];
+ uint32_t reserved_1[8];
+ uint64_t reserved_2[4];
+} __packed;
+
+#define QLAFX00_TGT_NODE_INFO sizeof(struct qlafx00_tgt_node_info)
+
+#define QLAFX00_LINK_STATUS_DOWN 0x10
+#define QLAFX00_LINK_STATUS_UP 0x11
+
+#define QLAFX00_PORT_SPEED_2G 0x2
+#define QLAFX00_PORT_SPEED_4G 0x4
+#define QLAFX00_PORT_SPEED_8G 0x8
+#define QLAFX00_PORT_SPEED_10G 0xa
+struct port_info_data {
+ uint8_t port_state;
+ uint8_t port_type;
+ uint16_t port_identifier;
+ uint32_t up_port_state;
+ uint8_t fw_ver_num[32];
+ uint8_t portal_attrib;
+ uint16_t host_option;
+ uint8_t reset_delay;
+ uint8_t pdwn_retry_cnt;
+ uint16_t max_luns2tgt;
+ uint8_t risc_ver;
+ uint8_t pconn_option;
+ uint16_t risc_option;
+ uint16_t max_frame_len;
+ uint16_t max_iocb_alloc;
+ uint16_t exec_throttle;
+ uint8_t retry_cnt;
+ uint8_t retry_delay;
+ uint8_t port_name[8];
+ uint8_t port_id[3];
+ uint8_t link_status;
+ uint8_t plink_rate;
+ uint32_t link_config;
+ uint16_t adap_haddr;
+ uint8_t tgt_disc;
+ uint8_t log_tout;
+ uint8_t node_name[8];
+ uint16_t erisc_opt1;
+ uint8_t resp_acc_tmr;
+ uint8_t intr_del_tmr;
+ uint8_t erisc_opt2;
+ uint8_t alt_port_name[8];
+ uint8_t alt_node_name[8];
+ uint8_t link_down_tout;
+ uint8_t conn_type;
+ uint8_t fc_fw_mode;
+ uint32_t uiReserved[48];
+} __packed;
+
+/* OS Type Designations */
+#define OS_TYPE_UNKNOWN 0
+#define OS_TYPE_LINUX 2
+
+/* Linux Info */
+#define SYSNAME_LENGTH 128
+#define NODENAME_LENGTH 64
+#define RELEASE_LENGTH 64
+#define VERSION_LENGTH 64
+#define MACHINE_LENGTH 64
+#define DOMNAME_LENGTH 64
+
+struct host_system_info {
+ uint32_t os_type;
+ char sysname[SYSNAME_LENGTH];
+ char nodename[NODENAME_LENGTH];
+ char release[RELEASE_LENGTH];
+ char version[VERSION_LENGTH];
+ char machine[MACHINE_LENGTH];
+ char domainname[DOMNAME_LENGTH];
+ char hostdriver[VERSION_LENGTH];
+ uint32_t reserved[64];
+} __packed;
+
+struct register_host_info {
+ struct host_system_info hsi; /* host system info */
+ uint64_t utc; /* UTC (system time) */
+ uint32_t reserved[64]; /* future additions */
+} __packed;
+
+
+#define QLAFX00_PORT_DATA_INFO (sizeof(struct port_info_data))
+#define QLAFX00_TGT_NODE_LIST_SIZE (sizeof(uint32_t) * 32)
+
+struct config_info_data {
+ uint8_t product_name[256];
+ uint8_t symbolic_name[64];
+ uint8_t serial_num[32];
+ uint8_t hw_version[16];
+ uint8_t fw_version[16];
+ uint8_t uboot_version[16];
+ uint8_t fru_serial_num[32];
+
+ uint8_t fc_port_count;
+ uint8_t iscsi_port_count;
+ uint8_t reserved1[2];
+
+ uint8_t mode;
+ uint8_t log_level;
+ uint8_t reserved2[2];
+
+ uint32_t log_size;
+
+ uint8_t tgt_pres_mode;
+ uint8_t iqn_flags;
+ uint8_t lun_mapping;
+
+ uint64_t adapter_id;
+
+ uint32_t cluster_key_len;
+ uint8_t cluster_key[10];
+
+ uint64_t cluster_master_id;
+ uint64_t cluster_slave_id;
+ uint8_t cluster_flags;
+} __packed;
+
+#define FXDISC_GET_CONFIG_INFO 0x01
+#define FXDISC_GET_PORT_INFO 0x02
+#define FXDISC_GET_TGT_NODE_INFO 0x80
+#define FXDISC_GET_TGT_NODE_LIST 0x81
+#define FXDISC_REG_HOST_INFO 0x99
+
+#define QLAFX00_HBA_ICNTRL_REG 0x21B08
+#define QLAFX00_ICR_ENB_MASK 0x80000000
+#define QLAFX00_ICR_DIS_MASK 0x7fffffff
+#define QLAFX00_HST_RST_REG 0x18264
+#define QLAFX00_HST_TO_HBA_REG 0x20A04
+#define QLAFX00_HBA_TO_HOST_REG 0x21B70
+#define QLAFX00_HST_INT_STS_BITS 0x7
+#define QLAFX00_BAR1_BASE_ADDR_REG 0x40018
+#define QLAFX00_PEX0_WIN0_BASE_ADDR_REG 0x41824
+
+#define QLAFX00_INTR_MB_CMPLT 0x1
+#define QLAFX00_INTR_RSP_CMPLT 0x2
+#define QLAFX00_INTR_MB_RSP_CMPLT 0x3
+#define QLAFX00_INTR_ASYNC_CMPLT 0x4
+#define QLAFX00_INTR_MB_ASYNC_CMPLT 0x5
+#define QLAFX00_INTR_RSP_ASYNC_CMPLT 0x6
+#define QLAFX00_INTR_ALL_CMPLT 0x7
+
+#define QLAFX00_MBA_SYSTEM_ERR 0x8002
+#define QLAFX00_MBA_LINK_UP 0x8011
+#define QLAFX00_MBA_LINK_DOWN 0x8012
+#define QLAFX00_MBA_PORT_UPDATE 0x8014
+#define QLAFX00_MBA_SHUTDOWN_RQSTD 0x8062
+
+#define SOC_SW_RST_CONTROL_REG_CORE0 0x0020800
+#define SOC_FABRIC_RST_CONTROL_REG 0x0020840
+#define SOC_FABRIC_CONTROL_REG 0x0020200
+#define SOC_FABRIC_CONFIG_REG 0x0020204
+
+#define SOC_INTERRUPT_SOURCE_I_CONTROL_REG 0x0020B00
+#define SOC_CORE_TIMER_REG 0x0021850
+#define SOC_IRQ_ACK_REG 0x00218b4
+
+#define CONTINUE_A64_TYPE_FX00 0x03 /* Continuation entry. */
+
+#define QLAFX00_SET_HST_INTR(ha, value) \
+ WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_TO_HBA_REG, \
+ value)
+
+#define QLAFX00_CLR_HST_INTR(ha, value) \
+ WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
+ ~value)
+
+#define QLAFX00_RD_INTR_REG(ha) \
+ RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG)
+
+#define QLAFX00_CLR_INTR_REG(ha, value) \
+ WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
+ ~value)
+
+#define QLAFX00_SET_HBA_SOC_REG(ha, off, val)\
+ WRT_REG_DWORD((ha)->cregbase + off, val)
+
+#define QLAFX00_GET_HBA_SOC_REG(ha, off)\
+ RD_REG_DWORD((ha)->cregbase + off)
+
+#define QLAFX00_HBA_RST_REG(ha, val)\
+ WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_RST_REG, val)
+
+#define QLAFX00_RD_ICNTRL_REG(ha) \
+ RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG)
+
+#define QLAFX00_ENABLE_ICNTRL_REG(ha) \
+ WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
+ (QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) | \
+ QLAFX00_ICR_ENB_MASK))
+
+#define QLAFX00_DISABLE_ICNTRL_REG(ha) \
+ WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
+ (QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) & \
+ QLAFX00_ICR_DIS_MASK))
+
+#define QLAFX00_RD_REG(ha, off) \
+ RD_REG_DWORD((ha)->cregbase + off)
+
+#define QLAFX00_WR_REG(ha, off, val) \
+ WRT_REG_DWORD((ha)->cregbase + off, val)
+
+struct qla_mt_iocb_rqst_fx00 {
+ uint32_t reserved_0;
+
+ uint16_t func_type;
+ uint8_t flags;
+ uint8_t reserved_1;
+
+ uint32_t dataword;
+
+ uint32_t adapid;
+ uint32_t adapid_hi;
+
+ uint32_t dataword_extra;
+
+ uint32_t req_len;
+
+ uint32_t rsp_len;
+};
+
+struct qla_mt_iocb_rsp_fx00 {
+ uint32_t reserved_1;
+
+ uint16_t func_type;
+ uint16_t ioctl_flags;
+
+ uint32_t ioctl_data;
+
+ uint32_t adapid;
+ uint32_t adapid_hi;
+
+ uint32_t reserved_2;
+ uint32_t seq_number;
+
+ uint8_t reserved_3[20];
+
+ int32_t res_count;
+
+ uint32_t status;
+};
+
+
+#define MAILBOX_REGISTER_COUNT_FX00 16
+#define AEN_MAILBOX_REGISTER_COUNT_FX00 8
+#define MAX_FIBRE_DEVICES_FX00 512
+#define MAX_LUNS_FX00 0x1024
+#define MAX_TARGETS_FX00 MAX_ISA_DEVICES
+#define REQUEST_ENTRY_CNT_FX00 512 /* Number of request entries. */
+#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/
+
+/*
+ * Firmware state codes for QLAFX00 adapters
+ */
+#define FSTATE_FX00_CONFIG_WAIT 0x0000 /* Waiting for driver to issue
+ * Initialize FW Mbox cmd
+ */
+#define FSTATE_FX00_INITIALIZED 0x1000 /* FW has been initialized by
+ * the driver
+ */
+
+#define FX00_DEF_RATOV 10
+
+struct mr_data_fx00 {
+ uint8_t product_name[256];
+ uint8_t symbolic_name[64];
+ uint8_t serial_num[32];
+ uint8_t hw_version[16];
+ uint8_t fw_version[16];
+ uint8_t uboot_version[16];
+ uint8_t fru_serial_num[32];
+ fc_port_t fcport; /* fcport used for requests
+ * that are not linked
+ * to a particular target
+ */
+ uint8_t fw_hbt_en;
+ uint8_t fw_hbt_cnt;
+ uint8_t fw_hbt_miss_cnt;
+ uint32_t old_fw_hbt_cnt;
+ uint16_t fw_reset_timer_tick;
+ uint8_t fw_reset_timer_exp;
+ uint32_t old_aenmbx0_state;
+};
+
+#define QLAFX00_LOOP_DOWN_TIME 615 /* 600 */
+#define QLAFX00_HEARTBEAT_INTERVAL 6 /* number of seconds */
+#define QLAFX00_HEARTBEAT_MISS_CNT 3 /* number of miss */
+#define QLAFX00_RESET_INTERVAL 120 /* number of seconds */
+#define QLAFX00_MAX_RESET_INTERVAL 600 /* number of seconds */
+#endif
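
The interrupt-control macros above are read-modify-write wrappers:
QLAFX00_ENABLE_ICNTRL_REG reads ICNTRL, ORs in bit 31
(QLAFX00_ICR_ENB_MASK) and writes it back, while the disable variant ANDs
with QLAFX00_ICR_DIS_MASK. A hedged plain-C sketch of the same pattern;
rd32()/wr32() are hypothetical stand-ins for RD_REG_DWORD()/WRT_REG_DWORD():

#include <stdint.h>

static uint32_t rd32(volatile uint32_t *reg) { return *reg; }
static void wr32(volatile uint32_t *reg, uint32_t v) { *reg = v; }

#define ICR_ENB_MASK 0x80000000u	/* QLAFX00_ICR_ENB_MASK */
#define ICR_DIS_MASK 0x7fffffffu	/* QLAFX00_ICR_DIS_MASK */

/* Set bit 31 without disturbing the rest of the register. */
static void icntrl_enable(volatile uint32_t *icntrl)
{
	wr32(icntrl, rd32(icntrl) | ICR_ENB_MASK);
}

/* Clear bit 31, preserving bits 30..0. */
static void icntrl_disable(volatile uint32_t *icntrl)
{
	wr32(icntrl, rd32(icntrl) & ICR_DIS_MASK);
}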
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 2c6dd3dfe0f4..a083715843bd 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -47,6 +47,7 @@ MODULE_PARM_DESC(ql2xenableclass2,
"Specify if Class 2 operations are supported from the very "
"beginning. Default is 0 - class 2 not supported.");
+
int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO);
MODULE_PARM_DESC(ql2xlogintimeout,
@@ -354,7 +355,12 @@ fail_req_map:
static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
{
- if (req && req->ring)
+ if (IS_QLAFX00(ha)) {
+ if (req && req->ring_fx00)
+ dma_free_coherent(&ha->pdev->dev,
+ (req->length_fx00 + 1) * sizeof(request_t),
+ req->ring_fx00, req->dma_fx00);
+ } else if (req && req->ring)
dma_free_coherent(&ha->pdev->dev,
(req->length + 1) * sizeof(request_t),
req->ring, req->dma);
@@ -368,11 +374,16 @@ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
{
- if (rsp && rsp->ring)
+ if (IS_QLAFX00(ha)) {
+ if (rsp && rsp->ring)
+ dma_free_coherent(&ha->pdev->dev,
+ (rsp->length_fx00 + 1) * sizeof(request_t),
+ rsp->ring_fx00, rsp->dma_fx00);
+ } else if (rsp && rsp->ring) {
dma_free_coherent(&ha->pdev->dev,
(rsp->length + 1) * sizeof(response_t),
rsp->ring, rsp->dma);
-
+ }
kfree(rsp);
rsp = NULL;
}
@@ -633,7 +644,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
qla2x00_rel_sp(sp->fcport->vha, sp);
}
-static void
+void
qla2x00_sp_compl(void *data, void *ptr, int res)
{
struct qla_hw_data *ha = (struct qla_hw_data *)data;
@@ -657,6 +668,9 @@ qla2x00_sp_compl(void *data, void *ptr, int res)
cmd->scsi_done(cmd);
}
+/* If we are SP1 here, we still need to take and release the host_lock, as SP1
+ * does not have the changes necessary to avoid taking host->host_lock.
+ */
static int
qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
@@ -1304,6 +1318,9 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
}
}
+ if (IS_QLAFX00(ha))
+ return QLA_SUCCESS;
+
if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
@@ -1858,6 +1875,7 @@ static struct isp_operations qla2100_isp_ops = {
.start_scsi = qla2x00_start_scsi,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
+ .initialize_adapter = qla2x00_initialize_adapter,
};
static struct isp_operations qla2300_isp_ops = {
@@ -1895,6 +1913,7 @@ static struct isp_operations qla2300_isp_ops = {
.start_scsi = qla2x00_start_scsi,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
+ .initialize_adapter = qla2x00_initialize_adapter,
};
static struct isp_operations qla24xx_isp_ops = {
@@ -1932,6 +1951,7 @@ static struct isp_operations qla24xx_isp_ops = {
.start_scsi = qla24xx_start_scsi,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
+ .initialize_adapter = qla2x00_initialize_adapter,
};
static struct isp_operations qla25xx_isp_ops = {
@@ -1969,6 +1989,7 @@ static struct isp_operations qla25xx_isp_ops = {
.start_scsi = qla24xx_dif_start_scsi,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
+ .initialize_adapter = qla2x00_initialize_adapter,
};
static struct isp_operations qla81xx_isp_ops = {
@@ -2006,6 +2027,7 @@ static struct isp_operations qla81xx_isp_ops = {
.start_scsi = qla24xx_dif_start_scsi,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla2x00_iospace_config,
+ .initialize_adapter = qla2x00_initialize_adapter,
};
static struct isp_operations qla82xx_isp_ops = {
@@ -2043,6 +2065,7 @@ static struct isp_operations qla82xx_isp_ops = {
.start_scsi = qla82xx_start_scsi,
.abort_isp = qla82xx_abort_isp,
.iospace_config = qla82xx_iospace_config,
+ .initialize_adapter = qla2x00_initialize_adapter,
};
static struct isp_operations qla83xx_isp_ops = {
@@ -2080,6 +2103,45 @@ static struct isp_operations qla83xx_isp_ops = {
.start_scsi = qla24xx_dif_start_scsi,
.abort_isp = qla2x00_abort_isp,
.iospace_config = qla83xx_iospace_config,
+ .initialize_adapter = qla2x00_initialize_adapter,
+};
+
+static struct isp_operations qlafx00_isp_ops = {
+ .pci_config = qlafx00_pci_config,
+ .reset_chip = qlafx00_soft_reset,
+ .chip_diag = qlafx00_chip_diag,
+ .config_rings = qlafx00_config_rings,
+ .reset_adapter = qlafx00_soft_reset,
+ .nvram_config = NULL,
+ .update_fw_options = NULL,
+ .load_risc = NULL,
+ .pci_info_str = qlafx00_pci_info_str,
+ .fw_version_str = qlafx00_fw_version_str,
+ .intr_handler = qlafx00_intr_handler,
+ .enable_intrs = qlafx00_enable_intrs,
+ .disable_intrs = qlafx00_disable_intrs,
+ .abort_command = qlafx00_abort_command,
+ .target_reset = qlafx00_abort_target,
+ .lun_reset = qlafx00_lun_reset,
+ .fabric_login = NULL,
+ .fabric_logout = NULL,
+ .calc_req_entries = NULL,
+ .build_iocbs = NULL,
+ .prep_ms_iocb = qla24xx_prep_ms_iocb,
+ .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
+ .read_nvram = qla24xx_read_nvram_data,
+ .write_nvram = qla24xx_write_nvram_data,
+ .fw_dump = NULL,
+ .beacon_on = qla24xx_beacon_on,
+ .beacon_off = qla24xx_beacon_off,
+ .beacon_blink = NULL,
+ .read_optrom = qla24xx_read_optrom_data,
+ .write_optrom = qla24xx_write_optrom_data,
+ .get_flash_version = qla24xx_get_flash_version,
+ .start_scsi = qlafx00_start_scsi,
+ .abort_isp = qlafx00_abort_isp,
+ .iospace_config = qlafx00_iospace_config,
+ .initialize_adapter = qlafx00_initialize_adapter,
};
static inline void
@@ -2192,6 +2254,9 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
ha->device_type |= DT_T10_PI;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
+ case PCI_DEVICE_ID_QLOGIC_ISPF001:
+ ha->device_type |= DT_ISPFX00;
+ break;
}
if (IS_QLA82XX(ha))
@@ -2265,7 +2330,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
- pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031) {
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001) {
bars = pci_select_bars(pdev, IORESOURCE_MEM);
mem_only = 1;
ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
@@ -2436,6 +2502,18 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
ha->nvram_conf_off = ~0;
ha->nvram_data_off = ~0;
+ } else if (IS_QLAFX00(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_FX00;
+ ha->mbx_count = MAILBOX_REGISTER_COUNT_FX00;
+ ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00;
+ req_length = REQUEST_ENTRY_CNT_FX00;
+ rsp_length = RESPONSE_ENTRY_CNT_FX00;
+ ha->init_cb_size = sizeof(struct init_cb_fx);
+ ha->isp_ops = &qlafx00_isp_ops;
+ ha->port_down_retry_count = 30; /* default value */
+ ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
+ ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
+ ha->mr.fw_hbt_en = 1;
}
ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
@@ -2500,13 +2578,24 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host = base_vha->host;
base_vha->req = req;
- host->can_queue = req->length + 128;
+ if (IS_QLAFX00(ha))
+ host->can_queue = 1024;
+ else
+ host->can_queue = req->length + 128;
if (IS_QLA2XXX_MIDTYPE(ha))
base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
else
base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
base_vha->vp_idx;
+ /* Setup fcport template structure. */
+ ha->mr.fcport.vha = base_vha;
+ ha->mr.fcport.port_type = FCT_UNKNOWN;
+ ha->mr.fcport.loop_id = FC_NO_LOOP_ID;
+ qla2x00_set_fcport_state(&ha->mr.fcport, FCS_UNCONFIGURED);
+ ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED;
+ ha->mr.fcport.scan_state = 1;
+
/* Set the SG table size based on ISP type */
if (!IS_FWI2_CAPABLE(ha)) {
if (IS_QLA2100(ha))
@@ -2562,6 +2651,13 @@ que_init:
rsp->req = req;
req->rsp = rsp;
+ if (IS_QLAFX00(ha)) {
+ ha->rsp_q_map[0] = rsp;
+ ha->req_q_map[0] = req;
+ set_bit(0, ha->req_qid_map);
+ set_bit(0, ha->rsp_qid_map);
+ }
+
/* FWI2-capable only. */
req->req_q_in = &ha->iobase->isp24.req_q_in;
req->req_q_out = &ha->iobase->isp24.req_q_out;
@@ -2574,6 +2670,13 @@ que_init:
rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out;
}
+ if (IS_QLAFX00(ha)) {
+ req->req_q_in = &ha->iobase->ispfx00.req_q_in;
+ req->req_q_out = &ha->iobase->ispfx00.req_q_out;
+ rsp->rsp_q_in = &ha->iobase->ispfx00.rsp_q_in;
+ rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out;
+ }
+
if (IS_QLA82XX(ha)) {
req->req_q_out = &ha->iobase->isp82.req_q_out[0];
rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
@@ -2595,7 +2698,7 @@ que_init:
"req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
- if (qla2x00_initialize_adapter(base_vha)) {
+ if (ha->isp_ops->initialize_adapter(base_vha)) {
ql_log(ql_log_fatal, base_vha, 0x00d6,
"Failed to initialize adapter - Adapter flags %x.\n",
base_vha->device_flags);
@@ -2720,6 +2823,18 @@ skip_dpc:
qla2x00_alloc_sysfs_attr(base_vha);
+ if (IS_QLAFX00(ha)) {
+ ret = qlafx00_fx_disc(base_vha,
+ &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO);
+
+ ret = qlafx00_fx_disc(base_vha,
+ &base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO);
+
+ /* Register system information */
+ ret = qlafx00_fx_disc(base_vha,
+ &base_vha->hw->mr.fcport, FXDISC_REG_HOST_INFO);
+ }
+
qla2x00_init_host_attr(base_vha);
qla2x00_dfs_setup(base_vha);
@@ -2777,6 +2892,8 @@ iospace_config_failed:
} else {
if (ha->iobase)
iounmap(ha->iobase);
+ if (ha->cregbase)
+ iounmap(ha->cregbase);
}
pci_release_selected_regions(ha->pdev, ha->bars);
kfree(ha);
@@ -2960,6 +3077,9 @@ qla2x00_remove_one(struct pci_dev *pdev)
if (ha->iobase)
iounmap(ha->iobase);
+ if (ha->cregbase)
+ iounmap(ha->cregbase);
+
if (ha->mqiobase)
iounmap(ha->mqiobase);
@@ -3068,6 +3188,12 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
int do_login, int defer)
{
+ if (IS_QLAFX00(vha->hw)) {
+ qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
+ qla2x00_schedule_rport_del(vha, fcport, defer);
+ return;
+ }
+
if (atomic_read(&fcport->state) == FCS_ONLINE &&
vha->vp_idx == fcport->vha->vp_idx) {
qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
@@ -3710,6 +3836,22 @@ qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp);
}
+int
+qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode,
+ uint32_t *data, int cnt)
+{
+ struct qla_work_evt *e;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_AENFX);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.aenfx.evtcode = evtcode;
+ e->u.aenfx.count = cnt;
+ memcpy(e->u.aenfx.mbx, data, sizeof(*data) * cnt);
+ return qla2x00_post_work(vha, e);
+}
+
void
qla2x00_do_work(struct scsi_qla_host *vha)
{
@@ -3758,6 +3900,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_UEVENT:
qla2x00_uevent_emit(vha, e->u.uevent.code);
break;
+ case QLA_EVT_AENFX:
+ qlafx00_process_aen(vha, e);
+ break;
}
if (e->flags & QLA_EVT_FLAG_FREE)
kfree(e);
@@ -4592,6 +4737,38 @@ qla2x00_do_dpc(void *data)
ql_dbg(ql_dbg_dpc, base_vha, 0x4006,
"FCoE context reset end.\n");
}
+ } else if (IS_QLAFX00(ha)) {
+ if (test_and_clear_bit(ISP_UNRECOVERABLE,
+ &base_vha->dpc_flags)) {
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4020,
+ "Firmware Reset Recovery\n");
+ if (qlafx00_reset_initialize(base_vha)) {
+ /* Failed. Abort isp later. */
+ if (!test_bit(UNLOADING,
+ &base_vha->dpc_flags))
+ set_bit(ISP_UNRECOVERABLE,
+ &base_vha->dpc_flags);
+ ql_dbg(ql_dbg_dpc, base_vha,
+ 0x4021,
+ "Reset Recovery Failed\n");
+ }
+ }
+
+ if (test_and_clear_bit(FX00_TARGET_SCAN,
+ &base_vha->dpc_flags)) {
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4022,
+ "ISPFx00 Target Scan scheduled\n");
+ if (qlafx00_rescan_isp(base_vha)) {
+ if (!test_bit(UNLOADING,
+ &base_vha->dpc_flags))
+ set_bit(ISP_UNRECOVERABLE,
+ &base_vha->dpc_flags);
+ ql_dbg(ql_dbg_dpc, base_vha, 0x401e,
+ "ISPFx00 Target Scan Failed\n");
+ }
+ ql_dbg(ql_dbg_dpc, base_vha, 0x401f,
+ "ISPFx00 Target Scan End\n");
+ }
}
if (test_and_clear_bit(ISP_ABORT_NEEDED,
@@ -4630,6 +4807,9 @@ qla2x00_do_dpc(void *data)
clear_bit(SCR_PENDING, &base_vha->dpc_flags);
}
+ if (IS_QLAFX00(ha))
+ goto loop_resync_check;
+
if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
"Quiescence mode scheduled.\n");
@@ -4654,7 +4834,7 @@ qla2x00_do_dpc(void *data)
}
if (test_and_clear_bit(RESET_MARKER_NEEDED,
- &base_vha->dpc_flags) &&
+ &base_vha->dpc_flags) &&
(!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
ql_dbg(ql_dbg_dpc, base_vha, 0x400b,
@@ -4677,9 +4857,9 @@ qla2x00_do_dpc(void *data)
ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
"Relogin end.\n");
}
-
+loop_resync_check:
if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
- &base_vha->dpc_flags)) {
+ &base_vha->dpc_flags)) {
ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
"Loop resync scheduled.\n");
@@ -4697,6 +4877,9 @@ qla2x00_do_dpc(void *data)
"Loop resync end.\n");
}
+ if (IS_QLAFX00(ha))
+ goto intr_on_check;
+
if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
atomic_read(&base_vha->loop_state) == LOOP_READY) {
clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
@@ -4714,7 +4897,7 @@ qla2x00_do_dpc(void *data)
if (test_and_clear_bit(HOST_RAMP_UP_QUEUE_DEPTH,
&base_vha->dpc_flags))
qla2x00_host_ramp_up_queuedepth(base_vha);
-
+intr_on_check:
if (!ha->interrupts_on)
ha->isp_ops->enable_intrs(ha);
@@ -4722,7 +4905,8 @@ qla2x00_do_dpc(void *data)
&base_vha->dpc_flags))
ha->isp_ops->beacon_blink(base_vha);
- qla2x00_do_dpc_all_vps(base_vha);
+ if (!IS_QLAFX00(ha))
+ qla2x00_do_dpc_all_vps(base_vha);
ha->dpc_active = 0;
end_loop:
@@ -4818,6 +5002,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
qla82xx_watchdog(vha);
}
+ if (!vha->vp_idx && IS_QLAFX00(ha))
+ qlafx00_timer_routine(vha);
+
/* Loop down handler. */
if (atomic_read(&vha->loop_down_timer) > 0 &&
!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
@@ -5335,6 +5522,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
{ 0 },
};
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
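
With .initialize_adapter now present in every isp_operations table, the
probe path calls ha->isp_ops->initialize_adapter() (see the hunk above)
instead of hard-coding qla2x00_initialize_adapter(), which is how ISPFx00
substitutes qlafx00_initialize_adapter(). A stripped-down sketch of the
dispatch pattern, with simplified types for illustration:

struct isp_ops_sketch {
	int (*initialize_adapter)(void *vha);
};

static int qla2x00_init_sketch(void *vha)  { return 0; /* legacy ISPs */ }
static int qlafx00_init_sketch(void *vha)  { return 0; /* ISPFx00 */ }

static const struct isp_ops_sketch qla25xx_ops_sketch = {
	.initialize_adapter = qla2x00_init_sketch,
};
static const struct isp_ops_sketch qlafx00_ops_sketch = {
	.initialize_adapter = qlafx00_init_sketch,
};

/* Probe-time call site, mirroring qla2x00_probe_one(). */
static int probe_sketch(const struct isp_ops_sketch *ops, void *vha)
{
	return ops->initialize_adapter(vha);
}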
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index ec54036d1e12..6c66d22eb1b1 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.04.00.13-k"
+#define QLA2XXX_VERSION "8.05.00.03-k"
#define QLA_DRIVER_MAJOR_VER 8
-#define QLA_DRIVER_MINOR_VER 4
+#define QLA_DRIVER_MINOR_VER 5
#define QLA_DRIVER_PATCH_VER 0
#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c
index 5d8fe4f75650..d607eb8e24cb 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.c
+++ b/drivers/scsi/qla4xxx/ql4_83xx.c
@@ -1629,9 +1629,37 @@ static void __qla4_83xx_disable_pause(struct scsi_qla_host *ha)
ql4_printk(KERN_INFO, ha, "Disabled pause frames successfully.\n");
}
+/**
+ * qla4_83xx_eport_init - Initialize EPort.
+ * @ha: Pointer to host adapter structure.
+ *
+ * If the EPort hardware is in reset state before pause frames are disabled,
+ * serious hardware wedging issues can result. To prevent this, perform EPort
+ * init every time before disabling pause frames.
+ **/
+static void qla4_83xx_eport_init(struct scsi_qla_host *ha)
+{
+ /* Clear the 8 registers */
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_REG, 0x0);
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT0, 0x0);
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT1, 0x0);
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT2, 0x0);
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT3, 0x0);
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_SRE_SHIM, 0x0);
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_EPG_SHIM, 0x0);
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_ETHER_PCS, 0x0);
+
+ /* Write any value to Reset Control register */
+ qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_CONTROL, 0xFF);
+
+ ql4_printk(KERN_INFO, ha, "EPORT is out of reset.\n");
+}
+
void qla4_83xx_disable_pause(struct scsi_qla_host *ha)
{
ha->isp_ops->idc_lock(ha);
+ /* Before disabling pause frames, ensure that eport is not in reset */
+ qla4_83xx_eport_init(ha);
qla4_83xx_dump_pause_control_regs(ha);
__qla4_83xx_disable_pause(ha);
ha->isp_ops->idc_unlock(ha);
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.h b/drivers/scsi/qla4xxx/ql4_83xx.h
index 6a00f903f2a6..fab237fa32cc 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.h
+++ b/drivers/scsi/qla4xxx/ql4_83xx.h
@@ -55,6 +55,16 @@
#define QLA83XX_SET_PAUSE_VAL 0x0
#define QLA83XX_SET_TC_MAX_CELL_VAL 0x03FF03FF
+#define QLA83XX_RESET_CONTROL 0x28084E50
+#define QLA83XX_RESET_REG 0x28084E60
+#define QLA83XX_RESET_PORT0 0x28084E70
+#define QLA83XX_RESET_PORT1 0x28084E80
+#define QLA83XX_RESET_PORT2 0x28084E90
+#define QLA83XX_RESET_PORT3 0x28084EA0
+#define QLA83XX_RESET_SRE_SHIM 0x28084EB0
+#define QLA83XX_RESET_EPG_SHIM 0x28084EC0
+#define QLA83XX_RESET_ETHER_PCS 0x28084ED0
+
/* qla_83xx_reg_tbl registers */
#define QLA83XX_PEG_HALT_STATUS1 0x34A8
#define QLA83XX_PEG_HALT_STATUS2 0x34AC
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.h b/drivers/scsi/qla4xxx/ql4_dbg.h
index 5b0afc18ef18..51c365bcf912 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.h
+++ b/drivers/scsi/qla4xxx/ql4_dbg.h
@@ -12,6 +12,7 @@
/* #define QL_DEBUG_LEVEL_3 */ /* Output function tracing */
/* #define QL_DEBUG_LEVEL_4 */
/* #define QL_DEBUG_LEVEL_5 */
+/* #define QL_DEBUG_LEVEL_7 */
/* #define QL_DEBUG_LEVEL_9 */
#define QL_DEBUG_LEVEL_2 /* Always enable error messages */
@@ -48,6 +49,12 @@
#define DEBUG5(x) do {} while (0);
#endif /* */
+#if defined(QL_DEBUG_LEVEL_7)
+#define DEBUG7(x) do {x; } while (0)
+#else /* */
+#define DEBUG7(x) do {} while (0)
+#endif /* */
+
#if defined(QL_DEBUG_LEVEL_9)
#define DEBUG9(x) do {x;} while (0);
#else /* */
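
DEBUG7 follows the existing DEBUGn convention: it expands to its argument
when QL_DEBUG_LEVEL_7 is defined and to an empty statement otherwise, so
the spurious-interrupt prints demoted from DEBUG2 in ql4_isr.c cost
nothing in production builds. A standalone illustration of the pattern:

#include <stdio.h>

#if defined(QL_DEBUG_LEVEL_7)
#define DEBUG7(x) do {x; } while (0)
#else
#define DEBUG7(x) do {} while (0)
#endif

int main(void)
{
	/* Compiles away unless built with -DQL_DEBUG_LEVEL_7. */
	DEBUG7(printf("legacy Int not triggered\n"));
	return 0;
}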
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 129f5dd02822..ddf16a86bbf5 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -159,6 +159,22 @@
#define LSDW(x) ((u32)((u64)(x)))
#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
+#define DEV_DB_NON_PERSISTENT 0
+#define DEV_DB_PERSISTENT 1
+
+#define COPY_ISID(dst_isid, src_isid) { \
+ int i, j; \
+ for (i = 0, j = ISID_SIZE - 1; i < ISID_SIZE;) \
+ dst_isid[i++] = src_isid[j--]; \
+}
+
+#define SET_BITVAL(o, n, v) { \
+ if (o) \
+ n |= v; \
+ else \
+ n &= ~v; \
+}
+
/*
* Retry & Timeout Values
*/
@@ -363,6 +379,8 @@ struct ql82xx_hw_data {
uint32_t flt_iscsi_param;
uint32_t flt_region_chap;
uint32_t flt_chap_size;
+ uint32_t flt_region_ddb;
+ uint32_t flt_ddb_size;
};
struct qla4_8xxx_legacy_intr_set {
@@ -501,6 +519,7 @@ struct scsi_qla_host {
#define AF_INIT_DONE 1 /* 0x00000002 */
#define AF_MBOX_COMMAND 2 /* 0x00000004 */
#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */
+#define AF_ST_DISCOVERY_IN_PROGRESS 4 /* 0x00000010 */
#define AF_INTERRUPTS_ON 6 /* 0x00000040 */
#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
#define AF_LINK_UP 8 /* 0x00000100 */
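
COPY_ISID reverses the byte order of the ISID because the firmware DDB
stores it in the opposite order from what the iSCSI transport expects.
A standalone check of the macro's effect, assuming ISID_SIZE is 6 (the
6-byte iSCSI ISID):

#include <stdio.h>
#include <stdint.h>

#define ISID_SIZE 6	/* assumed; the iSCSI ISID is 6 bytes */

#define COPY_ISID(dst_isid, src_isid) { \
	int i, j; \
	for (i = 0, j = ISID_SIZE - 1; i < ISID_SIZE;) \
		dst_isid[i++] = src_isid[j--]; \
}

int main(void)
{
	uint8_t fw[ISID_SIZE] = { 0x80, 0x00, 0x40, 0x00, 0x00, 0x01 };
	uint8_t out[ISID_SIZE];
	int k;

	COPY_ISID(out, fw);	/* out = 01 00 00 40 00 80 */
	for (k = 0; k < ISID_SIZE; k++)
		printf("%02x ", out[k]);
	printf("\n");
	return 0;
}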
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index ad9d2e2d370f..c7b8892b5a83 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -288,6 +288,8 @@ union external_hw_config_reg {
#define FA_GOLD_RISC_CODE_ADDR_82 0x80000
#define FA_FLASH_ISCSI_CHAP 0x540000
#define FA_FLASH_CHAP_SIZE 0xC0000
+#define FA_FLASH_ISCSI_DDB 0x420000
+#define FA_FLASH_DDB_SIZE 0x080000
/* Flash Description Table */
struct qla_fdt_layout {
@@ -348,6 +350,7 @@ struct qla_flt_header {
#define FLT_REG_BOOT_CODE_82 0x78
#define FLT_REG_ISCSI_PARAM 0x65
#define FLT_REG_ISCSI_CHAP 0x63
+#define FLT_REG_ISCSI_DDB 0x6A
struct qla_flt_region {
uint32_t code;
@@ -490,12 +493,16 @@ struct qla_flt_region {
#define MBOX_ASTS_SUBNET_STATE_CHANGE 0x8027
#define MBOX_ASTS_RESPONSE_QUEUE_FULL 0x8028
#define MBOX_ASTS_IP_ADDR_STATE_CHANGED 0x8029
+#define MBOX_ASTS_IPV6_DEFAULT_ROUTER_CHANGED 0x802A
#define MBOX_ASTS_IPV6_PREFIX_EXPIRED 0x802B
#define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED 0x802C
#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D
#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E
+#define MBOX_ASTS_INITIALIZATION_FAILED 0x8031
+#define MBOX_ASTS_SYSTEM_WARNING_EVENT 0x8036
#define MBOX_ASTS_IDC_COMPLETE 0x8100
#define MBOX_ASTS_IDC_REQUEST_NOTIFICATION 0x8101
+#define MBOX_ASTS_DCBX_CONF_CHANGE 0x8110
#define MBOX_ASTS_TXSCVR_INSERTED 0x8130
#define MBOX_ASTS_TXSCVR_REMOVED 0x8131
@@ -779,12 +786,41 @@ struct dev_db_entry {
#define DDB_OPT_IPV6_NULL_LINK_LOCAL 0x800 /* post connection */
#define DDB_OPT_IPV6_FW_DEFINED_LINK_LOCAL 0x800 /* pre connection */
+#define OPT_IS_FW_ASSIGNED_IPV6 11
+#define OPT_IPV6_DEVICE 8
+#define OPT_AUTO_SENDTGTS_DISABLE 6
+#define OPT_DISC_SESSION 4
+#define OPT_ENTRY_STATE 3
uint16_t exec_throttle; /* 02-03 */
uint16_t exec_count; /* 04-05 */
uint16_t res0; /* 06-07 */
uint16_t iscsi_options; /* 08-09 */
+#define ISCSIOPT_HEADER_DIGEST_EN 13
+#define ISCSIOPT_DATA_DIGEST_EN 12
+#define ISCSIOPT_IMMEDIATE_DATA_EN 11
+#define ISCSIOPT_INITIAL_R2T_EN 10
+#define ISCSIOPT_DATA_SEQ_IN_ORDER 9
+#define ISCSIOPT_DATA_PDU_IN_ORDER 8
+#define ISCSIOPT_CHAP_AUTH_EN 7
+#define ISCSIOPT_SNACK_REQ_EN 6
+#define ISCSIOPT_DISCOVERY_LOGOUT_EN 5
+#define ISCSIOPT_BIDI_CHAP_EN 4
+#define ISCSIOPT_DISCOVERY_AUTH_OPTIONAL 3
+#define ISCSIOPT_ERL1 1
+#define ISCSIOPT_ERL0 0
+
uint16_t tcp_options; /* 0A-0B */
+#define TCPOPT_TIMESTAMP_STAT 6
+#define TCPOPT_NAGLE_DISABLE 5
+#define TCPOPT_WSF_DISABLE 4
+#define TCPOPT_TIMER_SCALE3 3
+#define TCPOPT_TIMER_SCALE2 2
+#define TCPOPT_TIMER_SCALE1 1
+#define TCPOPT_TIMESTAMP_EN 0
+
uint16_t ip_options; /* 0C-0D */
+#define IPOPT_FRAGMENT_DISABLE 4
+
uint16_t iscsi_max_rcv_data_seg_len; /* 0E-0F */
#define BYTE_UNITS 512
uint32_t res1; /* 10-13 */
@@ -816,6 +852,8 @@ struct dev_db_entry {
* much RAM */
uint8_t link_local_ipv6_addr[0x10]; /* 1A0-1AF */
uint8_t res5[0x10]; /* 1B0-1BF */
+#define DDB_NO_LINK 0xFFFF
+#define DDB_ISNS 0xFFFD
uint16_t ddb_link; /* 1C0-1C1 */
uint16_t chap_tbl_idx; /* 1C2-1C3 */
uint16_t tgt_portal_grp; /* 1C4-1C5 */
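
The ISCSIOPT_/TCPOPT_/IPOPT_ values added above are bit positions within
the little-endian option words; qla4xxx_copy_from_fwddb_param() in
ql4_os.c later decodes them with le16_to_cpu() plus test_bit(). A
userspace sketch of the same decode, with test_bit() replaced by plain
shifts (le16 conversion omitted, so a little-endian host is assumed):

#include <stdio.h>
#include <stdint.h>

#define ISCSIOPT_HEADER_DIGEST_EN	13
#define ISCSIOPT_DATA_DIGEST_EN		12

int main(void)
{
	uint16_t iscsi_options = 0x3000;	/* bits 13 and 12 set */

	int hdrdgst_en  = (iscsi_options >> ISCSIOPT_HEADER_DIGEST_EN) & 1;
	int datadgst_en = (iscsi_options >> ISCSIOPT_DATA_DIGEST_EN) & 1;
	printf("hdrdgst_en=%d datadgst_en=%d\n", hdrdgst_en, datadgst_en);
	return 0;
}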
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 982293edf02c..4a428009f699 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -191,6 +191,9 @@ int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options,
int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
uint32_t status, uint32_t pid,
uint32_t data_size, uint8_t *data);
+int qla4xxx_flashdb_by_index(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index);
/* BSG Functions */
int qla4xxx_bsg_request(struct bsg_job *bsg_job);
@@ -224,8 +227,6 @@ void qla4_83xx_interrupt_service_routine(struct scsi_qla_host *ha,
int qla4_83xx_isp_reset(struct scsi_qla_host *ha);
void qla4_83xx_queue_iocb(struct scsi_qla_host *ha);
void qla4_83xx_complete_iocb(struct scsi_qla_host *ha);
-uint16_t qla4_83xx_rd_shdw_req_q_out(struct scsi_qla_host *ha);
-uint16_t qla4_83xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha);
uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr);
void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val);
int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
@@ -261,6 +262,10 @@ int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha);
void qla4_83xx_disable_pause(struct scsi_qla_host *ha);
void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha);
int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha);
+int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options,
+ dma_addr_t dma_addr);
+int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
+ char *password, uint16_t chap_index);
extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 1b83dc283d2e..482287f4005f 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -396,7 +396,6 @@ static void qla4xxx_passthru_status_entry(struct scsi_qla_host *ha,
task_data = task->dd_data;
memcpy(&task_data->sts, sts_entry, sizeof(struct passthru_status));
- ha->req_q_count += task_data->iocb_req_cnt;
ha->iocb_cnt -= task_data->iocb_req_cnt;
queue_work(ha->task_wq, &task_data->task_work);
}
@@ -416,7 +415,6 @@ static struct mrb *qla4xxx_del_mrb_from_active_array(struct scsi_qla_host *ha,
return mrb;
/* update counters */
- ha->req_q_count += mrb->iocb_cnt;
ha->iocb_cnt -= mrb->iocb_cnt;
return mrb;
@@ -877,6 +875,43 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
}
break;
+ case MBOX_ASTS_IPV6_DEFAULT_ROUTER_CHANGED:
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
+ ha->host_no, mbox_sts[0], mbox_sts[1],
+ mbox_sts[2], mbox_sts[3], mbox_sts[4],
+ mbox_sts[5]));
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: AEN %04x Received IPv6 default router changed notification\n",
+ ha->host_no, mbox_sts[0]));
+ break;
+
+ case MBOX_ASTS_INITIALIZATION_FAILED:
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: AEN %04x, mbox_sts[3]=%08x\n",
+ ha->host_no, mbox_sts[0],
+ mbox_sts[3]));
+ break;
+
+ case MBOX_ASTS_SYSTEM_WARNING_EVENT:
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
+ ha->host_no, mbox_sts[0], mbox_sts[1],
+ mbox_sts[2], mbox_sts[3], mbox_sts[4],
+ mbox_sts[5]));
+ break;
+
+ case MBOX_ASTS_DCBX_CONF_CHANGE:
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
+ ha->host_no, mbox_sts[0], mbox_sts[1],
+ mbox_sts[2], mbox_sts[3], mbox_sts[4],
+ mbox_sts[5]));
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: AEN %04x Received DCBX configuration changed notification\n",
+ ha->host_no, mbox_sts[0]));
+ break;
+
default:
DEBUG2(printk(KERN_WARNING
"scsi%ld: AEN %04x UNKNOWN\n",
@@ -1099,8 +1134,8 @@ irqreturn_t qla4_82xx_intr_handler(int irq, void *dev_id)
status = qla4_82xx_rd_32(ha, ISR_INT_STATE_REG);
if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) {
- DEBUG2(ql4_printk(KERN_INFO, ha,
- "%s legacy Int not triggered\n", __func__));
+ DEBUG7(ql4_printk(KERN_INFO, ha,
+ "%s legacy Int not triggered\n", __func__));
return IRQ_NONE;
}
@@ -1158,7 +1193,7 @@ irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id)
/* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
if (!(leg_int_ptr & LEG_INT_PTR_B31)) {
- DEBUG2(ql4_printk(KERN_ERR, ha,
+ DEBUG7(ql4_printk(KERN_ERR, ha,
"%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n",
__func__));
return IRQ_NONE;
@@ -1166,7 +1201,7 @@ irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id)
/* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */
if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit) {
- DEBUG2(ql4_printk(KERN_ERR, ha,
+ DEBUG7(ql4_printk(KERN_ERR, ha,
"%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n",
__func__, (leg_int_ptr & PF_BITS_MASK),
ha->pf_bit));
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 160d33697216..a501beab3ffe 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -1129,6 +1129,7 @@ int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
+ uint32_t scsi_lun[2];
int status = QLA_SUCCESS;
DEBUG2(printk("scsi%ld:%d:%d: lun reset issued\n", ha->host_no,
@@ -1140,10 +1141,16 @@ int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
*/
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
+ int_to_scsilun(lun, (struct scsi_lun *) scsi_lun);
mbox_cmd[0] = MBOX_CMD_LUN_RESET;
mbox_cmd[1] = ddb_entry->fw_ddb_index;
- mbox_cmd[2] = lun << 8;
+ /* FW expects LUN bytes 0-3 in Incoming Mailbox 2
+ * (LUN byte 0 is LSByte, byte 3 is MSByte) */
+ mbox_cmd[2] = cpu_to_le32(scsi_lun[0]);
+ /* FW expects LUN bytes 4-7 in Incoming Mailbox 3
+ * (LUN byte 4 is LSByte, byte 7 is MSByte) */
+ mbox_cmd[3] = cpu_to_le32(scsi_lun[1]);
mbox_cmd[5] = 0x01; /* Immediate Command Enable */
qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]);
@@ -1281,8 +1288,8 @@ exit_about_fw:
return status;
}
-static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options,
- dma_addr_t dma_addr)
+int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options,
+ dma_addr_t dma_addr)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -1410,6 +1417,55 @@ exit_bootdb_failed:
return status;
}
+int qla4xxx_flashdb_by_index(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index)
+{
+ uint32_t dev_db_start_offset;
+ uint32_t dev_db_end_offset;
+ int status = QLA_ERROR;
+
+ memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));
+
+ if (is_qla40XX(ha)) {
+ dev_db_start_offset = FLASH_OFFSET_DB_INFO;
+ dev_db_end_offset = FLASH_OFFSET_DB_END;
+ } else {
+ dev_db_start_offset = FLASH_RAW_ACCESS_ADDR +
+ (ha->hw.flt_region_ddb << 2);
+ /* flt_ddb_size is the DDB table size for both ports,
+ * so divide it by 2 to calculate the offset for the second port
+ */
+ if (ha->port_num == 1)
+ dev_db_start_offset += (ha->hw.flt_ddb_size / 2);
+
+ dev_db_end_offset = dev_db_start_offset +
+ (ha->hw.flt_ddb_size / 2);
+ }
+
+ dev_db_start_offset += (ddb_index * sizeof(*fw_ddb_entry));
+
+ if (dev_db_start_offset > dev_db_end_offset) {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s:Invalid DDB index %d", __func__,
+ ddb_index));
+ goto exit_fdb_failed;
+ }
+
+ if (qla4xxx_get_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
+ sizeof(*fw_ddb_entry)) != QLA_SUCCESS) {
+ ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash failed\n",
+ ha->host_no, __func__);
+ goto exit_fdb_failed;
+ }
+
+ if (fw_ddb_entry->cookie == DDB_VALID_COOKIE)
+ status = QLA_SUCCESS;
+
+exit_fdb_failed:
+ return status;
+}
+
int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password,
uint16_t idx)
{
@@ -1503,6 +1559,62 @@ exit_set_chap:
return ret;
}
+
+int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
+ char *password, uint16_t chap_index)
+{
+ int rval = QLA_ERROR;
+ struct ql4_chap_table *chap_table = NULL;
+ int max_chap_entries;
+
+ if (!ha->chap_list) {
+ ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
+ rval = QLA_ERROR;
+ goto exit_uni_chap;
+ }
+
+ if (!username || !password) {
+ ql4_printk(KERN_ERR, ha, "No memory for username & secret\n");
+ rval = QLA_ERROR;
+ goto exit_uni_chap;
+ }
+
+ if (is_qla80XX(ha))
+ max_chap_entries = (ha->hw.flt_chap_size / 2) /
+ sizeof(struct ql4_chap_table);
+ else
+ max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+ if (chap_index > max_chap_entries) {
+ ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
+ rval = QLA_ERROR;
+ goto exit_uni_chap;
+ }
+
+ mutex_lock(&ha->chap_sem);
+ chap_table = (struct ql4_chap_table *)ha->chap_list + chap_index;
+ if (chap_table->cookie != __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+ rval = QLA_ERROR;
+ goto exit_unlock_uni_chap;
+ }
+
+ if (!(chap_table->flags & BIT_6)) {
+ ql4_printk(KERN_ERR, ha, "Unidirectional entry not set\n");
+ rval = QLA_ERROR;
+ goto exit_unlock_uni_chap;
+ }
+
+ strncpy(password, chap_table->secret, MAX_CHAP_SECRET_LEN);
+ strncpy(username, chap_table->name, MAX_CHAP_NAME_LEN);
+
+ rval = QLA_SUCCESS;
+
+exit_unlock_uni_chap:
+ mutex_unlock(&ha->chap_sem);
+exit_uni_chap:
+ return rval;
+}
+
/**
* qla4xxx_get_chap_index - Get chap index given username and secret
* @ha: pointer to adapter structure
@@ -1524,7 +1636,7 @@ int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
int max_chap_entries = 0;
struct ql4_chap_table *chap_table;
- if (is_qla8022(ha))
+ if (is_qla80XX(ha))
max_chap_entries = (ha->hw.flt_chap_size / 2) /
sizeof(struct ql4_chap_table);
else
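
The reworked qla4xxx_reset_lun() above passes the full 8-byte SCSI LUN
through two 32-bit mailboxes instead of the old single-level "lun << 8"
encoding: int_to_scsilun() lays the LUN out byte-wise, and the firmware
takes byte 0 as the LSByte of mailbox 2 and byte 7 as the MSByte of
mailbox 3. A hedged worked example, with a simplified stand-in for
int_to_scsilun() that only handles small (peripheral-addressing) LUNs:

#include <stdio.h>
#include <stdint.h>

static void int_to_scsilun_sketch(unsigned int lun, uint8_t b[8])
{
	int i;

	for (i = 0; i < 8; i++)
		b[i] = 0;
	b[0] = (lun >> 8) & 0xFF;	/* peripheral addressing only */
	b[1] = lun & 0xFF;
}

int main(void)
{
	uint8_t b[8];
	uint32_t mbox2, mbox3;

	int_to_scsilun_sketch(5, b);
	/* Byte 0 = LSByte of mailbox 2, byte 7 = MSByte of mailbox 3. */
	mbox2 = b[0] | b[1] << 8 | b[2] << 16 | (uint32_t)b[3] << 24;
	mbox3 = b[4] | b[5] << 8 | b[6] << 16 | (uint32_t)b[7] << 24;
	printf("mbox[2]=0x%08x mbox[3]=0x%08x\n", mbox2, mbox3);
	/* -> mbox[2]=0x00000500 mbox[3]=0x00000000 for LUN 5 */
	return 0;
}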
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 9299400d3c9e..eaf00c162eb2 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -3154,6 +3154,10 @@ qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr)
hw->flt_region_chap = start;
hw->flt_chap_size = le32_to_cpu(region->size);
break;
+ case FLT_REG_ISCSI_DDB:
+ hw->flt_region_ddb = start;
+ hw->flt_ddb_size = le32_to_cpu(region->size);
+ break;
}
}
goto done;
@@ -3166,14 +3170,19 @@ no_flash_data:
hw->flt_region_boot = FA_BOOT_CODE_ADDR_82;
hw->flt_region_bootload = FA_BOOT_LOAD_ADDR_82;
hw->flt_region_fw = FA_RISC_CODE_ADDR_82;
- hw->flt_region_chap = FA_FLASH_ISCSI_CHAP;
+ hw->flt_region_chap = FA_FLASH_ISCSI_CHAP >> 2;
hw->flt_chap_size = FA_FLASH_CHAP_SIZE;
+ hw->flt_region_ddb = FA_FLASH_ISCSI_DDB >> 2;
+ hw->flt_ddb_size = FA_FLASH_DDB_SIZE;
done:
- DEBUG2(ql4_printk(KERN_INFO, ha, "FLT[%s]: flt=0x%x fdt=0x%x "
- "boot=0x%x bootload=0x%x fw=0x%x\n", loc, hw->flt_region_flt,
- hw->flt_region_fdt, hw->flt_region_boot, hw->flt_region_bootload,
- hw->flt_region_fw));
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "FLT[%s]: flt=0x%x fdt=0x%x boot=0x%x bootload=0x%x fw=0x%x chap=0x%x chap_size=0x%x ddb=0x%x ddb_size=0x%x\n",
+ loc, hw->flt_region_flt, hw->flt_region_fdt,
+ hw->flt_region_boot, hw->flt_region_bootload,
+ hw->flt_region_fw, hw->flt_region_chap,
+ hw->flt_chap_size, hw->flt_region_ddb,
+ hw->flt_ddb_size));
}
static void
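
Note the address conventions in this hunk: the FLT stores region starts
as 32-bit-word offsets, so the byte constants FA_FLASH_ISCSI_CHAP and
FA_FLASH_ISCSI_DDB are shifted right by 2 in the no_flash_data fallback,
and qla4xxx_flashdb_by_index() in ql4_mbx.c shifts flt_region_ddb left
by 2 to get back to a byte offset before applying the per-port split. A
worked sketch of the DDB offset math; the FLASH_RAW_ACCESS_ADDR value
here is an assumption for illustration:

#include <stdio.h>
#include <stdint.h>

#define FA_FLASH_ISCSI_DDB	0x420000u	/* byte address (ql4_fw.h) */
#define FA_FLASH_DDB_SIZE	0x080000u	/* bytes, covers both ports */
#define FLASH_RAW_ACCESS_ADDR	0x8000000u	/* assumed base address */

int main(void)
{
	uint32_t flt_region_ddb = FA_FLASH_ISCSI_DDB >> 2; /* word offset */
	uint32_t flt_ddb_size = FA_FLASH_DDB_SIZE;
	int port_num = 1;

	/* Back to a byte offset, then take the second half of the
	 * table for port 1, as qla4xxx_flashdb_by_index() does. */
	uint32_t start = FLASH_RAW_ACCESS_ADDR + (flt_region_ddb << 2);
	if (port_num == 1)
		start += flt_ddb_size / 2;
	printf("port %d DDB table starts at 0x%08x\n", port_num, start);
	return 0;
}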
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 6142729167f4..a47f99957ba8 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -166,6 +166,26 @@ static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
int reason);
+/*
+ * iSCSI Flash DDB sysfs entry points
+ */
+static int
+qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
+ struct iscsi_bus_flash_conn *fnode_conn,
+ void *data, int len);
+static int
+qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
+ int param, char *buf);
+static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
+ int len);
+static int
+qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess);
+static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
+ struct iscsi_bus_flash_conn *fnode_conn);
+static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
+ struct iscsi_bus_flash_conn *fnode_conn);
+static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess);
+
static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
QLA82XX_LEGACY_INTR_CONFIG;
@@ -232,6 +252,13 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
.send_ping = qla4xxx_send_ping,
.get_chap = qla4xxx_get_chap_list,
.delete_chap = qla4xxx_delete_chap,
+ .get_flashnode_param = qla4xxx_sysfs_ddb_get_param,
+ .set_flashnode_param = qla4xxx_sysfs_ddb_set_param,
+ .new_flashnode = qla4xxx_sysfs_ddb_add,
+ .del_flashnode = qla4xxx_sysfs_ddb_delete,
+ .login_flashnode = qla4xxx_sysfs_ddb_login,
+ .logout_flashnode = qla4xxx_sysfs_ddb_logout,
+ .logout_flashnode_sid = qla4xxx_sysfs_ddb_logout_sid,
};
static struct scsi_transport_template *qla4xxx_scsi_transport;
@@ -376,6 +403,68 @@ static umode_t qla4_attr_is_visible(int param_type, int param)
default:
return 0;
}
+ case ISCSI_FLASHNODE_PARAM:
+ switch (param) {
+ case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
+ case ISCSI_FLASHNODE_PORTAL_TYPE:
+ case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
+ case ISCSI_FLASHNODE_DISCOVERY_SESS:
+ case ISCSI_FLASHNODE_ENTRY_EN:
+ case ISCSI_FLASHNODE_HDR_DGST_EN:
+ case ISCSI_FLASHNODE_DATA_DGST_EN:
+ case ISCSI_FLASHNODE_IMM_DATA_EN:
+ case ISCSI_FLASHNODE_INITIAL_R2T_EN:
+ case ISCSI_FLASHNODE_DATASEQ_INORDER:
+ case ISCSI_FLASHNODE_PDU_INORDER:
+ case ISCSI_FLASHNODE_CHAP_AUTH_EN:
+ case ISCSI_FLASHNODE_SNACK_REQ_EN:
+ case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
+ case ISCSI_FLASHNODE_BIDI_CHAP_EN:
+ case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
+ case ISCSI_FLASHNODE_ERL:
+ case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
+ case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
+ case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
+ case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
+ case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
+ case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
+ case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
+ case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
+ case ISCSI_FLASHNODE_FIRST_BURST:
+ case ISCSI_FLASHNODE_DEF_TIME2WAIT:
+ case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
+ case ISCSI_FLASHNODE_MAX_R2T:
+ case ISCSI_FLASHNODE_KEEPALIVE_TMO:
+ case ISCSI_FLASHNODE_ISID:
+ case ISCSI_FLASHNODE_TSID:
+ case ISCSI_FLASHNODE_PORT:
+ case ISCSI_FLASHNODE_MAX_BURST:
+ case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
+ case ISCSI_FLASHNODE_IPADDR:
+ case ISCSI_FLASHNODE_ALIAS:
+ case ISCSI_FLASHNODE_REDIRECT_IPADDR:
+ case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
+ case ISCSI_FLASHNODE_LOCAL_PORT:
+ case ISCSI_FLASHNODE_IPV4_TOS:
+ case ISCSI_FLASHNODE_IPV6_TC:
+ case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
+ case ISCSI_FLASHNODE_NAME:
+ case ISCSI_FLASHNODE_TPGT:
+ case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
+ case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
+ case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
+ case ISCSI_FLASHNODE_TCP_XMIT_WSF:
+ case ISCSI_FLASHNODE_TCP_RECV_WSF:
+ case ISCSI_FLASHNODE_CHAP_OUT_IDX:
+ case ISCSI_FLASHNODE_USERNAME:
+ case ISCSI_FLASHNODE_PASSWORD:
+ case ISCSI_FLASHNODE_STATSN:
+ case ISCSI_FLASHNODE_EXP_STATSN:
+ case ISCSI_FLASHNODE_IS_BOOT_TGT:
+ return S_IRUGO;
+ default:
+ return 0;
+ }
}
return 0;
@@ -391,7 +480,7 @@ static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
int valid_chap_entries = 0;
int ret = 0, i;
- if (is_qla8022(ha))
+ if (is_qla80XX(ha))
max_chap_entries = (ha->hw.flt_chap_size / 2) /
sizeof(struct ql4_chap_table);
else
@@ -495,7 +584,7 @@ static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
memset(chap_table, 0, sizeof(struct ql4_chap_table));
- if (is_qla8022(ha))
+ if (is_qla80XX(ha))
max_chap_entries = (ha->hw.flt_chap_size / 2) /
sizeof(struct ql4_chap_table);
else
@@ -1922,6 +2011,252 @@ static int qla4xxx_task_xmit(struct iscsi_task *task)
return -ENOSYS;
}
+static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess,
+ struct iscsi_bus_flash_conn *conn,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ unsigned long options = 0;
+ int rc = 0;
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
+ if (test_bit(OPT_IPV6_DEVICE, &options)) {
+ rc = iscsi_switch_str_param(&sess->portal_type,
+ PORTAL_TYPE_IPV6);
+ if (rc)
+ goto exit_copy;
+ } else {
+ rc = iscsi_switch_str_param(&sess->portal_type,
+ PORTAL_TYPE_IPV4);
+ if (rc)
+ goto exit_copy;
+ }
+
+ sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
+ &options);
+ sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
+ sess->entry_state = test_bit(OPT_ENTRY_STATE, &options);
+
+ options = le16_to_cpu(fw_ddb_entry->iscsi_options);
+ conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
+ conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
+ sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
+ sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
+ sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
+ &options);
+ sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
+ sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
+ conn->snack_req_en = test_bit(ISCSIOPT_SNACK_REQ_EN, &options);
+ sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
+ &options);
+ sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
+ sess->discovery_auth_optional =
+ test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
+ if (test_bit(ISCSIOPT_ERL1, &options))
+ sess->erl |= BIT_1;
+ if (test_bit(ISCSIOPT_ERL0, &options))
+ sess->erl |= BIT_0;
+
+ options = le16_to_cpu(fw_ddb_entry->tcp_options);
+ conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
+ conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
+ conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
+ if (test_bit(TCPOPT_TIMER_SCALE3, &options))
+ conn->tcp_timer_scale |= BIT_3;
+ if (test_bit(TCPOPT_TIMER_SCALE2, &options))
+ conn->tcp_timer_scale |= BIT_2;
+ if (test_bit(TCPOPT_TIMER_SCALE1, &options))
+ conn->tcp_timer_scale |= BIT_1;
+
+ conn->tcp_timer_scale >>= 1;
+ conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);
+
+ options = le16_to_cpu(fw_ddb_entry->ip_options);
+ conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);
+
+ conn->max_recv_dlength = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
+ conn->max_xmit_dlength = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
+ sess->first_burst = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
+ sess->max_burst = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
+ sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
+ sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+ sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
+ sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+ conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
+ conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
+ conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
+ conn->ipv6_flow_label = le16_to_cpu(fw_ddb_entry->ipv6_flow_lbl);
+ conn->keepalive_timeout = le16_to_cpu(fw_ddb_entry->ka_timeout);
+ conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
+ conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
+ conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
+ sess->discovery_parent_idx = le16_to_cpu(fw_ddb_entry->ddb_link);
+ sess->discovery_parent_type = le16_to_cpu(fw_ddb_entry->ddb_link);
+ sess->chap_out_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
+ sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
+
+ sess->default_taskmgmt_timeout =
+ le16_to_cpu(fw_ddb_entry->def_timeout);
+ conn->port = le16_to_cpu(fw_ddb_entry->port);
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ conn->ipaddress = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
+ if (!conn->ipaddress) {
+ rc = -ENOMEM;
+ goto exit_copy;
+ }
+
+ conn->redirect_ipaddr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
+ if (!conn->redirect_ipaddr) {
+ rc = -ENOMEM;
+ goto exit_copy;
+ }
+
+ memcpy(conn->ipaddress, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
+ memcpy(conn->redirect_ipaddr, fw_ddb_entry->tgt_addr, IPv6_ADDR_LEN);
+
+ if (test_bit(OPT_IPV6_DEVICE, &options)) {
+ conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos;
+
+ conn->link_local_ipv6_addr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
+ if (!conn->link_local_ipv6_addr) {
+ rc = -ENOMEM;
+ goto exit_copy;
+ }
+
+ memcpy(conn->link_local_ipv6_addr,
+ fw_ddb_entry->link_local_ipv6_addr, IPv6_ADDR_LEN);
+ } else {
+ conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
+ }
+
+ if (fw_ddb_entry->iscsi_name[0]) {
+ rc = iscsi_switch_str_param(&sess->targetname,
+ (char *)fw_ddb_entry->iscsi_name);
+ if (rc)
+ goto exit_copy;
+ }
+
+ if (fw_ddb_entry->iscsi_alias[0]) {
+ rc = iscsi_switch_str_param(&sess->targetalias,
+ (char *)fw_ddb_entry->iscsi_alias);
+ if (rc)
+ goto exit_copy;
+ }
+
+ COPY_ISID(sess->isid, fw_ddb_entry->isid);
+
+exit_copy:
+ return rc;
+}
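
[Editor's note] In qla4xxx_copy_from_fwddb_param() above, the three TIMER_SCALE bits are collected into bits 1..3 of tcp_timer_scale and then shifted down once, leaving a plain 3-bit scale value. A minimal, runnable user-space sketch of that decode; the BIT()/TCPOPT_TIMER_SCALE* values here are illustrative assumptions, not the driver's definitions:

#include <assert.h>
#include <stdint.h>

#define BIT(n)	(1u << (n))

/* assumed bit positions, mirroring TCPOPT_TIMER_SCALE1..3 above */
enum { TCPOPT_TIMER_SCALE1 = 1, TCPOPT_TIMER_SCALE2 = 2, TCPOPT_TIMER_SCALE3 = 3 };

static uint8_t decode_timer_scale(uint16_t tcp_options)
{
	uint8_t scale = 0;

	if (tcp_options & BIT(TCPOPT_TIMER_SCALE3))
		scale |= BIT(3);
	if (tcp_options & BIT(TCPOPT_TIMER_SCALE2))
		scale |= BIT(2);
	if (tcp_options & BIT(TCPOPT_TIMER_SCALE1))
		scale |= BIT(1);

	return scale >> 1;	/* the driver's final right shift */
}

int main(void)
{
	/* all three scale bits set -> 0b111 == 7 */
	assert(decode_timer_scale(BIT(1) | BIT(2) | BIT(3)) == 7);
	assert(decode_timer_scale(0) == 0);
	return 0;
}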
+
+static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
+ struct iscsi_bus_flash_conn *conn,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ uint16_t options;
+ int rc = 0;
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ SET_BITVAL(conn->is_fw_assigned_ipv6, options, BIT_11);
+ if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
+ options |= BIT_8;
+ else
+ options &= ~BIT_8;
+
+ SET_BITVAL(sess->auto_snd_tgt_disable, options, BIT_6);
+ SET_BITVAL(sess->discovery_sess, options, BIT_4);
+ SET_BITVAL(sess->entry_state, options, BIT_3);
+ fw_ddb_entry->options = cpu_to_le16(options);
+
+ options = le16_to_cpu(fw_ddb_entry->iscsi_options);
+ SET_BITVAL(conn->hdrdgst_en, options, BIT_13);
+ SET_BITVAL(conn->datadgst_en, options, BIT_12);
+ SET_BITVAL(sess->imm_data_en, options, BIT_11);
+ SET_BITVAL(sess->initial_r2t_en, options, BIT_10);
+ SET_BITVAL(sess->dataseq_inorder_en, options, BIT_9);
+ SET_BITVAL(sess->pdu_inorder_en, options, BIT_8);
+ SET_BITVAL(sess->chap_auth_en, options, BIT_7);
+ SET_BITVAL(conn->snack_req_en, options, BIT_6);
+ SET_BITVAL(sess->discovery_logout_en, options, BIT_5);
+ SET_BITVAL(sess->bidi_chap_en, options, BIT_4);
+ SET_BITVAL(sess->discovery_auth_optional, options, BIT_3);
+ SET_BITVAL(sess->erl & BIT_1, options, BIT_1);
+ SET_BITVAL(sess->erl & BIT_0, options, BIT_0);
+ fw_ddb_entry->iscsi_options = cpu_to_le16(options);
+
+ options = le16_to_cpu(fw_ddb_entry->tcp_options);
+ SET_BITVAL(conn->tcp_timestamp_stat, options, BIT_6);
+ SET_BITVAL(conn->tcp_nagle_disable, options, BIT_5);
+ SET_BITVAL(conn->tcp_wsf_disable, options, BIT_4);
+ SET_BITVAL(conn->tcp_timer_scale & BIT_2, options, BIT_3);
+ SET_BITVAL(conn->tcp_timer_scale & BIT_1, options, BIT_2);
+ SET_BITVAL(conn->tcp_timer_scale & BIT_0, options, BIT_1);
+ SET_BITVAL(conn->tcp_timestamp_en, options, BIT_0);
+ fw_ddb_entry->tcp_options = cpu_to_le16(options);
+
+ options = le16_to_cpu(fw_ddb_entry->ip_options);
+ SET_BITVAL(conn->fragment_disable, options, BIT_4);
+ fw_ddb_entry->ip_options = cpu_to_le16(options);
+
+ fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t);
+ fw_ddb_entry->iscsi_max_rcv_data_seg_len =
+ cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS);
+ fw_ddb_entry->iscsi_max_snd_data_seg_len =
+ cpu_to_le16(conn->max_xmit_dlength / BYTE_UNITS);
+ fw_ddb_entry->iscsi_first_burst_len =
+ cpu_to_le16(sess->first_burst / BYTE_UNITS);
+ fw_ddb_entry->iscsi_max_burst_len = cpu_to_le16(sess->max_burst /
+ BYTE_UNITS);
+ fw_ddb_entry->iscsi_def_time2wait = cpu_to_le16(sess->time2wait);
+ fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain);
+ fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt);
+ fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size);
+ fw_ddb_entry->tcp_xmt_wsf = cpu_to_le16(conn->tcp_xmit_wsf);
+ fw_ddb_entry->tcp_rcv_wsf = cpu_to_le16(conn->tcp_recv_wsf);
+ fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
+ fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label);
+ fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout);
+ fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port);
+	fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn);
+	fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn);
+ fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_type);
+ fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx);
+ fw_ddb_entry->tsid = cpu_to_le16(sess->tsid);
+ fw_ddb_entry->port = cpu_to_le16(conn->port);
+ fw_ddb_entry->def_timeout =
+ cpu_to_le16(sess->default_taskmgmt_timeout);
+
+ if (conn->ipaddress)
+ memcpy(fw_ddb_entry->ip_addr, conn->ipaddress,
+ sizeof(fw_ddb_entry->ip_addr));
+
+ if (conn->redirect_ipaddr)
+ memcpy(fw_ddb_entry->tgt_addr, conn->redirect_ipaddr,
+ sizeof(fw_ddb_entry->tgt_addr));
+
+ if (conn->link_local_ipv6_addr)
+ memcpy(fw_ddb_entry->link_local_ipv6_addr,
+ conn->link_local_ipv6_addr,
+ sizeof(fw_ddb_entry->link_local_ipv6_addr));
+
+ if (sess->targetname)
+ memcpy(fw_ddb_entry->iscsi_name, sess->targetname,
+ sizeof(fw_ddb_entry->iscsi_name));
+
+ if (sess->targetalias)
+ memcpy(fw_ddb_entry->iscsi_alias, sess->targetalias,
+ sizeof(fw_ddb_entry->iscsi_alias));
+
+ COPY_ISID(fw_ddb_entry->isid, sess->isid);
+
+ return rc;
+}
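
[Editor's note] SET_BITVAL is not defined in this hunk; from its uses in qla4xxx_copy_to_fwddb_param() above it evidently sets the named bit in the options word when the value is truthy and clears it otherwise. A hedged, stand-alone sketch of that assumed behaviour:

#include <assert.h>
#include <stdint.h>

/* assumed semantics of the driver's SET_BITVAL(val, flags, bit) */
#define SET_BITVAL(val, flags, bit)		\
	do {					\
		if (val)			\
			(flags) |= (bit);	\
		else				\
			(flags) &= ~(bit);	\
	} while (0)

int main(void)
{
	uint16_t options = 0xffffu;

	SET_BITVAL(0, options, 1u << 8);	/* clear BIT_8 */
	assert(!(options & (1u << 8)));

	SET_BITVAL(1, options, 1u << 8);	/* set it again */
	assert(options & (1u << 8));
	return 0;
}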
+
static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry,
struct iscsi_cls_session *cls_sess,
@@ -2543,6 +2878,7 @@ static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha)
void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
{
uint32_t dev_state;
+ uint32_t idc_ctrl;
/* don't poll if reset is going on */
if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
@@ -2561,10 +2897,23 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
qla4xxx_wake_dpc(ha);
} else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
+
+ ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n",
+ __func__);
+
+ if (is_qla8032(ha)) {
+ idc_ctrl = qla4_83xx_rd_reg(ha,
+ QLA83XX_IDC_DRV_CTRL);
+ if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) {
+ ql4_printk(KERN_INFO, ha, "%s: Graceful reset bit is not set\n",
+ __func__);
+ qla4xxx_mailbox_premature_completion(
+ ha);
+ }
+ }
+
if (is_qla8032(ha) ||
(is_qla8022(ha) && !ql4xdontresethba)) {
- ql4_printk(KERN_INFO, ha, "%s: HW State: "
- "NEED RESET!\n", __func__);
set_bit(DPC_RESET_HA, &ha->dpc_flags);
qla4xxx_wake_dpc(ha);
}
@@ -3737,8 +4086,8 @@ static struct isp_operations qla4_83xx_isp_ops = {
.reset_firmware = qla4_8xxx_stop_firmware,
.queue_iocb = qla4_83xx_queue_iocb,
.complete_iocb = qla4_83xx_complete_iocb,
- .rd_shdw_req_q_out = qla4_83xx_rd_shdw_req_q_out,
- .rd_shdw_rsp_q_in = qla4_83xx_rd_shdw_rsp_q_in,
+ .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
+ .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
.get_sys_info = qla4_8xxx_get_sys_info,
.rd_reg_direct = qla4_83xx_rd_reg,
.wr_reg_direct = qla4_83xx_wr_reg,
@@ -3761,11 +4110,6 @@ uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out));
}
-uint16_t qla4_83xx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
-{
- return (uint16_t)le32_to_cpu(readl(&ha->qla4_83xx_reg->req_q_out));
-}
-
uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
{
return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
@@ -3776,11 +4120,6 @@ uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in));
}
-uint16_t qla4_83xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
-{
- return (uint16_t)le32_to_cpu(readl(&ha->qla4_83xx_reg->rsp_q_in));
-}
-
static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
{
struct scsi_qla_host *ha = data;
@@ -4005,7 +4344,7 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
if (val & BIT_7)
ddb_index[1] = (val & 0x7f);
- } else if (is_qla8022(ha)) {
+ } else if (is_qla80XX(ha)) {
buf = dma_alloc_coherent(&ha->pdev->dev, size,
&buf_dma, GFP_KERNEL);
if (!buf) {
@@ -4083,7 +4422,7 @@ static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
int max_chap_entries = 0;
struct ql4_chap_table *chap_table;
- if (is_qla8022(ha))
+ if (is_qla80XX(ha))
max_chap_entries = (ha->hw.flt_chap_size / 2) /
sizeof(struct ql4_chap_table);
else
@@ -5058,6 +5397,1342 @@ exit_nt_list:
dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
}
+static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha,
+ struct list_head *list_nt)
+{
+ struct dev_db_entry *fw_ddb_entry;
+ dma_addr_t fw_ddb_dma;
+ int max_ddbs;
+ int fw_idx_size;
+ int ret;
+ uint32_t idx = 0, next_idx = 0;
+ uint32_t state = 0, conn_err = 0;
+ uint16_t conn_id = 0;
+ struct qla_ddb_index *nt_ddb_idx;
+
+ fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+ &fw_ddb_dma);
+ if (fw_ddb_entry == NULL) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+ goto exit_new_nt_list;
+ }
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+ fw_idx_size = sizeof(struct qla_ddb_index);
+
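+	/* walk the firmware DDB table; the firmware hands back the next
+	 * index to visit, and next_idx == 0 ends the walk */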
+ for (idx = 0; idx < max_ddbs; idx = next_idx) {
+ ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
+ NULL, &next_idx, &state,
+ &conn_err, NULL, &conn_id);
+ if (ret == QLA_ERROR)
+ break;
+
+ /* Check if NT, then add it to list */
+ if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
+ goto continue_next_new_nt;
+
+		if (state != DDB_DS_NO_CONNECTION_ACTIVE)
+ goto continue_next_new_nt;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Adding DDB to session = 0x%x\n", idx));
+
+ nt_ddb_idx = vmalloc(fw_idx_size);
+ if (!nt_ddb_idx)
+ break;
+
+ nt_ddb_idx->fw_ddb_idx = idx;
+
+ ret = qla4xxx_is_session_exists(ha, fw_ddb_entry);
+ if (ret == QLA_SUCCESS) {
+ /* free nt_ddb_idx and do not add to list_nt */
+ vfree(nt_ddb_idx);
+ goto continue_next_new_nt;
+ }
+
+ list_add_tail(&nt_ddb_idx->list, list_nt);
+
+ ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
+ idx);
+ if (ret == QLA_ERROR)
+ goto exit_new_nt_list;
+
+continue_next_new_nt:
+ if (next_idx == 0)
+ break;
+ }
+
+exit_new_nt_list:
+ if (fw_ddb_entry)
+ dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
+/**
+ * qla4xxx_sysfs_ddb_is_non_persistent - check for non-persistence of ddb entry
+ * @dev: dev associated with the sysfs entry
+ * @data: pointer to flashnode session object
+ *
+ * Returns:
+ * 1: if flashnode entry is non-persistent
+ * 0: if flashnode entry is persistent
+ **/
+static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data)
+{
+ struct iscsi_bus_flash_session *fnode_sess;
+
+ if (!iscsi_flashnode_bus_match(dev, NULL))
+ return 0;
+
+ fnode_sess = iscsi_dev_to_flash_session(dev);
+
+ return (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT);
+}
+
+/**
+ * qla4xxx_sysfs_ddb_tgt_create - Create sysfs entry for target
+ * @ha: pointer to host
+ * @fw_ddb_entry: flash ddb data
+ * @idx: target index
+ * @user: set if this call originates from userspace, clear if from the kernel
+ *
+ * Returns:
+ * On success: QLA_SUCCESS
+ * On failure: QLA_ERROR
+ *
+ * This creates separate sysfs entries for the session and connection
+ * attributes of the given fw ddb entry.
+ * If this is invoked as a result of a userspace call, the entry is marked
+ * non-persistent via the flash_state field.
+ **/
+int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ uint16_t *idx, int user)
+{
+ struct iscsi_bus_flash_session *fnode_sess = NULL;
+ struct iscsi_bus_flash_conn *fnode_conn = NULL;
+ int rc = QLA_ERROR;
+
+ fnode_sess = iscsi_create_flashnode_sess(ha->host, *idx,
+ &qla4xxx_iscsi_transport, 0);
+ if (!fnode_sess) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to create session sysfs entry for flashnode %d of host%lu\n",
+ __func__, *idx, ha->host_no);
+ goto exit_tgt_create;
+ }
+
+ fnode_conn = iscsi_create_flashnode_conn(ha->host, fnode_sess,
+ &qla4xxx_iscsi_transport, 0);
+ if (!fnode_conn) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to create conn sysfs entry for flashnode %d of host%lu\n",
+ __func__, *idx, ha->host_no);
+ goto free_sess;
+ }
+
+ if (user) {
+ fnode_sess->flash_state = DEV_DB_NON_PERSISTENT;
+ } else {
+ fnode_sess->flash_state = DEV_DB_PERSISTENT;
+
+ if (*idx == ha->pri_ddb_idx || *idx == ha->sec_ddb_idx)
+ fnode_sess->is_boot_target = 1;
+ else
+ fnode_sess->is_boot_target = 0;
+ }
+
+ rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
+ fw_ddb_entry);
+
+ ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
+ __func__, fnode_sess->dev.kobj.name);
+
+ ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
+ __func__, fnode_conn->dev.kobj.name);
+
+ return QLA_SUCCESS;
+
+free_sess:
+ iscsi_destroy_flashnode_sess(fnode_sess);
+
+exit_tgt_create:
+ return QLA_ERROR;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_add - Add new ddb entry in flash
+ * @shost: pointer to host
+ * @buf: type of ddb entry (ipv4/ipv6)
+ * @len: length of buf
+ *
+ * This creates a new ddb entry in flash by finding the first free index,
+ * storing a default ddb there, and then creating a sysfs entry for it.
+ **/
+static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
+ int len)
+{
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_entry_dma;
+ struct device *dev;
+ uint16_t idx = 0;
+ uint16_t max_ddbs = 0;
+ uint32_t options = 0;
+ uint32_t rval = QLA_ERROR;
+
+ if (strncasecmp(PORTAL_TYPE_IPV4, buf, 4) &&
+ strncasecmp(PORTAL_TYPE_IPV6, buf, 4)) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Invalid portal type\n",
+ __func__));
+ goto exit_ddb_add;
+ }
+
+ max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
+ MAX_DEV_DB_ENTRIES;
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n",
+ __func__));
+ goto exit_ddb_add;
+ }
+
+ dev = iscsi_find_flashnode_sess(ha->host, NULL,
+ qla4xxx_sysfs_ddb_is_non_persistent);
+ if (dev) {
+ ql4_printk(KERN_ERR, ha,
+			   "%s: A non-persistent entry %s was found\n",
+ __func__, dev->kobj.name);
+ goto exit_ddb_add;
+ }
+
+ for (idx = 0; idx < max_ddbs; idx++) {
+ if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry,
+ fw_ddb_entry_dma, idx))
+ break;
+ }
+
+ if (idx == max_ddbs)
+ goto exit_ddb_add;
+
+ if (!strncasecmp("ipv6", buf, 4))
+ options |= IPV6_DEFAULT_DDB_ENTRY;
+
+ rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
+ if (rval == QLA_ERROR)
+ goto exit_ddb_add;
+
+ rval = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 1);
+
+exit_ddb_add:
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+ if (rval == QLA_SUCCESS)
+ return idx;
+ else
+ return -EIO;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_apply - write the target ddb contents to Flash
+ * @fnode_sess: pointer to session attrs of flash ddb entry
+ * @fnode_conn: pointer to connection attrs of flash ddb entry
+ *
+ * This writes the contents of target ddb buffer to Flash with a valid cookie
+ * value in order to make the ddb entry persistent.
+ **/
+static int qla4xxx_sysfs_ddb_apply(struct iscsi_bus_flash_session *fnode_sess,
+ struct iscsi_bus_flash_conn *fnode_conn)
+{
+ struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO;
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_entry_dma;
+ uint32_t options = 0;
+ int rval = 0;
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n",
+ __func__));
+ rval = -ENOMEM;
+ goto exit_ddb_apply;
+ }
+
+ if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+ options |= IPV6_DEFAULT_DDB_ENTRY;
+
+ rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
+ if (rval == QLA_ERROR)
+ goto exit_ddb_apply;
+
+ dev_db_start_offset += (fnode_sess->target_id *
+ sizeof(*fw_ddb_entry));
+
+ qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
+ fw_ddb_entry->cookie = DDB_VALID_COOKIE;
+
+ rval = qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
+ sizeof(*fw_ddb_entry), FLASH_OPT_RMW_COMMIT);
+
+ if (rval == QLA_SUCCESS) {
+ fnode_sess->flash_state = DEV_DB_PERSISTENT;
+ ql4_printk(KERN_INFO, ha,
+ "%s: flash node %u of host %lu written to flash\n",
+ __func__, fnode_sess->target_id, ha->host_no);
+ } else {
+ rval = -EIO;
+ ql4_printk(KERN_ERR, ha,
+ "%s: Error while writing flash node %u of host %lu to flash\n",
+ __func__, fnode_sess->target_id, ha->host_no);
+ }
+
+exit_ddb_apply:
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+ return rval;
+}
+
+static ssize_t qla4xxx_sysfs_ddb_conn_open(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ uint16_t idx)
+{
+ struct dev_db_entry *ddb_entry = NULL;
+ dma_addr_t ddb_entry_dma;
+ unsigned long wtime;
+ uint32_t mbx_sts = 0;
+ uint32_t state = 0, conn_err = 0;
+ uint16_t tmo = 0;
+ int ret = 0;
+
+ ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
+ &ddb_entry_dma, GFP_KERNEL);
+ if (!ddb_entry) {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n",
+ __func__));
+ return QLA_ERROR;
+ }
+
+ memcpy(ddb_entry, fw_ddb_entry, sizeof(*ddb_entry));
+
+ ret = qla4xxx_set_ddb_entry(ha, idx, ddb_entry_dma, &mbx_sts);
+ if (ret != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Unable to set ddb entry for index %d\n",
+ __func__, idx));
+ goto exit_ddb_conn_open;
+ }
+
+ qla4xxx_conn_open(ha, idx);
+
+ /* To ensure that sendtargets is done, wait for at least 12 secs */
+ tmo = ((ha->def_timeout > LOGIN_TOV) &&
+ (ha->def_timeout < LOGIN_TOV * 10) ?
+ ha->def_timeout : LOGIN_TOV);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Default time to wait for login to ddb %d\n", tmo));
+
+ wtime = jiffies + (HZ * tmo);
+ do {
+ ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
+ NULL, &state, &conn_err, NULL,
+ NULL);
+ if (ret == QLA_ERROR)
+ continue;
+
+ if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+ state == DDB_DS_SESSION_FAILED)
+ break;
+
+ schedule_timeout_uninterruptible(HZ / 10);
+ } while (time_after(wtime, jiffies));
+
+exit_ddb_conn_open:
+ if (ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
+ ddb_entry, ddb_entry_dma);
+ return ret;
+}
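
[Editor's note] The tmo computation above uses the firmware default timeout only when it falls strictly between LOGIN_TOV and LOGIN_TOV * 10, and falls back to LOGIN_TOV otherwise. A small runnable sketch of the same expression; the LOGIN_TOV value of 12 is an assumption taken from the "at least 12 secs" comment, not the driver's definition:

#include <assert.h>
#include <stdint.h>

#define LOGIN_TOV 12	/* assumed from the "at least 12 secs" comment */

static uint16_t login_wait_tmo(uint16_t def_timeout)
{
	/* use the firmware default only inside (LOGIN_TOV, 10 * LOGIN_TOV) */
	return (def_timeout > LOGIN_TOV && def_timeout < LOGIN_TOV * 10) ?
		def_timeout : LOGIN_TOV;
}

int main(void)
{
	assert(login_wait_tmo(5) == 12);	/* too small: fall back */
	assert(login_wait_tmo(60) == 60);	/* in range: keep it */
	assert(login_wait_tmo(600) == 12);	/* too large: fall back */
	return 0;
}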
+
+static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
+ struct list_head list_nt;
+ uint16_t ddb_index;
+ int ret = 0;
+
+ if (test_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags)) {
+ ql4_printk(KERN_WARNING, ha,
+			   "%s: A discovery is already in progress!\n", __func__);
+ return QLA_ERROR;
+ }
+
+ INIT_LIST_HEAD(&list_nt);
+
+ set_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
+
+ ret = qla4xxx_get_ddb_index(ha, &ddb_index);
+ if (ret == QLA_ERROR)
+ goto exit_login_st_clr_bit;
+
+ ret = qla4xxx_sysfs_ddb_conn_open(ha, fw_ddb_entry, ddb_index);
+ if (ret == QLA_ERROR)
+ goto exit_login_st;
+
+ qla4xxx_build_new_nt_list(ha, &list_nt);
+
+ list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) {
+ list_del_init(&ddb_idx->list);
+ qla4xxx_clear_ddb_entry(ha, ddb_idx->fw_ddb_idx);
+ vfree(ddb_idx);
+ }
+
+exit_login_st:
+ if (qla4xxx_clear_ddb_entry(ha, ddb_index) == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha,
+ "Unable to clear DDB index = 0x%x\n", ddb_index);
+ }
+
+ clear_bit(ddb_index, ha->ddb_idx_map);
+
+exit_login_st_clr_bit:
+ clear_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
+ return ret;
+}
+
+static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ uint16_t idx)
+{
+ int ret = QLA_ERROR;
+
+ ret = qla4xxx_is_session_exists(ha, fw_ddb_entry);
+ if (ret != QLA_SUCCESS)
+ ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
+ idx);
+ else
+ ret = -EPERM;
+
+ return ret;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_login - Login to the specified target
+ * @fnode_sess: pointer to session attrs of flash ddb entry
+ * @fnode_conn: pointer to connection attrs of flash ddb entry
+ *
+ * This logs in to the specified target
+ **/
+static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
+ struct iscsi_bus_flash_conn *fnode_conn)
+{
+ struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_entry_dma;
+ uint32_t options = 0;
+ int ret = 0;
+
+ if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Target info is not persistent\n", __func__);
+ ret = -EIO;
+ goto exit_ddb_login;
+ }
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n",
+ __func__));
+ ret = -ENOMEM;
+ goto exit_ddb_login;
+ }
+
+ if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+ options |= IPV6_DEFAULT_DDB_ENTRY;
+
+ ret = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
+ if (ret == QLA_ERROR)
+ goto exit_ddb_login;
+
+ qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
+ fw_ddb_entry->cookie = DDB_VALID_COOKIE;
+
+ if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
+ ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry);
+ else
+ ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry,
+ fnode_sess->target_id);
+
+ if (ret > 0)
+ ret = -EIO;
+
+exit_ddb_login:
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+ return ret;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_logout_sid - Logout session for the specified target
+ * @cls_sess: pointer to session to be logged out
+ *
+ * This performs session log out from the specified target
+ **/
+static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry = NULL;
+ struct scsi_qla_host *ha;
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_entry_dma;
+ unsigned long flags;
+ unsigned long wtime;
+ uint32_t ddb_state;
+ int options;
+ int ret = 0;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ if (ddb_entry->ddb_type != FLASH_DDB) {
+ ql4_printk(KERN_ERR, ha, "%s: Not a flash node session\n",
+ __func__);
+ ret = -ENXIO;
+ goto exit_ddb_logout;
+ }
+
+ if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Logout from boot target entry is not permitted.\n",
+ __func__);
+ ret = -EPERM;
+ goto exit_ddb_logout;
+ }
+
+ options = LOGOUT_OPTION_CLOSE_SESSION;
+ if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
+ ret = -EIO;
+ goto exit_ddb_logout;
+ }
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n", __func__);
+ ret = -ENOMEM;
+ goto exit_ddb_logout;
+ }
+
+ wtime = jiffies + (HZ * LOGOUT_TOV);
+ do {
+ ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
+ fw_ddb_entry, fw_ddb_entry_dma,
+ NULL, NULL, &ddb_state, NULL,
+ NULL, NULL);
+ if (ret == QLA_ERROR)
+ goto ddb_logout_clr_sess;
+
+ if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
+ (ddb_state == DDB_DS_SESSION_FAILED))
+ goto ddb_logout_clr_sess;
+
+ schedule_timeout_uninterruptible(HZ);
+ } while ((time_after(wtime, jiffies)));
+
+ddb_logout_clr_sess:
+ qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+	/*
+	 * We dropped the driver's reference count when the session was
+	 * set up so that driver unload stays seamless without actually
+	 * destroying the session; take the reference back before
+	 * tearing the session down.
+	 */
+ try_module_get(qla4xxx_iscsi_transport.owner);
+ iscsi_destroy_endpoint(ddb_entry->conn->ep);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qla4xxx_free_ddb(ha, ddb_entry);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ iscsi_session_teardown(ddb_entry->sess);
+
+ ret = QLA_SUCCESS;
+
+exit_ddb_logout:
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+ return ret;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_logout - Logout from the specified target
+ * @fnode_sess: pointer to session attrs of flash ddb entry
+ * @fnode_conn: pointer to connection attrs of flash ddb entry
+ *
+ * This performs log out from the specified target
+ **/
+static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
+ struct iscsi_bus_flash_conn *fnode_conn)
+{
+ struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ struct ql4_tuple_ddb *flash_tddb = NULL;
+ struct ql4_tuple_ddb *tmp_tddb = NULL;
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ struct ddb_entry *ddb_entry = NULL;
+ dma_addr_t fw_ddb_dma;
+ uint32_t next_idx = 0;
+ uint32_t state = 0, conn_err = 0;
+ uint16_t conn_id = 0;
+ int idx, index;
+ int status, ret = 0;
+
+ fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+ &fw_ddb_dma);
+ if (fw_ddb_entry == NULL) {
+		ql4_printk(KERN_ERR, ha, "%s: Out of memory\n", __func__);
+ ret = -ENOMEM;
+ goto exit_ddb_logout;
+ }
+
+ flash_tddb = vzalloc(sizeof(*flash_tddb));
+ if (!flash_tddb) {
+ ql4_printk(KERN_WARNING, ha,
+			   "%s: Memory allocation failed.\n", __func__);
+ ret = -ENOMEM;
+ goto exit_ddb_logout;
+ }
+
+ tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+ if (!tmp_tddb) {
+ ql4_printk(KERN_WARNING, ha,
+			   "%s: Memory allocation failed.\n", __func__);
+ ret = -ENOMEM;
+ goto exit_ddb_logout;
+ }
+
+ if (!fnode_sess->targetname) {
+ ql4_printk(KERN_ERR, ha,
+			   "%s: Cannot logout from a SendTargets entry\n",
+ __func__);
+ ret = -EPERM;
+ goto exit_ddb_logout;
+ }
+
+ if (fnode_sess->is_boot_target) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Logout from boot target entry is not permitted.\n",
+ __func__);
+ ret = -EPERM;
+ goto exit_ddb_logout;
+ }
+
+ strncpy(flash_tddb->iscsi_name, fnode_sess->targetname,
+ ISCSI_NAME_SIZE);
+
+ if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+ sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress);
+ else
+ sprintf(flash_tddb->ip_addr, "%pI4", fnode_conn->ipaddress);
+
+ flash_tddb->tpgt = fnode_sess->tpgt;
+ flash_tddb->port = fnode_conn->port;
+
+ COPY_ISID(flash_tddb->isid, fnode_sess->isid);
+
+ for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+ if (ddb_entry == NULL)
+ continue;
+
+ if (ddb_entry->ddb_type != FLASH_DDB)
+ continue;
+
+ index = ddb_entry->sess->target_id;
+ status = qla4xxx_get_fwddb_entry(ha, index, fw_ddb_entry,
+ fw_ddb_dma, NULL, &next_idx,
+ &state, &conn_err, NULL,
+ &conn_id);
+ if (status == QLA_ERROR) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ qla4xxx_convert_param_ddb(fw_ddb_entry, tmp_tddb, NULL);
+
+ status = qla4xxx_compare_tuple_ddb(ha, flash_tddb, tmp_tddb,
+ true);
+ if (status == QLA_SUCCESS) {
+ ret = qla4xxx_sysfs_ddb_logout_sid(ddb_entry->sess);
+ break;
+ }
+ }
+
+ if (idx == MAX_DDB_ENTRIES)
+ ret = -ESRCH;
+
+exit_ddb_logout:
+ if (flash_tddb)
+ vfree(flash_tddb);
+ if (tmp_tddb)
+ vfree(tmp_tddb);
+ if (fw_ddb_entry)
+ dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+
+ return ret;
+}
+
+static int
+qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
+ int param, char *buf)
+{
+ struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ struct iscsi_bus_flash_conn *fnode_conn;
+ struct ql4_chap_table chap_tbl;
+ struct device *dev;
+ int parent_type, parent_index = 0xffff;
+ int rc = 0;
+
+ dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
+ iscsi_is_flashnode_conn_dev);
+ if (!dev)
+ return -EIO;
+
+ fnode_conn = iscsi_dev_to_flash_conn(dev);
+
+ switch (param) {
+ case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
+ rc = sprintf(buf, "%u\n", fnode_conn->is_fw_assigned_ipv6);
+ break;
+ case ISCSI_FLASHNODE_PORTAL_TYPE:
+ rc = sprintf(buf, "%s\n", fnode_sess->portal_type);
+ break;
+ case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
+ rc = sprintf(buf, "%u\n", fnode_sess->auto_snd_tgt_disable);
+ break;
+ case ISCSI_FLASHNODE_DISCOVERY_SESS:
+ rc = sprintf(buf, "%u\n", fnode_sess->discovery_sess);
+ break;
+ case ISCSI_FLASHNODE_ENTRY_EN:
+ rc = sprintf(buf, "%u\n", fnode_sess->entry_state);
+ break;
+ case ISCSI_FLASHNODE_HDR_DGST_EN:
+ rc = sprintf(buf, "%u\n", fnode_conn->hdrdgst_en);
+ break;
+ case ISCSI_FLASHNODE_DATA_DGST_EN:
+ rc = sprintf(buf, "%u\n", fnode_conn->datadgst_en);
+ break;
+ case ISCSI_FLASHNODE_IMM_DATA_EN:
+ rc = sprintf(buf, "%u\n", fnode_sess->imm_data_en);
+ break;
+ case ISCSI_FLASHNODE_INITIAL_R2T_EN:
+ rc = sprintf(buf, "%u\n", fnode_sess->initial_r2t_en);
+ break;
+ case ISCSI_FLASHNODE_DATASEQ_INORDER:
+ rc = sprintf(buf, "%u\n", fnode_sess->dataseq_inorder_en);
+ break;
+ case ISCSI_FLASHNODE_PDU_INORDER:
+ rc = sprintf(buf, "%u\n", fnode_sess->pdu_inorder_en);
+ break;
+ case ISCSI_FLASHNODE_CHAP_AUTH_EN:
+ rc = sprintf(buf, "%u\n", fnode_sess->chap_auth_en);
+ break;
+ case ISCSI_FLASHNODE_SNACK_REQ_EN:
+ rc = sprintf(buf, "%u\n", fnode_conn->snack_req_en);
+ break;
+ case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
+ rc = sprintf(buf, "%u\n", fnode_sess->discovery_logout_en);
+ break;
+ case ISCSI_FLASHNODE_BIDI_CHAP_EN:
+ rc = sprintf(buf, "%u\n", fnode_sess->bidi_chap_en);
+ break;
+ case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
+ rc = sprintf(buf, "%u\n", fnode_sess->discovery_auth_optional);
+ break;
+ case ISCSI_FLASHNODE_ERL:
+ rc = sprintf(buf, "%u\n", fnode_sess->erl);
+ break;
+ case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
+ rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_stat);
+ break;
+ case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
+ rc = sprintf(buf, "%u\n", fnode_conn->tcp_nagle_disable);
+ break;
+ case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
+ rc = sprintf(buf, "%u\n", fnode_conn->tcp_wsf_disable);
+ break;
+ case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
+ rc = sprintf(buf, "%u\n", fnode_conn->tcp_timer_scale);
+ break;
+ case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
+ rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_en);
+ break;
+ case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
+ rc = sprintf(buf, "%u\n", fnode_conn->fragment_disable);
+ break;
+ case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
+ rc = sprintf(buf, "%u\n", fnode_conn->max_recv_dlength);
+ break;
+ case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
+ rc = sprintf(buf, "%u\n", fnode_conn->max_xmit_dlength);
+ break;
+ case ISCSI_FLASHNODE_FIRST_BURST:
+ rc = sprintf(buf, "%u\n", fnode_sess->first_burst);
+ break;
+ case ISCSI_FLASHNODE_DEF_TIME2WAIT:
+ rc = sprintf(buf, "%u\n", fnode_sess->time2wait);
+ break;
+ case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
+ rc = sprintf(buf, "%u\n", fnode_sess->time2retain);
+ break;
+ case ISCSI_FLASHNODE_MAX_R2T:
+ rc = sprintf(buf, "%u\n", fnode_sess->max_r2t);
+ break;
+ case ISCSI_FLASHNODE_KEEPALIVE_TMO:
+ rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout);
+ break;
+ case ISCSI_FLASHNODE_ISID:
+ rc = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n",
+ fnode_sess->isid[0], fnode_sess->isid[1],
+ fnode_sess->isid[2], fnode_sess->isid[3],
+ fnode_sess->isid[4], fnode_sess->isid[5]);
+ break;
+ case ISCSI_FLASHNODE_TSID:
+ rc = sprintf(buf, "%u\n", fnode_sess->tsid);
+ break;
+ case ISCSI_FLASHNODE_PORT:
+ rc = sprintf(buf, "%d\n", fnode_conn->port);
+ break;
+ case ISCSI_FLASHNODE_MAX_BURST:
+ rc = sprintf(buf, "%u\n", fnode_sess->max_burst);
+ break;
+ case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
+ rc = sprintf(buf, "%u\n",
+ fnode_sess->default_taskmgmt_timeout);
+ break;
+ case ISCSI_FLASHNODE_IPADDR:
+ if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+ rc = sprintf(buf, "%pI6\n", fnode_conn->ipaddress);
+ else
+ rc = sprintf(buf, "%pI4\n", fnode_conn->ipaddress);
+ break;
+ case ISCSI_FLASHNODE_ALIAS:
+ if (fnode_sess->targetalias)
+ rc = sprintf(buf, "%s\n", fnode_sess->targetalias);
+ else
+ rc = sprintf(buf, "\n");
+ break;
+ case ISCSI_FLASHNODE_REDIRECT_IPADDR:
+ if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+ rc = sprintf(buf, "%pI6\n",
+ fnode_conn->redirect_ipaddr);
+ else
+ rc = sprintf(buf, "%pI4\n",
+ fnode_conn->redirect_ipaddr);
+ break;
+ case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
+ rc = sprintf(buf, "%u\n", fnode_conn->max_segment_size);
+ break;
+ case ISCSI_FLASHNODE_LOCAL_PORT:
+ rc = sprintf(buf, "%u\n", fnode_conn->local_port);
+ break;
+ case ISCSI_FLASHNODE_IPV4_TOS:
+ rc = sprintf(buf, "%u\n", fnode_conn->ipv4_tos);
+ break;
+ case ISCSI_FLASHNODE_IPV6_TC:
+ if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+ rc = sprintf(buf, "%u\n",
+ fnode_conn->ipv6_traffic_class);
+ else
+ rc = sprintf(buf, "\n");
+ break;
+ case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
+ rc = sprintf(buf, "%u\n", fnode_conn->ipv6_flow_label);
+ break;
+ case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
+ if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+ rc = sprintf(buf, "%pI6\n",
+ fnode_conn->link_local_ipv6_addr);
+ else
+ rc = sprintf(buf, "\n");
+ break;
+ case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
+ if ((fnode_sess->discovery_parent_idx) >= 0 &&
+ (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES))
+ parent_index = fnode_sess->discovery_parent_idx;
+
+ rc = sprintf(buf, "%u\n", parent_index);
+ break;
+ case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
+ if (fnode_sess->discovery_parent_type == DDB_ISNS)
+ parent_type = ISCSI_DISC_PARENT_ISNS;
+ else if (fnode_sess->discovery_parent_type == DDB_NO_LINK)
+ parent_type = ISCSI_DISC_PARENT_UNKNOWN;
+ else if (fnode_sess->discovery_parent_type >= 0 &&
+ fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
+ parent_type = ISCSI_DISC_PARENT_SENDTGT;
+ else
+ parent_type = ISCSI_DISC_PARENT_UNKNOWN;
+
+ rc = sprintf(buf, "%s\n",
+ iscsi_get_discovery_parent_name(parent_type));
+ break;
+ case ISCSI_FLASHNODE_NAME:
+ if (fnode_sess->targetname)
+ rc = sprintf(buf, "%s\n", fnode_sess->targetname);
+ else
+ rc = sprintf(buf, "\n");
+ break;
+ case ISCSI_FLASHNODE_TPGT:
+ rc = sprintf(buf, "%u\n", fnode_sess->tpgt);
+ break;
+ case ISCSI_FLASHNODE_TCP_XMIT_WSF:
+ rc = sprintf(buf, "%u\n", fnode_conn->tcp_xmit_wsf);
+ break;
+ case ISCSI_FLASHNODE_TCP_RECV_WSF:
+ rc = sprintf(buf, "%u\n", fnode_conn->tcp_recv_wsf);
+ break;
+ case ISCSI_FLASHNODE_CHAP_OUT_IDX:
+ rc = sprintf(buf, "%u\n", fnode_sess->chap_out_idx);
+ break;
+ case ISCSI_FLASHNODE_USERNAME:
+ if (fnode_sess->chap_auth_en) {
+ qla4xxx_get_uni_chap_at_index(ha,
+ chap_tbl.name,
+ chap_tbl.secret,
+ fnode_sess->chap_out_idx);
+ rc = sprintf(buf, "%s\n", chap_tbl.name);
+ } else {
+ rc = sprintf(buf, "\n");
+ }
+ break;
+ case ISCSI_FLASHNODE_PASSWORD:
+ if (fnode_sess->chap_auth_en) {
+ qla4xxx_get_uni_chap_at_index(ha,
+ chap_tbl.name,
+ chap_tbl.secret,
+ fnode_sess->chap_out_idx);
+ rc = sprintf(buf, "%s\n", chap_tbl.secret);
+ } else {
+ rc = sprintf(buf, "\n");
+ }
+ break;
+ case ISCSI_FLASHNODE_STATSN:
+ rc = sprintf(buf, "%u\n", fnode_conn->statsn);
+ break;
+ case ISCSI_FLASHNODE_EXP_STATSN:
+ rc = sprintf(buf, "%u\n", fnode_conn->exp_statsn);
+ break;
+ case ISCSI_FLASHNODE_IS_BOOT_TGT:
+ rc = sprintf(buf, "%u\n", fnode_sess->is_boot_target);
+ break;
+ default:
+ rc = -ENOSYS;
+ break;
+ }
+ return rc;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_set_param - Set parameter for firmware DDB entry
+ * @fnode_sess: pointer to session attrs of flash ddb entry
+ * @fnode_conn: pointer to connection attrs of flash ddb entry
+ * @data: Parameters and their values to update
+ * @len: len of data
+ *
+ * This sets the parameters of a flash ddb entry and writes them to flash
+ **/
+static int
+qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
+ struct iscsi_bus_flash_conn *fnode_conn,
+ void *data, int len)
+{
+ struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ struct iscsi_flashnode_param_info *fnode_param;
+ struct nlattr *attr;
+ int rc = QLA_ERROR;
+ uint32_t rem = len;
+
+ fw_ddb_entry = kzalloc(sizeof(*fw_ddb_entry), GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate ddb buffer\n",
+ __func__));
+ return -ENOMEM;
+ }
+
+ nla_for_each_attr(attr, data, len, rem) {
+ fnode_param = nla_data(attr);
+
+ switch (fnode_param->param) {
+ case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
+ fnode_conn->is_fw_assigned_ipv6 = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_PORTAL_TYPE:
+ memcpy(fnode_sess->portal_type, fnode_param->value,
+ strlen(fnode_sess->portal_type));
+ break;
+ case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
+ fnode_sess->auto_snd_tgt_disable =
+ fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_DISCOVERY_SESS:
+ fnode_sess->discovery_sess = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_ENTRY_EN:
+ fnode_sess->entry_state = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_HDR_DGST_EN:
+ fnode_conn->hdrdgst_en = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_DATA_DGST_EN:
+ fnode_conn->datadgst_en = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_IMM_DATA_EN:
+ fnode_sess->imm_data_en = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_INITIAL_R2T_EN:
+ fnode_sess->initial_r2t_en = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_DATASEQ_INORDER:
+ fnode_sess->dataseq_inorder_en = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_PDU_INORDER:
+ fnode_sess->pdu_inorder_en = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_CHAP_AUTH_EN:
+ fnode_sess->chap_auth_en = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_SNACK_REQ_EN:
+ fnode_conn->snack_req_en = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
+ fnode_sess->discovery_logout_en = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_BIDI_CHAP_EN:
+ fnode_sess->bidi_chap_en = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
+ fnode_sess->discovery_auth_optional =
+ fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_ERL:
+ fnode_sess->erl = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
+ fnode_conn->tcp_timestamp_stat = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
+ fnode_conn->tcp_nagle_disable = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
+ fnode_conn->tcp_wsf_disable = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
+ fnode_conn->tcp_timer_scale = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
+ fnode_conn->tcp_timestamp_en = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
+ fnode_conn->fragment_disable = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
+ fnode_conn->max_recv_dlength =
+ *(unsigned *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
+ fnode_conn->max_xmit_dlength =
+ *(unsigned *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_FIRST_BURST:
+ fnode_sess->first_burst =
+ *(unsigned *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_DEF_TIME2WAIT:
+ fnode_sess->time2wait = *(uint16_t *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
+ fnode_sess->time2retain =
+ *(uint16_t *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_MAX_R2T:
+ fnode_sess->max_r2t =
+ *(uint16_t *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_KEEPALIVE_TMO:
+ fnode_conn->keepalive_timeout =
+ *(uint16_t *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_ISID:
+ memcpy(fnode_sess->isid, fnode_param->value,
+ sizeof(fnode_sess->isid));
+ break;
+ case ISCSI_FLASHNODE_TSID:
+ fnode_sess->tsid = *(uint16_t *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_PORT:
+ fnode_conn->port = *(uint16_t *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_MAX_BURST:
+ fnode_sess->max_burst = *(unsigned *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
+ fnode_sess->default_taskmgmt_timeout =
+ *(uint16_t *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_IPADDR:
+ memcpy(fnode_conn->ipaddress, fnode_param->value,
+ IPv6_ADDR_LEN);
+ break;
+ case ISCSI_FLASHNODE_ALIAS:
+ rc = iscsi_switch_str_param(&fnode_sess->targetalias,
+ (char *)fnode_param->value);
+ break;
+ case ISCSI_FLASHNODE_REDIRECT_IPADDR:
+ memcpy(fnode_conn->redirect_ipaddr, fnode_param->value,
+ IPv6_ADDR_LEN);
+ break;
+ case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
+ fnode_conn->max_segment_size =
+ *(unsigned *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_LOCAL_PORT:
+ fnode_conn->local_port =
+ *(uint16_t *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_IPV4_TOS:
+ fnode_conn->ipv4_tos = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_IPV6_TC:
+ fnode_conn->ipv6_traffic_class = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
+ fnode_conn->ipv6_flow_label = fnode_param->value[0];
+ break;
+ case ISCSI_FLASHNODE_NAME:
+ rc = iscsi_switch_str_param(&fnode_sess->targetname,
+ (char *)fnode_param->value);
+ break;
+ case ISCSI_FLASHNODE_TPGT:
+ fnode_sess->tpgt = *(uint16_t *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
+ memcpy(fnode_conn->link_local_ipv6_addr,
+ fnode_param->value, IPv6_ADDR_LEN);
+ break;
+ case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
+ fnode_sess->discovery_parent_type =
+ *(uint16_t *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_TCP_XMIT_WSF:
+ fnode_conn->tcp_xmit_wsf =
+ *(uint8_t *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_TCP_RECV_WSF:
+ fnode_conn->tcp_recv_wsf =
+ *(uint8_t *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_STATSN:
+ fnode_conn->statsn = *(uint32_t *)fnode_param->value;
+ break;
+ case ISCSI_FLASHNODE_EXP_STATSN:
+ fnode_conn->exp_statsn =
+ *(uint32_t *)fnode_param->value;
+ break;
+ default:
+ ql4_printk(KERN_ERR, ha,
+ "%s: No such sysfs attribute\n", __func__);
+ rc = -ENOSYS;
+ goto exit_set_param;
+ }
+ }
+
+ rc = qla4xxx_sysfs_ddb_apply(fnode_sess, fnode_conn);
+
+exit_set_param:
+ return rc;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_delete - Delete firmware DDB entry
+ * @fnode_sess: pointer to session attrs of flash ddb entry
+ *
+ * This invalidates the flash ddb entry at the given index
+ **/
+static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
+{
+ struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
+ struct scsi_qla_host *ha = to_qla_host(shost);
+ uint32_t dev_db_start_offset;
+ uint32_t dev_db_end_offset;
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_entry_dma;
+ uint16_t *ddb_cookie = NULL;
+ size_t ddb_size;
+ void *pddb = NULL;
+ int target_id;
+ int rc = 0;
+
+ if (!fnode_sess) {
+ rc = -EINVAL;
+ goto exit_ddb_del;
+ }
+
+ if (fnode_sess->is_boot_target) {
+ rc = -EPERM;
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Deletion of boot target entry is not permitted.\n",
+ __func__));
+ goto exit_ddb_del;
+ }
+
+ if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT)
+ goto sysfs_ddb_del;
+
+ if (is_qla40XX(ha)) {
+ dev_db_start_offset = FLASH_OFFSET_DB_INFO;
+ dev_db_end_offset = FLASH_OFFSET_DB_END;
+ dev_db_start_offset += (fnode_sess->target_id *
+ sizeof(*fw_ddb_entry));
+ ddb_size = sizeof(*fw_ddb_entry);
+ } else {
+ dev_db_start_offset = FLASH_RAW_ACCESS_ADDR +
+ (ha->hw.flt_region_ddb << 2);
+		/* flt_ddb_size is the DDB table size for both ports,
+		 * so divide it by 2 to get the offset of the second port
+		 */
+ if (ha->port_num == 1)
+ dev_db_start_offset += (ha->hw.flt_ddb_size / 2);
+
+ dev_db_end_offset = dev_db_start_offset +
+ (ha->hw.flt_ddb_size / 2);
+
+ dev_db_start_offset += (fnode_sess->target_id *
+ sizeof(*fw_ddb_entry));
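+		/* fw_ddb_entry is still NULL here, so this pointer
+		 * arithmetic is simply offsetof(struct dev_db_entry, cookie) */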
+ dev_db_start_offset += (void *)&(fw_ddb_entry->cookie) -
+ (void *)fw_ddb_entry;
+
+ ddb_size = sizeof(*ddb_cookie);
+ }
+
+ DEBUG2(ql4_printk(KERN_ERR, ha, "%s: start offset=%u, end offset=%u\n",
+ __func__, dev_db_start_offset, dev_db_end_offset));
+
+ if (dev_db_start_offset > dev_db_end_offset) {
+ rc = -EIO;
+		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Invalid DDB index %u\n",
+ __func__, fnode_sess->target_id));
+ goto exit_ddb_del;
+ }
+
+ pddb = dma_alloc_coherent(&ha->pdev->dev, ddb_size,
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!pddb) {
+ rc = -ENOMEM;
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n",
+ __func__));
+ goto exit_ddb_del;
+ }
+
+ if (is_qla40XX(ha)) {
+ fw_ddb_entry = pddb;
+ memset(fw_ddb_entry, 0, ddb_size);
+ ddb_cookie = &fw_ddb_entry->cookie;
+ } else {
+ ddb_cookie = pddb;
+ }
+
+ /* invalidate the cookie */
+ *ddb_cookie = 0xFFEE;
+ qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
+ ddb_size, FLASH_OPT_RMW_COMMIT);
+
+sysfs_ddb_del:
+ target_id = fnode_sess->target_id;
+ iscsi_destroy_flashnode_sess(fnode_sess);
+ ql4_printk(KERN_INFO, ha,
+ "%s: session and conn entries for flashnode %u of host %lu deleted\n",
+ __func__, target_id, ha->host_no);
+exit_ddb_del:
+ if (pddb)
+ dma_free_coherent(&ha->pdev->dev, ddb_size, pddb,
+ fw_ddb_entry_dma);
+ return rc;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_export - Create sysfs entries for firmware DDBs
+ * @ha: pointer to adapter structure
+ *
+ * Export the firmware DDB for all send targets and normal targets to sysfs.
+ **/
+static int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha)
+{
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_entry_dma;
+ uint16_t max_ddbs;
+ uint16_t idx = 0;
+ int ret = QLA_SUCCESS;
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n",
+ __func__));
+ return -ENOMEM;
+ }
+
+ max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
+ MAX_DEV_DB_ENTRIES;
+
+ for (idx = 0; idx < max_ddbs; idx++) {
+ if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma,
+ idx))
+ continue;
+
+ ret = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 0);
+ if (ret) {
+ ret = -EIO;
+ break;
+ }
+ }
+
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry,
+ fw_ddb_entry_dma);
+
+ return ret;
+}
+
+static void qla4xxx_sysfs_ddb_remove(struct scsi_qla_host *ha)
+{
+ iscsi_destroy_all_flashnode(ha->host);
+}
+
/**
* qla4xxx_build_ddb_list - Build ddb list and setup sessions
* @ha: pointer to adapter structure
@@ -5341,8 +7016,11 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev,
status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
/* Dont retry adapter initialization if IRQ allocation failed */
- if (!test_bit(AF_IRQ_ATTACHED, &ha->flags))
+ if (is_qla80XX(ha) && !test_bit(AF_IRQ_ATTACHED, &ha->flags)) {
+ ql4_printk(KERN_WARNING, ha, "%s: Skipping retry of adapter initialization\n",
+ __func__);
goto skip_retry_init;
+ }
while ((!test_bit(AF_ONLINE, &ha->flags)) &&
init_retry_count++ < MAX_INIT_RETRIES) {
@@ -5445,6 +7123,10 @@ skip_retry_init:
ql4_printk(KERN_ERR, ha,
"%s: No iSCSI boot target configured\n", __func__);
+ if (qla4xxx_sysfs_ddb_export(ha))
+ ql4_printk(KERN_ERR, ha,
+ "%s: Error exporting ddb to sysfs\n", __func__);
+
/* Perform the build ddb list and login to each */
qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
@@ -5570,6 +7252,7 @@ static void qla4xxx_remove_adapter(struct pci_dev *pdev)
qla4xxx_destroy_fw_ddb_session(ha);
qla4_8xxx_free_sysfs_attr(ha);
+ qla4xxx_sysfs_ddb_remove(ha);
scsi_remove_host(ha->host);
qla4xxx_free_adapter(ha);
@@ -5669,7 +7352,6 @@ struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
/* update counters */
if (srb->flags & SRB_DMA_VALID) {
- ha->req_q_count += srb->iocb_cnt;
ha->iocb_cnt -= srb->iocb_cnt;
if (srb->cmd)
srb->cmd->host_scribble =
@@ -6081,6 +7763,7 @@ static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
{
struct scsi_qla_host *ha = to_qla_host(shost);
int rval = QLA_SUCCESS;
+ uint32_t idc_ctrl;
if (ql4xdontresethba) {
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
@@ -6111,6 +7794,14 @@ static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
}
recover_adapter:
+	/* For ISP83XX, set the graceful reset bit in IDC_DRV_CTRL
+	 * when the reset is issued by an application.
+	 */
+ if (is_qla8032(ha) && test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
+ idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
+ qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
+ (idc_ctrl | GRACEFUL_RESET_BIT1));
+ }
+
rval = qla4xxx_recover_adapter(ha);
if (rval != QLA_SUCCESS) {
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 6775a45af315..83e0fec35d56 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.03.00-k4"
+#define QLA4XXX_DRIVER_VERSION "5.03.00-k8"
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 0a74b975efdf..ce06e8772f3a 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/bsg-lib.h>
#include <linux/idr.h>
+#include <linux/list.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -460,6 +461,689 @@ void iscsi_destroy_iface(struct iscsi_iface *iface)
EXPORT_SYMBOL_GPL(iscsi_destroy_iface);
/*
+ * Interface to display flash node params to sysfs
+ */
+
+#define ISCSI_FLASHNODE_ATTR(_prefix, _name, _mode, _show, _store) \
+struct device_attribute dev_attr_##_prefix##_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
+/* flash node session attrs show */
+#define iscsi_flashnode_sess_attr_show(type, name, param) \
+static ssize_t \
+show_##type##_##name(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct iscsi_bus_flash_session *fnode_sess = \
+ iscsi_dev_to_flash_session(dev);\
+ struct iscsi_transport *t = fnode_sess->transport; \
+ return t->get_flashnode_param(fnode_sess, param, buf); \
+} \
+
+
+#define iscsi_flashnode_sess_attr(type, name, param) \
+ iscsi_flashnode_sess_attr_show(type, name, param) \
+static ISCSI_FLASHNODE_ATTR(type, name, S_IRUGO, \
+ show_##type##_##name, NULL);
+
+/* Flash node session attributes */
+
+iscsi_flashnode_sess_attr(fnode, auto_snd_tgt_disable,
+ ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE);
+iscsi_flashnode_sess_attr(fnode, discovery_session,
+ ISCSI_FLASHNODE_DISCOVERY_SESS);
+iscsi_flashnode_sess_attr(fnode, portal_type, ISCSI_FLASHNODE_PORTAL_TYPE);
+iscsi_flashnode_sess_attr(fnode, entry_enable, ISCSI_FLASHNODE_ENTRY_EN);
+iscsi_flashnode_sess_attr(fnode, immediate_data, ISCSI_FLASHNODE_IMM_DATA_EN);
+iscsi_flashnode_sess_attr(fnode, initial_r2t, ISCSI_FLASHNODE_INITIAL_R2T_EN);
+iscsi_flashnode_sess_attr(fnode, data_seq_in_order,
+ ISCSI_FLASHNODE_DATASEQ_INORDER);
+iscsi_flashnode_sess_attr(fnode, data_pdu_in_order,
+ ISCSI_FLASHNODE_PDU_INORDER);
+iscsi_flashnode_sess_attr(fnode, chap_auth, ISCSI_FLASHNODE_CHAP_AUTH_EN);
+iscsi_flashnode_sess_attr(fnode, discovery_logout,
+ ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN);
+iscsi_flashnode_sess_attr(fnode, bidi_chap, ISCSI_FLASHNODE_BIDI_CHAP_EN);
+iscsi_flashnode_sess_attr(fnode, discovery_auth_optional,
+ ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL);
+iscsi_flashnode_sess_attr(fnode, erl, ISCSI_FLASHNODE_ERL);
+iscsi_flashnode_sess_attr(fnode, first_burst_len, ISCSI_FLASHNODE_FIRST_BURST);
+iscsi_flashnode_sess_attr(fnode, def_time2wait, ISCSI_FLASHNODE_DEF_TIME2WAIT);
+iscsi_flashnode_sess_attr(fnode, def_time2retain,
+ ISCSI_FLASHNODE_DEF_TIME2RETAIN);
+iscsi_flashnode_sess_attr(fnode, max_outstanding_r2t, ISCSI_FLASHNODE_MAX_R2T);
+iscsi_flashnode_sess_attr(fnode, isid, ISCSI_FLASHNODE_ISID);
+iscsi_flashnode_sess_attr(fnode, tsid, ISCSI_FLASHNODE_TSID);
+iscsi_flashnode_sess_attr(fnode, max_burst_len, ISCSI_FLASHNODE_MAX_BURST);
+iscsi_flashnode_sess_attr(fnode, def_taskmgmt_tmo,
+ ISCSI_FLASHNODE_DEF_TASKMGMT_TMO);
+iscsi_flashnode_sess_attr(fnode, targetalias, ISCSI_FLASHNODE_ALIAS);
+iscsi_flashnode_sess_attr(fnode, targetname, ISCSI_FLASHNODE_NAME);
+iscsi_flashnode_sess_attr(fnode, tpgt, ISCSI_FLASHNODE_TPGT);
+iscsi_flashnode_sess_attr(fnode, discovery_parent_idx,
+ ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX);
+iscsi_flashnode_sess_attr(fnode, discovery_parent_type,
+ ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE);
+iscsi_flashnode_sess_attr(fnode, chap_in_idx, ISCSI_FLASHNODE_CHAP_IN_IDX);
+iscsi_flashnode_sess_attr(fnode, chap_out_idx, ISCSI_FLASHNODE_CHAP_OUT_IDX);
+iscsi_flashnode_sess_attr(fnode, username, ISCSI_FLASHNODE_USERNAME);
+iscsi_flashnode_sess_attr(fnode, username_in, ISCSI_FLASHNODE_USERNAME_IN);
+iscsi_flashnode_sess_attr(fnode, password, ISCSI_FLASHNODE_PASSWORD);
+iscsi_flashnode_sess_attr(fnode, password_in, ISCSI_FLASHNODE_PASSWORD_IN);
+iscsi_flashnode_sess_attr(fnode, is_boot_target, ISCSI_FLASHNODE_IS_BOOT_TGT);
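
[Editor's note] For reference, a hedged sketch of what one invocation of the macro pair above expands to, taking iscsi_flashnode_sess_attr(fnode, erl, ISCSI_FLASHNODE_ERL). This is a simplified textual expansion assuming kernel context, not a stand-alone program:

static ssize_t show_fnode_erl(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct iscsi_bus_flash_session *fnode_sess =
					iscsi_dev_to_flash_session(dev);
	struct iscsi_transport *t = fnode_sess->transport;

	/* defer to the LLD's handler, e.g. qla4xxx_sysfs_ddb_get_param() */
	return t->get_flashnode_param(fnode_sess, ISCSI_FLASHNODE_ERL, buf);
}
static struct device_attribute dev_attr_fnode_erl =
	__ATTR(erl, S_IRUGO, show_fnode_erl, NULL);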
+
+static struct attribute *iscsi_flashnode_sess_attrs[] = {
+ &dev_attr_fnode_auto_snd_tgt_disable.attr,
+ &dev_attr_fnode_discovery_session.attr,
+ &dev_attr_fnode_portal_type.attr,
+ &dev_attr_fnode_entry_enable.attr,
+ &dev_attr_fnode_immediate_data.attr,
+ &dev_attr_fnode_initial_r2t.attr,
+ &dev_attr_fnode_data_seq_in_order.attr,
+ &dev_attr_fnode_data_pdu_in_order.attr,
+ &dev_attr_fnode_chap_auth.attr,
+ &dev_attr_fnode_discovery_logout.attr,
+ &dev_attr_fnode_bidi_chap.attr,
+ &dev_attr_fnode_discovery_auth_optional.attr,
+ &dev_attr_fnode_erl.attr,
+ &dev_attr_fnode_first_burst_len.attr,
+ &dev_attr_fnode_def_time2wait.attr,
+ &dev_attr_fnode_def_time2retain.attr,
+ &dev_attr_fnode_max_outstanding_r2t.attr,
+ &dev_attr_fnode_isid.attr,
+ &dev_attr_fnode_tsid.attr,
+ &dev_attr_fnode_max_burst_len.attr,
+ &dev_attr_fnode_def_taskmgmt_tmo.attr,
+ &dev_attr_fnode_targetalias.attr,
+ &dev_attr_fnode_targetname.attr,
+ &dev_attr_fnode_tpgt.attr,
+ &dev_attr_fnode_discovery_parent_idx.attr,
+ &dev_attr_fnode_discovery_parent_type.attr,
+ &dev_attr_fnode_chap_in_idx.attr,
+ &dev_attr_fnode_chap_out_idx.attr,
+ &dev_attr_fnode_username.attr,
+ &dev_attr_fnode_username_in.attr,
+ &dev_attr_fnode_password.attr,
+ &dev_attr_fnode_password_in.attr,
+ &dev_attr_fnode_is_boot_target.attr,
+ NULL,
+};
+
+static umode_t iscsi_flashnode_sess_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int i)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct iscsi_bus_flash_session *fnode_sess =
+ iscsi_dev_to_flash_session(dev);
+ struct iscsi_transport *t = fnode_sess->transport;
+ int param;
+
+ if (attr == &dev_attr_fnode_auto_snd_tgt_disable.attr) {
+ param = ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE;
+ } else if (attr == &dev_attr_fnode_discovery_session.attr) {
+ param = ISCSI_FLASHNODE_DISCOVERY_SESS;
+ } else if (attr == &dev_attr_fnode_portal_type.attr) {
+ param = ISCSI_FLASHNODE_PORTAL_TYPE;
+ } else if (attr == &dev_attr_fnode_entry_enable.attr) {
+ param = ISCSI_FLASHNODE_ENTRY_EN;
+ } else if (attr == &dev_attr_fnode_immediate_data.attr) {
+ param = ISCSI_FLASHNODE_IMM_DATA_EN;
+ } else if (attr == &dev_attr_fnode_initial_r2t.attr) {
+ param = ISCSI_FLASHNODE_INITIAL_R2T_EN;
+ } else if (attr == &dev_attr_fnode_data_seq_in_order.attr) {
+ param = ISCSI_FLASHNODE_DATASEQ_INORDER;
+ } else if (attr == &dev_attr_fnode_data_pdu_in_order.attr) {
+ param = ISCSI_FLASHNODE_PDU_INORDER;
+ } else if (attr == &dev_attr_fnode_chap_auth.attr) {
+ param = ISCSI_FLASHNODE_CHAP_AUTH_EN;
+ } else if (attr == &dev_attr_fnode_discovery_logout.attr) {
+ param = ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN;
+ } else if (attr == &dev_attr_fnode_bidi_chap.attr) {
+ param = ISCSI_FLASHNODE_BIDI_CHAP_EN;
+ } else if (attr == &dev_attr_fnode_discovery_auth_optional.attr) {
+ param = ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL;
+ } else if (attr == &dev_attr_fnode_erl.attr) {
+ param = ISCSI_FLASHNODE_ERL;
+ } else if (attr == &dev_attr_fnode_first_burst_len.attr) {
+ param = ISCSI_FLASHNODE_FIRST_BURST;
+ } else if (attr == &dev_attr_fnode_def_time2wait.attr) {
+ param = ISCSI_FLASHNODE_DEF_TIME2WAIT;
+ } else if (attr == &dev_attr_fnode_def_time2retain.attr) {
+ param = ISCSI_FLASHNODE_DEF_TIME2RETAIN;
+ } else if (attr == &dev_attr_fnode_max_outstanding_r2t.attr) {
+ param = ISCSI_FLASHNODE_MAX_R2T;
+ } else if (attr == &dev_attr_fnode_isid.attr) {
+ param = ISCSI_FLASHNODE_ISID;
+ } else if (attr == &dev_attr_fnode_tsid.attr) {
+ param = ISCSI_FLASHNODE_TSID;
+ } else if (attr == &dev_attr_fnode_max_burst_len.attr) {
+ param = ISCSI_FLASHNODE_MAX_BURST;
+ } else if (attr == &dev_attr_fnode_def_taskmgmt_tmo.attr) {
+ param = ISCSI_FLASHNODE_DEF_TASKMGMT_TMO;
+ } else if (attr == &dev_attr_fnode_targetalias.attr) {
+ param = ISCSI_FLASHNODE_ALIAS;
+ } else if (attr == &dev_attr_fnode_targetname.attr) {
+ param = ISCSI_FLASHNODE_NAME;
+ } else if (attr == &dev_attr_fnode_tpgt.attr) {
+ param = ISCSI_FLASHNODE_TPGT;
+ } else if (attr == &dev_attr_fnode_discovery_parent_idx.attr) {
+ param = ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX;
+ } else if (attr == &dev_attr_fnode_discovery_parent_type.attr) {
+ param = ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE;
+ } else if (attr == &dev_attr_fnode_chap_in_idx.attr) {
+ param = ISCSI_FLASHNODE_CHAP_IN_IDX;
+ } else if (attr == &dev_attr_fnode_chap_out_idx.attr) {
+ param = ISCSI_FLASHNODE_CHAP_OUT_IDX;
+ } else if (attr == &dev_attr_fnode_username.attr) {
+ param = ISCSI_FLASHNODE_USERNAME;
+ } else if (attr == &dev_attr_fnode_username_in.attr) {
+ param = ISCSI_FLASHNODE_USERNAME_IN;
+ } else if (attr == &dev_attr_fnode_password.attr) {
+ param = ISCSI_FLASHNODE_PASSWORD;
+ } else if (attr == &dev_attr_fnode_password_in.attr) {
+ param = ISCSI_FLASHNODE_PASSWORD_IN;
+ } else if (attr == &dev_attr_fnode_is_boot_target.attr) {
+ param = ISCSI_FLASHNODE_IS_BOOT_TGT;
+ } else {
+ WARN_ONCE(1, "Invalid flashnode session attr");
+ return 0;
+ }
+
+ return t->attr_is_visible(ISCSI_FLASHNODE_PARAM, param);
+}
+
+static struct attribute_group iscsi_flashnode_sess_attr_group = {
+ .attrs = iscsi_flashnode_sess_attrs,
+ .is_visible = iscsi_flashnode_sess_attr_is_visible,
+};
+
+static const struct attribute_group *iscsi_flashnode_sess_attr_groups[] = {
+ &iscsi_flashnode_sess_attr_group,
+ NULL,
+};
+
+static void iscsi_flashnode_sess_release(struct device *dev)
+{
+ struct iscsi_bus_flash_session *fnode_sess =
+ iscsi_dev_to_flash_session(dev);
+
+ kfree(fnode_sess->targetname);
+ kfree(fnode_sess->targetalias);
+ kfree(fnode_sess->portal_type);
+ kfree(fnode_sess);
+}
+
+struct device_type iscsi_flashnode_sess_dev_type = {
+ .name = "iscsi_flashnode_sess_dev_type",
+ .groups = iscsi_flashnode_sess_attr_groups,
+ .release = iscsi_flashnode_sess_release,
+};
+
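For context: whether each of these fnode attributes actually appears in sysfs is decided by the owning transport's attr_is_visible() hook, which the is_visible callbacks above and below forward to with ISCSI_FLASHNODE_PARAM. A minimal sketch of the driver side, assuming a hypothetical my_drv transport (the parameter subset shown is illustrative, not from this patch):

static umode_t my_drv_attr_is_visible(int param_type, int param)
{
	switch (param_type) {
	case ISCSI_FLASHNODE_PARAM:
		switch (param) {
		case ISCSI_FLASHNODE_NAME:
		case ISCSI_FLASHNODE_IPADDR:
		case ISCSI_FLASHNODE_PORT:
			return S_IRUGO;		/* firmware exposes these */
		default:
			return 0;		/* hide unsupported params */
		}
	}
	return 0;
}
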
+/* flash node connection attrs show */
+#define iscsi_flashnode_conn_attr_show(type, name, param) \
+static ssize_t \
+show_##type##_##name(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev);\
+ struct iscsi_bus_flash_session *fnode_sess = \
+ iscsi_flash_conn_to_flash_session(fnode_conn);\
+ struct iscsi_transport *t = fnode_conn->transport; \
+ return t->get_flashnode_param(fnode_sess, param, buf); \
+}
+
+#define iscsi_flashnode_conn_attr(type, name, param) \
+ iscsi_flashnode_conn_attr_show(type, name, param) \
+static ISCSI_FLASHNODE_ATTR(type, name, S_IRUGO, \
+ show_##type##_##name, NULL);
+
+/* Flash node connection attributes */
+
+iscsi_flashnode_conn_attr(fnode, is_fw_assigned_ipv6,
+ ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6);
+iscsi_flashnode_conn_attr(fnode, header_digest, ISCSI_FLASHNODE_HDR_DGST_EN);
+iscsi_flashnode_conn_attr(fnode, data_digest, ISCSI_FLASHNODE_DATA_DGST_EN);
+iscsi_flashnode_conn_attr(fnode, snack_req, ISCSI_FLASHNODE_SNACK_REQ_EN);
+iscsi_flashnode_conn_attr(fnode, tcp_timestamp_stat,
+ ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT);
+iscsi_flashnode_conn_attr(fnode, tcp_nagle_disable,
+ ISCSI_FLASHNODE_TCP_NAGLE_DISABLE);
+iscsi_flashnode_conn_attr(fnode, tcp_wsf_disable,
+ ISCSI_FLASHNODE_TCP_WSF_DISABLE);
+iscsi_flashnode_conn_attr(fnode, tcp_timer_scale,
+ ISCSI_FLASHNODE_TCP_TIMER_SCALE);
+iscsi_flashnode_conn_attr(fnode, tcp_timestamp_enable,
+ ISCSI_FLASHNODE_TCP_TIMESTAMP_EN);
+iscsi_flashnode_conn_attr(fnode, fragment_disable,
+ ISCSI_FLASHNODE_IP_FRAG_DISABLE);
+iscsi_flashnode_conn_attr(fnode, keepalive_tmo, ISCSI_FLASHNODE_KEEPALIVE_TMO);
+iscsi_flashnode_conn_attr(fnode, port, ISCSI_FLASHNODE_PORT);
+iscsi_flashnode_conn_attr(fnode, ipaddress, ISCSI_FLASHNODE_IPADDR);
+iscsi_flashnode_conn_attr(fnode, max_recv_dlength,
+ ISCSI_FLASHNODE_MAX_RECV_DLENGTH);
+iscsi_flashnode_conn_attr(fnode, max_xmit_dlength,
+ ISCSI_FLASHNODE_MAX_XMIT_DLENGTH);
+iscsi_flashnode_conn_attr(fnode, local_port, ISCSI_FLASHNODE_LOCAL_PORT);
+iscsi_flashnode_conn_attr(fnode, ipv4_tos, ISCSI_FLASHNODE_IPV4_TOS);
+iscsi_flashnode_conn_attr(fnode, ipv6_traffic_class, ISCSI_FLASHNODE_IPV6_TC);
+iscsi_flashnode_conn_attr(fnode, ipv6_flow_label,
+ ISCSI_FLASHNODE_IPV6_FLOW_LABEL);
+iscsi_flashnode_conn_attr(fnode, redirect_ipaddr,
+ ISCSI_FLASHNODE_REDIRECT_IPADDR);
+iscsi_flashnode_conn_attr(fnode, max_segment_size,
+ ISCSI_FLASHNODE_MAX_SEGMENT_SIZE);
+iscsi_flashnode_conn_attr(fnode, link_local_ipv6,
+ ISCSI_FLASHNODE_LINK_LOCAL_IPV6);
+iscsi_flashnode_conn_attr(fnode, tcp_xmit_wsf, ISCSI_FLASHNODE_TCP_XMIT_WSF);
+iscsi_flashnode_conn_attr(fnode, tcp_recv_wsf, ISCSI_FLASHNODE_TCP_RECV_WSF);
+iscsi_flashnode_conn_attr(fnode, statsn, ISCSI_FLASHNODE_STATSN);
+iscsi_flashnode_conn_attr(fnode, exp_statsn, ISCSI_FLASHNODE_EXP_STATSN);
+
+static struct attribute *iscsi_flashnode_conn_attrs[] = {
+ &dev_attr_fnode_is_fw_assigned_ipv6.attr,
+ &dev_attr_fnode_header_digest.attr,
+ &dev_attr_fnode_data_digest.attr,
+ &dev_attr_fnode_snack_req.attr,
+ &dev_attr_fnode_tcp_timestamp_stat.attr,
+ &dev_attr_fnode_tcp_nagle_disable.attr,
+ &dev_attr_fnode_tcp_wsf_disable.attr,
+ &dev_attr_fnode_tcp_timer_scale.attr,
+ &dev_attr_fnode_tcp_timestamp_enable.attr,
+ &dev_attr_fnode_fragment_disable.attr,
+ &dev_attr_fnode_max_recv_dlength.attr,
+ &dev_attr_fnode_max_xmit_dlength.attr,
+ &dev_attr_fnode_keepalive_tmo.attr,
+ &dev_attr_fnode_port.attr,
+ &dev_attr_fnode_ipaddress.attr,
+ &dev_attr_fnode_redirect_ipaddr.attr,
+ &dev_attr_fnode_max_segment_size.attr,
+ &dev_attr_fnode_local_port.attr,
+ &dev_attr_fnode_ipv4_tos.attr,
+ &dev_attr_fnode_ipv6_traffic_class.attr,
+ &dev_attr_fnode_ipv6_flow_label.attr,
+ &dev_attr_fnode_link_local_ipv6.attr,
+ &dev_attr_fnode_tcp_xmit_wsf.attr,
+ &dev_attr_fnode_tcp_recv_wsf.attr,
+ &dev_attr_fnode_statsn.attr,
+ &dev_attr_fnode_exp_statsn.attr,
+ NULL,
+};
+
+static umode_t iscsi_flashnode_conn_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int i)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev);
+ struct iscsi_transport *t = fnode_conn->transport;
+ int param;
+
+ if (attr == &dev_attr_fnode_is_fw_assigned_ipv6.attr) {
+ param = ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6;
+ } else if (attr == &dev_attr_fnode_header_digest.attr) {
+ param = ISCSI_FLASHNODE_HDR_DGST_EN;
+ } else if (attr == &dev_attr_fnode_data_digest.attr) {
+ param = ISCSI_FLASHNODE_DATA_DGST_EN;
+ } else if (attr == &dev_attr_fnode_snack_req.attr) {
+ param = ISCSI_FLASHNODE_SNACK_REQ_EN;
+ } else if (attr == &dev_attr_fnode_tcp_timestamp_stat.attr) {
+ param = ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT;
+ } else if (attr == &dev_attr_fnode_tcp_nagle_disable.attr) {
+ param = ISCSI_FLASHNODE_TCP_NAGLE_DISABLE;
+ } else if (attr == &dev_attr_fnode_tcp_wsf_disable.attr) {
+ param = ISCSI_FLASHNODE_TCP_WSF_DISABLE;
+ } else if (attr == &dev_attr_fnode_tcp_timer_scale.attr) {
+ param = ISCSI_FLASHNODE_TCP_TIMER_SCALE;
+ } else if (attr == &dev_attr_fnode_tcp_timestamp_enable.attr) {
+ param = ISCSI_FLASHNODE_TCP_TIMESTAMP_EN;
+ } else if (attr == &dev_attr_fnode_fragment_disable.attr) {
+ param = ISCSI_FLASHNODE_IP_FRAG_DISABLE;
+ } else if (attr == &dev_attr_fnode_max_recv_dlength.attr) {
+ param = ISCSI_FLASHNODE_MAX_RECV_DLENGTH;
+ } else if (attr == &dev_attr_fnode_max_xmit_dlength.attr) {
+ param = ISCSI_FLASHNODE_MAX_XMIT_DLENGTH;
+ } else if (attr == &dev_attr_fnode_keepalive_tmo.attr) {
+ param = ISCSI_FLASHNODE_KEEPALIVE_TMO;
+ } else if (attr == &dev_attr_fnode_port.attr) {
+ param = ISCSI_FLASHNODE_PORT;
+ } else if (attr == &dev_attr_fnode_ipaddress.attr) {
+ param = ISCSI_FLASHNODE_IPADDR;
+ } else if (attr == &dev_attr_fnode_redirect_ipaddr.attr) {
+ param = ISCSI_FLASHNODE_REDIRECT_IPADDR;
+ } else if (attr == &dev_attr_fnode_max_segment_size.attr) {
+ param = ISCSI_FLASHNODE_MAX_SEGMENT_SIZE;
+ } else if (attr == &dev_attr_fnode_local_port.attr) {
+ param = ISCSI_FLASHNODE_LOCAL_PORT;
+ } else if (attr == &dev_attr_fnode_ipv4_tos.attr) {
+ param = ISCSI_FLASHNODE_IPV4_TOS;
+ } else if (attr == &dev_attr_fnode_ipv6_traffic_class.attr) {
+ param = ISCSI_FLASHNODE_IPV6_TC;
+ } else if (attr == &dev_attr_fnode_ipv6_flow_label.attr) {
+ param = ISCSI_FLASHNODE_IPV6_FLOW_LABEL;
+ } else if (attr == &dev_attr_fnode_link_local_ipv6.attr) {
+ param = ISCSI_FLASHNODE_LINK_LOCAL_IPV6;
+ } else if (attr == &dev_attr_fnode_tcp_xmit_wsf.attr) {
+ param = ISCSI_FLASHNODE_TCP_XMIT_WSF;
+ } else if (attr == &dev_attr_fnode_tcp_recv_wsf.attr) {
+ param = ISCSI_FLASHNODE_TCP_RECV_WSF;
+ } else if (attr == &dev_attr_fnode_statsn.attr) {
+ param = ISCSI_FLASHNODE_STATSN;
+ } else if (attr == &dev_attr_fnode_exp_statsn.attr) {
+ param = ISCSI_FLASHNODE_EXP_STATSN;
+ } else {
+ WARN_ONCE(1, "Invalid flashnode connection attr");
+ return 0;
+ }
+
+ return t->attr_is_visible(ISCSI_FLASHNODE_PARAM, param);
+}
+
+static struct attribute_group iscsi_flashnode_conn_attr_group = {
+ .attrs = iscsi_flashnode_conn_attrs,
+ .is_visible = iscsi_flashnode_conn_attr_is_visible,
+};
+
+static const struct attribute_group *iscsi_flashnode_conn_attr_groups[] = {
+ &iscsi_flashnode_conn_attr_group,
+ NULL,
+};
+
+static void iscsi_flashnode_conn_release(struct device *dev)
+{
+ struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev);
+
+ kfree(fnode_conn->ipaddress);
+ kfree(fnode_conn->redirect_ipaddr);
+ kfree(fnode_conn->link_local_ipv6_addr);
+ kfree(fnode_conn);
+}
+
+struct device_type iscsi_flashnode_conn_dev_type = {
+ .name = "iscsi_flashnode_conn_dev_type",
+ .groups = iscsi_flashnode_conn_attr_groups,
+ .release = iscsi_flashnode_conn_release,
+};
+
+struct bus_type iscsi_flashnode_bus;
+
+int iscsi_flashnode_bus_match(struct device *dev,
+ struct device_driver *drv)
+{
+ if (dev->bus == &iscsi_flashnode_bus)
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iscsi_flashnode_bus_match);
+
+struct bus_type iscsi_flashnode_bus = {
+ .name = "iscsi_flashnode",
+ .match = &iscsi_flashnode_bus_match,
+};
+
+/**
+ * iscsi_create_flashnode_sess - Add flashnode session entry in sysfs
+ * @shost: pointer to host data
+ * @index: index of flashnode to add in sysfs
+ * @transport: pointer to transport data
+ * @dd_size: total size to allocate
+ *
+ * Adds a sysfs entry for the flashnode session attributes
+ *
+ * Returns:
+ * pointer to allocated flashnode sess on success
+ * %NULL on failure
+ */
+struct iscsi_bus_flash_session *
+iscsi_create_flashnode_sess(struct Scsi_Host *shost, int index,
+ struct iscsi_transport *transport,
+ int dd_size)
+{
+ struct iscsi_bus_flash_session *fnode_sess;
+ int err;
+
+ fnode_sess = kzalloc(sizeof(*fnode_sess) + dd_size, GFP_KERNEL);
+ if (!fnode_sess)
+ return NULL;
+
+ fnode_sess->transport = transport;
+ fnode_sess->target_id = index;
+ fnode_sess->dev.type = &iscsi_flashnode_sess_dev_type;
+ fnode_sess->dev.bus = &iscsi_flashnode_bus;
+ fnode_sess->dev.parent = &shost->shost_gendev;
+ dev_set_name(&fnode_sess->dev, "flashnode_sess-%u:%u",
+ shost->host_no, index);
+
+ err = device_register(&fnode_sess->dev);
+ if (err)
+ goto free_fnode_sess;
+
+ if (dd_size)
+ fnode_sess->dd_data = &fnode_sess[1];
+
+ return fnode_sess;
+
+free_fnode_sess:
+ kfree(fnode_sess);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(iscsi_create_flashnode_sess);
+
+/**
+ * iscsi_create_flashnode_conn - Add flashnode conn entry in sysfs
+ * @shost: pointer to host data
+ * @fnode_sess: pointer to the parent flashnode session entry
+ * @transport: pointer to transport data
+ * @dd_size: total size to allocate
+ *
+ * Adds a sysfs entry for the flashnode connection attributes
+ *
+ * Returns:
+ * pointer to allocated flashnode conn on success
+ * %NULL on failure
+ */
+struct iscsi_bus_flash_conn *
+iscsi_create_flashnode_conn(struct Scsi_Host *shost,
+ struct iscsi_bus_flash_session *fnode_sess,
+ struct iscsi_transport *transport,
+ int dd_size)
+{
+ struct iscsi_bus_flash_conn *fnode_conn;
+ int err;
+
+ fnode_conn = kzalloc(sizeof(*fnode_conn) + dd_size, GFP_KERNEL);
+ if (!fnode_conn)
+ return NULL;
+
+ fnode_conn->transport = transport;
+ fnode_conn->dev.type = &iscsi_flashnode_conn_dev_type;
+ fnode_conn->dev.bus = &iscsi_flashnode_bus;
+ fnode_conn->dev.parent = &fnode_sess->dev;
+ dev_set_name(&fnode_conn->dev, "flashnode_conn-%u:%u:0",
+ shost->host_no, fnode_sess->target_id);
+
+ err = device_register(&fnode_conn->dev);
+ if (err)
+ goto free_fnode_conn;
+
+ if (dd_size)
+ fnode_conn->dd_data = &fnode_conn[1];
+
+ return fnode_conn;
+
+free_fnode_conn:
+ kfree(fnode_conn);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(iscsi_create_flashnode_conn);
+
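Taken together, the two constructors above give a driver one call pair per firmware DDB entry. A hedged usage sketch; my_iscsi_transport and my_drv_export_ddb are hypothetical names, and dd_size is left at 0 for brevity:

static int my_drv_export_ddb(struct Scsi_Host *shost, int ddb_index)
{
	struct iscsi_bus_flash_session *fnode_sess;
	struct iscsi_bus_flash_conn *fnode_conn;

	fnode_sess = iscsi_create_flashnode_sess(shost, ddb_index,
						 &my_iscsi_transport, 0);
	if (!fnode_sess)
		return -ENOMEM;

	fnode_conn = iscsi_create_flashnode_conn(shost, fnode_sess,
						 &my_iscsi_transport, 0);
	if (!fnode_conn) {
		/* destroying the session also unregisters its children */
		iscsi_destroy_flashnode_sess(fnode_sess);
		return -ENOMEM;
	}
	return 0;
}
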
+/**
+ * iscsi_is_flashnode_conn_dev - verify passed device is a flashnode conn
+ * @dev: device to verify
+ * @data: pointer to data containing value to use for verification
+ *
+ * Verifies whether the passed device is a flashnode connection device
+ *
+ * Returns:
+ * 1 if the device is a flashnode conn device
+ * 0 otherwise
+ */
+int iscsi_is_flashnode_conn_dev(struct device *dev, void *data)
+{
+ return dev->bus == &iscsi_flashnode_bus;
+}
+EXPORT_SYMBOL_GPL(iscsi_is_flashnode_conn_dev);
+
+static int iscsi_destroy_flashnode_conn(struct iscsi_bus_flash_conn *fnode_conn)
+{
+ device_unregister(&fnode_conn->dev);
+ return 0;
+}
+
+static int flashnode_match_index(struct device *dev, void *data)
+{
+ struct iscsi_bus_flash_session *fnode_sess = NULL;
+ int ret = 0;
+
+ if (!iscsi_flashnode_bus_match(dev, NULL))
+ goto exit_match_index;
+
+ fnode_sess = iscsi_dev_to_flash_session(dev);
+ ret = (fnode_sess->target_id == *((int *)data)) ? 1 : 0;
+
+exit_match_index:
+ return ret;
+}
+
+/**
+ * iscsi_get_flashnode_by_index - finds flashnode session entry by index
+ * @shost: pointer to host data
+ * @data: pointer to data containing value to use for comparison
+ * @fn: function pointer that does actual comparison
+ *
+ * Finds the flashnode session object for the passed index
+ *
+ * Returns:
+ * pointer to found flashnode session object on success
+ * %NULL on failure
+ */
+static struct iscsi_bus_flash_session *
+iscsi_get_flashnode_by_index(struct Scsi_Host *shost, void *data,
+ int (*fn)(struct device *dev, void *data))
+{
+ struct iscsi_bus_flash_session *fnode_sess = NULL;
+ struct device *dev;
+
+ dev = device_find_child(&shost->shost_gendev, data, fn);
+ if (dev)
+ fnode_sess = iscsi_dev_to_flash_session(dev);
+
+ return fnode_sess;
+}
+
+/**
+ * iscsi_find_flashnode_sess - finds flashnode session entry
+ * @shost: pointer to host data
+ * @data: pointer to data containing value to use for comparison
+ * @fn: function pointer that does actual comparison
+ *
+ * Finds the flashnode session object by comparing the passed data using the
+ * logic defined in the passed function pointer
+ *
+ * Returns:
+ * pointer to found flashnode session device object on success
+ * %NULL on failure
+ */
+struct device *
+iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
+ int (*fn)(struct device *dev, void *data))
+{
+ struct device *dev;
+
+ dev = device_find_child(&shost->shost_gendev, data, fn);
+ return dev;
+}
+EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess);
+
+/**
+ * iscsi_find_flashnode_conn - finds flashnode connection entry
+ * @fnode_sess: pointer to parent flashnode session entry
+ * @data: pointer to data containing value to use for comparison
+ * @fn: function pointer that does actual comparison
+ *
+ * Finds the flashnode connection object by comparing the passed data using
+ * the logic defined in the passed function pointer
+ *
+ * Returns:
+ * pointer to found flashnode connection device object on success
+ * %NULL on failure
+ */
+struct device *
+iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess,
+ void *data,
+ int (*fn)(struct device *dev, void *data))
+{
+ struct device *dev;
+
+ dev = device_find_child(&fnode_sess->dev, data, fn);
+ return dev;
+}
+EXPORT_SYMBOL_GPL(iscsi_find_flashnode_conn);
+
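Both finders are thin wrappers around device_find_child(), which takes a reference on the device it returns, so callers are expected to drop it with put_device(). A sketch under that assumption; my_match_fnode and my_drv_lookup_conn are illustrative names:

static int my_match_fnode(struct device *dev, void *data)
{
	return iscsi_flashnode_bus_match(dev, NULL) &&
	       iscsi_dev_to_flash_session(dev)->target_id == *(int *)data;
}

static struct device *my_drv_lookup_conn(struct Scsi_Host *shost, int idx)
{
	struct device *sess_dev, *conn_dev = NULL;

	sess_dev = iscsi_find_flashnode_sess(shost, &idx, my_match_fnode);
	if (sess_dev) {
		conn_dev = iscsi_find_flashnode_conn(
				iscsi_dev_to_flash_session(sess_dev),
				NULL, iscsi_is_flashnode_conn_dev);
		put_device(sess_dev);	/* drop the find reference */
	}
	return conn_dev;	/* caller must put_device() this one */
}
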
+static int iscsi_iter_destroy_flashnode_conn_fn(struct device *dev, void *data)
+{
+ if (!iscsi_is_flashnode_conn_dev(dev, NULL))
+ return 0;
+
+ return iscsi_destroy_flashnode_conn(iscsi_dev_to_flash_conn(dev));
+}
+
+/**
+ * iscsi_destroy_flashnode_sess - destroy flashnode session entry
+ * @fnode_sess: pointer to flashnode session entry to be destroyed
+ *
+ * Deletes the flashnode session entry and all child flashnode connection
+ * entries from sysfs
+ */
+void iscsi_destroy_flashnode_sess(struct iscsi_bus_flash_session *fnode_sess)
+{
+ int err;
+
+ err = device_for_each_child(&fnode_sess->dev, NULL,
+ iscsi_iter_destroy_flashnode_conn_fn);
+ if (err)
+ pr_err("Could not delete all connections for %s. Error %d.\n",
+ fnode_sess->dev.kobj.name, err);
+
+ device_unregister(&fnode_sess->dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_destroy_flashnode_sess);
+
+static int iscsi_iter_destroy_flashnode_fn(struct device *dev, void *data)
+{
+ if (!iscsi_flashnode_bus_match(dev, NULL))
+ return 0;
+
+ iscsi_destroy_flashnode_sess(iscsi_dev_to_flash_session(dev));
+ return 0;
+}
+
+/**
+ * iscsi_destroy_all_flashnode - destroy all flashnode session entries
+ * @shost: pointer to host data
+ *
+ * Destroys all the flashnode session entries and all corresponding child
+ * flashnode connection entries from sysfs
+ */
+void iscsi_destroy_all_flashnode(struct Scsi_Host *shost)
+{
+ device_for_each_child(&shost->shost_gendev, NULL,
+ iscsi_iter_destroy_flashnode_fn);
+}
+EXPORT_SYMBOL_GPL(iscsi_destroy_all_flashnode);
+
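On teardown a driver can drop the whole flashnode tree with a single call, e.g. from its PCI .remove path. A minimal sketch; my_drv_remove is a hypothetical driver function:

static void my_drv_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);

	/* unregisters every flashnode_sess-%u:%u entry and its connections */
	iscsi_destroy_all_flashnode(shost);
	/* ... remainder of the usual host teardown ... */
}
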
+/*
* BSG support
*/
/**
@@ -2092,6 +2776,294 @@ static int iscsi_delete_chap(struct iscsi_transport *transport,
return err;
}

+static const struct {
+ enum iscsi_discovery_parent_type value;
+ char *name;
+} iscsi_discovery_parent_names[] = {
+ {ISCSI_DISC_PARENT_UNKNOWN, "Unknown" },
+ {ISCSI_DISC_PARENT_SENDTGT, "Sendtarget" },
+ {ISCSI_DISC_PARENT_ISNS, "isns" },
+};
+
+char *iscsi_get_discovery_parent_name(int parent_type)
+{
+ int i;
+ char *state = "Unknown!";
+
+ for (i = 0; i < ARRAY_SIZE(iscsi_discovery_parent_names); i++) {
+ if (iscsi_discovery_parent_names[i].value == parent_type) {
+ state = iscsi_discovery_parent_names[i].name;
+ break;
+ }
+ }
+ return state;
+}
+EXPORT_SYMBOL_GPL(iscsi_get_discovery_parent_name);
+
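A driver's get_flashnode_param() handler could use this helper to render the stored numeric type. A fragment, assuming the session struct carries a discovery_parent_type field mirroring the fnode_discovery_parent_type attribute:

case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
	return sprintf(buf, "%s\n", iscsi_get_discovery_parent_name(
				fnode_sess->discovery_parent_type));
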
+static int iscsi_set_flashnode_param(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev, uint32_t len)
+{
+ char *data = (char *)ev + sizeof(*ev);
+ struct Scsi_Host *shost;
+ struct iscsi_bus_flash_session *fnode_sess;
+ struct iscsi_bus_flash_conn *fnode_conn;
+ struct device *dev;
+ uint32_t *idx;
+ int err = 0;
+
+ if (!transport->set_flashnode_param) {
+ err = -ENOSYS;
+ goto exit_set_fnode;
+ }
+
+ shost = scsi_host_lookup(ev->u.set_flashnode.host_no);
+ if (!shost) {
+ pr_err("%s could not find host no %u\n",
+ __func__, ev->u.set_flashnode.host_no);
+ err = -ENODEV;
+ goto put_host;
+ }
+
+ idx = &ev->u.set_flashnode.flashnode_idx;
+ fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
+ flashnode_match_index);
+ if (!fnode_sess) {
+ pr_err("%s could not find flashnode %u for host no %u\n",
+ __func__, *idx, ev->u.set_flashnode.host_no);
+ err = -ENODEV;
+ goto put_host;
+ }
+
+ dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
+ iscsi_is_flashnode_conn_dev);
+ if (!dev) {
+ err = -ENODEV;
+ goto put_host;
+ }
+
+ fnode_conn = iscsi_dev_to_flash_conn(dev);
+ err = transport->set_flashnode_param(fnode_sess, fnode_conn, data, len);
+
+put_host:
+ scsi_host_put(shost);
+
+exit_set_fnode:
+ return err;
+}
+
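The payload handed to set_flashnode_param() is a stream of netlink attributes, each wrapping one parameter. A hedged sketch of the driver-side hook, assuming the iscsi_flashnode_param_info layout from iscsi_if.h and hypothetical my_drv naming:

static int my_drv_set_flashnode_param(struct iscsi_bus_flash_session *fnode_sess,
				      struct iscsi_bus_flash_conn *fnode_conn,
				      void *data, int len)
{
	struct iscsi_flashnode_param_info *fnode_param;
	struct nlattr *attr;
	int rem;

	nla_for_each_attr(attr, data, len, rem) {
		fnode_param = nla_data(attr);
		switch (fnode_param->param) {
		case ISCSI_FLASHNODE_PORT:
			/* copy fnode_param->value into the firmware DDB */
			break;
		/* ... one case per ISCSI_FLASHNODE_* the adapter supports ... */
		}
	}
	return 0;	/* or the status of flashing the updated DDB */
}
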
+static int iscsi_new_flashnode(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev, uint32_t len)
+{
+ char *data = (char *)ev + sizeof(*ev);
+ struct Scsi_Host *shost;
+ int index;
+ int err = 0;
+
+ if (!transport->new_flashnode) {
+ err = -ENOSYS;
+ goto exit_new_fnode;
+ }
+
+ shost = scsi_host_lookup(ev->u.new_flashnode.host_no);
+ if (!shost) {
+ pr_err("%s could not find host no %u\n",
+ __func__, ev->u.new_flashnode.host_no);
+ err = -ENODEV;
+ goto put_host;
+ }
+
+ index = transport->new_flashnode(shost, data, len);
+
+ if (index >= 0)
+ ev->r.new_flashnode_ret.flashnode_idx = index;
+ else
+ err = -EIO;
+
+put_host:
+ scsi_host_put(shost);
+
+exit_new_fnode:
+ return err;
+}
+
+static int iscsi_del_flashnode(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev)
+{
+ struct Scsi_Host *shost;
+ struct iscsi_bus_flash_session *fnode_sess;
+ uint32_t *idx;
+ int err = 0;
+
+ if (!transport->del_flashnode) {
+ err = -ENOSYS;
+ goto exit_del_fnode;
+ }
+
+ shost = scsi_host_lookup(ev->u.del_flashnode.host_no);
+ if (!shost) {
+ pr_err("%s could not find host no %u\n",
+ __func__, ev->u.del_flashnode.host_no);
+ err = -ENODEV;
+ goto put_host;
+ }
+
+ idx = &ev->u.del_flashnode.flashnode_idx;
+ fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
+ flashnode_match_index);
+ if (!fnode_sess) {
+ pr_err("%s could not find flashnode %u for host no %u\n",
+ __func__, *idx, ev->u.del_flashnode.host_no);
+ err = -ENODEV;
+ goto put_host;
+ }
+
+ err = transport->del_flashnode(fnode_sess);
+
+put_host:
+ scsi_host_put(shost);
+
+exit_del_fnode:
+ return err;
+}
+
+static int iscsi_login_flashnode(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev)
+{
+ struct Scsi_Host *shost;
+ struct iscsi_bus_flash_session *fnode_sess;
+ struct iscsi_bus_flash_conn *fnode_conn;
+ struct device *dev;
+ uint32_t *idx;
+ int err = 0;
+
+ if (!transport->login_flashnode) {
+ err = -ENOSYS;
+ goto exit_login_fnode;
+ }
+
+ shost = scsi_host_lookup(ev->u.login_flashnode.host_no);
+ if (!shost) {
+ pr_err("%s could not find host no %u\n",
+ __func__, ev->u.login_flashnode.host_no);
+ err = -ENODEV;
+ goto put_host;
+ }
+
+ idx = &ev->u.login_flashnode.flashnode_idx;
+ fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
+ flashnode_match_index);
+ if (!fnode_sess) {
+ pr_err("%s could not find flashnode %u for host no %u\n",
+ __func__, *idx, ev->u.login_flashnode.host_no);
+ err = -ENODEV;
+ goto put_host;
+ }
+
+ dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
+ iscsi_is_flashnode_conn_dev);
+ if (!dev) {
+ err = -ENODEV;
+ goto put_host;
+ }
+
+ fnode_conn = iscsi_dev_to_flash_conn(dev);
+ err = transport->login_flashnode(fnode_sess, fnode_conn);
+
+put_host:
+ scsi_host_put(shost);
+
+exit_login_fnode:
+ return err;
+}
+
+static int iscsi_logout_flashnode(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev)
+{
+ struct Scsi_Host *shost;
+ struct iscsi_bus_flash_session *fnode_sess;
+ struct iscsi_bus_flash_conn *fnode_conn;
+ struct device *dev;
+ uint32_t *idx;
+ int err = 0;
+
+ if (!transport->logout_flashnode) {
+ err = -ENOSYS;
+ goto exit_logout_fnode;
+ }
+
+ shost = scsi_host_lookup(ev->u.logout_flashnode.host_no);
+ if (!shost) {
+ pr_err("%s could not find host no %u\n",
+ __func__, ev->u.logout_flashnode.host_no);
+ err = -ENODEV;
+ goto put_host;
+ }
+
+ idx = &ev->u.logout_flashnode.flashnode_idx;
+ fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
+ flashnode_match_index);
+ if (!fnode_sess) {
+ pr_err("%s could not find flashnode %u for host no %u\n",
+ __func__, *idx, ev->u.logout_flashnode.host_no);
+ err = -ENODEV;
+ goto put_host;
+ }
+
+ dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
+ iscsi_is_flashnode_conn_dev);
+ if (!dev) {
+ err = -ENODEV;
+ goto put_host;
+ }
+
+ fnode_conn = iscsi_dev_to_flash_conn(dev);
+
+ err = transport->logout_flashnode(fnode_sess, fnode_conn);
+
+put_host:
+ scsi_host_put(shost);
+
+exit_logout_fnode:
+ return err;
+}
+
+static int iscsi_logout_flashnode_sid(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev)
+{
+ struct Scsi_Host *shost;
+ struct iscsi_cls_session *session;
+ int err = 0;
+
+ if (!transport->logout_flashnode_sid) {
+ err = -ENOSYS;
+ goto exit_logout_sid;
+ }
+
+ shost = scsi_host_lookup(ev->u.logout_flashnode_sid.host_no);
+ if (!shost) {
+ pr_err("%s could not find host no %u\n",
+ __func__, ev->u.logout_flashnode_sid.host_no);
+ err = -ENODEV;
+ goto put_host;
+ }
+
+ session = iscsi_session_lookup(ev->u.logout_flashnode_sid.sid);
+ if (!session) {
+ pr_err("%s could not find session id %u\n",
+ __func__, ev->u.logout_flashnode_sid.sid);
+ err = -EINVAL;
+ goto put_host;
+ }
+
+ err = transport->logout_flashnode_sid(session);
+
+put_host:
+ scsi_host_put(shost);
+
+exit_logout_sid:
+ return err;
+}
+
static int
iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
{
@@ -2246,6 +3218,27 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
case ISCSI_UEVENT_DELETE_CHAP:
err = iscsi_delete_chap(transport, ev);
break;
+ case ISCSI_UEVENT_SET_FLASHNODE_PARAMS:
+ err = iscsi_set_flashnode_param(transport, ev,
+ nlmsg_attrlen(nlh,
+ sizeof(*ev)));
+ break;
+ case ISCSI_UEVENT_NEW_FLASHNODE:
+ err = iscsi_new_flashnode(transport, ev,
+ nlmsg_attrlen(nlh, sizeof(*ev)));
+ break;
+ case ISCSI_UEVENT_DEL_FLASHNODE:
+ err = iscsi_del_flashnode(transport, ev);
+ break;
+ case ISCSI_UEVENT_LOGIN_FLASHNODE:
+ err = iscsi_login_flashnode(transport, ev);
+ break;
+ case ISCSI_UEVENT_LOGOUT_FLASHNODE:
+ err = iscsi_logout_flashnode(transport, ev);
+ break;
+ case ISCSI_UEVENT_LOGOUT_FLASHNODE_SID:
+ err = iscsi_logout_flashnode_sid(transport, ev);
+ break;
default:
err = -ENOSYS;
break;
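
For reference, the sender side of these new cases is an ordinary NETLINK_ISCSI request. A heavily hedged userspace fragment; the transport_handle, host_no and sid values are placeholders:

struct iscsi_uevent ev = {
	.type = ISCSI_UEVENT_LOGOUT_FLASHNODE_SID,
	.transport_handle = handle,	/* placeholder: read from sysfs */
	.u.logout_flashnode_sid = {
		.host_no = 0,		/* placeholder host number */
		.sid = 1,		/* placeholder session id */
	},
};
/* wrap ev in an nlmsghdr and sendmsg() it on a NETLINK_ISCSI socket,
 * the same way iscsid issues the other ISCSI_UEVENT_* requests */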
@@ -2981,10 +3974,14 @@ static __init int iscsi_transport_init(void)
if (err)
goto unregister_conn_class;
+ err = bus_register(&iscsi_flashnode_bus);
+ if (err)
+ goto unregister_session_class;
+
nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, &cfg);
if (!nls) {
err = -ENOBUFS;
- goto unregister_session_class;
+ goto unregister_flashnode_bus;
}
iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh");
@@ -2995,6 +3992,8 @@ static __init int iscsi_transport_init(void)
release_nls:
netlink_kernel_release(nls);
+unregister_flashnode_bus:
+ bus_unregister(&iscsi_flashnode_bus);
unregister_session_class:
transport_class_unregister(&iscsi_session_class);
unregister_conn_class:
@@ -3014,6 +4013,7 @@ static void __exit iscsi_transport_exit(void)
{
destroy_workqueue(iscsi_eh_timer_workq);
netlink_kernel_release(nls);
+ bus_unregister(&iscsi_flashnode_bus);
transport_class_unregister(&iscsi_connection_class);
transport_class_unregister(&iscsi_session_class);
transport_class_unregister(&iscsi_host_class);