author    Linus Torvalds <torvalds@linux-foundation.org>  2010-03-19 02:54:31 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-03-19 02:54:31 +0300
commit    961cde93dee2658000ead32abffb8ddf0727abe0 (patch)
tree      2419e204132abe2ec2bb7f08bd20042573cc9bd6 /drivers
parent    f82c37e7bb4c4d9b6a476c642d5c2d2efbd6f240 (diff)
parent    0d9dc7c8b9b7fa0f53647423b41056ee1beed735 (diff)
download  linux-961cde93dee2658000ead32abffb8ddf0727abe0.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (69 commits)
  [SCSI] scsi_transport_fc: Fix synchronization issue while deleting vport
  [SCSI] bfa: Update the driver version to 2.1.2.1.
  [SCSI] bfa: Remove unused header files and did some cleanup.
  [SCSI] bfa: Handle SCSI IO underrun case.
  [SCSI] bfa: FCS and include file changes.
  [SCSI] bfa: Modified the portstats get/clear logic
  [SCSI] bfa: Replace bfa_get_attr() with specific APIs
  [SCSI] bfa: New portlog entries for events (FIP/FLOGI/FDISC/LOGO).
  [SCSI] bfa: Rename pport to fcport in BFA FCS.
  [SCSI] bfa: IOC fixes, check for IOC down condition.
  [SCSI] bfa: In MSIX mode, ignore spurious RME interrupts when FCoE ports are in FW mismatch state.
  [SCSI] bfa: Fix Command Queue (CPE) full condition check and ack CPE interrupt.
  [SCSI] bfa: IOC recovery fix in fcmode.
  [SCSI] bfa: AEN and byte alignment fixes.
  [SCSI] bfa: Introduce a link notification state machine.
  [SCSI] bfa: Added firmware save clear feature for BFA driver.
  [SCSI] bfa: FCS authentication related changes.
  [SCSI] bfa: PCI VPD, FIP and include file changes.
  [SCSI] bfa: Fix to copy fpma MAC when requested by user space application.
  [SCSI] bfa: RPORT state machine: direct attach mode fix.
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c  |  2
-rw-r--r--  drivers/scsi/Kconfig  |  6
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.c  |  7
-rw-r--r--  drivers/scsi/be2iscsi/be_iscsi.c  |  4
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c  |  201
-rw-r--r--  drivers/scsi/be2iscsi/be_main.h  |  11
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.c  |  14
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.h  |  8
-rw-r--r--  drivers/scsi/bfa/Makefile  |  8
-rw-r--r--  drivers/scsi/bfa/bfa_core.c  |  19
-rw-r--r--  drivers/scsi/bfa/bfa_fcport.c  |  1709
-rw-r--r--  drivers/scsi/bfa/bfa_fcs.c  |  63
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_lport.c  |  75
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_port.c  |  11
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_uf.c  |  8
-rw-r--r--  drivers/scsi/bfa/bfa_hw_cb.c  |  13
-rw-r--r--  drivers/scsi/bfa/bfa_hw_ct.c  |  9
-rw-r--r--  drivers/scsi/bfa/bfa_intr.c  |  111
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.c  |  762
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.h  |  57
-rw-r--r--  drivers/scsi/bfa/bfa_ioc_cb.c  |  274
-rw-r--r--  drivers/scsi/bfa/bfa_ioc_ct.c  |  423
-rw-r--r--  drivers/scsi/bfa/bfa_iocfc.c  |  24
-rw-r--r--  drivers/scsi/bfa/bfa_iocfc.h  |  3
-rw-r--r--  drivers/scsi/bfa/bfa_ioim.c  |  22
-rw-r--r--  drivers/scsi/bfa/bfa_itnim.c  |  30
-rw-r--r--  drivers/scsi/bfa/bfa_lps.c  |  134
-rw-r--r--  drivers/scsi/bfa/bfa_module.c  |  4
-rw-r--r--  drivers/scsi/bfa/bfa_modules_priv.h  |  2
-rw-r--r--  drivers/scsi/bfa/bfa_port_priv.h  |  57
-rw-r--r--  drivers/scsi/bfa/bfa_priv.h  |  2
-rw-r--r--  drivers/scsi/bfa/bfa_rport.c  |  26
-rw-r--r--  drivers/scsi/bfa/bfa_trcmod_priv.h  |  62
-rw-r--r--  drivers/scsi/bfa/bfa_tskim.c  |  14
-rw-r--r--  drivers/scsi/bfa/bfad.c  |  208
-rw-r--r--  drivers/scsi/bfa/bfad_attr.c  |  76
-rw-r--r--  drivers/scsi/bfa/bfad_attr.h  |  9
-rw-r--r--  drivers/scsi/bfa/bfad_drv.h  |  35
-rw-r--r--  drivers/scsi/bfa/bfad_im.c  |  53
-rw-r--r--  drivers/scsi/bfa/bfad_im.h  |  5
-rw-r--r--  drivers/scsi/bfa/bfad_intr.c  |  11
-rw-r--r--  drivers/scsi/bfa/fabric.c  |  59
-rw-r--r--  drivers/scsi/bfa/fcbuild.h  |  6
-rw-r--r--  drivers/scsi/bfa/fcpim.c  |  51
-rw-r--r--  drivers/scsi/bfa/fcs_fabric.h  |  2
-rw-r--r--  drivers/scsi/bfa/fcs_fcpim.h  |  5
-rw-r--r--  drivers/scsi/bfa/fcs_lport.h  |  7
-rw-r--r--  drivers/scsi/bfa/fcs_port.h  |  3
-rw-r--r--  drivers/scsi/bfa/fcs_rport.h  |  3
-rw-r--r--  drivers/scsi/bfa/fcs_uf.h  |  3
-rw-r--r--  drivers/scsi/bfa/fcs_vport.h  |  8
-rw-r--r--  drivers/scsi/bfa/fdmi.c  |  79
-rw-r--r--  drivers/scsi/bfa/include/aen/bfa_aen.h  |  50
-rw-r--r--  drivers/scsi/bfa/include/bfa.h  |  22
-rw-r--r--  drivers/scsi/bfa/include/bfa_svc.h  |  101
-rw-r--r--  drivers/scsi/bfa/include/bfa_timer.h  |  2
-rw-r--r--  drivers/scsi/bfa/include/bfi/bfi.h  |  4
-rw-r--r--  drivers/scsi/bfa/include/bfi/bfi_cbreg.h  |  16
-rw-r--r--  drivers/scsi/bfa/include/bfi/bfi_ctreg.h  |  26
-rw-r--r--  drivers/scsi/bfa/include/bfi/bfi_ioc.h  |  2
-rw-r--r--  drivers/scsi/bfa/include/bfi/bfi_lps.h  |  8
-rw-r--r--  drivers/scsi/bfa/include/bfi/bfi_pport.h  |  172
-rw-r--r--  drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h  |  4
-rw-r--r--  drivers/scsi/bfa/include/cs/bfa_log.h  |  2
-rw-r--r--  drivers/scsi/bfa/include/cs/bfa_plog.h  |  9
-rw-r--r--  drivers/scsi/bfa/include/cs/bfa_sm.h  |  8
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_aen.h  |  10
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_auth.h  |  22
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_cee.h  |  14
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_driver.h  |  3
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_ethport.h  |  1
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_fcport.h  |  94
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_im_common.h  |  32
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_im_team.h  |  72
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_ioc.h  |  3
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h  |  12
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_lport.h  |  4
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_mfg.h  |  111
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_port.h  |  19
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_pport.h  |  151
-rw-r--r--  drivers/scsi/bfa/include/defs/bfa_defs_status.h  |  17
-rw-r--r--  drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h  |  1
-rw-r--r--  drivers/scsi/bfa/include/fcs/bfa_fcs.h  |  5
-rw-r--r--  drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h  |  8
-rw-r--r--  drivers/scsi/bfa/include/log/bfa_log_hal.h  |  6
-rw-r--r--  drivers/scsi/bfa/include/log/bfa_log_linux.h  |  16
-rw-r--r--  drivers/scsi/bfa/include/protocol/fc.h  |  5
-rw-r--r--  drivers/scsi/bfa/include/protocol/pcifw.h  |  75
-rw-r--r--  drivers/scsi/bfa/loop.c  |  2
-rw-r--r--  drivers/scsi/bfa/lport_api.c  |  5
-rw-r--r--  drivers/scsi/bfa/ms.c  |  29
-rw-r--r--  drivers/scsi/bfa/ns.c  |  36
-rw-r--r--  drivers/scsi/bfa/rport.c  |  91
-rw-r--r--  drivers/scsi/bfa/rport_api.c  |  2
-rw-r--r--  drivers/scsi/bfa/rport_ftrs.c  |  12
-rw-r--r--  drivers/scsi/bfa/scn.c  |  10
-rw-r--r--  drivers/scsi/bfa/vport.c  |  86
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_iscsi.c  |  2
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_iscsi.c  |  2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_emc.c  |  6
-rw-r--r--  drivers/scsi/hpsa.c  |  330
-rw-r--r--  drivers/scsi/hpsa.h  |  7
-rw-r--r--  drivers/scsi/hpsa_cmd.h  |  20
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c  |  27
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c  |  19
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.h  |  1
-rw-r--r--  drivers/scsi/ibmvscsi/iseries_vscsi.c  |  6
-rw-r--r--  drivers/scsi/ibmvscsi/rpa_vscsi.c  |  13
-rw-r--r--  drivers/scsi/ipr.c  |  1756
-rw-r--r--  drivers/scsi/ipr.h  |  467
-rw-r--r--  drivers/scsi/iscsi_tcp.c  |  2
-rw-r--r--  drivers/scsi/libiscsi.c  |  23
-rw-r--r--  drivers/scsi/lpfc/lpfc.h  |  10
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c  |  7
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c  |  332
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.h  |  12
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h  |  7
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c  |  142
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c  |  527
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c  |  277
-rw-r--r--  drivers/scsi/lpfc/lpfc_logmsg.h  |  1
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c  |  8
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c  |  49
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c  |  413
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h  |  1
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h  |  38
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h  |  2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c  |  7
-rw-r--r--  drivers/scsi/osd/osd_initiator.c  |  4
-rw-r--r--  drivers/scsi/raid_class.c  |  2
-rw-r--r--  drivers/scsi/scsi_transport_fc.c  |  24
-rw-r--r--  drivers/scsi/sd.c  |  4
132 files changed, 7202 insertions, 3524 deletions
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 71237f8f78f7..e78af36d3a0e 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -613,7 +613,7 @@ static struct scsi_host_template iscsi_iser_sht = {
.cmd_per_lun = ISER_DEF_CMD_PER_LUN,
.eh_abort_handler = iscsi_eh_abort,
.eh_device_reset_handler= iscsi_eh_device_reset,
- .eh_target_reset_handler= iscsi_eh_target_reset,
+ .eh_target_reset_handler = iscsi_eh_recover_target,
.target_alloc = iscsi_target_alloc,
.use_clustering = DISABLE_CLUSTERING,
.proc_name = "iscsi_iser",
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 9191d1ea6451..75f2336807cb 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1,9 +1,15 @@
menu "SCSI device support"
+config SCSI_MOD
+ tristate
+ default y if SCSI=n || SCSI=y
+ default m if SCSI=m
+
config RAID_ATTRS
tristate "RAID Transport Class"
default n
depends on BLOCK
+ depends on SCSI_MOD
---help---
Provides RAID
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 67098578fba4..cda6642c7368 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -32,18 +32,11 @@ void be_mcc_notify(struct beiscsi_hba *phba)
unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
{
unsigned int tag = 0;
- unsigned int num = 0;
-mcc_tag_rdy:
if (phba->ctrl.mcc_tag_available) {
tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
phba->ctrl.mcc_numtag[tag] = 0;
- } else {
- udelay(100);
- num++;
- if (num < mcc_timeout)
- goto mcc_tag_rdy;
}
if (tag) {
phba->ctrl.mcc_tag_available--;
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 29a3aaf35f9f..c3928cb8b042 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -482,7 +482,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep);
if (!tag) {
SE_DEBUG(DBG_LVL_1,
- "mgmt_invalidate_connection Failed for cid=%d \n",
+ "mgmt_open_connection Failed for cid=%d \n",
beiscsi_ep->ep_cid);
} else {
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
@@ -701,7 +701,7 @@ void beiscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
if (!tag) {
SE_DEBUG(DBG_LVL_1,
"mgmt_invalidate_connection Failed for cid=%d \n",
- beiscsi_ep->ep_cid);
+ beiscsi_ep->ep_cid);
} else {
wait_event_interruptible(phba->ctrl.mcc_wait[tag],
phba->ctrl.mcc_numtag[tag]);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 7c22616ab141..fcfb29e02d8a 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -58,6 +58,123 @@ static int beiscsi_slave_configure(struct scsi_device *sdev)
return 0;
}
+static int beiscsi_eh_abort(struct scsi_cmnd *sc)
+{
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
+ struct beiscsi_io_task *aborted_io_task;
+ struct iscsi_conn *conn;
+ struct beiscsi_conn *beiscsi_conn;
+ struct beiscsi_hba *phba;
+ struct iscsi_session *session;
+ struct invalidate_command_table *inv_tbl;
+ unsigned int cid, tag, num_invalidate;
+
+ cls_session = starget_to_session(scsi_target(sc->device));
+ session = cls_session->dd_data;
+
+ spin_lock_bh(&session->lock);
+ if (!aborted_task || !aborted_task->sc) {
+ /* we raced */
+ spin_unlock_bh(&session->lock);
+ return SUCCESS;
+ }
+
+ aborted_io_task = aborted_task->dd_data;
+ if (!aborted_io_task->scsi_cmnd) {
+ /* raced or invalid command */
+ spin_unlock_bh(&session->lock);
+ return SUCCESS;
+ }
+ spin_unlock_bh(&session->lock);
+ conn = aborted_task->conn;
+ beiscsi_conn = conn->dd_data;
+ phba = beiscsi_conn->phba;
+
+ /* invalidate iocb */
+ cid = beiscsi_conn->beiscsi_conn_cid;
+ inv_tbl = phba->inv_tbl;
+ memset(inv_tbl, 0x0, sizeof(*inv_tbl));
+ inv_tbl->cid = cid;
+ inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
+ num_invalidate = 1;
+ tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid);
+ if (!tag) {
+ shost_printk(KERN_WARNING, phba->shost,
+ "mgmt_invalidate_icds could not be"
+ " submitted\n");
+ return FAILED;
+ } else {
+ wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+ phba->ctrl.mcc_numtag[tag]);
+ free_mcc_tag(&phba->ctrl, tag);
+ }
+
+ return iscsi_eh_abort(sc);
+}
+
+static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
+{
+ struct iscsi_task *abrt_task;
+ struct beiscsi_io_task *abrt_io_task;
+ struct iscsi_conn *conn;
+ struct beiscsi_conn *beiscsi_conn;
+ struct beiscsi_hba *phba;
+ struct iscsi_session *session;
+ struct iscsi_cls_session *cls_session;
+ struct invalidate_command_table *inv_tbl;
+ unsigned int cid, tag, i, num_invalidate;
+ int rc = FAILED;
+
+ /* invalidate iocbs */
+ cls_session = starget_to_session(scsi_target(sc->device));
+ session = cls_session->dd_data;
+ spin_lock_bh(&session->lock);
+ if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
+ goto unlock;
+
+ conn = session->leadconn;
+ beiscsi_conn = conn->dd_data;
+ phba = beiscsi_conn->phba;
+ cid = beiscsi_conn->beiscsi_conn_cid;
+ inv_tbl = phba->inv_tbl;
+ memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
+ num_invalidate = 0;
+ for (i = 0; i < conn->session->cmds_max; i++) {
+ abrt_task = conn->session->cmds[i];
+ abrt_io_task = abrt_task->dd_data;
+ if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
+ continue;
+
+ if (abrt_task->sc->device->lun != abrt_task->sc->device->lun)
+ continue;
+
+ inv_tbl->cid = cid;
+ inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
+ num_invalidate++;
+ inv_tbl++;
+ }
+ spin_unlock_bh(&session->lock);
+ inv_tbl = phba->inv_tbl;
+
+ tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid);
+ if (!tag) {
+ shost_printk(KERN_WARNING, phba->shost,
+ "mgmt_invalidate_icds could not be"
+ " submitted\n");
+ return FAILED;
+ } else {
+ wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+ phba->ctrl.mcc_numtag[tag]);
+ free_mcc_tag(&phba->ctrl, tag);
+ }
+
+ return iscsi_eh_device_reset(sc);
+unlock:
+ spin_unlock_bh(&session->lock);
+ return rc;
+}
+
/*------------------- PCI Driver operations and data ----------------- */
static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
@@ -74,12 +191,12 @@ static struct scsi_host_template beiscsi_sht = {
.name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
.proc_name = DRV_NAME,
.queuecommand = iscsi_queuecommand,
- .eh_abort_handler = iscsi_eh_abort,
.change_queue_depth = iscsi_change_queue_depth,
.slave_configure = beiscsi_slave_configure,
.target_alloc = iscsi_target_alloc,
- .eh_device_reset_handler = iscsi_eh_device_reset,
- .eh_target_reset_handler = iscsi_eh_target_reset,
+ .eh_abort_handler = beiscsi_eh_abort,
+ .eh_device_reset_handler = beiscsi_eh_device_reset,
+ .eh_target_reset_handler = iscsi_eh_session_reset,
.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
.can_queue = BE2_IO_DEPTH,
.this_id = -1,
@@ -242,7 +359,7 @@ static void beiscsi_get_params(struct beiscsi_hba *phba)
+ BE2_TMFS
+ BE2_NOPOUT_REQ));
phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
- phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;;
+ phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;;
phba->params.num_sge_per_io = BE2_SGE;
phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
@@ -946,14 +1063,18 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
case HWH_TYPE_IO:
case HWH_TYPE_IO_RD:
if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
- ISCSI_OP_NOOP_OUT) {
+ ISCSI_OP_NOOP_OUT)
be_complete_nopin_resp(beiscsi_conn, task, psol);
- } else
+ else
be_complete_io(beiscsi_conn, task, psol);
break;
case HWH_TYPE_LOGOUT:
- be_complete_logout(beiscsi_conn, task, psol);
+ if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
+ be_complete_logout(beiscsi_conn, task, psol);
+ else
+ be_complete_tmf(beiscsi_conn, task, psol);
+
break;
case HWH_TYPE_LOGIN:
@@ -962,10 +1083,6 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
"- Solicited path \n");
break;
- case HWH_TYPE_TMF:
- be_complete_tmf(beiscsi_conn, task, psol);
- break;
-
case HWH_TYPE_NOP:
be_complete_nopin_resp(beiscsi_conn, task, psol);
break;
@@ -2052,7 +2169,7 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
((sizeof(struct iscsi_wrb) *
phba->params.wrbs_per_cxn));
- for (index = 0; index < phba->params.cxns_per_ctrl; index += 2) {
+ for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
pwrb_context = &phwi_ctrlr->wrb_context[index];
if (num_cxn_wrb) {
for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
@@ -3073,14 +3190,18 @@ static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p \n", reg, addr);
iowrite32(reg, addr);
- for (i = 0; i <= phba->num_cpus; i++) {
- eq = &phwi_context->be_eq[i].q;
+ if (!phba->msix_enabled) {
+ eq = &phwi_context->be_eq[0].q;
SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
+ } else {
+ for (i = 0; i <= phba->num_cpus; i++) {
+ eq = &phwi_context->be_eq[i].q;
+ SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
+ hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
+ }
}
- } else
- shost_printk(KERN_WARNING, phba->shost,
- "In hwi_enable_intr, Not Enabled \n");
+ }
return true;
}
@@ -3476,19 +3597,13 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
static int beiscsi_mtask(struct iscsi_task *task)
{
- struct beiscsi_io_task *aborted_io_task, *io_task = task->dd_data;
+ struct beiscsi_io_task *io_task = task->dd_data;
struct iscsi_conn *conn = task->conn;
struct beiscsi_conn *beiscsi_conn = conn->dd_data;
struct beiscsi_hba *phba = beiscsi_conn->phba;
- struct iscsi_session *session;
struct iscsi_wrb *pwrb = NULL;
- struct hwi_controller *phwi_ctrlr;
- struct hwi_wrb_context *pwrb_context;
- struct wrb_handle *pwrb_handle;
unsigned int doorbell = 0;
- unsigned int i, cid;
- struct iscsi_task *aborted_task;
- unsigned int tag;
+ unsigned int cid;
cid = beiscsi_conn->beiscsi_conn_cid;
pwrb = io_task->pwrb_handle->pwrb;
@@ -3499,6 +3614,7 @@ static int beiscsi_mtask(struct iscsi_task *task)
io_task->pwrb_handle->wrb_index);
AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
io_task->psgl_handle->sgl_index);
+
switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
case ISCSI_OP_LOGIN:
AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
@@ -3523,33 +3639,6 @@ static int beiscsi_mtask(struct iscsi_task *task)
hwi_write_buffer(pwrb, task);
break;
case ISCSI_OP_SCSI_TMFUNC:
- session = conn->session;
- i = ((struct iscsi_tm *)task->hdr)->rtt;
- phwi_ctrlr = phba->phwi_ctrlr;
- pwrb_context = &phwi_ctrlr->wrb_context[cid -
- phba->fw_config.iscsi_cid_start];
- pwrb_handle = pwrb_context->pwrb_handle_basestd[be32_to_cpu(i)
- >> 16];
- aborted_task = pwrb_handle->pio_handle;
- if (!aborted_task)
- return 0;
-
- aborted_io_task = aborted_task->dd_data;
- if (!aborted_io_task->scsi_cmnd)
- return 0;
-
- tag = mgmt_invalidate_icds(phba,
- aborted_io_task->psgl_handle->sgl_index,
- cid);
- if (!tag) {
- shost_printk(KERN_WARNING, phba->shost,
- "mgmt_invalidate_icds could not be"
- " submitted\n");
- } else {
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
- free_mcc_tag(&phba->ctrl, tag);
- }
AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
INI_TMF_CMD);
AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
@@ -3558,7 +3647,7 @@ static int beiscsi_mtask(struct iscsi_task *task)
case ISCSI_OP_LOGOUT:
AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
- HWH_TYPE_LOGOUT);
+ HWH_TYPE_LOGOUT);
hwi_write_buffer(pwrb, task);
break;
@@ -3584,17 +3673,12 @@ static int beiscsi_mtask(struct iscsi_task *task)
static int beiscsi_task_xmit(struct iscsi_task *task)
{
- struct iscsi_conn *conn = task->conn;
struct beiscsi_io_task *io_task = task->dd_data;
struct scsi_cmnd *sc = task->sc;
- struct beiscsi_conn *beiscsi_conn = conn->dd_data;
struct scatterlist *sg;
int num_sg;
unsigned int writedir = 0, xferlen = 0;
- SE_DEBUG(DBG_LVL_4, "\n cid=%d In beiscsi_task_xmit task=%p conn=%p \t"
- "beiscsi_conn=%p \n", beiscsi_conn->beiscsi_conn_cid,
- task, conn, beiscsi_conn);
if (!sc)
return beiscsi_mtask(task);
@@ -3699,7 +3783,6 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
" Failed in beiscsi_hba_alloc \n");
goto disable_pci;
}
- SE_DEBUG(DBG_LVL_8, " phba = %p \n", phba);
switch (pcidev->device) {
case BE_DEVICE_ID1:
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index c53a80ab796c..87ec21280a37 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -257,6 +257,11 @@ struct hba_parameters {
unsigned int num_sge;
};
+struct invalidate_command_table {
+ unsigned short icd;
+ unsigned short cid;
+} __packed;
+
struct beiscsi_hba {
struct hba_parameters params;
struct hwi_controller *phwi_ctrlr;
@@ -329,6 +334,8 @@ struct beiscsi_hba {
struct work_struct work_cqs; /* The work being queued */
struct be_ctrl_info ctrl;
unsigned int generation;
+ struct invalidate_command_table inv_tbl[128];
+
};
struct beiscsi_session {
@@ -491,8 +498,6 @@ struct hwi_async_entry {
struct list_head data_busy_list;
};
-#define BE_MIN_ASYNC_ENTRIES 128
-
struct hwi_async_pdu_context {
struct {
struct be_bus_address pa_base;
@@ -533,7 +538,7 @@ struct hwi_async_pdu_context {
* This is a varying size list! Do not add anything
* after this entry!!
*/
- struct hwi_async_entry async_entry[BE_MIN_ASYNC_ENTRIES];
+ struct hwi_async_entry async_entry[BE2_MAX_SESSIONS * 2];
};
#define PDUCQE_CODE_MASK 0x0000003F
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 317bcd042ced..72617b650a7e 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -145,14 +145,15 @@ unsigned char mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
}
unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
- unsigned int icd, unsigned int cid)
+ struct invalidate_command_table *inv_tbl,
+ unsigned int num_invalidate, unsigned int cid)
{
struct be_dma_mem nonemb_cmd;
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb;
struct be_sge *sge;
struct invalidate_commands_params_in *req;
- unsigned int tag = 0;
+ unsigned int i, tag = 0;
spin_lock(&ctrl->mbox_lock);
tag = alloc_mcc_tag(phba);
@@ -183,9 +184,12 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
sizeof(*req));
req->ref_handle = 0;
req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE;
- req->icd_count = 0;
- req->table[req->icd_count].icd = icd;
- req->table[req->icd_count].cid = cid;
+ for (i = 0; i < num_invalidate; i++) {
+ req->table[i].icd = inv_tbl->icd;
+ req->table[i].cid = inv_tbl->cid;
+ req->icd_count++;
+ inv_tbl++;
+ }
sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
sge->len = cpu_to_le32(nonemb_cmd.size);
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index ecead6a5aa56..3d316b82feb1 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -94,7 +94,8 @@ unsigned char mgmt_upload_connection(struct beiscsi_hba *phba,
unsigned short cid,
unsigned int upload_flag);
unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
- unsigned int icd, unsigned int cid);
+ struct invalidate_command_table *inv_tbl,
+ unsigned int num_invalidate, unsigned int cid);
struct iscsi_invalidate_connection_params_in {
struct be_cmd_req_hdr hdr;
@@ -116,11 +117,6 @@ union iscsi_invalidate_connection_params {
struct iscsi_invalidate_connection_params_out response;
} __packed;
-struct invalidate_command_table {
- unsigned short icd;
- unsigned short cid;
-} __packed;
-
struct invalidate_commands_params_in {
struct be_cmd_req_hdr hdr;
unsigned int ref_handle;
diff --git a/drivers/scsi/bfa/Makefile b/drivers/scsi/bfa/Makefile
index 1d6009490d1c..17e06cae71b2 100644
--- a/drivers/scsi/bfa/Makefile
+++ b/drivers/scsi/bfa/Makefile
@@ -2,14 +2,14 @@ obj-$(CONFIG_SCSI_BFA_FC) := bfa.o
bfa-y := bfad.o bfad_intr.o bfad_os.o bfad_im.o bfad_attr.o bfad_fwimg.o
-bfa-y += bfa_core.o bfa_ioc.o bfa_iocfc.o bfa_fcxp.o bfa_lps.o
-bfa-y += bfa_hw_cb.o bfa_hw_ct.o bfa_intr.o bfa_timer.o bfa_rport.o
+bfa-y += bfa_core.o bfa_ioc.o bfa_ioc_ct.o bfa_ioc_cb.o bfa_iocfc.o bfa_fcxp.o
+bfa-y += bfa_lps.o bfa_hw_cb.o bfa_hw_ct.o bfa_intr.o bfa_timer.o bfa_rport.o
bfa-y += bfa_fcport.o bfa_port.o bfa_uf.o bfa_sgpg.o bfa_module.o bfa_ioim.o
bfa-y += bfa_itnim.o bfa_fcpim.o bfa_tskim.o bfa_log.o bfa_log_module.o
bfa-y += bfa_csdebug.o bfa_sm.o plog.o
-bfa-y += fcbuild.o fabric.o fcpim.o vfapi.o fcptm.o bfa_fcs.o bfa_fcs_port.o
+bfa-y += fcbuild.o fabric.o fcpim.o vfapi.o fcptm.o bfa_fcs.o bfa_fcs_port.o
bfa-y += bfa_fcs_uf.o bfa_fcs_lport.o fab.o fdmi.o ms.o ns.o scn.o loop.o
bfa-y += lport_api.o n2n.o rport.o rport_api.o rport_ftrs.o vport.o
-ccflags-y := -I$(obj) -I$(obj)/include -I$(obj)/include/cna
+ccflags-y := -I$(obj) -I$(obj)/include -I$(obj)/include/cna -DBFA_PERF_BUILD
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 44e2d1155c51..0c08e185a766 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -385,6 +385,15 @@ bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen)
}
/**
+ * Clear the saved firmware trace information of an IOC.
+ */
+void
+bfa_debug_fwsave_clear(struct bfa_s *bfa)
+{
+ bfa_ioc_debug_fwsave_clear(&bfa->ioc);
+}
+
+/**
* Fetch firmware trace data.
*
* @param[in] bfa BFA instance
@@ -399,4 +408,14 @@ bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen)
{
return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen);
}
+
+/**
+ * Reset hw semaphore & usage cnt regs and initialize.
+ */
+void
+bfa_chip_reset(struct bfa_s *bfa)
+{
+ bfa_ioc_ownership_reset(&bfa->ioc);
+ bfa_ioc_pll_init(&bfa->ioc);
+}
#endif
diff --git a/drivers/scsi/bfa/bfa_fcport.c b/drivers/scsi/bfa/bfa_fcport.c
index aef648b55dfc..c589488db0c1 100644
--- a/drivers/scsi/bfa/bfa_fcport.c
+++ b/drivers/scsi/bfa/bfa_fcport.c
@@ -23,40 +23,33 @@
#include <cs/bfa_plog.h>
#include <aen/bfa_aen_port.h>
-BFA_TRC_FILE(HAL, PPORT);
-BFA_MODULE(pport);
-
-#define bfa_pport_callback(__pport, __event) do { \
- if ((__pport)->bfa->fcs) { \
- (__pport)->event_cbfn((__pport)->event_cbarg, (__event)); \
- } else { \
- (__pport)->hcb_event = (__event); \
- bfa_cb_queue((__pport)->bfa, &(__pport)->hcb_qe, \
- __bfa_cb_port_event, (__pport)); \
- } \
-} while (0)
+BFA_TRC_FILE(HAL, FCPORT);
+BFA_MODULE(fcport);
/*
* The port is considered disabled if corresponding physical port or IOC are
* disabled explicitly
*/
#define BFA_PORT_IS_DISABLED(bfa) \
- ((bfa_pport_is_disabled(bfa) == BFA_TRUE) || \
+ ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
(bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
/*
* forward declarations
*/
-static bfa_boolean_t bfa_pport_send_enable(struct bfa_pport_s *port);
-static bfa_boolean_t bfa_pport_send_disable(struct bfa_pport_s *port);
-static void bfa_pport_update_linkinfo(struct bfa_pport_s *pport);
-static void bfa_pport_reset_linkinfo(struct bfa_pport_s *pport);
-static void bfa_pport_set_wwns(struct bfa_pport_s *port);
-static void __bfa_cb_port_event(void *cbarg, bfa_boolean_t complete);
-static void __bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete);
-static void __bfa_cb_port_stats_clr(void *cbarg, bfa_boolean_t complete);
-static void bfa_port_stats_timeout(void *cbarg);
-static void bfa_port_stats_clr_timeout(void *cbarg);
+static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
+static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
+static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
+static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
+static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
+static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
+static void bfa_fcport_callback(struct bfa_fcport_s *fcport,
+ enum bfa_pport_linkstate event);
+static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
+ enum bfa_pport_linkstate event);
+static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
+static void bfa_fcport_stats_get_timeout(void *cbarg);
+static void bfa_fcport_stats_clr_timeout(void *cbarg);
/**
* bfa_pport_private
@@ -65,111 +58,114 @@ static void bfa_port_stats_clr_timeout(void *cbarg);
/**
* BFA port state machine events
*/
-enum bfa_pport_sm_event {
- BFA_PPORT_SM_START = 1, /* start port state machine */
- BFA_PPORT_SM_STOP = 2, /* stop port state machine */
- BFA_PPORT_SM_ENABLE = 3, /* enable port */
- BFA_PPORT_SM_DISABLE = 4, /* disable port state machine */
- BFA_PPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */
- BFA_PPORT_SM_LINKUP = 6, /* firmware linkup event */
- BFA_PPORT_SM_LINKDOWN = 7, /* firmware linkup down */
- BFA_PPORT_SM_QRESUME = 8, /* CQ space available */
- BFA_PPORT_SM_HWFAIL = 9, /* IOC h/w failure */
+enum bfa_fcport_sm_event {
+ BFA_FCPORT_SM_START = 1, /* start port state machine */
+ BFA_FCPORT_SM_STOP = 2, /* stop port state machine */
+ BFA_FCPORT_SM_ENABLE = 3, /* enable port */
+ BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */
+ BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */
+ BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */
+ BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkup down */
+ BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */
+ BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */
};
-static void bfa_pport_sm_uninit(struct bfa_pport_s *pport,
- enum bfa_pport_sm_event event);
-static void bfa_pport_sm_enabling_qwait(struct bfa_pport_s *pport,
- enum bfa_pport_sm_event event);
-static void bfa_pport_sm_enabling(struct bfa_pport_s *pport,
- enum bfa_pport_sm_event event);
-static void bfa_pport_sm_linkdown(struct bfa_pport_s *pport,
- enum bfa_pport_sm_event event);
-static void bfa_pport_sm_linkup(struct bfa_pport_s *pport,
- enum bfa_pport_sm_event event);
-static void bfa_pport_sm_disabling(struct bfa_pport_s *pport,
- enum bfa_pport_sm_event event);
-static void bfa_pport_sm_disabling_qwait(struct bfa_pport_s *pport,
- enum bfa_pport_sm_event event);
-static void bfa_pport_sm_disabled(struct bfa_pport_s *pport,
- enum bfa_pport_sm_event event);
-static void bfa_pport_sm_stopped(struct bfa_pport_s *pport,
- enum bfa_pport_sm_event event);
-static void bfa_pport_sm_iocdown(struct bfa_pport_s *pport,
- enum bfa_pport_sm_event event);
-static void bfa_pport_sm_iocfail(struct bfa_pport_s *pport,
- enum bfa_pport_sm_event event);
+/**
+ * BFA port link notification state machine events
+ */
+
+enum bfa_fcport_ln_sm_event {
+ BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */
+ BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */
+ BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */
+};
+
+static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
+static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
+static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
+static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
+static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
+static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
+static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
+static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
+static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
+static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
+static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event);
+
+static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event);
+static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event);
+static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event);
+static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event);
+static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event);
+static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event);
+static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event);
static struct bfa_sm_table_s hal_pport_sm_table[] = {
- {BFA_SM(bfa_pport_sm_uninit), BFA_PPORT_ST_UNINIT},
- {BFA_SM(bfa_pport_sm_enabling_qwait), BFA_PPORT_ST_ENABLING_QWAIT},
- {BFA_SM(bfa_pport_sm_enabling), BFA_PPORT_ST_ENABLING},
- {BFA_SM(bfa_pport_sm_linkdown), BFA_PPORT_ST_LINKDOWN},
- {BFA_SM(bfa_pport_sm_linkup), BFA_PPORT_ST_LINKUP},
- {BFA_SM(bfa_pport_sm_disabling_qwait),
- BFA_PPORT_ST_DISABLING_QWAIT},
- {BFA_SM(bfa_pport_sm_disabling), BFA_PPORT_ST_DISABLING},
- {BFA_SM(bfa_pport_sm_disabled), BFA_PPORT_ST_DISABLED},
- {BFA_SM(bfa_pport_sm_stopped), BFA_PPORT_ST_STOPPED},
- {BFA_SM(bfa_pport_sm_iocdown), BFA_PPORT_ST_IOCDOWN},
- {BFA_SM(bfa_pport_sm_iocfail), BFA_PPORT_ST_IOCDOWN},
+ {BFA_SM(bfa_fcport_sm_uninit), BFA_PPORT_ST_UNINIT},
+ {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PPORT_ST_ENABLING_QWAIT},
+ {BFA_SM(bfa_fcport_sm_enabling), BFA_PPORT_ST_ENABLING},
+ {BFA_SM(bfa_fcport_sm_linkdown), BFA_PPORT_ST_LINKDOWN},
+ {BFA_SM(bfa_fcport_sm_linkup), BFA_PPORT_ST_LINKUP},
+ {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PPORT_ST_DISABLING_QWAIT},
+ {BFA_SM(bfa_fcport_sm_disabling), BFA_PPORT_ST_DISABLING},
+ {BFA_SM(bfa_fcport_sm_disabled), BFA_PPORT_ST_DISABLED},
+ {BFA_SM(bfa_fcport_sm_stopped), BFA_PPORT_ST_STOPPED},
+ {BFA_SM(bfa_fcport_sm_iocdown), BFA_PPORT_ST_IOCDOWN},
+ {BFA_SM(bfa_fcport_sm_iocfail), BFA_PPORT_ST_IOCDOWN},
};
static void
-bfa_pport_aen_post(struct bfa_pport_s *pport, enum bfa_port_aen_event event)
+bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
{
union bfa_aen_data_u aen_data;
- struct bfa_log_mod_s *logmod = pport->bfa->logm;
- wwn_t pwwn = pport->pwwn;
+ struct bfa_log_mod_s *logmod = fcport->bfa->logm;
+ wwn_t pwwn = fcport->pwwn;
char pwwn_ptr[BFA_STRING_32];
- struct bfa_ioc_attr_s ioc_attr;
+ memset(&aen_data, 0, sizeof(aen_data));
wwn2str(pwwn_ptr, pwwn);
- switch (event) {
- case BFA_PORT_AEN_ONLINE:
- bfa_log(logmod, BFA_AEN_PORT_ONLINE, pwwn_ptr);
- break;
- case BFA_PORT_AEN_OFFLINE:
- bfa_log(logmod, BFA_AEN_PORT_OFFLINE, pwwn_ptr);
- break;
- case BFA_PORT_AEN_ENABLE:
- bfa_log(logmod, BFA_AEN_PORT_ENABLE, pwwn_ptr);
- break;
- case BFA_PORT_AEN_DISABLE:
- bfa_log(logmod, BFA_AEN_PORT_DISABLE, pwwn_ptr);
- break;
- case BFA_PORT_AEN_DISCONNECT:
- bfa_log(logmod, BFA_AEN_PORT_DISCONNECT, pwwn_ptr);
- break;
- case BFA_PORT_AEN_QOS_NEG:
- bfa_log(logmod, BFA_AEN_PORT_QOS_NEG, pwwn_ptr);
- break;
- default:
- break;
- }
+ bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, event), pwwn_ptr);
- bfa_ioc_get_attr(&pport->bfa->ioc, &ioc_attr);
- aen_data.port.ioc_type = ioc_attr.ioc_type;
+ aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
aen_data.port.pwwn = pwwn;
}
static void
-bfa_pport_sm_uninit(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
+bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event)
{
- bfa_trc(pport->bfa, event);
+ bfa_trc(fcport->bfa, event);
switch (event) {
- case BFA_PPORT_SM_START:
+ case BFA_FCPORT_SM_START:
/**
* Start event after IOC is configured and BFA is started.
*/
- if (bfa_pport_send_enable(pport))
- bfa_sm_set_state(pport, bfa_pport_sm_enabling);
+ if (bfa_fcport_send_enable(fcport))
+ bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
else
- bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
break;
- case BFA_PPORT_SM_ENABLE:
+ case BFA_FCPORT_SM_ENABLE:
/**
* Port is persistently configured to be in enabled state. Do
* not change state. Port enabling is done when START event is
@@ -177,389 +173,412 @@ bfa_pport_sm_uninit(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
*/
break;
- case BFA_PPORT_SM_DISABLE:
+ case BFA_FCPORT_SM_DISABLE:
/**
* If a port is persistently configured to be disabled, the
* first event will a port disable request.
*/
- bfa_sm_set_state(pport, bfa_pport_sm_disabled);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
break;
- case BFA_PPORT_SM_HWFAIL:
- bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
+ case BFA_FCPORT_SM_HWFAIL:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
break;
default:
- bfa_sm_fault(pport->bfa, event);
+ bfa_sm_fault(fcport->bfa, event);
}
}
static void
-bfa_pport_sm_enabling_qwait(struct bfa_pport_s *pport,
- enum bfa_pport_sm_event event)
+bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event)
{
- bfa_trc(pport->bfa, event);
+ bfa_trc(fcport->bfa, event);
switch (event) {
- case BFA_PPORT_SM_QRESUME:
- bfa_sm_set_state(pport, bfa_pport_sm_enabling);
- bfa_pport_send_enable(pport);
+ case BFA_FCPORT_SM_QRESUME:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
+ bfa_fcport_send_enable(fcport);
break;
- case BFA_PPORT_SM_STOP:
- bfa_reqq_wcancel(&pport->reqq_wait);
- bfa_sm_set_state(pport, bfa_pport_sm_stopped);
+ case BFA_FCPORT_SM_STOP:
+ bfa_reqq_wcancel(&fcport->reqq_wait);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
break;
- case BFA_PPORT_SM_ENABLE:
+ case BFA_FCPORT_SM_ENABLE:
/**
* Already enable is in progress.
*/
break;
- case BFA_PPORT_SM_DISABLE:
+ case BFA_FCPORT_SM_DISABLE:
/**
* Just send disable request to firmware when room becomes
* available in request queue.
*/
- bfa_sm_set_state(pport, bfa_pport_sm_disabled);
- bfa_reqq_wcancel(&pport->reqq_wait);
- bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
+ bfa_reqq_wcancel(&fcport->reqq_wait);
+ bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
- bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
break;
- case BFA_PPORT_SM_LINKUP:
- case BFA_PPORT_SM_LINKDOWN:
+ case BFA_FCPORT_SM_LINKUP:
+ case BFA_FCPORT_SM_LINKDOWN:
/**
* Possible to get link events when doing back-to-back
* enable/disables.
*/
break;
- case BFA_PPORT_SM_HWFAIL:
- bfa_reqq_wcancel(&pport->reqq_wait);
- bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
+ case BFA_FCPORT_SM_HWFAIL:
+ bfa_reqq_wcancel(&fcport->reqq_wait);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
break;
default:
- bfa_sm_fault(pport->bfa, event);
+ bfa_sm_fault(fcport->bfa, event);
}
}
static void
-bfa_pport_sm_enabling(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
+bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event)
{
- bfa_trc(pport->bfa, event);
+ bfa_trc(fcport->bfa, event);
switch (event) {
- case BFA_PPORT_SM_FWRSP:
- case BFA_PPORT_SM_LINKDOWN:
- bfa_sm_set_state(pport, bfa_pport_sm_linkdown);
+ case BFA_FCPORT_SM_FWRSP:
+ case BFA_FCPORT_SM_LINKDOWN:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
break;
- case BFA_PPORT_SM_LINKUP:
- bfa_pport_update_linkinfo(pport);
- bfa_sm_set_state(pport, bfa_pport_sm_linkup);
+ case BFA_FCPORT_SM_LINKUP:
+ bfa_fcport_update_linkinfo(fcport);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
- bfa_assert(pport->event_cbfn);
- bfa_pport_callback(pport, BFA_PPORT_LINKUP);
+ bfa_assert(fcport->event_cbfn);
+ bfa_fcport_callback(fcport, BFA_PPORT_LINKUP);
break;
- case BFA_PPORT_SM_ENABLE:
+ case BFA_FCPORT_SM_ENABLE:
/**
* Already being enabled.
*/
break;
- case BFA_PPORT_SM_DISABLE:
- if (bfa_pport_send_disable(pport))
- bfa_sm_set_state(pport, bfa_pport_sm_disabling);
+ case BFA_FCPORT_SM_DISABLE:
+ if (bfa_fcport_send_disable(fcport))
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
else
- bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
- bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
+ bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
- bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
break;
- case BFA_PPORT_SM_STOP:
- bfa_sm_set_state(pport, bfa_pport_sm_stopped);
+ case BFA_FCPORT_SM_STOP:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
break;
- case BFA_PPORT_SM_HWFAIL:
- bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
+ case BFA_FCPORT_SM_HWFAIL:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
break;
default:
- bfa_sm_fault(pport->bfa, event);
+ bfa_sm_fault(fcport->bfa, event);
}
}
static void
-bfa_pport_sm_linkdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
+bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event)
{
- bfa_trc(pport->bfa, event);
+ struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
+ bfa_trc(fcport->bfa, event);
switch (event) {
- case BFA_PPORT_SM_LINKUP:
- bfa_pport_update_linkinfo(pport);
- bfa_sm_set_state(pport, bfa_pport_sm_linkup);
- bfa_assert(pport->event_cbfn);
- bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
+ case BFA_FCPORT_SM_LINKUP:
+ bfa_fcport_update_linkinfo(fcport);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
+ bfa_assert(fcport->event_cbfn);
+ bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
- bfa_pport_callback(pport, BFA_PPORT_LINKUP);
- bfa_pport_aen_post(pport, BFA_PORT_AEN_ONLINE);
+
+ if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
+
+ bfa_trc(fcport->bfa, pevent->link_state.fcf.fipenabled);
+ bfa_trc(fcport->bfa, pevent->link_state.fcf.fipfailed);
+
+ if (pevent->link_state.fcf.fipfailed)
+ bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+ BFA_PL_EID_FIP_FCF_DISC, 0,
+ "FIP FCF Discovery Failed");
+ else
+ bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+ BFA_PL_EID_FIP_FCF_DISC, 0,
+ "FIP FCF Discovered");
+ }
+
+ bfa_fcport_callback(fcport, BFA_PPORT_LINKUP);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);
/**
* If QoS is enabled and it is not online,
* Send a separate event.
*/
- if ((pport->cfg.qos_enabled)
- && (bfa_os_ntohl(pport->qos_attr.state) != BFA_QOS_ONLINE))
- bfa_pport_aen_post(pport, BFA_PORT_AEN_QOS_NEG);
+ if ((fcport->cfg.qos_enabled)
+ && (bfa_os_ntohl(fcport->qos_attr.state) != BFA_QOS_ONLINE))
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
break;
- case BFA_PPORT_SM_LINKDOWN:
+ case BFA_FCPORT_SM_LINKDOWN:
/**
* Possible to get link down event.
*/
break;
- case BFA_PPORT_SM_ENABLE:
+ case BFA_FCPORT_SM_ENABLE:
/**
* Already enabled.
*/
break;
- case BFA_PPORT_SM_DISABLE:
- if (bfa_pport_send_disable(pport))
- bfa_sm_set_state(pport, bfa_pport_sm_disabling);
+ case BFA_FCPORT_SM_DISABLE:
+ if (bfa_fcport_send_disable(fcport))
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
else
- bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
- bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
+ bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
- bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
break;
- case BFA_PPORT_SM_STOP:
- bfa_sm_set_state(pport, bfa_pport_sm_stopped);
+ case BFA_FCPORT_SM_STOP:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
break;
- case BFA_PPORT_SM_HWFAIL:
- bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
+ case BFA_FCPORT_SM_HWFAIL:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
break;
default:
- bfa_sm_fault(pport->bfa, event);
+ bfa_sm_fault(fcport->bfa, event);
}
}
static void
-bfa_pport_sm_linkup(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
+bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event)
{
- bfa_trc(pport->bfa, event);
+ bfa_trc(fcport->bfa, event);
switch (event) {
- case BFA_PPORT_SM_ENABLE:
+ case BFA_FCPORT_SM_ENABLE:
/**
* Already enabled.
*/
break;
- case BFA_PPORT_SM_DISABLE:
- if (bfa_pport_send_disable(pport))
- bfa_sm_set_state(pport, bfa_pport_sm_disabling);
+ case BFA_FCPORT_SM_DISABLE:
+ if (bfa_fcport_send_disable(fcport))
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
else
- bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
- bfa_pport_reset_linkinfo(pport);
- bfa_pport_callback(pport, BFA_PPORT_LINKDOWN);
- bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
+ bfa_fcport_reset_linkinfo(fcport);
+ bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN);
+ bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
- bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
- bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
break;
- case BFA_PPORT_SM_LINKDOWN:
- bfa_sm_set_state(pport, bfa_pport_sm_linkdown);
- bfa_pport_reset_linkinfo(pport);
- bfa_pport_callback(pport, BFA_PPORT_LINKDOWN);
- bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
+ case BFA_FCPORT_SM_LINKDOWN:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
+ bfa_fcport_reset_linkinfo(fcport);
+ bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN);
+ bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
- if (BFA_PORT_IS_DISABLED(pport->bfa))
- bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
+ if (BFA_PORT_IS_DISABLED(fcport->bfa))
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
else
- bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
break;
- case BFA_PPORT_SM_STOP:
- bfa_sm_set_state(pport, bfa_pport_sm_stopped);
- bfa_pport_reset_linkinfo(pport);
- if (BFA_PORT_IS_DISABLED(pport->bfa))
- bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
+ case BFA_FCPORT_SM_STOP:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+ bfa_fcport_reset_linkinfo(fcport);
+ if (BFA_PORT_IS_DISABLED(fcport->bfa))
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
else
- bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
break;
- case BFA_PPORT_SM_HWFAIL:
- bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
- bfa_pport_reset_linkinfo(pport);
- bfa_pport_callback(pport, BFA_PPORT_LINKDOWN);
- if (BFA_PORT_IS_DISABLED(pport->bfa))
- bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
+ case BFA_FCPORT_SM_HWFAIL:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
+ bfa_fcport_reset_linkinfo(fcport);
+ bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN);
+ if (BFA_PORT_IS_DISABLED(fcport->bfa))
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
else
- bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
break;
default:
- bfa_sm_fault(pport->bfa, event);
+ bfa_sm_fault(fcport->bfa, event);
}
}
static void
-bfa_pport_sm_disabling_qwait(struct bfa_pport_s *pport,
- enum bfa_pport_sm_event event)
+bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event)
{
- bfa_trc(pport->bfa, event);
+ bfa_trc(fcport->bfa, event);
switch (event) {
- case BFA_PPORT_SM_QRESUME:
- bfa_sm_set_state(pport, bfa_pport_sm_disabling);
- bfa_pport_send_disable(pport);
+ case BFA_FCPORT_SM_QRESUME:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
+ bfa_fcport_send_disable(fcport);
break;
- case BFA_PPORT_SM_STOP:
- bfa_sm_set_state(pport, bfa_pport_sm_stopped);
- bfa_reqq_wcancel(&pport->reqq_wait);
+ case BFA_FCPORT_SM_STOP:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+ bfa_reqq_wcancel(&fcport->reqq_wait);
break;
- case BFA_PPORT_SM_DISABLE:
+ case BFA_FCPORT_SM_DISABLE:
/**
* Already being disabled.
*/
break;
- case BFA_PPORT_SM_LINKUP:
- case BFA_PPORT_SM_LINKDOWN:
+ case BFA_FCPORT_SM_LINKUP:
+ case BFA_FCPORT_SM_LINKDOWN:
/**
* Possible to get link events when doing back-to-back
* enable/disables.
*/
break;
- case BFA_PPORT_SM_HWFAIL:
- bfa_sm_set_state(pport, bfa_pport_sm_iocfail);
- bfa_reqq_wcancel(&pport->reqq_wait);
+ case BFA_FCPORT_SM_HWFAIL:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
+ bfa_reqq_wcancel(&fcport->reqq_wait);
break;
default:
- bfa_sm_fault(pport->bfa, event);
+ bfa_sm_fault(fcport->bfa, event);
}
}
static void
-bfa_pport_sm_disabling(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
+bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event)
{
- bfa_trc(pport->bfa, event);
+ bfa_trc(fcport->bfa, event);
switch (event) {
- case BFA_PPORT_SM_FWRSP:
- bfa_sm_set_state(pport, bfa_pport_sm_disabled);
+ case BFA_FCPORT_SM_FWRSP:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
break;
- case BFA_PPORT_SM_DISABLE:
+ case BFA_FCPORT_SM_DISABLE:
/**
* Already being disabled.
*/
break;
- case BFA_PPORT_SM_ENABLE:
- if (bfa_pport_send_enable(pport))
- bfa_sm_set_state(pport, bfa_pport_sm_enabling);
+ case BFA_FCPORT_SM_ENABLE:
+ if (bfa_fcport_send_enable(fcport))
+ bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
else
- bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
- bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
+ bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
- bfa_pport_aen_post(pport, BFA_PORT_AEN_ENABLE);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
break;
- case BFA_PPORT_SM_STOP:
- bfa_sm_set_state(pport, bfa_pport_sm_stopped);
+ case BFA_FCPORT_SM_STOP:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
break;
- case BFA_PPORT_SM_LINKUP:
- case BFA_PPORT_SM_LINKDOWN:
+ case BFA_FCPORT_SM_LINKUP:
+ case BFA_FCPORT_SM_LINKDOWN:
/**
* Possible to get link events when doing back-to-back
* enable/disables.
*/
break;
- case BFA_PPORT_SM_HWFAIL:
- bfa_sm_set_state(pport, bfa_pport_sm_iocfail);
+ case BFA_FCPORT_SM_HWFAIL:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
break;
default:
- bfa_sm_fault(pport->bfa, event);
+ bfa_sm_fault(fcport->bfa, event);
}
}
static void
-bfa_pport_sm_disabled(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
+bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event)
{
- bfa_trc(pport->bfa, event);
+ bfa_trc(fcport->bfa, event);
switch (event) {
- case BFA_PPORT_SM_START:
+ case BFA_FCPORT_SM_START:
/**
* Ignore start event for a port that is disabled.
*/
break;
- case BFA_PPORT_SM_STOP:
- bfa_sm_set_state(pport, bfa_pport_sm_stopped);
+ case BFA_FCPORT_SM_STOP:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
break;
- case BFA_PPORT_SM_ENABLE:
- if (bfa_pport_send_enable(pport))
- bfa_sm_set_state(pport, bfa_pport_sm_enabling);
+ case BFA_FCPORT_SM_ENABLE:
+ if (bfa_fcport_send_enable(fcport))
+ bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
else
- bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
- bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
+ bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
- bfa_pport_aen_post(pport, BFA_PORT_AEN_ENABLE);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
break;
- case BFA_PPORT_SM_DISABLE:
+ case BFA_FCPORT_SM_DISABLE:
/**
* Already disabled.
*/
break;
- case BFA_PPORT_SM_HWFAIL:
- bfa_sm_set_state(pport, bfa_pport_sm_iocfail);
+ case BFA_FCPORT_SM_HWFAIL:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
break;
default:
- bfa_sm_fault(pport->bfa, event);
+ bfa_sm_fault(fcport->bfa, event);
}
}
static void
-bfa_pport_sm_stopped(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
+bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event)
{
- bfa_trc(pport->bfa, event);
+ bfa_trc(fcport->bfa, event);
switch (event) {
- case BFA_PPORT_SM_START:
- if (bfa_pport_send_enable(pport))
- bfa_sm_set_state(pport, bfa_pport_sm_enabling);
+ case BFA_FCPORT_SM_START:
+ if (bfa_fcport_send_enable(fcport))
+ bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
else
- bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
break;
default:
@@ -574,16 +593,17 @@ bfa_pport_sm_stopped(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
* Port is enabled. IOC is down/failed.
*/
static void
-bfa_pport_sm_iocdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
+bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event)
{
- bfa_trc(pport->bfa, event);
+ bfa_trc(fcport->bfa, event);
switch (event) {
- case BFA_PPORT_SM_START:
- if (bfa_pport_send_enable(pport))
- bfa_sm_set_state(pport, bfa_pport_sm_enabling);
+ case BFA_FCPORT_SM_START:
+ if (bfa_fcport_send_enable(fcport))
+ bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
else
- bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
break;
default:
@@ -598,17 +618,18 @@ bfa_pport_sm_iocdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
* Port is disabled. IOC is down/failed.
*/
static void
-bfa_pport_sm_iocfail(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
+bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
+ enum bfa_fcport_sm_event event)
{
- bfa_trc(pport->bfa, event);
+ bfa_trc(fcport->bfa, event);
switch (event) {
- case BFA_PPORT_SM_START:
- bfa_sm_set_state(pport, bfa_pport_sm_disabled);
+ case BFA_FCPORT_SM_START:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
break;
- case BFA_PPORT_SM_ENABLE:
- bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
+ case BFA_FCPORT_SM_ENABLE:
+ bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
break;
default:
@@ -619,41 +640,226 @@ bfa_pport_sm_iocfail(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
}
}
+/**
+ * Link state is down
+ */
+static void
+bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event)
+{
+ bfa_trc(ln->fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_LN_SM_LINKUP:
+ bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
+ bfa_fcport_queue_cb(ln, BFA_PPORT_LINKUP);
+ break;
+
+ default:
+ bfa_sm_fault(ln->fcport->bfa, event);
+ }
+}
+
+/**
+ * Link state is waiting for down notification
+ */
+static void
+bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event)
+{
+ bfa_trc(ln->fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_LN_SM_LINKUP:
+ bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
+ break;
+
+ case BFA_FCPORT_LN_SM_NOTIFICATION:
+ bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
+ break;
+
+ default:
+ bfa_sm_fault(ln->fcport->bfa, event);
+ }
+}
+
+/**
+ * Link state is waiting for down notification and there is a pending up
+ */
+static void
+bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event)
+{
+ bfa_trc(ln->fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_LN_SM_LINKDOWN:
+ bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
+ break;
+
+ case BFA_FCPORT_LN_SM_NOTIFICATION:
+ bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
+ bfa_fcport_queue_cb(ln, BFA_PPORT_LINKUP);
+ break;
+
+ default:
+ bfa_sm_fault(ln->fcport->bfa, event);
+ }
+}
+
+/**
+ * Link state is up
+ */
+static void
+bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event)
+{
+ bfa_trc(ln->fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_LN_SM_LINKDOWN:
+ bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
+ bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN);
+ break;
+ default:
+ bfa_sm_fault(ln->fcport->bfa, event);
+ }
+}
+
+/**
+ * Link state is waiting for up notification
+ */
+static void
+bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event)
+{
+ bfa_trc(ln->fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_LN_SM_LINKDOWN:
+ bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
+ break;
+
+ case BFA_FCPORT_LN_SM_NOTIFICATION:
+ bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
+ break;
+
+ default:
+ bfa_sm_fault(ln->fcport->bfa, event);
+ }
+}
+
+/**
+ * Link state is waiting for up notification and there is a pending down
+ */
+static void
+bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event)
+{
+ bfa_trc(ln->fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_LN_SM_LINKUP:
+ bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
+ break;
+
+ case BFA_FCPORT_LN_SM_NOTIFICATION:
+ bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
+ bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN);
+ break;
+
+ default:
+ bfa_sm_fault(ln->fcport->bfa, event);
+ }
+}
+
+/**
+ * Link state is waiting for up notification and there are pending down and up
+ */
+static void
+bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
+ enum bfa_fcport_ln_sm_event event)
+{
+ bfa_trc(ln->fcport->bfa, event);
+
+ switch (event) {
+ case BFA_FCPORT_LN_SM_LINKDOWN:
+ bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
+ break;
+
+ case BFA_FCPORT_LN_SM_NOTIFICATION:
+ bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
+ bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN);
+ break;
+
+ default:
+ bfa_sm_fault(ln->fcport->bfa, event);
+ }
+}
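
The six bfa_fcport_ln_sm_* handlers above form a flat function-pointer state machine: the current state is the handler itself, a transition is a pointer assignment, and events are delivered by calling through the pointer. A minimal standalone sketch of the idiom (hypothetical names; the driver's real bfa_sm_* macros live in its state-machine header) looks like:

        /* Minimal sketch of the function-pointer state-machine idiom. */
        #include <stdio.h>

        struct ln;
        typedef void (*ln_sm_t)(struct ln *ln, int event);

        struct ln {
                ln_sm_t sm;                         /* current state == current handler */
        };

        #define sm_set_state(_ln, _state)   ((_ln)->sm = (_state))
        #define sm_send_event(_ln, _event)  ((_ln)->sm((_ln), (_event)))

        enum { EV_LINKUP, EV_LINKDOWN };

        static void ln_sm_dn(struct ln *ln, int event);
        static void ln_sm_up(struct ln *ln, int event);

        static void
        ln_sm_dn(struct ln *ln, int event)
        {
                if (event == EV_LINKUP)
                        sm_set_state(ln, ln_sm_up); /* transition on link-up */
        }

        static void
        ln_sm_up(struct ln *ln, int event)
        {
                if (event == EV_LINKDOWN)
                        sm_set_state(ln, ln_sm_dn); /* transition on link-down */
        }

        int
        main(void)
        {
                struct ln ln;

                sm_set_state(&ln, ln_sm_dn);        /* initial state */
                sm_send_event(&ln, EV_LINKUP);
                printf("up: %d\n", ln.sm == ln_sm_up);
                return 0;
        }
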
/**
* bfa_pport_private
*/
static void
-__bfa_cb_port_event(void *cbarg, bfa_boolean_t complete)
+__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
{
- struct bfa_pport_s *pport = cbarg;
+ struct bfa_fcport_ln_s *ln = cbarg;
if (complete)
- pport->event_cbfn(pport->event_cbarg, pport->hcb_event);
+ ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
+ else
+ bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
}
-#define PPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_pport_stats_u), \
+static void
+bfa_fcport_callback(struct bfa_fcport_s *fcport, enum bfa_pport_linkstate event)
+{
+ if (fcport->bfa->fcs) {
+ fcport->event_cbfn(fcport->event_cbarg, event);
+ return;
+ }
+
+ switch (event) {
+ case BFA_PPORT_LINKUP:
+ bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
+ break;
+ case BFA_PPORT_LINKDOWN:
+ bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
+ break;
+ default:
+ bfa_assert(0);
+ }
+}
+
+static void
+bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_pport_linkstate event)
+{
+ ln->ln_event = event;
+ bfa_cb_queue(ln->fcport->bfa, &ln->ln_qe, __bfa_cb_fcport_event, ln);
+}
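
bfa_fcport_queue_cb() records the link event and defers delivery through the callback queue; __bfa_cb_fcport_event() then either runs the registered callback (complete == BFA_TRUE) or, when the queue is flushed instead of drained, sends BFA_FCPORT_LN_SM_NOTIFICATION so the link-notify state machine still retires its pending notification. A self-contained sketch of that two-outcome pattern (hypothetical names):

        /* Sketch of a deferred callback that must also handle being flushed. */
        #include <stdio.h>

        typedef int boolean_t;

        struct deferred {
                void (*cbfn)(void *cbarg, boolean_t complete);
                void *cbarg;
        };

        static void
        my_cb(void *cbarg, boolean_t complete)
        {
                if (complete)
                        printf("deliver event to listener\n");
                else
                        printf("flushed: unwind pending state instead\n");
        }

        int
        main(void)
        {
                struct deferred d = { my_cb, NULL };

                d.cbfn(d.cbarg, 1);     /* normal completion path */
                d.cbfn(d.cbarg, 0);     /* flush path, e.g. on IOC disable */
                return 0;
        }
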
+
+#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
BFA_CACHELINE_SZ))
static void
-bfa_pport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
+bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
u32 *dm_len)
{
- *dm_len += PPORT_STATS_DMA_SZ;
+ *dm_len += FCPORT_STATS_DMA_SZ;
}
static void
-bfa_pport_qresume(void *cbarg)
+bfa_fcport_qresume(void *cbarg)
{
- struct bfa_pport_s *port = cbarg;
+ struct bfa_fcport_s *fcport = cbarg;
- bfa_sm_send_event(port, BFA_PPORT_SM_QRESUME);
+ bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
}
static void
-bfa_pport_mem_claim(struct bfa_pport_s *pport, struct bfa_meminfo_s *meminfo)
+bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
{
u8 *dm_kva;
u64 dm_pa;
@@ -661,12 +867,12 @@ bfa_pport_mem_claim(struct bfa_pport_s *pport, struct bfa_meminfo_s *meminfo)
dm_kva = bfa_meminfo_dma_virt(meminfo);
dm_pa = bfa_meminfo_dma_phys(meminfo);
- pport->stats_kva = dm_kva;
- pport->stats_pa = dm_pa;
- pport->stats = (union bfa_pport_stats_u *)dm_kva;
+ fcport->stats_kva = dm_kva;
+ fcport->stats_pa = dm_pa;
+ fcport->stats = (union bfa_fcport_stats_u *)dm_kva;
- dm_kva += PPORT_STATS_DMA_SZ;
- dm_pa += PPORT_STATS_DMA_SZ;
+ dm_kva += FCPORT_STATS_DMA_SZ;
+ dm_pa += FCPORT_STATS_DMA_SZ;
bfa_meminfo_dma_virt(meminfo) = dm_kva;
bfa_meminfo_dma_phys(meminfo) = dm_pa;
@@ -676,18 +882,21 @@ bfa_pport_mem_claim(struct bfa_pport_s *pport, struct bfa_meminfo_s *meminfo)
* Memory initialization.
*/
static void
-bfa_pport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
- struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
- struct bfa_pport_cfg_s *port_cfg = &pport->cfg;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+ struct bfa_pport_cfg_s *port_cfg = &fcport->cfg;
+ struct bfa_fcport_ln_s *ln = &fcport->ln;
- bfa_os_memset(pport, 0, sizeof(struct bfa_pport_s));
- pport->bfa = bfa;
+ bfa_os_memset(fcport, 0, sizeof(struct bfa_fcport_s));
+ fcport->bfa = bfa;
+ ln->fcport = fcport;
- bfa_pport_mem_claim(pport, meminfo);
+ bfa_fcport_mem_claim(fcport, meminfo);
- bfa_sm_set_state(pport, bfa_pport_sm_uninit);
+ bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
+ bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
/**
* initialize and set default configuration
@@ -699,30 +908,30 @@ bfa_pport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
port_cfg->trl_def_speed = BFA_PPORT_SPEED_1GBPS;
- bfa_reqq_winit(&pport->reqq_wait, bfa_pport_qresume, pport);
+ bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
}
static void
-bfa_pport_initdone(struct bfa_s *bfa)
+bfa_fcport_initdone(struct bfa_s *bfa)
{
- struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
/**
* Initialize port attributes from IOC hardware data.
*/
- bfa_pport_set_wwns(pport);
- if (pport->cfg.maxfrsize == 0)
- pport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
- pport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
- pport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
+ bfa_fcport_set_wwns(fcport);
+ if (fcport->cfg.maxfrsize == 0)
+ fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
+ fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
+ fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
- bfa_assert(pport->cfg.maxfrsize);
- bfa_assert(pport->cfg.rx_bbcredit);
- bfa_assert(pport->speed_sup);
+ bfa_assert(fcport->cfg.maxfrsize);
+ bfa_assert(fcport->cfg.rx_bbcredit);
+ bfa_assert(fcport->speed_sup);
}
static void
-bfa_pport_detach(struct bfa_s *bfa)
+bfa_fcport_detach(struct bfa_s *bfa)
{
}
@@ -730,95 +939,97 @@ bfa_pport_detach(struct bfa_s *bfa)
* Called when IOC is ready.
*/
static void
-bfa_pport_start(struct bfa_s *bfa)
+bfa_fcport_start(struct bfa_s *bfa)
{
- bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_START);
+ bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
}
/**
* Called before IOC is stopped.
*/
static void
-bfa_pport_stop(struct bfa_s *bfa)
+bfa_fcport_stop(struct bfa_s *bfa)
{
- bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_STOP);
+ bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
}
/**
* Called when IOC failure is detected.
*/
static void
-bfa_pport_iocdisable(struct bfa_s *bfa)
+bfa_fcport_iocdisable(struct bfa_s *bfa)
{
- bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_HWFAIL);
+ bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_HWFAIL);
}
static void
-bfa_pport_update_linkinfo(struct bfa_pport_s *pport)
+bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
- struct bfi_pport_event_s *pevent = pport->event_arg.i2hmsg.event;
+ struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
- pport->speed = pevent->link_state.speed;
- pport->topology = pevent->link_state.topology;
+ fcport->speed = pevent->link_state.speed;
+ fcport->topology = pevent->link_state.topology;
- if (pport->topology == BFA_PPORT_TOPOLOGY_LOOP)
- pport->myalpa = pevent->link_state.tl.loop_info.myalpa;
+ if (fcport->topology == BFA_PPORT_TOPOLOGY_LOOP)
+ fcport->myalpa =
+ pevent->link_state.tl.loop_info.myalpa;
/*
* QoS Details
*/
- bfa_os_assign(pport->qos_attr, pevent->link_state.qos_attr);
- bfa_os_assign(pport->qos_vc_attr, pevent->link_state.qos_vc_attr);
+ bfa_os_assign(fcport->qos_attr, pevent->link_state.qos_attr);
+ bfa_os_assign(fcport->qos_vc_attr, pevent->link_state.qos_vc_attr);
- bfa_trc(pport->bfa, pport->speed);
- bfa_trc(pport->bfa, pport->topology);
+ bfa_trc(fcport->bfa, fcport->speed);
+ bfa_trc(fcport->bfa, fcport->topology);
}
static void
-bfa_pport_reset_linkinfo(struct bfa_pport_s *pport)
+bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
{
- pport->speed = BFA_PPORT_SPEED_UNKNOWN;
- pport->topology = BFA_PPORT_TOPOLOGY_NONE;
+ fcport->speed = BFA_PPORT_SPEED_UNKNOWN;
+ fcport->topology = BFA_PPORT_TOPOLOGY_NONE;
}
/**
* Send port enable message to firmware.
*/
static bfa_boolean_t
-bfa_pport_send_enable(struct bfa_pport_s *port)
+bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
{
- struct bfi_pport_enable_req_s *m;
+ struct bfi_fcport_enable_req_s *m;
/**
* Increment message tag before queue check, so that responses to old
* requests are discarded.
*/
- port->msgtag++;
+ fcport->msgtag++;
/**
* check for room in queue to send request now
*/
- m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
+ m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
if (!m) {
- bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->reqq_wait);
+ bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
+ &fcport->reqq_wait);
return BFA_FALSE;
}
- bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_ENABLE_REQ,
- bfa_lpuid(port->bfa));
- m->nwwn = port->nwwn;
- m->pwwn = port->pwwn;
- m->port_cfg = port->cfg;
- m->msgtag = port->msgtag;
- m->port_cfg.maxfrsize = bfa_os_htons(port->cfg.maxfrsize);
- bfa_dma_be_addr_set(m->stats_dma_addr, port->stats_pa);
- bfa_trc(port->bfa, m->stats_dma_addr.a32.addr_lo);
- bfa_trc(port->bfa, m->stats_dma_addr.a32.addr_hi);
+ bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
+ bfa_lpuid(fcport->bfa));
+ m->nwwn = fcport->nwwn;
+ m->pwwn = fcport->pwwn;
+ m->port_cfg = fcport->cfg;
+ m->msgtag = fcport->msgtag;
+ m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize);
+ bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
+ bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
+ bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
/**
* queue I/O message to firmware
*/
- bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
+ bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
return BFA_TRUE;
}
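
The msgtag bump before the queue-room check is what makes retries safe: if an enable request is reissued, any response still in flight for the older request carries a stale tag and is dropped when the ISR compares tags (see the BFI_FCPORT_I2H_ENABLE_RSP case further down). In miniature, with hypothetical names:

        /* Hypothetical sketch of the msgtag handshake. */
        #include <stdio.h>

        struct port { unsigned short msgtag; };
        struct rsp  { unsigned short msgtag; };   /* tag echoed by firmware */

        static void
        isr_enable_rsp(struct port *p, struct rsp *r)
        {
                if (p->msgtag != r->msgtag) {
                        printf("stale response, dropped\n");
                        return;
                }
                printf("response matches newest request\n");
        }

        int
        main(void)
        {
                struct port p = { .msgtag = 1 };
                struct rsp old = { .msgtag = 1 };

                p.msgtag++;                       /* request reissued */
                isr_enable_rsp(&p, &old);         /* old reply is ignored */
                return 0;
        }
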
@@ -826,74 +1037,226 @@ bfa_pport_send_enable(struct bfa_pport_s *port)
* Send port disable message to firmware.
*/
static bfa_boolean_t
-bfa_pport_send_disable(struct bfa_pport_s *port)
+bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
{
- bfi_pport_disable_req_t *m;
+ struct bfi_fcport_req_s *m;
/**
* Increment message tag before queue check, so that responses to old
* requests are discarded.
*/
- port->msgtag++;
+ fcport->msgtag++;
/**
* check for room in queue to send request now
*/
- m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
+ m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
if (!m) {
- bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->reqq_wait);
+ bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
+ &fcport->reqq_wait);
return BFA_FALSE;
}
- bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_DISABLE_REQ,
- bfa_lpuid(port->bfa));
- m->msgtag = port->msgtag;
+ bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
+ bfa_lpuid(fcport->bfa));
+ m->msgtag = fcport->msgtag;
/**
* queue I/O message to firmware
*/
- bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
+ bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
return BFA_TRUE;
}
static void
-bfa_pport_set_wwns(struct bfa_pport_s *port)
+bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
{
- port->pwwn = bfa_ioc_get_pwwn(&port->bfa->ioc);
- port->nwwn = bfa_ioc_get_nwwn(&port->bfa->ioc);
+ fcport->pwwn = bfa_ioc_get_pwwn(&fcport->bfa->ioc);
+ fcport->nwwn = bfa_ioc_get_nwwn(&fcport->bfa->ioc);
- bfa_trc(port->bfa, port->pwwn);
- bfa_trc(port->bfa, port->nwwn);
+ bfa_trc(fcport->bfa, fcport->pwwn);
+ bfa_trc(fcport->bfa, fcport->nwwn);
}
static void
-bfa_port_send_txcredit(void *port_cbarg)
+bfa_fcport_send_txcredit(void *port_cbarg)
{
- struct bfa_pport_s *port = port_cbarg;
- struct bfi_pport_set_svc_params_req_s *m;
+ struct bfa_fcport_s *fcport = port_cbarg;
+ struct bfi_fcport_set_svc_params_req_s *m;
/**
* check for room in queue to send request now
*/
- m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
+ m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
if (!m) {
- bfa_trc(port->bfa, port->cfg.tx_bbcredit);
+ bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
return;
}
- bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_SET_SVC_PARAMS_REQ,
- bfa_lpuid(port->bfa));
- m->tx_bbcredit = bfa_os_htons((u16) port->cfg.tx_bbcredit);
+ bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
+ bfa_lpuid(fcport->bfa));
+ m->tx_bbcredit = bfa_os_htons((u16) fcport->cfg.tx_bbcredit);
/**
* queue I/O message to firmware
*/
- bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
+ bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
+static void
+bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
+ struct bfa_qos_stats_s *s)
+{
+ u32 *dip = (u32 *) d;
+ u32 *sip = (u32 *) s;
+ int i;
+
+ /* swap the 32-bit fields */
+ for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
+ dip[i] = bfa_os_ntohl(sip[i]);
+}
+static void
+bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
+ struct bfa_fcoe_stats_s *s)
+{
+ u32 *dip = (u32 *) d;
+ u32 *sip = (u32 *) s;
+ int i;
+
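+ /* swap 64-bit counters as pairs of 32-bit words, per host endianness */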
+ for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
+ i = i + 2) {
+#ifdef __BIGENDIAN
+ dip[i] = bfa_os_ntohl(sip[i]);
+ dip[i + 1] = bfa_os_ntohl(sip[i + 1]);
+#else
+ dip[i] = bfa_os_ntohl(sip[i + 1]);
+ dip[i + 1] = bfa_os_ntohl(sip[i]);
+#endif
+ }
+}
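
The FCoE counters are 64-bit values shipped big-endian as two 32-bit words, so each word is ntohl()'d and, on little-endian hosts, the pair is also exchanged. A user-space demo of the same transform (the runtime endianness probe stands in for the kernel's __BIGENDIAN compile-time switch):

        /* Demo: 64-bit big-endian value carried as two 32-bit words. */
        #include <stdint.h>
        #include <stdio.h>
        #include <arpa/inet.h>          /* ntohl(), htonl() */

        static void
        swap64_words(uint32_t *dip, const uint32_t *sip)
        {
                union { uint16_t u16; uint8_t u8[2]; } probe = { .u16 = 1 };

                if (probe.u8[0]) {      /* little-endian host: exchange halves */
                        dip[0] = ntohl(sip[1]);
                        dip[1] = ntohl(sip[0]);
                } else {                /* big-endian host: ntohl() is identity */
                        dip[0] = ntohl(sip[0]);
                        dip[1] = ntohl(sip[1]);
                }
        }

        int
        main(void)
        {
                /* 0x1122334455667788 as it arrives from big-endian firmware */
                uint32_t wire[2] = { htonl(0x11223344), htonl(0x55667788) };
                uint64_t host;

                swap64_words((uint32_t *)&host, wire);
                printf("0x%llx\n", (unsigned long long)host);
                return 0;
        }
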
+
+static void
+__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_fcport_s *fcport = cbarg;
+
+ if (complete) {
+ if (fcport->stats_status == BFA_STATUS_OK) {
+
+ /* Swap FC QoS or FCoE stats */
+ if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
+ bfa_fcport_qos_stats_swap(
+ &fcport->stats_ret->fcqos,
+ &fcport->stats->fcqos);
+ else
+ bfa_fcport_fcoe_stats_swap(
+ &fcport->stats_ret->fcoe,
+ &fcport->stats->fcoe);
+ }
+ fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
+ } else {
+ fcport->stats_busy = BFA_FALSE;
+ fcport->stats_status = BFA_STATUS_OK;
+ }
+}
+
+static void
+bfa_fcport_stats_get_timeout(void *cbarg)
+{
+ struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
+
+ bfa_trc(fcport->bfa, fcport->stats_qfull);
+
+ if (fcport->stats_qfull) {
+ bfa_reqq_wcancel(&fcport->stats_reqq_wait);
+ fcport->stats_qfull = BFA_FALSE;
+ }
+
+ fcport->stats_status = BFA_STATUS_ETIMER;
+ bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
+ fcport);
+}
+
+static void
+bfa_fcport_send_stats_get(void *cbarg)
+{
+ struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
+ struct bfi_fcport_req_s *msg;
+
+ msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
+
+ if (!msg) {
+ fcport->stats_qfull = BFA_TRUE;
+ bfa_reqq_winit(&fcport->stats_reqq_wait,
+ bfa_fcport_send_stats_get, fcport);
+ bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
+ &fcport->stats_reqq_wait);
+ return;
+ }
+ fcport->stats_qfull = BFA_FALSE;
+
+ bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
+ bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
+ bfa_lpuid(fcport->bfa));
+ bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
+}
+
+static void
+__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_fcport_s *fcport = cbarg;
+
+ if (complete) {
+ fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
+ } else {
+ fcport->stats_busy = BFA_FALSE;
+ fcport->stats_status = BFA_STATUS_OK;
+ }
+}
+
+static void
+bfa_fcport_stats_clr_timeout(void *cbarg)
+{
+ struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
+
+ bfa_trc(fcport->bfa, fcport->stats_qfull);
+
+ if (fcport->stats_qfull) {
+ bfa_reqq_wcancel(&fcport->stats_reqq_wait);
+ fcport->stats_qfull = BFA_FALSE;
+ }
+
+ fcport->stats_status = BFA_STATUS_ETIMER;
+ bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
+ __bfa_cb_fcport_stats_clr, fcport);
+}
+
+static void
+bfa_fcport_send_stats_clear(void *cbarg)
+{
+ struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
+ struct bfi_fcport_req_s *msg;
+
+ msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
+
+ if (!msg) {
+ fcport->stats_qfull = BFA_TRUE;
+ bfa_reqq_winit(&fcport->stats_reqq_wait,
+ bfa_fcport_send_stats_clear, fcport);
+ bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
+ &fcport->stats_reqq_wait);
+ return;
+ }
+ fcport->stats_qfull = BFA_FALSE;
+
+ bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
+ bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
+ bfa_lpuid(fcport->bfa));
+ bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
+}
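
Both senders above share the queue-full protocol: when bfa_reqq_next() returns NULL, the sender parks a wait element whose qresume callback is the sender function itself, so the request is transparently reissued once the queue drains (see bfa_reqq_resume() later in this patch). A self-contained sketch of the pattern (hypothetical names):

        /* Hypothetical sketch of the request-queue wait/retry pattern. */
        #include <stdio.h>

        struct wqe {
                void (*qresume)(void *cbarg);   /* re-invoked when queue drains */
                void *cbarg;
        };

        struct port {
                int q_room;                     /* free slots in the request queue */
                struct wqe wait;                /* parked retry, if any */
                int parked;
        };

        static void
        send_req(void *cbarg)
        {
                struct port *p = cbarg;

                if (p->q_room == 0) {           /* queue full: park and retry later */
                        p->wait.qresume = send_req;
                        p->wait.cbarg = p;
                        p->parked = 1;
                        return;
                }
                p->q_room--;
                printf("request posted\n");
        }

        static void
        queue_drained(struct port *p)           /* a slot was freed */
        {
                p->q_room++;
                if (p->parked) {
                        p->parked = 0;
                        p->wait.qresume(p->wait.cbarg);
                }
        }

        int
        main(void)
        {
                struct port p = { .q_room = 0 };

                send_req(&p);                   /* parks: no room */
                queue_drained(&p);              /* retries and posts */
                return 0;
        }
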
/**
* bfa_pport_public
@@ -903,32 +1266,32 @@ bfa_port_send_txcredit(void *port_cbarg)
* Firmware message handler.
*/
void
-bfa_pport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
+bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
- struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
- union bfi_pport_i2h_msg_u i2hmsg;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+ union bfi_fcport_i2h_msg_u i2hmsg;
i2hmsg.msg = msg;
- pport->event_arg.i2hmsg = i2hmsg;
+ fcport->event_arg.i2hmsg = i2hmsg;
switch (msg->mhdr.msg_id) {
- case BFI_PPORT_I2H_ENABLE_RSP:
- if (pport->msgtag == i2hmsg.enable_rsp->msgtag)
- bfa_sm_send_event(pport, BFA_PPORT_SM_FWRSP);
+ case BFI_FCPORT_I2H_ENABLE_RSP:
+ if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
+ bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
break;
- case BFI_PPORT_I2H_DISABLE_RSP:
- if (pport->msgtag == i2hmsg.enable_rsp->msgtag)
- bfa_sm_send_event(pport, BFA_PPORT_SM_FWRSP);
+ case BFI_FCPORT_I2H_DISABLE_RSP:
+ if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
+ bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
break;
- case BFI_PPORT_I2H_EVENT:
+ case BFI_FCPORT_I2H_EVENT:
switch (i2hmsg.event->link_state.linkstate) {
case BFA_PPORT_LINKUP:
- bfa_sm_send_event(pport, BFA_PPORT_SM_LINKUP);
+ bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
break;
case BFA_PPORT_LINKDOWN:
- bfa_sm_send_event(pport, BFA_PPORT_SM_LINKDOWN);
+ bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
break;
case BFA_PPORT_TRUNK_LINKDOWN:
/** todo: event notification */
@@ -936,42 +1299,40 @@ bfa_pport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
}
break;
- case BFI_PPORT_I2H_GET_STATS_RSP:
- case BFI_PPORT_I2H_GET_QOS_STATS_RSP:
+ case BFI_FCPORT_I2H_STATS_GET_RSP:
/*
* check for timer pop before processing the rsp
*/
- if (pport->stats_busy == BFA_FALSE
- || pport->stats_status == BFA_STATUS_ETIMER)
+ if (fcport->stats_busy == BFA_FALSE ||
+ fcport->stats_status == BFA_STATUS_ETIMER)
break;
- bfa_timer_stop(&pport->timer);
- pport->stats_status = i2hmsg.getstats_rsp->status;
- bfa_cb_queue(pport->bfa, &pport->hcb_qe, __bfa_cb_port_stats,
- pport);
+ bfa_timer_stop(&fcport->timer);
+ fcport->stats_status = i2hmsg.pstatsget_rsp->status;
+ bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
+ __bfa_cb_fcport_stats_get, fcport);
break;
- case BFI_PPORT_I2H_CLEAR_STATS_RSP:
- case BFI_PPORT_I2H_CLEAR_QOS_STATS_RSP:
+
+ case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
/*
* check for timer pop before processing the rsp
*/
- if (pport->stats_busy == BFA_FALSE
- || pport->stats_status == BFA_STATUS_ETIMER)
+ if (fcport->stats_busy == BFA_FALSE ||
+ fcport->stats_status == BFA_STATUS_ETIMER)
break;
- bfa_timer_stop(&pport->timer);
- pport->stats_status = BFA_STATUS_OK;
- bfa_cb_queue(pport->bfa, &pport->hcb_qe,
- __bfa_cb_port_stats_clr, pport);
+ bfa_timer_stop(&fcport->timer);
+ fcport->stats_status = BFA_STATUS_OK;
+ bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
+ __bfa_cb_fcport_stats_clr, fcport);
break;
default:
bfa_assert(0);
+ break;
}
}
-
-
/**
* bfa_pport_api
*/
@@ -980,35 +1341,35 @@ bfa_pport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
* Registered callback for port events.
*/
void
-bfa_pport_event_register(struct bfa_s *bfa,
+bfa_fcport_event_register(struct bfa_s *bfa,
void (*cbfn) (void *cbarg, bfa_pport_event_t event),
void *cbarg)
{
- struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- pport->event_cbfn = cbfn;
- pport->event_cbarg = cbarg;
+ fcport->event_cbfn = cbfn;
+ fcport->event_cbarg = cbarg;
}
bfa_status_t
-bfa_pport_enable(struct bfa_s *bfa)
+bfa_fcport_enable(struct bfa_s *bfa)
{
- struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- if (pport->diag_busy)
+ if (fcport->diag_busy)
return BFA_STATUS_DIAG_BUSY;
else if (bfa_sm_cmp_state
- (BFA_PORT_MOD(bfa), bfa_pport_sm_disabling_qwait))
+ (BFA_FCPORT_MOD(bfa), bfa_fcport_sm_disabling_qwait))
return BFA_STATUS_DEVBUSY;
- bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_ENABLE);
+ bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
return BFA_STATUS_OK;
}
bfa_status_t
-bfa_pport_disable(struct bfa_s *bfa)
+bfa_fcport_disable(struct bfa_s *bfa)
{
- bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_DISABLE);
+ bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
return BFA_STATUS_OK;
}
@@ -1016,18 +1377,18 @@ bfa_pport_disable(struct bfa_s *bfa)
* Configure port speed.
*/
bfa_status_t
-bfa_pport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
+bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
{
- struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
bfa_trc(bfa, speed);
- if ((speed != BFA_PPORT_SPEED_AUTO) && (speed > pport->speed_sup)) {
- bfa_trc(bfa, pport->speed_sup);
+ if ((speed != BFA_PPORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
+ bfa_trc(bfa, fcport->speed_sup);
return BFA_STATUS_UNSUPP_SPEED;
}
- pport->cfg.speed = speed;
+ fcport->cfg.speed = speed;
return BFA_STATUS_OK;
}
@@ -1036,23 +1397,23 @@ bfa_pport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
* Get current speed.
*/
enum bfa_pport_speed
-bfa_pport_get_speed(struct bfa_s *bfa)
+bfa_fcport_get_speed(struct bfa_s *bfa)
{
- struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- return port->speed;
+ return fcport->speed;
}
/**
* Configure port topology.
*/
bfa_status_t
-bfa_pport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology)
+bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology)
{
- struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
bfa_trc(bfa, topology);
- bfa_trc(bfa, pport->cfg.topology);
+ bfa_trc(bfa, fcport->cfg.topology);
switch (topology) {
case BFA_PPORT_TOPOLOGY_P2P:
@@ -1064,7 +1425,7 @@ bfa_pport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology)
return BFA_STATUS_EINVAL;
}
- pport->cfg.topology = topology;
+ fcport->cfg.topology = topology;
return BFA_STATUS_OK;
}
@@ -1072,64 +1433,64 @@ bfa_pport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology)
* Get current topology.
*/
enum bfa_pport_topology
-bfa_pport_get_topology(struct bfa_s *bfa)
+bfa_fcport_get_topology(struct bfa_s *bfa)
{
- struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- return port->topology;
+ return fcport->topology;
}
bfa_status_t
-bfa_pport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
+bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
{
- struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
bfa_trc(bfa, alpa);
- bfa_trc(bfa, pport->cfg.cfg_hardalpa);
- bfa_trc(bfa, pport->cfg.hardalpa);
+ bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
+ bfa_trc(bfa, fcport->cfg.hardalpa);
- pport->cfg.cfg_hardalpa = BFA_TRUE;
- pport->cfg.hardalpa = alpa;
+ fcport->cfg.cfg_hardalpa = BFA_TRUE;
+ fcport->cfg.hardalpa = alpa;
return BFA_STATUS_OK;
}
bfa_status_t
-bfa_pport_clr_hardalpa(struct bfa_s *bfa)
+bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
{
- struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- bfa_trc(bfa, pport->cfg.cfg_hardalpa);
- bfa_trc(bfa, pport->cfg.hardalpa);
+ bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
+ bfa_trc(bfa, fcport->cfg.hardalpa);
- pport->cfg.cfg_hardalpa = BFA_FALSE;
+ fcport->cfg.cfg_hardalpa = BFA_FALSE;
return BFA_STATUS_OK;
}
bfa_boolean_t
-bfa_pport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
+bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
{
- struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- *alpa = port->cfg.hardalpa;
- return port->cfg.cfg_hardalpa;
+ *alpa = fcport->cfg.hardalpa;
+ return fcport->cfg.cfg_hardalpa;
}
u8
-bfa_pport_get_myalpa(struct bfa_s *bfa)
+bfa_fcport_get_myalpa(struct bfa_s *bfa)
{
- struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- return port->myalpa;
+ return fcport->myalpa;
}
bfa_status_t
-bfa_pport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
+bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
{
- struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
bfa_trc(bfa, maxfrsize);
- bfa_trc(bfa, pport->cfg.maxfrsize);
+ bfa_trc(bfa, fcport->cfg.maxfrsize);
/*
* within range
@@ -1143,41 +1504,41 @@ bfa_pport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
return BFA_STATUS_INVLD_DFSZ;
- pport->cfg.maxfrsize = maxfrsize;
+ fcport->cfg.maxfrsize = maxfrsize;
return BFA_STATUS_OK;
}
u16
-bfa_pport_get_maxfrsize(struct bfa_s *bfa)
+bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
{
- struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- return port->cfg.maxfrsize;
+ return fcport->cfg.maxfrsize;
}
u32
-bfa_pport_mypid(struct bfa_s *bfa)
+bfa_fcport_mypid(struct bfa_s *bfa)
{
- struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- return port->mypid;
+ return fcport->mypid;
}
u8
-bfa_pport_get_rx_bbcredit(struct bfa_s *bfa)
+bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
{
- struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- return port->cfg.rx_bbcredit;
+ return fcport->cfg.rx_bbcredit;
}
void
-bfa_pport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
+bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
{
- struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- port->cfg.tx_bbcredit = (u8) tx_bbcredit;
- bfa_port_send_txcredit(port);
+ fcport->cfg.tx_bbcredit = (u8) tx_bbcredit;
+ bfa_fcport_send_txcredit(fcport);
}
/**
@@ -1185,302 +1546,192 @@ bfa_pport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
*/
wwn_t
-bfa_pport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
+bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
{
- struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
if (node)
- return pport->nwwn;
+ return fcport->nwwn;
else
- return pport->pwwn;
+ return fcport->pwwn;
}
void
-bfa_pport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr)
+bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr)
{
- struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
bfa_os_memset(attr, 0, sizeof(struct bfa_pport_attr_s));
- attr->nwwn = pport->nwwn;
- attr->pwwn = pport->pwwn;
+ attr->nwwn = fcport->nwwn;
+ attr->pwwn = fcport->pwwn;
- bfa_os_memcpy(&attr->pport_cfg, &pport->cfg,
+ bfa_os_memcpy(&attr->pport_cfg, &fcport->cfg,
sizeof(struct bfa_pport_cfg_s));
/*
* speed attributes
*/
- attr->pport_cfg.speed = pport->cfg.speed;
- attr->speed_supported = pport->speed_sup;
- attr->speed = pport->speed;
+ attr->pport_cfg.speed = fcport->cfg.speed;
+ attr->speed_supported = fcport->speed_sup;
+ attr->speed = fcport->speed;
attr->cos_supported = FC_CLASS_3;
/*
* topology attributes
*/
- attr->pport_cfg.topology = pport->cfg.topology;
- attr->topology = pport->topology;
+ attr->pport_cfg.topology = fcport->cfg.topology;
+ attr->topology = fcport->topology;
/*
* beacon attributes
*/
- attr->beacon = pport->beacon;
- attr->link_e2e_beacon = pport->link_e2e_beacon;
- attr->plog_enabled = bfa_plog_get_setting(pport->bfa->plog);
+ attr->beacon = fcport->beacon;
+ attr->link_e2e_beacon = fcport->link_e2e_beacon;
+ attr->plog_enabled = bfa_plog_get_setting(fcport->bfa->plog);
attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
- attr->port_state = bfa_sm_to_state(hal_pport_sm_table, pport->sm);
- if (bfa_ioc_is_disabled(&pport->bfa->ioc))
+ attr->port_state = bfa_sm_to_state(hal_pport_sm_table, fcport->sm);
+ if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
attr->port_state = BFA_PPORT_ST_IOCDIS;
- else if (bfa_ioc_fw_mismatch(&pport->bfa->ioc))
+ else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
attr->port_state = BFA_PPORT_ST_FWMISMATCH;
}
-static void
-bfa_port_stats_query(void *cbarg)
-{
- struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
- bfi_pport_get_stats_req_t *msg;
-
- msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
-
- if (!msg) {
- port->stats_qfull = BFA_TRUE;
- bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_stats_query,
- port);
- bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
- return;
- }
- port->stats_qfull = BFA_FALSE;
-
- bfa_os_memset(msg, 0, sizeof(bfi_pport_get_stats_req_t));
- bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_GET_STATS_REQ,
- bfa_lpuid(port->bfa));
- bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
-
- return;
-}
-
-static void
-bfa_port_stats_clear(void *cbarg)
-{
- struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
- bfi_pport_clear_stats_req_t *msg;
-
- msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
+#define BFA_FCPORT_STATS_TOV 1000 /* msecs */
- if (!msg) {
- port->stats_qfull = BFA_TRUE;
- bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_stats_clear,
- port);
- bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
- return;
- }
- port->stats_qfull = BFA_FALSE;
-
- bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_stats_req_t));
- bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_STATS_REQ,
- bfa_lpuid(port->bfa));
- bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
- return;
-}
-
-static void
-bfa_port_qos_stats_clear(void *cbarg)
+/**
+ * Fetch port statistics (FCQoS or FCoE).
+ */
+bfa_status_t
+bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
+ bfa_cb_pport_t cbfn, void *cbarg)
{
- struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
- bfi_pport_clear_qos_stats_req_t *msg;
-
- msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- if (!msg) {
- port->stats_qfull = BFA_TRUE;
- bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_qos_stats_clear,
- port);
- bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
- return;
+ if (fcport->stats_busy) {
+ bfa_trc(bfa, fcport->stats_busy);
+ return BFA_STATUS_DEVBUSY;
}
- port->stats_qfull = BFA_FALSE;
- bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_qos_stats_req_t));
- bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ,
- bfa_lpuid(port->bfa));
- bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
- return;
-}
-
-static void
-bfa_pport_stats_swap(union bfa_pport_stats_u *d, union bfa_pport_stats_u *s)
-{
- u32 *dip = (u32 *) d;
- u32 *sip = (u32 *) s;
- int i;
+ fcport->stats_busy = BFA_TRUE;
+ fcport->stats_ret = stats;
+ fcport->stats_cbfn = cbfn;
+ fcport->stats_cbarg = cbarg;
- /*
- * Do 64 bit fields swap first
- */
- for (i = 0;
- i <
- ((sizeof(union bfa_pport_stats_u) -
- sizeof(struct bfa_qos_stats_s)) / sizeof(u32)); i = i + 2) {
-#ifdef __BIGENDIAN
- dip[i] = bfa_os_ntohl(sip[i]);
- dip[i + 1] = bfa_os_ntohl(sip[i + 1]);
-#else
- dip[i] = bfa_os_ntohl(sip[i + 1]);
- dip[i + 1] = bfa_os_ntohl(sip[i]);
-#endif
- }
+ bfa_fcport_send_stats_get(fcport);
- /*
- * Now swap the 32 bit fields
- */
- for (; i < (sizeof(union bfa_pport_stats_u) / sizeof(u32)); ++i)
- dip[i] = bfa_os_ntohl(sip[i]);
+ bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
+ fcport, BFA_FCPORT_STATS_TOV);
+ return BFA_STATUS_OK;
}
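
Callers of the consolidated API supply a response buffer and a completion callback; BFA_STATUS_DEVBUSY is returned while a fetch or clear is already in flight, and BFA_STATUS_ETIMER is reported through the callback if firmware misses the BFA_FCPORT_STATS_TOV window. A hypothetical caller (callback signature shown as the patch uses it, cbarg plus status):

        /* Hypothetical caller of bfa_fcport_get_stats(). */
        static union bfa_fcport_stats_u my_stats;

        static void
        my_stats_cb(void *cbarg, bfa_status_t status)
        {
                if (status == BFA_STATUS_OK) {
                        /* my_stats.fcqos or my_stats.fcoe is swapped and valid */
                } else if (status == BFA_STATUS_ETIMER) {
                        /* firmware did not respond within BFA_FCPORT_STATS_TOV */
                }
        }

        static void
        fetch_stats(struct bfa_s *bfa)
        {
                if (bfa_fcport_get_stats(bfa, &my_stats, my_stats_cb, NULL) ==
                    BFA_STATUS_DEVBUSY) {
                        /* a previous fetch/clear is still pending; try later */
                }
        }
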
-static void
-__bfa_cb_port_stats_clr(void *cbarg, bfa_boolean_t complete)
+/**
+ * Reset port statistics (FCQoS or FCoE).
+ */
+bfa_status_t
+bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
{
- struct bfa_pport_s *port = cbarg;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- if (complete) {
- port->stats_cbfn(port->stats_cbarg, port->stats_status);
- } else {
- port->stats_busy = BFA_FALSE;
- port->stats_status = BFA_STATUS_OK;
+ if (fcport->stats_busy) {
+ bfa_trc(bfa, fcport->stats_busy);
+ return BFA_STATUS_DEVBUSY;
}
-}
-
-static void
-bfa_port_stats_clr_timeout(void *cbarg)
-{
- struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
- bfa_trc(port->bfa, port->stats_qfull);
+ fcport->stats_busy = BFA_TRUE;
+ fcport->stats_cbfn = cbfn;
+ fcport->stats_cbarg = cbarg;
- if (port->stats_qfull) {
- bfa_reqq_wcancel(&port->stats_reqq_wait);
- port->stats_qfull = BFA_FALSE;
- }
+ bfa_fcport_send_stats_clear(fcport);
- port->stats_status = BFA_STATUS_ETIMER;
- bfa_cb_queue(port->bfa, &port->hcb_qe, __bfa_cb_port_stats_clr, port);
+ bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
+ fcport, BFA_FCPORT_STATS_TOV);
+ return BFA_STATUS_OK;
}
-static void
-__bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete)
+/**
+ * Fetch FCQoS port statistics
+ */
+bfa_status_t
+bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
+ bfa_cb_pport_t cbfn, void *cbarg)
{
- struct bfa_pport_s *port = cbarg;
+ /* Meaningful only for FC mode */
+ bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
- if (complete) {
- if (port->stats_status == BFA_STATUS_OK)
- bfa_pport_stats_swap(port->stats_ret, port->stats);
- port->stats_cbfn(port->stats_cbarg, port->stats_status);
- } else {
- port->stats_busy = BFA_FALSE;
- port->stats_status = BFA_STATUS_OK;
- }
+ return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
}
-static void
-bfa_port_stats_timeout(void *cbarg)
+/**
+ * Reset FCQoS port statistics
+ */
+bfa_status_t
+bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
{
- struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
-
- bfa_trc(port->bfa, port->stats_qfull);
+ /* Meaningful only for FC mode */
+ bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
- if (port->stats_qfull) {
- bfa_reqq_wcancel(&port->stats_reqq_wait);
- port->stats_qfull = BFA_FALSE;
- }
-
- port->stats_status = BFA_STATUS_ETIMER;
- bfa_cb_queue(port->bfa, &port->hcb_qe, __bfa_cb_port_stats, port);
+ return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
}
-#define BFA_PORT_STATS_TOV 1000
-
/**
- * Fetch port attributes.
+ * Fetch FCoE port statistics
*/
bfa_status_t
-bfa_pport_get_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats,
- bfa_cb_pport_t cbfn, void *cbarg)
+bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
+ bfa_cb_pport_t cbfn, void *cbarg)
{
- struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
-
- if (port->stats_busy) {
- bfa_trc(bfa, port->stats_busy);
- return BFA_STATUS_DEVBUSY;
- }
-
- port->stats_busy = BFA_TRUE;
- port->stats_ret = stats;
- port->stats_cbfn = cbfn;
- port->stats_cbarg = cbarg;
-
- bfa_port_stats_query(port);
+ /* Meaningful only for FCoE mode */
+ bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
- bfa_timer_start(bfa, &port->timer, bfa_port_stats_timeout, port,
- BFA_PORT_STATS_TOV);
- return BFA_STATUS_OK;
+ return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
}
+/**
+ * Reset FCoE port statistics
+ */
bfa_status_t
-bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
+bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
{
- struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
-
- if (port->stats_busy) {
- bfa_trc(bfa, port->stats_busy);
- return BFA_STATUS_DEVBUSY;
- }
-
- port->stats_busy = BFA_TRUE;
- port->stats_cbfn = cbfn;
- port->stats_cbarg = cbarg;
-
- bfa_port_stats_clear(port);
+ /* Meaningful only for FCoE mode */
+ bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
- bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port,
- BFA_PORT_STATS_TOV);
- return BFA_STATUS_OK;
+ return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
}
bfa_status_t
-bfa_pport_trunk_enable(struct bfa_s *bfa, u8 bitmap)
+bfa_fcport_trunk_enable(struct bfa_s *bfa, u8 bitmap)
{
- struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
bfa_trc(bfa, bitmap);
- bfa_trc(bfa, pport->cfg.trunked);
- bfa_trc(bfa, pport->cfg.trunk_ports);
+ bfa_trc(bfa, fcport->cfg.trunked);
+ bfa_trc(bfa, fcport->cfg.trunk_ports);
if (!bitmap || (bitmap & (bitmap - 1)))
return BFA_STATUS_EINVAL;
- pport->cfg.trunked = BFA_TRUE;
- pport->cfg.trunk_ports = bitmap;
+ fcport->cfg.trunked = BFA_TRUE;
+ fcport->cfg.trunk_ports = bitmap;
return BFA_STATUS_OK;
}
void
-bfa_pport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
+bfa_fcport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
{
- struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- qos_attr->state = bfa_os_ntohl(pport->qos_attr.state);
- qos_attr->total_bb_cr = bfa_os_ntohl(pport->qos_attr.total_bb_cr);
+ qos_attr->state = bfa_os_ntohl(fcport->qos_attr.state);
+ qos_attr->total_bb_cr = bfa_os_ntohl(fcport->qos_attr.total_bb_cr);
}
void
-bfa_pport_qos_get_vc_attr(struct bfa_s *bfa,
+bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
struct bfa_qos_vc_attr_s *qos_vc_attr)
{
- struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
- struct bfa_qos_vc_attr_s *bfa_vc_attr = &pport->qos_vc_attr;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+ struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
u32 i = 0;
qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count);
@@ -1503,119 +1754,89 @@ bfa_pport_qos_get_vc_attr(struct bfa_s *bfa,
}
/**
- * Fetch QoS Stats.
- */
-bfa_status_t
-bfa_pport_get_qos_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats,
- bfa_cb_pport_t cbfn, void *cbarg)
-{
- /*
- * QoS stats is embedded in port stats
- */
- return bfa_pport_get_stats(bfa, stats, cbfn, cbarg);
-}
-
-bfa_status_t
-bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
-{
- struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
-
- if (port->stats_busy) {
- bfa_trc(bfa, port->stats_busy);
- return BFA_STATUS_DEVBUSY;
- }
-
- port->stats_busy = BFA_TRUE;
- port->stats_cbfn = cbfn;
- port->stats_cbarg = cbarg;
-
- bfa_port_qos_stats_clear(port);
-
- bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port,
- BFA_PORT_STATS_TOV);
- return BFA_STATUS_OK;
-}
-
-/**
* Disable trunking on the port.
*/
bfa_status_t
-bfa_pport_trunk_disable(struct bfa_s *bfa)
+bfa_fcport_trunk_disable(struct bfa_s *bfa)
{
return BFA_STATUS_OK;
}
bfa_boolean_t
-bfa_pport_trunk_query(struct bfa_s *bfa, u32 *bitmap)
+bfa_fcport_trunk_query(struct bfa_s *bfa, u32 *bitmap)
{
- struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- *bitmap = port->cfg.trunk_ports;
- return port->cfg.trunked;
+ *bitmap = fcport->cfg.trunk_ports;
+ return fcport->cfg.trunked;
}
bfa_boolean_t
-bfa_pport_is_disabled(struct bfa_s *bfa)
+bfa_fcport_is_disabled(struct bfa_s *bfa)
{
- struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- return bfa_sm_to_state(hal_pport_sm_table, port->sm) ==
+ return bfa_sm_to_state(hal_pport_sm_table, fcport->sm) ==
BFA_PPORT_ST_DISABLED;
}
bfa_boolean_t
-bfa_pport_is_ratelim(struct bfa_s *bfa)
+bfa_fcport_is_ratelim(struct bfa_s *bfa)
{
- struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- return pport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
+ return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
}
void
-bfa_pport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
+bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
{
- struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+ enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
bfa_trc(bfa, on_off);
- bfa_trc(bfa, pport->cfg.qos_enabled);
+ bfa_trc(bfa, fcport->cfg.qos_enabled);
+
+ bfa_trc(bfa, ioc_type);
- pport->cfg.qos_enabled = on_off;
+ if (ioc_type == BFA_IOC_TYPE_FC)
+ fcport->cfg.qos_enabled = on_off;
}
void
-bfa_pport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
+bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
{
- struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
bfa_trc(bfa, on_off);
- bfa_trc(bfa, pport->cfg.ratelimit);
+ bfa_trc(bfa, fcport->cfg.ratelimit);
- pport->cfg.ratelimit = on_off;
- if (pport->cfg.trl_def_speed == BFA_PPORT_SPEED_UNKNOWN)
- pport->cfg.trl_def_speed = BFA_PPORT_SPEED_1GBPS;
+ fcport->cfg.ratelimit = on_off;
+ if (fcport->cfg.trl_def_speed == BFA_PPORT_SPEED_UNKNOWN)
+ fcport->cfg.trl_def_speed = BFA_PPORT_SPEED_1GBPS;
}
/**
* Configure default minimum ratelim speed
*/
bfa_status_t
-bfa_pport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
+bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
{
- struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
bfa_trc(bfa, speed);
/*
* Auto and speeds greater than the supported speed are invalid
*/
- if ((speed == BFA_PPORT_SPEED_AUTO) || (speed > pport->speed_sup)) {
- bfa_trc(bfa, pport->speed_sup);
+ if ((speed == BFA_PPORT_SPEED_AUTO) || (speed > fcport->speed_sup)) {
+ bfa_trc(bfa, fcport->speed_sup);
return BFA_STATUS_UNSUPP_SPEED;
}
- pport->cfg.trl_def_speed = speed;
+ fcport->cfg.trl_def_speed = speed;
return BFA_STATUS_OK;
}
@@ -1624,45 +1845,45 @@ bfa_pport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
* Get default minimum ratelim speed
*/
enum bfa_pport_speed
-bfa_pport_get_ratelim_speed(struct bfa_s *bfa)
+bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
{
- struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- bfa_trc(bfa, pport->cfg.trl_def_speed);
- return pport->cfg.trl_def_speed;
+ bfa_trc(bfa, fcport->cfg.trl_def_speed);
+ return fcport->cfg.trl_def_speed;
}
void
-bfa_pport_busy(struct bfa_s *bfa, bfa_boolean_t status)
+bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status)
{
- struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
bfa_trc(bfa, status);
- bfa_trc(bfa, pport->diag_busy);
+ bfa_trc(bfa, fcport->diag_busy);
- pport->diag_busy = status;
+ fcport->diag_busy = status;
}
void
-bfa_pport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon,
+bfa_fcport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon,
bfa_boolean_t link_e2e_beacon)
{
- struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
bfa_trc(bfa, beacon);
bfa_trc(bfa, link_e2e_beacon);
- bfa_trc(bfa, pport->beacon);
- bfa_trc(bfa, pport->link_e2e_beacon);
+ bfa_trc(bfa, fcport->beacon);
+ bfa_trc(bfa, fcport->link_e2e_beacon);
- pport->beacon = beacon;
- pport->link_e2e_beacon = link_e2e_beacon;
+ fcport->beacon = beacon;
+ fcport->link_e2e_beacon = link_e2e_beacon;
}
bfa_boolean_t
-bfa_pport_is_linkup(struct bfa_s *bfa)
+bfa_fcport_is_linkup(struct bfa_s *bfa)
{
- return bfa_sm_cmp_state(BFA_PORT_MOD(bfa), bfa_pport_sm_linkup);
+ return bfa_sm_cmp_state(BFA_FCPORT_MOD(bfa), bfa_fcport_sm_linkup);
}
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index 7cb39a306ea9..3516172c597c 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -36,6 +36,7 @@
* FCS sub-modules
*/
struct bfa_fcs_mod_s {
+ void (*attach) (struct bfa_fcs_s *fcs);
void (*modinit) (struct bfa_fcs_s *fcs);
void (*modexit) (struct bfa_fcs_s *fcs);
};
@@ -43,12 +44,10 @@ struct bfa_fcs_mod_s {
#define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
static struct bfa_fcs_mod_s fcs_modules[] = {
- BFA_FCS_MODULE(bfa_fcs_pport),
- BFA_FCS_MODULE(bfa_fcs_uf),
- BFA_FCS_MODULE(bfa_fcs_fabric),
- BFA_FCS_MODULE(bfa_fcs_vport),
- BFA_FCS_MODULE(bfa_fcs_rport),
- BFA_FCS_MODULE(bfa_fcs_fcpim),
+ { bfa_fcs_pport_attach, NULL, NULL },
+ { bfa_fcs_uf_attach, NULL, NULL },
+ { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
+ bfa_fcs_fabric_modexit },
};
/**
@@ -71,16 +70,10 @@ bfa_fcs_exit_comp(void *fcs_cbarg)
*/
/**
- * FCS instance initialization.
- *
- * param[in] fcs FCS instance
- * param[in] bfa BFA instance
- * param[in] bfad BFA driver instance
- *
- * return None
+ * FCS attach -- called once at driver attach time to initialize data structures
*/
void
-bfa_fcs_init(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
+bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
bfa_boolean_t min_cfg)
{
int i;
@@ -95,7 +88,24 @@ bfa_fcs_init(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
mod = &fcs_modules[i];
- mod->modinit(fcs);
+ if (mod->attach)
+ mod->attach(fcs);
+ }
+}
+
+/**
+ * fcs initialization, called once after bfa initialization is complete
+ */
+void
+bfa_fcs_init(struct bfa_fcs_s *fcs)
+{
+ int i;
+ struct bfa_fcs_mod_s *mod;
+
+ for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
+ mod = &fcs_modules[i];
+ if (mod->modinit)
+ mod->modinit(fcs);
}
}
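
The old single-phase bfa_fcs_init() is thus split in two: bfa_fcs_attach() builds the FCS data structures at driver attach time, and the new bfa_fcs_init() runs the per-module modinit hooks once BFA initialization has completed. The expected driver-side ordering, roughly (probe-function name is illustrative):

        /* Hypothetical driver-side call order after the attach/init split. */
        static void
        my_probe(struct bfad_s *bfad, struct bfa_s *bfa, struct bfa_fcs_s *fcs)
        {
                bfa_fcs_attach(fcs, bfa, bfad, BFA_FALSE);  /* attach time */

                /* ... initialize BFA / wait for the IOC to come up ... */

                bfa_fcs_init(fcs);      /* after bfa initialization is complete */
        }
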
@@ -127,6 +137,23 @@ bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
}
/**
+ * FCS FDMI Driver Parameter Initialization
+ *
+ * param[in] fcs FCS instance
+ * param[in] fdmi_enable TRUE/FALSE
+ *
+ * return None
+ */
+void
+bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable)
+{
+ fcs->fdmi_enabled = fdmi_enable;
+}
+
+/**
* FCS instance cleanup and exit.
*
* param[in] fcs FCS instance
@@ -143,10 +170,12 @@ bfa_fcs_exit(struct bfa_fcs_s *fcs)
nmods = sizeof(fcs_modules) / sizeof(fcs_modules[0]);
for (i = 0; i < nmods; i++) {
- bfa_wc_up(&fcs->wc);
mod = &fcs_modules[i];
- mod->modexit(fcs);
+ if (mod->modexit) {
+ bfa_wc_up(&fcs->wc);
+ mod->modexit(fcs);
+ }
}
bfa_wc_wait(&fcs->wc);
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index c7ab257f10a7..7c1251c682d8 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -114,7 +114,7 @@ bfa_fcs_port_sm_uninit(struct bfa_fcs_port_s *port,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(port->fcs, event);
}
}
@@ -136,7 +136,7 @@ bfa_fcs_port_sm_init(struct bfa_fcs_port_s *port, enum bfa_fcs_port_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(port->fcs, event);
}
}
@@ -176,7 +176,7 @@ bfa_fcs_port_sm_online(struct bfa_fcs_port_s *port,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(port->fcs, event);
}
}
@@ -214,7 +214,7 @@ bfa_fcs_port_sm_offline(struct bfa_fcs_port_s *port,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(port->fcs, event);
}
}
@@ -234,7 +234,7 @@ bfa_fcs_port_sm_deleting(struct bfa_fcs_port_s *port,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(port->fcs, event);
}
}
@@ -263,30 +263,8 @@ bfa_fcs_port_aen_post(struct bfa_fcs_port_s *port,
bfa_assert(role <= BFA_PORT_ROLE_FCP_MAX);
- switch (event) {
- case BFA_LPORT_AEN_ONLINE:
- bfa_log(logmod, BFA_AEN_LPORT_ONLINE, lpwwn_ptr,
- role_str[role / 2]);
- break;
- case BFA_LPORT_AEN_OFFLINE:
- bfa_log(logmod, BFA_AEN_LPORT_OFFLINE, lpwwn_ptr,
- role_str[role / 2]);
- break;
- case BFA_LPORT_AEN_NEW:
- bfa_log(logmod, BFA_AEN_LPORT_NEW, lpwwn_ptr,
- role_str[role / 2]);
- break;
- case BFA_LPORT_AEN_DELETE:
- bfa_log(logmod, BFA_AEN_LPORT_DELETE, lpwwn_ptr,
- role_str[role / 2]);
- break;
- case BFA_LPORT_AEN_DISCONNECT:
- bfa_log(logmod, BFA_AEN_LPORT_DISCONNECT, lpwwn_ptr,
- role_str[role / 2]);
- break;
- default:
- break;
- }
+ bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, event), lpwwn_ptr,
+ role_str[role/2]);
aen_data.lport.vf_id = port->fabric->vf_id;
aen_data.lport.roles = role;
@@ -873,36 +851,46 @@ bfa_fcs_port_is_online(struct bfa_fcs_port_s *port)
}
/**
- * Logical port initialization of base or virtual port.
- * Called by fabric for base port or by vport for virtual ports.
+ * Attach time initialization of logical ports.
*/
void
-bfa_fcs_lport_init(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs,
- u16 vf_id, struct bfa_port_cfg_s *port_cfg,
- struct bfa_fcs_vport_s *vport)
+bfa_fcs_lport_attach(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs,
+ uint16_t vf_id, struct bfa_fcs_vport_s *vport)
{
lport->fcs = fcs;
lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id);
- bfa_os_assign(lport->port_cfg, *port_cfg);
lport->vport = vport;
lport->lp_tag = (vport) ? bfa_lps_get_tag(vport->lps) :
bfa_lps_get_tag(lport->fabric->lps);
INIT_LIST_HEAD(&lport->rport_q);
lport->num_rports = 0;
+}
+
+/**
+ * Logical port initialization of base or virtual port.
+ * Called by fabric for base port or by vport for virtual ports.
+ */
- lport->bfad_port =
- bfa_fcb_port_new(fcs->bfad, lport, lport->port_cfg.roles,
+void
+bfa_fcs_lport_init(struct bfa_fcs_port_s *lport,
+ struct bfa_port_cfg_s *port_cfg)
+{
+ struct bfa_fcs_vport_s *vport = lport->vport;
+
+ bfa_os_assign(lport->port_cfg, *port_cfg);
+
+ lport->bfad_port = bfa_fcb_port_new(lport->fcs->bfad, lport,
+ lport->port_cfg.roles,
lport->fabric->vf_drv,
vport ? vport->vport_drv : NULL);
+
bfa_fcs_port_aen_post(lport, BFA_LPORT_AEN_NEW);
bfa_sm_set_state(lport, bfa_fcs_port_sm_uninit);
bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
}
-
-
/**
* fcs_lport_api
*/
@@ -921,13 +909,20 @@ bfa_fcs_port_get_attr(struct bfa_fcs_port_s *port,
if (port->fabric) {
port_attr->port_type = bfa_fcs_fabric_port_type(port->fabric);
port_attr->loopback = bfa_fcs_fabric_is_loopback(port->fabric);
+ port_attr->authfail =
+ bfa_fcs_fabric_is_auth_failed(port->fabric);
port_attr->fabric_name = bfa_fcs_port_get_fabric_name(port);
memcpy(port_attr->fabric_ip_addr,
bfa_fcs_port_get_fabric_ipaddr(port),
BFA_FCS_FABRIC_IPADDR_SZ);
- if (port->vport != NULL)
+ if (port->vport != NULL) {
port_attr->port_type = BFA_PPORT_TYPE_VPORT;
+ port_attr->fpma_mac =
+ bfa_lps_get_lp_mac(port->vport->lps);
+ } else
+ port_attr->fpma_mac =
+ bfa_lps_get_lp_mac(port->fabric->lps);
} else {
port_attr->port_type = BFA_PPORT_TYPE_UNKNOWN;
diff --git a/drivers/scsi/bfa/bfa_fcs_port.c b/drivers/scsi/bfa/bfa_fcs_port.c
index 9c4b24e62de1..3c27788cd527 100644
--- a/drivers/scsi/bfa/bfa_fcs_port.c
+++ b/drivers/scsi/bfa/bfa_fcs_port.c
@@ -55,14 +55,7 @@ bfa_fcs_pport_event_handler(void *cbarg, bfa_pport_event_t event)
}
void
-bfa_fcs_pport_modinit(struct bfa_fcs_s *fcs)
+bfa_fcs_pport_attach(struct bfa_fcs_s *fcs)
{
- bfa_pport_event_register(fcs->bfa, bfa_fcs_pport_event_handler,
- fcs);
-}
-
-void
-bfa_fcs_pport_modexit(struct bfa_fcs_s *fcs)
-{
- bfa_fcs_modexit_comp(fcs);
+ bfa_fcport_event_register(fcs->bfa, bfa_fcs_pport_event_handler, fcs);
}
diff --git a/drivers/scsi/bfa/bfa_fcs_uf.c b/drivers/scsi/bfa/bfa_fcs_uf.c
index ad01db6444b2..3d57d48bbae4 100644
--- a/drivers/scsi/bfa/bfa_fcs_uf.c
+++ b/drivers/scsi/bfa/bfa_fcs_uf.c
@@ -93,13 +93,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
}
void
-bfa_fcs_uf_modinit(struct bfa_fcs_s *fcs)
+bfa_fcs_uf_attach(struct bfa_fcs_s *fcs)
{
bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs);
}
-
-void
-bfa_fcs_uf_modexit(struct bfa_fcs_s *fcs)
-{
- bfa_fcs_modexit_comp(fcs);
-}
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
index ede1438619e2..871a4e28575c 100644
--- a/drivers/scsi/bfa/bfa_hw_cb.c
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -53,6 +53,18 @@ bfa_hwcb_reginit(struct bfa_s *bfa)
}
void
+bfa_hwcb_reqq_ack(struct bfa_s *bfa, int reqq)
+{
+}
+
+static void
+bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq)
+{
+ bfa_reg_write(bfa->iocfc.bfa_regs.intr_status,
+ __HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq));
+}
+
+void
bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq)
{
}
@@ -136,6 +148,7 @@ bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
void
bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
{
+ bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix;
bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
}
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
index 51ae5740e6e9..76ceb9a4bf2f 100644
--- a/drivers/scsi/bfa/bfa_hw_ct.c
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -85,6 +85,15 @@ bfa_hwct_reginit(struct bfa_s *bfa)
}
void
+bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq)
+{
+ u32 r32;
+
+ r32 = bfa_reg_read(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
+ bfa_reg_write(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq], r32);
+}
+
+void
bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq)
{
u32 r32;
diff --git a/drivers/scsi/bfa/bfa_intr.c b/drivers/scsi/bfa/bfa_intr.c
index b36540e4ed76..0eba3f930d5b 100644
--- a/drivers/scsi/bfa/bfa_intr.c
+++ b/drivers/scsi/bfa/bfa_intr.c
@@ -15,7 +15,7 @@
* General Public License for more details.
*/
#include <bfa.h>
-#include <bfi/bfi_cbreg.h>
+#include <bfi/bfi_ctreg.h>
#include <bfa_port_priv.h>
#include <bfa_intr_priv.h>
#include <cs/bfa_debug.h>
@@ -34,6 +34,26 @@ bfa_msix_lpu(struct bfa_s *bfa)
bfa_ioc_mbox_isr(&bfa->ioc);
}
+static void
+bfa_reqq_resume(struct bfa_s *bfa, int qid)
+{
+ struct list_head *waitq, *qe, *qen;
+ struct bfa_reqq_wait_s *wqe;
+
+ waitq = bfa_reqq(bfa, qid);
+ list_for_each_safe(qe, qen, waitq) {
+ /**
+ * Callback only as long as there is room in request queue
+ */
+ if (bfa_reqq_full(bfa, qid))
+ break;
+
+ list_del(qe);
+ wqe = (struct bfa_reqq_wait_s *) qe;
+ wqe->qresume(wqe->cbarg);
+ }
+}
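
The _safe iterator matters here because wqe->qresume() normally re-posts the message after the wait element has been unlinked, so the loop must survive deletion of the node it is standing on. In the kernel's list.h the iterator caches the next pointer up front, essentially:

        /* Essence of list_for_each_safe: pre-fetch the next node so the
         * loop body may delete the current one. */
        #define my_list_for_each_safe(pos, n, head) \
                for ((pos) = (head)->next, (n) = (pos)->next; (pos) != (head); \
                     (pos) = (n), (n) = (pos)->next)
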
+
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
@@ -96,7 +116,8 @@ bfa_isr_enable(struct bfa_s *bfa)
bfa_msix_install(bfa);
intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
- __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS);
+ __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS |
+ __HFN_INT_LL_HALT);
if (pci_func == 0)
intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
@@ -127,23 +148,18 @@ bfa_isr_disable(struct bfa_s *bfa)
void
bfa_msix_reqq(struct bfa_s *bfa, int qid)
{
- struct list_head *waitq, *qe, *qen;
- struct bfa_reqq_wait_s *wqe;
+ struct list_head *waitq;
qid &= (BFI_IOC_MAX_CQS - 1);
- waitq = bfa_reqq(bfa, qid);
- list_for_each_safe(qe, qen, waitq) {
- /**
- * Callback only as long as there is room in request queue
- */
- if (bfa_reqq_full(bfa, qid))
- break;
+ bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
- list_del(qe);
- wqe = (struct bfa_reqq_wait_s *) qe;
- wqe->qresume(wqe->cbarg);
- }
+ /**
+ * Resume any pending requests in the corresponding reqq.
+ */
+ waitq = bfa_reqq(bfa, qid);
+ if (!list_empty(waitq))
+ bfa_reqq_resume(bfa, qid);
}
void
@@ -157,26 +173,27 @@ bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
}
void
-bfa_msix_rspq(struct bfa_s *bfa, int rsp_qid)
+bfa_msix_rspq(struct bfa_s *bfa, int qid)
{
- struct bfi_msg_s *m;
- u32 pi, ci;
+ struct bfi_msg_s *m;
+ u32 pi, ci;
+ struct list_head *waitq;
- bfa_trc_fp(bfa, rsp_qid);
+ bfa_trc_fp(bfa, qid);
- rsp_qid &= (BFI_IOC_MAX_CQS - 1);
+ qid &= (BFI_IOC_MAX_CQS - 1);
- bfa->iocfc.hwif.hw_rspq_ack(bfa, rsp_qid);
+ bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);
- ci = bfa_rspq_ci(bfa, rsp_qid);
- pi = bfa_rspq_pi(bfa, rsp_qid);
+ ci = bfa_rspq_ci(bfa, qid);
+ pi = bfa_rspq_pi(bfa, qid);
bfa_trc_fp(bfa, ci);
bfa_trc_fp(bfa, pi);
if (bfa->rme_process) {
while (ci != pi) {
- m = bfa_rspq_elem(bfa, rsp_qid, ci);
+ m = bfa_rspq_elem(bfa, qid, ci);
bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);
bfa_isrs[m->mhdr.msg_class] (bfa, m);
@@ -188,25 +205,59 @@ bfa_msix_rspq(struct bfa_s *bfa, int rsp_qid)
/**
* update CI
*/
- bfa_rspq_ci(bfa, rsp_qid) = pi;
- bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[rsp_qid], pi);
+ bfa_rspq_ci(bfa, qid) = pi;
+ bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi);
bfa_os_mmiowb();
+
+ /**
+ * Resume any pending requests in the corresponding reqq.
+ */
+ waitq = bfa_reqq(bfa, qid);
+ if (!list_empty(waitq))
+ bfa_reqq_resume(bfa, qid);
}
void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
- u32 intr;
+ u32 intr, curr_value;
intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
bfa_msix_lpu(bfa);
- if (intr & (__HFN_INT_ERR_EMC |
- __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 |
- __HFN_INT_ERR_PSS))
+ intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
+ __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);
+
+ if (intr) {
+ if (intr & __HFN_INT_LL_HALT) {
+ /**
+ * If LL_HALT bit is set then FW Init Halt LL Port
+ * Register needs to be cleared as well so Interrupt
+ * Status Register will be cleared.
+ */
+ curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt);
+ curr_value &= ~__FW_INIT_HALT_P;
+ bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value);
+ }
+
+ if (intr & __HFN_INT_ERR_PSS) {
+ /**
+ * ERR_PSS bit needs to be cleared as well in case
+ * interrups are shared so driver's interrupt handler is
+ * still called eventhough it is already masked out.
+ */
+ curr_value = bfa_reg_read(
+ bfa->ioc.ioc_regs.pss_err_status_reg);
+ curr_value &= __PSS_ERR_STATUS_SET;
+ bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg,
+ curr_value);
+ }
+
+ bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr);
bfa_msix_errint(bfa, intr);
+ }
}
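
Ordering is the point of this hunk: LL_HALT and ERR_PSS are level conditions, so their source registers (the __FW_INIT_HALT_P bit, the set bits in pss_err_status_reg) must be cleared before intr_status is acked, or a shared interrupt line would re-assert immediately. The ll_halt read-modify-write clear, in isolation (hypothetical register pointer; compile-only sketch):

        /* Generic read-modify-write clear of a level-triggered halt bit. */
        #include <stdint.h>

        static inline void
        clear_halt_bit(volatile uint32_t *reg, uint32_t halt_bit)
        {
                uint32_t v = *reg;      /* read current value */

                v &= ~halt_bit;         /* drop the halt bit ... */
                *reg = v;               /* ... and write it back */
        }
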
void
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 397d7e9eade5..e038bc9769f6 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -18,7 +18,7 @@
#include <bfa.h>
#include <bfa_ioc.h>
#include <bfa_fwimg_priv.h>
-#include <bfa_trcmod_priv.h>
+#include <cna/bfa_cna_trcmod.h>
#include <cs/bfa_debug.h>
#include <bfi/bfi_ioc.h>
#include <bfi/bfi_ctreg.h>
@@ -27,18 +27,17 @@
#include <log/bfa_log_hal.h>
#include <defs/bfa_defs_pci.h>
-BFA_TRC_FILE(HAL, IOC);
+BFA_TRC_FILE(CNA, IOC);
/**
* IOC local definitions
*/
#define BFA_IOC_TOV 2000 /* msecs */
-#define BFA_IOC_HB_TOV 1000 /* msecs */
-#define BFA_IOC_HB_FAIL_MAX 4
-#define BFA_IOC_HWINIT_MAX 2
+#define BFA_IOC_HWSEM_TOV 500 /* msecs */
+#define BFA_IOC_HB_TOV 500 /* msecs */
+#define BFA_IOC_HWINIT_MAX 2
#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
-#define BFA_IOC_TOV_RECOVER (BFA_IOC_HB_FAIL_MAX * BFA_IOC_HB_TOV \
- + BFA_IOC_TOV)
+#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
#define bfa_ioc_timer_start(__ioc) \
bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
@@ -51,12 +50,25 @@ BFA_TRC_FILE(HAL, IOC);
(sizeof(struct bfa_trc_mod_s) - \
BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
#define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
-#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++)
-#define BFA_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS)
-#define BFA_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
-#define BFA_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
-bfa_boolean_t bfa_auto_recover = BFA_FALSE;
+/**
+ * Asic specific macros : see bfa_ioc_cb.c and bfa_ioc_ct.c for details.
+ */
+
+#define bfa_ioc_firmware_lock(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
+#define bfa_ioc_firmware_unlock(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
+#define bfa_ioc_fwimg_get_chunk(__ioc, __off) \
+ ((__ioc)->ioc_hwif->ioc_fwimg_get_chunk(__ioc, __off))
+#define bfa_ioc_fwimg_get_size(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_fwimg_get_size(__ioc))
+#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
+#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
+#define bfa_ioc_notify_hbfail(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
+
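+/*
+ * Illustrative note (not part of this change): common IOC code dispatches
+ * through this per-asic table, so bfa_ioc_pci_init() below reaches either
+ * bfa_ioc_cb_map_port() or bfa_ioc_ct_map_port() via bfa_ioc_map_port().
+ */
+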
+bfa_boolean_t bfa_auto_recover = BFA_TRUE;
/*
* forward declarations
@@ -64,7 +76,6 @@ bfa_boolean_t bfa_auto_recover = BFA_FALSE;
static void bfa_ioc_aen_post(struct bfa_ioc_s *bfa,
enum bfa_ioc_aen_event event);
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
-static void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
@@ -77,8 +88,6 @@ static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
-static bfa_boolean_t bfa_ioc_firmware_lock(struct bfa_ioc_s *ioc);
-static void bfa_ioc_firmware_unlock(struct bfa_ioc_s *ioc);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
@@ -508,14 +517,19 @@ bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
bfa_trc(ioc, event);
switch (event) {
- case IOC_E_HWERROR:
case IOC_E_FWRSP_DISABLE:
bfa_ioc_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+ break;
+
+ case IOC_E_HWERROR:
+ bfa_ioc_timer_stop(ioc);
/*
* !!! fall through !!!
*/
case IOC_E_TIMEOUT:
+ bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
break;
@@ -608,15 +622,12 @@ bfa_ioc_sm_hbfail_entry(struct bfa_ioc_s *ioc)
* Mark IOC as failed in hardware and stop firmware.
*/
bfa_ioc_lpu_stop(ioc);
- bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_HBFAIL);
+ bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
- if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) {
- bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P);
- /*
- * Wait for halt to take effect
- */
- bfa_reg_read(ioc->ioc_regs.ll_halt);
- }
+ /**
+ * Notify other functions on HB failure.
+ */
+ bfa_ioc_notify_hbfail(ioc);
/**
* Notify driver and common modules registered for notification.
@@ -672,6 +683,12 @@ bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event)
*/
break;
+ case IOC_E_HWERROR:
+ /*
+ * HB failure notification, ignore.
+ */
+ break;
+
default:
bfa_sm_fault(ioc, event);
}
@@ -700,7 +717,7 @@ bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
}
}
-static void
+void
bfa_ioc_sem_timeout(void *ioc_arg)
{
struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;
@@ -708,26 +725,32 @@ bfa_ioc_sem_timeout(void *ioc_arg)
bfa_ioc_hw_sem_get(ioc);
}
-static void
-bfa_ioc_usage_sem_get(struct bfa_ioc_s *ioc)
+bfa_boolean_t
+bfa_ioc_sem_get(bfa_os_addr_t sem_reg)
{
- u32 r32;
- int cnt = 0;
-#define BFA_SEM_SPINCNT 1000
+ u32 r32;
+ int cnt = 0;
+#define BFA_SEM_SPINCNT 3000
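+/* Illustrative: 3000 polls with a 2us delay bound the busy-wait to ~6 ms. */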
- do {
- r32 = bfa_reg_read(ioc->ioc_regs.ioc_usage_sem_reg);
+ r32 = bfa_reg_read(sem_reg);
+
+ while (r32 && (cnt < BFA_SEM_SPINCNT)) {
cnt++;
- if (cnt > BFA_SEM_SPINCNT)
- break;
- } while (r32 != 0);
+ bfa_os_udelay(2);
+ r32 = bfa_reg_read(sem_reg);
+ }
+
+ if (r32 == 0)
+ return BFA_TRUE;
+
bfa_assert(cnt < BFA_SEM_SPINCNT);
+ return BFA_FALSE;
}
-static void
-bfa_ioc_usage_sem_release(struct bfa_ioc_s *ioc)
+void
+bfa_ioc_sem_release(bfa_os_addr_t sem_reg)
{
- bfa_reg_write(ioc->ioc_regs.ioc_usage_sem_reg, 1);
+ bfa_reg_write(sem_reg, 1);
}
static void
@@ -737,7 +760,7 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
/**
* First read to the semaphore register will return 0, subsequent reads
- * will return 1. Semaphore is released by writing 0 to the register
+ * will return 1. Semaphore is released by writing 1 to the register
*/
r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
if (r32 == 0) {
@@ -746,10 +769,10 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
}
bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout,
- ioc, BFA_IOC_TOV);
+ ioc, BFA_IOC_HWSEM_TOV);
}
-static void
+void
bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
{
bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1);
@@ -828,7 +851,7 @@ bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
/**
* Get driver and firmware versions.
*/
-static void
+void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
u32 pgnum, pgoff;
@@ -847,24 +870,10 @@ bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
}
}
-static u32 *
-bfa_ioc_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off)
-{
- if (ioc->ctdev)
- return bfi_image_ct_get_chunk(off);
- return bfi_image_cb_get_chunk(off);
-}
-
-static u32
-bfa_ioc_fwimg_get_size(struct bfa_ioc_s *ioc)
-{
-return (ioc->ctdev) ? bfi_image_ct_size : bfi_image_cb_size;
-}
-
/**
* Returns TRUE if same.
*/
-static bfa_boolean_t
+bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
struct bfi_ioc_image_hdr_s *drv_fwhdr;
@@ -921,95 +930,6 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc)
}
/**
- * Return true if firmware of current driver matches the running firmware.
- */
-static bfa_boolean_t
-bfa_ioc_firmware_lock(struct bfa_ioc_s *ioc)
-{
- enum bfi_ioc_state ioc_fwstate;
- u32 usecnt;
- struct bfi_ioc_image_hdr_s fwhdr;
-
- /**
- * Firmware match check is relevant only for CNA.
- */
- if (!ioc->cna)
- return BFA_TRUE;
-
- /**
- * If bios boot (flash based) -- do not increment usage count
- */
- if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
- return BFA_TRUE;
-
- bfa_ioc_usage_sem_get(ioc);
- usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
-
- /**
- * If usage count is 0, always return TRUE.
- */
- if (usecnt == 0) {
- bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1);
- bfa_ioc_usage_sem_release(ioc);
- bfa_trc(ioc, usecnt);
- return BFA_TRUE;
- }
-
- ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
- bfa_trc(ioc, ioc_fwstate);
-
- /**
- * Use count cannot be non-zero and chip in uninitialized state.
- */
- bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);
-
- /**
- * Check if another driver with a different firmware is active
- */
- bfa_ioc_fwver_get(ioc, &fwhdr);
- if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
- bfa_ioc_usage_sem_release(ioc);
- bfa_trc(ioc, usecnt);
- return BFA_FALSE;
- }
-
- /**
- * Same firmware version. Increment the reference count.
- */
- usecnt++;
- bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
- bfa_ioc_usage_sem_release(ioc);
- bfa_trc(ioc, usecnt);
- return BFA_TRUE;
-}
-
-static void
-bfa_ioc_firmware_unlock(struct bfa_ioc_s *ioc)
-{
- u32 usecnt;
-
- /**
- * Firmware lock is relevant only for CNA.
- * If bios boot (flash based) -- do not decrement usage count
- */
- if (!ioc->cna || (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ))
- return;
-
- /**
- * decrement usage count
- */
- bfa_ioc_usage_sem_get(ioc);
- usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
- bfa_assert(usecnt > 0);
-
- usecnt--;
- bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
- bfa_trc(ioc, usecnt);
-
- bfa_ioc_usage_sem_release(ioc);
-}
-
-/**
* Conditionally flush any pending message from firmware at start.
*/
static void
@@ -1152,33 +1072,27 @@ bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
static void
bfa_ioc_hb_check(void *cbarg)
{
- struct bfa_ioc_s *ioc = cbarg;
- u32 hb_count;
+ struct bfa_ioc_s *ioc = cbarg;
+ u32 hb_count;
hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
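+
+	/*
+	 * Note (illustrative): with the hb_fail retry counter removed, a
+	 * single stalled heartbeat within one BFA_IOC_HB_TOV window (500
+	 * msecs) is enough to trigger recovery.
+	 */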
if (ioc->hb_count == hb_count) {
- ioc->hb_fail++;
- } else {
- ioc->hb_count = hb_count;
- ioc->hb_fail = 0;
- }
-
- if (ioc->hb_fail >= BFA_IOC_HB_FAIL_MAX) {
- bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE, hb_count);
- ioc->hb_fail = 0;
+ bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE,
+ hb_count);
bfa_ioc_recover(ioc);
return;
+ } else {
+ ioc->hb_count = hb_count;
}
bfa_ioc_mbox_poll(ioc);
- bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc,
- BFA_IOC_HB_TOV);
+ bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check,
+ ioc, BFA_IOC_HB_TOV);
}
static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
- ioc->hb_fail = 0;
ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc,
BFA_IOC_HB_TOV);
@@ -1191,112 +1105,6 @@ bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
}
/**
- * Host to LPU mailbox message addresses
- */
-static struct {
- u32 hfn_mbox, lpu_mbox, hfn_pgn;
-} iocreg_fnreg[] = {
- {
- HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0}, {
- HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1}, {
- HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2}, {
- HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3}
-};
-
-/**
- * Host <-> LPU mailbox command/status registers - port 0
- */
-static struct {
- u32 hfn, lpu;
-} iocreg_mbcmd_p0[] = {
- {
- HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT}, {
- HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT}, {
- HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT}, {
- HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT}
-};
-
-/**
- * Host <-> LPU mailbox command/status registers - port 1
- */
-static struct {
- u32 hfn, lpu;
-} iocreg_mbcmd_p1[] = {
- {
- HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT}, {
- HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT}, {
- HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT}, {
- HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT}
-};
-
-/**
- * Shared IRQ handling in INTX mode
- */
-static struct {
- u32 isr, msk;
-} iocreg_shirq_next[] = {
- {
- HOSTFN1_INT_STATUS, HOSTFN1_INT_MSK}, {
- HOSTFN2_INT_STATUS, HOSTFN2_INT_MSK}, {
- HOSTFN3_INT_STATUS, HOSTFN3_INT_MSK}, {
-HOSTFN0_INT_STATUS, HOSTFN0_INT_MSK},};
-
-static void
-bfa_ioc_reg_init(struct bfa_ioc_s *ioc)
-{
- bfa_os_addr_t rb;
- int pcifn = bfa_ioc_pcifn(ioc);
-
- rb = bfa_ioc_bar0(ioc);
-
- ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
- ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
- ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
-
- if (ioc->port_id == 0) {
- ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
- ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
- ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
- ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
- ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
- } else {
- ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
- ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
- ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
- ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
- ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
- }
-
- /**
- * Shared IRQ handling in INTX mode
- */
- ioc->ioc_regs.shirq_isr_next = rb + iocreg_shirq_next[pcifn].isr;
- ioc->ioc_regs.shirq_msk_next = rb + iocreg_shirq_next[pcifn].msk;
-
- /*
- * PSS control registers
- */
- ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
- ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
- ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);
-
- /*
- * IOC semaphore registers and serialization
- */
- ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
- ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
- ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
-
- /**
- * sram memory access
- */
- ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
- ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CB;
- if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT)
- ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
-}
-
-/**
* Initiate a full firmware download.
*/
static void
@@ -1321,9 +1129,6 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
boot_type = BFI_BOOT_TYPE_FLASH;
fwimg = bfa_ioc_fwimg_get_chunk(ioc, chunkno);
- fwimg[BFI_BOOT_TYPE_OFF / sizeof(u32)] = bfa_os_swap32(boot_type);
- fwimg[BFI_BOOT_PARAM_OFF / sizeof(u32)] =
- bfa_os_swap32(boot_param);
pgnum = bfa_ioc_smem_pgnum(ioc, loff);
pgoff = bfa_ioc_smem_pgoff(ioc, loff);
@@ -1332,17 +1137,17 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
for (i = 0; i < bfa_ioc_fwimg_get_size(ioc); i++) {
- if (BFA_FLASH_CHUNK_NO(i) != chunkno) {
- chunkno = BFA_FLASH_CHUNK_NO(i);
+ if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
+ chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
fwimg = bfa_ioc_fwimg_get_chunk(ioc,
- BFA_FLASH_CHUNK_ADDR(chunkno));
+ BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
}
/**
* write smem
*/
bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
- fwimg[BFA_FLASH_OFFSET_IN_CHUNK(i)]);
+ fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
loff += sizeof(u32);
@@ -1358,6 +1163,14 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
bfa_ioc_smem_pgnum(ioc, 0));
+
+ /*
+ * Set boot type and boot param at the end.
+ */
+ bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
+ bfa_os_swap32(boot_type));
+ bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_PARAM_OFF,
+ bfa_os_swap32(boot_param));
}
static void
@@ -1440,168 +1253,10 @@ bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
}
/**
- * Initialize IOC to port mapping.
- */
-
-#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
-static void
-bfa_ioc_map_port(struct bfa_ioc_s *ioc)
-{
- bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
- u32 r32;
-
- /**
- * For crossbow, port id is same as pci function.
- */
- if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_CT) {
- ioc->port_id = bfa_ioc_pcifn(ioc);
- return;
- }
-
- /**
- * For catapult, base port id on personality register and IOC type
- */
- r32 = bfa_reg_read(rb + FNC_PERS_REG);
- r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
- ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
-
- bfa_trc(ioc, bfa_ioc_pcifn(ioc));
- bfa_trc(ioc, ioc->port_id);
-}
-
-
-
-/**
* bfa_ioc_public
*/
/**
-* Set interrupt mode for a function: INTX or MSIX
- */
-void
-bfa_ioc_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
-{
- bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
- u32 r32, mode;
-
- r32 = bfa_reg_read(rb + FNC_PERS_REG);
- bfa_trc(ioc, r32);
-
- mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
- __F0_INTX_STATUS;
-
- /**
- * If already in desired mode, do not change anything
- */
- if (!msix && mode)
- return;
-
- if (msix)
- mode = __F0_INTX_STATUS_MSIX;
- else
- mode = __F0_INTX_STATUS_INTA;
-
- r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
- r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
- bfa_trc(ioc, r32);
-
- bfa_reg_write(rb + FNC_PERS_REG, r32);
-}
-
-bfa_status_t
-bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
-{
- bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
- u32 pll_sclk, pll_fclk, r32;
-
- if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) {
- pll_sclk =
- __APP_PLL_312_ENABLE | __APP_PLL_312_LRESETN |
- __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(0U) |
- __APP_PLL_312_JITLMT0_1(3U) |
- __APP_PLL_312_CNTLMT0_1(1U);
- pll_fclk =
- __APP_PLL_425_ENABLE | __APP_PLL_425_LRESETN |
- __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(0U) |
- __APP_PLL_425_JITLMT0_1(3U) |
- __APP_PLL_425_CNTLMT0_1(1U);
-
- /**
- * For catapult, choose operational mode FC/FCoE
- */
- if (ioc->fcmode) {
- bfa_reg_write((rb + OP_MODE), 0);
- bfa_reg_write((rb + ETH_MAC_SER_REG),
- __APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2
- | __APP_EMS_CHANNEL_SEL);
- } else {
- ioc->pllinit = BFA_TRUE;
- bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE);
- bfa_reg_write((rb + ETH_MAC_SER_REG),
- __APP_EMS_REFCKBUFEN1);
- }
- } else {
- pll_sclk =
- __APP_PLL_312_ENABLE | __APP_PLL_312_LRESETN |
- __APP_PLL_312_P0_1(3U) | __APP_PLL_312_JITLMT0_1(3U) |
- __APP_PLL_312_CNTLMT0_1(3U);
- pll_fclk =
- __APP_PLL_425_ENABLE | __APP_PLL_425_LRESETN |
- __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
- __APP_PLL_425_JITLMT0_1(3U) |
- __APP_PLL_425_CNTLMT0_1(3U);
- }
-
- bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
- bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
-
- bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
- bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
- bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
- bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
- bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
- bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
-
- bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
- __APP_PLL_312_LOGIC_SOFT_RESET);
- bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
- __APP_PLL_312_BYPASS | __APP_PLL_312_LOGIC_SOFT_RESET);
- bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
- __APP_PLL_425_LOGIC_SOFT_RESET);
- bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
- __APP_PLL_425_BYPASS | __APP_PLL_425_LOGIC_SOFT_RESET);
- bfa_os_udelay(2);
- bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
- __APP_PLL_312_LOGIC_SOFT_RESET);
- bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
- __APP_PLL_425_LOGIC_SOFT_RESET);
-
- bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
- pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET);
- bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
- pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET);
-
- /**
- * Wait for PLLs to lock.
- */
- bfa_os_udelay(2000);
- bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
- bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
-
- bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk);
- bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk);
-
- if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) {
- bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START);
- bfa_os_udelay(1000);
- r32 = bfa_reg_read((rb + MBIST_STAT_REG));
- bfa_trc(ioc, r32);
- }
-
- return BFA_STATUS_OK;
-}
-
-/**
* Interface used by diag module to do firmware boot with memory test
* as the entry vector.
*/
@@ -1642,7 +1297,7 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param)
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
- bfa_auto_recover = BFA_FALSE;
+ bfa_auto_recover = auto_recover;
}
@@ -1764,6 +1419,14 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
ioc->ctdev = (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT);
ioc->cna = ioc->ctdev && !ioc->fcmode;
+ /**
+ * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
+ */
+ if (ioc->ctdev)
+ bfa_ioc_set_ct_hwif(ioc);
+ else
+ bfa_ioc_set_cb_hwif(ioc);
+
bfa_ioc_map_port(ioc);
bfa_ioc_reg_init(ioc);
}
@@ -1830,7 +1493,6 @@ return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
void
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
{
- bfa_assert(ioc->auto_recover);
ioc->dbg_fwsave = dbg_fwsave;
ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->auto_recover);
}
@@ -1973,7 +1635,7 @@ bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
((__sm) == BFI_IOC_INITING) || \
((__sm) == BFI_IOC_HWINIT) || \
((__sm) == BFI_IOC_DISABLED) || \
- ((__sm) == BFI_IOC_HBFAIL) || \
+ ((__sm) == BFI_IOC_FAIL) || \
((__sm) == BFI_IOC_CFG_DISABLED))
/**
@@ -2017,46 +1679,28 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
struct bfa_adapter_attr_s *ad_attr)
{
struct bfi_ioc_attr_s *ioc_attr;
- char model[BFA_ADAPTER_MODEL_NAME_LEN];
ioc_attr = ioc->attr;
- bfa_os_memcpy((void *)&ad_attr->serial_num,
- (void *)ioc_attr->brcd_serialnum,
- BFA_ADAPTER_SERIAL_NUM_LEN);
-
- bfa_os_memcpy(&ad_attr->fw_ver, ioc_attr->fw_version, BFA_VERSION_LEN);
- bfa_os_memcpy(&ad_attr->optrom_ver, ioc_attr->optrom_version,
- BFA_VERSION_LEN);
- bfa_os_memcpy(&ad_attr->manufacturer, BFA_MFG_NAME,
- BFA_ADAPTER_MFG_NAME_LEN);
+
+ bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
+ bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
+ bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
+ bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd,
sizeof(struct bfa_mfg_vpd_s));
- ad_attr->nports = BFI_ADAPTER_GETP(NPORTS, ioc_attr->adapter_prop);
- ad_attr->max_speed = BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop);
+ ad_attr->nports = bfa_ioc_get_nports(ioc);
+ ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
- /**
- * model name
- */
- if (BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop) == 10) {
- strcpy(model, "BR-10?0");
- model[5] = '0' + ad_attr->nports;
- } else {
- strcpy(model, "Brocade-??5");
- model[8] =
- '0' + BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop);
- model[9] = '0' + ad_attr->nports;
- }
+ bfa_ioc_get_adapter_model(ioc, ad_attr->model);
+ /* For now, model descr uses same model string */
+ bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
ad_attr->prototype = 1;
else
ad_attr->prototype = 0;
- bfa_os_memcpy(&ad_attr->model, model, BFA_ADAPTER_MODEL_NAME_LEN);
- bfa_os_memcpy(&ad_attr->model_descr, &ad_attr->model,
- BFA_ADAPTER_MODEL_NAME_LEN);
-
ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
ad_attr->mac = bfa_ioc_get_mac(ioc);
@@ -2064,41 +1708,122 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
ad_attr->asic_rev = ioc_attr->asic_rev;
- ad_attr->hw_ver[0] = 'R';
- ad_attr->hw_ver[1] = 'e';
- ad_attr->hw_ver[2] = 'v';
- ad_attr->hw_ver[3] = '-';
- ad_attr->hw_ver[4] = ioc_attr->asic_rev;
- ad_attr->hw_ver[5] = '\0';
+
+ bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
ad_attr->cna_capable = ioc->cna;
}
+enum bfa_ioc_type_e
+bfa_ioc_get_type(struct bfa_ioc_s *ioc)
+{
+ if (!ioc->ctdev || ioc->fcmode)
+ return BFA_IOC_TYPE_FC;
+ else if (ioc->ioc_mc == BFI_MC_IOCFC)
+ return BFA_IOC_TYPE_FCoE;
+ else if (ioc->ioc_mc == BFI_MC_LL)
+ return BFA_IOC_TYPE_LL;
+ else {
+ bfa_assert(ioc->ioc_mc == BFI_MC_LL);
+ return BFA_IOC_TYPE_LL;
+ }
+}
+
+void
+bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
+{
+ bfa_os_memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
+ bfa_os_memcpy((void *)serial_num,
+ (void *)ioc->attr->brcd_serialnum,
+ BFA_ADAPTER_SERIAL_NUM_LEN);
+}
+
+void
+bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
+{
+ bfa_os_memset((void *)fw_ver, 0, BFA_VERSION_LEN);
+ bfa_os_memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
+}
+
+void
+bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
+{
+ bfa_assert(chip_rev);
+
+ bfa_os_memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
+
+ chip_rev[0] = 'R';
+ chip_rev[1] = 'e';
+ chip_rev[2] = 'v';
+ chip_rev[3] = '-';
+ chip_rev[4] = ioc->attr->asic_rev;
+ chip_rev[5] = '\0';
+}
+
+void
+bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
+{
+ bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
+ bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version,
+ BFA_VERSION_LEN);
+}
+
+void
+bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
+{
+ bfa_os_memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
+ bfa_os_memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+}
+
+void
+bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
+{
+ struct bfi_ioc_attr_s *ioc_attr;
+ u8 nports;
+ u8 max_speed;
+
+ bfa_assert(model);
+ bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
+
+ ioc_attr = ioc->attr;
+
+ nports = bfa_ioc_get_nports(ioc);
+ max_speed = bfa_ioc_speed_sup(ioc);
+
+ /**
+ * model name
+ */
+ if (max_speed == 10) {
+ strcpy(model, "BR-10?0");
+ model[5] = '0' + nports;
+ } else {
+ strcpy(model, "Brocade-??5");
+ model[8] = '0' + max_speed;
+ model[9] = '0' + nports;
+ }
+}
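+
+/*
+ * Example (illustrative): with the scheme above, a 2-port 10G adapter is
+ * reported as "BR-1020" and a 2-port 8G adapter as "Brocade-825".
+ */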
+
+enum bfa_ioc_state
+bfa_ioc_get_state(struct bfa_ioc_s *ioc)
+{
+ return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
+}
+
void
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
{
bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
- ioc_attr->state = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
+ ioc_attr->state = bfa_ioc_get_state(ioc);
ioc_attr->port_id = ioc->port_id;
- if (!ioc->ctdev)
- ioc_attr->ioc_type = BFA_IOC_TYPE_FC;
- else if (ioc->ioc_mc == BFI_MC_IOCFC)
- ioc_attr->ioc_type = BFA_IOC_TYPE_FCoE;
- else if (ioc->ioc_mc == BFI_MC_LL)
- ioc_attr->ioc_type = BFA_IOC_TYPE_LL;
+ ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
- ioc_attr->pci_attr.chip_rev[0] = 'R';
- ioc_attr->pci_attr.chip_rev[1] = 'e';
- ioc_attr->pci_attr.chip_rev[2] = 'v';
- ioc_attr->pci_attr.chip_rev[3] = '-';
- ioc_attr->pci_attr.chip_rev[4] = ioc_attr->adapter_attr.asic_rev;
- ioc_attr->pci_attr.chip_rev[5] = '\0';
+ bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
/**
@@ -2195,29 +1920,6 @@ bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
}
/**
- * Return true if interrupt should be claimed.
- */
-bfa_boolean_t
-bfa_ioc_intx_claim(struct bfa_ioc_s *ioc)
-{
- u32 isr, msk;
-
- /**
- * Always claim if not catapult.
- */
- if (!ioc->ctdev)
- return BFA_TRUE;
-
- /**
- * FALSE if next device is claiming interrupt.
- * TRUE if next device is not interrupting or not present.
- */
- msk = bfa_reg_read(ioc->ioc_regs.shirq_msk_next);
- isr = bfa_reg_read(ioc->ioc_regs.shirq_isr_next);
- return !(isr & ~msk);
-}
-
-/**
* Send AEN notification
*/
static void
@@ -2226,32 +1928,14 @@ bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
union bfa_aen_data_u aen_data;
struct bfa_log_mod_s *logmod = ioc->logm;
s32 inst_num = 0;
- struct bfa_ioc_attr_s ioc_attr;
+ enum bfa_ioc_type_e ioc_type;
- switch (event) {
- case BFA_IOC_AEN_HBGOOD:
- bfa_log(logmod, BFA_AEN_IOC_HBGOOD, inst_num);
- break;
- case BFA_IOC_AEN_HBFAIL:
- bfa_log(logmod, BFA_AEN_IOC_HBFAIL, inst_num);
- break;
- case BFA_IOC_AEN_ENABLE:
- bfa_log(logmod, BFA_AEN_IOC_ENABLE, inst_num);
- break;
- case BFA_IOC_AEN_DISABLE:
- bfa_log(logmod, BFA_AEN_IOC_DISABLE, inst_num);
- break;
- case BFA_IOC_AEN_FWMISMATCH:
- bfa_log(logmod, BFA_AEN_IOC_FWMISMATCH, inst_num);
- break;
- default:
- break;
- }
+ bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, event), inst_num);
memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn));
memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac));
- bfa_ioc_get_attr(ioc, &ioc_attr);
- switch (ioc_attr.ioc_type) {
+ ioc_type = bfa_ioc_get_type(ioc);
+ switch (ioc_type) {
case BFA_IOC_TYPE_FC:
aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
break;
@@ -2263,10 +1947,10 @@ bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
break;
default:
- bfa_assert(ioc_attr.ioc_type == BFA_IOC_TYPE_FC);
+ bfa_assert(ioc_type == BFA_IOC_TYPE_FC);
break;
}
- aen_data.ioc.ioc_type = ioc_attr.ioc_type;
+ aen_data.ioc.ioc_type = ioc_type;
}
/**
@@ -2290,6 +1974,15 @@ bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
}
/**
+ * Clear saved firmware trace
+ */
+void
+bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc)
+{
+ ioc->dbg_fwsave_once = BFA_TRUE;
+}
+
+/**
* Retrieve saved firmware trace from a prior IOC failure.
*/
bfa_status_t
@@ -2304,6 +1997,13 @@ bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
pgnum = bfa_ioc_smem_pgnum(ioc, loff);
loff = bfa_ioc_smem_pgoff(ioc, loff);
+
+ /*
+ * Hold semaphore to serialize pll init and fwtrc.
+ */
+ if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg))
+ return BFA_STATUS_FAILED;
+
bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
tlen = *trclen;
@@ -2329,6 +2029,12 @@ bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
}
bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
bfa_ioc_smem_pgnum(ioc, 0));
+
+ /*
+ * release semaphore.
+ */
+ bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
+
bfa_trc(ioc, pgnum);
*trclen = tlen * sizeof(u32);
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 7c30f05ab137..d0804406ea1a 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -74,15 +74,18 @@ struct bfa_ioc_regs_s {
bfa_os_addr_t lpu_mbox_cmd;
bfa_os_addr_t lpu_mbox;
bfa_os_addr_t pss_ctl_reg;
+ bfa_os_addr_t pss_err_status_reg;
bfa_os_addr_t app_pll_fast_ctl_reg;
bfa_os_addr_t app_pll_slow_ctl_reg;
bfa_os_addr_t ioc_sem_reg;
bfa_os_addr_t ioc_usage_sem_reg;
+ bfa_os_addr_t ioc_init_sem_reg;
bfa_os_addr_t ioc_usage_reg;
bfa_os_addr_t host_page_num_fn;
bfa_os_addr_t heartbeat;
bfa_os_addr_t ioc_fwstate;
bfa_os_addr_t ll_halt;
+ bfa_os_addr_t err_set;
bfa_os_addr_t shirq_isr_next;
bfa_os_addr_t shirq_msk_next;
bfa_os_addr_t smem_page_start;
@@ -154,7 +157,6 @@ struct bfa_ioc_s {
struct bfa_timer_s ioc_timer;
struct bfa_timer_s sem_timer;
u32 hb_count;
- u32 hb_fail;
u32 retry_count;
struct list_head hb_notify_q;
void *dbg_fwsave;
@@ -177,6 +179,22 @@ struct bfa_ioc_s {
struct bfi_ioc_attr_s *attr;
struct bfa_ioc_cbfn_s *cbfn;
struct bfa_ioc_mbox_mod_s mbox_mod;
+ struct bfa_ioc_hwif_s *ioc_hwif;
+};
+
+struct bfa_ioc_hwif_s {
+ bfa_status_t (*ioc_pll_init) (struct bfa_ioc_s *ioc);
+ bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc);
+ void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc);
+ u32 * (*ioc_fwimg_get_chunk) (struct bfa_ioc_s *ioc,
+ u32 off);
+ u32 (*ioc_fwimg_get_size) (struct bfa_ioc_s *ioc);
+ void (*ioc_reg_init) (struct bfa_ioc_s *ioc);
+ void (*ioc_map_port) (struct bfa_ioc_s *ioc);
+ void (*ioc_isr_mode_set) (struct bfa_ioc_s *ioc,
+ bfa_boolean_t msix);
+ void (*ioc_notify_hbfail) (struct bfa_ioc_s *ioc);
+ void (*ioc_ownership_reset) (struct bfa_ioc_s *ioc);
};
#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
@@ -191,6 +209,15 @@ struct bfa_ioc_s {
#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit)
#define bfa_ioc_speed_sup(__ioc) \
BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)
+#define bfa_ioc_get_nports(__ioc) \
+ BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)
+
+#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++)
+#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
+
+#define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS)
+#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
+#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
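+
+/*
+ * Worked example (illustrative; assumes BFI_FLASH_CHUNK_SZ_WORDS == 8192):
+ * word offset 20000 maps to chunk 2, word 3616 within that chunk, and
+ * BFA_IOC_FLASH_CHUNK_ADDR(2) == 16384.
+ */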
/**
* IOC mailbox interface
@@ -207,6 +234,14 @@ void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
/**
* IOC interfaces
*/
+#define bfa_ioc_pll_init(__ioc) ((__ioc)->ioc_hwif->ioc_pll_init(__ioc))
+#define bfa_ioc_isr_mode_set(__ioc, __msix) \
+ ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
+#define bfa_ioc_ownership_reset(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
+
+void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc);
+void bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc);
void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa,
struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod,
struct bfa_trc_mod_s *trcmod,
@@ -223,13 +258,21 @@ bfa_boolean_t bfa_ioc_intx_claim(struct bfa_ioc_s *ioc);
void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param);
void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg);
void bfa_ioc_error_isr(struct bfa_ioc_s *ioc);
-void bfa_ioc_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t intx);
-bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc);
void bfa_ioc_cfg_complete(struct bfa_ioc_s *ioc);
+enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc);
+void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num);
+void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver);
+void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver);
+void bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model);
+void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc,
+ char *manufacturer);
+void bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev);
+enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc_s *ioc);
+
void bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr);
void bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
struct bfa_adapter_attr_s *ad_attr);
@@ -237,6 +280,7 @@ int bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover);
void bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave);
bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata,
int *trclen);
+void bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc);
bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata,
int *trclen);
u32 bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr);
@@ -245,6 +289,13 @@ void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc);
void bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
struct bfa_ioc_hbfail_notify_s *notify);
+bfa_boolean_t bfa_ioc_sem_get(bfa_os_addr_t sem_reg);
+void bfa_ioc_sem_release(bfa_os_addr_t sem_reg);
+void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc);
+void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
+ struct bfi_ioc_image_hdr_s *fwhdr);
+bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
+ struct bfi_ioc_image_hdr_s *fwhdr);
/*
* bfa mfg wwn API functions
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
new file mode 100644
index 000000000000..3ce85319f739
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <bfa.h>
+#include <bfa_ioc.h>
+#include <bfa_fwimg_priv.h>
+#include <cna/bfa_cna_trcmod.h>
+#include <cs/bfa_debug.h>
+#include <bfi/bfi_ioc.h>
+#include <bfi/bfi_cbreg.h>
+#include <log/bfa_log_hal.h>
+#include <defs/bfa_defs_pci.h>
+
+BFA_TRC_FILE(CNA, IOC_CB);
+
+/*
+ * forward declarations
+ */
+static bfa_status_t bfa_ioc_cb_pll_init(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc);
+static u32 *bfa_ioc_cb_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off);
+static u32 bfa_ioc_cb_fwimg_get_size(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
+static void bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
+
+struct bfa_ioc_hwif_s hwif_cb = {
+ bfa_ioc_cb_pll_init,
+ bfa_ioc_cb_firmware_lock,
+ bfa_ioc_cb_firmware_unlock,
+ bfa_ioc_cb_fwimg_get_chunk,
+ bfa_ioc_cb_fwimg_get_size,
+ bfa_ioc_cb_reg_init,
+ bfa_ioc_cb_map_port,
+ bfa_ioc_cb_isr_mode_set,
+ bfa_ioc_cb_notify_hbfail,
+ bfa_ioc_cb_ownership_reset,
+};
+
+/**
+ * Called from bfa_ioc_attach() to map asic specific calls.
+ */
+void
+bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
+{
+ ioc->ioc_hwif = &hwif_cb;
+}
+
+static u32 *
+bfa_ioc_cb_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off)
+{
+ return bfi_image_cb_get_chunk(off);
+}
+
+static u32
+bfa_ioc_cb_fwimg_get_size(struct bfa_ioc_s *ioc)
+{
+ return bfi_image_cb_size;
+}
+
+/**
+ * Return true if firmware of current driver matches the running firmware.
+ */
+static bfa_boolean_t
+bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc)
+{
+ return BFA_TRUE;
+}
+
+static void
+bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc)
+{
+}
+
+/**
+ * Notify other functions on HB failure.
+ */
+static void
+bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc)
+{
+ bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET);
+ bfa_reg_read(ioc->ioc_regs.err_set);
+}
+
+/**
+ * Host to LPU mailbox message addresses
+ */
+static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
+ { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
+ { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }
+};
+
+/**
+ * Host <-> LPU mailbox command/status registers
+ */
+static struct { u32 hfn, lpu; } iocreg_mbcmd[] = {
+ { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
+ { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT }
+};
+
+static void
+bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
+{
+ bfa_os_addr_t rb;
+ int pcifn = bfa_ioc_pcifn(ioc);
+
+ rb = bfa_ioc_bar0(ioc);
+
+ ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
+ ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
+ ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
+
+ if (ioc->port_id == 0) {
+ ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
+ ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+ } else {
+ ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
+ ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
+ }
+
+ /**
+ * Host <-> LPU mailbox command/status registers
+ */
+ ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn;
+ ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd[pcifn].lpu;
+
+ /*
+ * PSS control registers
+ */
+ ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
+ ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
+ ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_400_CTL_REG);
+ ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_212_CTL_REG);
+
+ /*
+ * IOC semaphore registers and serialization
+ */
+ ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
+ ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
+
+ /**
+ * sram memory access
+ */
+ ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
+ ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CB;
+
+ /*
+ * err set reg : for notification of hb failure
+ */
+ ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
+}
+
+/**
+ * Initialize IOC to port mapping.
+ */
+static void
+bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
+{
+ /**
+ * For crossbow, port id is same as pci function.
+ */
+ ioc->port_id = bfa_ioc_pcifn(ioc);
+ bfa_trc(ioc, ioc->port_id);
+}
+
+/**
+ * Set interrupt mode for a function: INTX or MSIX
+ */
+static void
+bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
+{
+}
+
+static bfa_status_t
+bfa_ioc_cb_pll_init(struct bfa_ioc_s *ioc)
+{
+ bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
+ u32 pll_sclk, pll_fclk;
+
+ /*
+ * Hold semaphore so that nobody can access the chip during init.
+ */
+ bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
+
+ pll_sclk = __APP_PLL_212_ENABLE | __APP_PLL_212_LRESETN |
+ __APP_PLL_212_P0_1(3U) |
+ __APP_PLL_212_JITLMT0_1(3U) |
+ __APP_PLL_212_CNTLMT0_1(3U);
+ pll_fclk = __APP_PLL_400_ENABLE | __APP_PLL_400_LRESETN |
+ __APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) |
+ __APP_PLL_400_JITLMT0_1(3U) |
+ __APP_PLL_400_CNTLMT0_1(3U);
+
+ bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
+ bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
+
+ bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
+ bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
+ bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
+ bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
+ bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
+ bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
+
+ bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
+ __APP_PLL_212_LOGIC_SOFT_RESET);
+ bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
+ __APP_PLL_212_BYPASS |
+ __APP_PLL_212_LOGIC_SOFT_RESET);
+ bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
+ __APP_PLL_400_LOGIC_SOFT_RESET);
+ bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
+ __APP_PLL_400_BYPASS |
+ __APP_PLL_400_LOGIC_SOFT_RESET);
+ bfa_os_udelay(2);
+ bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
+ __APP_PLL_212_LOGIC_SOFT_RESET);
+ bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
+ __APP_PLL_400_LOGIC_SOFT_RESET);
+
+ bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
+ pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET);
+ bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
+ pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET);
+
+ /**
+ * Wait for PLLs to lock.
+ */
+ bfa_os_udelay(2000);
+ bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
+ bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
+
+ bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk);
+ bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk);
+
+ /*
+ * release semaphore.
+ */
+ bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
+
+ return BFA_STATUS_OK;
+}
+
+/**
+ * Cleanup hw semaphore and usecnt registers
+ */
+static void
+bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc)
+{
+ /*
+ * Read the hw sem reg to make sure that it is locked
+ * before we clear it. If it is not locked, writing 1
+ * will lock it instead of clearing it.
+ */
+ bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
+ bfa_ioc_hw_sem_release(ioc);
+}
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
new file mode 100644
index 000000000000..20b58ad5f95c
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -0,0 +1,423 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <bfa.h>
+#include <bfa_ioc.h>
+#include <bfa_fwimg_priv.h>
+#include <cna/bfa_cna_trcmod.h>
+#include <cs/bfa_debug.h>
+#include <bfi/bfi_ioc.h>
+#include <bfi/bfi_ctreg.h>
+#include <log/bfa_log_hal.h>
+#include <defs/bfa_defs_pci.h>
+
+BFA_TRC_FILE(CNA, IOC_CT);
+
+/*
+ * forward declarations
+ */
+static bfa_status_t bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
+static u32* bfa_ioc_ct_fwimg_get_chunk(struct bfa_ioc_s *ioc,
+ u32 off);
+static u32 bfa_ioc_ct_fwimg_get_size(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
+static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
+
+struct bfa_ioc_hwif_s hwif_ct = {
+ bfa_ioc_ct_pll_init,
+ bfa_ioc_ct_firmware_lock,
+ bfa_ioc_ct_firmware_unlock,
+ bfa_ioc_ct_fwimg_get_chunk,
+ bfa_ioc_ct_fwimg_get_size,
+ bfa_ioc_ct_reg_init,
+ bfa_ioc_ct_map_port,
+ bfa_ioc_ct_isr_mode_set,
+ bfa_ioc_ct_notify_hbfail,
+ bfa_ioc_ct_ownership_reset,
+};
+
+/**
+ * Called from bfa_ioc_attach() to map asic specific calls.
+ */
+void
+bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
+{
+ ioc->ioc_hwif = &hwif_ct;
+}
+
+static u32*
+bfa_ioc_ct_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off)
+{
+ return bfi_image_ct_get_chunk(off);
+}
+
+static u32
+bfa_ioc_ct_fwimg_get_size(struct bfa_ioc_s *ioc)
+{
+ return bfi_image_ct_size;
+}
+
+/**
+ * Return true if firmware of current driver matches the running firmware.
+ */
+static bfa_boolean_t
+bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
+{
+ enum bfi_ioc_state ioc_fwstate;
+ u32 usecnt;
+ struct bfi_ioc_image_hdr_s fwhdr;
+
+ /**
+ * Firmware match check is relevant only for CNA.
+ */
+ if (!ioc->cna)
+ return BFA_TRUE;
+
+ /**
+ * If bios boot (flash based) -- do not increment usage count
+ */
+ if (bfa_ioc_ct_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
+ return BFA_TRUE;
+
+ bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+ usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
+
+ /**
+ * If usage count is 0, always return TRUE.
+ */
+ if (usecnt == 0) {
+ bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1);
+ bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ bfa_trc(ioc, usecnt);
+ return BFA_TRUE;
+ }
+
+ ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
+ bfa_trc(ioc, ioc_fwstate);
+
+ /**
+ * Use count cannot be non-zero and chip in uninitialized state.
+ */
+ bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);
+
+ /**
+ * Check if another driver with a different firmware is active
+ */
+ bfa_ioc_fwver_get(ioc, &fwhdr);
+ if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
+ bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ bfa_trc(ioc, usecnt);
+ return BFA_FALSE;
+ }
+
+ /**
+ * Same firmware version. Increment the reference count.
+ */
+ usecnt++;
+ bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
+ bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ bfa_trc(ioc, usecnt);
+ return BFA_TRUE;
+}
+
+static void
+bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
+{
+ u32 usecnt;
+
+ /**
+ * Firmware lock is relevant only for CNA.
+ * If bios boot (flash based) -- do not decrement usage count
+ */
+ if (!ioc->cna || bfa_ioc_ct_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
+ return;
+
+ /**
+ * decrement usage count
+ */
+ bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+ usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
+ bfa_assert(usecnt > 0);
+
+ usecnt--;
+ bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
+ bfa_trc(ioc, usecnt);
+
+ bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+}
+
+/**
+ * Notify other functions on HB failure.
+ */
+static void
+bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc)
+{
+ if (ioc->cna) {
+ bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P);
+ /* Wait for halt to take effect */
+ bfa_reg_read(ioc->ioc_regs.ll_halt);
+ } else {
+ bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET);
+ bfa_reg_read(ioc->ioc_regs.err_set);
+ }
+}
+
+/**
+ * Host to LPU mailbox message addresses
+ */
+static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
+ { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
+ { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
+ { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
+ { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
+};
+
+/**
+ * Host <-> LPU mailbox command/status registers - port 0
+ */
+static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
+ { HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
+ { HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
+ { HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
+ { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
+};
+
+/**
+ * Host <-> LPU mailbox command/status registers - port 1
+ */
+static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
+ { HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
+ { HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
+ { HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
+ { HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
+};
+
+static void
+bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
+{
+ bfa_os_addr_t rb;
+ int pcifn = bfa_ioc_pcifn(ioc);
+
+ rb = bfa_ioc_bar0(ioc);
+
+ ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
+ ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
+ ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
+
+ if (ioc->port_id == 0) {
+ ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
+ ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+ ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
+ ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
+ ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
+ } else {
+ ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
+ ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
+ ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
+ ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
+ ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
+ }
+
+ /*
+ * PSS control registers
+ */
+ ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
+ ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
+ ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
+ ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);
+
+ /*
+ * IOC semaphore registers and serialization
+ */
+ ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
+ ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
+ ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
+ ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
+
+ /**
+ * sram memory access
+ */
+ ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
+ ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
+
+ /*
+ * err set reg : for notification of hb failure in fcmode
+ */
+ ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
+}
+
+/**
+ * Initialize IOC to port mapping.
+ */
+
+#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
+static void
+bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
+{
+ bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
+ u32 r32;
+
+ /**
+ * For catapult, base port id on personality register and IOC type
+ */
+ r32 = bfa_reg_read(rb + FNC_PERS_REG);
+ r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
+ ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
+
+ bfa_trc(ioc, bfa_ioc_pcifn(ioc));
+ bfa_trc(ioc, ioc->port_id);
+}
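+
+/*
+ * Example (illustrative): each PCI function owns an 8-bit field in
+ * FNC_PERS_REG, so for function 2 the shift is 16 and the port id is
+ * decoded from that byte using __F0_PORT_MAP_MK and __F0_PORT_MAP_SH.
+ */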
+
+/**
+ * Set interrupt mode for a function: INTX or MSIX
+ */
+static void
+bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
+{
+ bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
+ u32 r32, mode;
+
+ r32 = bfa_reg_read(rb + FNC_PERS_REG);
+ bfa_trc(ioc, r32);
+
+ mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
+ __F0_INTX_STATUS;
+
+ /**
+ * If already in desired mode, do not change anything
+ */
+ if (!msix && mode)
+ return;
+
+ if (msix)
+ mode = __F0_INTX_STATUS_MSIX;
+ else
+ mode = __F0_INTX_STATUS_INTA;
+
+ r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
+ r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
+ bfa_trc(ioc, r32);
+
+ bfa_reg_write(rb + FNC_PERS_REG, r32);
+}
+
+static bfa_status_t
+bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc)
+{
+ bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
+ u32 pll_sclk, pll_fclk, r32;
+
+ /*
+ * Hold semaphore so that nobody can access the chip during init.
+ */
+ bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
+
+ pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
+ __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
+ __APP_PLL_312_JITLMT0_1(3U) |
+ __APP_PLL_312_CNTLMT0_1(1U);
+ pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
+ __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
+ __APP_PLL_425_JITLMT0_1(3U) |
+ __APP_PLL_425_CNTLMT0_1(1U);
+
+ /**
+ * For catapult, choose operational mode FC/FCoE
+ */
+ if (ioc->fcmode) {
+ bfa_reg_write((rb + OP_MODE), 0);
+ bfa_reg_write((rb + ETH_MAC_SER_REG),
+ __APP_EMS_CMLCKSEL |
+ __APP_EMS_REFCKBUFEN2 |
+ __APP_EMS_CHANNEL_SEL);
+ } else {
+ ioc->pllinit = BFA_TRUE;
+ bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE);
+ bfa_reg_write((rb + ETH_MAC_SER_REG),
+ __APP_EMS_REFCKBUFEN1);
+ }
+
+ bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
+ bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
+
+ bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
+ bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
+ bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
+ bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
+ bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
+ bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
+
+ bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk |
+ __APP_PLL_312_LOGIC_SOFT_RESET);
+ bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk |
+ __APP_PLL_425_LOGIC_SOFT_RESET);
+ bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk |
+ __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE);
+ bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk |
+ __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE);
+
+ /**
+ * Wait for PLLs to lock.
+ */
+ bfa_reg_read(rb + HOSTFN0_INT_MSK);
+ bfa_os_udelay(2000);
+ bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
+ bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
+
+ bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk |
+ __APP_PLL_312_ENABLE);
+ bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk |
+ __APP_PLL_425_ENABLE);
+
+ bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START);
+ bfa_os_udelay(1000);
+ r32 = bfa_reg_read((rb + MBIST_STAT_REG));
+ bfa_trc(ioc, r32);
+ /*
+ * release semaphore.
+ */
+ bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
+
+ return BFA_STATUS_OK;
+}
+
+/**
+ * Cleanup hw semaphore and usecnt registers
+ */
+static void
+bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
+{
+ if (ioc->cna) {
+ bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+ bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 0);
+ bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+ }
+
+ /*
+ * Read the hw sem reg to make sure that it is locked
+ * before we clear it. If it is not locked, writing 1
+ * will lock it instead of clearing it.
+ */
+ bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
+ bfa_ioc_hw_sem_release(ioc);
+}
diff --git a/drivers/scsi/bfa/bfa_iocfc.c b/drivers/scsi/bfa/bfa_iocfc.c
index d7ab792a9e54..a76de2669bfc 100644
--- a/drivers/scsi/bfa/bfa_iocfc.c
+++ b/drivers/scsi/bfa/bfa_iocfc.c
@@ -172,6 +172,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
*/
if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT) {
iocfc->hwif.hw_reginit = bfa_hwct_reginit;
+ iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
@@ -180,6 +181,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
} else {
iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
+ iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
@@ -336,8 +338,10 @@ bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
else
bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
- } else
- bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
+ } else {
+ if (bfa->iocfc.cfgdone)
+ bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
+ }
}
static void
@@ -619,8 +623,6 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod,
bfa->trcmod, bfa->aen, bfa->logm);
- bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
- bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
/**
* Choose FC (ssid: 0x1C) v/s FCoE (ssid: 0x14) mode.
@@ -628,6 +630,9 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
if (0)
bfa_ioc_set_fcmode(&bfa->ioc);
+ bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
+ bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
+
bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
bfa_iocfc_mem_claim(bfa, cfg, meminfo);
bfa_timer_init(&bfa->timer_mod);
@@ -654,7 +659,6 @@ bfa_iocfc_init(struct bfa_s *bfa)
{
bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
bfa_ioc_enable(&bfa->ioc);
- bfa_msix_install(bfa);
}
/**
@@ -797,6 +801,11 @@ bfa_iocfc_get_stats(struct bfa_s *bfa, struct bfa_iocfc_stats_s *stats,
return BFA_STATUS_DEVBUSY;
}
+ if (!bfa_iocfc_is_operational(bfa)) {
+ bfa_trc(bfa, 0);
+ return BFA_STATUS_IOC_NON_OP;
+ }
+
iocfc->stats_busy = BFA_TRUE;
iocfc->stats_ret = stats;
iocfc->stats_cbfn = cbfn;
@@ -817,6 +826,11 @@ bfa_iocfc_clear_stats(struct bfa_s *bfa, bfa_cb_ioc_t cbfn, void *cbarg)
return BFA_STATUS_DEVBUSY;
}
+ if (!bfa_iocfc_is_operational(bfa)) {
+ bfa_trc(bfa, 0);
+ return BFA_STATUS_IOC_NON_OP;
+ }
+
iocfc->stats_busy = BFA_TRUE;
iocfc->stats_cbfn = cbfn;
iocfc->stats_cbarg = cbarg;
diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
index ce9a830a4207..fbb4bdc9d600 100644
--- a/drivers/scsi/bfa/bfa_iocfc.h
+++ b/drivers/scsi/bfa/bfa_iocfc.h
@@ -54,6 +54,7 @@ struct bfa_msix_s {
*/
struct bfa_hwif_s {
void (*hw_reginit)(struct bfa_s *bfa);
+ void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq);
void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq);
void (*hw_msix_init)(struct bfa_s *bfa, int nvecs);
void (*hw_msix_install)(struct bfa_s *bfa);
@@ -143,6 +144,7 @@ void bfa_msix_rspq(struct bfa_s *bfa, int vec);
void bfa_msix_lpu_err(struct bfa_s *bfa, int vec);
void bfa_hwcb_reginit(struct bfa_s *bfa);
+void bfa_hwcb_reqq_ack(struct bfa_s *bfa, int rspq);
void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq);
void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
void bfa_hwcb_msix_install(struct bfa_s *bfa);
@@ -151,6 +153,7 @@ void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap,
u32 *nvecs, u32 *maxvec);
void bfa_hwct_reginit(struct bfa_s *bfa);
+void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq);
void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
void bfa_hwct_msix_install(struct bfa_s *bfa);
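
The new hw_reqq_ack hook rides the same per-ASIC ops table as the existing entries: one branch on the PCI device id at attach time wires either the CT or CB handlers, and the hot paths then call through the table with no further checks. A compilable sketch of that selection; the names and device id value are illustrative, not the driver's.

#include <stdio.h>

struct hwif_ops {
	void (*reqq_ack)(int reqq);
	void (*rspq_ack)(int rspq);
};

static void ct_reqq_ack(int q) { printf("CT: ack reqq %d\n", q); }
static void ct_rspq_ack(int q) { printf("CT: ack rspq %d\n", q); }
static void cb_reqq_ack(int q) { printf("CB: ack reqq %d\n", q); }
static void cb_rspq_ack(int q) { printf("CB: ack rspq %d\n", q); }

static const struct hwif_ops ct_ops = { ct_reqq_ack, ct_rspq_ack };
static const struct hwif_ops cb_ops = { cb_reqq_ack, cb_rspq_ack };

enum { DEVID_CT = 0x0014 };	/* assumed value, for illustration only */

static const struct hwif_ops *hwif_select(int devid)
{
	/* one branch at attach time; hot paths go through the table */
	return (devid == DEVID_CT) ? &ct_ops : &cb_ops;
}

int main(void)
{
	const struct hwif_ops *ops = hwif_select(DEVID_CT);

	ops->reqq_ack(0);	/* no per-call ASIC checks needed */
	return 0;
}
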
diff --git a/drivers/scsi/bfa/bfa_ioim.c b/drivers/scsi/bfa/bfa_ioim.c
index f81d359b7089..5b107abe46e5 100644
--- a/drivers/scsi/bfa/bfa_ioim.c
+++ b/drivers/scsi/bfa/bfa_ioim.c
@@ -149,7 +149,7 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ioim->bfa, event);
}
}
@@ -194,7 +194,7 @@ bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ioim->bfa, event);
}
}
@@ -259,7 +259,7 @@ bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ioim->bfa, event);
}
}
@@ -317,7 +317,7 @@ bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ioim->bfa, event);
}
}
@@ -377,7 +377,7 @@ bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ioim->bfa, event);
}
}
@@ -419,7 +419,7 @@ bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ioim->bfa, event);
}
}
@@ -467,7 +467,7 @@ bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ioim->bfa, event);
}
}
@@ -516,7 +516,7 @@ bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ioim->bfa, event);
}
}
@@ -544,7 +544,7 @@ bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ioim->bfa, event);
}
}
@@ -577,7 +577,7 @@ bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ioim->bfa, event);
}
}
@@ -605,7 +605,7 @@ bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ioim->bfa, event);
}
}
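
Every default arm in these state machines swaps a bare bfa_assert(0) for bfa_sm_fault(ioim->bfa, event), so the unexpected event and the instance that received it land in the trace buffer before anything halts. A self-contained sketch of that idea; trc() and sm_fault() below only approximate what the real macros plausibly do.

#include <assert.h>
#include <stdio.h>

/* Stand-in for bfa_trc(): record where and with what the fault hit. */
#define trc(ctx, data) \
	fprintf(stderr, "%s:%d ctx=%p data=%d\n", \
		__FILE__, __LINE__, (void *)(ctx), (int)(data))

/* Stand-in for bfa_sm_fault(): trace the event, then fail. */
#define sm_fault(ctx, event) do { trc(ctx, event); assert(0); } while (0)

enum io_event { IO_SEND = 1, IO_COMP = 2 };

static void io_sm_uninit(void *ioim, enum io_event event)
{
	switch (event) {
	case IO_SEND:
		break;			/* normal transition elided */
	default:
		sm_fault(ioim, event);	/* was: bare bfa_assert(0) */
	}
}

int main(void)
{
	io_sm_uninit(NULL, IO_SEND);	/* valid event: no fault */
	return 0;
}
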
diff --git a/drivers/scsi/bfa/bfa_itnim.c b/drivers/scsi/bfa/bfa_itnim.c
index eabf7d38bd09..a914ff255135 100644
--- a/drivers/scsi/bfa/bfa_itnim.c
+++ b/drivers/scsi/bfa/bfa_itnim.c
@@ -144,7 +144,7 @@ bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(itnim->bfa, event);
}
}
@@ -175,7 +175,7 @@ bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(itnim->bfa, event);
}
}
@@ -212,7 +212,7 @@ bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(itnim->bfa, event);
}
}
@@ -247,7 +247,7 @@ bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(itnim->bfa, event);
}
}
@@ -275,7 +275,7 @@ bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(itnim->bfa, event);
}
}
@@ -317,7 +317,7 @@ bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(itnim->bfa, event);
}
}
@@ -348,7 +348,7 @@ bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(itnim->bfa, event);
}
}
@@ -385,7 +385,7 @@ bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(itnim->bfa, event);
}
}
@@ -413,7 +413,7 @@ bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(itnim->bfa, event);
}
}
@@ -442,7 +442,7 @@ bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(itnim->bfa, event);
}
}
@@ -470,7 +470,7 @@ bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(itnim->bfa, event);
}
}
@@ -502,7 +502,7 @@ bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(itnim->bfa, event);
}
}
@@ -538,7 +538,7 @@ bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(itnim->bfa, event);
}
}
@@ -559,7 +559,7 @@ bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(itnim->bfa, event);
}
}
@@ -583,7 +583,7 @@ bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(itnim->bfa, event);
}
}
diff --git a/drivers/scsi/bfa/bfa_lps.c b/drivers/scsi/bfa/bfa_lps.c
index 9844b45412b6..ad06f6189092 100644
--- a/drivers/scsi/bfa/bfa_lps.c
+++ b/drivers/scsi/bfa/bfa_lps.c
@@ -18,6 +18,7 @@
#include <bfa.h>
#include <bfi/bfi_lps.h>
#include <cs/bfa_debug.h>
+#include <defs/bfa_defs_pci.h>
BFA_TRC_FILE(HAL, LPS);
BFA_MODULE(lps);
@@ -25,6 +26,12 @@ BFA_MODULE(lps);
#define BFA_LPS_MIN_LPORTS (1)
#define BFA_LPS_MAX_LPORTS (256)
+/*
+ * Maximum Vports supported per physical port or vf.
+ */
+#define BFA_LPS_MAX_VPORTS_SUPP_CB 255
+#define BFA_LPS_MAX_VPORTS_SUPP_CT 190
+
/**
* forward declarations
*/
@@ -49,7 +56,7 @@ static void bfa_lps_send_login(struct bfa_lps_s *lps);
static void bfa_lps_send_logout(struct bfa_lps_s *lps);
static void bfa_lps_login_comp(struct bfa_lps_s *lps);
static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
-
+static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
/**
* lps_pvt BFA LPS private functions
@@ -62,6 +69,7 @@ enum bfa_lps_event {
BFA_LPS_SM_RESUME = 4, /* space present in reqq queue */
BFA_LPS_SM_DELETE = 5, /* lps delete from user */
BFA_LPS_SM_OFFLINE = 6, /* Link is offline */
+ BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */
};
static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
@@ -91,6 +99,12 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
bfa_sm_set_state(lps, bfa_lps_sm_login);
bfa_lps_send_login(lps);
}
+ if (lps->fdisc)
+ bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+ BFA_PL_EID_LOGIN, 0, "FDISC Request");
+ else
+ bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+ BFA_PL_EID_LOGIN, 0, "FLOGI Request");
break;
case BFA_LPS_SM_LOGOUT:
@@ -101,6 +115,7 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
bfa_lps_free(lps);
break;
+ case BFA_LPS_SM_RX_CVL:
case BFA_LPS_SM_OFFLINE:
break;
@@ -112,7 +127,7 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(lps->bfa, event);
}
}
@@ -127,10 +142,25 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
switch (event) {
case BFA_LPS_SM_FWRSP:
- if (lps->status == BFA_STATUS_OK)
+ if (lps->status == BFA_STATUS_OK) {
bfa_sm_set_state(lps, bfa_lps_sm_online);
- else
+ if (lps->fdisc)
+ bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+ BFA_PL_EID_LOGIN, 0, "FDISC Accept");
+ else
+ bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+ BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
+ } else {
bfa_sm_set_state(lps, bfa_lps_sm_init);
+ if (lps->fdisc)
+ bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+ BFA_PL_EID_LOGIN, 0,
+ "FDISC Fail (RJT or timeout)");
+ else
+ bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+ BFA_PL_EID_LOGIN, 0,
+ "FLOGI Fail (RJT or timeout)");
+ }
bfa_lps_login_comp(lps);
break;
@@ -139,7 +169,7 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(lps->bfa, event);
}
}
@@ -162,8 +192,16 @@ bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
bfa_reqq_wcancel(&lps->wqe);
break;
+ case BFA_LPS_SM_RX_CVL:
+ /*
+ * Login was not even sent out, so when we get out
+ * of this state it will look like a login retry
+ * after Clear virtual link
+ */
+ break;
+
default:
- bfa_assert(0);
+ bfa_sm_fault(lps->bfa, event);
}
}
@@ -185,6 +223,17 @@ bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
bfa_sm_set_state(lps, bfa_lps_sm_logout);
bfa_lps_send_logout(lps);
}
+ bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+ BFA_PL_EID_LOGO, 0, "Logout");
+ break;
+
+ case BFA_LPS_SM_RX_CVL:
+ bfa_sm_set_state(lps, bfa_lps_sm_init);
+
+ /* Let the vport module know about this event */
+ bfa_lps_cvl_event(lps);
+ bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+ BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
break;
case BFA_LPS_SM_OFFLINE:
@@ -193,7 +242,7 @@ bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(lps->bfa, event);
}
}
@@ -217,7 +266,7 @@ bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(lps->bfa, event);
}
}
@@ -242,7 +291,7 @@ bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(lps->bfa, event);
}
}
@@ -396,6 +445,20 @@ bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
}
/**
+ * Firmware received a Clear virtual link request (for FCoE)
+ */
+static void
+bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
+{
+ struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
+ struct bfa_lps_s *lps;
+
+ lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag);
+
+ bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
+}
+
+/**
* Space is available in request queue, resume queueing request to firmware.
*/
static void
@@ -531,7 +594,48 @@ bfa_lps_logout_comp(struct bfa_lps_s *lps)
bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
}
+/**
+ * Clear virtual link completion handler for non-fcs
+ */
+static void
+bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
+{
+ struct bfa_lps_s *lps = arg;
+
+ if (!complete)
+ return;
+
+ /* Clear virtual link to base port will result in link down */
+ if (lps->fdisc)
+ bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
+}
+
+/**
+ * Received Clear virtual link event -- direct call for fcs,
+ * queued for others
+ */
+static void
+bfa_lps_cvl_event(struct bfa_lps_s *lps)
+{
+ if (!lps->bfa->fcs) {
+ bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
+ lps);
+ return;
+ }
+
+ /* Clear virtual link to base port will result in link down */
+ if (lps->fdisc)
+ bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
+}
+u32
+bfa_lps_get_max_vport(struct bfa_s *bfa)
+{
+ if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
+ return BFA_LPS_MAX_VPORTS_SUPP_CT;
+ else
+ return BFA_LPS_MAX_VPORTS_SUPP_CB;
+}
/**
* lps_public BFA LPS public functions
@@ -752,6 +856,14 @@ bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps)
return lps->lsrjt_expl;
}
+/**
+ * Return fpma/spma MAC for lport
+ */
+struct mac_s
+bfa_lps_get_lp_mac(struct bfa_lps_s *lps)
+{
+ return lps->lp_mac;
+}
/**
* LPS firmware message class handler.
@@ -773,6 +885,10 @@ bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
bfa_lps_logout_rsp(bfa, msg.logout_rsp);
break;
+ case BFI_LPS_H2I_CVL_EVENT:
+ bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
+ break;
+
default:
bfa_trc(bfa, m->mhdr.msg_id);
bfa_assert(0);
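
bfa_lps_cvl_event() above shows the driver's two delivery modes for the new Clear-virtual-link event: call the vport layer directly when running in fcs context, otherwise defer through the callback queue. A minimal sketch of that dispatch, with the queue reduced to a function pointer and all names illustrative.

#include <stdio.h>

struct lps {
	int fcs_ctx;			/* analog of lps->bfa->fcs */
	void (*cb_queue)(struct lps *);	/* analog of bfa_cb_queue() */
};

static void cvl_deliver(struct lps *lps)
{
	(void)lps;
	puts("deliver CVL to vport layer");	/* bfa_cb_lps_cvl_event() */
}

static void cvl_from_cbq(struct lps *lps)
{
	cvl_deliver(lps);		/* runs later, queued context */
}

static void cvl_event(struct lps *lps)
{
	if (!lps->fcs_ctx) {
		lps->cb_queue(lps);	/* defer for non-fcs callers */
		return;
	}
	cvl_deliver(lps);		/* fcs context: direct call */
}

int main(void)
{
	struct lps l = { .fcs_ctx = 1, .cb_queue = cvl_from_cbq };

	cvl_event(&l);
	return 0;
}
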
diff --git a/drivers/scsi/bfa/bfa_module.c b/drivers/scsi/bfa/bfa_module.c
index 32eda8e1ec65..a7fcc80c177e 100644
--- a/drivers/scsi/bfa/bfa_module.c
+++ b/drivers/scsi/bfa/bfa_module.c
@@ -24,7 +24,7 @@
*/
struct bfa_module_s *hal_mods[] = {
&hal_mod_sgpg,
- &hal_mod_pport,
+ &hal_mod_fcport,
&hal_mod_fcxp,
&hal_mod_lps,
&hal_mod_uf,
@@ -45,7 +45,7 @@ bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
bfa_isr_unhandled, /* BFI_MC_DIAG */
bfa_isr_unhandled, /* BFI_MC_FLASH */
bfa_isr_unhandled, /* BFI_MC_CEE */
- bfa_pport_isr, /* BFI_MC_PORT */
+ bfa_fcport_isr, /* BFI_MC_FCPORT */
bfa_isr_unhandled, /* BFI_MC_IOCFC */
bfa_isr_unhandled, /* BFI_MC_LL */
bfa_uf_isr, /* BFI_MC_UF */
diff --git a/drivers/scsi/bfa/bfa_modules_priv.h b/drivers/scsi/bfa/bfa_modules_priv.h
index 96f70534593c..f554c2fad6a9 100644
--- a/drivers/scsi/bfa/bfa_modules_priv.h
+++ b/drivers/scsi/bfa/bfa_modules_priv.h
@@ -29,7 +29,7 @@
struct bfa_modules_s {
- struct bfa_pport_s pport; /* physical port module */
+ struct bfa_fcport_s fcport; /* fc port module */
struct bfa_fcxp_mod_s fcxp_mod; /* fcxp module */
 struct bfa_lps_mod_s lps_mod; /* lps module */
struct bfa_uf_mod_s uf_mod; /* unsolicited frame module */
diff --git a/drivers/scsi/bfa/bfa_port_priv.h b/drivers/scsi/bfa/bfa_port_priv.h
index 51f698a06b6d..40e256ec67ff 100644
--- a/drivers/scsi/bfa/bfa_port_priv.h
+++ b/drivers/scsi/bfa/bfa_port_priv.h
@@ -23,9 +23,19 @@
#include "bfa_intr_priv.h"
/**
- * BFA physical port data structure
+ * Link notification data structure
*/
-struct bfa_pport_s {
+struct bfa_fcport_ln_s {
+ struct bfa_fcport_s *fcport;
+ bfa_sm_t sm;
+ struct bfa_cb_qe_s ln_qe; /* BFA callback queue elem for ln */
+ enum bfa_pport_linkstate ln_event; /* ln event for callback */
+};
+
+/**
+ * BFA FC port data structure
+ */
+struct bfa_fcport_s {
struct bfa_s *bfa; /* parent BFA instance */
bfa_sm_t sm; /* port state machine */
wwn_t nwwn; /* node wwn of physical port */
@@ -36,6 +46,8 @@ struct bfa_pport_s {
enum bfa_pport_topology topology; /* current topology */
u8 myalpa; /* my ALPA in LOOP topology */
u8 rsvd[3];
+ u32 mypid:24;
+ u32 rsvd_b:8;
struct bfa_pport_cfg_s cfg; /* current port configuration */
struct bfa_qos_attr_s qos_attr; /* QoS Attributes */
struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */
@@ -49,42 +61,31 @@ struct bfa_pport_s {
void (*event_cbfn) (void *cbarg,
bfa_pport_event_t event);
union {
- union bfi_pport_i2h_msg_u i2hmsg;
+ union bfi_fcport_i2h_msg_u i2hmsg;
} event_arg;
void *bfad; /* BFA driver handle */
+ struct bfa_fcport_ln_s ln; /* Link Notification */
struct bfa_cb_qe_s hcb_qe; /* BFA callback queue elem */
- enum bfa_pport_linkstate hcb_event;
- /* link event for callback */
+ struct bfa_timer_s timer; /* timer */
 u32 msgtag; /* firmware msg tag for reply */
u8 *stats_kva;
u64 stats_pa;
- union bfa_pport_stats_u *stats; /* pport stats */
- u32 mypid:24;
- u32 rsvd_b:8;
- struct bfa_timer_s timer; /* timer */
- union bfa_pport_stats_u *stats_ret;
- /* driver stats location */
- bfa_status_t stats_status;
- /* stats/statsclr status */
- bfa_boolean_t stats_busy;
- /* outstanding stats/statsclr */
- bfa_boolean_t stats_qfull;
- bfa_boolean_t diag_busy;
- /* diag busy status */
- bfa_boolean_t beacon;
- /* port beacon status */
- bfa_boolean_t link_e2e_beacon;
- /* link beacon status */
- bfa_cb_pport_t stats_cbfn;
- /* driver callback function */
- void *stats_cbarg;
- /* *!< user callback arg */
+ union bfa_fcport_stats_u *stats;
+ union bfa_fcport_stats_u *stats_ret; /* driver stats location */
+ bfa_status_t stats_status; /* stats/statsclr status */
+ bfa_boolean_t stats_busy; /* outstanding stats/statsclr */
+ bfa_boolean_t stats_qfull;
+ bfa_cb_pport_t stats_cbfn; /* driver callback function */
+ void *stats_cbarg; /* user callback arg */
+ bfa_boolean_t diag_busy; /* diag busy status */
+ bfa_boolean_t beacon; /* port beacon status */
+ bfa_boolean_t link_e2e_beacon; /* link beacon status */
};
-#define BFA_PORT_MOD(__bfa) (&(__bfa)->modules.pport)
+#define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport)
/*
* public functions
*/
-void bfa_pport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+void bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
#endif /* __BFA_PORT_PRIV_H__ */
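
The reshuffled bfa_fcport_s keeps the 24-bit FC port id in the mypid:24 / rsvd_b:8 bitfield pair. A small sketch of that packing; C bitfield layout is implementation-defined, which is why portable code often masks explicitly instead.

#include <stdint.h>
#include <stdio.h>

struct fcport_id {
	uint32_t mypid:24;	/* FC_ID fits in 3 bytes */
	uint32_t rsvd:8;
};

int main(void)
{
	struct fcport_id id = { .mypid = 0x010203, .rsvd = 0 };
	uint32_t masked = 0x010203u & 0xFFFFFFu;	/* explicit mask */

	printf("%06x %06x\n", (unsigned)id.mypid, (unsigned)masked);
	return 0;
}
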
diff --git a/drivers/scsi/bfa/bfa_priv.h b/drivers/scsi/bfa/bfa_priv.h
index 0747a6b26f7b..be80fc7e1b0e 100644
--- a/drivers/scsi/bfa/bfa_priv.h
+++ b/drivers/scsi/bfa/bfa_priv.h
@@ -101,7 +101,7 @@ extern bfa_boolean_t bfa_auto_recover;
extern struct bfa_module_s hal_mod_flash;
extern struct bfa_module_s hal_mod_fcdiag;
extern struct bfa_module_s hal_mod_sgpg;
-extern struct bfa_module_s hal_mod_pport;
+extern struct bfa_module_s hal_mod_fcport;
extern struct bfa_module_s hal_mod_fcxp;
extern struct bfa_module_s hal_mod_lps;
extern struct bfa_module_s hal_mod_uf;
diff --git a/drivers/scsi/bfa/bfa_rport.c b/drivers/scsi/bfa/bfa_rport.c
index 3e1990a74258..7c509fa244e4 100644
--- a/drivers/scsi/bfa/bfa_rport.c
+++ b/drivers/scsi/bfa/bfa_rport.c
@@ -114,7 +114,7 @@ bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
default:
bfa_stats(rp, sm_un_unexp);
- bfa_assert(0);
+ bfa_sm_fault(rp->bfa, event);
}
}
@@ -146,7 +146,7 @@ bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
default:
bfa_stats(rp, sm_cr_unexp);
- bfa_assert(0);
+ bfa_sm_fault(rp->bfa, event);
}
}
@@ -183,7 +183,7 @@ bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
default:
bfa_stats(rp, sm_fwc_unexp);
- bfa_assert(0);
+ bfa_sm_fault(rp->bfa, event);
}
}
@@ -224,7 +224,7 @@ bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
default:
bfa_stats(rp, sm_fwc_unexp);
- bfa_assert(0);
+ bfa_sm_fault(rp->bfa, event);
}
}
@@ -296,7 +296,7 @@ bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
default:
bfa_stats(rp, sm_on_unexp);
- bfa_assert(0);
+ bfa_sm_fault(rp->bfa, event);
}
}
@@ -329,7 +329,7 @@ bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
default:
bfa_stats(rp, sm_fwd_unexp);
- bfa_assert(0);
+ bfa_sm_fault(rp->bfa, event);
}
}
@@ -359,7 +359,7 @@ bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
default:
bfa_stats(rp, sm_fwd_unexp);
- bfa_assert(0);
+ bfa_sm_fault(rp->bfa, event);
}
}
@@ -394,7 +394,7 @@ bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
default:
bfa_stats(rp, sm_off_unexp);
- bfa_assert(0);
+ bfa_sm_fault(rp->bfa, event);
}
}
@@ -421,7 +421,7 @@ bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rp->bfa, event);
}
}
@@ -446,7 +446,7 @@ bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rp->bfa, event);
}
}
@@ -477,7 +477,7 @@ bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
default:
bfa_stats(rp, sm_delp_unexp);
- bfa_assert(0);
+ bfa_sm_fault(rp->bfa, event);
}
}
@@ -512,7 +512,7 @@ bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
default:
bfa_stats(rp, sm_offp_unexp);
- bfa_assert(0);
+ bfa_sm_fault(rp->bfa, event);
}
}
@@ -550,7 +550,7 @@ bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
default:
bfa_stats(rp, sm_iocd_unexp);
- bfa_assert(0);
+ bfa_sm_fault(rp->bfa, event);
}
}
diff --git a/drivers/scsi/bfa/bfa_trcmod_priv.h b/drivers/scsi/bfa/bfa_trcmod_priv.h
index b3562dce7e9f..a7a82610db85 100644
--- a/drivers/scsi/bfa/bfa_trcmod_priv.h
+++ b/drivers/scsi/bfa/bfa_trcmod_priv.h
@@ -29,38 +29,36 @@
* !!! needed between trace utility and driver version
*/
enum {
- BFA_TRC_HAL_IOC = 1,
- BFA_TRC_HAL_INTR = 2,
- BFA_TRC_HAL_FCXP = 3,
- BFA_TRC_HAL_UF = 4,
- BFA_TRC_HAL_DIAG = 5,
- BFA_TRC_HAL_RPORT = 6,
- BFA_TRC_HAL_FCPIM = 7,
- BFA_TRC_HAL_IOIM = 8,
- BFA_TRC_HAL_TSKIM = 9,
- BFA_TRC_HAL_ITNIM = 10,
- BFA_TRC_HAL_PPORT = 11,
- BFA_TRC_HAL_SGPG = 12,
- BFA_TRC_HAL_FLASH = 13,
- BFA_TRC_HAL_DEBUG = 14,
- BFA_TRC_HAL_WWN = 15,
- BFA_TRC_HAL_FLASH_RAW = 16,
- BFA_TRC_HAL_SBOOT = 17,
- BFA_TRC_HAL_SBOOT_IO = 18,
- BFA_TRC_HAL_SBOOT_INTR = 19,
- BFA_TRC_HAL_SBTEST = 20,
- BFA_TRC_HAL_IPFC = 21,
- BFA_TRC_HAL_IOCFC = 22,
- BFA_TRC_HAL_FCPTM = 23,
- BFA_TRC_HAL_IOTM = 24,
- BFA_TRC_HAL_TSKTM = 25,
- BFA_TRC_HAL_TIN = 26,
- BFA_TRC_HAL_LPS = 27,
- BFA_TRC_HAL_FCDIAG = 28,
- BFA_TRC_HAL_PBIND = 29,
- BFA_TRC_HAL_IOCFC_CT = 30,
- BFA_TRC_HAL_IOCFC_CB = 31,
- BFA_TRC_HAL_IOCFC_Q = 32,
+ BFA_TRC_HAL_INTR = 1,
+ BFA_TRC_HAL_FCXP = 2,
+ BFA_TRC_HAL_UF = 3,
+ BFA_TRC_HAL_RPORT = 4,
+ BFA_TRC_HAL_FCPIM = 5,
+ BFA_TRC_HAL_IOIM = 6,
+ BFA_TRC_HAL_TSKIM = 7,
+ BFA_TRC_HAL_ITNIM = 8,
+ BFA_TRC_HAL_FCPORT = 9,
+ BFA_TRC_HAL_SGPG = 10,
+ BFA_TRC_HAL_FLASH = 11,
+ BFA_TRC_HAL_DEBUG = 12,
+ BFA_TRC_HAL_WWN = 13,
+ BFA_TRC_HAL_FLASH_RAW = 14,
+ BFA_TRC_HAL_SBOOT = 15,
+ BFA_TRC_HAL_SBOOT_IO = 16,
+ BFA_TRC_HAL_SBOOT_INTR = 17,
+ BFA_TRC_HAL_SBTEST = 18,
+ BFA_TRC_HAL_IPFC = 19,
+ BFA_TRC_HAL_IOCFC = 20,
+ BFA_TRC_HAL_FCPTM = 21,
+ BFA_TRC_HAL_IOTM = 22,
+ BFA_TRC_HAL_TSKTM = 23,
+ BFA_TRC_HAL_TIN = 24,
+ BFA_TRC_HAL_LPS = 25,
+ BFA_TRC_HAL_FCDIAG = 26,
+ BFA_TRC_HAL_PBIND = 27,
+ BFA_TRC_HAL_IOCFC_CT = 28,
+ BFA_TRC_HAL_IOCFC_CB = 29,
+ BFA_TRC_HAL_IOCFC_Q = 30,
};
#endif /* __BFA_TRCMOD_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_tskim.c b/drivers/scsi/bfa/bfa_tskim.c
index ff7a4dc0bf3c..ad9aaaedd3f1 100644
--- a/drivers/scsi/bfa/bfa_tskim.c
+++ b/drivers/scsi/bfa/bfa_tskim.c
@@ -110,7 +110,7 @@ bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(tskim->bfa, event);
}
}
@@ -146,7 +146,7 @@ bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(tskim->bfa, event);
}
}
@@ -178,7 +178,7 @@ bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(tskim->bfa, event);
}
}
@@ -207,7 +207,7 @@ bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(tskim->bfa, event);
}
}
@@ -242,7 +242,7 @@ bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(tskim->bfa, event);
}
}
@@ -277,7 +277,7 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(tskim->bfa, event);
}
}
@@ -303,7 +303,7 @@ bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(tskim->bfa, event);
}
}
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index b52b773d49d9..6bff08ea4029 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -20,6 +20,7 @@
*/
#include <linux/module.h>
+#include <linux/kthread.h>
#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfad_tm.h"
@@ -53,6 +54,7 @@ static int log_level = BFA_LOG_WARNING;
static int ioc_auto_recover = BFA_TRUE;
static int ipfc_enable = BFA_FALSE;
static int ipfc_mtu = -1;
+static int fdmi_enable = BFA_TRUE;
int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
int bfa_linkup_delay = -1;
@@ -74,6 +76,7 @@ module_param(log_level, int, S_IRUGO | S_IWUSR);
module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR);
module_param(ipfc_enable, int, S_IRUGO | S_IWUSR);
module_param(ipfc_mtu, int, S_IRUGO | S_IWUSR);
+module_param(fdmi_enable, int, S_IRUGO | S_IWUSR);
module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR);
/*
@@ -95,6 +98,8 @@ bfad_fc4_probe(struct bfad_s *bfad)
if (ipfc_enable)
bfad_ipfc_probe(bfad);
+
+ bfad->bfad_flags |= BFAD_FC4_PROBE_DONE;
ext:
return rc;
}
@@ -106,6 +111,7 @@ bfad_fc4_probe_undo(struct bfad_s *bfad)
bfad_tm_probe_undo(bfad);
if (ipfc_enable)
bfad_ipfc_probe_undo(bfad);
+ bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
}
static void
@@ -173,9 +179,19 @@ bfa_cb_init(void *drv, bfa_status_t init_status)
{
struct bfad_s *bfad = drv;
- if (init_status == BFA_STATUS_OK)
+ if (init_status == BFA_STATUS_OK) {
bfad->bfad_flags |= BFAD_HAL_INIT_DONE;
+ /* If the BFAD_HAL_INIT_FAIL flag is set, wake up the
+ * kernel thread to start the bfad operations now that
+ * HAL init is done.
+ */
+ if ((bfad->bfad_flags & BFAD_HAL_INIT_FAIL)) {
+ bfad->bfad_flags &= ~BFAD_HAL_INIT_FAIL;
+ wake_up_process(bfad->bfad_tsk);
+ }
+ }
+
complete(&bfad->comp);
}
@@ -648,7 +664,7 @@ bfad_fcs_port_cfg(struct bfad_s *bfad)
sprintf(symname, "%s-%d", BFAD_DRIVER_NAME, bfad->inst_no);
memcpy(port_cfg.sym_name.symname, symname, strlen(symname));
- bfa_pport_get_attr(&bfad->bfa, &attr);
+ bfa_fcport_get_attr(&bfad->bfa, &attr);
port_cfg.nwwn = attr.nwwn;
port_cfg.pwwn = attr.pwwn;
@@ -661,7 +677,6 @@ bfad_drv_init(struct bfad_s *bfad)
bfa_status_t rc;
unsigned long flags;
struct bfa_fcs_driver_info_s driver_info;
- int i;
bfad->cfg_data.rport_del_timeout = rport_del_timeout;
bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth;
@@ -681,12 +696,7 @@ bfad_drv_init(struct bfad_s *bfad)
bfa_init_log(&bfad->bfa, bfad->logmod);
bfa_init_trc(&bfad->bfa, bfad->trcmod);
bfa_init_aen(&bfad->bfa, bfad->aen);
- INIT_LIST_HEAD(&bfad->file_q);
- INIT_LIST_HEAD(&bfad->file_free_q);
- for (i = 0; i < BFAD_AEN_MAX_APPS; i++) {
- bfa_q_qe_init(&bfad->file_buf[i].qe);
- list_add_tail(&bfad->file_buf[i].qe, &bfad->file_free_q);
- }
+ memset(bfad->file_map, 0, sizeof(bfad->file_map));
bfa_init_plog(&bfad->bfa, &bfad->plog_buf);
bfa_plog_init(&bfad->plog_buf);
bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START,
@@ -746,8 +756,16 @@ bfad_drv_init(struct bfad_s *bfad)
bfa_fcs_log_init(&bfad->bfa_fcs, bfad->logmod);
bfa_fcs_trc_init(&bfad->bfa_fcs, bfad->trcmod);
bfa_fcs_aen_init(&bfad->bfa_fcs, bfad->aen);
- bfa_fcs_init(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
+ bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
+
+ /* Do FCS init only when HAL init is done */
+ if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
+ bfa_fcs_init(&bfad->bfa_fcs);
+ bfad->bfad_flags |= BFAD_FCS_INIT_DONE;
+ }
+
bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
+ bfa_fcs_set_fdmi_param(&bfad->bfa_fcs, fdmi_enable);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
bfad->bfad_flags |= BFAD_DRV_INIT_DONE;
@@ -763,12 +781,21 @@ out_hal_mem_alloc_failure:
void
bfad_drv_uninit(struct bfad_s *bfad)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ init_completion(&bfad->comp);
+ bfa_stop(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ wait_for_completion(&bfad->comp);
+
del_timer_sync(&bfad->hal_tmo);
bfa_isr_disable(&bfad->bfa);
bfa_detach(&bfad->bfa);
bfad_remove_intr(bfad);
- bfa_assert(list_empty(&bfad->file_q));
bfad_hal_mem_release(bfad);
+
+ bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE;
}
void
@@ -859,6 +886,86 @@ bfad_drv_log_level_set(struct bfad_s *bfad)
bfa_log_set_level_all(&bfad->log_data, log_level);
}
+bfa_status_t
+bfad_start_ops(struct bfad_s *bfad)
+{
+ int retval;
+
+ /* PPORT FCS config */
+ bfad_fcs_port_cfg(bfad);
+
+ retval = bfad_cfg_pport(bfad, BFA_PORT_ROLE_FCP_IM);
+ if (retval != BFA_STATUS_OK)
+ goto out_cfg_pport_failure;
+
+ /* BFAD level FC4 (IM/TM/IPFC) specific resource allocation */
+ retval = bfad_fc4_probe(bfad);
+ if (retval != BFA_STATUS_OK) {
+ printk(KERN_WARNING "bfad_fc4_probe failed\n");
+ goto out_fc4_probe_failure;
+ }
+
+ bfad_drv_start(bfad);
+
+ /*
+ * If bfa_linkup_delay is set to the default of -1, try to
+ * retrieve the value using bfad_os_get_linkup_delay();
+ * else use the passed-in module param value as bfa_linkup_delay.
+ */
+ if (bfa_linkup_delay < 0) {
+
+ bfa_linkup_delay = bfad_os_get_linkup_delay(bfad);
+ bfad_os_rport_online_wait(bfad);
+ bfa_linkup_delay = -1;
+
+ } else {
+ bfad_os_rport_online_wait(bfad);
+ }
+
+ bfa_log(bfad->logmod, BFA_LOG_LINUX_DEVICE_CLAIMED, bfad->pci_name);
+
+ return BFA_STATUS_OK;
+
+out_fc4_probe_failure:
+ bfad_fc4_probe_undo(bfad);
+ bfad_uncfg_pport(bfad);
+out_cfg_pport_failure:
+ return BFA_STATUS_FAILED;
+}
+
+int
+bfad_worker(void *ptr)
+{
+ struct bfad_s *bfad;
+ unsigned long flags;
+
+ bfad = (struct bfad_s *)ptr;
+
+ while (!kthread_should_stop()) {
+
+ /* Check if FCS init was already done from bfad_drv_init;
+ * if not, do the FCS init here and set the flag.
+ */
+ if (!(bfad->bfad_flags & BFAD_FCS_INIT_DONE)) {
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_fcs_init(&bfad->bfa_fcs);
+ bfad->bfad_flags |= BFAD_FCS_INIT_DONE;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ }
+
+ /* Start the bfad operations after HAL init done */
+ bfad_start_ops(bfad);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfad->bfad_tsk = NULL;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ break;
+ }
+
+ return 0;
+}
+
/*
* PCI_entry PCI driver entries * {
*/
@@ -871,7 +978,6 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
struct bfad_s *bfad;
int error = -ENODEV, retval;
- char buf[16];
/*
* For single port cards - only claim function 0
@@ -902,8 +1008,7 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
bfa_trc(bfad, bfad_inst);
bfad->logmod = &bfad->log_data;
- sprintf(buf, "%d", bfad_inst);
- bfa_log_init(bfad->logmod, buf, bfa_os_printf);
+ bfa_log_init(bfad->logmod, (char *)pci_name(pdev), bfa_os_printf);
bfad_drv_log_level_set(bfad);
@@ -933,57 +1038,39 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
bfad->ref_count = 0;
bfad->pport.bfad = bfad;
+ bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad, "%s",
+ "bfad_worker");
+ if (IS_ERR(bfad->bfad_tsk)) {
+ printk(KERN_INFO "bfad[%d]: Kernel thread"
+ " creation failed!\n",
+ bfad->inst_no);
+ goto out_kthread_create_failure;
+ }
+
retval = bfad_drv_init(bfad);
if (retval != BFA_STATUS_OK)
goto out_drv_init_failure;
if (!(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
+ bfad->bfad_flags |= BFAD_HAL_INIT_FAIL;
printk(KERN_WARNING "bfad%d: hal init failed\n", bfad->inst_no);
goto ok;
}
- /*
- * PPORT FCS config
- */
- bfad_fcs_port_cfg(bfad);
-
- retval = bfad_cfg_pport(bfad, BFA_PORT_ROLE_FCP_IM);
+ retval = bfad_start_ops(bfad);
if (retval != BFA_STATUS_OK)
- goto out_cfg_pport_failure;
-
- /*
- * BFAD level FC4 (IM/TM/IPFC) specific resource allocation
- */
- retval = bfad_fc4_probe(bfad);
- if (retval != BFA_STATUS_OK) {
- printk(KERN_WARNING "bfad_fc4_probe failed\n");
- goto out_fc4_probe_failure;
- }
+ goto out_start_ops_failure;
- bfad_drv_start(bfad);
-
- /*
- * If bfa_linkup_delay is set to -1 default; try to retrive the
- * value using the bfad_os_get_linkup_delay(); else use the
- * passed in module param value as the bfa_linkup_delay.
- */
- if (bfa_linkup_delay < 0) {
- bfa_linkup_delay = bfad_os_get_linkup_delay(bfad);
- bfad_os_rport_online_wait(bfad);
- bfa_linkup_delay = -1;
- } else {
- bfad_os_rport_online_wait(bfad);
- }
+ kthread_stop(bfad->bfad_tsk);
+ bfad->bfad_tsk = NULL;
- bfa_log(bfad->logmod, BFA_LOG_LINUX_DEVICE_CLAIMED, bfad->pci_name);
ok:
return 0;
-out_fc4_probe_failure:
- bfad_fc4_probe_undo(bfad);
- bfad_uncfg_pport(bfad);
-out_cfg_pport_failure:
+out_start_ops_failure:
bfad_drv_uninit(bfad);
out_drv_init_failure:
+ kthread_stop(bfad->bfad_tsk);
+out_kthread_create_failure:
mutex_lock(&bfad_mutex);
bfad_inst--;
list_del(&bfad->list_entry);
@@ -1008,6 +1095,11 @@ bfad_pci_remove(struct pci_dev *pdev)
bfa_trc(bfad, bfad->inst_no);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (bfad->bfad_tsk != NULL)
+ kthread_stop(bfad->bfad_tsk);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
if ((bfad->bfad_flags & BFAD_DRV_INIT_DONE)
&& !(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
@@ -1024,13 +1116,25 @@ bfad_pci_remove(struct pci_dev *pdev)
goto remove_sysfs;
}
- if (bfad->bfad_flags & BFAD_HAL_START_DONE)
+ if (bfad->bfad_flags & BFAD_HAL_START_DONE) {
bfad_drv_stop(bfad);
+ } else if (bfad->bfad_flags & BFAD_DRV_INIT_DONE) {
+ /* Invoke bfa_stop() before bfa_detach
+ * when HAL and driver init succeeded
+ * but HAL start did not occur.
+ */
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ init_completion(&bfad->comp);
+ bfa_stop(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ wait_for_completion(&bfad->comp);
+ }
bfad_remove_intr(bfad);
-
del_timer_sync(&bfad->hal_tmo);
- bfad_fc4_probe_undo(bfad);
+
+ if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE)
+ bfad_fc4_probe_undo(bfad);
if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
bfad_uncfg_pport(bfad);
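
The probe path now creates bfad_worker before HAL init and, when a deferred init later succeeds, bfa_cb_init() wakes it to run the remaining FCS init and bfad_start_ops() exactly once. Kernel threads cannot run standalone, so here is a userspace pthread analog of that handshake; all names are illustrative.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv   = PTHREAD_COND_INITIALIZER;
static int hal_init_done;

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!hal_init_done)		/* parked until init succeeds */
		pthread_cond_wait(&cv, &lock);
	pthread_mutex_unlock(&lock);

	puts("worker: FCS init + start_ops");	/* deferred start-up */
	return NULL;
}

static void hal_init_cb(void)		/* analog of bfa_cb_init(OK) */
{
	pthread_mutex_lock(&lock);
	hal_init_done = 1;
	pthread_cond_signal(&cv);	/* analog of wake_up_process() */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t tsk;

	pthread_create(&tsk, NULL, worker, NULL);
	hal_init_cb();			/* HAL init completes later */
	pthread_join(tsk, NULL);
	return 0;
}
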
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 9129ae3040ff..d97f69191838 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -141,7 +141,7 @@ bfad_im_get_host_port_type(struct Scsi_Host *shost)
struct bfad_s *bfad = im_port->bfad;
struct bfa_pport_attr_s attr;
- bfa_pport_get_attr(&bfad->bfa, &attr);
+ bfa_fcport_get_attr(&bfad->bfa, &attr);
switch (attr.port_type) {
case BFA_PPORT_TYPE_NPORT:
@@ -173,7 +173,7 @@ bfad_im_get_host_port_state(struct Scsi_Host *shost)
struct bfad_s *bfad = im_port->bfad;
struct bfa_pport_attr_s attr;
- bfa_pport_get_attr(&bfad->bfa, &attr);
+ bfa_fcport_get_attr(&bfad->bfa, &attr);
switch (attr.port_state) {
case BFA_PPORT_ST_LINKDOWN:
@@ -229,8 +229,10 @@ bfad_im_get_host_speed(struct Scsi_Host *shost)
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
struct bfa_pport_attr_s attr;
+ unsigned long flags;
- bfa_pport_get_attr(&bfad->bfa, &attr);
+ spin_lock_irqsave(shost->host_lock, flags);
+ bfa_fcport_get_attr(&bfad->bfa, &attr);
switch (attr.speed) {
case BFA_PPORT_SPEED_8GBPS:
fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
@@ -248,6 +250,7 @@ bfad_im_get_host_speed(struct Scsi_Host *shost)
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
}
+ spin_unlock_irqrestore(shost->host_lock, flags);
}
/**
@@ -285,7 +288,7 @@ bfad_im_get_stats(struct Scsi_Host *shost)
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
memset(hstats, 0, sizeof(struct fc_host_statistics));
- rc = bfa_pport_get_stats(&bfad->bfa,
+ rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa),
(union bfa_pport_stats_u *) hstats,
bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -312,7 +315,8 @@ bfad_im_reset_stats(struct Scsi_Host *shost)
init_completion(&fcomp.comp);
spin_lock_irqsave(&bfad->bfad_lock, flags);
- rc = bfa_pport_clear_stats(&bfad->bfa, bfad_hcb_comp, &fcomp);
+ rc = bfa_port_clear_stats(BFA_FCPORT(&bfad->bfa), bfad_hcb_comp,
+ &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
if (rc != BFA_STATUS_OK)
@@ -421,12 +425,10 @@ bfad_im_serial_num_show(struct device *dev, struct device_attribute *attr,
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
- struct bfa_ioc_attr_s ioc_attr;
+ char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
- memset(&ioc_attr, 0, sizeof(ioc_attr));
- bfa_get_attr(&bfad->bfa, &ioc_attr);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- ioc_attr.adapter_attr.serial_num);
+ bfa_get_adapter_serial_num(&bfad->bfa, serial_num);
+ return snprintf(buf, PAGE_SIZE, "%s\n", serial_num);
}
static ssize_t
@@ -437,11 +439,10 @@ bfad_im_model_show(struct device *dev, struct device_attribute *attr,
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
- struct bfa_ioc_attr_s ioc_attr;
+ char model[BFA_ADAPTER_MODEL_NAME_LEN];
- memset(&ioc_attr, 0, sizeof(ioc_attr));
- bfa_get_attr(&bfad->bfa, &ioc_attr);
- return snprintf(buf, PAGE_SIZE, "%s\n", ioc_attr.adapter_attr.model);
+ bfa_get_adapter_model(&bfad->bfa, model);
+ return snprintf(buf, PAGE_SIZE, "%s\n", model);
}
static ssize_t
@@ -452,12 +453,10 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
- struct bfa_ioc_attr_s ioc_attr;
+ char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
- memset(&ioc_attr, 0, sizeof(ioc_attr));
- bfa_get_attr(&bfad->bfa, &ioc_attr);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- ioc_attr.adapter_attr.model_descr);
+ bfa_get_adapter_model(&bfad->bfa, model_descr);
+ return snprintf(buf, PAGE_SIZE, "%s\n", model_descr);
}
static ssize_t
@@ -482,14 +481,13 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
- struct bfa_ioc_attr_s ioc_attr;
-
- memset(&ioc_attr, 0, sizeof(ioc_attr));
- bfa_get_attr(&bfad->bfa, &ioc_attr);
+ char model[BFA_ADAPTER_MODEL_NAME_LEN];
+ char fw_ver[BFA_VERSION_LEN];
+ bfa_get_adapter_model(&bfad->bfa, model);
+ bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
return snprintf(buf, PAGE_SIZE, "Brocade %s FV%s DV%s\n",
- ioc_attr.adapter_attr.model,
- ioc_attr.adapter_attr.fw_ver, BFAD_DRIVER_VERSION);
+ model, fw_ver, BFAD_DRIVER_VERSION);
}
static ssize_t
@@ -500,11 +498,10 @@ bfad_im_hw_version_show(struct device *dev, struct device_attribute *attr,
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
- struct bfa_ioc_attr_s ioc_attr;
+ char hw_ver[BFA_VERSION_LEN];
- memset(&ioc_attr, 0, sizeof(ioc_attr));
- bfa_get_attr(&bfad->bfa, &ioc_attr);
- return snprintf(buf, PAGE_SIZE, "%s\n", ioc_attr.adapter_attr.hw_ver);
+ bfa_get_pci_chip_rev(&bfad->bfa, hw_ver);
+ return snprintf(buf, PAGE_SIZE, "%s\n", hw_ver);
}
static ssize_t
@@ -522,12 +519,10 @@ bfad_im_optionrom_version_show(struct device *dev,
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
- struct bfa_ioc_attr_s ioc_attr;
+ char optrom_ver[BFA_VERSION_LEN];
- memset(&ioc_attr, 0, sizeof(ioc_attr));
- bfa_get_attr(&bfad->bfa, &ioc_attr);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- ioc_attr.adapter_attr.optrom_ver);
+ bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver);
+ return snprintf(buf, PAGE_SIZE, "%s\n", optrom_ver);
}
static ssize_t
@@ -538,11 +533,10 @@ bfad_im_fw_version_show(struct device *dev, struct device_attribute *attr,
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
- struct bfa_ioc_attr_s ioc_attr;
+ char fw_ver[BFA_VERSION_LEN];
- memset(&ioc_attr, 0, sizeof(ioc_attr));
- bfa_get_attr(&bfad->bfa, &ioc_attr);
- return snprintf(buf, PAGE_SIZE, "%s\n", ioc_attr.adapter_attr.fw_ver);
+ bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
+ return snprintf(buf, PAGE_SIZE, "%s\n", fw_ver);
}
static ssize_t
@@ -553,11 +547,9 @@ bfad_im_num_of_ports_show(struct device *dev, struct device_attribute *attr,
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
- struct bfa_ioc_attr_s ioc_attr;
- memset(&ioc_attr, 0, sizeof(ioc_attr));
- bfa_get_attr(&bfad->bfa, &ioc_attr);
- return snprintf(buf, PAGE_SIZE, "%d\n", ioc_attr.adapter_attr.nports);
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ bfa_get_nports(&bfad->bfa));
}
static ssize_t
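
Each sysfs show handler above used to copy a whole struct bfa_ioc_attr_s onto the stack just to print one string; the rewrite calls a targeted getter into a small fixed buffer. A sketch of the before/after shape, with a padded stand-in struct and a hypothetical model string in place of the real attribute layout.

#include <stdio.h>
#include <string.h>

#define MODEL_LEN 16

struct big_attr {			/* stand-in for bfa_ioc_attr_s */
	char model[MODEL_LEN];
	char other[1024];		/* many unrelated fields */
};

static struct big_attr hw = { .model = "BR-XXX" };

/* Old shape: copy everything to read one field. */
static int show_model_old(char *buf, size_t len)
{
	struct big_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr = hw;			/* ~1 KB copied per sysfs read */
	return snprintf(buf, len, "%s\n", attr.model);
}

/* New shape: targeted getter into a small fixed buffer. */
static void get_adapter_model(char *model)
{
	memcpy(model, hw.model, MODEL_LEN);
}

static int show_model_new(char *buf, size_t len)
{
	char model[MODEL_LEN];

	get_adapter_model(model);
	return snprintf(buf, len, "%s\n", model);
}

int main(void)
{
	char out[32];

	show_model_old(out, sizeof(out));
	show_model_new(out, sizeof(out));
	fputs(out, stdout);
	return 0;
}
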
diff --git a/drivers/scsi/bfa/bfad_attr.h b/drivers/scsi/bfa/bfad_attr.h
index 4d3312da6a81..bf0102076508 100644
--- a/drivers/scsi/bfa/bfad_attr.h
+++ b/drivers/scsi/bfa/bfad_attr.h
@@ -17,9 +17,6 @@
#ifndef __BFAD_ATTR_H__
#define __BFAD_ATTR_H__
-/**
- * bfad_attr.h VMware driver configuration interface module.
- */
/**
* FC_transport_template FC transport template
@@ -52,12 +49,6 @@ bfad_im_get_starget_port_name(struct scsi_target *starget);
void
bfad_im_get_host_port_id(struct Scsi_Host *shost);
-/**
- * FC transport template entry, issue a LIP.
- */
-int
-bfad_im_issue_fc_host_lip(struct Scsi_Host *shost);
-
struct Scsi_Host*
bfad_os_starget_to_shost(struct scsi_target *starget);
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 172c81e25c1c..107848cd3b6d 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -46,7 +46,7 @@
#ifdef BFA_DRIVER_VERSION
#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
#else
-#define BFAD_DRIVER_VERSION "2.0.0.0"
+#define BFAD_DRIVER_VERSION "2.1.2.1"
#endif
@@ -62,7 +62,9 @@
#define BFAD_HAL_START_DONE 0x00000010
#define BFAD_PORT_ONLINE 0x00000020
#define BFAD_RPORT_ONLINE 0x00000040
-
+#define BFAD_FCS_INIT_DONE 0x00000080
+#define BFAD_HAL_INIT_FAIL 0x00000100
+#define BFAD_FC4_PROBE_DONE 0x00000200
#define BFAD_PORT_DELETE 0x00000001
/*
@@ -137,12 +139,16 @@ struct bfad_cfg_param_s {
u32 binding_method;
};
-#define BFAD_AEN_MAX_APPS 8
-struct bfad_aen_file_s {
- struct list_head qe;
- struct bfad_s *bfad;
- s32 ri;
- s32 app_id;
+union bfad_tmp_buf {
+ /* From struct bfa_adapter_attr_s */
+ char manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
+ char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
+ char model[BFA_ADAPTER_MODEL_NAME_LEN];
+ char fw_ver[BFA_VERSION_LEN];
+ char optrom_ver[BFA_VERSION_LEN];
+
+ /* From struct bfa_ioc_pci_attr_s */
+ u8 chip_rev[BFA_IOC_CHIP_REV_LEN]; /* chip revision */
};
/*
@@ -168,6 +174,7 @@ struct bfad_s {
u32 inst_no; /* BFAD instance number */
u32 bfad_flags;
spinlock_t bfad_lock;
+ struct task_struct *bfad_tsk;
struct bfad_cfg_param_s cfg_data;
struct bfad_msix_s msix_tab[MAX_MSIX_ENTRY];
int nvec;
@@ -183,18 +190,12 @@ struct bfad_s {
struct bfa_log_mod_s *logmod;
struct bfa_aen_s *aen;
struct bfa_aen_s aen_buf;
- struct bfad_aen_file_s file_buf[BFAD_AEN_MAX_APPS];
- struct list_head file_q;
- struct list_head file_free_q;
+ void *file_map[BFA_AEN_MAX_APP];
struct bfa_plog_s plog_buf;
int ref_count;
bfa_boolean_t ipfc_enabled;
+ union bfad_tmp_buf tmp_buf;
struct fc_host_statistics link_stats;
-
- struct kobject *bfa_kobj;
- struct kobject *ioc_kobj;
- struct kobject *pport_kobj;
- struct kobject *lport_kobj;
};
/*
@@ -258,6 +259,7 @@ bfa_status_t bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
struct bfa_port_cfg_s *port_cfg);
bfa_status_t bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role);
bfa_status_t bfad_drv_init(struct bfad_s *bfad);
+bfa_status_t bfad_start_ops(struct bfad_s *bfad);
void bfad_drv_start(struct bfad_s *bfad);
void bfad_uncfg_pport(struct bfad_s *bfad);
void bfad_drv_stop(struct bfad_s *bfad);
@@ -279,6 +281,7 @@ void bfad_drv_uninit(struct bfad_s *bfad);
void bfad_drv_log_level_set(struct bfad_s *bfad);
bfa_status_t bfad_fc4_module_init(void);
void bfad_fc4_module_exit(void);
+int bfad_worker(void *ptr);
void bfad_pci_remove(struct pci_dev *pdev);
int bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid);
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index f788c2a0ab07..f9fc67a25bf2 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -43,11 +43,11 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
struct bfad_s *bfad = drv;
struct bfad_itnim_data_s *itnim_data;
struct bfad_itnim_s *itnim;
+ u8 host_status = DID_OK;
switch (io_status) {
case BFI_IOIM_STS_OK:
bfa_trc(bfad, scsi_status);
- cmnd->result = ScsiResult(DID_OK, scsi_status);
scsi_set_resid(cmnd, 0);
if (sns_len > 0) {
@@ -56,8 +56,18 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
sns_len = SCSI_SENSE_BUFFERSIZE;
memcpy(cmnd->sense_buffer, sns_info, sns_len);
}
- if (residue > 0)
+ if (residue > 0) {
+ bfa_trc(bfad, residue);
scsi_set_resid(cmnd, residue);
+ if (!sns_len && (scsi_status == SAM_STAT_GOOD) &&
+ (scsi_bufflen(cmnd) - residue) <
+ cmnd->underflow) {
+ bfa_trc(bfad, 0);
+ host_status = DID_ERROR;
+ }
+ }
+ cmnd->result = ScsiResult(host_status, scsi_status);
+
break;
case BFI_IOIM_STS_ABORTED:
@@ -167,17 +177,15 @@ bfad_im_info(struct Scsi_Host *shost)
static char bfa_buf[256];
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
- struct bfa_ioc_attr_s ioc_attr;
struct bfad_s *bfad = im_port->bfad;
+ char model[BFA_ADAPTER_MODEL_NAME_LEN];
- memset(&ioc_attr, 0, sizeof(ioc_attr));
- bfa_get_attr(&bfad->bfa, &ioc_attr);
+ bfa_get_adapter_model(&bfad->bfa, model);
memset(bfa_buf, 0, sizeof(bfa_buf));
snprintf(bfa_buf, sizeof(bfa_buf),
- "Brocade FC/FCOE Adapter, " "model: %s hwpath: %s driver: %s",
- ioc_attr.adapter_attr.model, bfad->pci_name,
- BFAD_DRIVER_VERSION);
+ "Brocade FC/FCOE Adapter, " "model: %s hwpath: %s driver: %s",
+ model, bfad->pci_name, BFAD_DRIVER_VERSION);
return bfa_buf;
}
@@ -501,16 +509,6 @@ void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim)
}
/**
- * Path TOV processing begin notification -- dummy for linux
- */
-void
-bfa_fcb_itnim_tov_begin(struct bfad_itnim_s *itnim)
-{
-}
-
-
-
-/**
* Allocate a Scsi_Host for a port.
*/
int
@@ -931,10 +929,9 @@ bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
struct Scsi_Host *host = im_port->shost;
struct bfad_s *bfad = im_port->bfad;
struct bfad_port_s *port = im_port->port;
- union attr {
- struct bfa_pport_attr_s pattr;
- struct bfa_ioc_attr_s ioc_attr;
- } attr;
+ struct bfa_pport_attr_s pattr;
+ char model[BFA_ADAPTER_MODEL_NAME_LEN];
+ char fw_ver[BFA_VERSION_LEN];
fc_host_node_name(host) =
bfa_os_htonll((bfa_fcs_port_get_nwwn(port->fcs_port)));
@@ -954,20 +951,18 @@ bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
/* For fibre channel services type 0x20 */
fc_host_supported_fc4s(host)[7] = 1;
- memset(&attr.ioc_attr, 0, sizeof(attr.ioc_attr));
- bfa_get_attr(&bfad->bfa, &attr.ioc_attr);
+ bfa_get_adapter_model(&bfad->bfa, model);
+ bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
sprintf(fc_host_symbolic_name(host), "Brocade %s FV%s DV%s",
- attr.ioc_attr.adapter_attr.model,
- attr.ioc_attr.adapter_attr.fw_ver, BFAD_DRIVER_VERSION);
+ model, fw_ver, BFAD_DRIVER_VERSION);
fc_host_supported_speeds(host) = 0;
fc_host_supported_speeds(host) |=
FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
FC_PORTSPEED_1GBIT;
- memset(&attr.pattr, 0, sizeof(attr.pattr));
- bfa_pport_get_attr(&bfad->bfa, &attr.pattr);
- fc_host_maxframe_size(host) = attr.pattr.pport_cfg.maxfrsize;
+ bfa_fcport_get_attr(&bfad->bfa, &pattr);
+ fc_host_maxframe_size(host) = pattr.pport_cfg.maxfrsize;
}
static void
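
The completion-path change in bfa_cb_ioim_done() adds the SCSI underrun rule: a GOOD status with no sense data still becomes DID_ERROR when the bytes actually transferred fall short of the command's underflow threshold. A compilable restatement of just that predicate, with illustrative numbers.

#include <stdio.h>

#define DID_OK		0x00
#define DID_ERROR	0x07
#define SAM_STAT_GOOD	0x00

static int host_status_for(int scsi_status, int sns_len,
			   int bufflen, int residue, int underflow)
{
	if (residue > 0 && !sns_len && scsi_status == SAM_STAT_GOOD &&
	    (bufflen - residue) < underflow)
		return DID_ERROR;	/* short transfer, no explanation */
	return DID_OK;
}

int main(void)
{
	/* asked for 4096, got 512, device requires at least 1024 */
	printf("host_status=%d\n",
	       host_status_for(SAM_STAT_GOOD, 0, 4096, 3584, 1024));
	return 0;
}
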
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 189a5b29e21a..85ab2da21321 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -23,7 +23,6 @@
#define FCPI_NAME " fcpim"
-void bfad_flags_set(struct bfad_s *bfad, u32 flags);
bfa_status_t bfad_im_module_init(void);
void bfad_im_module_exit(void);
bfa_status_t bfad_im_probe(struct bfad_s *bfad);
@@ -126,7 +125,6 @@ bfa_status_t bfad_os_thread_workq(struct bfad_s *bfad);
void bfad_os_destroy_workq(struct bfad_im_s *im);
void bfad_os_itnim_process(struct bfad_itnim_s *itnim_drv);
void bfad_os_fc_host_init(struct bfad_im_port_s *im_port);
-void bfad_os_init_work(struct bfad_im_port_s *im_port);
void bfad_os_scsi_host_free(struct bfad_s *bfad,
struct bfad_im_port_s *im_port);
void bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim,
@@ -136,9 +134,6 @@ struct bfad_itnim_s *bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id);
int bfad_os_scsi_add_host(struct Scsi_Host *shost,
struct bfad_im_port_s *im_port, struct bfad_s *bfad);
-/*
- * scsi_host_template entries
- */
void bfad_im_itnim_unmap(struct bfad_im_port_s *im_port,
struct bfad_itnim_s *itnim);
diff --git a/drivers/scsi/bfa/bfad_intr.c b/drivers/scsi/bfa/bfad_intr.c
index 7de8832f6fee..2b7dbecbebca 100644
--- a/drivers/scsi/bfa/bfad_intr.c
+++ b/drivers/scsi/bfa/bfad_intr.c
@@ -23,8 +23,10 @@ BFA_TRC_FILE(LDRV, INTR);
/**
* bfa_isr BFA driver interrupt functions
*/
-static int msix_disable;
-module_param(msix_disable, int, S_IRUGO | S_IWUSR);
+static int msix_disable_cb;
+static int msix_disable_ct;
+module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR);
+module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR);
/**
* Line based interrupt handler.
*/
@@ -141,6 +143,7 @@ bfad_setup_intr(struct bfad_s *bfad)
int error = 0;
u32 mask = 0, i, num_bit = 0, max_bit = 0;
struct msix_entry msix_entries[MAX_MSIX_ENTRY];
+ struct pci_dev *pdev = bfad->pcidev;
/* Call BFA to get the msix map for this PCI function. */
bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);
@@ -148,7 +151,9 @@ bfad_setup_intr(struct bfad_s *bfad)
/* Set up the msix entry table */
bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);
- if (!msix_disable) {
+ if ((pdev->device == BFA_PCI_DEVICE_ID_CT && !msix_disable_ct) ||
+ (pdev->device != BFA_PCI_DEVICE_ID_CT && !msix_disable_cb)) {
+
error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
if (error) {
/*
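
Splitting msix_disable into msix_disable_cb and msix_disable_ct lets each ASIC family keep its own interrupt policy; the setup path picks the knob by PCI device id before calling pci_enable_msix(). A sketch of just the selection logic, with assumed device id values.

#include <stdio.h>

/* Analogs of the msix_disable_cb / msix_disable_ct module params. */
static int msix_disable_cb;
static int msix_disable_ct = 1;		/* e.g. force CT to INTx */

enum { DEVICE_ID_CT = 0x0014 };		/* assumed value, illustrative */

static int want_msix(int device)
{
	/* the knob consulted now depends on which ASIC this is */
	if (device == DEVICE_ID_CT)
		return !msix_disable_ct;
	return !msix_disable_cb;
}

int main(void)
{
	printf("CT uses msix: %d\n", want_msix(DEVICE_ID_CT));
	printf("CB uses msix: %d\n", want_msix(0x0013));
	return 0;
}
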
diff --git a/drivers/scsi/bfa/fabric.c b/drivers/scsi/bfa/fabric.c
index a4b5dd449573..8166e9745ec0 100644
--- a/drivers/scsi/bfa/fabric.c
+++ b/drivers/scsi/bfa/fabric.c
@@ -37,7 +37,7 @@ BFA_TRC_FILE(FCS, FABRIC);
#define BFA_FCS_FABRIC_CLEANUP_DELAY (10000) /* Milliseconds */
#define bfa_fcs_fabric_set_opertype(__fabric) do { \
- if (bfa_pport_get_topology((__fabric)->fcs->bfa) \
+ if (bfa_fcport_get_topology((__fabric)->fcs->bfa) \
== BFA_PPORT_TOPOLOGY_P2P) \
(__fabric)->oper_type = BFA_PPORT_TYPE_NPORT; \
else \
@@ -136,8 +136,7 @@ bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
case BFA_FCS_FABRIC_SM_CREATE:
bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
bfa_fcs_fabric_init(fabric);
- bfa_fcs_lport_init(&fabric->bport, fabric->fcs, FC_VF_ID_NULL,
- &fabric->bport.port_cfg, NULL);
+ bfa_fcs_lport_init(&fabric->bport, &fabric->bport.port_cfg);
break;
case BFA_FCS_FABRIC_SM_LINK_UP:
@@ -161,7 +160,7 @@ bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
switch (event) {
case BFA_FCS_FABRIC_SM_START:
- if (bfa_pport_is_linkup(fabric->fcs->bfa)) {
+ if (bfa_fcport_is_linkup(fabric->fcs->bfa)) {
bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
bfa_fcs_fabric_login(fabric);
} else
@@ -225,7 +224,7 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
switch (event) {
case BFA_FCS_FABRIC_SM_CONT_OP:
- bfa_pport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
+ bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
fabric->fab_type = BFA_FCS_FABRIC_SWITCHED;
if (fabric->auth_reqd && fabric->is_auth) {
@@ -252,7 +251,7 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
case BFA_FCS_FABRIC_SM_NO_FABRIC:
fabric->fab_type = BFA_FCS_FABRIC_N2N;
- bfa_pport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
+ bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
bfa_fcs_fabric_notify_online(fabric);
bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric);
break;
@@ -419,7 +418,7 @@ bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
case BFA_FCS_FABRIC_SM_NO_FABRIC:
bfa_trc(fabric->fcs, fabric->bb_credit);
- bfa_pport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
+ bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
break;
default:
@@ -563,17 +562,15 @@ void
bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
{
struct bfa_port_cfg_s *port_cfg = &fabric->bport.port_cfg;
- struct bfa_adapter_attr_s adapter_attr;
+ char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0};
struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info;
- bfa_os_memset((void *)&adapter_attr, 0,
- sizeof(struct bfa_adapter_attr_s));
- bfa_ioc_get_adapter_attr(&fabric->fcs->bfa->ioc, &adapter_attr);
+ bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
/*
* Model name/number
*/
- strncpy((char *)&port_cfg->sym_name, adapter_attr.model,
+ strncpy((char *)&port_cfg->sym_name, model,
BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
@@ -719,10 +716,10 @@ bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric)
struct bfa_port_cfg_s *pcfg = &fabric->bport.port_cfg;
u8 alpa = 0;
- if (bfa_pport_get_topology(bfa) == BFA_PPORT_TOPOLOGY_LOOP)
- alpa = bfa_pport_get_myalpa(bfa);
+ if (bfa_fcport_get_topology(bfa) == BFA_PPORT_TOPOLOGY_LOOP)
+ alpa = bfa_fcport_get_myalpa(bfa);
- bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_pport_get_maxfrsize(bfa),
+ bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa),
pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd);
fabric->stats.flogi_sent++;
@@ -814,10 +811,10 @@ bfa_fcs_fabric_delete_comp(void *cbarg)
*/
/**
- * Module initialization
+ * Attach time initialization
*/
void
-bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs)
+bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
{
struct bfa_fcs_fabric_s *fabric;
@@ -841,7 +838,13 @@ bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs)
bfa_wc_up(&fabric->wc); /* For the base port */
bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
- bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CREATE);
+ bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL);
+}
+
+void
+bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs)
+{
+ bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE);
bfa_trc(fcs, 0);
}
@@ -890,6 +893,12 @@ bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric)
return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_loopback);
}
+bfa_boolean_t
+bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric)
+{
+ return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_auth_failed);
+}
+
enum bfa_pport_type
bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric)
{
@@ -1165,8 +1174,8 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
bfa_os_hton3b(FC_FABRIC_PORT),
n2n_port->reply_oxid, pcfg->pwwn,
- pcfg->nwwn, bfa_pport_get_maxfrsize(bfa),
- bfa_pport_get_rx_bbcredit(bfa));
+ pcfg->nwwn, bfa_fcport_get_maxfrsize(bfa),
+ bfa_fcport_get_rx_bbcredit(bfa));
bfa_fcxp_send(fcxp, NULL, fabric->vf_id, bfa_lps_get_tag(fabric->lps),
BFA_FALSE, FC_CLASS_3, reqlen, &fchs,
@@ -1224,14 +1233,8 @@ bfa_fcs_fabric_aen_post(struct bfa_fcs_port_s *port,
wwn2str(pwwn_ptr, pwwn);
wwn2str(fwwn_ptr, fwwn);
- switch (event) {
- case BFA_PORT_AEN_FABRIC_NAME_CHANGE:
- bfa_log(logmod, BFA_AEN_PORT_FABRIC_NAME_CHANGE, pwwn_ptr,
- fwwn_ptr);
- break;
- default:
- break;
- }
+ bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, event),
+ pwwn_ptr, fwwn_ptr);
aen_data.port.pwwn = pwwn;
aen_data.port.fwwn = fwwn;
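
The AEN logging rewrite drops the per-event switch in favor of composing a log id from (category, event) via BFA_LOG_CREATE_ID. The macro below mirrors what such an id constructor plausibly looks like; the bit layout is an assumption, not the driver's definition.

#include <stdio.h>

#define LOG_CREATE_ID(cat, evt)	(((cat) << 16) | (evt))	/* assumed layout */

enum { CAT_PORT = 3 };
enum { PORT_AEN_FABRIC_NAME_CHANGE = 1 };

static void log_demo(int msg_id, const char *a, const char *b)
{
	printf("log id 0x%x: %s %s\n", msg_id, a, b);
}

int main(void)
{
	/* one call now covers every event in the category */
	log_demo(LOG_CREATE_ID(CAT_PORT, PORT_AEN_FABRIC_NAME_CHANGE),
		 "pwwn", "fwwn");
	return 0;
}
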
diff --git a/drivers/scsi/bfa/fcbuild.h b/drivers/scsi/bfa/fcbuild.h
index 8fa7f270ef7b..981d98d542b9 100644
--- a/drivers/scsi/bfa/fcbuild.h
+++ b/drivers/scsi/bfa/fcbuild.h
@@ -72,6 +72,9 @@ fc_rpsc_operspeed_to_bfa_speed(enum fc_rpsc_op_speed_s speed)
case RPSC_OP_SPEED_8G:
return BFA_PPORT_SPEED_8GBPS;
+ case RPSC_OP_SPEED_10G:
+ return BFA_PPORT_SPEED_10GBPS;
+
default:
return BFA_PPORT_SPEED_UNKNOWN;
}
@@ -97,6 +100,9 @@ fc_bfa_speed_to_rpsc_operspeed(enum bfa_pport_speed op_speed)
case BFA_PPORT_SPEED_8GBPS:
return RPSC_OP_SPEED_8G;
+ case BFA_PPORT_SPEED_10GBPS:
+ return RPSC_OP_SPEED_10G;
+
default:
return RPSC_OP_SPEED_NOT_EST;
}
diff --git a/drivers/scsi/bfa/fcpim.c b/drivers/scsi/bfa/fcpim.c
index 1f3c06efaa9e..8ae4a2cfa85b 100644
--- a/drivers/scsi/bfa/fcpim.c
+++ b/drivers/scsi/bfa/fcpim.c
@@ -126,7 +126,7 @@ bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(itnim->fcs, event);
}
}
@@ -161,7 +161,7 @@ bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(itnim->fcs, event);
}
}
@@ -205,7 +205,7 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(itnim->fcs, event);
}
}
@@ -240,7 +240,7 @@ bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(itnim->fcs, event);
}
}
@@ -270,7 +270,7 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(itnim->fcs, event);
}
}
@@ -298,7 +298,7 @@ bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(itnim->fcs, event);
}
}
@@ -321,7 +321,7 @@ bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(itnim->fcs, event);
}
}
@@ -354,7 +354,7 @@ bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(itnim->fcs, event);
}
}
@@ -385,19 +385,8 @@ bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
wwn2str(lpwwn_ptr, lpwwn);
wwn2str(rpwwn_ptr, rpwwn);
- switch (event) {
- case BFA_ITNIM_AEN_ONLINE:
- bfa_log(logmod, BFA_AEN_ITNIM_ONLINE, rpwwn_ptr, lpwwn_ptr);
- break;
- case BFA_ITNIM_AEN_OFFLINE:
- bfa_log(logmod, BFA_AEN_ITNIM_OFFLINE, rpwwn_ptr, lpwwn_ptr);
- break;
- case BFA_ITNIM_AEN_DISCONNECT:
- bfa_log(logmod, BFA_AEN_ITNIM_DISCONNECT, rpwwn_ptr, lpwwn_ptr);
- break;
- default:
- break;
- }
+ bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_ITNIM, event),
+ rpwwn_ptr, lpwwn_ptr);
aen_data.itnim.vf_id = rport->port->fabric->vf_id;
aen_data.itnim.ppwwn =
@@ -689,7 +678,6 @@ bfa_cb_itnim_tov_begin(void *cb_arg)
struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg;
bfa_trc(itnim->fcs, itnim->rport->pwwn);
- bfa_fcb_itnim_tov_begin(itnim->itnim_drv);
}
/**
@@ -822,22 +810,3 @@ void
bfa_fcs_itnim_resume(struct bfa_fcs_itnim_s *itnim)
{
}
-
-/**
- * Module initialization
- */
-void
-bfa_fcs_fcpim_modinit(struct bfa_fcs_s *fcs)
-{
-}
-
-/**
- * Module cleanup
- */
-void
-bfa_fcs_fcpim_modexit(struct bfa_fcs_s *fcs)
-{
- bfa_fcs_modexit_comp(fcs);
-}
-
-
diff --git a/drivers/scsi/bfa/fcs_fabric.h b/drivers/scsi/bfa/fcs_fabric.h
index eee960820f86..244c3f00c50c 100644
--- a/drivers/scsi/bfa/fcs_fabric.h
+++ b/drivers/scsi/bfa/fcs_fabric.h
@@ -29,6 +29,7 @@
/*
* fcs friend functions: only between fcs modules
*/
+void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs);
void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs);
void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs);
void bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs);
@@ -46,6 +47,7 @@ void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric,
struct fchs_s *fchs, u16 len);
u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric);
bfa_boolean_t bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric);
+bfa_boolean_t bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric);
enum bfa_pport_type bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric);
void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric);
void bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric);
diff --git a/drivers/scsi/bfa/fcs_fcpim.h b/drivers/scsi/bfa/fcs_fcpim.h
index 61e9e2687de3..11e6e7bce9f6 100644
--- a/drivers/scsi/bfa/fcs_fcpim.h
+++ b/drivers/scsi/bfa/fcs_fcpim.h
@@ -34,11 +34,6 @@ void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim);
void bfa_fcs_itnim_pause(struct bfa_fcs_itnim_s *itnim);
void bfa_fcs_itnim_resume(struct bfa_fcs_itnim_s *itnim);
-/*
- * Modudle init/cleanup routines.
- */
-void bfa_fcs_fcpim_modinit(struct bfa_fcs_s *fcs);
-void bfa_fcs_fcpim_modexit(struct bfa_fcs_s *fcs);
void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, struct fchs_s *fchs,
u16 len);
#endif /* __FCS_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/fcs_lport.h b/drivers/scsi/bfa/fcs_lport.h
index ae744ba35671..a6508c8ab184 100644
--- a/drivers/scsi/bfa/fcs_lport.h
+++ b/drivers/scsi/bfa/fcs_lport.h
@@ -84,9 +84,10 @@ void bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs,
* Following routines will be called by Fabric to indicate port
* online/offline to vport.
*/
-void bfa_fcs_lport_init(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs,
- u16 vf_id, struct bfa_port_cfg_s *port_cfg,
- struct bfa_fcs_vport_s *vport);
+void bfa_fcs_lport_attach(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs,
+ uint16_t vf_id, struct bfa_fcs_vport_s *vport);
+void bfa_fcs_lport_init(struct bfa_fcs_port_s *lport,
+ struct bfa_port_cfg_s *port_cfg);
void bfa_fcs_port_online(struct bfa_fcs_port_s *port);
void bfa_fcs_port_offline(struct bfa_fcs_port_s *port);
void bfa_fcs_port_delete(struct bfa_fcs_port_s *port);
diff --git a/drivers/scsi/bfa/fcs_port.h b/drivers/scsi/bfa/fcs_port.h
index abb65191dd27..408c06a7d164 100644
--- a/drivers/scsi/bfa/fcs_port.h
+++ b/drivers/scsi/bfa/fcs_port.h
@@ -26,7 +26,6 @@
/*
* fcs friend functions: only between fcs modules
*/
-void bfa_fcs_pport_modinit(struct bfa_fcs_s *fcs);
-void bfa_fcs_pport_modexit(struct bfa_fcs_s *fcs);
+void bfa_fcs_pport_attach(struct bfa_fcs_s *fcs);
#endif /* __FCS_PPORT_H__ */
diff --git a/drivers/scsi/bfa/fcs_rport.h b/drivers/scsi/bfa/fcs_rport.h
index f601e9d74236..9c8d1d292380 100644
--- a/drivers/scsi/bfa/fcs_rport.h
+++ b/drivers/scsi/bfa/fcs_rport.h
@@ -24,9 +24,6 @@
#include <fcs/bfa_fcs_rport.h>
-void bfa_fcs_rport_modinit(struct bfa_fcs_s *fcs);
-void bfa_fcs_rport_modexit(struct bfa_fcs_s *fcs);
-
void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
u16 len);
void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport);
diff --git a/drivers/scsi/bfa/fcs_uf.h b/drivers/scsi/bfa/fcs_uf.h
index 96f1bdcb31ed..f591072214fe 100644
--- a/drivers/scsi/bfa/fcs_uf.h
+++ b/drivers/scsi/bfa/fcs_uf.h
@@ -26,7 +26,6 @@
/*
* fcs friend functions: only between fcs modules
*/
-void bfa_fcs_uf_modinit(struct bfa_fcs_s *fcs);
-void bfa_fcs_uf_modexit(struct bfa_fcs_s *fcs);
+void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs);
#endif /* __FCS_UF_H__ */
diff --git a/drivers/scsi/bfa/fcs_vport.h b/drivers/scsi/bfa/fcs_vport.h
index 9e80b6a97b7f..13c32ebf946c 100644
--- a/drivers/scsi/bfa/fcs_vport.h
+++ b/drivers/scsi/bfa/fcs_vport.h
@@ -22,18 +22,10 @@
#include <fcs/bfa_fcs_vport.h>
#include <defs/bfa_defs_pci.h>
-/*
- * Modudle init/cleanup routines.
- */
-
-void bfa_fcs_vport_modinit(struct bfa_fcs_s *fcs);
-void bfa_fcs_vport_modexit(struct bfa_fcs_s *fcs);
-
void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport);
void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport);
void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport);
void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport);
-u32 bfa_fcs_vport_get_max(struct bfa_fcs_s *fcs);
#endif /* __FCS_VPORT_H__ */
diff --git a/drivers/scsi/bfa/fdmi.c b/drivers/scsi/bfa/fdmi.c
index df2a1e54e16b..8f17076d1a87 100644
--- a/drivers/scsi/bfa/fdmi.c
+++ b/drivers/scsi/bfa/fdmi.c
@@ -116,6 +116,9 @@ static void bfa_fcs_port_fdmi_sm_rpa_retry(struct bfa_fcs_port_fdmi_s *fdmi,
enum port_fdmi_event event);
static void bfa_fcs_port_fdmi_sm_online(struct bfa_fcs_port_fdmi_s *fdmi,
enum port_fdmi_event event);
+static void bfa_fcs_port_fdmi_sm_disabled(struct bfa_fcs_port_fdmi_s *fdmi,
+ enum port_fdmi_event event);
+
/**
* Start in offline state - awaiting MS to send start.
*/
@@ -155,7 +158,7 @@ bfa_fcs_port_fdmi_sm_offline(struct bfa_fcs_port_fdmi_s *fdmi,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(port->fcs, event);
}
}
@@ -180,7 +183,7 @@ bfa_fcs_port_fdmi_sm_sending_rhba(struct bfa_fcs_port_fdmi_s *fdmi,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(port->fcs, event);
}
}
@@ -227,7 +230,7 @@ bfa_fcs_port_fdmi_sm_rhba(struct bfa_fcs_port_fdmi_s *fdmi,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(port->fcs, event);
}
}
@@ -255,7 +258,7 @@ bfa_fcs_port_fdmi_sm_rhba_retry(struct bfa_fcs_port_fdmi_s *fdmi,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(port->fcs, event);
}
}
@@ -283,7 +286,7 @@ bfa_fcs_port_fdmi_sm_sending_rprt(struct bfa_fcs_port_fdmi_s *fdmi,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(port->fcs, event);
}
}
@@ -328,7 +331,7 @@ bfa_fcs_port_fdmi_sm_rprt(struct bfa_fcs_port_fdmi_s *fdmi,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(port->fcs, event);
}
}
@@ -356,7 +359,7 @@ bfa_fcs_port_fdmi_sm_rprt_retry(struct bfa_fcs_port_fdmi_s *fdmi,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(port->fcs, event);
}
}
@@ -384,7 +387,7 @@ bfa_fcs_port_fdmi_sm_sending_rpa(struct bfa_fcs_port_fdmi_s *fdmi,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(port->fcs, event);
}
}
@@ -428,7 +431,7 @@ bfa_fcs_port_fdmi_sm_rpa(struct bfa_fcs_port_fdmi_s *fdmi,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(port->fcs, event);
}
}
@@ -456,7 +459,7 @@ bfa_fcs_port_fdmi_sm_rpa_retry(struct bfa_fcs_port_fdmi_s *fdmi,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(port->fcs, event);
}
}
@@ -475,10 +478,24 @@ bfa_fcs_port_fdmi_sm_online(struct bfa_fcs_port_fdmi_s *fdmi,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(port->fcs, event);
}
}
+/**
+ * FDMI disabled state.
+ */
+static void
+bfa_fcs_port_fdmi_sm_disabled(struct bfa_fcs_port_fdmi_s *fdmi,
+ enum port_fdmi_event event)
+{
+ struct bfa_fcs_port_s *port = fdmi->ms->port;
+
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+ bfa_trc(port->fcs, event);
+
+ /* No-op state. FDMI can only be enabled at driver init. */
+}
/**
* RHBA : Register HBA Attributes.
@@ -1097,36 +1114,23 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi,
{
struct bfa_fcs_port_s *port = fdmi->ms->port;
struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info;
- struct bfa_adapter_attr_s adapter_attr;
bfa_os_memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s));
- bfa_os_memset(&adapter_attr, 0, sizeof(struct bfa_adapter_attr_s));
-
- bfa_ioc_get_adapter_attr(&port->fcs->bfa->ioc, &adapter_attr);
-
- strncpy(hba_attr->manufacturer, adapter_attr.manufacturer,
- sizeof(adapter_attr.manufacturer));
-
- strncpy(hba_attr->serial_num, adapter_attr.serial_num,
- sizeof(adapter_attr.serial_num));
- strncpy(hba_attr->model, adapter_attr.model, sizeof(hba_attr->model));
-
- strncpy(hba_attr->model_desc, adapter_attr.model_descr,
- sizeof(hba_attr->model_desc));
-
- strncpy(hba_attr->hw_version, adapter_attr.hw_ver,
- sizeof(hba_attr->hw_version));
+ bfa_ioc_get_adapter_manufacturer(&port->fcs->bfa->ioc,
+ hba_attr->manufacturer);
+ bfa_ioc_get_adapter_serial_num(&port->fcs->bfa->ioc,
+ hba_attr->serial_num);
+ bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc, hba_attr->model);
+ bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc, hba_attr->model_desc);
+ bfa_ioc_get_pci_chip_rev(&port->fcs->bfa->ioc, hba_attr->hw_version);
+ bfa_ioc_get_adapter_optrom_ver(&port->fcs->bfa->ioc,
+ hba_attr->option_rom_ver);
+ bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc, hba_attr->fw_version);
strncpy(hba_attr->driver_version, (char *)driver_info->version,
sizeof(hba_attr->driver_version));
- strncpy(hba_attr->option_rom_ver, adapter_attr.optrom_ver,
- sizeof(hba_attr->option_rom_ver));
-
- strncpy(hba_attr->fw_version, adapter_attr.fw_ver,
- sizeof(hba_attr->fw_version));
-
strncpy(hba_attr->os_name, driver_info->host_os_name,
sizeof(hba_attr->os_name));
@@ -1158,7 +1162,7 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_port_fdmi_s *fdmi,
/*
* get pport attributes from hal
*/
- bfa_pport_get_attr(port->fcs->bfa, &pport_attr);
+ bfa_fcport_get_attr(port->fcs->bfa, &pport_attr);
/*
* get FC4 type Bitmask
@@ -1201,7 +1205,10 @@ bfa_fcs_port_fdmi_init(struct bfa_fcs_port_ms_s *ms)
struct bfa_fcs_port_fdmi_s *fdmi = &ms->fdmi;
fdmi->ms = ms;
- bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
+ if (ms->port->fcs->fdmi_enabled)
+ bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
+ else
+ bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_disabled);
}
void
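
The init hunk above gates the whole FDMI state machine on
fcs->fdmi_enabled: a disabled instance starts in the new no-op disabled
state instead of offline, so later events are silently dropped. A
standalone sketch of that init-time gating pattern (demo types, not the
driver's):

    #include <stdbool.h>
    #include <stdio.h>

    struct fdmi_demo;
    typedef void (*state_fn)(struct fdmi_demo *f, int event);

    struct fdmi_demo { state_fn sm; };

    static void sm_offline(struct fdmi_demo *f, int event)
    {
            (void)f;
            printf("offline: handling event %d\n", event);
    }

    static void sm_disabled(struct fdmi_demo *f, int event)
    {
            /* no-op: can only be re-enabled at driver init */
            (void)f;
            (void)event;
    }

    static void fdmi_init(struct fdmi_demo *f, bool fdmi_enabled)
    {
            f->sm = fdmi_enabled ? sm_offline : sm_disabled;
    }

    int main(void)
    {
            struct fdmi_demo f;

            fdmi_init(&f, false);
            f.sm(&f, 1);            /* ignored */
            fdmi_init(&f, true);
            f.sm(&f, 1);            /* handled */
            return 0;
    }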
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen.h b/drivers/scsi/bfa/include/aen/bfa_aen.h
index d9cbc2a783d4..6abbab005db6 100644
--- a/drivers/scsi/bfa/include/aen/bfa_aen.h
+++ b/drivers/scsi/bfa/include/aen/bfa_aen.h
@@ -18,21 +18,24 @@
#define __BFA_AEN_H__
#include "defs/bfa_defs_aen.h"
+#include "defs/bfa_defs_status.h"
+#include "cs/bfa_debug.h"
-#define BFA_AEN_MAX_ENTRY 512
+#define BFA_AEN_MAX_ENTRY 512
-extern s32 bfa_aen_max_cfg_entry;
+extern int bfa_aen_max_cfg_entry;
struct bfa_aen_s {
void *bfad;
- s32 max_entry;
- s32 write_index;
- s32 read_index;
- u32 bfad_num;
- u32 seq_num;
+ int max_entry;
+ int write_index;
+ int read_index;
+ int bfad_num;
+ int seq_num;
void (*aen_cb_notify)(void *bfad);
void (*gettimeofday)(struct bfa_timeval_s *tv);
- struct bfa_trc_mod_s *trcmod;
- struct bfa_aen_entry_s list[BFA_AEN_MAX_ENTRY]; /* Must be the last */
+ struct bfa_trc_mod_s *trcmod;
+ int app_ri[BFA_AEN_MAX_APP]; /* For multiclient support */
+ struct bfa_aen_entry_s list[BFA_AEN_MAX_ENTRY]; /* Must be the last */
};
@@ -45,48 +48,49 @@ bfa_aen_set_max_cfg_entry(int max_entry)
bfa_aen_max_cfg_entry = max_entry;
}
-static inline s32
+static inline int
bfa_aen_get_max_cfg_entry(void)
{
return bfa_aen_max_cfg_entry;
}
-static inline s32
+static inline int
bfa_aen_get_meminfo(void)
{
return sizeof(struct bfa_aen_entry_s) * bfa_aen_get_max_cfg_entry();
}
-static inline s32
+static inline int
bfa_aen_get_wi(struct bfa_aen_s *aen)
{
return aen->write_index;
}
-static inline s32
+static inline int
bfa_aen_get_ri(struct bfa_aen_s *aen)
{
return aen->read_index;
}
-static inline s32
-bfa_aen_fetch_count(struct bfa_aen_s *aen, s32 read_index)
+static inline int
+bfa_aen_fetch_count(struct bfa_aen_s *aen, enum bfa_aen_app app_id)
{
- return ((aen->write_index + aen->max_entry) - read_index)
+ bfa_assert((app_id < BFA_AEN_MAX_APP) && (app_id >= bfa_aen_app_bcu));
+ return ((aen->write_index + aen->max_entry) - aen->app_ri[app_id])
% aen->max_entry;
}
-s32 bfa_aen_init(struct bfa_aen_s *aen, struct bfa_trc_mod_s *trcmod,
- void *bfad, u32 inst_id, void (*aen_cb_notify)(void *),
+int bfa_aen_init(struct bfa_aen_s *aen, struct bfa_trc_mod_s *trcmod,
+ void *bfad, int bfad_num, void (*aen_cb_notify)(void *),
void (*gettimeofday)(struct bfa_timeval_s *));
-s32 bfa_aen_post(struct bfa_aen_s *aen, enum bfa_aen_category aen_category,
+void bfa_aen_post(struct bfa_aen_s *aen, enum bfa_aen_category aen_category,
int aen_type, union bfa_aen_data_u *aen_data);
-s32 bfa_aen_fetch(struct bfa_aen_s *aen, struct bfa_aen_entry_s *aen_entry,
- s32 entry_space, s32 rii, s32 *ri_arr,
- s32 ri_arr_cnt);
+bfa_status_t bfa_aen_fetch(struct bfa_aen_s *aen,
+ struct bfa_aen_entry_s *aen_entry,
+ int entry_req, enum bfa_aen_app app_id, int *entry_ret);
-s32 bfa_aen_get_inst(struct bfa_aen_s *aen);
+int bfa_aen_get_inst(struct bfa_aen_s *aen);
#endif /* __BFA_AEN_H__ */
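
The reworked header above replaces the single shared read index with a
per-application one (app_ri[]), so each of up to BFA_AEN_MAX_APP readers
drains the same ring at its own pace; bfa_aen_fetch_count() is the usual
wrap-safe distance from that reader's index to the write index. A
self-contained model of just that computation (not driver code):

    #include <assert.h>
    #include <stdio.h>

    #define MAX_ENTRY 512
    #define MAX_APP   5

    struct aen_demo {
            int write_index;
            int app_ri[MAX_APP];    /* one read index per application */
    };

    static int fetch_count(const struct aen_demo *aen, int app_id)
    {
            assert(app_id >= 0 && app_id < MAX_APP);
            /* (wi - ri) modulo ring size, kept non-negative */
            return ((aen->write_index + MAX_ENTRY) - aen->app_ri[app_id])
                    % MAX_ENTRY;
    }

    int main(void)
    {
            struct aen_demo aen = { .write_index = 3, .app_ri = { 508 } };

            /* writer wrapped: app 0 has (512 - 508) + 3 = 7 pending */
            printf("app0 pending: %d\n", fetch_count(&aen, 0));
            /* app 1 is still at index 0 and has 3 pending */
            printf("app1 pending: %d\n", fetch_count(&aen, 1));
            return 0;
    }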
diff --git a/drivers/scsi/bfa/include/bfa.h b/drivers/scsi/bfa/include/bfa.h
index d4bc0d9fa42c..1f5966cfbd16 100644
--- a/drivers/scsi/bfa/include/bfa.h
+++ b/drivers/scsi/bfa/include/bfa.h
@@ -106,6 +106,26 @@ struct bfa_sge_s {
bfa_ioc_fetch_stats(&(__bfa)->ioc, __ioc_stats)
#define bfa_ioc_clear_stats(__bfa) \
bfa_ioc_clr_stats(&(__bfa)->ioc)
+#define bfa_get_nports(__bfa) \
+ bfa_ioc_get_nports(&(__bfa)->ioc)
+#define bfa_get_adapter_manufacturer(__bfa, __manufacturer) \
+ bfa_ioc_get_adapter_manufacturer(&(__bfa)->ioc, __manufacturer)
+#define bfa_get_adapter_model(__bfa, __model) \
+ bfa_ioc_get_adapter_model(&(__bfa)->ioc, __model)
+#define bfa_get_adapter_serial_num(__bfa, __serial_num) \
+ bfa_ioc_get_adapter_serial_num(&(__bfa)->ioc, __serial_num)
+#define bfa_get_adapter_fw_ver(__bfa, __fw_ver) \
+ bfa_ioc_get_adapter_fw_ver(&(__bfa)->ioc, __fw_ver)
+#define bfa_get_adapter_optrom_ver(__bfa, __optrom_ver) \
+ bfa_ioc_get_adapter_optrom_ver(&(__bfa)->ioc, __optrom_ver)
+#define bfa_get_pci_chip_rev(__bfa, __chip_rev) \
+ bfa_ioc_get_pci_chip_rev(&(__bfa)->ioc, __chip_rev)
+#define bfa_get_ioc_state(__bfa) \
+ bfa_ioc_get_state(&(__bfa)->ioc)
+#define bfa_get_type(__bfa) \
+ bfa_ioc_get_type(&(__bfa)->ioc)
+#define bfa_get_mac(__bfa) \
+ bfa_ioc_get_mac(&(__bfa)->ioc)
/*
* bfa API functions
@@ -161,6 +181,7 @@ bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
void bfa_iocfc_enable(struct bfa_s *bfa);
void bfa_iocfc_disable(struct bfa_s *bfa);
void bfa_ioc_auto_recover(bfa_boolean_t auto_recover);
+void bfa_chip_reset(struct bfa_s *bfa);
void bfa_cb_ioc_disable(void *bfad);
void bfa_timer_tick(struct bfa_s *bfa);
#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \
@@ -171,6 +192,7 @@ void bfa_timer_tick(struct bfa_s *bfa);
*/
bfa_status_t bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen);
bfa_status_t bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen);
+void bfa_debug_fwsave_clear(struct bfa_s *bfa);
#include "bfa_priv.h"
diff --git a/drivers/scsi/bfa/include/bfa_svc.h b/drivers/scsi/bfa/include/bfa_svc.h
index 268d956bad89..1349b99a3c6d 100644
--- a/drivers/scsi/bfa/include/bfa_svc.h
+++ b/drivers/scsi/bfa/include/bfa_svc.h
@@ -26,6 +26,7 @@ struct bfa_fcxp_s;
#include <defs/bfa_defs_pport.h>
#include <defs/bfa_defs_rport.h>
#include <defs/bfa_defs_qos.h>
+#include <defs/bfa_defs_fcport.h>
#include <cs/bfa_sm.h>
#include <bfa.h>
@@ -35,7 +36,7 @@ struct bfa_fcxp_s;
struct bfa_rport_info_s {
u16 max_frmsz; /* max rcv pdu size */
u32 pid:24, /* remote port ID */
- lp_tag:8;
+ lp_tag:8; /* tag */
u32 local_pid:24, /* local port ID */
cisc:8; /* CIRO supported */
u8 fc_class; /* supported FC classes. enum fc_cos */
@@ -54,7 +55,7 @@ struct bfa_rport_s {
void *rport_drv; /* fcs/driver rport object */
u16 fw_handle; /* firmware rport handle */
u16 rport_tag; /* BFA rport tag */
- struct bfa_rport_info_s rport_info; /* rport info from *fcs/driver */
+ struct bfa_rport_info_s rport_info; /* rport info from fcs/driver */
struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
struct bfa_cb_qe_s hcb_qe; /* BFA callback qelem */
struct bfa_rport_hal_stats_s stats; /* BFA rport statistics */
@@ -101,7 +102,7 @@ struct bfa_uf_buf_s {
struct bfa_uf_s {
struct list_head qe; /* queue element */
struct bfa_s *bfa; /* bfa instance */
- u16 uf_tag; /* identifying tag f/w messages */
+ u16 uf_tag; /* identifying tag fw msgs */
u16 vf_id;
u16 src_rport_handle;
u16 rsvd;
@@ -127,7 +128,7 @@ struct bfa_lps_s {
u8 reqq; /* lport request queue */
u8 alpa; /* ALPA for loop topologies */
u32 lp_pid; /* lport port ID */
- bfa_boolean_t fdisc; /* send FDISC instead of FLOGI*/
+ bfa_boolean_t fdisc; /* send FDISC instead of FLOGI */
bfa_boolean_t auth_en; /* enable authentication */
bfa_boolean_t auth_req; /* authentication required */
bfa_boolean_t npiv_en; /* NPIV is allowed by peer */
@@ -151,60 +152,69 @@ struct bfa_lps_s {
bfa_eproto_status_t ext_status;
};
+#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port))
+
/*
* bfa pport API functions
*/
-bfa_status_t bfa_pport_enable(struct bfa_s *bfa);
-bfa_status_t bfa_pport_disable(struct bfa_s *bfa);
-bfa_status_t bfa_pport_cfg_speed(struct bfa_s *bfa,
+bfa_status_t bfa_fcport_enable(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_disable(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_cfg_speed(struct bfa_s *bfa,
enum bfa_pport_speed speed);
-enum bfa_pport_speed bfa_pport_get_speed(struct bfa_s *bfa);
-bfa_status_t bfa_pport_cfg_topology(struct bfa_s *bfa,
+enum bfa_pport_speed bfa_fcport_get_speed(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa,
enum bfa_pport_topology topo);
-enum bfa_pport_topology bfa_pport_get_topology(struct bfa_s *bfa);
-bfa_status_t bfa_pport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa);
-bfa_boolean_t bfa_pport_get_hardalpa(struct bfa_s *bfa, u8 *alpa);
-u8 bfa_pport_get_myalpa(struct bfa_s *bfa);
-bfa_status_t bfa_pport_clr_hardalpa(struct bfa_s *bfa);
-bfa_status_t bfa_pport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize);
-u16 bfa_pport_get_maxfrsize(struct bfa_s *bfa);
-u32 bfa_pport_mypid(struct bfa_s *bfa);
-u8 bfa_pport_get_rx_bbcredit(struct bfa_s *bfa);
-bfa_status_t bfa_pport_trunk_enable(struct bfa_s *bfa, u8 bitmap);
-bfa_status_t bfa_pport_trunk_disable(struct bfa_s *bfa);
-bfa_boolean_t bfa_pport_trunk_query(struct bfa_s *bfa, u32 *bitmap);
-void bfa_pport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr);
-wwn_t bfa_pport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node);
-bfa_status_t bfa_pport_get_stats(struct bfa_s *bfa,
- union bfa_pport_stats_u *stats,
- bfa_cb_pport_t cbfn, void *cbarg);
-bfa_status_t bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn,
- void *cbarg);
-void bfa_pport_event_register(struct bfa_s *bfa,
+enum bfa_pport_topology bfa_fcport_get_topology(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa);
+bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa);
+u8 bfa_fcport_get_myalpa(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_clr_hardalpa(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize);
+u16 bfa_fcport_get_maxfrsize(struct bfa_s *bfa);
+u32 bfa_fcport_mypid(struct bfa_s *bfa);
+u8 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_trunk_enable(struct bfa_s *bfa, u8 bitmap);
+bfa_status_t bfa_fcport_trunk_disable(struct bfa_s *bfa);
+bfa_boolean_t bfa_fcport_trunk_query(struct bfa_s *bfa, u32 *bitmap);
+void bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr);
+wwn_t bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node);
+void bfa_fcport_event_register(struct bfa_s *bfa,
void (*event_cbfn) (void *cbarg,
bfa_pport_event_t event), void *event_cbarg);
-bfa_boolean_t bfa_pport_is_disabled(struct bfa_s *bfa);
-void bfa_pport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off);
-void bfa_pport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off);
-bfa_status_t bfa_pport_cfg_ratelim_speed(struct bfa_s *bfa,
+bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa);
+void bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off);
+void bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off);
+bfa_status_t bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa,
enum bfa_pport_speed speed);
-enum bfa_pport_speed bfa_pport_get_ratelim_speed(struct bfa_s *bfa);
+enum bfa_pport_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa);
-void bfa_pport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit);
-void bfa_pport_busy(struct bfa_s *bfa, bfa_boolean_t status);
-void bfa_pport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon,
+void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit);
+void bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status);
+void bfa_fcport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon,
bfa_boolean_t link_e2e_beacon);
void bfa_cb_pport_event(void *cbarg, bfa_pport_event_t event);
-void bfa_pport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr);
-void bfa_pport_qos_get_vc_attr(struct bfa_s *bfa,
+void bfa_fcport_qos_get_attr(struct bfa_s *bfa,
+ struct bfa_qos_attr_s *qos_attr);
+void bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
struct bfa_qos_vc_attr_s *qos_vc_attr);
-bfa_status_t bfa_pport_get_qos_stats(struct bfa_s *bfa,
- union bfa_pport_stats_u *stats,
+bfa_status_t bfa_fcport_get_qos_stats(struct bfa_s *bfa,
+ union bfa_fcport_stats_u *stats,
bfa_cb_pport_t cbfn, void *cbarg);
-bfa_status_t bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn,
+bfa_status_t bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn,
void *cbarg);
-bfa_boolean_t bfa_pport_is_ratelim(struct bfa_s *bfa);
-bfa_boolean_t bfa_pport_is_linkup(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_get_fcoe_stats(struct bfa_s *bfa,
+ union bfa_fcport_stats_u *stats,
+ bfa_cb_pport_t cbfn, void *cbarg);
+bfa_status_t bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn,
+ void *cbarg);
+
+bfa_boolean_t bfa_fcport_is_ratelim(struct bfa_s *bfa);
+bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
+ union bfa_fcport_stats_u *stats,
+ bfa_cb_pport_t cbfn, void *cbarg);
+bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn,
+ void *cbarg);
/*
* bfa rport API functions
@@ -293,6 +303,7 @@ void bfa_uf_free(struct bfa_uf_s *uf);
* bfa lport service api
*/
+u32 bfa_lps_get_max_vport(struct bfa_s *bfa);
struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa);
void bfa_lps_delete(struct bfa_lps_s *lps);
void bfa_lps_discard(struct bfa_lps_s *lps);
@@ -315,10 +326,12 @@ wwn_t bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps);
wwn_t bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps);
u8 bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps);
u8 bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps);
+mac_t bfa_lps_get_lp_mac(struct bfa_lps_s *lps);
void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status);
void bfa_cb_lps_flogo_comp(void *bfad, void *uarg);
void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status);
void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
+void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
#endif /* __BFA_SVC_H__ */
diff --git a/drivers/scsi/bfa/include/bfa_timer.h b/drivers/scsi/bfa/include/bfa_timer.h
index e407103fa565..f71087448222 100644
--- a/drivers/scsi/bfa/include/bfa_timer.h
+++ b/drivers/scsi/bfa/include/bfa_timer.h
@@ -41,7 +41,7 @@ struct bfa_timer_mod_s {
struct list_head timer_q;
};
-#define BFA_TIMER_FREQ 500 /**< specified in millisecs */
+#define BFA_TIMER_FREQ 200 /**< specified in millisecs */
void bfa_timer_beat(struct bfa_timer_mod_s *mod);
void bfa_timer_init(struct bfa_timer_mod_s *mod);
diff --git a/drivers/scsi/bfa/include/bfi/bfi.h b/drivers/scsi/bfa/include/bfi/bfi.h
index 7042c18e542d..a550e80cabd2 100644
--- a/drivers/scsi/bfa/include/bfi/bfi.h
+++ b/drivers/scsi/bfa/include/bfi/bfi.h
@@ -143,8 +143,8 @@ enum bfi_mclass {
BFI_MC_IOC = 1, /* IO Controller (IOC) */
BFI_MC_DIAG = 2, /* Diagnostic Msgs */
BFI_MC_FLASH = 3, /* Flash message class */
- BFI_MC_CEE = 4,
- BFI_MC_FC_PORT = 5, /* FC port */
+ BFI_MC_CEE = 4, /* CEE */
+ BFI_MC_FCPORT = 5, /* FC port */
BFI_MC_IOCFC = 6, /* FC - IO Controller (IOC) */
BFI_MC_LL = 7, /* Link Layer */
BFI_MC_UF = 8, /* Unsolicited frame receive */
diff --git a/drivers/scsi/bfa/include/bfi/bfi_cbreg.h b/drivers/scsi/bfa/include/bfi/bfi_cbreg.h
index b3bb52b565b1..a51ee61ddb19 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_cbreg.h
+++ b/drivers/scsi/bfa/include/bfi/bfi_cbreg.h
@@ -177,7 +177,21 @@
#define __PSS_LMEM_INIT_EN 0x00000100
#define __PSS_LPU1_RESET 0x00000002
#define __PSS_LPU0_RESET 0x00000001
-
+#define PSS_ERR_STATUS_REG 0x00018810
+#define __PSS_LMEM1_CORR_ERR 0x00000800
+#define __PSS_LMEM0_CORR_ERR 0x00000400
+#define __PSS_LMEM1_UNCORR_ERR 0x00000200
+#define __PSS_LMEM0_UNCORR_ERR 0x00000100
+#define __PSS_BAL_PERR 0x00000080
+#define __PSS_DIP_IF_ERR 0x00000040
+#define __PSS_IOH_IF_ERR 0x00000020
+#define __PSS_TDS_IF_ERR 0x00000010
+#define __PSS_RDS_IF_ERR 0x00000008
+#define __PSS_SGM_IF_ERR 0x00000004
+#define __PSS_LPU1_RAM_ERR 0x00000002
+#define __PSS_LPU0_RAM_ERR 0x00000001
+#define ERR_SET_REG 0x00018818
+#define __PSS_ERR_STATUS_SET 0x00000fff
/*
* These definitions are either in error/missing in spec. It's auto-generated
diff --git a/drivers/scsi/bfa/include/bfi/bfi_ctreg.h b/drivers/scsi/bfa/include/bfi/bfi_ctreg.h
index d3caa58c0a0a..57a8497105af 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_ctreg.h
+++ b/drivers/scsi/bfa/include/bfi/bfi_ctreg.h
@@ -430,6 +430,31 @@ enum {
#define __PSS_LMEM_INIT_EN 0x00000100
#define __PSS_LPU1_RESET 0x00000002
#define __PSS_LPU0_RESET 0x00000001
+#define PSS_ERR_STATUS_REG 0x00018810
+#define __PSS_LPU1_TCM_READ_ERR 0x00200000
+#define __PSS_LPU0_TCM_READ_ERR 0x00100000
+#define __PSS_LMEM5_CORR_ERR 0x00080000
+#define __PSS_LMEM4_CORR_ERR 0x00040000
+#define __PSS_LMEM3_CORR_ERR 0x00020000
+#define __PSS_LMEM2_CORR_ERR 0x00010000
+#define __PSS_LMEM1_CORR_ERR 0x00008000
+#define __PSS_LMEM0_CORR_ERR 0x00004000
+#define __PSS_LMEM5_UNCORR_ERR 0x00002000
+#define __PSS_LMEM4_UNCORR_ERR 0x00001000
+#define __PSS_LMEM3_UNCORR_ERR 0x00000800
+#define __PSS_LMEM2_UNCORR_ERR 0x00000400
+#define __PSS_LMEM1_UNCORR_ERR 0x00000200
+#define __PSS_LMEM0_UNCORR_ERR 0x00000100
+#define __PSS_BAL_PERR 0x00000080
+#define __PSS_DIP_IF_ERR 0x00000040
+#define __PSS_IOH_IF_ERR 0x00000020
+#define __PSS_TDS_IF_ERR 0x00000010
+#define __PSS_RDS_IF_ERR 0x00000008
+#define __PSS_SGM_IF_ERR 0x00000004
+#define __PSS_LPU1_RAM_ERR 0x00000002
+#define __PSS_LPU0_RAM_ERR 0x00000001
+#define ERR_SET_REG 0x00018818
+#define __PSS_ERR_STATUS_SET 0x003fffff
#define HQM_QSET0_RXQ_DRBL_P0 0x00038000
#define __RXQ0_ADD_VECTORS_P 0x80000000
#define __RXQ0_STOP_P 0x40000000
@@ -589,6 +614,7 @@ enum {
#define __HFN_INT_MBOX_LPU1 0x00200000U
#define __HFN_INT_MBOX1_LPU0 0x00400000U
#define __HFN_INT_MBOX1_LPU1 0x00800000U
+#define __HFN_INT_LL_HALT 0x01000000U
#define __HFN_INT_CPE_MASK 0x000000ffU
#define __HFN_INT_RME_MASK 0x0000ff00U
diff --git a/drivers/scsi/bfa/include/bfi/bfi_ioc.h b/drivers/scsi/bfa/include/bfi/bfi_ioc.h
index 96ef05670659..a0158aac0024 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_ioc.h
+++ b/drivers/scsi/bfa/include/bfi/bfi_ioc.h
@@ -123,7 +123,7 @@ enum bfi_ioc_state {
BFI_IOC_DISABLING = 5, /* IOC is being disabled */
BFI_IOC_DISABLED = 6, /* IOC is disabled */
BFI_IOC_CFG_DISABLED = 7, /* IOC is being disabled;transient */
- BFI_IOC_HBFAIL = 8, /* IOC heart-beat failure */
+ BFI_IOC_FAIL = 8, /* IOC heart-beat failure */
BFI_IOC_MEMTEST = 9, /* IOC is doing memtest */
};
diff --git a/drivers/scsi/bfa/include/bfi/bfi_lps.h b/drivers/scsi/bfa/include/bfi/bfi_lps.h
index c59d47badb4b..7ed31bbb8696 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_lps.h
+++ b/drivers/scsi/bfa/include/bfi/bfi_lps.h
@@ -30,6 +30,7 @@ enum bfi_lps_h2i_msgs {
enum bfi_lps_i2h_msgs {
BFI_LPS_H2I_LOGIN_RSP = BFA_I2HM(1),
BFI_LPS_H2I_LOGOUT_RSP = BFA_I2HM(2),
+ BFI_LPS_H2I_CVL_EVENT = BFA_I2HM(3),
};
struct bfi_lps_login_req_s {
@@ -77,6 +78,12 @@ struct bfi_lps_logout_rsp_s {
u8 rsvd[2];
};
+struct bfi_lps_cvl_event_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u8 lp_tag;
+ u8 rsvd[3];
+};
+
union bfi_lps_h2i_msg_u {
struct bfi_mhdr_s *msg;
struct bfi_lps_login_req_s *login_req;
@@ -87,6 +94,7 @@ union bfi_lps_i2h_msg_u {
struct bfi_msg_s *msg;
struct bfi_lps_login_rsp_s *login_rsp;
struct bfi_lps_logout_rsp_s *logout_rsp;
+ struct bfi_lps_cvl_event_s *cvl_event;
};
#pragma pack()
diff --git a/drivers/scsi/bfa/include/bfi/bfi_pport.h b/drivers/scsi/bfa/include/bfi/bfi_pport.h
index c96d246851af..50dcf45c7470 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_pport.h
+++ b/drivers/scsi/bfa/include/bfi/bfi_pport.h
@@ -22,163 +22,97 @@
#pragma pack(1)
-enum bfi_pport_h2i {
- BFI_PPORT_H2I_ENABLE_REQ = (1),
- BFI_PPORT_H2I_DISABLE_REQ = (2),
- BFI_PPORT_H2I_GET_STATS_REQ = (3),
- BFI_PPORT_H2I_CLEAR_STATS_REQ = (4),
- BFI_PPORT_H2I_SET_SVC_PARAMS_REQ = (5),
- BFI_PPORT_H2I_ENABLE_RX_VF_TAG_REQ = (6),
- BFI_PPORT_H2I_ENABLE_TX_VF_TAG_REQ = (7),
- BFI_PPORT_H2I_GET_QOS_STATS_REQ = (8),
- BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ = (9),
+enum bfi_fcport_h2i {
+ BFI_FCPORT_H2I_ENABLE_REQ = (1),
+ BFI_FCPORT_H2I_DISABLE_REQ = (2),
+ BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ = (3),
+ BFI_FCPORT_H2I_STATS_GET_REQ = (4),
+ BFI_FCPORT_H2I_STATS_CLEAR_REQ = (5),
};
-enum bfi_pport_i2h {
- BFI_PPORT_I2H_ENABLE_RSP = BFA_I2HM(1),
- BFI_PPORT_I2H_DISABLE_RSP = BFA_I2HM(2),
- BFI_PPORT_I2H_GET_STATS_RSP = BFA_I2HM(3),
- BFI_PPORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4),
- BFI_PPORT_I2H_SET_SVC_PARAMS_RSP = BFA_I2HM(5),
- BFI_PPORT_I2H_ENABLE_RX_VF_TAG_RSP = BFA_I2HM(6),
- BFI_PPORT_I2H_ENABLE_TX_VF_TAG_RSP = BFA_I2HM(7),
- BFI_PPORT_I2H_EVENT = BFA_I2HM(8),
- BFI_PPORT_I2H_GET_QOS_STATS_RSP = BFA_I2HM(9),
- BFI_PPORT_I2H_CLEAR_QOS_STATS_RSP = BFA_I2HM(10),
+enum bfi_fcport_i2h {
+ BFI_FCPORT_I2H_ENABLE_RSP = BFA_I2HM(1),
+ BFI_FCPORT_I2H_DISABLE_RSP = BFA_I2HM(2),
+ BFI_FCPORT_I2H_SET_SVC_PARAMS_RSP = BFA_I2HM(3),
+ BFI_FCPORT_I2H_STATS_GET_RSP = BFA_I2HM(4),
+ BFI_FCPORT_I2H_STATS_CLEAR_RSP = BFA_I2HM(5),
+ BFI_FCPORT_I2H_EVENT = BFA_I2HM(6),
};
/**
* Generic REQ type
*/
-struct bfi_pport_generic_req_s {
+struct bfi_fcport_req_s {
struct bfi_mhdr_s mh; /* msg header */
- u32 msgtag; /* msgtag for reply */
+ u32 msgtag; /* msgtag for reply */
};
/**
* Generic RSP type
*/
-struct bfi_pport_generic_rsp_s {
+struct bfi_fcport_rsp_s {
struct bfi_mhdr_s mh; /* common msg header */
- u8 status; /* port enable status */
- u8 rsvd[3];
- u32 msgtag; /* msgtag for reply */
+ u8 status; /* port enable status */
+ u8 rsvd[3];
+ u32 msgtag; /* msgtag for reply */
};
/**
- * BFI_PPORT_H2I_ENABLE_REQ
+ * BFI_FCPORT_H2I_ENABLE_REQ
*/
-struct bfi_pport_enable_req_s {
+struct bfi_fcport_enable_req_s {
struct bfi_mhdr_s mh; /* msg header */
- u32 rsvd1;
- wwn_t nwwn; /* node wwn of physical port */
- wwn_t pwwn; /* port wwn of physical port */
- struct bfa_pport_cfg_s port_cfg; /* port configuration */
- union bfi_addr_u stats_dma_addr; /* DMA address for stats */
- u32 msgtag; /* msgtag for reply */
- u32 rsvd2;
+ u32 rsvd1;
+ wwn_t nwwn; /* node wwn of physical port */
+ wwn_t pwwn; /* port wwn of physical port */
+ struct bfa_pport_cfg_s port_cfg; /* port configuration */
+ union bfi_addr_u stats_dma_addr; /* DMA address for stats */
+ u32 msgtag; /* msgtag for reply */
+ u32 rsvd2;
};
/**
- * BFI_PPORT_I2H_ENABLE_RSP
+ * BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ
*/
-#define bfi_pport_enable_rsp_t struct bfi_pport_generic_rsp_s
-
-/**
- * BFI_PPORT_H2I_DISABLE_REQ
- */
-#define bfi_pport_disable_req_t struct bfi_pport_generic_req_s
-
-/**
- * BFI_PPORT_I2H_DISABLE_RSP
- */
-#define bfi_pport_disable_rsp_t struct bfi_pport_generic_rsp_s
-
-/**
- * BFI_PPORT_H2I_GET_STATS_REQ
- */
-#define bfi_pport_get_stats_req_t struct bfi_pport_generic_req_s
-
-/**
- * BFI_PPORT_I2H_GET_STATS_RSP
- */
-#define bfi_pport_get_stats_rsp_t struct bfi_pport_generic_rsp_s
-
-/**
- * BFI_PPORT_H2I_CLEAR_STATS_REQ
- */
-#define bfi_pport_clear_stats_req_t struct bfi_pport_generic_req_s
-
-/**
- * BFI_PPORT_I2H_CLEAR_STATS_RSP
- */
-#define bfi_pport_clear_stats_rsp_t struct bfi_pport_generic_rsp_s
-
-/**
- * BFI_PPORT_H2I_GET_QOS_STATS_REQ
- */
-#define bfi_pport_get_qos_stats_req_t struct bfi_pport_generic_req_s
-
-/**
- * BFI_PPORT_H2I_GET_QOS_STATS_RSP
- */
-#define bfi_pport_get_qos_stats_rsp_t struct bfi_pport_generic_rsp_s
-
-/**
- * BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ
- */
-#define bfi_pport_clear_qos_stats_req_t struct bfi_pport_generic_req_s
-
-/**
- * BFI_PPORT_H2I_CLEAR_QOS_STATS_RSP
- */
-#define bfi_pport_clear_qos_stats_rsp_t struct bfi_pport_generic_rsp_s
-
-/**
- * BFI_PPORT_H2I_SET_SVC_PARAMS_REQ
- */
-struct bfi_pport_set_svc_params_req_s {
+struct bfi_fcport_set_svc_params_req_s {
struct bfi_mhdr_s mh; /* msg header */
- u16 tx_bbcredit; /* Tx credits */
- u16 rsvd;
+ u16 tx_bbcredit; /* Tx credits */
+ u16 rsvd;
};
/**
- * BFI_PPORT_I2H_SET_SVC_PARAMS_RSP
- */
-
-/**
- * BFI_PPORT_I2H_EVENT
+ * BFI_FCPORT_I2H_EVENT
*/
-struct bfi_pport_event_s {
+struct bfi_fcport_event_s {
struct bfi_mhdr_s mh; /* common msg header */
struct bfa_pport_link_s link_state;
};
-union bfi_pport_h2i_msg_u {
+/**
+ * fcport H2I message
+ */
+union bfi_fcport_h2i_msg_u {
struct bfi_mhdr_s *mhdr;
- struct bfi_pport_enable_req_s *penable;
- struct bfi_pport_generic_req_s *pdisable;
- struct bfi_pport_generic_req_s *pgetstats;
- struct bfi_pport_generic_req_s *pclearstats;
- struct bfi_pport_set_svc_params_req_s *psetsvcparams;
- struct bfi_pport_get_qos_stats_req_s *pgetqosstats;
- struct bfi_pport_generic_req_s *pclearqosstats;
+ struct bfi_fcport_enable_req_s *penable;
+ struct bfi_fcport_req_s *pdisable;
+ struct bfi_fcport_set_svc_params_req_s *psetsvcparams;
+ struct bfi_fcport_req_s *pstatsget;
+ struct bfi_fcport_req_s *pstatsclear;
};
-union bfi_pport_i2h_msg_u {
+/**
+ * fcport I2H message
+ */
+union bfi_fcport_i2h_msg_u {
struct bfi_msg_s *msg;
- struct bfi_pport_generic_rsp_s *enable_rsp;
- struct bfi_pport_disable_rsp_s *disable_rsp;
- struct bfi_pport_generic_rsp_s *getstats_rsp;
- struct bfi_pport_clear_stats_rsp_s *clearstats_rsp;
- struct bfi_pport_set_svc_params_rsp_s *setsvcparasm_rsp;
- struct bfi_pport_get_qos_stats_rsp_s *getqosstats_rsp;
- struct bfi_pport_clear_qos_stats_rsp_s *clearqosstats_rsp;
- struct bfi_pport_event_s *event;
+ struct bfi_fcport_rsp_s *penable_rsp;
+ struct bfi_fcport_rsp_s *pdisable_rsp;
+ struct bfi_fcport_rsp_s *psetsvcparams_rsp;
+ struct bfi_fcport_rsp_s *pstatsget_rsp;
+ struct bfi_fcport_rsp_s *pstatsclear_rsp;
+ struct bfi_fcport_event_s *event;
};
#pragma pack()
#endif /* __BFI_PPORT_H__ */
-
diff --git a/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h b/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h
index 43ba7064e81a..a75a1f3be315 100644
--- a/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h
+++ b/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h
@@ -31,6 +31,10 @@
enum {
BFA_TRC_CNA_CEE = 1,
BFA_TRC_CNA_PORT = 2,
+ BFA_TRC_CNA_IOC = 3,
+ BFA_TRC_CNA_DIAG = 4,
+ BFA_TRC_CNA_IOC_CB = 5,
+ BFA_TRC_CNA_IOC_CT = 6,
};
#endif /* __BFA_CNA_TRCMOD_H__ */
diff --git a/drivers/scsi/bfa/include/cs/bfa_log.h b/drivers/scsi/bfa/include/cs/bfa_log.h
index 761cbe22130a..bc334e0a93fa 100644
--- a/drivers/scsi/bfa/include/cs/bfa_log.h
+++ b/drivers/scsi/bfa/include/cs/bfa_log.h
@@ -157,7 +157,7 @@ typedef void (*bfa_log_cb_t)(struct bfa_log_mod_s *log_mod, u32 msg_id,
struct bfa_log_mod_s {
- char instance_info[16]; /* instance info */
+ char instance_info[BFA_STRING_32]; /* instance info */
int log_level[BFA_LOG_MODULE_ID_MAX + 1];
/* log level for modules */
bfa_log_cb_t cbfn; /* callback function */
diff --git a/drivers/scsi/bfa/include/cs/bfa_plog.h b/drivers/scsi/bfa/include/cs/bfa_plog.h
index 670f86e5fc6e..f5bef63b5877 100644
--- a/drivers/scsi/bfa/include/cs/bfa_plog.h
+++ b/drivers/scsi/bfa/include/cs/bfa_plog.h
@@ -80,7 +80,8 @@ enum bfa_plog_mid {
BFA_PL_MID_HAL_FCXP = 4,
BFA_PL_MID_HAL_UF = 5,
BFA_PL_MID_FCS = 6,
- BFA_PL_MID_MAX = 7
+ BFA_PL_MID_LPS = 7,
+ BFA_PL_MID_MAX = 8
};
#define BFA_PL_MID_STRLEN 8
@@ -118,7 +119,11 @@ enum bfa_plog_eid {
BFA_PL_EID_RSCN = 17,
BFA_PL_EID_DEBUG = 18,
BFA_PL_EID_MISC = 19,
- BFA_PL_EID_MAX = 20
+ BFA_PL_EID_FIP_FCF_DISC = 20,
+ BFA_PL_EID_FIP_FCF_CVL = 21,
+ BFA_PL_EID_LOGIN = 22,
+ BFA_PL_EID_LOGO = 23,
+ BFA_PL_EID_MAX = 24
};
#define BFA_PL_ENAME_STRLEN 8
diff --git a/drivers/scsi/bfa/include/cs/bfa_sm.h b/drivers/scsi/bfa/include/cs/bfa_sm.h
index b0a92baf6657..11fba9082f05 100644
--- a/drivers/scsi/bfa/include/cs/bfa_sm.h
+++ b/drivers/scsi/bfa/include/cs/bfa_sm.h
@@ -23,6 +23,14 @@
#define __BFA_SM_H__
typedef void (*bfa_sm_t)(void *sm, int event);
+/**
+ * oc - object class eg. bfa_ioc
+ * st - state, eg. reset
+ * otype - object type, eg. struct bfa_ioc_s
+ * etype - object type, eg. enum ioc_event
+ */
+#define bfa_sm_state_decl(oc, st, otype, etype) \
+ static void oc ## _sm_ ## st(otype * fsm, etype event)
#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state))
#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event)))
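
bfa_sm_state_decl() above is a declaration helper: it pastes the object
class and state name into the conventional <oc>_sm_<st> handler prototype
used throughout the bfa_sm_fault() conversions in this series. A usage
sketch -- only the macro is from the patch, every demo_* name is invented:

    #include <stdio.h>

    #define bfa_sm_state_decl(oc, st, otype, etype) \
            static void oc ## _sm_ ## st(otype * fsm, etype event)

    struct demo_ioc_s;
    enum demo_ioc_event { DEMO_IOC_E_ENABLE = 1 };
    struct demo_ioc_s {
            void (*sm)(struct demo_ioc_s *ioc, enum demo_ioc_event event);
    };

    /* expands to:
     *   static void demo_ioc_sm_reset(struct demo_ioc_s *fsm,
     *                                 enum demo_ioc_event event);
     */
    bfa_sm_state_decl(demo_ioc, reset, struct demo_ioc_s,
                      enum demo_ioc_event);

    static void
    demo_ioc_sm_reset(struct demo_ioc_s *fsm, enum demo_ioc_event event)
    {
            (void)fsm;
            printf("reset state got event %d\n", event);
    }

    int main(void)
    {
            struct demo_ioc_s ioc = { .sm = demo_ioc_sm_reset };

            ioc.sm(&ioc, DEMO_IOC_E_ENABLE);
            return 0;
    }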
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_aen.h b/drivers/scsi/bfa/include/defs/bfa_defs_aen.h
index 4c81a613db3d..35244698fcdc 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_aen.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_aen.h
@@ -30,6 +30,16 @@
#include <defs/bfa_defs_audit.h>
#include <defs/bfa_defs_ethport.h>
+#define BFA_AEN_MAX_APP 5
+
+enum bfa_aen_app {
+ bfa_aen_app_bcu = 0, /* No thread for bcu */
+ bfa_aen_app_hcm = 1,
+ bfa_aen_app_cim = 2,
+ bfa_aen_app_snia = 3,
+ bfa_aen_app_test = 4, /* To be removed after unit test */
+};
+
enum bfa_aen_category {
BFA_AEN_CAT_ADAPTER = 1,
BFA_AEN_CAT_PORT = 2,
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_auth.h b/drivers/scsi/bfa/include/defs/bfa_defs_auth.h
index dd19c83aba58..45df32820911 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_auth.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_auth.h
@@ -23,6 +23,7 @@
#define PRIVATE_KEY 19009
#define KEY_LEN 32399
#define BFA_AUTH_SECRET_STRING_LEN 256
+#define BFA_AUTH_FAIL_NO_PASSWORD 0xFE
#define BFA_AUTH_FAIL_TIMEOUT 0xFF
/**
@@ -41,6 +42,27 @@ enum bfa_auth_status {
BFA_AUTH_STATUS_UNKNOWN = 9, /* authentication status unknown */
};
+enum bfa_auth_rej_code {
+ BFA_AUTH_RJT_CODE_AUTH_FAILURE = 1, /* auth failure */
+ BFA_AUTH_RJT_CODE_LOGICAL_ERR = 2, /* logical error */
+};
+
+/**
+ * Authentication reject codes
+ */
+enum bfa_auth_rej_code_exp {
+ BFA_AUTH_MECH_NOT_USABLE = 1, /* auth. mechanism not usable */
+ BFA_AUTH_DH_GROUP_NOT_USABLE = 2, /* DH Group not usable */
+ BFA_AUTH_HASH_FUNC_NOT_USABLE = 3, /* hash Function not usable */
+ BFA_AUTH_AUTH_XACT_STARTED = 4, /* auth xact started */
+ BFA_AUTH_AUTH_FAILED = 5, /* auth failed */
+ BFA_AUTH_INCORRECT_PLD = 6, /* incorrect payload */
+ BFA_AUTH_INCORRECT_PROTO_MSG = 7, /* incorrect proto msg */
+ BFA_AUTH_RESTART_AUTH_PROTO = 8, /* restart auth protocol */
+ BFA_AUTH_AUTH_CONCAT_NOT_SUPP = 9, /* auth concat not supported */
+ BFA_AUTH_PROTO_VER_NOT_SUPP = 10,/* proto version not supported */
+};
+
struct auth_proto_stats_s {
u32 auth_rjts;
u32 auth_negs;
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_cee.h b/drivers/scsi/bfa/include/defs/bfa_defs_cee.h
index 520a22f52dd1..b0ac9ac15c5d 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_cee.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_cee.h
@@ -28,10 +28,6 @@
#define BFA_CEE_LLDP_MAX_STRING_LEN (128)
-
-/* FIXME: this is coming from the protocol spec. Can the host & apps share the
- protocol .h files ?
- */
#define BFA_CEE_LLDP_SYS_CAP_OTHER 0x0001
#define BFA_CEE_LLDP_SYS_CAP_REPEATER 0x0002
#define BFA_CEE_LLDP_SYS_CAP_MAC_BRIDGE 0x0004
@@ -94,9 +90,10 @@ struct bfa_cee_dcbx_cfg_s {
/* CEE status */
/* Making this to tri-state for the benefit of port list command */
enum bfa_cee_status_e {
- CEE_PHY_DOWN = 0,
- CEE_PHY_UP = 1,
- CEE_UP = 2,
+ CEE_UP = 0,
+ CEE_PHY_UP = 1,
+ CEE_LOOPBACK = 2,
+ CEE_PHY_DOWN = 3,
};
/* CEE Query */
@@ -107,7 +104,8 @@ struct bfa_cee_attr_s {
struct bfa_cee_dcbx_cfg_s dcbx_remote;
mac_t src_mac;
u8 link_speed;
- u8 filler[3];
+ u8 nw_priority;
+ u8 filler[2];
};
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_driver.h b/drivers/scsi/bfa/include/defs/bfa_defs_driver.h
index 57049805762b..50382dd2ab41 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_driver.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_driver.h
@@ -21,6 +21,7 @@
/**
* Driver statistics
*/
+struct bfa_driver_stats_s {
u16 tm_io_abort;
u16 tm_io_abort_comp;
u16 tm_lun_reset;
@@ -34,7 +35,7 @@
u64 output_req;
u64 input_words;
u64 output_words;
-} bfa_driver_stats_t;
+};
#endif /* __BFA_DEFS_DRIVER_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h b/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h
index 79f9b3e146f7..b4fa0923aa89 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h
@@ -19,6 +19,7 @@
#define __BFA_DEFS_ETHPORT_H__
#include <defs/bfa_defs_status.h>
+#include <defs/bfa_defs_port.h>
#include <protocol/types.h>
#include <cna/pstats/phyport_defs.h>
#include <cna/pstats/ethport_defs.h>
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h b/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h
new file mode 100644
index 000000000000..a07ef4a3cd78
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * bfa_defs_fcport.h
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef __BFA_DEFS_FCPORT_H__
+#define __BFA_DEFS_FCPORT_H__
+
+#include <defs/bfa_defs_types.h>
+#include <protocol/types.h>
+
+#pragma pack(1)
+
+/**
+ * FCoE statistics
+ */
+struct bfa_fcoe_stats_s {
+ u64 secs_reset; /* Seconds since stats reset */
+ u64 cee_linkups; /* CEE link up */
+ u64 cee_linkdns; /* CEE link down */
+ u64 fip_linkups; /* FIP link up */
+ u64 fip_linkdns; /* FIP link down */
+ u64 fip_fails; /* FIP failures */
+ u64 mac_invalids; /* Invalid mac assignments */
+ u64 vlan_req; /* Vlan requests */
+ u64 vlan_notify; /* Vlan notifications */
+ u64 vlan_err; /* Vlan notification errors */
+ u64 vlan_timeouts; /* Vlan request timeouts */
+ u64 vlan_invalids; /* Vlan invalids */
+ u64 disc_req; /* Discovery requests */
+ u64 disc_rsp; /* Discovery responses */
+ u64 disc_err; /* Discovery error frames */
+ u64 disc_unsol; /* Discovery unsolicited */
+ u64 disc_timeouts; /* Discovery timeouts */
+ u64 disc_fcf_unavail; /* Discovery FCF not avail */
+ u64 linksvc_unsupp; /* FIP link service req unsupp. */
+ u64 linksvc_err; /* FIP link service req errors */
+ u64 logo_req; /* FIP logo */
+ u64 clrvlink_req; /* Clear virtual link requests */
+ u64 op_unsupp; /* FIP operation unsupp. */
+ u64 untagged; /* FIP untagged frames */
+ u64 txf_ucast; /* Tx FCoE unicast frames */
+ u64 txf_ucast_vlan; /* Tx FCoE unicast vlan frames */
+ u64 txf_ucast_octets; /* Tx FCoE unicast octets */
+ u64 txf_mcast; /* Tx FCoE multicast frames */
+ u64 txf_mcast_vlan; /* Tx FCoE multicast vlan frames */
+ u64 txf_mcast_octets; /* Tx FCoE multicast octets */
+ u64 txf_bcast; /* Tx FCoE broadcast frames */
+ u64 txf_bcast_vlan; /* Tx FCoE broadcast vlan frames */
+ u64 txf_bcast_octets; /* Tx FCoE broadcast octets */
+ u64 txf_timeout; /* Tx timeouts */
+ u64 txf_parity_errors; /* Transmit parity err */
+ u64 txf_fid_parity_errors; /* Transmit FID parity err */
+ u64 tx_pause; /* Tx pause frames */
+ u64 tx_zero_pause; /* Tx zero pause frames */
+ u64 tx_first_pause; /* Tx first pause frames */
+ u64 rx_pause; /* Rx pause frames */
+ u64 rx_zero_pause; /* Rx zero pause frames */
+ u64 rx_first_pause; /* Rx first pause frames */
+ u64 rxf_ucast_octets; /* Rx unicast octets */
+ u64 rxf_ucast; /* Rx unicast frames */
+ u64 rxf_ucast_vlan; /* Rx unicast vlan frames */
+ u64 rxf_mcast_octets; /* Rx multicast octets */
+ u64 rxf_mcast; /* Rx multicast frames */
+ u64 rxf_mcast_vlan; /* Rx multicast vlan frames */
+ u64 rxf_bcast_octets; /* Rx broadcast octets */
+ u64 rxf_bcast; /* Rx broadcast frames */
+ u64 rxf_bcast_vlan; /* Rx broadcast vlan frames */
+};
+
+/**
+ * QoS or FCoE stats (fcport stats excluding physical FC port stats)
+ */
+union bfa_fcport_stats_u {
+ struct bfa_qos_stats_s fcqos;
+ struct bfa_fcoe_stats_s fcoe;
+};
+
+#pragma pack()
+
+#endif /* __BFA_DEFS_FCPORT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_im_common.h b/drivers/scsi/bfa/include/defs/bfa_defs_im_common.h
deleted file mode 100644
index 9ccf53bef65a..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_im_common.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-
-#ifndef __BFA_DEFS_IM_COMMON_H__
-#define __BFA_DEFS_IM_COMMON_H__
-
-#define BFA_ADAPTER_NAME_LEN 256
-#define BFA_ADAPTER_GUID_LEN 256
-#define RESERVED_VLAN_NAME L"PORT VLAN"
-#define PASSTHRU_VLAN_NAME L"PASSTHRU VLAN"
-
- u64 tx_pkt_cnt;
- u64 rx_pkt_cnt;
- u32 duration;
- u8 status;
-} bfa_im_stats_t, *pbfa_im_stats_t;
-
-#endif /* __BFA_DEFS_IM_COMMON_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_im_team.h b/drivers/scsi/bfa/include/defs/bfa_defs_im_team.h
deleted file mode 100644
index a486a7eb81d6..000000000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_im_team.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-
-#ifndef __BFA_DEFS_IM_TEAM_H__
-#define __BFA_DEFS_IM_TEAM_H__
-
-#include <protocol/types.h>
-
-#define BFA_TEAM_MAX_PORTS 8
-#define BFA_TEAM_NAME_LEN 256
-#define BFA_MAX_NUM_TEAMS 16
-#define BFA_TEAM_INVALID_DELAY -1
-
- BFA_LACP_RATE_SLOW = 1,
- BFA_LACP_RATE_FAST
-} bfa_im_lacp_rate_t;
-
- BFA_TEAM_MODE_FAIL_OVER = 1,
- BFA_TEAM_MODE_FAIL_BACK,
- BFA_TEAM_MODE_LACP,
- BFA_TEAM_MODE_NONE
-} bfa_im_team_mode_t;
-
- BFA_XMIT_POLICY_L2 = 1,
- BFA_XMIT_POLICY_L3_L4
-} bfa_im_xmit_policy_t;
-
- bfa_im_team_mode_t team_mode;
- bfa_im_lacp_rate_t lacp_rate;
- bfa_im_xmit_policy_t xmit_policy;
- int delay;
- wchar_t primary[BFA_ADAPTER_NAME_LEN];
- wchar_t preferred_primary[BFA_ADAPTER_NAME_LEN];
- mac_t mac;
- u16 num_ports;
- u16 num_vlans;
- u16 vlan_list[BFA_MAX_VLANS_PER_PORT];
- wchar_t team_guid_list[BFA_TEAM_MAX_PORTS][BFA_ADAPTER_GUID_LEN];
- wchar_t ioc_name_list[BFA_TEAM_MAX_PORTS][BFA_ADAPTER_NAME_LEN];
-} bfa_im_team_attr_t;
-
- wchar_t team_name[BFA_TEAM_NAME_LEN];
- bfa_im_xmit_policy_t xmit_policy;
- int delay;
- wchar_t primary[BFA_ADAPTER_NAME_LEN];
- wchar_t preferred_primary[BFA_ADAPTER_NAME_LEN];
-} bfa_im_team_edit_t, *pbfa_im_team_edit_t;
-
- wchar_t team_name[BFA_TEAM_NAME_LEN];
- bfa_im_team_mode_t team_mode;
- mac_t mac;
-} bfa_im_team_info_t;
-
- bfa_im_team_info_t team_info[BFA_MAX_NUM_TEAMS];
- u16 num_teams;
-} bfa_im_team_list_t, *pbfa_im_team_list_t;
-
-#endif /* __BFA_DEFS_IM_TEAM_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h b/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h
index b1d532da3a9d..8d8e6a966537 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h
@@ -126,6 +126,7 @@ struct bfa_ioc_attr_s {
struct bfa_ioc_driver_attr_s driver_attr; /* driver attr */
struct bfa_ioc_pci_attr_s pci_attr;
u8 port_id; /* port number */
+ u8 rsvd[7]; /*!< 64bit align */
};
/**
@@ -143,8 +144,8 @@ enum bfa_ioc_aen_event {
* BFA IOC level event data, now just a place holder
*/
struct bfa_ioc_aen_data_s {
- enum bfa_ioc_type_e ioc_type;
wwn_t pwwn;
+ s16 ioc_type;
mac_t mac;
};
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h b/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h
index d76bcbd9820f..c290fb13d2d1 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h
@@ -26,6 +26,8 @@
#define BFA_IOCFC_INTR_DELAY 1125
#define BFA_IOCFC_INTR_LATENCY 225
+#define BFA_IOCFCOE_INTR_DELAY 25
+#define BFA_IOCFCOE_INTR_LATENCY 5
/**
* Interrupt coalescing configuration.
@@ -50,7 +52,7 @@ struct bfa_iocfc_fwcfg_s {
u16 num_fcxp_reqs; /* unassisted FC exchanges */
u16 num_uf_bufs; /* unsolicited recv buffers */
u8 num_cqs;
- u8 rsvd;
+ u8 rsvd[5];
};
struct bfa_iocfc_drvcfg_s {
@@ -224,18 +226,24 @@ struct bfa_fw_port_physm_stats_s {
struct bfa_fw_fip_stats_s {
+ u32 vlan_req; /* vlan discovery requests */
+ u32 vlan_notify; /* vlan notifications */
+ u32 vlan_err; /* vlan response error */
+ u32 vlan_timeouts; /* vlan discovery timeouts */
+ u32 vlan_invalids; /* invalid vlan in discovery advert. */
u32 disc_req; /* Discovery solicit requests */
u32 disc_rsp; /* Discovery solicit response */
u32 disc_err; /* Discovery advt. parse errors */
u32 disc_unsol; /* Discovery unsolicited */
u32 disc_timeouts; /* Discovery timeouts */
+ u32 disc_fcf_unavail; /* Discovery FCF Not Avail. */
u32 linksvc_unsupp; /* Unsupported link service req */
u32 linksvc_err; /* Parse error in link service req */
u32 logo_req; /* Number of FIP logos received */
u32 clrvlink_req; /* Clear virtual link req */
u32 op_unsupp; /* Unsupported FIP operation */
u32 untagged; /* Untagged frames (ignored) */
- u32 rsvd;
+ u32 invalid_version; /*!< Invalid FIP version */
};
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_lport.h b/drivers/scsi/bfa/include/defs/bfa_defs_lport.h
index 7359f82aacfc..0952a139c47c 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_lport.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_lport.h
@@ -59,8 +59,8 @@ enum bfa_lport_aen_event {
*/
struct bfa_lport_aen_data_s {
u16 vf_id; /* vf_id of this logical port */
- u16 rsvd;
- enum bfa_port_role roles; /* Logical port mode,IM/TM/IP etc */
+ s16 roles; /* Logical port mode,IM/TM/IP etc */
+ u32 rsvd;
wwn_t ppwwn; /* WWN of its physical port */
wwn_t lpwwn; /* WWN of this logical port */
};
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h b/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h
index 13fd4ab6aae2..c5bd9c36ad4d 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h
@@ -22,7 +22,47 @@
/**
* Manufacturing block version
*/
-#define BFA_MFG_VERSION 1
+#define BFA_MFG_VERSION 2
+
+/**
+ * Manufacturing block encrypted version
+ */
+#define BFA_MFG_ENC_VER 2
+
+/**
+ * Manufacturing block version 1 length
+ */
+#define BFA_MFG_VER1_LEN 128
+
+/**
+ * Manufacturing block header length
+ */
+#define BFA_MFG_HDR_LEN 4
+
+/**
+ * Checksum size
+ */
+#define BFA_MFG_CHKSUM_SIZE 16
+
+/**
+ * Manufacturing block encrypted version
+ */
+#define BFA_MFG_ENC_VER 2
+
+/**
+ * Manufacturing block version 1 length
+ */
+#define BFA_MFG_VER1_LEN 128
+
+/**
+ * Manufacturing block header length
+ */
+#define BFA_MFG_HDR_LEN 4
+
+/**
+ * Checksum size
+ */
+#define BFA_MFG_CHKSUM_SIZE 16
/**
* Manufacturing block format
@@ -30,29 +70,74 @@
#define BFA_MFG_SERIALNUM_SIZE 11
#define BFA_MFG_PARTNUM_SIZE 14
#define BFA_MFG_SUPPLIER_ID_SIZE 10
-#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20
-#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20
-#define BFA_MFG_SUPPLIER_REVISION_SIZE 4
+#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20
+#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20
+#define BFA_MFG_SUPPLIER_REVISION_SIZE 4
#define STRSZ(_n) (((_n) + 4) & ~3)
/**
+ * Manufacturing card type
+ */
+enum {
+ BFA_MFG_TYPE_CB_MAX = 825, /* Crossbow card type max */
+ BFA_MFG_TYPE_FC8P2 = 825, /* 8G 2port FC card */
+ BFA_MFG_TYPE_FC8P1 = 815, /* 8G 1port FC card */
+ BFA_MFG_TYPE_FC4P2 = 425, /* 4G 2port FC card */
+ BFA_MFG_TYPE_FC4P1 = 415, /* 4G 1port FC card */
+ BFA_MFG_TYPE_CNA10P2 = 1020, /* 10G 2port CNA card */
+ BFA_MFG_TYPE_CNA10P1 = 1010, /* 10G 1port CNA card */
+};
+
+#pragma pack(1)
+
+/**
+ * Card type to port number conversion
+ */
+#define bfa_mfg_type2port_num(card_type) (((card_type) / 10) % 10)
+
+
+/**
+ * All numerical fields are in big-endian format.
+ */
+struct bfa_mfg_block_s {
+};
+
+/**
* VPD data length
*/
-#define BFA_MFG_VPD_LEN 256
+#define BFA_MFG_VPD_LEN 512
+
+#define BFA_MFG_VPD_PCI_HDR_OFF 137
+#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /* version mask 3 bits */
+#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /* vendor mask 5 bits */
+
+/**
+ * VPD vendor tag
+ */
+enum {
+ BFA_MFG_VPD_UNKNOWN = 0, /* vendor unknown */
+ BFA_MFG_VPD_IBM = 1, /* vendor IBM */
+ BFA_MFG_VPD_HP = 2, /* vendor HP */
+ BFA_MFG_VPD_DELL = 3, /* vendor DELL */
+ BFA_MFG_VPD_PCI_IBM = 0x08, /* PCI VPD IBM */
+ BFA_MFG_VPD_PCI_HP = 0x10, /* PCI VPD HP */
+ BFA_MFG_VPD_PCI_DELL = 0x20, /* PCI VPD DELL */
+ BFA_MFG_VPD_PCI_BRCD = 0xf8, /* PCI VPD Brocade */
+};
/**
* All numerical fields are in big-endian format.
*/
struct bfa_mfg_vpd_s {
- u8 version; /* vpd data version */
- u8 vpd_sig[3]; /* characters 'V', 'P', 'D' */
- u8 chksum; /* u8 checksum */
- u8 vendor; /* vendor */
- u8 len; /* vpd data length excluding header */
- u8 rsv;
- u8 data[BFA_MFG_VPD_LEN]; /* vpd data */
+ u8 version; /* vpd data version */
+ u8 vpd_sig[3]; /* characters 'V', 'P', 'D' */
+ u8 chksum; /* u8 checksum */
+ u8 vendor; /* vendor */
+ u8 len; /* vpd data length excluding header */
+ u8 rsv;
+ u8 data[BFA_MFG_VPD_LEN]; /* vpd data */
};
-#pragma pack(1)
+#pragma pack()
#endif /* __BFA_DEFS_MFG_H__ */
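
A worked check of the two encoding macros in the hunk above, copied
verbatim from it: the tens digit of a card type carries its port count,
and STRSZ() rounds a string field up to a 4-byte multiple while always
reserving at least one byte for the NUL terminator.

    #include <assert.h>

    #define bfa_mfg_type2port_num(card_type) (((card_type) / 10) % 10)
    #define STRSZ(_n) (((_n) + 4) & ~3)

    int main(void)
    {
            assert(bfa_mfg_type2port_num(825) == 2);  /* FC8P2: 2-port   */
            assert(bfa_mfg_type2port_num(815) == 1);  /* FC8P1: 1-port   */
            assert(bfa_mfg_type2port_num(1020) == 2); /* CNA10P2: 2-port */
            assert(bfa_mfg_type2port_num(1010) == 1); /* CNA10P1: 1-port */

            /* 11-char serial number -> 12 bytes, 14-char part -> 16 */
            assert(STRSZ(11) == 12);  /* BFA_MFG_SERIALNUM_SIZE */
            assert(STRSZ(14) == 16);  /* BFA_MFG_PARTNUM_SIZE   */
            return 0;
    }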
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_port.h b/drivers/scsi/bfa/include/defs/bfa_defs_port.h
index de0696c81bc4..501bc9739d9d 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_port.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_port.h
@@ -185,6 +185,8 @@ struct bfa_port_attr_s {
wwn_t fabric_name; /* attached switch's nwwn */
u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ]; /* attached
* fabric's ip addr */
+ struct mac_s fpma_mac; /* Lport's FPMA Mac address */
+ u16 authfail; /* auth failed state */
};
/**
@@ -232,14 +234,15 @@ enum bfa_port_aen_sfp_pom {
};
struct bfa_port_aen_data_s {
- enum bfa_ioc_type_e ioc_type;
- wwn_t pwwn; /* WWN of the physical port */
- wwn_t fwwn; /* WWN of the fabric port */
- mac_t mac; /* MAC addres of the ethernet port,
- * applicable to CNA port only */
- int phy_port_num; /*! For SFP related events */
- enum bfa_port_aen_sfp_pom level; /* Only transitions will
- * be informed */
+ wwn_t pwwn; /* WWN of the physical port */
+ wwn_t fwwn; /* WWN of the fabric port */
+ s32 phy_port_num; /*! For SFP related events */
+ s16 ioc_type;
+ s16 level; /* Only transitions will
+ * be informed */
+ struct mac_s mac; /* MAC address of the ethernet port,
+ * applicable to CNA port only */
+ s16 rsvd;
};
#endif /* __BFA_DEFS_PORT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_pport.h b/drivers/scsi/bfa/include/defs/bfa_defs_pport.h
index bf320412ee24..26e5cc78095d 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_pport.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_pport.h
@@ -232,7 +232,7 @@ struct bfa_pport_attr_s {
u32 pid; /* port ID */
enum bfa_pport_type port_type; /* current topology */
u32 loopback; /* external loopback */
- u32 rsvd1;
+ u32 authfail; /* auth fail state */
u32 rsvd2; /* padding for 64 bit */
};
@@ -240,73 +240,79 @@ struct bfa_pport_attr_s {
* FC Port statistics.
*/
struct bfa_pport_fc_stats_s {
- u64 secs_reset; /* seconds since stats is reset */
- u64 tx_frames; /* transmitted frames */
- u64 tx_words; /* transmitted words */
- u64 rx_frames; /* received frames */
- u64 rx_words; /* received words */
- u64 lip_count; /* LIPs seen */
- u64 nos_count; /* NOS count */
- u64 error_frames; /* errored frames (sent?) */
- u64 dropped_frames; /* dropped frames */
- u64 link_failures; /* link failure count */
- u64 loss_of_syncs; /* loss of sync count */
- u64 loss_of_signals;/* loss of signal count */
- u64 primseq_errs; /* primitive sequence protocol */
- u64 bad_os_count; /* invalid ordered set */
- u64 err_enc_out; /* Encoding error outside frame */
- u64 invalid_crcs; /* frames received with invalid CRC*/
- u64 undersized_frm; /* undersized frames */
- u64 oversized_frm; /* oversized frames */
- u64 bad_eof_frm; /* frames with bad EOF */
- struct bfa_qos_stats_s qos_stats; /* QoS statistics */
+ u64 secs_reset; /* Seconds since stats is reset */
+ u64 tx_frames; /* Tx frames */
+ u64 tx_words; /* Tx words */
+ u64 tx_lip; /* TX LIP */
+ u64 tx_nos; /* Tx NOS */
+ u64 tx_ols; /* Tx OLS */
+ u64 tx_lr; /* Tx LR */
+ u64 tx_lrr; /* Tx LRR */
+ u64 rx_frames; /* Rx frames */
+ u64 rx_words; /* Rx words */
+ u64 lip_count; /* Rx LIP */
+ u64 nos_count; /* Rx NOS */
+ u64 ols_count; /* Rx OLS */
+ u64 lr_count; /* Rx LR */
+ u64 lrr_count; /* Rx LRR */
+ u64 invalid_crcs; /* Rx CRC err frames */
+ u64 invalid_crc_gd_eof; /* Rx CRC err good EOF frames */
+ u64 undersized_frm; /* Rx undersized frames */
+ u64 oversized_frm; /* Rx oversized frames */
+ u64 bad_eof_frm; /* Rx frames with bad EOF */
+ u64 error_frames; /* Errored frames */
+ u64 dropped_frames; /* Dropped frames */
+ u64 link_failures; /* Link Failure (LF) count */
+ u64 loss_of_syncs; /* Loss of sync count */
+ u64 loss_of_signals;/* Loss of signal count */
+ u64 primseq_errs; /* Primitive sequence protocol err. */
+ u64 bad_os_count; /* Invalid ordered sets */
+ u64 err_enc_out; /* Encoding err nonframe_8b10b */
+ u64 err_enc; /* Encoding err frame_8b10b */
};
/**
* Eth Port statistics.
*/
struct bfa_pport_eth_stats_s {
- u64 secs_reset; /* seconds since stats is reset */
- u64 frame_64; /* both rx and tx counter */
- u64 frame_65_127; /* both rx and tx counter */
- u64 frame_128_255; /* both rx and tx counter */
- u64 frame_256_511; /* both rx and tx counter */
- u64 frame_512_1023; /* both rx and tx counter */
- u64 frame_1024_1518; /* both rx and tx counter */
- u64 frame_1519_1522; /* both rx and tx counter */
-
- u64 tx_bytes;
- u64 tx_packets;
- u64 tx_mcast_packets;
- u64 tx_bcast_packets;
- u64 tx_control_frame;
- u64 tx_drop;
- u64 tx_jabber;
- u64 tx_fcs_error;
- u64 tx_fragments;
-
- u64 rx_bytes;
- u64 rx_packets;
- u64 rx_mcast_packets;
- u64 rx_bcast_packets;
- u64 rx_control_frames;
- u64 rx_unknown_opcode;
- u64 rx_drop;
- u64 rx_jabber;
- u64 rx_fcs_error;
- u64 rx_alignment_error;
- u64 rx_frame_length_error;
- u64 rx_code_error;
- u64 rx_fragments;
-
- u64 rx_pause; /* BPC */
- u64 rx_zero_pause; /* BPC Pause cancellation */
- u64 tx_pause; /* BPC */
- u64 tx_zero_pause; /* BPC Pause cancellation */
- u64 rx_fcoe_pause; /* BPC */
- u64 rx_fcoe_zero_pause; /* BPC Pause cancellation */
- u64 tx_fcoe_pause; /* BPC */
- u64 tx_fcoe_zero_pause; /* BPC Pause cancellation */
+ u64 secs_reset; /* Seconds since stats is reset */
+ u64 frame_64; /* Frames 64 bytes */
+ u64 frame_65_127; /* Frames 65-127 bytes */
+ u64 frame_128_255; /* Frames 128-255 bytes */
+ u64 frame_256_511; /* Frames 256-511 bytes */
+ u64 frame_512_1023; /* Frames 512-1023 bytes */
+ u64 frame_1024_1518; /* Frames 1024-1518 bytes */
+ u64 frame_1519_1522; /* Frames 1519-1522 bytes */
+ u64 tx_bytes; /* Tx bytes */
+ u64 tx_packets; /* Tx packets */
+ u64 tx_mcast_packets; /* Tx multicast packets */
+ u64 tx_bcast_packets; /* Tx broadcast packets */
+ u64 tx_control_frame; /* Tx control frame */
+ u64 tx_drop; /* Tx drops */
+ u64 tx_jabber; /* Tx jabber */
+ u64 tx_fcs_error; /* Tx FCS error */
+ u64 tx_fragments; /* Tx fragments */
+ u64 rx_bytes; /* Rx bytes */
+ u64 rx_packets; /* Rx packets */
+ u64 rx_mcast_packets; /* Rx multicast packets */
+ u64 rx_bcast_packets; /* Rx broadcast packets */
+ u64 rx_control_frames; /* Rx control frames */
+ u64 rx_unknown_opcode; /* Rx unknown opcode */
+ u64 rx_drop; /* Rx drops */
+ u64 rx_jabber; /* Rx jabber */
+ u64 rx_fcs_error; /* Rx FCS errors */
+ u64 rx_alignment_error; /* Rx alignment errors */
+ u64 rx_frame_length_error; /* Rx frame len errors */
+ u64 rx_code_error; /* Rx code errors */
+ u64 rx_fragments; /* Rx fragments */
+ u64 rx_pause; /* Rx pause */
+ u64 rx_zero_pause; /* Rx zero pause */
+ u64 tx_pause; /* Tx pause */
+ u64 tx_zero_pause; /* Tx zero pause */
+ u64 rx_fcoe_pause; /* Rx fcoe pause */
+ u64 rx_fcoe_zero_pause; /* Rx FCoE zero pause */
+ u64 tx_fcoe_pause; /* Tx FCoE pause */
+ u64 tx_fcoe_zero_pause; /* Tx FCoE zero pause */
};
/**
@@ -333,8 +339,7 @@ struct bfa_pport_fcpmap_s {
};
/**
- * Port RNID info.
- */
+ * Port RNID info. */
struct bfa_pport_rnid_s {
wwn_t wwn;
u32 unittype;
@@ -347,6 +352,23 @@ struct bfa_pport_rnid_s {
u16 topologydiscoveryflags;
};
+struct bfa_fcport_fcf_s {
+ wwn_t name; /* FCF name */
+ wwn_t fabric_name; /* Fabric Name */
+ u8 fipenabled; /* FIP enabled or not */
+ u8 fipfailed; /* FIP failed or not */
+ u8 resv[2];
+ u8 pri; /* FCF priority */
+ u8 version; /* FIP version used */
+ u8 available; /* Available for login */
+ u8 fka_disabled; /* FKA is disabled */
+ u8 maxsz_verified; /* FCoE max size verified */
+ u8 fc_map[3]; /* FC map */
+ u16 vlan; /* FCoE vlan tag/priority */
+ u32 fka_adv_per; /* FIP ka advert. period */
+ struct mac_s mac; /* FCF mac */
+};
+
/**
* Link state information
*/
@@ -378,6 +400,7 @@ struct bfa_pport_link_s {
struct fc_alpabm_s alpabm; /* alpa bitmap */
} loop_info;
} tl;
+ struct bfa_fcport_fcf_s fcf; /*!< FCF information (for FCoE) */
};
#endif /* __BFA_DEFS_PPORT_H__ */
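The new fpma_mac (port attributes) and fc_map (FCF info) fields are the two halves of an FCoE fabric-provided MAC address: per FC-BB-5, an FPMA is the 24-bit FC-MAP concatenated with the port's 24-bit FC_ID. A sketch of that composition (the helper name is illustrative, not part of the patch):

	/* Illustrative: build an FPMA MAC from the FCF's FC-MAP and a port ID. */
	static void fpma_compose(const u8 fc_map[3], u32 pid, u8 mac[6])
	{
		mac[0] = fc_map[0];		/* FC-MAP, upper 24 bits */
		mac[1] = fc_map[1];
		mac[2] = fc_map[2];
		mac[3] = (pid >> 16) & 0xff;	/* FC_ID, lower 24 bits */
		mac[4] = (pid >> 8) & 0xff;
		mac[5] = pid & 0xff;
	}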
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_status.h b/drivers/scsi/bfa/include/defs/bfa_defs_status.h
index cdceaeb9f4b8..4374494bd566 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_status.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_status.h
@@ -180,8 +180,8 @@ enum bfa_status {
BFA_STATUS_IM_ADAPT_ALREADY_IN_TEAM = 114, /* Given adapter is part
* of another team */
BFA_STATUS_IM_ADAPT_HAS_VLANS = 115, /* Adapter has VLANs configured.
- * Delete all VLANs before
- * creating team */
+ * Delete all VLANs to become
+ * part of the team */
BFA_STATUS_IM_PVID_MISMATCH = 116, /* Mismatching PVIDs configured
* for adapters */
BFA_STATUS_IM_LINK_SPEED_MISMATCH = 117, /* Mismatching link speeds
@@ -213,7 +213,7 @@ enum bfa_status {
* loaded */
BFA_STATUS_CARD_TYPE_MISMATCH = 131, /* Card type mismatch */
BFA_STATUS_BAD_ASICBLK = 132, /* Bad ASIC block */
- BFA_STATUS_NO_DRIVER = 133, /* Storage/Ethernet driver not loaded */
+ BFA_STATUS_NO_DRIVER = 133, /* Brocade adapter/driver not installed or loaded */
BFA_STATUS_INVALID_MAC = 134, /* Invalid mac address */
BFA_STATUS_IM_NO_VLAN = 135, /* No VLANs configured on the adapter */
BFA_STATUS_IM_ETH_LB_FAILED = 136, /* Ethernet loopback test failed */
@@ -228,8 +228,7 @@ enum bfa_status {
BFA_STATUS_IM_GET_INETCFG_FAILED = 142, /* Acquiring Network Subsystem
* handle Failed. Please try
* after some time */
- BFA_STATUS_IM_NOT_BOUND = 143, /* Brocade 10G Ethernet Service is not
- * Enabled on this port */
+ BFA_STATUS_IM_NOT_BOUND = 143, /* IM driver is not active */
BFA_STATUS_INSUFFICIENT_PERMS = 144, /* User doesn't have sufficient
* permissions to execute the BCU
* application */
@@ -242,6 +241,14 @@ enum bfa_status {
* failed */
BFA_STATUS_IM_UNBIND_FAILED = 149, /* ! < IM Driver unbind operation
* failed */
+ BFA_STATUS_IM_PORT_IN_TEAM = 150, /* Port is already part of the
+ * team */
+ BFA_STATUS_IM_VLAN_NOT_FOUND = 151, /* VLAN ID doesn't exist */
+ BFA_STATUS_IM_TEAM_NOT_FOUND = 152, /* Teaming configuration doesn't
+ * exist */
+ BFA_STATUS_IM_TEAM_CFG_NOT_ALLOWED = 153, /* Given settings are not
+ * allowed for the current
+ * Teaming mode */
BFA_STATUS_MAX_VAL /* Unknown error code */
};
#define bfa_status_t enum bfa_status
diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h b/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h
index a6c70aee0aa3..52585d3dd891 100644
--- a/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h
+++ b/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h
@@ -70,7 +70,6 @@ void bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv);
*/
void bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv);
-void bfa_fcb_itnim_tov_begin(struct bfad_itnim_s *itnim_drv);
void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim_drv);
#endif /* __BFAD_FCB_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs.h b/drivers/scsi/bfa/include/fcs/bfa_fcs.h
index 627669c65546..f2fd35fdee28 100644
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs.h
+++ b/drivers/scsi/bfa/include/fcs/bfa_fcs.h
@@ -49,6 +49,7 @@ struct bfa_fcs_s {
struct bfa_trc_mod_s *trcmod; /* tracing module */
struct bfa_aen_s *aen; /* aen component */
bfa_boolean_t vf_enabled; /* VF mode is enabled */
+ bfa_boolean_t fdmi_enabled; /*!< FDMI is enabled */
bfa_boolean_t min_cfg; /* min cfg enabled/disabled */
u16 port_vfid; /* port default VF ID */
struct bfa_fcs_driver_info_s driver_info;
@@ -60,10 +61,12 @@ struct bfa_fcs_s {
/*
* bfa fcs API functions
*/
-void bfa_fcs_init(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
+void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
bfa_boolean_t min_cfg);
+void bfa_fcs_init(struct bfa_fcs_s *fcs);
void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
struct bfa_fcs_driver_info_s *driver_info);
+void bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable);
void bfa_fcs_exit(struct bfa_fcs_s *fcs);
void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod);
void bfa_fcs_log_init(struct bfa_fcs_s *fcs, struct bfa_log_mod_s *logmod);
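The single-shot bfa_fcs_init() is split into an attach step and an init step, with FDMI configuration slotted between them. A plausible bring-up order for the driver (the arguments are placeholders; bfad supplies the real ones):

	/* Sketch of the new two-phase FCS bring-up. */
	bfa_fcs_attach(&fcs, bfa, bfad, BFA_FALSE);	/* bind FCS to BFA/driver */
	bfa_fcs_set_fdmi_param(&fcs, fdmi_enable);	/* configure before init */
	bfa_fcs_driver_info_init(&fcs, &driver_info);
	bfa_fcs_init(&fcs);				/* start FCS operation */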
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h
index 967ceb0eb074..ceaefd3060f4 100644
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h
+++ b/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h
@@ -34,14 +34,6 @@ struct bfa_fcs_s;
struct bfa_fcs_fabric_s;
/*
-* @todo : need to move to a global config file.
- * Maximum Vports supported per physical port or vf.
- */
-#define BFA_FCS_MAX_VPORTS_SUPP_CB 255
-#define BFA_FCS_MAX_VPORTS_SUPP_CT 191
-
-/*
-* @todo : need to move to a global config file.
* Maximum Rports supported per port (physical/logical).
*/
#define BFA_FCS_MAX_RPORTS_SUPP 256 /* @todo : tentative value */
diff --git a/drivers/scsi/bfa/include/log/bfa_log_hal.h b/drivers/scsi/bfa/include/log/bfa_log_hal.h
index 0412aea2ec30..5f8f5e30b9e8 100644
--- a/drivers/scsi/bfa/include/log/bfa_log_hal.h
+++ b/drivers/scsi/bfa/include/log/bfa_log_hal.h
@@ -27,4 +27,10 @@
(((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 3)
#define BFA_LOG_HAL_SM_ASSERT \
(((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 4)
+#define BFA_LOG_HAL_DRIVER_ERROR \
+ (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 5)
+#define BFA_LOG_HAL_DRIVER_CONFIG_ERROR \
+ (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 6)
+#define BFA_LOG_HAL_MBOX_ERROR \
+ (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 7)
#endif
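Each of these constants packs the module ID into the bits above BFA_LOG_MODID_OFFSET and a per-module event number below it, so a single u32 names both. A sketch of the inverse split, assuming the event field occupies all bits under the offset:

	/* Illustrative accessors for a packed log event ID. */
	#define BFA_LOG_MODID(id)	((id) >> BFA_LOG_MODID_OFFSET)
	#define BFA_LOG_EVENT(id)	((id) & ((1u << BFA_LOG_MODID_OFFSET) - 1))

	/* e.g. BFA_LOG_MODID(BFA_LOG_HAL_MBOX_ERROR) == BFA_LOG_HAL_ID,
	 *      BFA_LOG_EVENT(BFA_LOG_HAL_MBOX_ERROR) == 7
	 */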
diff --git a/drivers/scsi/bfa/include/log/bfa_log_linux.h b/drivers/scsi/bfa/include/log/bfa_log_linux.h
index 317c0547ee16..bd451db4c30a 100644
--- a/drivers/scsi/bfa/include/log/bfa_log_linux.h
+++ b/drivers/scsi/bfa/include/log/bfa_log_linux.h
@@ -41,4 +41,20 @@
(((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 10)
#define BFA_LOG_LINUX_SCSI_ABORT_COMP \
(((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 11)
+#define BFA_LOG_LINUX_DRIVER_CONFIG_ERROR \
+ (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 12)
+#define BFA_LOG_LINUX_BNA_STATE_MACHINE \
+ (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 13)
+#define BFA_LOG_LINUX_IOC_ERROR \
+ (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 14)
+#define BFA_LOG_LINUX_RESOURCE_ALLOC_ERROR \
+ (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 15)
+#define BFA_LOG_LINUX_RING_BUFFER_ERROR \
+ (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 16)
+#define BFA_LOG_LINUX_DRIVER_ERROR \
+ (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 17)
+#define BFA_LOG_LINUX_DRIVER_DIAG \
+ (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 18)
+#define BFA_LOG_LINUX_DRIVER_AEN \
+ (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 19)
#endif
diff --git a/drivers/scsi/bfa/include/protocol/fc.h b/drivers/scsi/bfa/include/protocol/fc.h
index 14969eecf6a9..8d1038035a76 100644
--- a/drivers/scsi/bfa/include/protocol/fc.h
+++ b/drivers/scsi/bfa/include/protocol/fc.h
@@ -50,6 +50,11 @@ struct fchs_s {
u32 ro; /* relative offset */
};
+
+#define FC_SOF_LEN 4
+#define FC_EOF_LEN 4
+#define FC_CRC_LEN 4
+
/*
* Fibre Channel BB_E Header Structure
*/
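The added FC_SOF_LEN/FC_EOF_LEN/FC_CRC_LEN constants cover the on-wire framing around the 24-byte FC header: with the FC maximum payload of 2112 bytes, one frame occupies 4 + 24 + 2112 + 4 + 4 = 2148 bytes on the wire. A sizing sketch (macro name illustrative):

	/* Illustrative: total on-wire size of one FC frame. */
	#define FC_FRAME_WIRE_SZ(payload) \
		(FC_SOF_LEN + sizeof(struct fchs_s) + (payload) + \
		 FC_CRC_LEN + FC_EOF_LEN)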
diff --git a/drivers/scsi/bfa/include/protocol/pcifw.h b/drivers/scsi/bfa/include/protocol/pcifw.h
deleted file mode 100644
index 6830dc3ee58a..000000000000
--- a/drivers/scsi/bfa/include/protocol/pcifw.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-
-/**
- * pcifw.h PCI FW related headers
- */
-
-#ifndef __PCIFW_H__
-#define __PCIFW_H__
-
-#pragma pack(1)
-
-struct pnp_hdr_s{
- u32 signature; /* "$PnP" */
- u8 rev; /* Struct revision */
- u8 len; /* Header structure len in multiples
- * of 16 bytes */
- u16 off; /* Offset to next header 00 if none */
- u8 rsvd; /* Reserved byte */
- u8 cksum; /* 8-bit checksum for this header */
- u32 pnp_dev_id; /* PnP Device Id */
- u16 mfstr; /* Pointer to manufacturer string */
- u16 prstr; /* Pointer to product string */
- u8 devtype[3]; /* Device Type Code */
- u8 devind; /* Device Indicator */
- u16 bcventr; /* Bootstrap entry vector */
- u16 rsvd2; /* Reserved */
- u16 sriv; /* Static resource information vector */
-};
-
-struct pci_3_0_ds_s{
- u32 sig; /* Signature "PCIR" */
- u16 vendid; /* Vendor ID */
- u16 devid; /* Device ID */
- u16 devlistoff; /* Device List Offset */
- u16 len; /* PCI Data Structure Length */
- u8 rev; /* PCI Data Structure Revision */
- u8 clcode[3]; /* Class Code */
- u16 imglen; /* Code image length in multiples of
- * 512 bytes */
- u16 coderev; /* Revision level of code/data */
- u8 codetype; /* Code type 0x00 - BIOS */
- u8 indr; /* Last image indicator */
- u16 mrtimglen; /* Max Run Time Image Length */
- u16 cuoff; /* Config Utility Code Header Offset */
- u16 dmtfclp; /* DMTF CLP entry point offset */
-};
-
-struct pci_optrom_hdr_s{
- u16 sig; /* Signature 0x55AA */
- u8 len; /* Option ROM length in units of 512 bytes */
- u8 inivec[3]; /* Initialization vector */
- u8 rsvd[16]; /* Reserved field */
- u16 verptr; /* Pointer to version string - private */
- u16 pcids; /* Pointer to PCI data structure */
- u16 pnphdr; /* Pointer to PnP expansion header */
-};
-
-#pragma pack()
-
-#endif
diff --git a/drivers/scsi/bfa/loop.c b/drivers/scsi/bfa/loop.c
index f7c7f4f3c640..f6342efb6a90 100644
--- a/drivers/scsi/bfa/loop.c
+++ b/drivers/scsi/bfa/loop.c
@@ -162,7 +162,7 @@ bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port, u8 alpa)
len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa,
bfa_fcs_port_get_fcid(port), 0,
port->port_cfg.pwwn, port->port_cfg.nwwn,
- bfa_pport_get_maxfrsize(port->fcs->bfa));
+ bfa_fcport_get_maxfrsize(port->fcs->bfa));
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs,
diff --git a/drivers/scsi/bfa/lport_api.c b/drivers/scsi/bfa/lport_api.c
index 1e06792cd4c2..d3907d184e2b 100644
--- a/drivers/scsi/bfa/lport_api.c
+++ b/drivers/scsi/bfa/lport_api.c
@@ -156,7 +156,7 @@ bfa_fcs_port_get_rport_max_speed(struct bfa_fcs_port_s *port)
/*
* Get Physical port's current speed
*/
- bfa_pport_get_attr(port->fcs->bfa, &pport_attr);
+ bfa_fcport_get_attr(port->fcs->bfa, &pport_attr);
pport_speed = pport_attr.speed;
bfa_trc(fcs, pport_speed);
@@ -235,7 +235,8 @@ bfa_fcs_port_get_info(struct bfa_fcs_port_s *port,
port_info->port_wwn = bfa_fcs_port_get_pwwn(port);
port_info->node_wwn = bfa_fcs_port_get_nwwn(port);
- port_info->max_vports_supp = bfa_fcs_vport_get_max(port->fcs);
+ port_info->max_vports_supp =
+ bfa_lps_get_max_vport(port->fcs->bfa);
port_info->num_vports_inuse =
bfa_fcs_fabric_vport_count(port->fabric);
port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP;
diff --git a/drivers/scsi/bfa/ms.c b/drivers/scsi/bfa/ms.c
index c96b3ca007ae..5e8c8dee6c97 100644
--- a/drivers/scsi/bfa/ms.c
+++ b/drivers/scsi/bfa/ms.c
@@ -118,7 +118,7 @@ bfa_fcs_port_ms_sm_offline(struct bfa_fcs_port_ms_s *ms,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ms->port->fcs, event);
}
}
@@ -141,7 +141,7 @@ bfa_fcs_port_ms_sm_plogi_sending(struct bfa_fcs_port_ms_s *ms,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ms->port->fcs, event);
}
}
@@ -190,7 +190,7 @@ bfa_fcs_port_ms_sm_plogi(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ms->port->fcs, event);
}
}
@@ -216,7 +216,7 @@ bfa_fcs_port_ms_sm_plogi_retry(struct bfa_fcs_port_ms_s *ms,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ms->port->fcs, event);
}
}
@@ -230,10 +230,6 @@ bfa_fcs_port_ms_sm_online(struct bfa_fcs_port_ms_s *ms,
switch (event) {
case MSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
- /*
- * now invoke MS related sub-modules
- */
- bfa_fcs_port_fdmi_offline(ms);
break;
case MSSM_EVENT_PORT_FABRIC_RSCN:
@@ -243,7 +239,7 @@ bfa_fcs_port_ms_sm_online(struct bfa_fcs_port_ms_s *ms,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ms->port->fcs, event);
}
}
@@ -266,7 +262,7 @@ bfa_fcs_port_ms_sm_gmal_sending(struct bfa_fcs_port_ms_s *ms,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ms->port->fcs, event);
}
}
@@ -304,7 +300,7 @@ bfa_fcs_port_ms_sm_gmal(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ms->port->fcs, event);
}
}
@@ -330,7 +326,7 @@ bfa_fcs_port_ms_sm_gmal_retry(struct bfa_fcs_port_ms_s *ms,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ms->port->fcs, event);
}
}
@@ -466,7 +462,7 @@ bfa_fcs_port_ms_sm_gfn_sending(struct bfa_fcs_port_ms_s *ms,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ms->port->fcs, event);
}
}
@@ -502,7 +498,7 @@ bfa_fcs_port_ms_sm_gfn(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ms->port->fcs, event);
}
}
@@ -528,7 +524,7 @@ bfa_fcs_port_ms_sm_gfn_retry(struct bfa_fcs_port_ms_s *ms,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ms->port->fcs, event);
}
}
@@ -637,7 +633,7 @@ bfa_fcs_port_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_os_hton3b(FC_MGMT_SERVER),
bfa_fcs_port_get_fcid(port), 0,
port->port_cfg.pwwn, port->port_cfg.nwwn,
- bfa_pport_get_maxfrsize(port->fcs->bfa));
+ bfa_fcport_get_maxfrsize(port->fcs->bfa));
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs, bfa_fcs_port_ms_plogi_response,
@@ -735,6 +731,7 @@ bfa_fcs_port_ms_offline(struct bfa_fcs_port_s *port)
ms->port = port;
bfa_sm_send_event(ms, MSSM_EVENT_PORT_OFFLINE);
+ bfa_fcs_port_fdmi_offline(ms);
}
void
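The recurring s/bfa_assert(0)/bfa_sm_fault()/ change in these state machines swaps a bare assertion for a fault handler that has the FCS instance in hand. The macro body is not part of this hunk; a minimal sketch of the idea, assuming it records the offending event before asserting:

	/* Sketch only; the real macro lives in the FCS headers and may differ. */
	#define bfa_sm_fault(__fcs, __event) do {	\
		bfa_trc((__fcs), (__event));		\
		bfa_assert(0);				\
	} while (0)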
diff --git a/drivers/scsi/bfa/ns.c b/drivers/scsi/bfa/ns.c
index 2f8b880060bb..d20dd7e15742 100644
--- a/drivers/scsi/bfa/ns.c
+++ b/drivers/scsi/bfa/ns.c
@@ -164,7 +164,7 @@ bfa_fcs_port_ns_sm_offline(struct bfa_fcs_port_ns_s *ns,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ns->port->fcs, event);
}
}
@@ -187,7 +187,7 @@ bfa_fcs_port_ns_sm_plogi_sending(struct bfa_fcs_port_ns_s *ns,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ns->port->fcs, event);
}
}
@@ -221,7 +221,7 @@ bfa_fcs_port_ns_sm_plogi(struct bfa_fcs_port_ns_s *ns,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ns->port->fcs, event);
}
}
@@ -247,7 +247,7 @@ bfa_fcs_port_ns_sm_plogi_retry(struct bfa_fcs_port_ns_s *ns,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ns->port->fcs, event);
}
}
@@ -270,7 +270,7 @@ bfa_fcs_port_ns_sm_sending_rspn_id(struct bfa_fcs_port_ns_s *ns,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ns->port->fcs, event);
}
}
@@ -304,7 +304,7 @@ bfa_fcs_port_ns_sm_rspn_id(struct bfa_fcs_port_ns_s *ns,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ns->port->fcs, event);
}
}
@@ -330,7 +330,7 @@ bfa_fcs_port_ns_sm_rspn_id_retry(struct bfa_fcs_port_ns_s *ns,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ns->port->fcs, event);
}
}
@@ -353,7 +353,7 @@ bfa_fcs_port_ns_sm_sending_rft_id(struct bfa_fcs_port_ns_s *ns,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ns->port->fcs, event);
}
}
@@ -390,7 +390,7 @@ bfa_fcs_port_ns_sm_rft_id(struct bfa_fcs_port_ns_s *ns,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ns->port->fcs, event);
}
}
@@ -413,7 +413,7 @@ bfa_fcs_port_ns_sm_rft_id_retry(struct bfa_fcs_port_ns_s *ns,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ns->port->fcs, event);
}
}
@@ -436,7 +436,7 @@ bfa_fcs_port_ns_sm_sending_rff_id(struct bfa_fcs_port_ns_s *ns,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ns->port->fcs, event);
}
}
@@ -494,7 +494,7 @@ bfa_fcs_port_ns_sm_rff_id(struct bfa_fcs_port_ns_s *ns,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ns->port->fcs, event);
}
}
@@ -517,7 +517,7 @@ bfa_fcs_port_ns_sm_rff_id_retry(struct bfa_fcs_port_ns_s *ns,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ns->port->fcs, event);
}
}
static void
@@ -539,7 +539,7 @@ bfa_fcs_port_ns_sm_sending_gid_ft(struct bfa_fcs_port_ns_s *ns,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ns->port->fcs, event);
}
}
@@ -575,7 +575,7 @@ bfa_fcs_port_ns_sm_gid_ft(struct bfa_fcs_port_ns_s *ns,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ns->port->fcs, event);
}
}
@@ -598,7 +598,7 @@ bfa_fcs_port_ns_sm_gid_ft_retry(struct bfa_fcs_port_ns_s *ns,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ns->port->fcs, event);
}
}
@@ -626,7 +626,7 @@ bfa_fcs_port_ns_sm_online(struct bfa_fcs_port_ns_s *ns,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(ns->port->fcs, event);
}
}
@@ -660,7 +660,7 @@ bfa_fcs_port_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_os_hton3b(FC_NAME_SERVER),
bfa_fcs_port_get_fcid(port), 0,
port->port_cfg.pwwn, port->port_cfg.nwwn,
- bfa_pport_get_maxfrsize(port->fcs->bfa));
+ bfa_fcport_get_maxfrsize(port->fcs->bfa));
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_plogi_response,
diff --git a/drivers/scsi/bfa/rport.c b/drivers/scsi/bfa/rport.c
index 9cf58bb138dc..8e73dd9a625a 100644
--- a/drivers/scsi/bfa/rport.c
+++ b/drivers/scsi/bfa/rport.c
@@ -224,7 +224,7 @@ bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
@@ -276,7 +276,7 @@ bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
@@ -332,7 +332,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
@@ -406,7 +406,7 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
@@ -481,7 +481,7 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
@@ -534,7 +534,7 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
@@ -589,7 +589,7 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
@@ -646,7 +646,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
@@ -704,7 +704,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
@@ -754,7 +754,7 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
@@ -816,7 +816,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
@@ -846,7 +846,7 @@ bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
@@ -869,7 +869,7 @@ bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
@@ -905,7 +905,7 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
@@ -925,10 +925,17 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
case RPSM_EVENT_HCB_OFFLINE:
case RPSM_EVENT_ADDRESS_CHANGE:
if (bfa_fcs_port_is_online(rport->port)) {
- bfa_sm_set_state(rport,
- bfa_fcs_rport_sm_nsdisc_sending);
- rport->ns_retries = 0;
- bfa_fcs_rport_send_gidpn(rport, NULL);
+ if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
+ bfa_sm_set_state(rport,
+ bfa_fcs_rport_sm_nsdisc_sending);
+ rport->ns_retries = 0;
+ bfa_fcs_rport_send_gidpn(rport, NULL);
+ } else {
+ bfa_sm_set_state(rport,
+ bfa_fcs_rport_sm_plogi_sending);
+ rport->plogi_retries = 0;
+ bfa_fcs_rport_send_plogi(rport, NULL);
+ }
} else {
rport->pid = 0;
bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
@@ -951,7 +958,7 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
@@ -1011,7 +1018,7 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
@@ -1038,7 +1045,7 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
@@ -1073,7 +1080,7 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
@@ -1132,7 +1139,7 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
@@ -1188,7 +1195,7 @@ bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
@@ -1249,7 +1256,7 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
@@ -1334,7 +1341,7 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
@@ -1366,7 +1373,7 @@ bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
bfa_fcs_port_get_fcid(port), 0,
port->port_cfg.pwwn, port->port_cfg.nwwn,
- bfa_pport_get_maxfrsize(port->fcs->bfa));
+ bfa_fcport_get_maxfrsize(port->fcs->bfa));
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response,
@@ -1478,7 +1485,7 @@ bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
len = fc_plogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
bfa_fcs_port_get_fcid(port), rport->reply_oxid,
port->port_cfg.pwwn, port->port_cfg.nwwn,
- bfa_pport_get_maxfrsize(port->fcs->bfa));
+ bfa_fcport_get_maxfrsize(port->fcs->bfa));
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
@@ -1813,7 +1820,7 @@ bfa_fcs_rport_process_rpsc(struct bfa_fcs_rport_s *rport,
/*
* get current speed from pport attributes from BFA
*/
- bfa_pport_get_attr(port->fcs->bfa, &pport_attr);
+ bfa_fcport_get_attr(port->fcs->bfa, &pport_attr);
speeds.port_op_speed = fc_bfa_speed_to_rpsc_operspeed(pport_attr.speed);
@@ -2032,13 +2039,10 @@ bfa_fcs_rport_aen_post(struct bfa_fcs_rport_s *rport,
switch (event) {
case BFA_RPORT_AEN_ONLINE:
- bfa_log(logmod, BFA_AEN_RPORT_ONLINE, rpwwn_ptr, lpwwn_ptr);
- break;
case BFA_RPORT_AEN_OFFLINE:
- bfa_log(logmod, BFA_AEN_RPORT_OFFLINE, rpwwn_ptr, lpwwn_ptr);
- break;
case BFA_RPORT_AEN_DISCONNECT:
- bfa_log(logmod, BFA_AEN_RPORT_DISCONNECT, rpwwn_ptr, lpwwn_ptr);
+ bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, event),
+ rpwwn_ptr, lpwwn_ptr);
break;
case BFA_RPORT_AEN_QOS_PRIO:
aen_data.rport.priv.qos = data->priv.qos;
@@ -2164,7 +2168,7 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
bfa_trc(port->fcs, port->fabric->bb_credit);
port->fabric->bb_credit = bfa_os_ntohs(plogi->csp.bbcred);
- bfa_pport_set_tx_bbcredit(port->fcs->bfa,
+ bfa_fcport_set_tx_bbcredit(port->fcs->bfa,
port->fabric->bb_credit);
}
@@ -2575,23 +2579,6 @@ bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
}
/**
- * Module initialization
- */
-void
-bfa_fcs_rport_modinit(struct bfa_fcs_s *fcs)
-{
-}
-
-/**
- * Module cleanup
- */
-void
-bfa_fcs_rport_modexit(struct bfa_fcs_s *fcs)
-{
- bfa_fcs_modexit_comp(fcs);
-}
-
-/**
* Return state of rport.
*/
int
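The ONLINE/OFFLINE/DISCONNECT switch arms in bfa_fcs_rport_aen_post() collapse into one bfa_log() call by deriving the message ID from the AEN category and event number, which only works if the BFA_AEN_RPORT_* IDs were laid out in enum order. A sketch of the macro this relies on (illustrative; the real definition is in the log headers):

	/* Illustrative: compose a message ID from AEN category and event,
	 * mirroring the BFA_LOG_* packing shown earlier.
	 */
	#define BFA_LOG_CREATE_ID(cat, event) \
		(((u32)(cat) << BFA_LOG_MODID_OFFSET) | (event))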
diff --git a/drivers/scsi/bfa/rport_api.c b/drivers/scsi/bfa/rport_api.c
index 3dae1774181e..a441f41d2a64 100644
--- a/drivers/scsi/bfa/rport_api.c
+++ b/drivers/scsi/bfa/rport_api.c
@@ -102,7 +102,7 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
rport_attr->qos_attr = qos_attr;
rport_attr->trl_enforced = BFA_FALSE;
- if (bfa_pport_is_ratelim(port->fcs->bfa)) {
+ if (bfa_fcport_is_ratelim(port->fcs->bfa)) {
if ((rport->rpf.rpsc_speed == BFA_PPORT_SPEED_UNKNOWN) ||
(rport->rpf.rpsc_speed <
bfa_fcs_port_get_rport_max_speed(port)))
diff --git a/drivers/scsi/bfa/rport_ftrs.c b/drivers/scsi/bfa/rport_ftrs.c
index e1932c885ac2..ae7bba67ae2a 100644
--- a/drivers/scsi/bfa/rport_ftrs.c
+++ b/drivers/scsi/bfa/rport_ftrs.c
@@ -91,7 +91,7 @@ bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
@@ -114,7 +114,7 @@ bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
@@ -160,7 +160,7 @@ bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
@@ -186,7 +186,7 @@ bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
@@ -206,7 +206,7 @@ bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
@@ -229,7 +229,7 @@ bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(rport->fcs, event);
}
}
/**
diff --git a/drivers/scsi/bfa/scn.c b/drivers/scsi/bfa/scn.c
index bd4771ff62c8..8fe09ba88a91 100644
--- a/drivers/scsi/bfa/scn.c
+++ b/drivers/scsi/bfa/scn.c
@@ -90,7 +90,7 @@ bfa_fcs_port_scn_sm_offline(struct bfa_fcs_port_scn_s *scn,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(scn->port->fcs, event);
}
}
@@ -109,7 +109,7 @@ bfa_fcs_port_scn_sm_sending_scr(struct bfa_fcs_port_scn_s *scn,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(scn->port->fcs, event);
}
}
@@ -137,7 +137,7 @@ bfa_fcs_port_scn_sm_scr(struct bfa_fcs_port_scn_s *scn,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(scn->port->fcs, event);
}
}
@@ -157,7 +157,7 @@ bfa_fcs_port_scn_sm_scr_retry(struct bfa_fcs_port_scn_s *scn,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(scn->port->fcs, event);
}
}
@@ -171,7 +171,7 @@ bfa_fcs_port_scn_sm_online(struct bfa_fcs_port_scn_s *scn,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(scn->port->fcs, event);
}
}
diff --git a/drivers/scsi/bfa/vport.c b/drivers/scsi/bfa/vport.c
index e90f1e38c32d..27cd619a227a 100644
--- a/drivers/scsi/bfa/vport.c
+++ b/drivers/scsi/bfa/vport.c
@@ -122,7 +122,7 @@ bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(__vport_fcs(vport), event);
}
}
@@ -165,7 +165,7 @@ bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(__vport_fcs(vport), event);
}
}
@@ -202,7 +202,7 @@ bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(__vport_fcs(vport), event);
}
}
@@ -249,7 +249,7 @@ bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(__vport_fcs(vport), event);
}
}
@@ -283,7 +283,7 @@ bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(__vport_fcs(vport), event);
}
}
@@ -310,7 +310,7 @@ bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(__vport_fcs(vport), event);
}
}
@@ -339,7 +339,7 @@ bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(__vport_fcs(vport), event);
}
}
@@ -387,7 +387,7 @@ bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(__vport_fcs(vport), event);
}
}
@@ -419,7 +419,7 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
break;
default:
- bfa_assert(0);
+ bfa_sm_fault(__vport_fcs(vport), event);
}
}
@@ -447,22 +447,8 @@ bfa_fcs_vport_aen_post(bfa_fcs_lport_t *port, enum bfa_lport_aen_event event)
bfa_assert(role <= BFA_PORT_ROLE_FCP_MAX);
- switch (event) {
- case BFA_LPORT_AEN_NPIV_DUP_WWN:
- bfa_log(logmod, BFA_AEN_LPORT_NPIV_DUP_WWN, lpwwn_ptr,
- role_str[role / 2]);
- break;
- case BFA_LPORT_AEN_NPIV_FABRIC_MAX:
- bfa_log(logmod, BFA_AEN_LPORT_NPIV_FABRIC_MAX, lpwwn_ptr,
- role_str[role / 2]);
- break;
- case BFA_LPORT_AEN_NPIV_UNKNOWN:
- bfa_log(logmod, BFA_AEN_LPORT_NPIV_UNKNOWN, lpwwn_ptr,
- role_str[role / 2]);
- break;
- default:
- break;
- }
+ bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, event), lpwwn_ptr,
+ role_str[role/2]);
aen_data.lport.vf_id = port->fabric->vf_id;
aen_data.lport.roles = role;
@@ -478,7 +464,7 @@ static void
bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport)
{
bfa_lps_fdisc(vport->lps, vport,
- bfa_pport_get_maxfrsize(__vport_bfa(vport)),
+ bfa_fcport_get_maxfrsize(__vport_bfa(vport)),
__vport_pwwn(vport), __vport_nwwn(vport));
vport->vport_stats.fdisc_sent++;
}
@@ -617,38 +603,6 @@ bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport)
}
/**
- * Module initialization
- */
-void
-bfa_fcs_vport_modinit(struct bfa_fcs_s *fcs)
-{
-}
-
-/**
- * Module cleanup
- */
-void
-bfa_fcs_vport_modexit(struct bfa_fcs_s *fcs)
-{
- bfa_fcs_modexit_comp(fcs);
-}
-
-u32
-bfa_fcs_vport_get_max(struct bfa_fcs_s *fcs)
-{
- struct bfa_ioc_attr_s ioc_attr;
-
- bfa_get_attr(fcs->bfa, &ioc_attr);
-
- if (ioc_attr.pci_attr.device_id == BFA_PCI_DEVICE_ID_CT)
- return BFA_FCS_MAX_VPORTS_SUPP_CT;
- else
- return BFA_FCS_MAX_VPORTS_SUPP_CB;
-}
-
-
-
-/**
* fcs_vport_api Virtual port API
*/
@@ -684,7 +638,7 @@ bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
return BFA_STATUS_VPORT_EXISTS;
if (bfa_fcs_fabric_vport_count(&fcs->fabric) ==
- bfa_fcs_vport_get_max(fcs))
+ bfa_lps_get_max_vport(fcs->bfa))
return BFA_STATUS_VPORT_MAX;
vport->lps = bfa_lps_alloc(fcs->bfa);
@@ -694,7 +648,8 @@ bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
vport->vport_drv = vport_drv;
bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
- bfa_fcs_lport_init(&vport->lport, fcs, vf_id, vport_cfg, vport);
+ bfa_fcs_lport_attach(&vport->lport, fcs, vf_id, vport);
+ bfa_fcs_lport_init(&vport->lport, vport_cfg);
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_CREATE);
@@ -888,4 +843,15 @@ bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg)
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
}
+/**
+ * Received clear virtual link
+ */
+void
+bfa_cb_lps_cvl_event(void *bfad, void *uarg)
+{
+ struct bfa_fcs_vport_s *vport = uarg;
+ /* Send an Offline followed by an ONLINE */
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE);
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
+}
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 1c4d1215769d..cb71dc984797 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1989,7 +1989,7 @@ static struct scsi_host_template bnx2i_host_template = {
.queuecommand = iscsi_queuecommand,
.eh_abort_handler = iscsi_eh_abort,
.eh_device_reset_handler = iscsi_eh_device_reset,
- .eh_target_reset_handler = iscsi_eh_target_reset,
+ .eh_target_reset_handler = iscsi_eh_recover_target,
.change_queue_depth = iscsi_change_queue_depth,
.can_queue = 1024,
.max_sectors = 127,
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
index 412853c65372..b7c30585dadd 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -915,7 +915,7 @@ static struct scsi_host_template cxgb3i_host_template = {
.cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
.eh_abort_handler = iscsi_eh_abort,
.eh_device_reset_handler = iscsi_eh_device_reset,
- .eh_target_reset_handler = iscsi_eh_target_reset,
+ .eh_target_reset_handler = iscsi_eh_recover_target,
.target_alloc = iscsi_target_alloc,
.use_clustering = DISABLE_CLUSTERING,
.this_id = -1,
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 61966750bd60..63032ec3db92 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -272,7 +272,7 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
int len = 0;
rq = blk_get_request(sdev->request_queue,
- (cmd == MODE_SELECT) ? WRITE : READ, GFP_NOIO);
+ (cmd != INQUIRY) ? WRITE : READ, GFP_NOIO);
if (!rq) {
sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed");
return NULL;
@@ -286,14 +286,17 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
len = sizeof(short_trespass);
rq->cmd_flags |= REQ_RW;
rq->cmd[1] = 0x10;
+ rq->cmd[4] = len;
break;
case MODE_SELECT_10:
len = sizeof(long_trespass);
rq->cmd_flags |= REQ_RW;
rq->cmd[1] = 0x10;
+ rq->cmd[8] = len;
break;
case INQUIRY:
len = CLARIION_BUFFER_SIZE;
+ rq->cmd[4] = len;
memset(buffer, 0, len);
break;
default:
@@ -301,7 +304,6 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
break;
}
- rq->cmd[4] = len;
rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
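Moving the length assignment into each case fixes a real CDB-layout bug: 6-byte CDBs (MODE_SELECT, INQUIRY) carry their parameter/allocation length in byte 4, but the 10-byte MODE_SELECT_10 carries a two-byte length in bytes 7-8, so the old unconditional rq->cmd[4] = len corrupted the 10-byte variant. The hunk writes only cmd[8], which is sufficient here because the trespass payload is far below 256 bytes; a fully general form would set both bytes:

	/* Sketch: 10-byte CDB parameter list length, MSB then LSB. */
	rq->cmd[7] = (len >> 8) & 0xff;	/* 0 for the small trespass page */
	rq->cmd[8] = len & 0xff;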
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 03697ba94251..183d3a43c280 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -43,6 +43,7 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
@@ -52,7 +53,7 @@
#include "hpsa.h"
/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
-#define HPSA_DRIVER_VERSION "2.0.1-3"
+#define HPSA_DRIVER_VERSION "2.0.2-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
/* How long to wait (in milliseconds) for board to go into simple mode */
@@ -134,6 +135,8 @@ static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
unsigned long elapsed_time);
+static int hpsa_change_queue_depth(struct scsi_device *sdev,
+ int qdepth, int reason);
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
@@ -182,8 +185,8 @@ static struct scsi_host_template hpsa_driver_template = {
.queuecommand = hpsa_scsi_queue_command,
.scan_start = hpsa_scan_start,
.scan_finished = hpsa_scan_finished,
+ .change_queue_depth = hpsa_change_queue_depth,
.this_id = -1,
- .sg_tablesize = MAXSGENTRIES,
.use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = hpsa_eh_device_reset_handler,
.ioctl = hpsa_ioctl,
@@ -208,133 +211,6 @@ static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
return (struct ctlr_info *) *priv;
}
-static struct task_struct *hpsa_scan_thread;
-static DEFINE_MUTEX(hpsa_scan_mutex);
-static LIST_HEAD(hpsa_scan_q);
-static int hpsa_scan_func(void *data);
-
-/**
- * add_to_scan_list() - add controller to rescan queue
- * @h: Pointer to the controller.
- *
- * Adds the controller to the rescan queue if not already on the queue.
- *
- * returns 1 if added to the queue, 0 if skipped (could be on the
- * queue already, or the controller could be initializing or shutting
- * down).
- **/
-static int add_to_scan_list(struct ctlr_info *h)
-{
- struct ctlr_info *test_h;
- int found = 0;
- int ret = 0;
-
- if (h->busy_initializing)
- return 0;
-
- /*
- * If we don't get the lock, it means the driver is unloading
- * and there's no point in scheduling a new scan.
- */
- if (!mutex_trylock(&h->busy_shutting_down))
- return 0;
-
- mutex_lock(&hpsa_scan_mutex);
- list_for_each_entry(test_h, &hpsa_scan_q, scan_list) {
- if (test_h == h) {
- found = 1;
- break;
- }
- }
- if (!found && !h->busy_scanning) {
- INIT_COMPLETION(h->scan_wait);
- list_add_tail(&h->scan_list, &hpsa_scan_q);
- ret = 1;
- }
- mutex_unlock(&hpsa_scan_mutex);
- mutex_unlock(&h->busy_shutting_down);
-
- return ret;
-}
-
-/**
- * remove_from_scan_list() - remove controller from rescan queue
- * @h: Pointer to the controller.
- *
- * Removes the controller from the rescan queue if present. Blocks if
- * the controller is currently conducting a rescan. The controller
- * can be in one of three states:
- * 1. Doesn't need a scan
- * 2. On the scan list, but not scanning yet (we remove it)
- * 3. Busy scanning (and not on the list). In this case we want to wait for
- * the scan to complete to make sure the scanning thread for this
- * controller is completely idle.
- **/
-static void remove_from_scan_list(struct ctlr_info *h)
-{
- struct ctlr_info *test_h, *tmp_h;
-
- mutex_lock(&hpsa_scan_mutex);
- list_for_each_entry_safe(test_h, tmp_h, &hpsa_scan_q, scan_list) {
- if (test_h == h) { /* state 2. */
- list_del(&h->scan_list);
- complete_all(&h->scan_wait);
- mutex_unlock(&hpsa_scan_mutex);
- return;
- }
- }
- if (h->busy_scanning) { /* state 3. */
- mutex_unlock(&hpsa_scan_mutex);
- wait_for_completion(&h->scan_wait);
- } else { /* state 1, nothing to do. */
- mutex_unlock(&hpsa_scan_mutex);
- }
-}
-
-/* hpsa_scan_func() - kernel thread used to rescan controllers
- * @data: Ignored.
- *
- * A kernel thread used scan for drive topology changes on
- * controllers. The thread processes only one controller at a time
- * using a queue. Controllers are added to the queue using
- * add_to_scan_list() and removed from the queue either after done
- * processing or using remove_from_scan_list().
- *
- * returns 0.
- **/
-static int hpsa_scan_func(__attribute__((unused)) void *data)
-{
- struct ctlr_info *h;
- int host_no;
-
- while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
- if (kthread_should_stop())
- break;
-
- while (1) {
- mutex_lock(&hpsa_scan_mutex);
- if (list_empty(&hpsa_scan_q)) {
- mutex_unlock(&hpsa_scan_mutex);
- break;
- }
- h = list_entry(hpsa_scan_q.next, struct ctlr_info,
- scan_list);
- list_del(&h->scan_list);
- h->busy_scanning = 1;
- mutex_unlock(&hpsa_scan_mutex);
- host_no = h->scsi_host ? h->scsi_host->host_no : -1;
- hpsa_scan_start(h->scsi_host);
- complete_all(&h->scan_wait);
- mutex_lock(&hpsa_scan_mutex);
- h->busy_scanning = 0;
- mutex_unlock(&hpsa_scan_mutex);
- }
- }
- return 0;
-}
-
static int check_for_unit_attention(struct ctlr_info *h,
struct CommandList *c)
{
@@ -352,21 +228,8 @@ static int check_for_unit_attention(struct ctlr_info *h,
break;
case REPORT_LUNS_CHANGED:
dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
- "changed\n", h->ctlr);
+ "changed, action required\n", h->ctlr);
/*
- * Here, we could call add_to_scan_list and wake up the scan thread,
- * except that it's quite likely that we will get more than one
- * REPORT_LUNS_CHANGED condition in quick succession, which means
- * that those which occur after the first one will likely happen
- * *during* the hpsa_scan_thread's rescan. And the rescan code is not
- * robust enough to restart in the middle, undoing what it has already
- * done, and it's not clear that it's even possible to do this, since
- * part of what it does is notify the SCSI mid layer, which starts
- * doing it's own i/o to read partition tables and so on, and the
- * driver doesn't have visibility to know what might need undoing.
- * In any event, if possible, it is horribly complicated to get right
- * so we just don't do it for now.
- *
* Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
*/
break;
@@ -393,10 +256,7 @@ static ssize_t host_store_rescan(struct device *dev,
struct ctlr_info *h;
struct Scsi_Host *shost = class_to_shost(dev);
h = shost_to_hba(shost);
- if (add_to_scan_list(h)) {
- wake_up_process(hpsa_scan_thread);
- wait_for_completion_interruptible(&h->scan_wait);
- }
+ hpsa_scan_start(h->scsi_host);
return count;
}
@@ -983,6 +843,76 @@ static void hpsa_scsi_setup(struct ctlr_info *h)
spin_lock_init(&h->devlock);
}
+static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
+{
+ int i;
+
+ if (!h->cmd_sg_list)
+ return;
+ for (i = 0; i < h->nr_cmds; i++) {
+ kfree(h->cmd_sg_list[i]);
+ h->cmd_sg_list[i] = NULL;
+ }
+ kfree(h->cmd_sg_list);
+ h->cmd_sg_list = NULL;
+}
+
+static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
+{
+ int i;
+
+ if (h->chainsize <= 0)
+ return 0;
+
+ h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
+ GFP_KERNEL);
+ if (!h->cmd_sg_list)
+ return -ENOMEM;
+ for (i = 0; i < h->nr_cmds; i++) {
+ h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
+ h->chainsize, GFP_KERNEL);
+ if (!h->cmd_sg_list[i])
+ goto clean;
+ }
+ return 0;
+
+clean:
+ hpsa_free_sg_chain_blocks(h);
+ return -ENOMEM;
+}
+
+static void hpsa_map_sg_chain_block(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ struct SGDescriptor *chain_sg, *chain_block;
+ u64 temp64;
+
+ chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
+ chain_block = h->cmd_sg_list[c->cmdindex];
+ chain_sg->Ext = HPSA_SG_CHAIN;
+ chain_sg->Len = sizeof(*chain_sg) *
+ (c->Header.SGTotal - h->max_cmd_sg_entries);
+ temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
+ PCI_DMA_TODEVICE);
+ chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
+ chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
+}
+
+static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ struct SGDescriptor *chain_sg;
+ union u64bit temp64;
+
+ if (c->Header.SGTotal <= h->max_cmd_sg_entries)
+ return;
+
+ chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
+ temp64.val32.lower = chain_sg->Addr.lower;
+ temp64.val32.upper = chain_sg->Addr.upper;
+ pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
+}
+
static void complete_scsi_command(struct CommandList *cp,
int timeout, u32 tag)
{
@@ -999,10 +929,12 @@ static void complete_scsi_command(struct CommandList *cp,
h = cp->h;
scsi_dma_unmap(cmd); /* undo the DMA mappings */
+ if (cp->Header.SGTotal > h->max_cmd_sg_entries)
+ hpsa_unmap_sg_chain_block(h, cp);
cmd->result = (DID_OK << 16); /* host byte */
cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
- cmd->result |= (ei->ScsiStatus << 1);
+ cmd->result |= ei->ScsiStatus;
/* copy the sense data whether we need to or not. */
memcpy(cmd->sense_buffer, ei->SenseInfo,
@@ -1203,6 +1135,7 @@ static int hpsa_scsi_detect(struct ctlr_info *h)
sh->max_id = HPSA_MAX_LUN;
sh->can_queue = h->nr_cmds;
sh->cmd_per_lun = h->nr_cmds;
+ sh->sg_tablesize = h->maxsgentries;
h->scsi_host = sh;
sh->hostdata[0] = (unsigned long) h;
sh->irq = h->intr[PERF_MODE_INT];
@@ -1382,7 +1315,7 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
if (c == NULL) { /* trouble... */
dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
- return -1;
+ return -ENOMEM;
}
fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
@@ -1904,16 +1837,17 @@ out:
* dma mapping and fills in the scatter gather entries of the
* hpsa command, cp.
*/
-static int hpsa_scatter_gather(struct pci_dev *pdev,
+static int hpsa_scatter_gather(struct ctlr_info *h,
struct CommandList *cp,
struct scsi_cmnd *cmd)
{
unsigned int len;
struct scatterlist *sg;
u64 addr64;
- int use_sg, i;
+ int use_sg, i, sg_index, chained;
+ struct SGDescriptor *curr_sg;
- BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES);
+ BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
use_sg = scsi_dma_map(cmd);
if (use_sg < 0)
@@ -1922,15 +1856,33 @@ static int hpsa_scatter_gather(struct pci_dev *pdev,
if (!use_sg)
goto sglist_finished;
+ curr_sg = cp->SG;
+ chained = 0;
+ sg_index = 0;
scsi_for_each_sg(cmd, sg, use_sg, i) {
+ if (i == h->max_cmd_sg_entries - 1 &&
+ use_sg > h->max_cmd_sg_entries) {
+ chained = 1;
+ curr_sg = h->cmd_sg_list[cp->cmdindex];
+ sg_index = 0;
+ }
addr64 = (u64) sg_dma_address(sg);
len = sg_dma_len(sg);
- cp->SG[i].Addr.lower =
- (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
- cp->SG[i].Addr.upper =
- (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
- cp->SG[i].Len = len;
- cp->SG[i].Ext = 0; /* we are not chaining */
+ curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
+ curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
+ curr_sg->Len = len;
+ curr_sg->Ext = 0; /* we are not chaining */
+ curr_sg++;
+ }
+
+ if (use_sg + chained > h->maxSG)
+ h->maxSG = use_sg + chained;
+
+ if (chained) {
+ cp->Header.SGList = h->max_cmd_sg_entries;
+ cp->Header.SGTotal = (u16) (use_sg + 1);
+ hpsa_map_sg_chain_block(h, cp);
+ return 0;
}
sglist_finished:
@@ -2026,7 +1978,7 @@ static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
break;
}
- if (hpsa_scatter_gather(h->pdev, c, cmd) < 0) { /* Fill SG list */
+ if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
cmd_free(h, c);
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -2077,6 +2029,23 @@ static int hpsa_scan_finished(struct Scsi_Host *sh,
return finished;
}
+static int hpsa_change_queue_depth(struct scsi_device *sdev,
+ int qdepth, int reason)
+{
+ struct ctlr_info *h = sdev_to_hba(sdev);
+
+ if (reason != SCSI_QDEPTH_DEFAULT)
+ return -ENOTSUPP;
+
+ if (qdepth < 1)
+ qdepth = 1;
+ else
+ if (qdepth > h->nr_cmds)
+ qdepth = h->nr_cmds;
+ scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+ return sdev->queue_depth;
+}
+
static void hpsa_unregister_scsi(struct ctlr_info *h)
{
/* we are being forcibly unloaded, and may not refuse. */
@@ -2961,7 +2930,7 @@ static irqreturn_t do_hpsa_intr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* Send a message CDB to the firmwart. */
+/* Send a message CDB to the firmware. */
static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
unsigned char type)
{
@@ -3296,7 +3265,7 @@ default_int_mode:
h->intr[PERF_MODE_INT] = pdev->irq;
}
-static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
+static int __devinit hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
{
ushort subsystem_vendor_id, subsystem_device_id, command;
u32 board_id, scratchpad = 0;
@@ -3405,6 +3374,23 @@ static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
h->board_id = board_id;
h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+ h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
+
+ /*
+ * Limit in-command s/g elements to 32 to save dma'able memory.
+ * However, the spec says if 0, use 31.
+ */
+
+ h->max_cmd_sg_entries = 31;
+ if (h->maxsgentries > 512) {
+ h->max_cmd_sg_entries = 32;
+ h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
+ h->maxsgentries--; /* save one for chain pointer */
+ } else {
+ h->maxsgentries = 31; /* default to traditional values */
+ h->chainsize = 0;
+ }
+
h->product_name = products[prod_index].product_name;
h->access = *(products[prod_index].access);
/* Allow room for some ioctls */
@@ -3532,8 +3518,6 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
h->busy_initializing = 1;
INIT_HLIST_HEAD(&h->cmpQ);
INIT_HLIST_HEAD(&h->reqQ);
- mutex_init(&h->busy_shutting_down);
- init_completion(&h->scan_wait);
rc = hpsa_pci_init(h, pdev);
if (rc != 0)
goto clean1;
@@ -3587,6 +3571,8 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
rc = -ENOMEM;
goto clean4;
}
+ if (hpsa_allocate_sg_chain_blocks(h))
+ goto clean4;
spin_lock_init(&h->lock);
spin_lock_init(&h->scan_lock);
init_waitqueue_head(&h->scan_wait_queue);
@@ -3609,6 +3595,7 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
return 1;
clean4:
+ hpsa_free_sg_chain_blocks(h);
kfree(h->cmd_pool_bits);
if (h->cmd_pool)
pci_free_consistent(h->pdev,
@@ -3681,11 +3668,10 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev)
return;
}
h = pci_get_drvdata(pdev);
- mutex_lock(&h->busy_shutting_down);
- remove_from_scan_list(h);
hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
hpsa_shutdown(pdev);
iounmap(h->vaddr);
+ hpsa_free_sg_chain_blocks(h);
pci_free_consistent(h->pdev,
h->nr_cmds * sizeof(struct CommandList),
h->cmd_pool, h->cmd_pool_dhandle);
@@ -3703,7 +3689,6 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev)
*/
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
- mutex_unlock(&h->busy_shutting_down);
kfree(h);
}
@@ -3857,23 +3842,12 @@ clean_up:
*/
static int __init hpsa_init(void)
{
- int err;
- /* Start the scan thread */
- hpsa_scan_thread = kthread_run(hpsa_scan_func, NULL, "hpsa_scan");
- if (IS_ERR(hpsa_scan_thread)) {
- err = PTR_ERR(hpsa_scan_thread);
- return -ENODEV;
- }
- err = pci_register_driver(&hpsa_pci_driver);
- if (err)
- kthread_stop(hpsa_scan_thread);
- return err;
+ return pci_register_driver(&hpsa_pci_driver);
}
static void __exit hpsa_cleanup(void)
{
pci_unregister_driver(&hpsa_pci_driver);
- kthread_stop(hpsa_scan_thread);
}
module_init(hpsa_init);
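The SG-chaining arithmetic in hpsa_pci_init() is easiest to follow with numbers. Taking a controller that reports MaxScatterGatherElements = 1024 as a worked example:

	/* maxsgentries = 1024 (> 512), so chaining is enabled:
	 *   max_cmd_sg_entries = 32    (31 data entries + 1 chain pointer)
	 *   chainsize = 1024 - 32 + 1 = 993 entries per per-command chain block
	 *   maxsgentries-- -> 1023, later advertised as sh->sg_tablesize
	 * Controllers reporting <= 512 keep the traditional 31 entries and
	 * chainsize = 0, i.e. no chain blocks are allocated.
	 */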
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index a0502b3ac17e..1bb5233b09a0 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -83,6 +83,10 @@ struct ctlr_info {
unsigned int maxQsinceinit;
unsigned int maxSG;
spinlock_t lock;
+ int maxsgentries;
+ u8 max_cmd_sg_entries;
+ int chainsize;
+ struct SGDescriptor **cmd_sg_list;
/* pointers to command and error info pool */
struct CommandList *cmd_pool;
@@ -97,9 +101,6 @@ struct ctlr_info {
int scan_finished;
spinlock_t scan_lock;
wait_queue_head_t scan_wait_queue;
- struct mutex busy_shutting_down;
- struct list_head scan_list;
- struct completion scan_wait;
struct Scsi_Host *scsi_host;
spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 3e0abdf76689..56fb9827681e 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -23,7 +23,8 @@
/* general boundary definitions */
#define SENSEINFOBYTES 32 /* may vary between hbas */
-#define MAXSGENTRIES 31
+#define MAXSGENTRIES 32
+#define HPSA_SG_CHAIN 0x80000000
#define MAXREPLYQS 256
/* Command Status value */
@@ -305,20 +306,23 @@ struct CommandList {
int cmd_type;
long cmdindex;
struct hlist_node list;
- struct CommandList *prev;
- struct CommandList *next;
struct request *rq;
struct completion *waiting;
- int retry_count;
void *scsi_cmd;
/* on 64 bit architectures, to get this to be 32-byte-aligned
- * it so happens we need no padding, on 32 bit systems,
- * we need 8 bytes of padding. This does that.
+ * it so happens we need PAD_64 bytes of padding; on 32 bit systems,
+ * we need PAD_32 bytes of padding (see below). This does that.
+ * If it happens that 64 bit and 32 bit systems need different
+ * padding, PAD_32 and PAD_64 can be set independently, and
+ * the code below will do the right thing.
*/
-#define COMMANDLIST_PAD ((8 - sizeof(long))/4 * 8)
+#define IS_32_BIT ((8 - sizeof(long))/4)
+#define IS_64_BIT (!IS_32_BIT)
+#define PAD_32 (4)
+#define PAD_64 (4)
+#define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
u8 pad[COMMANDLIST_PAD];
-
};
/* Configuration Table Structure */
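A quick worked check of the padding arithmetic above: with sizeof(long) == 8, IS_32_BIT = (8 - 8)/4 = 0 and COMMANDLIST_PAD = PAD_64 = 4; with sizeof(long) == 4, IS_32_BIT = (8 - 4)/4 = 1 and COMMANDLIST_PAD = PAD_32 = 4. A compile-time guard along these lines (hypothetical, not part of the patch) could pin the expectation down:

/* hypothetical assertion; would need to live inside a function */
BUILD_BUG_ON(COMMANDLIST_PAD != 4);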
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 732f6d35b4a8..4e577e2fee38 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -29,6 +29,7 @@
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/of.h>
+#include <linux/pm.h>
#include <linux/stringify.h>
#include <asm/firmware.h>
#include <asm/irq.h>
@@ -4736,6 +4737,27 @@ static int ibmvfc_remove(struct vio_dev *vdev)
}
/**
+ * ibmvfc_resume - Resume from suspend
+ * @dev: device struct
+ *
+ * We may have lost an interrupt across suspend/resume, so kick the
+ * interrupt handler.
+ */
+static int ibmvfc_resume(struct device *dev)
+{
+ unsigned long flags;
+ struct ibmvfc_host *vhost = dev_get_drvdata(dev);
+ struct vio_dev *vdev = to_vio_dev(dev);
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ vio_disable_interrupts(vdev);
+ tasklet_schedule(&vhost->tasklet);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ return 0;
+}
+
+/**
* ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
* @vdev: vio device struct
*
@@ -4755,6 +4777,10 @@ static struct vio_device_id ibmvfc_device_table[] __devinitdata = {
};
MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
+static struct dev_pm_ops ibmvfc_pm_ops = {
+ .resume = ibmvfc_resume
+};
+
static struct vio_driver ibmvfc_driver = {
.id_table = ibmvfc_device_table,
.probe = ibmvfc_probe,
@@ -4763,6 +4789,7 @@ static struct vio_driver ibmvfc_driver = {
.driver = {
.name = IBMVFC_NAME,
.owner = THIS_MODULE,
+ .pm = &ibmvfc_pm_ops,
}
};
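Only .resume is populated; dev_pm_ops members left NULL mean no callback runs for those phases, so suspend needs no handler here. If quiescing ever became necessary, a hook would slot into the same table; a hedged sketch with a hypothetical ibmvfc_suspend:

/* Hypothetical extension, not in this patch: */
static int ibmvfc_suspend(struct device *dev)
{
	return 0;	/* nothing to quiesce in this sketch */
}

static struct dev_pm_ops ibmvfc_pm_ops = {
	.suspend = ibmvfc_suspend,
	.resume  = ibmvfc_resume,
};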
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index e3a18e0ef276..dc1bcbe3b176 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -71,6 +71,7 @@
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/of.h>
+#include <linux/pm.h>
#include <asm/firmware.h>
#include <asm/vio.h>
#include <scsi/scsi.h>
@@ -1991,6 +1992,19 @@ static int ibmvscsi_remove(struct vio_dev *vdev)
}
/**
+ * ibmvscsi_resume - Resume from suspend
+ * @dev: device struct
+ *
+ * We may have lost an interrupt across suspend/resume, so kick the
+ * interrupt handler.
+ */
+static int ibmvscsi_resume(struct device *dev)
+{
+ struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev);
+ return ibmvscsi_ops->resume(hostdata);
+}
+
+/**
* ibmvscsi_device_table: Used by vio.c to match devices in the device tree we
* support.
*/
@@ -2000,6 +2014,10 @@ static struct vio_device_id ibmvscsi_device_table[] __devinitdata = {
};
MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);
+static struct dev_pm_ops ibmvscsi_pm_ops = {
+ .resume = ibmvscsi_resume
+};
+
static struct vio_driver ibmvscsi_driver = {
.id_table = ibmvscsi_device_table,
.probe = ibmvscsi_probe,
@@ -2008,6 +2026,7 @@ static struct vio_driver ibmvscsi_driver = {
.driver = {
.name = "ibmvscsi",
.owner = THIS_MODULE,
+ .pm = &ibmvscsi_pm_ops,
}
};
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 76425303def0..9cb7c6a773e1 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -120,6 +120,7 @@ struct ibmvscsi_ops {
struct ibmvscsi_host_data *hostdata);
int (*send_crq)(struct ibmvscsi_host_data *hostdata,
u64 word1, u64 word2);
+ int (*resume) (struct ibmvscsi_host_data *hostdata);
};
extern struct ibmvscsi_ops iseriesvscsi_ops;
diff --git a/drivers/scsi/ibmvscsi/iseries_vscsi.c b/drivers/scsi/ibmvscsi/iseries_vscsi.c
index 0775fdee5fa8..f4776451a754 100644
--- a/drivers/scsi/ibmvscsi/iseries_vscsi.c
+++ b/drivers/scsi/ibmvscsi/iseries_vscsi.c
@@ -158,10 +158,16 @@ static int iseriesvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
0);
}
+static int iseriesvscsi_resume(struct ibmvscsi_host_data *hostdata)
+{
+ return 0;
+}
+
struct ibmvscsi_ops iseriesvscsi_ops = {
.init_crq_queue = iseriesvscsi_init_crq_queue,
.release_crq_queue = iseriesvscsi_release_crq_queue,
.reset_crq_queue = iseriesvscsi_reset_crq_queue,
.reenable_crq_queue = iseriesvscsi_reenable_crq_queue,
.send_crq = iseriesvscsi_send_crq,
+ .resume = iseriesvscsi_resume,
};
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index 462a8574dad9..63a30cbbf9de 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -334,10 +334,23 @@ static int rpavscsi_reenable_crq_queue(struct crq_queue *queue,
return rc;
}
+/**
+ * rpavscsi_resume - Resume after suspend
+ * @hostdata: ibmvscsi_host_data of host
+ */
+static int rpavscsi_resume(struct ibmvscsi_host_data *hostdata)
+{
+ vio_disable_interrupts(to_vio_dev(hostdata->dev));
+ tasklet_schedule(&hostdata->srp_task);
+ return 0;
+}
+
struct ibmvscsi_ops rpavscsi_ops = {
.init_crq_queue = rpavscsi_init_crq_queue,
.release_crq_queue = rpavscsi_release_crq_queue,
.reset_crq_queue = rpavscsi_reset_crq_queue,
.reenable_crq_queue = rpavscsi_reenable_crq_queue,
.send_crq = rpavscsi_send_crq,
+ .resume = rpavscsi_resume,
};
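Taken together, the ibmvscsi hunks complete one indirection: the generic resume path stays transport-agnostic and each backend decides what a post-resume kick means:

/* ibmvscsi_resume(dev)
 *   -> ibmvscsi_ops->resume(hostdata)
 *        rpavscsi_ops:     disable VIO interrupts, schedule srp_task
 *        iseriesvscsi_ops: no-op, returns 0
 */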
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 032f0d0e6cb4..c79cd98eb6bf 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -72,6 +72,8 @@
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
+#include <linux/reboot.h>
+#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
@@ -91,8 +93,8 @@ static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
-static unsigned int ipr_enable_cache = 1;
static unsigned int ipr_debug = 0;
+static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);
@@ -104,13 +106,20 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
{
.set_interrupt_mask_reg = 0x0022C,
.clr_interrupt_mask_reg = 0x00230,
+ .clr_interrupt_mask_reg32 = 0x00230,
.sense_interrupt_mask_reg = 0x0022C,
+ .sense_interrupt_mask_reg32 = 0x0022C,
.clr_interrupt_reg = 0x00228,
+ .clr_interrupt_reg32 = 0x00228,
.sense_interrupt_reg = 0x00224,
+ .sense_interrupt_reg32 = 0x00224,
.ioarrin_reg = 0x00404,
.sense_uproc_interrupt_reg = 0x00214,
+ .sense_uproc_interrupt_reg32 = 0x00214,
.set_uproc_interrupt_reg = 0x00214,
- .clr_uproc_interrupt_reg = 0x00218
+ .set_uproc_interrupt_reg32 = 0x00214,
+ .clr_uproc_interrupt_reg = 0x00218,
+ .clr_uproc_interrupt_reg32 = 0x00218
}
},
{ /* Snipe and Scamp */
@@ -119,25 +128,59 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
{
.set_interrupt_mask_reg = 0x00288,
.clr_interrupt_mask_reg = 0x0028C,
+ .clr_interrupt_mask_reg32 = 0x0028C,
.sense_interrupt_mask_reg = 0x00288,
+ .sense_interrupt_mask_reg32 = 0x00288,
.clr_interrupt_reg = 0x00284,
+ .clr_interrupt_reg32 = 0x00284,
.sense_interrupt_reg = 0x00280,
+ .sense_interrupt_reg32 = 0x00280,
.ioarrin_reg = 0x00504,
.sense_uproc_interrupt_reg = 0x00290,
+ .sense_uproc_interrupt_reg32 = 0x00290,
.set_uproc_interrupt_reg = 0x00290,
- .clr_uproc_interrupt_reg = 0x00294
+ .set_uproc_interrupt_reg32 = 0x00290,
+ .clr_uproc_interrupt_reg = 0x00294,
+ .clr_uproc_interrupt_reg32 = 0x00294
+ }
+ },
+ { /* CRoC */
+ .mailbox = 0x00040,
+ .cache_line_size = 0x20,
+ {
+ .set_interrupt_mask_reg = 0x00010,
+ .clr_interrupt_mask_reg = 0x00018,
+ .clr_interrupt_mask_reg32 = 0x0001C,
+ .sense_interrupt_mask_reg = 0x00010,
+ .sense_interrupt_mask_reg32 = 0x00014,
+ .clr_interrupt_reg = 0x00008,
+ .clr_interrupt_reg32 = 0x0000C,
+ .sense_interrupt_reg = 0x00000,
+ .sense_interrupt_reg32 = 0x00004,
+ .ioarrin_reg = 0x00070,
+ .sense_uproc_interrupt_reg = 0x00020,
+ .sense_uproc_interrupt_reg32 = 0x00024,
+ .set_uproc_interrupt_reg = 0x00020,
+ .set_uproc_interrupt_reg32 = 0x00024,
+ .clr_uproc_interrupt_reg = 0x00028,
+ .clr_uproc_interrupt_reg32 = 0x0002C,
+ .init_feedback_reg = 0x0005C,
+ .dump_addr_reg = 0x00064,
+ .dump_data_reg = 0x00068
}
},
};
static const struct ipr_chip_t ipr_chip[] = {
- { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, &ipr_chip_cfg[1] },
- { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, &ipr_chip_cfg[1] }
+ { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
+ { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] }
};
static int ipr_max_bus_speeds [] = {
@@ -156,12 +199,13 @@ module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
-module_param_named(enable_cache, ipr_enable_cache, int, 0);
-MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
+module_param_named(max_devs, ipr_max_devs, int, 0);
+MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
+ "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
@@ -180,6 +224,20 @@ struct ipr_error_table_t ipr_error_table[] = {
"FFFE: Soft device bus error recovered by the IOA"},
{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
"4101: Soft device bus fabric error"},
+ {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "FFFC: Logical block guard error recovered by the device"},
+ {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
+ "FFFC: Logical block reference tag error recovered by the device"},
+ {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
+ "4171: Recovered scatter list tag / sequence number error"},
+ {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
+ {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
+ "4171: Recovered logical block sequence number error on IOA to Host transfer"},
+ {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "FFFD: Recovered logical block reference tag error detected by the IOA"},
+ {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "FFFD: Logical block guard error recovered by the IOA"},
{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
"FFF9: Device sector reassign successful"},
{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
@@ -236,12 +294,28 @@ struct ipr_error_table_t ipr_error_table[] = {
"3120: SCSI bus is not operational"},
{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
"4100: Hard device bus fabric error"},
+ {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "310C: Logical block guard error detected by the device"},
+ {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
+ "310C: Logical block reference tag error detected by the device"},
+ {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
+ "4170: Scatter list tag / sequence number error"},
+ {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
+ "8150: Logical block CRC error on IOA to Host transfer"},
+ {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
+ "4170: Logical block sequence number error on IOA to Host transfer"},
+ {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
+ "310D: Logical block reference tag error detected by the IOA"},
+ {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "310D: Logical block guard error detected by the IOA"},
{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
"9000: IOA reserved area data check"},
{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
"9001: IOA reserved area invalid data pattern"},
{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
"9002: IOA reserved area LRC error"},
+ {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
+ "Hardware Error, IOA metadata access error"},
{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
"102E: Out of alternate sectors for disk storage"},
{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
@@ -306,6 +380,8 @@ struct ipr_error_table_t ipr_error_table[] = {
"Illegal request, commands not allowed to this device"},
{0x05258100, 0, 0,
"Illegal request, command not allowed to a secondary adapter"},
+ {0x05258200, 0, 0,
+ "Illegal request, command not allowed to a non-optimized resource"},
{0x05260000, 0, 0,
"Illegal request, invalid field in parameter list"},
{0x05260100, 0, 0,
@@ -468,7 +544,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
trace_entry->time = jiffies;
trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
trace_entry->type = type;
- trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command;
+ if (ipr_cmd->ioa_cfg->sis64)
+ trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
+ else
+ trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
trace_entry->u.add_data = add_data;
@@ -488,16 +567,23 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
- dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
+ dma_addr_t dma_addr = ipr_cmd->dma_addr;
memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
- ioarcb->write_data_transfer_length = 0;
+ ioarcb->data_transfer_length = 0;
ioarcb->read_data_transfer_length = 0;
- ioarcb->write_ioadl_len = 0;
+ ioarcb->ioadl_len = 0;
ioarcb->read_ioadl_len = 0;
- ioarcb->write_ioadl_addr =
- cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
- ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+
+ if (ipr_cmd->ioa_cfg->sis64)
+ ioarcb->u.sis64_addr_data.data_ioadl_addr =
+ cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
+ else {
+ ioarcb->write_ioadl_addr =
+ cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
+ ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+ }
+
ioasa->ioasc = 0;
ioasa->residual_data_len = 0;
ioasa->u.gata.status = 0;
@@ -562,10 +648,15 @@ static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg->allow_interrupts = 0;
/* Set interrupt mask to stop all new interrupts */
- writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
+ if (ioa_cfg->sis64)
+ writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
+ else
+ writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
/* Clear any pending interrupts */
- writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
+ if (ioa_cfg->sis64)
+ writel(~0, ioa_cfg->regs.clr_interrupt_reg);
+ writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
@@ -693,6 +784,35 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
}
/**
+ * ipr_send_command - Send driver initiated requests.
+ * @ipr_cmd: ipr command struct
+ *
+ * This function sends a command to the adapter using the correct write call.
+ * In the case of sis64, calculate the ioarcb size required. Then OR in the
+ * appropriate bits.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
+
+ if (ioa_cfg->sis64) {
+ /* The default size is 256 bytes */
+ send_dma_addr |= 0x1;
+
+		/* If the number of ioadls * size of ioadl > 128 bytes,
+		 * then use a 512 byte ioarcb. */
+		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
+ send_dma_addr |= 0x4;
+ writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
+ } else
+ writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
+}
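A worked example of the size encoding, assuming sizeof(struct ipr_ioadl64_desc) is 16 bytes (the real size is defined in ipr.h; 16 is an assumption here):

/*  8 descriptors * 16 bytes = 128 -> fits, 256 byte IOARCB, only 0x1 set
 *  9 descriptors * 16 bytes = 144 -> exceeds 128, 0x4 also set (512 bytes)
 */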
+
+/**
* ipr_do_req - Send driver initiated requests.
* @ipr_cmd: ipr command struct
* @done: done function
@@ -724,8 +844,8 @@ static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
mb();
- writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
- ioa_cfg->regs.ioarrin_reg);
+
+ ipr_send_command(ipr_cmd);
}
/**
@@ -747,6 +867,51 @@ static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
}
/**
+ * ipr_init_ioadl - initialize the ioadl for the correct SIS type
+ * @ipr_cmd: ipr command struct
+ * @dma_addr: dma address
+ * @len: transfer length
+ * @flags: ioadl flag value
+ *
+ * This function initializes an ioadl in the case where there is only a single
+ * descriptor.
+ *
+ * Return value:
+ * nothing
+ **/
+static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
+ u32 len, int flags)
+{
+ struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
+ struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
+
+ ipr_cmd->dma_use_sg = 1;
+
+ if (ipr_cmd->ioa_cfg->sis64) {
+ ioadl64->flags = cpu_to_be32(flags);
+ ioadl64->data_len = cpu_to_be32(len);
+ ioadl64->address = cpu_to_be64(dma_addr);
+
+ ipr_cmd->ioarcb.ioadl_len =
+ cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
+ ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
+ } else {
+ ioadl->flags_and_data_len = cpu_to_be32(flags | len);
+ ioadl->address = cpu_to_be32(dma_addr);
+
+ if (flags == IPR_IOADL_FLAGS_READ_LAST) {
+ ipr_cmd->ioarcb.read_ioadl_len =
+ cpu_to_be32(sizeof(struct ipr_ioadl_desc));
+ ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
+ } else {
+ ipr_cmd->ioarcb.ioadl_len =
+ cpu_to_be32(sizeof(struct ipr_ioadl_desc));
+ ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
+ }
+ }
+}
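The hcam path further down is the first caller converted to this helper; a one-buffer, adapter-to-host transfer reduces to a single call:

ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
	       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);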
+
+/**
* ipr_send_blocking_cmd - Send command and sleep on its completion.
* @ipr_cmd: ipr command struct
* @timeout_func: function to invoke if command times out
@@ -803,11 +968,8 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
- ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
- ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
- ipr_cmd->ioadl[0].flags_and_data_len =
- cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
- ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);
+ ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
+ sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
ipr_cmd->done = ipr_process_ccn;
@@ -817,22 +979,54 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
mb();
- writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
- ioa_cfg->regs.ioarrin_reg);
+
+ ipr_send_command(ipr_cmd);
} else {
list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
}
}
/**
+ * ipr_update_ata_class - Update the ata class in the resource entry
+ * @res: resource entry struct
+ * @proto: cfgte device bus protocol value
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
+{
+	switch (proto) {
+ case IPR_PROTO_SATA:
+ case IPR_PROTO_SAS_STP:
+ res->ata_class = ATA_DEV_ATA;
+ break;
+ case IPR_PROTO_SATA_ATAPI:
+ case IPR_PROTO_SAS_STP_ATAPI:
+ res->ata_class = ATA_DEV_ATAPI;
+ break;
+ default:
+ res->ata_class = ATA_DEV_UNKNOWN;
+ break;
+	}
+}
+
+/**
* ipr_init_res_entry - Initialize a resource entry struct.
* @res: resource entry struct
+ * @cfgtew: config table entry wrapper struct
*
* Return value:
* none
**/
-static void ipr_init_res_entry(struct ipr_resource_entry *res)
+static void ipr_init_res_entry(struct ipr_resource_entry *res,
+ struct ipr_config_table_entry_wrapper *cfgtew)
{
+ int found = 0;
+ unsigned int proto;
+ struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
+ struct ipr_resource_entry *gscsi_res = NULL;
+
res->needs_sync_complete = 0;
res->in_erp = 0;
res->add_to_ml = 0;
@@ -840,6 +1034,205 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res)
res->resetting_device = 0;
res->sdev = NULL;
res->sata_port = NULL;
+
+ if (ioa_cfg->sis64) {
+ proto = cfgtew->u.cfgte64->proto;
+ res->res_flags = cfgtew->u.cfgte64->res_flags;
+ res->qmodel = IPR_QUEUEING_MODEL64(res);
+ res->type = cfgtew->u.cfgte64->res_type & 0x0f;
+
+ memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
+ sizeof(res->res_path));
+
+ res->bus = 0;
+ res->lun = scsilun_to_int(&res->dev_lun);
+
+ if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
+ list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
+ if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
+ found = 1;
+ res->target = gscsi_res->target;
+ break;
+ }
+ }
+ if (!found) {
+ res->target = find_first_zero_bit(ioa_cfg->target_ids,
+ ioa_cfg->max_devs_supported);
+ set_bit(res->target, ioa_cfg->target_ids);
+ }
+
+ memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
+ sizeof(res->dev_lun.scsi_lun));
+ } else if (res->type == IPR_RES_TYPE_IOAFP) {
+ res->bus = IPR_IOAFP_VIRTUAL_BUS;
+ res->target = 0;
+ } else if (res->type == IPR_RES_TYPE_ARRAY) {
+ res->bus = IPR_ARRAY_VIRTUAL_BUS;
+ res->target = find_first_zero_bit(ioa_cfg->array_ids,
+ ioa_cfg->max_devs_supported);
+ set_bit(res->target, ioa_cfg->array_ids);
+ } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
+ res->bus = IPR_VSET_VIRTUAL_BUS;
+ res->target = find_first_zero_bit(ioa_cfg->vset_ids,
+ ioa_cfg->max_devs_supported);
+ set_bit(res->target, ioa_cfg->vset_ids);
+ } else {
+ res->target = find_first_zero_bit(ioa_cfg->target_ids,
+ ioa_cfg->max_devs_supported);
+ set_bit(res->target, ioa_cfg->target_ids);
+ }
+ } else {
+ proto = cfgtew->u.cfgte->proto;
+ res->qmodel = IPR_QUEUEING_MODEL(res);
+ res->flags = cfgtew->u.cfgte->flags;
+ if (res->flags & IPR_IS_IOA_RESOURCE)
+ res->type = IPR_RES_TYPE_IOAFP;
+ else
+ res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
+
+ res->bus = cfgtew->u.cfgte->res_addr.bus;
+ res->target = cfgtew->u.cfgte->res_addr.target;
+ res->lun = cfgtew->u.cfgte->res_addr.lun;
+ }
+
+ ipr_update_ata_class(res, proto);
+}
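Since SIS-64 config table entries carry no bus/target/lun address, the function above fabricates one: a fixed virtual bus per resource type plus a target id taken from a per-type bitmap. The allocation pattern, condensed from the vset branch above (ipr_clear_res_target() below releases the bit again):

res->target = find_first_zero_bit(ioa_cfg->vset_ids,
				  ioa_cfg->max_devs_supported);
set_bit(res->target, ioa_cfg->vset_ids);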
+
+/**
+ * ipr_is_same_device - Determine if two devices are the same.
+ * @res: resource entry struct
+ * @cfgtew: config table entry wrapper struct
+ *
+ * Return value:
+ * 1 if the devices are the same / 0 otherwise
+ **/
+static int ipr_is_same_device(struct ipr_resource_entry *res,
+ struct ipr_config_table_entry_wrapper *cfgtew)
+{
+ if (res->ioa_cfg->sis64) {
+ if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
+ sizeof(cfgtew->u.cfgte64->dev_id)) &&
+ !memcmp(&res->lun, &cfgtew->u.cfgte64->lun,
+ sizeof(cfgtew->u.cfgte64->lun))) {
+ return 1;
+ }
+ } else {
+ if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
+ res->target == cfgtew->u.cfgte->res_addr.target &&
+ res->lun == cfgtew->u.cfgte->res_addr.lun)
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * ipr_format_resource_path - Format the resource path for printing.
+ * @res_path: resource path
+ * @buffer: buffer to hold the formatted path
+ *
+ * Return value:
+ * pointer to buffer
+ **/
+static char *ipr_format_resource_path(u8 *res_path, char *buffer)
+{
+	char *p = buffer;
+	int i;
+
+	p += sprintf(p, "%02X", res_path[0]);
+	for (i = 1; res_path[i] != 0xff; i++)
+		p += sprintf(p, "-%02X", res_path[i]);
+
+	return buffer;
+}
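For example, a path terminated by the 0xff sentinel formats as dash-separated hex bytes:

char buffer[IPR_MAX_RES_PATH_LENGTH];
u8 res_path[8] = { 0x00, 0x02, 0x04, 0xff, 0xff, 0xff, 0xff, 0xff };

printk(KERN_INFO "path=%s\n",
       ipr_format_resource_path(&res_path[0], &buffer[0]));
/* prints: path=00-02-04 */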
+
+/**
+ * ipr_update_res_entry - Update the resource entry.
+ * @res: resource entry struct
+ * @cfgtew: config table entry wrapper struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_update_res_entry(struct ipr_resource_entry *res,
+ struct ipr_config_table_entry_wrapper *cfgtew)
+{
+ char buffer[IPR_MAX_RES_PATH_LENGTH];
+ unsigned int proto;
+ int new_path = 0;
+
+ if (res->ioa_cfg->sis64) {
+ res->flags = cfgtew->u.cfgte64->flags;
+ res->res_flags = cfgtew->u.cfgte64->res_flags;
+ res->type = cfgtew->u.cfgte64->res_type & 0x0f;
+
+ memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
+ sizeof(struct ipr_std_inq_data));
+
+ res->qmodel = IPR_QUEUEING_MODEL64(res);
+ proto = cfgtew->u.cfgte64->proto;
+ res->res_handle = cfgtew->u.cfgte64->res_handle;
+ res->dev_id = cfgtew->u.cfgte64->dev_id;
+
+ memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
+ sizeof(res->dev_lun.scsi_lun));
+
+ if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
+ sizeof(res->res_path))) {
+ memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
+ sizeof(res->res_path));
+ new_path = 1;
+ }
+
+ if (res->sdev && new_path)
+ sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
+ ipr_format_resource_path(&res->res_path[0], &buffer[0]));
+ } else {
+ res->flags = cfgtew->u.cfgte->flags;
+ if (res->flags & IPR_IS_IOA_RESOURCE)
+ res->type = IPR_RES_TYPE_IOAFP;
+ else
+ res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
+
+ memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
+ sizeof(struct ipr_std_inq_data));
+
+ res->qmodel = IPR_QUEUEING_MODEL(res);
+ proto = cfgtew->u.cfgte->proto;
+ res->res_handle = cfgtew->u.cfgte->res_handle;
+ }
+
+ ipr_update_ata_class(res, proto);
+}
+
+/**
+ * ipr_clear_res_target - Clear the target bit in the bitmap for the resource
+ * @res: resource entry struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_clear_res_target(struct ipr_resource_entry *res)
+{
+ struct ipr_resource_entry *gscsi_res = NULL;
+ struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
+
+ if (!ioa_cfg->sis64)
+ return;
+
+ if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
+ clear_bit(res->target, ioa_cfg->array_ids);
+ else if (res->bus == IPR_VSET_VIRTUAL_BUS)
+ clear_bit(res->target, ioa_cfg->vset_ids);
+ else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
+ list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
+ if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
+ return;
+ clear_bit(res->target, ioa_cfg->target_ids);
+
+ } else if (res->bus == 0)
+ clear_bit(res->target, ioa_cfg->target_ids);
}
/**
@@ -851,17 +1244,24 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res)
* none
**/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
- struct ipr_hostrcb *hostrcb)
+ struct ipr_hostrcb *hostrcb)
{
struct ipr_resource_entry *res = NULL;
- struct ipr_config_table_entry *cfgte;
+ struct ipr_config_table_entry_wrapper cfgtew;
+ __be32 cc_res_handle;
+
u32 is_ndn = 1;
- cfgte = &hostrcb->hcam.u.ccn.cfgte;
+ if (ioa_cfg->sis64) {
+ cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
+ cc_res_handle = cfgtew.u.cfgte64->res_handle;
+ } else {
+ cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
+ cc_res_handle = cfgtew.u.cfgte->res_handle;
+ }
list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
- if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
- sizeof(cfgte->res_addr))) {
+ if (res->res_handle == cc_res_handle) {
is_ndn = 0;
break;
}
@@ -879,20 +1279,22 @@ static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
struct ipr_resource_entry, queue);
list_del(&res->queue);
- ipr_init_res_entry(res);
+ ipr_init_res_entry(res, &cfgtew);
list_add_tail(&res->queue, &ioa_cfg->used_res_q);
}
- memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
+ ipr_update_res_entry(res, &cfgtew);
if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
if (res->sdev) {
res->del_from_ml = 1;
- res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
+ res->res_handle = IPR_INVALID_RES_HANDLE;
if (ioa_cfg->allow_ml_add_del)
schedule_work(&ioa_cfg->work_q);
- } else
+ } else {
+ ipr_clear_res_target(res);
list_move_tail(&res->queue, &ioa_cfg->free_res_q);
+ }
} else if (!res->sdev) {
res->add_to_ml = 1;
if (ioa_cfg->allow_ml_add_del)
@@ -1044,8 +1446,12 @@ static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
struct ipr_hostrcb *hostrcb)
{
- struct ipr_hostrcb_type_12_error *error =
- &hostrcb->hcam.u.error.u.type_12_error;
+ struct ipr_hostrcb_type_12_error *error;
+
+ if (ioa_cfg->sis64)
+ error = &hostrcb->hcam.u.error64.u.type_12_error;
+ else
+ error = &hostrcb->hcam.u.error.u.type_12_error;
ipr_err("-----Current Configuration-----\n");
ipr_err("Cache Directory Card Information:\n");
@@ -1138,6 +1544,48 @@ static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
}
/**
+ * ipr_log_sis64_config_error - Log a sis64 configuration error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_hostrcb *hostrcb)
+{
+ int errors_logged, i;
+ struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
+ struct ipr_hostrcb_type_23_error *error;
+ char buffer[IPR_MAX_RES_PATH_LENGTH];
+
+ error = &hostrcb->hcam.u.error64.u.type_23_error;
+ errors_logged = be32_to_cpu(error->errors_logged);
+
+ ipr_err("Device Errors Detected/Logged: %d/%d\n",
+ be32_to_cpu(error->errors_detected), errors_logged);
+
+ dev_entry = error->dev;
+
+ for (i = 0; i < errors_logged; i++, dev_entry++) {
+ ipr_err_separator;
+
+ ipr_err("Device %d : %s", i + 1,
+ ipr_format_resource_path(&dev_entry->res_path[0], &buffer[0]));
+ ipr_log_ext_vpd(&dev_entry->vpd);
+
+ ipr_err("-----New Device Information-----\n");
+ ipr_log_ext_vpd(&dev_entry->new_vpd);
+
+ ipr_err("Cache Directory Card Information:\n");
+ ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
+
+ ipr_err("Adapter Card Information:\n");
+ ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
+ }
+}
+
+/**
* ipr_log_config_error - Log a configuration error.
* @ioa_cfg: ioa config struct
* @hostrcb: hostrcb struct
@@ -1331,7 +1779,11 @@ static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
{
struct ipr_hostrcb_type_17_error *error;
- error = &hostrcb->hcam.u.error.u.type_17_error;
+ if (ioa_cfg->sis64)
+ error = &hostrcb->hcam.u.error64.u.type_17_error;
+ else
+ error = &hostrcb->hcam.u.error.u.type_17_error;
+
error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
strim(error->failure_reason);
@@ -1438,6 +1890,42 @@ static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}
+/**
+ * ipr_log64_fabric_path - Log a fabric path error
+ * @hostrcb: hostrcb struct
+ * @fabric: fabric descriptor
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
+ struct ipr_hostrcb64_fabric_desc *fabric)
+{
+ int i, j;
+ u8 path_state = fabric->path_state;
+ u8 active = path_state & IPR_PATH_ACTIVE_MASK;
+ u8 state = path_state & IPR_PATH_STATE_MASK;
+ char buffer[IPR_MAX_RES_PATH_LENGTH];
+
+ for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
+ if (path_active_desc[i].active != active)
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
+ if (path_state_desc[j].state != state)
+ continue;
+
+ ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
+ path_active_desc[i].desc, path_state_desc[j].desc,
+ ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
+ return;
+ }
+ }
+
+ ipr_err("Path state=%02X Resource Path=%s\n", path_state,
+ ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
+}
+
static const struct {
u8 type;
char *desc;
@@ -1547,6 +2035,49 @@ static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
}
/**
+ * ipr_log64_path_elem - Log a fabric path element.
+ * @hostrcb: hostrcb struct
+ * @cfg: fabric path element struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
+ struct ipr_hostrcb64_config_element *cfg)
+{
+ int i, j;
+ u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
+ u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
+ u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
+ char buffer[IPR_MAX_RES_PATH_LENGTH];
+
+ if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
+ if (path_type_desc[i].type != type)
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
+ if (path_status_desc[j].status != status)
+ continue;
+
+ ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
+ path_status_desc[j].desc, path_type_desc[i].desc,
+ ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
+ link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+ be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+ return;
+ }
+ }
+ ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
+ "WWN=%08X%08X\n", cfg->type_status,
+ ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
+ link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+ be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+}
+
+/**
* ipr_log_fabric_error - Log a fabric error.
* @ioa_cfg: ioa config struct
* @hostrcb: hostrcb struct
@@ -1584,6 +2115,96 @@ static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
}
/**
+ * ipr_log_sis64_array_error - Log a sis64 array error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_hostrcb *hostrcb)
+{
+ int i, num_entries;
+ struct ipr_hostrcb_type_24_error *error;
+ struct ipr_hostrcb64_array_data_entry *array_entry;
+ char buffer[IPR_MAX_RES_PATH_LENGTH];
+ const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
+
+ error = &hostrcb->hcam.u.error64.u.type_24_error;
+
+ ipr_err_separator;
+
+ ipr_err("RAID %s Array Configuration: %s\n",
+ error->protection_level,
+ ipr_format_resource_path(&error->last_res_path[0], &buffer[0]));
+
+ ipr_err_separator;
+
+ array_entry = error->array_member;
+	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
+			    ARRAY_SIZE(error->array_member));
+
+ for (i = 0; i < num_entries; i++, array_entry++) {
+
+ if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
+ continue;
+
+ if (error->exposed_mode_adn == i)
+ ipr_err("Exposed Array Member %d:\n", i);
+ else
+ ipr_err("Array Member %d:\n", i);
+
+ ipr_log_ext_vpd(&array_entry->vpd);
+ ipr_err("Current Location: %s",
+ ipr_format_resource_path(&array_entry->res_path[0], &buffer[0]));
+ ipr_err("Expected Location: %s",
+ ipr_format_resource_path(&array_entry->expected_res_path[0], &buffer[0]));
+
+ ipr_err_separator;
+ }
+}
+
+/**
+ * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
+ * @ioa_cfg: ioa config struct
+ * @hostrcb: hostrcb struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_hostrcb *hostrcb)
+{
+ struct ipr_hostrcb_type_30_error *error;
+ struct ipr_hostrcb64_fabric_desc *fabric;
+ struct ipr_hostrcb64_config_element *cfg;
+ int i, add_len;
+
+ error = &hostrcb->hcam.u.error64.u.type_30_error;
+
+ error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
+ ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
+
+ add_len = be32_to_cpu(hostrcb->hcam.length) -
+ (offsetof(struct ipr_hostrcb64_error, u) +
+ offsetof(struct ipr_hostrcb_type_30_error, desc));
+
+ for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
+ ipr_log64_fabric_path(hostrcb, fabric);
+ for_each_fabric_cfg(fabric, cfg)
+ ipr_log64_path_elem(hostrcb, cfg);
+
+ add_len -= be16_to_cpu(fabric->length);
+ fabric = (struct ipr_hostrcb64_fabric_desc *)
+ ((unsigned long)fabric + be16_to_cpu(fabric->length));
+ }
+
+ ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
+}
+
+/**
* ipr_log_generic_error - Log an adapter error.
* @ioa_cfg: ioa config struct
* @hostrcb: hostrcb struct
@@ -1642,13 +2263,16 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
- ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
+ if (ioa_cfg->sis64)
+ ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
+ else
+ ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
- if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
- ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
+ if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
+ ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
/* Tell the midlayer we had a bus reset so it will handle the UA properly */
scsi_report_bus_reset(ioa_cfg->host,
- hostrcb->hcam.u.error.failing_dev_res_addr.bus);
+ hostrcb->hcam.u.error.fd_res_addr.bus);
}
error_index = ipr_get_error(ioasc);
@@ -1696,6 +2320,16 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
case IPR_HOST_RCB_OVERLAY_ID_20:
ipr_log_fabric_error(ioa_cfg, hostrcb);
break;
+ case IPR_HOST_RCB_OVERLAY_ID_23:
+ ipr_log_sis64_config_error(ioa_cfg, hostrcb);
+ break;
+ case IPR_HOST_RCB_OVERLAY_ID_24:
+ case IPR_HOST_RCB_OVERLAY_ID_26:
+ ipr_log_sis64_array_error(ioa_cfg, hostrcb);
+ break;
+ case IPR_HOST_RCB_OVERLAY_ID_30:
+ ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
+ break;
case IPR_HOST_RCB_OVERLAY_ID_1:
case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
default:
@@ -1720,7 +2354,12 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
- u32 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
+ u32 fd_ioasc;
+
+ if (ioa_cfg->sis64)
+ fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
+ else
+ fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
list_del(&hostrcb->queue);
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
@@ -1845,12 +2484,14 @@ static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
int i, j, matches;
+ struct ipr_std_inq_vpids *vpids;
const struct ipr_ses_table_entry *ste = ipr_ses_table;
for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
if (ste->compare_product_id_byte[j] == 'X') {
- if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
+ vpids = &res->std_inq_data.vpids;
+ if (vpids->product_id[j] == ste->product_id[j])
matches++;
else
break;
@@ -1885,10 +2526,10 @@ static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_wi
/* Loop through each config table entry in the config table buffer */
list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
- if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
+ if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
continue;
- if (bus != res->cfgte.res_addr.bus)
+ if (bus != res->bus)
continue;
if (!(ste = ipr_find_ses_entry(res)))
@@ -1934,6 +2575,31 @@ static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
}
/**
+ * ipr_get_sis64_dump_data_section - Dump IOA memory
+ * @ioa_cfg: ioa config struct
+ * @start_addr: adapter address to dump
+ * @dest: destination kernel buffer
+ * @length_in_words: length to dump in 4 byte words
+ *
+ * Return value:
+ * 0 on success
+ **/
+static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
+ u32 start_addr,
+ __be32 *dest, u32 length_in_words)
+{
+ int i;
+
+ for (i = 0; i < length_in_words; i++) {
+ writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
+ *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
+ dest++;
+ }
+
+ return 0;
+}
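The SIS-64 dump is a classic address/data register window: write the IOA address, read one word back. Dumping two words starting at, say, IOA address 0x1000 (an illustrative address) unrolls to:

writel(0x1000, ioa_cfg->regs.dump_addr_reg);
dest[0] = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
writel(0x1004, ioa_cfg->regs.dump_addr_reg);
dest[1] = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));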
+
+/**
* ipr_get_ldump_data_section - Dump IOA memory
* @ioa_cfg: ioa config struct
* @start_addr: adapter address to dump
@@ -1950,9 +2616,13 @@ static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
volatile u32 temp_pcii_reg;
int i, delay = 0;
+ if (ioa_cfg->sis64)
+ return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
+ dest, length_in_words);
+
/* Write IOA interrupt reg starting LDUMP state */
writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
- ioa_cfg->regs.set_uproc_interrupt_reg);
+ ioa_cfg->regs.set_uproc_interrupt_reg32);
/* Wait for IO debug acknowledge */
if (ipr_wait_iodbg_ack(ioa_cfg,
@@ -1971,7 +2641,7 @@ static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
/* Signal address valid - clear IOA Reset alert */
writel(IPR_UPROCI_RESET_ALERT,
- ioa_cfg->regs.clr_uproc_interrupt_reg);
+ ioa_cfg->regs.clr_uproc_interrupt_reg32);
for (i = 0; i < length_in_words; i++) {
/* Wait for IO debug acknowledge */
@@ -1996,10 +2666,10 @@ static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
/* Signal end of block transfer. Set reset alert then clear IO debug ack */
writel(IPR_UPROCI_RESET_ALERT,
- ioa_cfg->regs.set_uproc_interrupt_reg);
+ ioa_cfg->regs.set_uproc_interrupt_reg32);
writel(IPR_UPROCI_IO_DEBUG_ALERT,
- ioa_cfg->regs.clr_uproc_interrupt_reg);
+ ioa_cfg->regs.clr_uproc_interrupt_reg32);
/* Signal dump data received - Clear IO debug Ack */
writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
@@ -2008,7 +2678,7 @@ static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
temp_pcii_reg =
- readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
+ readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
return 0;
@@ -2207,6 +2877,7 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
u32 num_entries, start_off, end_off;
u32 bytes_to_copy, bytes_copied, rc;
struct ipr_sdt *sdt;
+ int valid = 1;
int i;
ENTER;
@@ -2220,7 +2891,7 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
start_addr = readl(ioa_cfg->ioa_mailbox);
- if (!ipr_sdt_is_fmt2(start_addr)) {
+ if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
dev_err(&ioa_cfg->pdev->dev,
"Invalid dump table format: %lx\n", start_addr);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -2249,7 +2920,6 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
/* IOA Dump entry */
ipr_init_dump_entry_hdr(&ioa_dump->hdr);
- ioa_dump->format = IPR_SDT_FMT2;
ioa_dump->hdr.len = 0;
ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
@@ -2264,7 +2934,8 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
sizeof(struct ipr_sdt) / sizeof(__be32));
/* Smart Dump table is ready to use and the first entry is valid */
- if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
+ if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
+ (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
dev_err(&ioa_cfg->pdev->dev,
"Dump of IOA failed. Dump table not valid: %d, %X.\n",
rc, be32_to_cpu(sdt->hdr.state));
@@ -2288,12 +2959,19 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
}
if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
- sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
- start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
- end_off = be32_to_cpu(sdt->entry[i].end_offset);
-
- if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
- bytes_to_copy = end_off - start_off;
+			valid = 1;
+			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
+ if (ioa_cfg->sis64)
+ bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
+ else {
+ start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
+ end_off = be32_to_cpu(sdt->entry[i].end_token);
+
+ if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
+ bytes_to_copy = end_off - start_off;
+ else
+ valid = 0;
+ }
+ if (valid) {
if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
continue;
@@ -2422,9 +3100,9 @@ restart:
list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
if (res->add_to_ml) {
- bus = res->cfgte.res_addr.bus;
- target = res->cfgte.res_addr.target;
- lun = res->cfgte.res_addr.lun;
+ bus = res->bus;
+ target = res->target;
+ lun = res->lun;
res->add_to_ml = 0;
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
scsi_add_device(ioa_cfg->host, bus, target, lun);
@@ -2478,105 +3156,6 @@ static struct bin_attribute ipr_trace_attr = {
};
#endif
-static const struct {
- enum ipr_cache_state state;
- char *name;
-} cache_state [] = {
- { CACHE_NONE, "none" },
- { CACHE_DISABLED, "disabled" },
- { CACHE_ENABLED, "enabled" }
-};
-
-/**
- * ipr_show_write_caching - Show the write caching attribute
- * @dev: device struct
- * @buf: buffer
- *
- * Return value:
- * number of bytes printed to buffer
- **/
-static ssize_t ipr_show_write_caching(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
- unsigned long lock_flags = 0;
- int i, len = 0;
-
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
- if (cache_state[i].state == ioa_cfg->cache_state) {
- len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
- break;
- }
- }
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- return len;
-}
-
-
-/**
- * ipr_store_write_caching - Enable/disable adapter write cache
- * @dev: device struct
- * @buf: buffer
- * @count: buffer size
- *
- * This function will enable/disable adapter write cache.
- *
- * Return value:
- * count on success / other on failure
- **/
-static ssize_t ipr_store_write_caching(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
- unsigned long lock_flags = 0;
- enum ipr_cache_state new_state = CACHE_INVALID;
- int i;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- if (ioa_cfg->cache_state == CACHE_NONE)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
- if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
- new_state = cache_state[i].state;
- break;
- }
- }
-
- if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
- return -EINVAL;
-
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->cache_state == new_state) {
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- return count;
- }
-
- ioa_cfg->cache_state = new_state;
- dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
- new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
- if (!ioa_cfg->in_reset_reload)
- ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
-
- return count;
-}
-
-static struct device_attribute ipr_ioa_cache_attr = {
- .attr = {
- .name = "write_cache",
- .mode = S_IRUGO | S_IWUSR,
- },
- .show = ipr_show_write_caching,
- .store = ipr_store_write_caching
-};
-
/**
* ipr_show_fw_version - Show the firmware version
* @dev: class device struct
@@ -2976,6 +3555,37 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
}
/**
+ * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
+ * @ipr_cmd: ipr command struct
+ * @sglist: scatter/gather list
+ *
+ * Builds a microcode download IOA data list (IOADL).
+ *
+ **/
+static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
+ struct ipr_sglist *sglist)
+{
+ struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+ struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
+ struct scatterlist *scatterlist = sglist->scatterlist;
+ int i;
+
+ ipr_cmd->dma_use_sg = sglist->num_dma_sg;
+ ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+ ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
+
+ ioarcb->ioadl_len =
+ cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
+ for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
+ ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
+ ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
+ ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
+ }
+
+ ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
+}
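The flag layout mirrors the 32-bit builder that follows: every descriptor carries the WRITE flag and only the final one also gets IPR_IOADL_FLAGS_LAST. For a three-element sglist:

/* ioadl64[0].flags = WRITE
 * ioadl64[1].flags = WRITE
 * ioadl64[2].flags = WRITE | LAST   (terminates the list)
 */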
+
+/**
* ipr_build_ucode_ioadl - Build a microcode download IOADL
* @ipr_cmd: ipr command struct
* @sglist: scatter/gather list
@@ -2987,14 +3597,15 @@ static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
struct ipr_sglist *sglist)
{
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
- struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
+ struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
struct scatterlist *scatterlist = sglist->scatterlist;
int i;
ipr_cmd->dma_use_sg = sglist->num_dma_sg;
ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
- ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
- ioarcb->write_ioadl_len =
+ ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
+
+ ioarcb->ioadl_len =
cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
@@ -3146,7 +3757,6 @@ static struct device_attribute *ipr_ioa_attrs[] = {
&ipr_ioa_state_attr,
&ipr_ioa_reset_attr,
&ipr_update_fw_attr,
- &ipr_ioa_cache_attr,
NULL,
};
@@ -3450,7 +4060,7 @@ static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribu
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
res = (struct ipr_resource_entry *)sdev->hostdata;
if (res)
- len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
+ len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
return len;
}
@@ -3463,8 +4073,43 @@ static struct device_attribute ipr_adapter_handle_attr = {
.show = ipr_show_adapter_handle
};
+/**
+ * ipr_show_resource_path - Show the resource path for this device.
+ * @dev: device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
+ struct ipr_resource_entry *res;
+ unsigned long lock_flags = 0;
+ ssize_t len = -ENXIO;
+ char buffer[IPR_MAX_RES_PATH_LENGTH];
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ res = (struct ipr_resource_entry *)sdev->hostdata;
+ if (res)
+ len = snprintf(buf, PAGE_SIZE, "%s\n",
+ ipr_format_resource_path(&res->res_path[0], &buffer[0]));
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return len;
+}
+
+static struct device_attribute ipr_resource_path_attr = {
+ .attr = {
+ .name = "resource_path",
+ .mode = S_IRUSR,
+ },
+ .show = ipr_show_resource_path
+};
+
static struct device_attribute *ipr_dev_attrs[] = {
&ipr_adapter_handle_attr,
+ &ipr_resource_path_attr,
NULL,
};
@@ -3517,9 +4162,9 @@ static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
struct ipr_resource_entry *res;
list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
- if ((res->cfgte.res_addr.bus == starget->channel) &&
- (res->cfgte.res_addr.target == starget->id) &&
- (res->cfgte.res_addr.lun == 0)) {
+ if ((res->bus == starget->channel) &&
+ (res->target == starget->id) &&
+ (res->lun == 0)) {
return res;
}
}
@@ -3589,6 +4234,17 @@ static int ipr_target_alloc(struct scsi_target *starget)
static void ipr_target_destroy(struct scsi_target *starget)
{
struct ipr_sata_port *sata_port = starget->hostdata;
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
+
+ if (ioa_cfg->sis64) {
+ if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
+ clear_bit(starget->id, ioa_cfg->array_ids);
+ else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
+ clear_bit(starget->id, ioa_cfg->vset_ids);
+ else if (starget->channel == 0)
+ clear_bit(starget->id, ioa_cfg->target_ids);
+ }
if (sata_port) {
starget->hostdata = NULL;
@@ -3610,9 +4266,9 @@ static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
struct ipr_resource_entry *res;
list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
- if ((res->cfgte.res_addr.bus == sdev->channel) &&
- (res->cfgte.res_addr.target == sdev->id) &&
- (res->cfgte.res_addr.lun == sdev->lun))
+ if ((res->bus == sdev->channel) &&
+ (res->target == sdev->id) &&
+ (res->lun == sdev->lun))
return res;
}
@@ -3661,6 +4317,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
struct ipr_resource_entry *res;
struct ata_port *ap = NULL;
unsigned long lock_flags = 0;
+ char buffer[IPR_MAX_RES_PATH_LENGTH];
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
res = sdev->hostdata;
@@ -3687,6 +4344,9 @@ static int ipr_slave_configure(struct scsi_device *sdev)
ata_sas_slave_configure(sdev, ap);
} else
scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
+ if (ioa_cfg->sis64)
+ sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
+ ipr_format_resource_path(&res->res_path[0], &buffer[0]));
return 0;
}
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -3828,14 +4488,19 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
ioarcb = &ipr_cmd->ioarcb;
cmd_pkt = &ioarcb->cmd_pkt;
- regs = &ioarcb->add_data.u.regs;
- ioarcb->res_handle = res->cfgte.res_handle;
+ if (ipr_cmd->ioa_cfg->sis64) {
+ regs = &ipr_cmd->i.ata_ioadl.regs;
+ ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
+ } else
+ regs = &ioarcb->u.add_data.u.regs;
+
+ ioarcb->res_handle = res->res_handle;
cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
if (ipr_is_gata(res)) {
cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
- ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(regs->flags));
+ ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
}
@@ -3880,19 +4545,7 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
res = sata_port->res;
if (res) {
rc = ipr_device_reset(ioa_cfg, res);
- switch(res->cfgte.proto) {
- case IPR_PROTO_SATA:
- case IPR_PROTO_SAS_STP:
- *classes = ATA_DEV_ATA;
- break;
- case IPR_PROTO_SATA_ATAPI:
- case IPR_PROTO_SAS_STP_ATAPI:
- *classes = ATA_DEV_ATAPI;
- break;
- default:
- *classes = ATA_DEV_UNKNOWN;
- break;
- };
+ *classes = res->ata_class;
}
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -3937,7 +4590,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
return FAILED;
list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
- if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
+ if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
if (ipr_cmd->scsi_cmd)
ipr_cmd->done = ipr_scsi_eh_done;
if (ipr_cmd->qc)
@@ -3959,7 +4612,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
spin_lock_irq(scsi_cmd->device->host->host_lock);
list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
- if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
+ if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
rc = -EIO;
break;
}
@@ -3998,13 +4651,13 @@ static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
struct ipr_resource_entry *res;
ENTER;
- list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
- if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
- sizeof(res->cfgte.res_handle))) {
- scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
- break;
+ if (!ioa_cfg->sis64)
+ list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+ if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
+ scsi_report_bus_reset(ioa_cfg->host, res->bus);
+ break;
+ }
}
- }
/*
* If abort has not completed, indicate the reset has, else call the
@@ -4102,7 +4755,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
return SUCCESS;
ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
- ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
+ ipr_cmd->ioarcb.res_handle = res->res_handle;
cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
@@ -4239,11 +4892,29 @@ static irqreturn_t ipr_isr(int irq, void *devp)
return IRQ_NONE;
}
- int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
- int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
+ int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
- /* If an interrupt on the adapter did not occur, ignore it */
+ /* If an interrupt on the adapter did not occur, ignore it.
+ * Or in the case of SIS 64, check for a stage change interrupt.
+ */
if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
+ if (ioa_cfg->sis64) {
+ int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
+ if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
+
+ /* clear stage change */
+ writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
+ list_del(&ioa_cfg->reset_cmd->queue);
+ del_timer(&ioa_cfg->reset_cmd->timer);
+ ipr_reset_ioa_job(ioa_cfg->reset_cmd);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return IRQ_HANDLED;
+ }
+ }
+
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
return IRQ_NONE;
}
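/*
 * Editorial note: on SIS64 the 32-bit view of the interrupt register can
 * read clear while the full-width register still latches an IPL stage
 * change, so the path above re-reads the wide register, acknowledges the
 * stage change, and drives the pending reset job instead of declaring
 * the interrupt spurious.
 */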
@@ -4286,8 +4957,8 @@ static irqreturn_t ipr_isr(int irq, void *devp)
if (ipr_cmd != NULL) {
/* Clear the PCI interrupt */
do {
- writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
- int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
+ writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
@@ -4309,6 +4980,53 @@ static irqreturn_t ipr_isr(int irq, void *devp)
}
/**
+ * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
+ * @ioa_cfg: ioa config struct
+ * @ipr_cmd: ipr command struct
+ *
+ * Return value:
+ * 0 on success / -1 on failure
+ **/
+static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
+ struct ipr_cmnd *ipr_cmd)
+{
+ int i, nseg;
+ struct scatterlist *sg;
+ u32 length;
+ u32 ioadl_flags = 0;
+ struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
+ struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+ struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
+
+ length = scsi_bufflen(scsi_cmd);
+ if (!length)
+ return 0;
+
+ nseg = scsi_dma_map(scsi_cmd);
+ if (nseg < 0) {
+ dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
+ return -1;
+ }
+
+ ipr_cmd->dma_use_sg = nseg;
+
+ if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
+ ioadl_flags = IPR_IOADL_FLAGS_WRITE;
+ ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+ } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
+ ioadl_flags = IPR_IOADL_FLAGS_READ;
+
+ scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
+ ioadl64[i].flags = cpu_to_be32(ioadl_flags);
+ ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
+ ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
+ }
+
+ ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
+ return 0;
+}
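/*
 * Editorial note: each ipr_ioadl64_desc (see the ipr.h hunk below) is a
 * big-endian triple of 32-bit flags, 32-bit length, and 64-bit bus
 * address; after the loop, i equals dma_use_sg, so the ioadl64[i-1]
 * fixup tags the final descriptor with IPR_IOADL_FLAGS_LAST.
 */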
+
+/**
* ipr_build_ioadl - Build a scatter/gather list and map the buffer
* @ioa_cfg: ioa config struct
* @ipr_cmd: ipr command struct
@@ -4325,7 +5043,7 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
u32 ioadl_flags = 0;
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
- struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
+ struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
length = scsi_bufflen(scsi_cmd);
if (!length)
@@ -4342,8 +5060,8 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
ioadl_flags = IPR_IOADL_FLAGS_WRITE;
ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
- ioarcb->write_data_transfer_length = cpu_to_be32(length);
- ioarcb->write_ioadl_len =
+ ioarcb->data_transfer_length = cpu_to_be32(length);
+ ioarcb->ioadl_len =
cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
ioadl_flags = IPR_IOADL_FLAGS_READ;
@@ -4352,11 +5070,10 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
}
- if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
- ioadl = ioarcb->add_data.u.ioadl;
- ioarcb->write_ioadl_addr =
- cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
- offsetof(struct ipr_ioarcb, add_data));
+ if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
+ ioadl = ioarcb->u.add_data.u.ioadl;
+ ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
+ offsetof(struct ipr_ioarcb, u.add_data));
ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
}
@@ -4446,18 +5163,24 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
- dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
+ dma_addr_t dma_addr = ipr_cmd->dma_addr;
memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
- ioarcb->write_data_transfer_length = 0;
+ ioarcb->data_transfer_length = 0;
ioarcb->read_data_transfer_length = 0;
- ioarcb->write_ioadl_len = 0;
+ ioarcb->ioadl_len = 0;
ioarcb->read_ioadl_len = 0;
ioasa->ioasc = 0;
ioasa->residual_data_len = 0;
- ioarcb->write_ioadl_addr =
- cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
- ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+
+ if (ipr_cmd->ioa_cfg->sis64)
+ ioarcb->u.sis64_addr_data.data_ioadl_addr =
+ cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
+ else {
+ ioarcb->write_ioadl_addr =
+ cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
+ ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+ }
}
/**
@@ -4489,15 +5212,8 @@ static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
- ipr_cmd->ioadl[0].flags_and_data_len =
- cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
- ipr_cmd->ioadl[0].address =
- cpu_to_be32(ipr_cmd->sense_buffer_dma);
-
- ipr_cmd->ioarcb.read_ioadl_len =
- cpu_to_be32(sizeof(struct ipr_ioadl_desc));
- ipr_cmd->ioarcb.read_data_transfer_length =
- cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
+ ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
+ SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
IPR_REQUEST_SENSE_TIMEOUT * 2);
@@ -4893,9 +5609,9 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
ipr_cmd->scsi_cmd = scsi_cmd;
- ioarcb->res_handle = res->cfgte.res_handle;
+ ioarcb->res_handle = res->res_handle;
ipr_cmd->done = ipr_scsi_done;
- ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
+ ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
if (scsi_cmd->underflow == 0)
@@ -4916,13 +5632,16 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
(!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
- if (likely(rc == 0))
- rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
+ if (likely(rc == 0)) {
+ if (ioa_cfg->sis64)
+ rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
+ else
+ rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
+ }
if (likely(rc == 0)) {
mb();
- writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
- ioa_cfg->regs.ioarrin_reg);
+ ipr_send_command(ipr_cmd);
} else {
list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
return SCSI_MLQUEUE_HOST_BUSY;
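/*
 * Editorial sketch (assumption: ipr_send_command() is added by an earlier
 * hunk of this patch; its body is not shown here). It presumably hides
 * the SIS32/SIS64 doorbell difference, roughly:
 *
 *	static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
 *	{
 *		struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 *
 *		if (ioa_cfg->sis64)
 *			writeq(ipr_cmd->dma_addr, ioa_cfg->regs.ioarrin_reg);
 *		else
 *			writel(ipr_cmd->dma_addr, ioa_cfg->regs.ioarrin_reg);
 *	}
 */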
@@ -5035,20 +5754,9 @@ static void ipr_ata_phy_reset(struct ata_port *ap)
goto out_unlock;
}
- switch(res->cfgte.proto) {
- case IPR_PROTO_SATA:
- case IPR_PROTO_SAS_STP:
- ap->link.device[0].class = ATA_DEV_ATA;
- break;
- case IPR_PROTO_SATA_ATAPI:
- case IPR_PROTO_SAS_STP_ATAPI:
- ap->link.device[0].class = ATA_DEV_ATAPI;
- break;
- default:
- ap->link.device[0].class = ATA_DEV_UNKNOWN;
+ ap->link.device[0].class = res->ata_class;
+ if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
ata_port_disable(ap);
- break;
- };
out_unlock:
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
@@ -5134,8 +5842,7 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
- scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus,
- res->cfgte.res_addr.target);
+ scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
@@ -5146,6 +5853,52 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
}
/**
+ * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
+ * @ipr_cmd: ipr command struct
+ * @qc: ATA queued command
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
+ struct ata_queued_cmd *qc)
+{
+ u32 ioadl_flags = 0;
+ struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+ struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
+ struct ipr_ioadl64_desc *last_ioadl64 = NULL;
+ int len = qc->nbytes;
+ struct scatterlist *sg;
+ unsigned int si;
+ dma_addr_t dma_addr = ipr_cmd->dma_addr;
+
+ if (len == 0)
+ return;
+
+ if (qc->dma_dir == DMA_TO_DEVICE) {
+ ioadl_flags = IPR_IOADL_FLAGS_WRITE;
+ ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+ } else if (qc->dma_dir == DMA_FROM_DEVICE)
+ ioadl_flags = IPR_IOADL_FLAGS_READ;
+
+ ioarcb->data_transfer_length = cpu_to_be32(len);
+ ioarcb->ioadl_len =
+ cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
+ ioarcb->u.sis64_addr_data.data_ioadl_addr =
+ cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
+
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ ioadl64->flags = cpu_to_be32(ioadl_flags);
+ ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
+ ioadl64->address = cpu_to_be64(sg_dma_address(sg));
+
+ last_ioadl64 = ioadl64;
+ ioadl64++;
+ }
+
+ if (likely(last_ioadl64))
+ last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
+}
+
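/*
 * Editorial note: unlike ipr_build_ioadl64() above, which indexes
 * ioadl64[i-1] after its loop, this routine tracks a last_ioadl64
 * pointer inside for_each_sg(); both end by setting IPR_IOADL_FLAGS_LAST
 * on the final descriptor so the adapter knows where the chain ends.
 */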
+/**
* ipr_build_ata_ioadl - Build an ATA scatter/gather list
* @ipr_cmd: ipr command struct
* @qc: ATA queued command
@@ -5156,7 +5909,7 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
{
u32 ioadl_flags = 0;
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
- struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
+ struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
struct ipr_ioadl_desc *last_ioadl = NULL;
int len = qc->nbytes;
struct scatterlist *sg;
@@ -5168,8 +5921,8 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
if (qc->dma_dir == DMA_TO_DEVICE) {
ioadl_flags = IPR_IOADL_FLAGS_WRITE;
ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
- ioarcb->write_data_transfer_length = cpu_to_be32(len);
- ioarcb->write_ioadl_len =
+ ioarcb->data_transfer_length = cpu_to_be32(len);
+ ioarcb->ioadl_len =
cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
} else if (qc->dma_dir == DMA_FROM_DEVICE) {
ioadl_flags = IPR_IOADL_FLAGS_READ;
@@ -5212,25 +5965,34 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
ioarcb = &ipr_cmd->ioarcb;
- regs = &ioarcb->add_data.u.regs;
- memset(&ioarcb->add_data, 0, sizeof(ioarcb->add_data));
- ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(ioarcb->add_data.u.regs));
+ if (ioa_cfg->sis64) {
+ regs = &ipr_cmd->i.ata_ioadl.regs;
+ ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
+ } else
+ regs = &ioarcb->u.add_data.u.regs;
+
+ memset(regs, 0, sizeof(*regs));
+ ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
ipr_cmd->qc = qc;
ipr_cmd->done = ipr_sata_done;
- ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
+ ipr_cmd->ioarcb.res_handle = res->res_handle;
ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
ipr_cmd->dma_use_sg = qc->n_elem;
- ipr_build_ata_ioadl(ipr_cmd, qc);
+ if (ioa_cfg->sis64)
+ ipr_build_ata_ioadl64(ipr_cmd, qc);
+ else
+ ipr_build_ata_ioadl(ipr_cmd, qc);
+
regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
ipr_copy_sata_tf(regs, &qc->tf);
memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
- ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
+ ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
switch (qc->tf.protocol) {
case ATA_PROT_NODATA:
@@ -5257,8 +6019,9 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
}
mb();
- writel(be32_to_cpu(ioarcb->ioarcb_host_pci_addr),
- ioa_cfg->regs.ioarrin_reg);
+
+ ipr_send_command(ipr_cmd);
+
return 0;
}
@@ -5459,7 +6222,7 @@ static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
* ipr_set_supported_devs - Send Set Supported Devices for a device
* @ipr_cmd: ipr command struct
*
- * This function send a Set Supported Devices to the adapter
+ * This function sends a Set Supported Devices to the adapter
*
* Return value:
* IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
@@ -5468,7 +6231,6 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
- struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
struct ipr_resource_entry *res = ipr_cmd->u.res;
@@ -5479,28 +6241,28 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
continue;
ipr_cmd->u.res = res;
- ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
+ ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
+ ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
- ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
- sizeof(struct ipr_supported_device));
- ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
- offsetof(struct ipr_misc_cbs, supp_dev));
- ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
- ioarcb->write_data_transfer_length =
- cpu_to_be32(sizeof(struct ipr_supported_device));
+ ipr_init_ioadl(ipr_cmd,
+ ioa_cfg->vpd_cbs_dma +
+ offsetof(struct ipr_misc_cbs, supp_dev),
+ sizeof(struct ipr_supported_device),
+ IPR_IOADL_FLAGS_WRITE_LAST);
ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
IPR_SET_SUP_DEVICE_TIMEOUT);
- ipr_cmd->job_step = ipr_set_supported_devs;
+ if (!ioa_cfg->sis64)
+ ipr_cmd->job_step = ipr_set_supported_devs;
return IPR_RC_JOB_RETURN;
}
@@ -5508,36 +6270,6 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
}
/**
- * ipr_setup_write_cache - Disable write cache if needed
- * @ipr_cmd: ipr command struct
- *
- * This function sets up adapters write cache to desired setting
- *
- * Return value:
- * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
- **/
-static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
-{
- struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
-
- ipr_cmd->job_step = ipr_set_supported_devs;
- ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
- struct ipr_resource_entry, queue);
-
- if (ioa_cfg->cache_state != CACHE_DISABLED)
- return IPR_RC_JOB_CONTINUE;
-
- ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
- ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
- ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
- ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
-
- ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
-
- return IPR_RC_JOB_RETURN;
-}
-
-/**
* ipr_get_mode_page - Locate specified mode page
* @mode_pages: mode page buffer
* @page_code: page code to find
@@ -5695,10 +6427,9 @@ static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
* none
**/
static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
- __be32 res_handle, u8 parm, u32 dma_addr,
- u8 xfer_len)
+ __be32 res_handle, u8 parm,
+ dma_addr_t dma_addr, u8 xfer_len)
{
- struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
ioarcb->res_handle = res_handle;
@@ -5708,11 +6439,7 @@ static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
ioarcb->cmd_pkt.cdb[1] = parm;
ioarcb->cmd_pkt.cdb[4] = xfer_len;
- ioadl->flags_and_data_len =
- cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
- ioadl->address = cpu_to_be32(dma_addr);
- ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
- ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
+ ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
}
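/*
 * Editorial sketch (assumption: ipr_init_ioadl() is added by an earlier
 * hunk of this patch and is not shown here). Given how it replaces the
 * single-descriptor setups throughout this file, it plausibly looks like:
 *
 *	static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
 *				   u32 len, int flags)
 *	{
 *		if (ipr_cmd->ioa_cfg->sis64) {
 *			struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
 *
 *			ioadl64->flags = cpu_to_be32(flags);
 *			ioadl64->data_len = cpu_to_be32(len);
 *			ioadl64->address = cpu_to_be64(dma_addr);
 *		} else {
 *			struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
 *
 *			ioadl->flags_and_data_len = cpu_to_be32(flags | len);
 *			ioadl->address = cpu_to_be32(dma_addr);
 *		}
 *		plus setting the read/write transfer lengths from the flags.
 *	}
 */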
/**
@@ -5742,7 +6469,9 @@ static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
length);
- ipr_cmd->job_step = ipr_setup_write_cache;
+ ipr_cmd->job_step = ipr_set_supported_devs;
+ ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
+ struct ipr_resource_entry, queue);
ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
LEAVE;
@@ -5762,9 +6491,8 @@ static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
**/
static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
__be32 res_handle,
- u8 parm, u32 dma_addr, u8 xfer_len)
+ u8 parm, dma_addr_t dma_addr, u8 xfer_len)
{
- struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
ioarcb->res_handle = res_handle;
@@ -5773,11 +6501,7 @@ static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
ioarcb->cmd_pkt.cdb[4] = xfer_len;
ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
- ioadl->flags_and_data_len =
- cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
- ioadl->address = cpu_to_be32(dma_addr);
- ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
- ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
+ ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
}
/**
@@ -5815,10 +6539,13 @@ static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
**/
static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
- ipr_cmd->job_step = ipr_setup_write_cache;
+ ipr_cmd->job_step = ipr_set_supported_devs;
+ ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
+ struct ipr_resource_entry, queue);
return IPR_RC_JOB_CONTINUE;
}
@@ -5958,24 +6685,36 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ipr_resource_entry *res, *temp;
- struct ipr_config_table_entry *cfgte;
- int found, i;
+ struct ipr_config_table_entry_wrapper cfgtew;
+ int entries, found, flag, i;
LIST_HEAD(old_res);
ENTER;
- if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
+ if (ioa_cfg->sis64)
+ flag = ioa_cfg->u.cfg_table64->hdr64.flags;
+ else
+ flag = ioa_cfg->u.cfg_table->hdr.flags;
+
+ if (flag & IPR_UCODE_DOWNLOAD_REQ)
dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
list_move_tail(&res->queue, &old_res);
- for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
- cfgte = &ioa_cfg->cfg_table->dev[i];
+ if (ioa_cfg->sis64)
+ entries = ioa_cfg->u.cfg_table64->hdr64.num_entries;
+ else
+ entries = ioa_cfg->u.cfg_table->hdr.num_entries;
+
+ for (i = 0; i < entries; i++) {
+ if (ioa_cfg->sis64)
+ cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
+ else
+ cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
found = 0;
list_for_each_entry_safe(res, temp, &old_res, queue) {
- if (!memcmp(&res->cfgte.res_addr,
- &cfgte->res_addr, sizeof(cfgte->res_addr))) {
+ if (ipr_is_same_device(res, &cfgtew)) {
list_move_tail(&res->queue, &ioa_cfg->used_res_q);
found = 1;
break;
@@ -5992,24 +6731,27 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
res = list_entry(ioa_cfg->free_res_q.next,
struct ipr_resource_entry, queue);
list_move_tail(&res->queue, &ioa_cfg->used_res_q);
- ipr_init_res_entry(res);
+ ipr_init_res_entry(res, &cfgtew);
res->add_to_ml = 1;
}
if (found)
- memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
+ ipr_update_res_entry(res, &cfgtew);
}
list_for_each_entry_safe(res, temp, &old_res, queue) {
if (res->sdev) {
res->del_from_ml = 1;
- res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
+ res->res_handle = IPR_INVALID_RES_HANDLE;
list_move_tail(&res->queue, &ioa_cfg->used_res_q);
- } else {
- list_move_tail(&res->queue, &ioa_cfg->free_res_q);
}
}
+ list_for_each_entry_safe(res, temp, &old_res, queue) {
+ ipr_clear_res_target(res);
+ list_move_tail(&res->queue, &ioa_cfg->free_res_q);
+ }
+
if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
else
@@ -6033,7 +6775,6 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
- struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
@@ -6047,16 +6788,11 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
- ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
- ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
+ ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
+ ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
- ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
- ioarcb->read_data_transfer_length =
- cpu_to_be32(sizeof(struct ipr_config_table));
-
- ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
- ioadl->flags_and_data_len =
- cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
+ ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
+ IPR_IOADL_FLAGS_READ_LAST);
ipr_cmd->job_step = ipr_init_res_table;
@@ -6076,10 +6812,9 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
* none
**/
static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
- u32 dma_addr, u8 xfer_len)
+ dma_addr_t dma_addr, u8 xfer_len)
{
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
- struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
ENTER;
ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
@@ -6090,12 +6825,7 @@ static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
ioarcb->cmd_pkt.cdb[2] = page;
ioarcb->cmd_pkt.cdb[4] = xfer_len;
- ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
- ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
-
- ioadl->address = cpu_to_be32(dma_addr);
- ioadl->flags_and_data_len =
- cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
+ ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
LEAVE;
@@ -6166,13 +6896,9 @@ static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
- struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
ENTER;
- if (!ipr_inquiry_page_supported(page0, 1))
- ioa_cfg->cache_state = CACHE_NONE;
-
ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
ipr_ioafp_inquiry(ipr_cmd, 1, 3,
@@ -6240,7 +6966,7 @@ static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
}
/**
- * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
+ * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
* @ipr_cmd: ipr command struct
*
* This function sends an Identify Host Request Response Queue
@@ -6249,7 +6975,7 @@ static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
* Return value:
* IPR_RC_JOB_RETURN
**/
-static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
+static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
@@ -6261,19 +6987,32 @@ static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+ if (ioa_cfg->sis64)
+ ioarcb->cmd_pkt.cdb[1] = 0x1;
ioarcb->cmd_pkt.cdb[2] =
- ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
+ ((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
ioarcb->cmd_pkt.cdb[3] =
- ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
+ ((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
ioarcb->cmd_pkt.cdb[4] =
- ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
+ ((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
ioarcb->cmd_pkt.cdb[5] =
- ((u32) ioa_cfg->host_rrq_dma) & 0xff;
+ ((u64) ioa_cfg->host_rrq_dma) & 0xff;
ioarcb->cmd_pkt.cdb[7] =
((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
ioarcb->cmd_pkt.cdb[8] =
(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
+ if (ioa_cfg->sis64) {
+ ioarcb->cmd_pkt.cdb[10] =
+ ((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
+ ioarcb->cmd_pkt.cdb[11] =
+ ((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
+ ioarcb->cmd_pkt.cdb[12] =
+ ((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
+ ioarcb->cmd_pkt.cdb[13] =
+ ((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
+ }
+
ipr_cmd->job_step = ipr_ioafp_std_inquiry;
ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
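/*
 * Worked example (editorial): for a SIS64 host_rrq_dma of
 * 0x0000001234abcd00, the CDB above carries the address big-endian with
 * the low 32 bits in cdb[2..5] = 34 ab cd 00 and the high 32 bits in
 * cdb[10..13] = 00 00 00 12.
 */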
@@ -6354,7 +7093,58 @@ static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg->toggle_bit = 1;
/* Zero out config table */
- memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
+ memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
+}
+
+/**
+ * ipr_reset_next_stage - Process IPL stage change based on feedback register.
+ * @ipr_cmd: ipr command struct
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
+{
+ unsigned long stage, stage_time;
+ u32 feedback;
+ volatile u32 int_reg;
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ u64 maskval = 0;
+
+ feedback = readl(ioa_cfg->regs.init_feedback_reg);
+ stage = feedback & IPR_IPL_INIT_STAGE_MASK;
+ stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
+
+ ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
+
+ /* sanity check the stage_time value */
+ if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
+ stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
+ else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
+ stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
+
+ if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
+ writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+ stage_time = ioa_cfg->transop_timeout;
+ ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
+ } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
+ ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
+ maskval = IPR_PCII_IPL_STAGE_CHANGE;
+ maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
+ writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+ return IPR_RC_JOB_CONTINUE;
+ }
+
+ ipr_cmd->timer.data = (unsigned long) ipr_cmd;
+ ipr_cmd->timer.expires = jiffies + stage_time * HZ;
+ ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
+ ipr_cmd->done = ipr_reset_ioa_job;
+ add_timer(&ipr_cmd->timer);
+ list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+
+ return IPR_RC_JOB_RETURN;
}
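/*
 * Worked example (editorial): a feedback value of 0xB0000007 decodes to
 * stage = 0xB0000000 (IPR_IPL_INIT_STAGE_TRANSOP) and stage_time = 7
 * seconds; stage_time is then clamped to the range
 * [IPR_IPL_INIT_MIN_STAGE_TIME, IPR_LONG_OPERATIONAL_TIMEOUT].
 */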
/**
@@ -6373,7 +7163,7 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
volatile u32 int_reg;
ENTER;
- ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
+ ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
ipr_init_ioa_mem(ioa_cfg);
ioa_cfg->allow_interrupts = 1;
@@ -6381,19 +7171,27 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
- ioa_cfg->regs.clr_interrupt_mask_reg);
+ ioa_cfg->regs.clr_interrupt_mask_reg32);
int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
return IPR_RC_JOB_CONTINUE;
}
/* Enable destructive diagnostics on IOA */
- writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
+ writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
+
+ writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
+ if (ioa_cfg->sis64)
+ writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_mask_reg);
- writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
+ if (ioa_cfg->sis64) {
+ ipr_cmd->job_step = ipr_reset_next_stage;
+ return IPR_RC_JOB_CONTINUE;
+ }
+
ipr_cmd->timer.data = (unsigned long) ipr_cmd;
ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
@@ -6463,7 +7261,7 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
mailbox = readl(ioa_cfg->ioa_mailbox);
- if (!ipr_sdt_is_fmt2(mailbox)) {
+ if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
ipr_unit_check_no_data(ioa_cfg);
return;
}
@@ -6472,15 +7270,20 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
- if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
- !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
+ if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
+ ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
+ (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
ipr_unit_check_no_data(ioa_cfg);
return;
}
/* Find length of the first sdt entry (UC buffer) */
- length = (be32_to_cpu(sdt.entry[0].end_offset) -
- be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
+ if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
+ length = be32_to_cpu(sdt.entry[0].end_token);
+ else
+ length = (be32_to_cpu(sdt.entry[0].end_token) -
+ be32_to_cpu(sdt.entry[0].start_token)) &
+ IPR_FMT2_MBX_ADDR_MASK;
hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
struct ipr_hostrcb, queue);
@@ -6488,13 +7291,13 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
rc = ipr_get_ldump_data_section(ioa_cfg,
- be32_to_cpu(sdt.entry[0].bar_str_offset),
+ be32_to_cpu(sdt.entry[0].start_token),
(__be32 *)&hostrcb->hcam,
min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
if (!rc) {
ipr_handle_log_data(ioa_cfg, hostrcb);
- ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
+ ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
ioa_cfg->sdt_state == GET_DUMP)
ioa_cfg->sdt_state = WAIT_FOR_DUMP;
@@ -6722,7 +7525,7 @@ static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
- writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
+ writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
} else {
ipr_cmd->job_step = ioa_cfg->reset;
@@ -6785,7 +7588,10 @@ static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
- ipr_build_ucode_ioadl(ipr_cmd, sglist);
+ if (ioa_cfg->sis64)
+ ipr_build_ucode_ioadl64(ipr_cmd, sglist);
+ else
+ ipr_build_ucode_ioadl(ipr_cmd, sglist);
ipr_cmd->job_step = ipr_reset_ucode_download_done;
ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
@@ -7154,8 +7960,8 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
ipr_free_cmd_blks(ioa_cfg);
pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
- pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
- ioa_cfg->cfg_table,
+ pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
+ ioa_cfg->u.cfg_table,
ioa_cfg->cfg_table_dma);
for (i = 0; i < IPR_NUM_HCAMS; i++) {
@@ -7209,7 +8015,7 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
int i;
ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
- sizeof(struct ipr_cmnd), 8, 0);
+ sizeof(struct ipr_cmnd), 16, 0);
if (!ioa_cfg->ipr_cmd_pool)
return -ENOMEM;
@@ -7227,13 +8033,25 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
ioarcb = &ipr_cmd->ioarcb;
- ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
+ ipr_cmd->dma_addr = dma_addr;
+ if (ioa_cfg->sis64)
+ ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
+ else
+ ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
+
ioarcb->host_response_handle = cpu_to_be32(i << 2);
- ioarcb->write_ioadl_addr =
- cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
- ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
- ioarcb->ioasa_host_pci_addr =
- cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
+ if (ioa_cfg->sis64) {
+ ioarcb->u.sis64_addr_data.data_ioadl_addr =
+ cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
+ ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
+ cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, ioasa));
+ } else {
+ ioarcb->write_ioadl_addr =
+ cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
+ ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+ ioarcb->ioasa_host_pci_addr =
+ cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
+ }
ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
ipr_cmd->cmd_index = i;
ipr_cmd->ioa_cfg = ioa_cfg;
@@ -7260,13 +8078,24 @@ static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
ENTER;
ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
- IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
+ ioa_cfg->max_devs_supported, GFP_KERNEL);
if (!ioa_cfg->res_entries)
goto out;
- for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
+ if (ioa_cfg->sis64) {
+ ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
+ BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
+ ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
+ BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
+ ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
+ BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
+ }
+
+ for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
+ ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
+ }
ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
sizeof(struct ipr_misc_cbs),
@@ -7285,11 +8114,11 @@ static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
if (!ioa_cfg->host_rrq)
goto out_ipr_free_cmd_blocks;
- ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
- sizeof(struct ipr_config_table),
- &ioa_cfg->cfg_table_dma);
+ ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
+ ioa_cfg->cfg_table_size,
+ &ioa_cfg->cfg_table_dma);
- if (!ioa_cfg->cfg_table)
+ if (!ioa_cfg->u.cfg_table)
goto out_free_host_rrq;
for (i = 0; i < IPR_NUM_HCAMS; i++) {
@@ -7323,8 +8152,9 @@ out_free_hostrcb_dma:
ioa_cfg->hostrcb[i],
ioa_cfg->hostrcb_dma[i]);
}
- pci_free_consistent(pdev, sizeof(struct ipr_config_table),
- ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
+ pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
+ ioa_cfg->u.cfg_table,
+ ioa_cfg->cfg_table_dma);
out_free_host_rrq:
pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
@@ -7399,15 +8229,21 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
init_waitqueue_head(&ioa_cfg->reset_wait_q);
init_waitqueue_head(&ioa_cfg->msi_wait_q);
ioa_cfg->sdt_state = INACTIVE;
- if (ipr_enable_cache)
- ioa_cfg->cache_state = CACHE_ENABLED;
- else
- ioa_cfg->cache_state = CACHE_DISABLED;
ipr_initialize_bus_attr(ioa_cfg);
+ ioa_cfg->max_devs_supported = ipr_max_devs;
- host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
- host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
+ if (ioa_cfg->sis64) {
+ host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
+ host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
+ if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
+ ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
+ } else {
+ host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
+ host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
+ if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
+ ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
+ }
host->max_channel = IPR_MAX_BUS_TO_SCAN;
host->unique_id = host->host_no;
host->max_cmd_len = IPR_MAX_CDB_LEN;
@@ -7419,13 +8255,26 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
+ t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
+ t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
t->clr_interrupt_reg = base + p->clr_interrupt_reg;
+ t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
t->sense_interrupt_reg = base + p->sense_interrupt_reg;
+ t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
t->ioarrin_reg = base + p->ioarrin_reg;
t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
+ t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
+ t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
+ t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
+
+ if (ioa_cfg->sis64) {
+ t->init_feedback_reg = base + p->init_feedback_reg;
+ t->dump_addr_reg = base + p->dump_addr_reg;
+ t->dump_data_reg = base + p->dump_data_reg;
+ }
}
/**
@@ -7497,7 +8346,7 @@ static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
init_waitqueue_head(&ioa_cfg->msi_wait_q);
ioa_cfg->msi_received = 0;
ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
- writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg);
+ writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -7508,7 +8357,7 @@ static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
} else if (ipr_debug)
dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
- writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg);
+ writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
@@ -7578,6 +8427,8 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
goto out_scsi_host_put;
}
+ /* set SIS 32 or SIS 64 */
+ ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
if (ipr_transop_timeout)
@@ -7615,7 +8466,16 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
pci_set_master(pdev);
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ioa_cfg->sis64) {
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (rc < 0) {
+ dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ }
+
+ } else
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+
if (rc < 0) {
dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
goto cleanup_nomem;
@@ -7657,6 +8517,15 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
goto cleanup_nomem;
+ if (ioa_cfg->sis64)
+ ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
+ + ((sizeof(struct ipr_config_table_entry64)
+ * ioa_cfg->max_devs_supported)));
+ else
+ ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
+ + ((sizeof(struct ipr_config_table_entry)
+ * ioa_cfg->max_devs_supported)));
+
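/*
 * Editorial note: with the config tables declared with zero-length dev[]
 * arrays (see the ipr.h hunk below), the allocation size is one header
 * plus n entries, e.g. for SIS64 with n = IPR_DEFAULT_SIS64_DEVS (1024):
 *
 *	sizeof(struct ipr_config_table_hdr64) +
 *		1024 * sizeof(struct ipr_config_table_entry64)
 *
 * Later kernels would express this as struct_size(cfg_table64, dev, 1024).
 */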
rc = ipr_alloc_mem(ioa_cfg);
if (rc < 0) {
dev_err(&pdev->dev,
@@ -7668,9 +8537,9 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
* If HRRQ updated interrupt is not masked, or reset alert is set,
* the card is in an unknown state and needs a hard reset
*/
- mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
- interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
- uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
+ mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
+ interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
+ uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
ioa_cfg->needs_hard_reset = 1;
if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
@@ -7958,9 +8827,6 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
IPR_USE_LONG_TRANSOP_TIMEOUT },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
- PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
- IPR_USE_LONG_TRANSOP_TIMEOUT },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
@@ -7975,9 +8841,22 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
IPR_USE_LONG_TRANSOP_TIMEOUT },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E,
- PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0,
- IPR_USE_LONG_TRANSOP_TIMEOUT },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
@@ -7997,6 +8876,61 @@ static struct pci_driver ipr_driver = {
};
/**
+ * ipr_halt_done - Shutdown prepare completion
+ * @ipr_cmd: ipr command struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+ list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+}
+
+/**
+ * ipr_halt - Issue shutdown prepare to all adapters
+ * @nb: Notifier block
+ * @event: Notifier event
+ * @buf: Notifier data (unused)
+ *
+ * Return value:
+ * NOTIFY_OK on success / NOTIFY_DONE if the event is not a shutdown event
+ **/
+static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
+{
+ struct ipr_cmnd *ipr_cmd;
+ struct ipr_ioa_cfg *ioa_cfg;
+ unsigned long flags = 0;
+
+ if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
+ return NOTIFY_DONE;
+
+ spin_lock(&ipr_driver_lock);
+
+ list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
+ spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+ if (!ioa_cfg->allow_cmds) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+ continue;
+ }
+
+ ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+ ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+ ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+ ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
+ ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
+
+ ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+ }
+ spin_unlock(&ipr_driver_lock);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block ipr_notifier = {
+ ipr_halt, NULL, 0
+};
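/*
 * Editorial note: the positional initializer above fills the first three
 * notifier_block fields in declaration order (notifier_call, next,
 * priority); the designated form is the more common kernel idiom:
 *
 *	static struct notifier_block ipr_notifier = {
 *		.notifier_call = ipr_halt,
 *	};
 */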
+
+/**
* ipr_init - Module entry point
*
* Return value:
@@ -8007,6 +8941,7 @@ static int __init ipr_init(void)
ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
+ register_reboot_notifier(&ipr_notifier);
return pci_register_driver(&ipr_driver);
}
@@ -8020,6 +8955,7 @@ static int __init ipr_init(void)
**/
static void __exit ipr_exit(void)
{
+ unregister_reboot_notifier(&ipr_notifier);
pci_unregister_driver(&ipr_driver);
}
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 19bbcf39f0c9..4c267b5e0b96 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -37,8 +37,8 @@
/*
* Literals
*/
-#define IPR_DRIVER_VERSION "2.4.3"
-#define IPR_DRIVER_DATE "(June 10, 2009)"
+#define IPR_DRIVER_VERSION "2.5.0"
+#define IPR_DRIVER_DATE "(February 11, 2010)"
/*
* IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -55,7 +55,9 @@
#define IPR_NUM_BASE_CMD_BLKS 100
#define PCI_DEVICE_ID_IBM_OBSIDIAN_E 0x0339
-#define PCI_DEVICE_ID_IBM_SCAMP_E 0x034A
+
+#define PCI_DEVICE_ID_IBM_CROC_FPGA_E2 0x033D
+#define PCI_DEVICE_ID_IBM_CROC_ASIC_E2 0x034A
#define IPR_SUBS_DEV_ID_2780 0x0264
#define IPR_SUBS_DEV_ID_5702 0x0266
@@ -70,15 +72,24 @@
#define IPR_SUBS_DEV_ID_572A 0x02C1
#define IPR_SUBS_DEV_ID_572B 0x02C2
#define IPR_SUBS_DEV_ID_572F 0x02C3
-#define IPR_SUBS_DEV_ID_574D 0x030B
#define IPR_SUBS_DEV_ID_574E 0x030A
#define IPR_SUBS_DEV_ID_575B 0x030D
#define IPR_SUBS_DEV_ID_575C 0x0338
-#define IPR_SUBS_DEV_ID_575D 0x033E
#define IPR_SUBS_DEV_ID_57B3 0x033A
#define IPR_SUBS_DEV_ID_57B7 0x0360
#define IPR_SUBS_DEV_ID_57B8 0x02C2
+#define IPR_SUBS_DEV_ID_57B4 0x033B
+#define IPR_SUBS_DEV_ID_57B2 0x035F
+#define IPR_SUBS_DEV_ID_57C6 0x0357
+
+#define IPR_SUBS_DEV_ID_57B5 0x033C
+#define IPR_SUBS_DEV_ID_57CE 0x035E
+#define IPR_SUBS_DEV_ID_57B1 0x0355
+
+#define IPR_SUBS_DEV_ID_574D 0x0356
+#define IPR_SUBS_DEV_ID_575D 0x035D
+
#define IPR_NAME "ipr"
/*
@@ -118,6 +129,10 @@
#define IPR_NUM_LOG_HCAMS 2
#define IPR_NUM_CFG_CHG_HCAMS 2
#define IPR_NUM_HCAMS (IPR_NUM_LOG_HCAMS + IPR_NUM_CFG_CHG_HCAMS)
+
+#define IPR_MAX_SIS64_TARGETS_PER_BUS 1024
+#define IPR_MAX_SIS64_LUNS_PER_TARGET 0xffffffff
+
#define IPR_MAX_NUM_TARGETS_PER_BUS 256
#define IPR_MAX_NUM_LUNS_PER_TARGET 256
#define IPR_MAX_NUM_VSET_LUNS_PER_TARGET 8
@@ -132,13 +147,15 @@
/* We need resources for HCAMS, IOA reset, IOA bringdown, and ERP */
#define IPR_NUM_INTERNAL_CMD_BLKS (IPR_NUM_HCAMS + \
- ((IPR_NUM_RESET_RELOAD_RETRIES + 1) * 2) + 3)
+ ((IPR_NUM_RESET_RELOAD_RETRIES + 1) * 2) + 4)
#define IPR_MAX_COMMANDS IPR_NUM_BASE_CMD_BLKS
#define IPR_NUM_CMD_BLKS (IPR_NUM_BASE_CMD_BLKS + \
IPR_NUM_INTERNAL_CMD_BLKS)
#define IPR_MAX_PHYSICAL_DEVS 192
+#define IPR_DEFAULT_SIS64_DEVS 1024
+#define IPR_MAX_SIS64_DEVS 4096
#define IPR_MAX_SGLIST 64
#define IPR_IOA_MAX_SECTORS 32767
@@ -173,6 +190,7 @@
#define IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE 0x01
#define IPR_HCAM_CDB_OP_CODE_LOG_DATA 0x02
#define IPR_SET_SUPPORTED_DEVICES 0xFB
+#define IPR_SET_ALL_SUPPORTED_DEVICES 0x80
#define IPR_IOA_SHUTDOWN 0xF7
#define IPR_WR_BUF_DOWNLOAD_AND_SAVE 0x05
@@ -221,9 +239,17 @@
#define IPR_SDT_FMT2_BAR5_SEL 0x5
#define IPR_SDT_FMT2_EXP_ROM_SEL 0x8
#define IPR_FMT2_SDT_READY_TO_USE 0xC4D4E3F2
+#define IPR_FMT3_SDT_READY_TO_USE 0xC4D4E3F3
#define IPR_DOORBELL 0x82800000
#define IPR_RUNTIME_RESET 0x40000000
+#define IPR_IPL_INIT_MIN_STAGE_TIME 5
+#define IPR_IPL_INIT_STAGE_UNKNOWN 0x0
+#define IPR_IPL_INIT_STAGE_TRANSOP 0xB0000000
+#define IPR_IPL_INIT_STAGE_MASK 0xff000000
+#define IPR_IPL_INIT_STAGE_TIME_MASK 0x0000ffff
+#define IPR_PCII_IPL_STAGE_CHANGE (0x80000000 >> 0)
+
#define IPR_PCII_IOA_TRANS_TO_OPER (0x80000000 >> 0)
#define IPR_PCII_IOARCB_XFER_FAILED (0x80000000 >> 3)
#define IPR_PCII_IOA_UNIT_CHECKED (0x80000000 >> 4)
@@ -318,27 +344,27 @@ struct ipr_std_inq_data {
u8 serial_num[IPR_SERIAL_NUM_LEN];
}__attribute__ ((packed));
+#define IPR_RES_TYPE_AF_DASD 0x00
+#define IPR_RES_TYPE_GENERIC_SCSI 0x01
+#define IPR_RES_TYPE_VOLUME_SET 0x02
+#define IPR_RES_TYPE_REMOTE_AF_DASD 0x03
+#define IPR_RES_TYPE_GENERIC_ATA 0x04
+#define IPR_RES_TYPE_ARRAY 0x05
+#define IPR_RES_TYPE_IOAFP 0xff
+
struct ipr_config_table_entry {
u8 proto;
#define IPR_PROTO_SATA 0x02
#define IPR_PROTO_SATA_ATAPI 0x03
#define IPR_PROTO_SAS_STP 0x06
-#define IPR_PROTO_SAS_STP_ATAPI 0x07
+#define IPR_PROTO_SAS_STP_ATAPI 0x07
u8 array_id;
u8 flags;
-#define IPR_IS_IOA_RESOURCE 0x80
-#define IPR_IS_ARRAY_MEMBER 0x20
-#define IPR_IS_HOT_SPARE 0x10
-
+#define IPR_IS_IOA_RESOURCE 0x80
u8 rsvd_subtype;
-#define IPR_RES_SUBTYPE(res) (((res)->cfgte.rsvd_subtype) & 0x0f)
-#define IPR_SUBTYPE_AF_DASD 0
-#define IPR_SUBTYPE_GENERIC_SCSI 1
-#define IPR_SUBTYPE_VOLUME_SET 2
-#define IPR_SUBTYPE_GENERIC_ATA 4
-
-#define IPR_QUEUEING_MODEL(res) ((((res)->cfgte.flags) & 0x70) >> 4)
-#define IPR_QUEUE_FROZEN_MODEL 0
+
+#define IPR_QUEUEING_MODEL(res) ((((res)->flags) & 0x70) >> 4)
+#define IPR_QUEUE_FROZEN_MODEL 0
#define IPR_QUEUE_NACA_MODEL 1
struct ipr_res_addr res_addr;
@@ -347,6 +373,28 @@ struct ipr_config_table_entry {
struct ipr_std_inq_data std_inq_data;
}__attribute__ ((packed, aligned (4)));
+struct ipr_config_table_entry64 {
+ u8 res_type;
+ u8 proto;
+ u8 vset_num;
+ u8 array_id;
+ __be16 flags;
+ __be16 res_flags;
+#define IPR_QUEUEING_MODEL64(res) ((((res)->res_flags) & 0x7000) >> 12)
+ __be32 res_handle;
+ u8 dev_id_type;
+ u8 reserved[3];
+ __be64 dev_id;
+ __be64 lun;
+ __be64 lun_wwn[2];
+#define IPR_MAX_RES_PATH_LENGTH 24
+ __be64 res_path;
+ struct ipr_std_inq_data std_inq_data;
+ u8 reserved2[4];
+ __be64 reserved3[2]; /* description text */
+ u8 reserved4[8];
+}__attribute__ ((packed, aligned (8)));
+
struct ipr_config_table_hdr {
u8 num_entries;
u8 flags;
@@ -354,13 +402,35 @@ struct ipr_config_table_hdr {
__be16 reserved;
}__attribute__((packed, aligned (4)));
+struct ipr_config_table_hdr64 {
+ __be16 num_entries;
+ __be16 reserved;
+ u8 flags;
+ u8 reserved2[11];
+}__attribute__((packed, aligned (4)));
+
struct ipr_config_table {
struct ipr_config_table_hdr hdr;
- struct ipr_config_table_entry dev[IPR_MAX_PHYSICAL_DEVS];
+ struct ipr_config_table_entry dev[0];
}__attribute__((packed, aligned (4)));
+struct ipr_config_table64 {
+ struct ipr_config_table_hdr64 hdr64;
+ struct ipr_config_table_entry64 dev[0];
+}__attribute__((packed, aligned (8)));
+
+struct ipr_config_table_entry_wrapper {
+ union {
+ struct ipr_config_table_entry *cfgte;
+ struct ipr_config_table_entry64 *cfgte64;
+ } u;
+};
+
struct ipr_hostrcb_cfg_ch_not {
- struct ipr_config_table_entry cfgte;
+ union {
+ struct ipr_config_table_entry cfgte;
+ struct ipr_config_table_entry64 cfgte64;
+ } u;
u8 reserved[936];
}__attribute__((packed, aligned (4)));
@@ -381,7 +451,7 @@ struct ipr_cmd_pkt {
#define IPR_RQTYPE_HCAM 0x02
#define IPR_RQTYPE_ATA_PASSTHRU 0x04
- u8 luntar_luntrn;
+ u8 reserved2;
u8 flags_hi;
#define IPR_FLAGS_HI_WRITE_NOT_READ 0x80
@@ -403,7 +473,7 @@ struct ipr_cmd_pkt {
__be16 timeout;
}__attribute__ ((packed, aligned(4)));
-struct ipr_ioarcb_ata_regs {
+struct ipr_ioarcb_ata_regs { /* 22 bytes */
u8 flags;
#define IPR_ATA_FLAG_PACKET_CMD 0x80
#define IPR_ATA_FLAG_XFER_TYPE_DMA 0x40
@@ -442,28 +512,49 @@ struct ipr_ioadl_desc {
__be32 address;
}__attribute__((packed, aligned (8)));
+struct ipr_ioadl64_desc {
+ __be32 flags;
+ __be32 data_len;
+ __be64 address;
+}__attribute__((packed, aligned (16)));
+
+struct ipr_ata64_ioadl {
+ struct ipr_ioarcb_ata_regs regs;
+ u16 reserved[5];
+ struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES];
+}__attribute__((packed, aligned (16)));
+
struct ipr_ioarcb_add_data {
union {
struct ipr_ioarcb_ata_regs regs;
struct ipr_ioadl_desc ioadl[5];
__be32 add_cmd_parms[10];
- }u;
-}__attribute__ ((packed, aligned(4)));
+ } u;
+}__attribute__ ((packed, aligned (4)));
+
+struct ipr_ioarcb_sis64_add_addr_ecb {
+ __be64 ioasa_host_pci_addr;
+ __be64 data_ioadl_addr;
+ __be64 reserved;
+ __be32 ext_control_buf[4];
+}__attribute__((packed, aligned (8)));
/* IOA Request Control Block 128 bytes */
struct ipr_ioarcb {
- __be32 ioarcb_host_pci_addr;
- __be32 reserved;
+ union {
+ __be32 ioarcb_host_pci_addr;
+ __be64 ioarcb_host_pci_addr64;
+ } a;
__be32 res_handle;
__be32 host_response_handle;
__be32 reserved1;
__be32 reserved2;
__be32 reserved3;
- __be32 write_data_transfer_length;
+ __be32 data_transfer_length;
__be32 read_data_transfer_length;
__be32 write_ioadl_addr;
- __be32 write_ioadl_len;
+ __be32 ioadl_len;
__be32 read_ioadl_addr;
__be32 read_ioadl_len;
@@ -473,8 +564,14 @@ struct ipr_ioarcb {
struct ipr_cmd_pkt cmd_pkt;
- __be32 add_cmd_parms_len;
- struct ipr_ioarcb_add_data add_data;
+ __be16 add_cmd_parms_offset;
+ __be16 add_cmd_parms_len;
+
+ union {
+ struct ipr_ioarcb_add_data add_data;
+ struct ipr_ioarcb_sis64_add_addr_ecb sis64_addr_data;
+ } u;
+
}__attribute__((packed, aligned (4)));
struct ipr_ioasa_vset {
@@ -676,12 +773,29 @@ struct ipr_hostrcb_device_data_entry_enhanced {
struct ipr_ext_vpd cfc_last_with_dev_vpd;
}__attribute__((packed, aligned (4)));
+struct ipr_hostrcb64_device_data_entry_enhanced {
+ struct ipr_ext_vpd vpd;
+ u8 ccin[4];
+ u8 res_path[8];
+ struct ipr_ext_vpd new_vpd;
+ u8 new_ccin[4];
+ struct ipr_ext_vpd ioa_last_with_dev_vpd;
+ struct ipr_ext_vpd cfc_last_with_dev_vpd;
+}__attribute__((packed, aligned (4)));
+
struct ipr_hostrcb_array_data_entry {
struct ipr_vpd vpd;
struct ipr_res_addr expected_dev_res_addr;
struct ipr_res_addr dev_res_addr;
}__attribute__((packed, aligned (4)));
+struct ipr_hostrcb64_array_data_entry {
+ struct ipr_ext_vpd vpd;
+ u8 ccin[4];
+ u8 expected_res_path[8];
+ u8 res_path[8];
+}__attribute__((packed, aligned (4)));
+
struct ipr_hostrcb_array_data_entry_enhanced {
struct ipr_ext_vpd vpd;
u8 ccin[4];
@@ -733,6 +847,14 @@ struct ipr_hostrcb_type_13_error {
struct ipr_hostrcb_device_data_entry_enhanced dev[3];
}__attribute__((packed, aligned (4)));
+struct ipr_hostrcb_type_23_error {
+ struct ipr_ext_vpd ioa_vpd;
+ struct ipr_ext_vpd cfc_vpd;
+ __be32 errors_detected;
+ __be32 errors_logged;
+ struct ipr_hostrcb64_device_data_entry_enhanced dev[3];
+}__attribute__((packed, aligned (4)));
+
struct ipr_hostrcb_type_04_error {
struct ipr_vpd ioa_vpd;
struct ipr_vpd cfc_vpd;
@@ -760,6 +882,22 @@ struct ipr_hostrcb_type_14_error {
struct ipr_hostrcb_array_data_entry_enhanced array_member[18];
}__attribute__((packed, aligned (4)));
+struct ipr_hostrcb_type_24_error {
+ struct ipr_ext_vpd ioa_vpd;
+ struct ipr_ext_vpd cfc_vpd;
+ u8 reserved[2];
+ u8 exposed_mode_adn;
+#define IPR_INVALID_ARRAY_DEV_NUM 0xff
+ u8 array_id;
+ u8 last_res_path[8];
+ u8 protection_level[8];
+ struct ipr_ext_vpd array_vpd;
+ u8 description[16];
+ u8 reserved2[3];
+ u8 num_entries;
+ struct ipr_hostrcb64_array_data_entry array_member[32];
+}__attribute__((packed, aligned (4)));
+
struct ipr_hostrcb_type_07_error {
u8 failure_reason[64];
struct ipr_vpd vpd;
@@ -797,6 +935,22 @@ struct ipr_hostrcb_config_element {
__be32 wwid[2];
}__attribute__((packed, aligned (4)));
+struct ipr_hostrcb64_config_element {
+ __be16 length;
+ u8 descriptor_id;
+#define IPR_DESCRIPTOR_MASK 0xC0
+#define IPR_DESCRIPTOR_SIS64 0x00
+
+ u8 reserved;
+ u8 type_status;
+
+ u8 reserved2[2];
+ u8 link_rate;
+
+ u8 res_path[8];
+ __be32 wwid[2];
+}__attribute__((packed, aligned (8)));
+
struct ipr_hostrcb_fabric_desc {
__be16 length;
u8 ioa_port;
@@ -818,6 +972,20 @@ struct ipr_hostrcb_fabric_desc {
struct ipr_hostrcb_config_element elem[1];
}__attribute__((packed, aligned (4)));
+struct ipr_hostrcb64_fabric_desc {
+ __be16 length;
+ u8 descriptor_id;
+
+ u8 reserved;
+ u8 path_state;
+
+ u8 reserved2[2];
+ u8 res_path[8];
+ u8 reserved3[6];
+ __be16 num_entries;
+ struct ipr_hostrcb64_config_element elem[1];
+}__attribute__((packed, aligned (8)));
+
#define for_each_fabric_cfg(fabric, cfg) \
for (cfg = (fabric)->elem; \
cfg < ((fabric)->elem + be16_to_cpu((fabric)->num_entries)); \
@@ -830,10 +998,17 @@ struct ipr_hostrcb_type_20_error {
struct ipr_hostrcb_fabric_desc desc[1];
}__attribute__((packed, aligned (4)));
+struct ipr_hostrcb_type_30_error {
+ u8 failure_reason[64];
+ u8 reserved[3];
+ u8 num_entries;
+ struct ipr_hostrcb64_fabric_desc desc[1];
+}__attribute__((packed, aligned (4)));
+
struct ipr_hostrcb_error {
- __be32 failing_dev_ioasc;
- struct ipr_res_addr failing_dev_res_addr;
- __be32 failing_dev_res_handle;
+ __be32 fd_ioasc;
+ struct ipr_res_addr fd_res_addr;
+ __be32 fd_res_handle;
__be32 prc;
union {
struct ipr_hostrcb_type_ff_error type_ff_error;
@@ -850,6 +1025,26 @@ struct ipr_hostrcb_error {
} u;
}__attribute__((packed, aligned (4)));
+struct ipr_hostrcb64_error {
+ __be32 fd_ioasc;
+ __be32 ioa_fw_level;
+ __be32 fd_res_handle;
+ __be32 prc;
+ __be64 fd_dev_id;
+ __be64 fd_lun;
+ u8 fd_res_path[8];
+ __be64 time_stamp;
+ u8 reserved[2];
+ union {
+ struct ipr_hostrcb_type_ff_error type_ff_error;
+ struct ipr_hostrcb_type_12_error type_12_error;
+ struct ipr_hostrcb_type_17_error type_17_error;
+ struct ipr_hostrcb_type_23_error type_23_error;
+ struct ipr_hostrcb_type_24_error type_24_error;
+ struct ipr_hostrcb_type_30_error type_30_error;
+ } u;
+}__attribute__((packed, aligned (8)));
+
struct ipr_hostrcb_raw {
__be32 data[sizeof(struct ipr_hostrcb_error)/sizeof(__be32)];
}__attribute__((packed, aligned (4)));
@@ -887,7 +1082,11 @@ struct ipr_hcam {
#define IPR_HOST_RCB_OVERLAY_ID_16 0x16
#define IPR_HOST_RCB_OVERLAY_ID_17 0x17
#define IPR_HOST_RCB_OVERLAY_ID_20 0x20
-#define IPR_HOST_RCB_OVERLAY_ID_DEFAULT 0xFF
+#define IPR_HOST_RCB_OVERLAY_ID_23 0x23
+#define IPR_HOST_RCB_OVERLAY_ID_24 0x24
+#define IPR_HOST_RCB_OVERLAY_ID_26 0x26
+#define IPR_HOST_RCB_OVERLAY_ID_30 0x30
+#define IPR_HOST_RCB_OVERLAY_ID_DEFAULT 0xFF
u8 reserved1[3];
__be32 ilid;
@@ -897,6 +1096,7 @@ struct ipr_hcam {
union {
struct ipr_hostrcb_error error;
+ struct ipr_hostrcb64_error error64;
struct ipr_hostrcb_cfg_ch_not ccn;
struct ipr_hostrcb_raw raw;
} u;
@@ -907,14 +1107,14 @@ struct ipr_hostrcb {
dma_addr_t hostrcb_dma;
struct list_head queue;
struct ipr_ioa_cfg *ioa_cfg;
+ char rp_buffer[IPR_MAX_RES_PATH_LENGTH];
};
/* IPR smart dump table structures */
struct ipr_sdt_entry {
- __be32 bar_str_offset;
- __be32 end_offset;
- u8 entry_byte;
- u8 reserved[3];
+ __be32 start_token;
+ __be32 end_token;
+ u8 reserved[4];
u8 flags;
#define IPR_SDT_ENDIAN 0x80
@@ -960,28 +1160,48 @@ struct ipr_sata_port {
};
struct ipr_resource_entry {
- struct ipr_config_table_entry cfgte;
u8 needs_sync_complete:1;
u8 in_erp:1;
u8 add_to_ml:1;
u8 del_from_ml:1;
u8 resetting_device:1;
+ u32 bus; /* AKA channel */
+ u32 target; /* AKA id */
+ u32 lun;
+#define IPR_ARRAY_VIRTUAL_BUS 0x1
+#define IPR_VSET_VIRTUAL_BUS 0x2
+#define IPR_IOAFP_VIRTUAL_BUS 0x3
+
+#define IPR_GET_RES_PHYS_LOC(res) \
+ (((res)->bus << 24) | ((res)->target << 8) | (res)->lun)
+
+ u8 ata_class;
+
+ u8 flags;
+ __be16 res_flags;
+
+ __be32 type;
+
+ u8 qmodel;
+ struct ipr_std_inq_data std_inq_data;
+
+ __be32 res_handle;
+ __be64 dev_id;
+ struct scsi_lun dev_lun;
+ u8 res_path[8];
+
+ struct ipr_ioa_cfg *ioa_cfg;
struct scsi_device *sdev;
struct ipr_sata_port *sata_port;
struct list_head queue;
-};
+}; /* struct ipr_resource_entry */
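
[Editor's aside, a minimal sketch assuming only the fields shown above: the new IPR_GET_RES_PHYS_LOC() macro folds bus/target/lun into one u32 as bus<<24 | target<<8 | lun, so bus 1, target 2, lun 3 encodes as 0x01000203.]

	static u32 example_res_phys_loc(void)
	{
		struct ipr_resource_entry res = { .bus = 1, .target = 2, .lun = 3 };

		return IPR_GET_RES_PHYS_LOC(&res);	/* 0x01000203 */
	}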
struct ipr_resource_hdr {
u16 num_entries;
u16 reserved;
};
-struct ipr_resource_table {
- struct ipr_resource_hdr hdr;
- struct ipr_resource_entry dev[IPR_MAX_PHYSICAL_DEVS];
-};
-
struct ipr_misc_cbs {
struct ipr_ioa_vpd ioa_vpd;
struct ipr_inquiry_page0 page0_data;
@@ -994,27 +1214,51 @@ struct ipr_misc_cbs {
struct ipr_interrupt_offsets {
unsigned long set_interrupt_mask_reg;
unsigned long clr_interrupt_mask_reg;
+ unsigned long clr_interrupt_mask_reg32;
unsigned long sense_interrupt_mask_reg;
+ unsigned long sense_interrupt_mask_reg32;
unsigned long clr_interrupt_reg;
+ unsigned long clr_interrupt_reg32;
unsigned long sense_interrupt_reg;
+ unsigned long sense_interrupt_reg32;
unsigned long ioarrin_reg;
unsigned long sense_uproc_interrupt_reg;
+ unsigned long sense_uproc_interrupt_reg32;
unsigned long set_uproc_interrupt_reg;
+ unsigned long set_uproc_interrupt_reg32;
unsigned long clr_uproc_interrupt_reg;
+ unsigned long clr_uproc_interrupt_reg32;
+
+ unsigned long init_feedback_reg;
+
+ unsigned long dump_addr_reg;
+ unsigned long dump_data_reg;
};
struct ipr_interrupts {
void __iomem *set_interrupt_mask_reg;
void __iomem *clr_interrupt_mask_reg;
+ void __iomem *clr_interrupt_mask_reg32;
void __iomem *sense_interrupt_mask_reg;
+ void __iomem *sense_interrupt_mask_reg32;
void __iomem *clr_interrupt_reg;
+ void __iomem *clr_interrupt_reg32;
void __iomem *sense_interrupt_reg;
+ void __iomem *sense_interrupt_reg32;
void __iomem *ioarrin_reg;
void __iomem *sense_uproc_interrupt_reg;
+ void __iomem *sense_uproc_interrupt_reg32;
void __iomem *set_uproc_interrupt_reg;
+ void __iomem *set_uproc_interrupt_reg32;
void __iomem *clr_uproc_interrupt_reg;
+ void __iomem *clr_uproc_interrupt_reg32;
+
+ void __iomem *init_feedback_reg;
+
+ void __iomem *dump_addr_reg;
+ void __iomem *dump_data_reg;
};
struct ipr_chip_cfg_t {
@@ -1029,6 +1273,9 @@ struct ipr_chip_t {
u16 intr_type;
#define IPR_USE_LSI 0x00
#define IPR_USE_MSI 0x01
+ u16 sis_type;
+#define IPR_SIS32 0x00
+#define IPR_SIS64 0x01
const struct ipr_chip_cfg_t *cfg;
};
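
[Editor's aside, a hedged sketch of how a chip table entry could select the new 64-bit SIS interface; example_cfg and the entry itself are placeholders, not from this patch.]

	static const struct ipr_chip_cfg_t example_cfg;	/* placeholder config */

	static const struct ipr_chip_t example_chip = {
		.intr_type = IPR_USE_MSI,
		.sis_type  = IPR_SIS64,		/* new 64-bit SIS adapter */
		.cfg       = &example_cfg,
	};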
@@ -1073,13 +1320,6 @@ enum ipr_sdt_state {
DUMP_OBTAINED
};
-enum ipr_cache_state {
- CACHE_NONE,
- CACHE_DISABLED,
- CACHE_ENABLED,
- CACHE_INVALID
-};
-
/* Per-controller data */
struct ipr_ioa_cfg {
char eye_catcher[8];
@@ -1099,10 +1339,17 @@ struct ipr_ioa_cfg {
u8 dual_raid:1;
u8 needs_warm_reset:1;
u8 msi_received:1;
+ u8 sis64:1;
u8 revid;
- enum ipr_cache_state cache_state;
+ /*
+ * Bitmaps for SIS64 generated target values
+ */
+ unsigned long *target_ids;
+ unsigned long *array_ids;
+ unsigned long *vset_ids;
+
u16 type; /* CCIN of the card */
u8 log_level;
@@ -1133,8 +1380,13 @@ struct ipr_ioa_cfg {
char cfg_table_start[8];
#define IPR_CFG_TBL_START "cfg"
- struct ipr_config_table *cfg_table;
+ union {
+ struct ipr_config_table *cfg_table;
+ struct ipr_config_table64 *cfg_table64;
+ } u;
dma_addr_t cfg_table_dma;
+ u32 cfg_table_size;
+ u32 max_devs_supported;
char resource_table_label[8];
#define IPR_RES_TABLE_LABEL "res_tbl"
@@ -1202,13 +1454,17 @@ struct ipr_ioa_cfg {
char ipr_cmd_label[8];
#define IPR_CMD_LABEL "ipr_cmd"
struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS];
- u32 ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS];
-};
+ dma_addr_t ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS];
+}; /* struct ipr_ioa_cfg */
struct ipr_cmnd {
struct ipr_ioarcb ioarcb;
+ union {
+ struct ipr_ioadl_desc ioadl[IPR_NUM_IOADL_ENTRIES];
+ struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES];
+ struct ipr_ata64_ioadl ata_ioadl;
+ } i;
struct ipr_ioasa ioasa;
- struct ipr_ioadl_desc ioadl[IPR_NUM_IOADL_ENTRIES];
struct list_head queue;
struct scsi_cmnd *scsi_cmd;
struct ata_queued_cmd *qc;
@@ -1221,7 +1477,7 @@ struct ipr_cmnd {
u8 sense_buffer[SCSI_SENSE_BUFFERSIZE];
dma_addr_t sense_buffer_dma;
unsigned short dma_use_sg;
- dma_addr_t dma_handle;
+ dma_addr_t dma_addr;
struct ipr_cmnd *sibling;
union {
enum ipr_shutdown_type shutdown_type;
@@ -1314,8 +1570,6 @@ struct ipr_ioa_dump {
u32 next_page_index;
u32 page_offset;
u32 format;
-#define IPR_SDT_FMT2 2
-#define IPR_SDT_UNKNOWN 3
}__attribute__((packed, aligned (4)));
struct ipr_dump {
@@ -1377,6 +1631,13 @@ struct ipr_ucode_image_header {
#define ipr_info(...) printk(KERN_INFO IPR_NAME ": "__VA_ARGS__)
#define ipr_dbg(...) IPR_DBG_CMD(printk(KERN_INFO IPR_NAME ": "__VA_ARGS__))
+#define ipr_res_printk(level, ioa_cfg, bus, target, lun, fmt, ...) \
+ printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, (ioa_cfg)->host->host_no, \
+ bus, target, lun, ##__VA_ARGS__)
+
+#define ipr_res_err(ioa_cfg, res, fmt, ...) \
+ ipr_res_printk(KERN_ERR, ioa_cfg, (res)->bus, (res)->target, (res)->lun, fmt, ##__VA_ARGS__)
+
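[Editor's aside, a small usage sketch assuming nothing beyond the macros above: with bus/target/lun now stored directly in the resource entry, ipr_res_err() no longer dereferences the old cfgte resource address.]

	static void example_report(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_resource_entry *res)
	{
		/* prints "ipr: <host>:<bus>:<target>:<lun>: reset failed" */
		ipr_res_err(ioa_cfg, res, "reset failed\n");
	}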
#define ipr_ra_printk(level, ioa_cfg, ra, fmt, ...) \
printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, (ioa_cfg)->host->host_no, \
(ra).bus, (ra).target, (ra).lun, ##__VA_ARGS__)
@@ -1384,9 +1645,6 @@ struct ipr_ucode_image_header {
#define ipr_ra_err(ioa_cfg, ra, fmt, ...) \
ipr_ra_printk(KERN_ERR, ioa_cfg, ra, fmt, ##__VA_ARGS__)
-#define ipr_res_err(ioa_cfg, res, fmt, ...) \
- ipr_ra_err(ioa_cfg, (res)->cfgte.res_addr, fmt, ##__VA_ARGS__)
-
#define ipr_phys_res_err(ioa_cfg, res, fmt, ...) \
{ \
if ((res).bus >= IPR_MAX_NUM_BUSES) { \
@@ -1399,14 +1657,21 @@ struct ipr_ucode_image_header {
}
#define ipr_hcam_err(hostrcb, fmt, ...) \
-{ \
- if (ipr_is_device(&(hostrcb)->hcam.u.error.failing_dev_res_addr)) { \
- ipr_ra_err((hostrcb)->ioa_cfg, \
- (hostrcb)->hcam.u.error.failing_dev_res_addr, \
- fmt, ##__VA_ARGS__); \
- } else { \
- dev_err(&(hostrcb)->ioa_cfg->pdev->dev, fmt, ##__VA_ARGS__); \
- } \
+{ \
+ if (ipr_is_device(hostrcb)) { \
+ if ((hostrcb)->ioa_cfg->sis64) { \
+ printk(KERN_ERR IPR_NAME ": %s: " fmt, \
+ ipr_format_resource_path(&hostrcb->hcam.u.error64.fd_res_path[0], \
+ &hostrcb->rp_buffer[0]), \
+ __VA_ARGS__); \
+ } else { \
+ ipr_ra_err((hostrcb)->ioa_cfg, \
+ (hostrcb)->hcam.u.error.fd_res_addr, \
+ fmt, __VA_ARGS__); \
+ } \
+ } else { \
+ dev_err(&(hostrcb)->ioa_cfg->pdev->dev, fmt, __VA_ARGS__); \
+ } \
}
#define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\
@@ -1432,7 +1697,7 @@ ipr_err("----------------------------------------------------------\n")
**/
static inline int ipr_is_ioa_resource(struct ipr_resource_entry *res)
{
- return (res->cfgte.flags & IPR_IS_IOA_RESOURCE) ? 1 : 0;
+ return res->type == IPR_RES_TYPE_IOAFP;
}
/**
@@ -1444,12 +1709,8 @@ static inline int ipr_is_ioa_resource(struct ipr_resource_entry *res)
**/
static inline int ipr_is_af_dasd_device(struct ipr_resource_entry *res)
{
- if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data) &&
- !ipr_is_ioa_resource(res) &&
- IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_AF_DASD)
- return 1;
- else
- return 0;
+ return res->type == IPR_RES_TYPE_AF_DASD ||
+ res->type == IPR_RES_TYPE_REMOTE_AF_DASD;
}
/**
@@ -1461,12 +1722,7 @@ static inline int ipr_is_af_dasd_device(struct ipr_resource_entry *res)
**/
static inline int ipr_is_vset_device(struct ipr_resource_entry *res)
{
- if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data) &&
- !ipr_is_ioa_resource(res) &&
- IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_VOLUME_SET)
- return 1;
- else
- return 0;
+ return res->type == IPR_RES_TYPE_VOLUME_SET;
}
/**
@@ -1478,11 +1734,7 @@ static inline int ipr_is_vset_device(struct ipr_resource_entry *res)
**/
static inline int ipr_is_gscsi(struct ipr_resource_entry *res)
{
- if (!ipr_is_ioa_resource(res) &&
- IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_GENERIC_SCSI)
- return 1;
- else
- return 0;
+ return res->type == IPR_RES_TYPE_GENERIC_SCSI;
}
/**
@@ -1495,7 +1747,7 @@ static inline int ipr_is_gscsi(struct ipr_resource_entry *res)
static inline int ipr_is_scsi_disk(struct ipr_resource_entry *res)
{
if (ipr_is_af_dasd_device(res) ||
- (ipr_is_gscsi(res) && IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data)))
+ (ipr_is_gscsi(res) && IPR_IS_DASD_DEVICE(res->std_inq_data)))
return 1;
else
return 0;
@@ -1510,11 +1762,7 @@ static inline int ipr_is_scsi_disk(struct ipr_resource_entry *res)
**/
static inline int ipr_is_gata(struct ipr_resource_entry *res)
{
- if (!ipr_is_ioa_resource(res) &&
- IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_GENERIC_ATA)
- return 1;
- else
- return 0;
+ return res->type == IPR_RES_TYPE_GENERIC_ATA;
}
/**
@@ -1526,24 +1774,35 @@ static inline int ipr_is_gata(struct ipr_resource_entry *res)
**/
static inline int ipr_is_naca_model(struct ipr_resource_entry *res)
{
- if (ipr_is_gscsi(res) && IPR_QUEUEING_MODEL(res) == IPR_QUEUE_NACA_MODEL)
+ if (ipr_is_gscsi(res) && res->qmodel == IPR_QUEUE_NACA_MODEL)
return 1;
return 0;
}
/**
- * ipr_is_device - Determine if resource address is that of a device
- * @res_addr: resource address struct
+ * ipr_is_device - Determine if the hostrcb structure is related to a device
+ * @hostrcb: host resource control block struct
*
* Return value:
 * 1 if device / 0 if not a device
**/
-static inline int ipr_is_device(struct ipr_res_addr *res_addr)
+static inline int ipr_is_device(struct ipr_hostrcb *hostrcb)
{
- if ((res_addr->bus < IPR_MAX_NUM_BUSES) &&
- (res_addr->target < (IPR_MAX_NUM_TARGETS_PER_BUS - 1)))
- return 1;
-
+ struct ipr_res_addr *res_addr;
+ u8 *res_path;
+
+ if (hostrcb->ioa_cfg->sis64) {
+ res_path = &hostrcb->hcam.u.error64.fd_res_path[0];
+ if ((res_path[0] == 0x00 || res_path[0] == 0x80 ||
+ res_path[0] == 0x81) && res_path[2] != 0xFF)
+ return 1;
+ } else {
+ res_addr = &hostrcb->hcam.u.error.fd_res_addr;
+
+ if ((res_addr->bus < IPR_MAX_NUM_BUSES) &&
+ (res_addr->target < (IPR_MAX_NUM_TARGETS_PER_BUS - 1)))
+ return 1;
+ }
return 0;
}
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 8a89ba900588..249053a9d4fa 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -874,7 +874,7 @@ static struct scsi_host_template iscsi_sw_tcp_sht = {
.cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
.eh_abort_handler = iscsi_eh_abort,
.eh_device_reset_handler= iscsi_eh_device_reset,
- .eh_target_reset_handler= iscsi_eh_target_reset,
+ .eh_target_reset_handler = iscsi_eh_recover_target,
.use_clustering = DISABLE_CLUSTERING,
.slave_alloc = iscsi_sw_tcp_slave_alloc,
.slave_configure = iscsi_sw_tcp_slave_configure,
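
[Editor's aside, a hedged illustration of the handler split; example_sht is hypothetical. An iSCSI LLD that wants only the warm target reset keeps iscsi_eh_target_reset, while one that wants reset-then-session-recovery, as iscsi_tcp does above, points at iscsi_eh_recover_target.]

	static struct scsi_host_template example_sht = {
		/* warm target reset, escalating to ERL0 session recovery */
		.eh_target_reset_handler = iscsi_eh_recover_target,
		.eh_device_reset_handler = iscsi_eh_device_reset,
	};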
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 703eb6a88790..685eaec53218 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2338,7 +2338,7 @@ EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
* This function will wait for a relogin, session termination from
* userspace, or a recovery/replacement timeout.
*/
-static int iscsi_eh_session_reset(struct scsi_cmnd *sc)
+int iscsi_eh_session_reset(struct scsi_cmnd *sc)
{
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
@@ -2389,6 +2389,7 @@ failed:
mutex_unlock(&session->eh_mutex);
return SUCCESS;
}
+EXPORT_SYMBOL_GPL(iscsi_eh_session_reset);
static void iscsi_prep_tgt_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
{
@@ -2403,8 +2404,7 @@ static void iscsi_prep_tgt_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
* iscsi_eh_target_reset - reset target
* @sc: scsi command
*
- * This will attempt to send a warm target reset. If that fails
- * then we will drop the session and attempt ERL0 recovery.
+ * This will attempt to send a warm target reset.
*/
int iscsi_eh_target_reset(struct scsi_cmnd *sc)
{
@@ -2476,12 +2476,27 @@ done:
ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname,
rc == SUCCESS ? "SUCCESS" : "FAILED");
mutex_unlock(&session->eh_mutex);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(iscsi_eh_target_reset);
+/**
+ * iscsi_eh_recover_target - reset target and possibly the session
+ * @sc: scsi command
+ *
+ * This will attempt to send a warm target reset. If that fails,
+ * we will escalate to ERL0 session recovery.
+ */
+int iscsi_eh_recover_target(struct scsi_cmnd *sc)
+{
+ int rc;
+
+ rc = iscsi_eh_target_reset(sc);
if (rc == FAILED)
rc = iscsi_eh_session_reset(sc);
return rc;
}
-EXPORT_SYMBOL_GPL(iscsi_eh_target_reset);
+EXPORT_SYMBOL_GPL(iscsi_eh_recover_target);
/*
* Pre-allocate a pool of @max items of @item_size. By default, the pool
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 84b696463a58..565e16dd74fc 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -37,6 +37,9 @@ struct lpfc_sli2_slim;
the NameServer before giving up. */
#define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */
#define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */
+#define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128 /* sg element count per scsi
+	cmnd for menlo; firmware downloads via bsg need nearly twice as many */
#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
#define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/
@@ -509,7 +512,6 @@ struct lpfc_hba {
int (*lpfc_hba_down_link)
(struct lpfc_hba *);
-
/* SLI4 specific HBA data structure */
struct lpfc_sli4_hba sli4_hba;
@@ -623,6 +625,9 @@ struct lpfc_hba {
uint32_t cfg_log_verbose;
uint32_t cfg_aer_support;
uint32_t cfg_suppress_link_up;
+#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
+#define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */
+#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */
lpfc_vpd_t vpd; /* vital product data */
@@ -804,6 +809,9 @@ struct lpfc_hba {
struct list_head ct_ev_waiters;
struct unsol_rcv_ct_ctx ct_ctx[64];
uint32_t ctx_idx;
+
+ uint8_t menlo_flag; /* menlo generic flags */
+#define HBA_MENLO_SUPPORT 0x1 /* HBA supports menlo commands */
};
static inline struct Scsi_Host *
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index c992e8328f9e..64cd17eedb64 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1939,7 +1939,9 @@ static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO,
# 0x2 = never bring up link
# Default value is 0.
*/
-LPFC_ATTR_R(suppress_link_up, 0, 0, 2, "Suppress Link Up at initialization");
+LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
+ LPFC_DELAY_INIT_LINK_INDEFINITELY,
+ "Suppress Link Up at initialization");
/*
# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
@@ -1966,8 +1968,7 @@ lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- int val = 0;
- val = vport->cfg_devloss_tmo;
+
return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo);
}
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index f3f1bf1a0a71..692c29f6048e 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -83,15 +83,28 @@ struct lpfc_bsg_mbox {
struct fc_bsg_job *set_job;
};
+#define MENLO_DID 0x0000FC0E
+
+struct lpfc_bsg_menlo {
+ struct lpfc_iocbq *cmdiocbq;
+ struct lpfc_iocbq *rspiocbq;
+ struct lpfc_dmabuf *bmp;
+
+ /* job waiting for this iocb to finish */
+ struct fc_bsg_job *set_job;
+};
+
#define TYPE_EVT 1
#define TYPE_IOCB 2
#define TYPE_MBOX 3
+#define TYPE_MENLO 4
struct bsg_job_data {
uint32_t type;
union {
struct lpfc_bsg_event *evt;
struct lpfc_bsg_iocb iocb;
struct lpfc_bsg_mbox mbox;
+ struct lpfc_bsg_menlo menlo;
} context_un;
};
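
[Editor's aside, a minimal sketch of the tagged-union pattern this hunk adds; example_get_menlo is illustrative only. Consumers must check dd_data->type before touching context_un.]

	static struct lpfc_bsg_menlo *example_get_menlo(struct bsg_job_data *dd_data)
	{
		if (dd_data->type != TYPE_MENLO)
			return NULL;	/* not a menlo job */
		return &dd_data->context_un.menlo;
	}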
@@ -2456,6 +2469,18 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
case MBX_PORT_IOV_CONTROL:
break;
case MBX_SET_VARIABLE:
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "1226 mbox: set_variable 0x%x, 0x%x\n",
+ mb->un.varWords[0],
+ mb->un.varWords[1]);
+ if ((mb->un.varWords[0] == SETVAR_MLOMNT)
+ && (mb->un.varWords[1] == 1)) {
+ phba->wait_4_mlo_maint_flg = 1;
+ } else if (mb->un.varWords[0] == SETVAR_MLORST) {
+ phba->link_flag &= ~LS_LOOPBACK_MODE;
+ phba->fc_topology = TOPOLOGY_PT_PT;
+ }
+ break;
case MBX_RUN_BIU_DIAG64:
case MBX_READ_EVENT_LOG:
case MBX_READ_SPARM64:
@@ -2638,6 +2663,297 @@ job_error:
}
/**
+ * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued through
+ * lpfc_menlo_cmd. It is called by the ring event handler without any
+ * lock held and may run in worker thread context, interrupt context,
+ * or from another thread that cleans up the SLI layer objects. It
+ * copies the contents of the response iocb into the response iocb
+ * memory object provided by the caller of lpfc_sli_issue_iocb_wait
+ * and then wakes up the thread sleeping on the iocb completion.
+ **/
+static void
+lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_iocbq *rspiocbq)
+{
+ struct bsg_job_data *dd_data;
+ struct fc_bsg_job *job;
+ IOCB_t *rsp;
+ struct lpfc_dmabuf *bmp;
+ struct lpfc_bsg_menlo *menlo;
+ unsigned long flags;
+ struct menlo_response *menlo_resp;
+ int rc = 0;
+
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ dd_data = cmdiocbq->context1;
+ if (!dd_data) {
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ return;
+ }
+
+ menlo = &dd_data->context_un.menlo;
+ job = menlo->set_job;
+ job->dd_data = NULL; /* so timeout handler does not reply */
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
+ if (cmdiocbq->context2 && rspiocbq)
+ memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
+ &rspiocbq->iocb, sizeof(IOCB_t));
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
+ bmp = menlo->bmp;
+ rspiocbq = menlo->rspiocbq;
+ rsp = &rspiocbq->iocb;
+
+ pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+ job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+ /* Always return the xri; in the case of a menlo download it is used
+ * to allow the data to be sent as a continuation of the exchange.
+ */
+ menlo_resp = (struct menlo_response *)
+ job->reply->reply_data.vendor_reply.vendor_rsp;
+ menlo_resp->xri = rsp->ulpContext;
+ if (rsp->ulpStatus) {
+ if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+ switch (rsp->un.ulpWord[4] & 0xff) {
+ case IOERR_SEQUENCE_TIMEOUT:
+ rc = -ETIMEDOUT;
+ break;
+ case IOERR_INVALID_RPI:
+ rc = -EFAULT;
+ break;
+ default:
+ rc = -EACCES;
+ break;
+ }
+ } else
+ rc = -EACCES;
+ } else
+ job->reply->reply_payload_rcv_len =
+ rsp->un.genreq64.bdl.bdeSize;
+
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ lpfc_sli_release_iocbq(phba, rspiocbq);
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
+ kfree(bmp);
+ kfree(dd_data);
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ /* complete the job back to userspace */
+ job->job_done(job);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ return;
+}
+
+/**
+ * lpfc_menlo_cmd - send an ioctl for menlo hardware
+ * @job: fc_bsg_job to handle
+ *
+ * This function issues a gen request 64 CR iocb for all menlo cmd requests;
+ * all command completions return the xri for the command.
+ * For menlo data requests, a gen request 64 CX is used to continue the
+ * exchange supplied in the menlo request header xri field.
+ **/
+static int
+lpfc_menlo_cmd(struct fc_bsg_job *job)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *cmdiocbq, *rspiocbq;
+ IOCB_t *cmd, *rsp;
+ int rc = 0;
+ struct menlo_command *menlo_cmd;
+ struct menlo_response *menlo_resp;
+ struct lpfc_dmabuf *bmp = NULL;
+ int request_nseg;
+ int reply_nseg;
+ struct scatterlist *sgel = NULL;
+ int numbde;
+ dma_addr_t busaddr;
+ struct bsg_job_data *dd_data;
+ struct ulp_bde64 *bpl = NULL;
+
+ /* in case no data is returned, return just the return code */
+ job->reply->reply_payload_rcv_len = 0;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) +
+ sizeof(struct menlo_command)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2784 Received MENLO_CMD request below "
+ "minimum size\n");
+ rc = -ERANGE;
+ goto no_dd_data;
+ }
+
+ if (job->reply_len <
+ sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2785 Received MENLO_CMD reply below "
+ "minimum size\n");
+ rc = -ERANGE;
+ goto no_dd_data;
+ }
+
+ if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2786 Adapter does not support menlo "
+ "commands\n");
+ rc = -EPERM;
+ goto no_dd_data;
+ }
+
+ menlo_cmd = (struct menlo_command *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+
+ menlo_resp = (struct menlo_response *)
+ job->reply->reply_data.vendor_reply.vendor_rsp;
+
+ /* allocate our bsg tracking structure */
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2787 Failed allocation of dd_data\n");
+ rc = -ENOMEM;
+ goto no_dd_data;
+ }
+
+ bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!bmp) {
+ rc = -ENOMEM;
+ goto free_dd;
+ }
+
+ cmdiocbq = lpfc_sli_get_iocbq(phba);
+ if (!cmdiocbq) {
+ rc = -ENOMEM;
+ goto free_bmp;
+ }
+
+ rspiocbq = lpfc_sli_get_iocbq(phba);
+ if (!rspiocbq) {
+ rc = -ENOMEM;
+ goto free_cmdiocbq;
+ }
+
+ rsp = &rspiocbq->iocb;
+
+ bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
+ if (!bmp->virt) {
+ rc = -ENOMEM;
+ goto free_rspiocbq;
+ }
+
+ INIT_LIST_HEAD(&bmp->list);
+ bpl = (struct ulp_bde64 *) bmp->virt;
+ request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
+ job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
+ busaddr = sg_dma_address(sgel);
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bpl->tus.f.bdeSize = sg_dma_len(sgel);
+ bpl->tus.w = cpu_to_le32(bpl->tus.w);
+ bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
+ bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
+ bpl++;
+ }
+
+ reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
+ busaddr = sg_dma_address(sgel);
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+ bpl->tus.f.bdeSize = sg_dma_len(sgel);
+ bpl->tus.w = cpu_to_le32(bpl->tus.w);
+ bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
+ bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
+ bpl++;
+ }
+
+ cmd = &cmdiocbq->iocb;
+ cmd->un.genreq64.bdl.ulpIoTag32 = 0;
+ cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+ cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
+ cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ cmd->un.genreq64.bdl.bdeSize =
+ (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
+ cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
+ cmd->un.genreq64.w5.hcsw.Dfctl = 0;
+ cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
+ cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
+ cmd->ulpBdeCount = 1;
+ cmd->ulpClass = CLASS3;
+ cmd->ulpOwner = OWN_CHIP;
+ cmd->ulpLe = 1; /* Limited Edition */
+ cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+ cmdiocbq->vport = phba->pport;
+ /* We want the firmware to time out before we do */
+ cmd->ulpTimeout = MENLO_TIMEOUT - 5;
+ cmdiocbq->context3 = bmp;
+ cmdiocbq->context2 = rspiocbq;
+ cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
+ cmdiocbq->context1 = dd_data;
+ if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
+ cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
+ cmd->ulpPU = MENLO_PU; /* 3 */
+ cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
+ cmd->ulpContext = MENLO_CONTEXT; /* 0 */
+ } else {
+ cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
+ cmd->ulpPU = 1;
+ cmd->un.ulpWord[4] = 0;
+ cmd->ulpContext = menlo_cmd->xri;
+ }
+
+ dd_data->type = TYPE_MENLO;
+ dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
+ dd_data->context_un.menlo.rspiocbq = rspiocbq;
+ dd_data->context_un.menlo.set_job = job;
+ dd_data->context_un.menlo.bmp = bmp;
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
+ MENLO_TIMEOUT - 5);
+ if (rc == IOCB_SUCCESS)
+ return 0; /* done for now */
+
+ /* iocb failed so cleanup */
+ pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+ job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+
+free_rspiocbq:
+ lpfc_sli_release_iocbq(phba, rspiocbq);
+free_cmdiocbq:
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
+free_bmp:
+ kfree(bmp);
+free_dd:
+ kfree(dd_data);
+no_dd_data:
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ job->dd_data = NULL;
+ return rc;
+}
+/**
* lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
* @job: fc_bsg_job to handle
**/
@@ -2669,6 +2985,10 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
case LPFC_BSG_VENDOR_MBOX:
rc = lpfc_bsg_mbox_cmd(job);
break;
+ case LPFC_BSG_VENDOR_MENLO_CMD:
+ case LPFC_BSG_VENDOR_MENLO_DATA:
+ rc = lpfc_menlo_cmd(job);
+ break;
default:
rc = -EINVAL;
job->reply->reply_payload_rcv_len = 0;
@@ -2728,6 +3048,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
struct lpfc_bsg_event *evt;
struct lpfc_bsg_iocb *iocb;
struct lpfc_bsg_mbox *mbox;
+ struct lpfc_bsg_menlo *menlo;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
struct bsg_job_data *dd_data;
unsigned long flags;
@@ -2775,6 +3096,17 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
job->job_done(job);
break;
+ case TYPE_MENLO:
+ menlo = &dd_data->context_un.menlo;
+ cmdiocb = menlo->cmdiocbq;
+ /* hint to completion handler that the job timed out */
+ job->reply->result = -EAGAIN;
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ /* this will call our completion handler */
+ spin_lock_irq(&phba->hbalock);
+ lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
+ spin_unlock_irq(&phba->hbalock);
+ break;
default:
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
break;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 6c8f87e39b98..5bc630819b9e 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -31,6 +31,8 @@
#define LPFC_BSG_VENDOR_DIAG_TEST 5
#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
#define LPFC_BSG_VENDOR_MBOX 7
+#define LPFC_BSG_VENDOR_MENLO_CMD 8
+#define LPFC_BSG_VENDOR_MENLO_DATA 9
struct set_ct_event {
uint32_t command;
@@ -96,3 +98,13 @@ struct dfc_mbox_req {
uint8_t mbOffset;
};
+/* Used for menlo command or menlo data. The xri is only used for menlo data */
+struct menlo_command {
+ uint32_t cmd;
+ uint32_t xri;
+};
+
+struct menlo_response {
+ uint32_t xri; /* return the xri of the iocb exchange */
+};
+
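[Editor's aside, a hedged sketch of the caller's side assuming only the two structs above: a menlo request carries the vendor command code in cmd, and xri is filled in (from a prior command's menlo_response) only for MENLO_DATA continuations.]

	static void example_fill_menlo(struct menlo_command *mc, uint32_t data_xri)
	{
		if (data_xri) {
			mc->cmd = LPFC_BSG_VENDOR_MENLO_DATA;
			mc->xri = data_xri;	/* continue the prior exchange */
		} else {
			mc->cmd = LPFC_BSG_VENDOR_MENLO_CMD;
			mc->xri = 0;		/* unused for command requests */
		}
	}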
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 6f0fb51eb461..5087c4211b43 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -63,6 +63,7 @@ void lpfc_linkdown_port(struct lpfc_vport *);
void lpfc_port_link_failure(struct lpfc_vport *);
void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
void lpfc_retry_pport_discovery(struct lpfc_hba *);
void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -221,6 +222,10 @@ void lpfc_unregister_fcf_rescan(struct lpfc_hba *);
void lpfc_unregister_unused_fcf(struct lpfc_hba *);
int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *);
void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *);
+void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *);
+uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *);
+int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t);
+void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t);
int lpfc_mem_alloc(struct lpfc_hba *, int align);
void lpfc_mem_free(struct lpfc_hba *);
@@ -385,7 +390,7 @@ void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
void lpfc_start_fdiscs(struct lpfc_hba *phba);
struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t);
-
+struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t);
#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
#define HBA_EVENT_RSCN 5
#define HBA_EVENT_LINK_UP 2
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 2a40a6eabf4d..ee980bd66869 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -771,6 +771,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp = cmdiocb->context1;
struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
struct serv_parm *sp;
+ uint16_t fcf_index;
int rc;
/* Check to see if link went down during discovery */
@@ -788,6 +789,54 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
vport->port_state);
if (irsp->ulpStatus) {
+ /*
+ * In case of FIP mode, perform round robin FCF failover
+ * due to new FCF discovery
+ */
+ if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
+ (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
+ "2611 FLOGI failed on registered "
+ "FCF record fcf_index:%d, trying "
+ "to perform round robin failover\n",
+ phba->fcf.current_rec.fcf_indx);
+ fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
+ if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
+ /*
+ * Exhausted the eligible FCF record list;
+ * fall back to retrying FLOGI on the
+ * current FCF record.
+ */
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_FIP | LOG_ELS,
+ "2760 FLOGI exhausted FCF "
+ "round robin failover list, "
+ "retry FLOGI on the current "
+ "registered FCF index:%d\n",
+ phba->fcf.current_rec.fcf_indx);
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+ spin_unlock_irq(&phba->hbalock);
+ } else {
+ rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba,
+ fcf_index);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_FIP | LOG_ELS,
+ "2761 FLOGI round "
+ "robin FCF failover "
+ "read FCF failed "
+ "rc:x%x, fcf_index:"
+ "%d\n", rc,
+ phba->fcf.current_rec.fcf_indx);
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+ spin_unlock_irq(&phba->hbalock);
+ } else
+ goto out;
+ }
+ }
+
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb))
goto out;
@@ -806,9 +855,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
/* FLOGI failure */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "0100 FLOGI failure Data: x%x x%x "
- "x%x\n",
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpTimeout);
goto flogifail;
@@ -842,8 +890,18 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
else
rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
- if (!rc)
+ if (!rc) {
+ /* Mark the FCF discovery process done */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | LOG_ELS,
+ "2769 FLOGI successful on FCF record: "
+ "current_fcf_index:x%x, terminate FCF "
+ "round robin failover process\n",
+ phba->fcf.current_rec.fcf_indx);
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+ spin_unlock_irq(&phba->hbalock);
goto out;
+ }
}
flogifail:
@@ -1409,6 +1467,10 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
/* PLOGI failed */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
+ ndlp->nlp_DID, irsp->ulpStatus,
+ irsp->un.ulpWord[4]);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
if (lpfc_error_lost_link(irsp))
rc = NLP_STE_FREED_NODE;
@@ -1577,6 +1639,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
/* PRLI failed */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "2754 PRLI failure DID:%06X Status:x%x/x%x\n",
+ ndlp->nlp_DID, irsp->ulpStatus,
+ irsp->un.ulpWord[4]);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
if (lpfc_error_lost_link(irsp))
goto out;
@@ -1860,6 +1926,10 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
/* ADISC failed */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
+ ndlp->nlp_DID, irsp->ulpStatus,
+ irsp->un.ulpWord[4]);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
if (!lpfc_error_lost_link(irsp))
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
@@ -2009,6 +2079,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* ELS command is being retried */
goto out;
/* LOGO failed */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
+ ndlp->nlp_DID, irsp->ulpStatus,
+ irsp->un.ulpWord[4]);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
if (lpfc_error_lost_link(irsp))
goto out;
@@ -5989,7 +6063,12 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (phba->sli_rev < LPFC_SLI_REV4)
lpfc_issue_fabric_reglogin(vport);
else {
- lpfc_start_fdiscs(phba);
+ /*
+ * If the physical port is instantiated using
+ * FDISC, do not start vport discovery.
+ */
+ if (vport->port_state != LPFC_FDISC)
+ lpfc_start_fdiscs(phba);
lpfc_do_scr_ns_plogi(phba, vport);
}
} else
@@ -6055,21 +6134,18 @@ mbox_err_exit:
}
/**
- * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
+ * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
* @phba: pointer to lpfc hba data structure.
*
- * This routine abort all pending discovery commands and
- * start a timer to retry FLOGI for the physical port
- * discovery.
+ * This routine cancels the retry delay timers for all the vports.
**/
void
-lpfc_retry_pport_discovery(struct lpfc_hba *phba)
+lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
{
struct lpfc_vport **vports;
struct lpfc_nodelist *ndlp;
- struct Scsi_Host *shost;
- int i;
uint32_t link_state;
+ int i;
/* Treat this failure as linkdown for all vports */
link_state = phba->link_state;
@@ -6087,13 +6163,30 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba)
}
lpfc_destroy_vport_work_array(phba, vports);
}
+}
+
+/**
+ * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine aborts all pending discovery commands and
+ * starts a timer to retry FLOGI for the physical port
+ * discovery.
+ **/
+void
+lpfc_retry_pport_discovery(struct lpfc_hba *phba)
+{
+ struct lpfc_nodelist *ndlp;
+ struct Scsi_Host *shost;
+
+ /* Cancel all the vports' retry delay timers */
+ lpfc_cancel_all_vport_retry_delay_timer(phba);
/* If fabric require FLOGI, then re-instantiate physical login */
ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
if (!ndlp)
return;
-
shost = lpfc_shost_from_vport(phba->pport);
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
spin_lock_irq(shost->host_lock);
@@ -6219,7 +6312,8 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_mbx_unreg_vpi(vport);
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
- vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
spin_unlock_irq(shost->host_lock);
}
@@ -6797,21 +6891,27 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
unsigned long iflag = 0;
- spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
list_for_each_entry_safe(sglq_entry, sglq_next,
&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
if (sglq_entry->sli4_xritag == xri) {
list_del(&sglq_entry->list);
- spin_unlock_irqrestore(
- &phba->sli4_hba.abts_sgl_list_lock,
- iflag);
- spin_lock_irqsave(&phba->hbalock, iflag);
-
list_add_tail(&sglq_entry->list,
&phba->sli4_hba.lpfc_sgl_list);
+ sglq_entry->state = SGL_FREED;
+ spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
return;
}
}
- spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag);
+ spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
+ sglq_entry = __lpfc_get_active_sglq(phba, xri);
+ if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return;
+ }
+ sglq_entry->state = SGL_XRI_ABORTED;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return;
}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2359d0bfb734..c555e3b7f202 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1481,8 +1481,6 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
int
lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
{
- LPFC_MBOXQ_t *mbox;
- int rc;
/*
* If the Link is up and no FCoE events while in the
* FCF discovery, no need to restart FCF discovery.
@@ -1491,86 +1489,70 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
(phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
return 0;
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2768 Pending link or FCF event during current "
+ "handling of the previous event: link_state:x%x, "
+ "evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
+ phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
+ phba->fcoe_eventtag);
+
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
spin_unlock_irq(&phba->hbalock);
- if (phba->link_state >= LPFC_LINK_UP)
- lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
- else {
+ if (phba->link_state >= LPFC_LINK_UP) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+ "2780 Restart FCF table scan due to "
+ "pending FCF event:evt_tag_at_scan:x%x, "
+ "evt_tag_current:x%x\n",
+ phba->fcoe_eventtag_at_fcf_scan,
+ phba->fcoe_eventtag);
+ lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
+ } else {
/*
* Do not continue FCF discovery and clear FCF_DISC_INPROGRESS
* flag
*/
spin_lock_irq(&phba->hbalock);
phba->hba_flag &= ~FCF_DISC_INPROGRESS;
- phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
+ phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
spin_unlock_irq(&phba->hbalock);
}
+ /* Unregister the currently registered FCF if required */
if (unreg_fcf) {
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_REGISTERED;
spin_unlock_irq(&phba->hbalock);
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox) {
- lpfc_printf_log(phba, KERN_ERR,
- LOG_DISCOVERY|LOG_MBOX,
- "2610 UNREG_FCFI mbox allocation failed\n");
- return 1;
- }
- lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
- mbox->vport = phba->pport;
- mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
- "2611 UNREG_FCFI issue mbox failed\n");
- mempool_free(mbox, phba->mbox_mem_pool);
- }
+ lpfc_sli4_unregister_fcf(phba);
}
-
return 1;
}
/**
- * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
+ * lpfc_sli4_fcf_rec_mbox_parse - parse non-embedded fcf record mailbox command
* @phba: pointer to lpfc hba data structure.
* @mboxq: pointer to mailbox object.
+ * @next_fcf_index: pointer to holder of next fcf index.
*
- * This function iterate through all the fcf records available in
- * HBA and choose the optimal FCF record for discovery. After finding
- * the FCF for discovery it register the FCF record and kick start
- * discovery.
- * If FCF_IN_USE flag is set in currently used FCF, the routine try to
- * use a FCF record which match fabric name and mac address of the
- * currently used FCF record.
- * If the driver support only one FCF, it will try to use the FCF record
- * used by BOOT_BIOS.
+ * This routine parses the non-embedded fcf mailbox command by performing
+ * the necessary error checking, non-embedded read FCF record mailbox
+ * command SGE parsing, and endianness swapping.
+ *
+ * Returns the pointer to the new FCF record in the non-embedded mailbox
+ * command DMA memory if successful, otherwise NULL.
*/
-void
-lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+static struct fcf_record *
+lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
+ uint16_t *next_fcf_index)
{
void *virt_addr;
dma_addr_t phys_addr;
- uint8_t *bytep;
struct lpfc_mbx_sge sge;
struct lpfc_mbx_read_fcf_tbl *read_fcf;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
struct fcf_record *new_fcf_record;
- uint32_t boot_flag, addr_mode;
- uint32_t next_fcf_index;
- struct lpfc_fcf_rec *fcf_rec = NULL;
- unsigned long iflags;
- uint16_t vlan_id;
- int rc;
-
- /* If there is pending FCoE event restart FCF table scan */
- if (lpfc_check_pending_fcoe_event(phba, 0)) {
- lpfc_sli4_mbox_cmd_free(phba, mboxq);
- return;
- }
/* Get the first SGE entry from the non-embedded DMA memory. This
* routine only uses a single SGE.
@@ -1581,59 +1563,183 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"2524 Failed to get the non-embedded SGE "
"virtual address\n");
- goto out;
+ return NULL;
}
virt_addr = mboxq->sge_array->addr[0];
shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
- shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
- &shdr->response);
- /*
- * The FCF Record was read and there is no reason for the driver
- * to maintain the FCF record data or memory. Instead, just need
- * to book keeping the FCFIs can be used.
- */
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status) {
- if (shdr_status == STATUS_FCF_TABLE_EMPTY) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ if (shdr_status == STATUS_FCF_TABLE_EMPTY)
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
"2726 READ_FCF_RECORD Indicates empty "
"FCF table.\n");
- } else {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ else
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
"2521 READ_FCF_RECORD mailbox failed "
- "with status x%x add_status x%x, mbx\n",
- shdr_status, shdr_add_status);
- }
- goto out;
+ "with status x%x add_status x%x, "
+ "mbx\n", shdr_status, shdr_add_status);
+ return NULL;
}
- /* Interpreting the returned information of FCF records */
+
+ /* Interpreting the returned information of the FCF record */
read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
sizeof(struct lpfc_mbx_read_fcf_tbl));
- next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
-
+ *next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
new_fcf_record = (struct fcf_record *)(virt_addr +
sizeof(struct lpfc_mbx_read_fcf_tbl));
lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
sizeof(struct fcf_record));
- bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
+ return new_fcf_record;
+}
+
+/**
+ * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_record: pointer to the fcf record.
+ * @vlan_id: the lowest vlan identifier associated to this fcf record.
+ * @next_fcf_index: the index to the next fcf record in hba's fcf table.
+ *
+ * This routine logs the detailed FCF record if LOG_FIP logging is
+ * enabled.
+ **/
+static void
+lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
+ struct fcf_record *fcf_record,
+ uint16_t vlan_id,
+ uint16_t next_fcf_index)
+{
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2764 READ_FCF_RECORD:\n"
+ "\tFCF_Index : x%x\n"
+ "\tFCF_Avail : x%x\n"
+ "\tFCF_Valid : x%x\n"
+ "\tFIP_Priority : x%x\n"
+ "\tMAC_Provider : x%x\n"
+ "\tLowest VLANID : x%x\n"
+ "\tFCF_MAC Addr : x%x:%x:%x:%x:%x:%x\n"
+ "\tFabric_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n"
+ "\tSwitch_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n"
+ "\tNext_FCF_Index: x%x\n",
+ bf_get(lpfc_fcf_record_fcf_index, fcf_record),
+ bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
+ bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
+ fcf_record->fip_priority,
+ bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
+ vlan_id,
+ bf_get(lpfc_fcf_record_mac_0, fcf_record),
+ bf_get(lpfc_fcf_record_mac_1, fcf_record),
+ bf_get(lpfc_fcf_record_mac_2, fcf_record),
+ bf_get(lpfc_fcf_record_mac_3, fcf_record),
+ bf_get(lpfc_fcf_record_mac_4, fcf_record),
+ bf_get(lpfc_fcf_record_mac_5, fcf_record),
+ bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
+ bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
+ bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
+ bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
+ bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
+ bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
+ bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
+ bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
+ bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
+ bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
+ bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
+ bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
+ bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
+ bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
+ bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
+ bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
+ next_fcf_index);
+}
+
+/**
+ * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object.
+ *
+ * This function iterates through all the fcf records available in
+ * the HBA and chooses the optimal FCF record for discovery. After
+ * finding the FCF for discovery it registers the FCF record and
+ * kick-starts discovery.
+ * If the FCF_IN_USE flag is set in the currently used FCF, the routine
+ * tries to use an FCF record which matches the fabric name and mac
+ * address of the currently used FCF record.
+ * If the driver supports only one FCF, it will try to use the FCF record
+ * used by BOOT_BIOS.
+ */
+void
+lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ struct fcf_record *new_fcf_record;
+ uint32_t boot_flag, addr_mode;
+ uint16_t fcf_index, next_fcf_index;
+ struct lpfc_fcf_rec *fcf_rec = NULL;
+ uint16_t vlan_id;
+ int rc;
+
+ /* If there is pending FCoE event restart FCF table scan */
+ if (lpfc_check_pending_fcoe_event(phba, 0)) {
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ return;
+ }
+
+ /* Parse the FCF record from the non-embedded mailbox command */
+ new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
+ &next_fcf_index);
+ if (!new_fcf_record) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+ "2765 Mailbox command READ_FCF_RECORD "
+ "failed to retrieve a FCF record.\n");
+ /* Let next new FCF event trigger fast failover */
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ return;
+ }
+
+ /* Check the FCF record against the connection list */
rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
&addr_mode, &vlan_id);
+
+ /* Log the FCF record information if turned on */
+ lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
+ next_fcf_index);
+
/*
* If the fcf record does not match with connect list entries
- * read the next entry.
+ * read the next entry; otherwise, this is an eligible FCF
+ * record for round robin FCF failover.
*/
- if (!rc)
+ if (!rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+ "2781 FCF record fcf_index:x%x failed FCF "
+ "connection list check, fcf_avail:x%x, "
+ "fcf_valid:x%x\n",
+ bf_get(lpfc_fcf_record_fcf_index,
+ new_fcf_record),
+ bf_get(lpfc_fcf_record_fcf_avail,
+ new_fcf_record),
+ bf_get(lpfc_fcf_record_fcf_valid,
+ new_fcf_record));
goto read_next_fcf;
+ } else {
+ fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
+ rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
+ if (rc)
+ goto read_next_fcf;
+ }
+
/*
* If this is not the first FCF discovery of the HBA, use last
* FCF record for the discovery. The condition that a rescan
* matches the in-use FCF record: fabric name, switch name, mac
* address, and vlan_id.
*/
- spin_lock_irqsave(&phba->hbalock, iflags);
+ spin_lock_irq(&phba->hbalock);
if (phba->fcf.fcf_flag & FCF_IN_USE) {
if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name,
new_fcf_record) &&
@@ -1649,8 +1755,9 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
/* If in fast failover, mark it's completed */
- phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV |
+ FCF_DISCOVERY);
+ spin_unlock_irq(&phba->hbalock);
goto out;
}
/*
@@ -1661,7 +1768,7 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
* next candidate.
*/
if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ spin_unlock_irq(&phba->hbalock);
goto read_next_fcf;
}
}
@@ -1669,14 +1776,9 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
* Update on failover FCF record only if it's in FCF fast-failover
* period; otherwise, update on current FCF record.
*/
- if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
- /* Fast FCF failover only to the same fabric name */
- if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name,
- new_fcf_record))
- fcf_rec = &phba->fcf.failover_rec;
- else
- goto read_next_fcf;
- } else
+ if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
+ fcf_rec = &phba->fcf.failover_rec;
+ else
fcf_rec = &phba->fcf.current_rec;
if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
@@ -1689,7 +1791,7 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
/* Choose this FCF record */
__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
addr_mode, vlan_id, BOOT_ENABLE);
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ spin_unlock_irq(&phba->hbalock);
goto read_next_fcf;
}
/*
@@ -1698,20 +1800,19 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
* the next FCF record.
*/
if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ spin_unlock_irq(&phba->hbalock);
goto read_next_fcf;
}
/*
* If the new hba FCF record has lower priority value
* than the driver FCF record, use the new record.
*/
- if (lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record) &&
- (new_fcf_record->fip_priority < fcf_rec->priority)) {
+ if (new_fcf_record->fip_priority < fcf_rec->priority) {
/* Choose this FCF record */
__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
addr_mode, vlan_id, 0);
}
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ spin_unlock_irq(&phba->hbalock);
goto read_next_fcf;
}
/*
@@ -1724,7 +1825,7 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
BOOT_ENABLE : 0));
phba->fcf.fcf_flag |= FCF_AVAILABLE;
}
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ spin_unlock_irq(&phba->hbalock);
goto read_next_fcf;
read_next_fcf:
@@ -1740,9 +1841,22 @@ read_next_fcf:
* FCF scan inprogress, and do nothing
*/
if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
- spin_lock_irqsave(&phba->hbalock, iflags);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+ "2782 No suitable FCF record "
+ "found during this round of "
+ "post FCF rediscovery scan: "
+ "fcf_evt_tag:x%x, fcf_index: "
+ "x%x\n",
+ phba->fcoe_eventtag_at_fcf_scan,
+ bf_get(lpfc_fcf_record_fcf_index,
+ new_fcf_record));
+ /*
+ * Let next new FCF event trigger fast
+ * failover
+ */
+ spin_lock_irq(&phba->hbalock);
phba->hba_flag &= ~FCF_DISC_INPROGRESS;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ spin_unlock_irq(&phba->hbalock);
return;
}
/*
@@ -1754,16 +1868,23 @@ read_next_fcf:
* record.
*/
- /* unregister the current in-use FCF record */
+ /* Unregister the current in-use FCF record */
lpfc_unregister_fcf(phba);
- /* replace in-use record with the new record */
+
+ /* Replace in-use record with the new record */
memcpy(&phba->fcf.current_rec,
&phba->fcf.failover_rec,
sizeof(struct lpfc_fcf_rec));
/* mark the FCF fast failover completed */
- spin_lock_irqsave(&phba->hbalock, iflags);
+ spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
+ spin_unlock_irq(&phba->hbalock);
+ /*
+ * Set up the initial registered FCF index for FLOGI
+ * round robin FCF failover.
+ */
+ phba->fcf.fcf_rr_init_indx =
+ phba->fcf.failover_rec.fcf_indx;
/* Register to the new FCF record */
lpfc_register_fcf(phba);
} else {
@@ -1776,13 +1897,25 @@ read_next_fcf:
return;
/*
* Otherwise, initial scan or post linkdown rescan,
- * register with the best fit FCF record found so
- * far through the scanning process.
+ * register with the best FCF record found so far
+ * through the FCF scanning process.
+ */
+
+ /* mark the initial FCF discovery completed */
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
+ spin_unlock_irq(&phba->hbalock);
+ /*
+ * Set up the initial registered FCF index for FLOGI
+ * round robin FCF failover
*/
+ phba->fcf.fcf_rr_init_indx =
+ phba->fcf.current_rec.fcf_indx;
+ /* Register to the new FCF record */
lpfc_register_fcf(phba);
}
} else
- lpfc_sli4_read_fcf_record(phba, next_fcf_index);
+ lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
return;
out:
@@ -1793,6 +1926,141 @@ out:
}
/**
+ * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl hdler
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object.
+ *
+ * This is the callback function for the FLOGI failure round robin FCF
+ * failover read FCF record mailbox command from the eligible FCF record
+ * bmask for performing the failover. If the FCF read back is not
+ * valid/available, it falls back to retrying FLOGI on the currently
+ * registered FCF. Otherwise, if the FCF read back is valid and available,
+ * it will set the newly read FCF record to the failover FCF record,
+ * unregister the currently registered FCF record, copy the failover FCF
+ * record to the current FCF record, and then register the current FCF
+ * record before proceeding to try FLOGI on the new failover FCF.
+ */
+void
+lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ struct fcf_record *new_fcf_record;
+ uint32_t boot_flag, addr_mode;
+ uint16_t next_fcf_index;
+ uint16_t current_fcf_index;
+ uint16_t vlan_id;
+
+ /* If link state is not up, stop the round robin failover process */
+ if (phba->link_state < LPFC_LINK_UP) {
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ return;
+ }
+
+ /* Parse the FCF record from the non-embedded mailbox command */
+ new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
+ &next_fcf_index);
+ if (!new_fcf_record) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+ "2766 Mailbox command READ_FCF_RECORD "
+ "failed to retrieve a FCF record.\n");
+ goto out;
+ }
+
+ /* Get the needed parameters from FCF record */
+ lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
+ &addr_mode, &vlan_id);
+
+ /* Log the FCF record information if turned on */
+ lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
+ next_fcf_index);
+
+ /* Upload new FCF record to the failover FCF record */
+ spin_lock_irq(&phba->hbalock);
+ __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
+ new_fcf_record, addr_mode, vlan_id,
+ (boot_flag ? BOOT_ENABLE : 0));
+ spin_unlock_irq(&phba->hbalock);
+
+ current_fcf_index = phba->fcf.current_rec.fcf_indx;
+
+ /* Unregister the current in-use FCF record */
+ lpfc_unregister_fcf(phba);
+
+ /* Replace in-use record with the new record */
+ memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
+ sizeof(struct lpfc_fcf_rec));
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2783 FLOGI round robin FCF failover from FCF "
+ "(index:x%x) to FCF (index:x%x).\n",
+ current_fcf_index,
+ bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
+
+out:
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ lpfc_register_fcf(phba);
+}
+
+/**
+ * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object.
+ *
+ * This is the completion handler for the read FCF record mailbox command
+ * used to update the eligible FCF bmask for FLOGI failure round robin FCF
+ * failover when a new FCF event occurs. If the FCF record read back is
+ * valid/available and passes the connection list check, the handler sets
+ * the bmask bit of that eligible FCF record for round robin failover.
+ */
+void
+lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ struct fcf_record *new_fcf_record;
+ uint32_t boot_flag, addr_mode;
+ uint16_t fcf_index, next_fcf_index;
+ uint16_t vlan_id;
+ int rc;
+
+ /* If link state is not up, no need to proceed */
+ if (phba->link_state < LPFC_LINK_UP)
+ goto out;
+
+ /* If FCF discovery period is over, no need to proceed */
+ if (phba->fcf.fcf_flag & FCF_DISCOVERY)
+ goto out;
+
+ /* Parse the FCF record from the non-embedded mailbox command */
+ new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
+ &next_fcf_index);
+ if (!new_fcf_record) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2767 Mailbox command READ_FCF_RECORD "
+ "failed to retrieve a FCF record.\n");
+ goto out;
+ }
+
+ /* Check the connection list for eligibility */
+ rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
+ &addr_mode, &vlan_id);
+
+ /* Log the FCF record information if turned on */
+ lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
+ next_fcf_index);
+
+ if (!rc)
+ goto out;
+
+ /* Update the eligible FCF record index bmask */
+ fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
+ rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
+
+out:
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+}
+
+/**
* lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
* @phba: pointer to lpfc hba data structure.
* @mboxq: pointer to mailbox data structure.
@@ -2024,8 +2292,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
int rc;
struct fcf_record *fcf_record;
- sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-
spin_lock_irq(&phba->hbalock);
switch (la->UlnkSpeed) {
case LA_1GHZ_LINK:
@@ -2117,18 +2383,24 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
spin_unlock_irq(&phba->hbalock);
lpfc_linkup(phba);
- if (sparam_mbox) {
- lpfc_read_sparam(phba, sparam_mbox, 0);
- sparam_mbox->vport = vport;
- sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
- rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(sparam_mbox, phba->mbox_mem_pool);
- goto out;
- }
+ sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!sparam_mbox)
+ goto out;
+
+ rc = lpfc_read_sparam(phba, sparam_mbox, 0);
+ if (rc) {
+ mempool_free(sparam_mbox, phba->mbox_mem_pool);
+ goto out;
+ }
+ sparam_mbox->vport = vport;
+ sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
+ rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free(sparam_mbox, phba->mbox_mem_pool);
+ goto out;
}
if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
@@ -2186,10 +2458,20 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
spin_unlock_irq(&phba->hbalock);
return;
}
+ /* This is the initial FCF discovery scan */
+ phba->fcf.fcf_flag |= FCF_INIT_DISC;
spin_unlock_irq(&phba->hbalock);
- rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
- if (rc)
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+ "2778 Start FCF table scan at linkup\n");
+
+ rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
+ LPFC_FCOE_FCF_GET_FIRST);
+ if (rc) {
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
+ spin_unlock_irq(&phba->hbalock);
goto out;
+ }
}
return;
@@ -3379,8 +3661,12 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
- if (ndlp->nlp_flag & NLP_RPI_VALID)
+ if (ndlp->nlp_flag & NLP_RPI_VALID) {
+ /* The mempool_alloc in lpfc_unreg_rpi may sleep */
+ spin_unlock_irq(shost->host_lock);
lpfc_unreg_rpi(vports[i], ndlp);
+ spin_lock_irq(shost->host_lock);
+ }
}
spin_unlock_irq(shost->host_lock);
}
@@ -4756,6 +5042,7 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
return;
/* Reset HBA FCF states after successful unregister FCF */
phba->fcf.fcf_flag = 0;
+ phba->fcf.current_rec.flag = 0;
/*
* If driver is not unloading, check if there is any other
@@ -4765,13 +5052,21 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
(phba->link_state < LPFC_LINK_UP))
return;
- rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
+ /* This is considered the initial FCF discovery scan */
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag |= FCF_INIT_DISC;
+ spin_unlock_irq(&phba->hbalock);
+ rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
- if (rc)
+ if (rc) {
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
+ spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
"2553 lpfc_unregister_unused_fcf failed "
"to read FCF record HBA state x%x\n",
phba->pport->port_state);
+ }
}
/**
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index d29ac7c317d9..ea44239eeb33 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -350,7 +350,12 @@ lpfc_config_port_post(struct lpfc_hba *phba)
mb = &pmb->u.mb;
/* Get login parameters for NID. */
- lpfc_read_sparam(phba, pmb, 0);
+ rc = lpfc_read_sparam(phba, pmb, 0);
+ if (rc) {
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -ENOMEM;
+ }
+
pmb->vport = vport;
if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -359,7 +364,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
mb->mbxCommand, mb->mbxStatus);
phba->link_state = LPFC_HBA_ERROR;
mp = (struct lpfc_dmabuf *) pmb->context1;
- mempool_free( pmb, phba->mbox_mem_pool);
+ mempool_free(pmb, phba->mbox_mem_pool);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
return -EIO;
@@ -544,7 +549,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
mempool_free(pmb, phba->mbox_mem_pool);
return -EIO;
}
- } else if (phba->cfg_suppress_link_up == 0) {
+ } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
lpfc_init_link(phba, pmb, phba->cfg_topology,
phba->cfg_link_speed);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -571,6 +576,11 @@ lpfc_config_port_post(struct lpfc_hba *phba)
}
/* MBOX buffer will be freed in mbox compl */
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ phba->link_state = LPFC_HBA_ERROR;
+ return -ENOMEM;
+ }
+
lpfc_config_async(phba, pmb, LPFC_ELS_RING);
pmb->mbox_cmpl = lpfc_config_async_cmpl;
pmb->vport = phba->pport;
@@ -588,6 +598,11 @@ lpfc_config_port_post(struct lpfc_hba *phba)
/* Get Option rom version */
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ phba->link_state = LPFC_HBA_ERROR;
+ return -ENOMEM;
+ }
+
lpfc_dump_wakeup_param(phba, pmb);
pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
pmb->vport = phba->pport;
@@ -652,7 +667,7 @@ lpfc_hba_init_link(struct lpfc_hba *phba)
mempool_free(pmb, phba->mbox_mem_pool);
return -EIO;
}
- phba->cfg_suppress_link_up = 0;
+ phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
return 0;
}
@@ -807,6 +822,8 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
LIST_HEAD(aborts);
int ret;
unsigned long iflag = 0;
+ struct lpfc_sglq *sglq_entry = NULL;
+
ret = lpfc_hba_down_post_s3(phba);
if (ret)
return ret;
@@ -822,6 +839,10 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
* list.
*/
spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
+ list_for_each_entry(sglq_entry,
+ &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
+ sglq_entry->state = SGL_FREED;
+
list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
&phba->sli4_hba.lpfc_sgl_list);
spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
@@ -2178,8 +2199,10 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
- /* Clear pending FCF rediscovery wait timer */
- phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
+ /* Clear pending FCF rediscovery wait and failover in progress flags */
+ phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND |
+ FCF_DEAD_DISC |
+ FCF_ACVL_DISC);
/* Now, try to stop the timer */
del_timer(&phba->fcf.redisc_wait);
}
@@ -2576,6 +2599,14 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
init_timer(&vport->els_tmofunc);
vport->els_tmofunc.function = lpfc_els_timeout;
vport->els_tmofunc.data = (unsigned long)vport;
+ if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
+ phba->menlo_flag |= HBA_MENLO_SUPPORT;
+ /* check for menlo minimum sg count */
+ if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) {
+ phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
+ shost->sg_tablesize = phba->cfg_sg_seg_cnt;
+ }
+ }
error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
if (error)
@@ -2912,6 +2943,9 @@ lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
/* FCF rediscovery event to worker thread */
phba->fcf.fcf_flag |= FCF_REDISC_EVT;
spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2776 FCF rediscover wait timer expired, post "
+ "a worker thread event for FCF table scan\n");
/* wake up worker thread */
lpfc_worker_wake_up(phba);
}
@@ -3183,6 +3217,68 @@ out_free_pmb:
}
/**
+ * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
+ * @vport: pointer to vport data structure.
+ *
+ * This routine is to perform Clear Virtual Link (CVL) on a vport in
+ * response to a CVL event.
+ *
+ * Return the pointer to the ndlp with the vport if successful, otherwise
+ * return NULL.
+ **/
+static struct lpfc_nodelist *
+lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp;
+ struct Scsi_Host *shost;
+ struct lpfc_hba *phba;
+
+ if (!vport)
+ return NULL;
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp)
+ return NULL;
+ phba = vport->phba;
+ if (!phba)
+ return NULL;
+ if (phba->pport->port_state <= LPFC_FLOGI)
+ return NULL;
+ /* If virtual link is not yet instantiated ignore CVL */
+ if (vport->port_state <= LPFC_FDISC)
+ return NULL;
+ shost = lpfc_shost_from_vport(vport);
+ if (!shost)
+ return NULL;
+ lpfc_linkdown_port(vport);
+ lpfc_cleanup_pending_mbox(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_VPORT_CVL_RCVD;
+ spin_unlock_irq(shost->host_lock);
+
+ return ndlp;
+}
+
+/**
+ * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
+ * @vport: pointer to lpfc hba data structure.
+ *
+ * This routine is to perform Clear Virtual Link (CVL) on all vports in
+ * response to a FCF dead event.
+ **/
+static void
+lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
+{
+ struct lpfc_vport **vports;
+ int i;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports)
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+ lpfc_sli4_perform_vport_cvl(vports[i]);
+ lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
* lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
* @phba: pointer to lpfc hba data structure.
* @acqe_link: pointer to the async fcoe completion queue entry.
@@ -3198,7 +3294,6 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
struct lpfc_vport *vport;
struct lpfc_nodelist *ndlp;
struct Scsi_Host *shost;
- uint32_t link_state;
int active_vlink_present;
struct lpfc_vport **vports;
int i;
@@ -3208,10 +3303,11 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
switch (event_type) {
case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "2546 New FCF found index 0x%x tag 0x%x\n",
- acqe_fcoe->index,
- acqe_fcoe->event_tag);
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+ "2546 New FCF found/FCF parameter modified event: "
+ "evt_tag:x%x, fcf_index:x%x\n",
+ acqe_fcoe->event_tag, acqe_fcoe->index);
+
spin_lock_irq(&phba->hbalock);
if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
(phba->hba_flag & FCF_DISC_INPROGRESS)) {
@@ -3222,6 +3318,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
spin_unlock_irq(&phba->hbalock);
break;
}
+
if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
/*
* If fast FCF failover rescan event is pending,
@@ -3232,12 +3329,33 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
}
spin_unlock_irq(&phba->hbalock);
- /* Read the FCF table and re-discover SAN. */
- rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
+ if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
+ !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
+ /*
+ * During period of FCF discovery, read the FCF
+ * table record indexed by the event to update
+ * FCF round robin failover eligible FCF bmask.
+ */
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
+ LOG_DISCOVERY,
+ "2779 Read new FCF record with "
+ "fcf_index:x%x for updating FCF "
+ "round robin failover bmask\n",
+ acqe_fcoe->index);
+ rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
+ }
+
+ /* Otherwise, scan the entire FCF table and re-discover SAN */
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+ "2770 Start FCF table scan due to new FCF "
+ "event: evt_tag:x%x, fcf_index:x%x\n",
+ acqe_fcoe->event_tag, acqe_fcoe->index);
+ rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
+ LPFC_FCOE_FCF_GET_FIRST);
if (rc)
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "2547 Read FCF record failed 0x%x\n",
- rc);
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+ "2547 Issue FCF scan read FCF mailbox "
+ "command failed 0x%x\n", rc);
break;
case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
@@ -3248,47 +3366,63 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
break;
case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
"2549 FCF disconnected from network index 0x%x"
" tag 0x%x\n", acqe_fcoe->index,
acqe_fcoe->event_tag);
/* If the event is not for currently used fcf do nothing */
if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
break;
- /*
- * Currently, driver support only one FCF - so treat this as
- * a link down, but save the link state because we don't want
- * it to be changed to Link Down unless it is already down.
+ /* We request the port to rediscover the entire FCF table
+ * for a fast recovery from the case that the current FCF
+ * record is no longer valid, unless we are already in the
+ * middle of the FCF failover process.
*/
- link_state = phba->link_state;
- lpfc_linkdown(phba);
- phba->link_state = link_state;
- /* Unregister FCF if no devices connected to it */
- lpfc_unregister_unused_fcf(phba);
+ spin_lock_irq(&phba->hbalock);
+ if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+ spin_unlock_irq(&phba->hbalock);
+ /* Update FLOGI FCF failover eligible FCF bmask */
+ lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index);
+ break;
+ }
+ /* Mark the fast failover process in progress */
+ phba->fcf.fcf_flag |= FCF_DEAD_DISC;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+ "2771 Start FCF fast failover process due to "
+ "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
+ "\n", acqe_fcoe->event_tag, acqe_fcoe->index);
+ rc = lpfc_sli4_redisc_fcf_table(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+ LOG_DISCOVERY,
+ "2772 Issue FCF rediscover mabilbox "
+ "command failed, fail through to FCF "
+ "dead event\n");
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
+ spin_unlock_irq(&phba->hbalock);
+ /*
+ * As a last resort, fail over by treating this
+ * as a link down for FCF registration.
+ */
+ lpfc_sli4_fcf_dead_failthrough(phba);
+ } else
+ /* Handling fast FCF failover to a DEAD FCF event
+ * is considered equivalent to receiving CVL on all
+ * vports.
+ */
+ lpfc_sli4_perform_all_vport_cvl(phba);
break;
case LPFC_FCOE_EVENT_TYPE_CVL:
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
"2718 Clear Virtual Link Received for VPI 0x%x"
" tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
vport = lpfc_find_vport_by_vpid(phba,
acqe_fcoe->index - phba->vpi_base);
- if (!vport)
- break;
- ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ ndlp = lpfc_sli4_perform_vport_cvl(vport);
if (!ndlp)
break;
- shost = lpfc_shost_from_vport(vport);
- if (phba->pport->port_state <= LPFC_FLOGI)
- break;
- /* If virtual link is not yet instantiated ignore CVL */
- if (vport->port_state <= LPFC_FDISC)
- break;
-
- lpfc_linkdown_port(vport);
- lpfc_cleanup_pending_mbox(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_VPORT_CVL_RCVD;
- spin_unlock_irq(shost->host_lock);
active_vlink_present = 0;
vports = lpfc_create_vport_work_array(phba);
@@ -3311,6 +3445,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
* re-instantiate the Vlink using FDISC.
*/
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+ shost = lpfc_shost_from_vport(vport);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
spin_unlock_irq(shost->host_lock);
@@ -3321,15 +3456,38 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
* Otherwise, we request port to rediscover
* the entire FCF table for a fast recovery
* from the possible case that the current FCF
- * is no longer valid.
+ * is no longer valid, unless we are already
+ * in the FCF failover process.
*/
+ spin_lock_irq(&phba->hbalock);
+ if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+ spin_unlock_irq(&phba->hbalock);
+ break;
+ }
+ /* Mark the fast failover process in progress */
+ phba->fcf.fcf_flag |= FCF_ACVL_DISC;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
+ LOG_DISCOVERY,
+ "2773 Start FCF fast failover due "
+ "to CVL event: evt_tag:x%x\n",
+ acqe_fcoe->event_tag);
rc = lpfc_sli4_redisc_fcf_table(phba);
- if (rc)
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+ LOG_DISCOVERY,
+ "2774 Issue FCF rediscover "
+ "mabilbox command failed, "
+ "through to CVL event\n");
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
+ spin_unlock_irq(&phba->hbalock);
/*
* Last resort will be to retry on the
* currently registered FCF entry.
*/
lpfc_retry_pport_discovery(phba);
+ }
}
break;
default:
@@ -3426,11 +3584,14 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
/* Scan FCF table from the first entry to re-discover SAN */
- rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+ "2777 Start FCF table scan after FCF "
+ "rediscovery quiescent period over\n");
+ rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
if (rc)
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "2747 Post FCF rediscovery read FCF record "
- "failed 0x%x\n", rc);
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+ "2747 Issue FCF scan read FCF mailbox "
+ "command failed 0x%x\n", rc);
}
/**
@@ -3722,6 +3883,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
struct lpfc_mqe *mqe;
+ int longs;
/* Before proceed, wait for POST done and device ready */
rc = lpfc_sli4_post_status_check(phba);
@@ -3898,13 +4060,24 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
goto out_free_active_sgl;
}
+ /* Allocate eligible FCF bmask memory for FCF round robin failover */
+ longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
+ phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!phba->fcf.fcf_rr_bmask) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2759 Failed allocate memory for FCF round "
+ "robin failover bmask\n");
+ goto out_remove_rpi_hdrs;
+ }
+
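The bmask sizing above rounds the table dimension up to whole unsigned longs before zero-allocating the storage. A minimal userspace sketch of the same arithmetic (BITS_PER_ULONG and bits_to_longs are illustrative stand-ins for the kernel's BITS_PER_LONG and BITS_TO_LONGS(); this is not driver code):

```c
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_ULONG (CHAR_BIT * sizeof(unsigned long))

/* Round a bit count up to whole unsigned longs, like BITS_TO_LONGS() */
static size_t bits_to_longs(size_t nbits)
{
	return (nbits + BITS_PER_ULONG - 1) / BITS_PER_ULONG;
}

int main(void)
{
	size_t longs = bits_to_longs(32);   /* 32-entry FCF table */
	/* calloc gives zeroed storage, analogous to kzalloc() here */
	unsigned long *bmask = calloc(longs, sizeof(unsigned long));

	if (!bmask)
		return 1;
	printf("%zu unsigned long(s) back a 32-bit bmask\n", longs);
	free(bmask);
	return 0;
}
```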
phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
phba->cfg_fcp_eq_count), GFP_KERNEL);
if (!phba->sli4_hba.fcp_eq_hdl) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2572 Failed allocate memory for fast-path "
"per-EQ handle array\n");
- goto out_remove_rpi_hdrs;
+ goto out_free_fcf_rr_bmask;
}
phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
@@ -3957,6 +4130,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
out_free_fcp_eq_hdl:
kfree(phba->sli4_hba.fcp_eq_hdl);
+out_free_fcf_rr_bmask:
+ kfree(phba->fcf.fcf_rr_bmask);
out_remove_rpi_hdrs:
lpfc_sli4_remove_rpi_hdrs(phba);
out_free_active_sgl:
@@ -4002,6 +4177,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
lpfc_sli4_remove_rpi_hdrs(phba);
lpfc_sli4_remove_rpis(phba);
+ /* Free eligible FCF index bmask */
+ kfree(phba->fcf.fcf_rr_bmask);
+
/* Free the ELS sgl list */
lpfc_free_active_sgl(phba);
lpfc_free_sgl_list(phba);
@@ -4397,6 +4575,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
/* The list order is used by later block SGL registration */
spin_lock_irq(&phba->hbalock);
+ sglq_entry->state = SGL_FREED;
list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
phba->sli4_hba.total_sglq_bufs++;
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 954ba57970a3..bb59e9273126 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -35,6 +35,7 @@
#define LOG_VPORT 0x00004000 /* NPIV events */
#define LOF_SECURITY 0x00008000 /* Security events */
#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */
+#define LOG_FIP 0x00020000 /* FIP events */
#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 6c4dce1a30ca..1e61ae3bc4eb 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1748,7 +1748,7 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
}
/**
- * lpfc_sli4_mbx_read_fcf_record - Allocate and construct read fcf mbox cmd
+ * lpfc_sli4_mbx_read_fcf_rec - Allocate and construct read fcf mbox cmd
* @phba: pointer to lpfc hba data structure.
* @fcf_index: index to fcf table.
*
@@ -1759,9 +1759,9 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
* NULL.
**/
int
-lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *phba,
- struct lpfcMboxq *mboxq,
- uint16_t fcf_index)
+lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba,
+ struct lpfcMboxq *mboxq,
+ uint16_t fcf_index)
{
void *virt_addr;
dma_addr_t phys_addr;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 483fb74bc592..b16bb2c9978b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -620,23 +620,40 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
struct lpfc_scsi_buf *psb, *next_psb;
unsigned long iflag = 0;
+ struct lpfc_iocbq *iocbq;
+ int i;
- spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
list_for_each_entry_safe(psb, next_psb,
&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
if (psb->cur_iocbq.sli4_xritag == xri) {
list_del(&psb->list);
psb->exch_busy = 0;
psb->status = IOSTAT_SUCCESS;
- spin_unlock_irqrestore(
- &phba->sli4_hba.abts_scsi_buf_list_lock,
- iflag);
+ spin_unlock(
+ &phba->sli4_hba.abts_scsi_buf_list_lock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_release_scsi_buf_s4(phba, psb);
return;
}
}
- spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
- iflag);
+ spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ for (i = 1; i <= phba->sli.last_iotag; i++) {
+ iocbq = phba->sli.iocbq_lookup[i];
+
+ if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
+ (iocbq->iocb_flag & LPFC_IO_LIBDFC))
+ continue;
+ if (iocbq->sli4_xritag != xri)
+ continue;
+ psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
+ psb->exch_busy = 0;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return;
+
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
}
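The reworked abort handler now takes phba->hbalock first and nests the abort-list lock inside it, releasing in reverse order so every path sees the same lock ordering. A hedged pthread sketch of that discipline (names are illustrative; the driver uses spinlocks with IRQ save/restore, not mutexes):

```c
#include <pthread.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER; /* ~hbalock */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER; /* ~abort-list lock */

static void walk_aborted_list(void)
{
	pthread_mutex_lock(&outer);
	pthread_mutex_lock(&inner);
	/* ... search the aborted-buffer list here ... */
	pthread_mutex_unlock(&inner);
	/* still under the outer lock: safe to scan the iotag table */
	pthread_mutex_unlock(&outer);
}

int main(void)
{
	walk_aborted_list();
	return 0;
}
```

Consistent ordering (outer before inner, everywhere) is what prevents two such paths from deadlocking against each other.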
/**
@@ -1006,6 +1023,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
struct scatterlist *sgel = NULL;
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
+ struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
dma_addr_t physaddr;
@@ -1056,6 +1074,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
physaddr = sg_dma_address(sgel);
if (phba->sli_rev == 3 &&
!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
+ !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
nseg <= LPFC_EXT_DATA_BDE_COUNT) {
data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
data_bde->tus.f.bdeSize = sg_dma_len(sgel);
@@ -1082,7 +1101,8 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
* explicitly reinitialized since all iocb memory resources are reused.
*/
if (phba->sli_rev == 3 &&
- !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
+ !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
+ !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
/*
* The extended IOCB format can only fit 3 BDE or a BPL.
@@ -1107,6 +1127,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
} else {
iocb_cmd->un.fcpi64.bdl.bdeSize =
((num_bde + 2) * sizeof(struct ulp_bde64));
+ iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
}
fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
@@ -2079,8 +2100,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
if (resp_info & RSP_LEN_VALID) {
rsplen = be32_to_cpu(fcprsp->rspRspLen);
- if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
- (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
+ if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"2719 Invalid response length: "
"tgt x%x lun x%x cmnd x%x rsplen x%x\n",
@@ -2090,6 +2110,17 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
host_status = DID_ERROR;
goto out;
}
+ if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "2757 Protocol failure detected during "
+ "processing of FCP I/O op: "
+ "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n",
+ cmnd->device->id,
+ cmnd->device->lun, cmnd->cmnd[0],
+ fcprsp->rspInfo3);
+ host_status = DID_ERROR;
+ goto out;
+ }
}
if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 35e3b96d4e07..fe6660ca6452 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -494,7 +494,7 @@ __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
*
* Returns sglq pointer = success, NULL = Failure.
**/
-static struct lpfc_sglq *
+struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
uint16_t adj_xri;
@@ -526,6 +526,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba)
return NULL;
adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
+ sglq->state = SGL_ALLOCATED;
return sglq;
}
@@ -580,15 +581,18 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
else
sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
if (sglq) {
- if (iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) {
+ if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
+ (sglq->state != SGL_XRI_ABORTED)) {
spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
iflag);
list_add(&sglq->list,
&phba->sli4_hba.lpfc_abts_els_sgl_list);
spin_unlock_irqrestore(
&phba->sli4_hba.abts_sgl_list_lock, iflag);
- } else
+ } else {
+ sglq->state = SGL_FREED;
list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
+ }
}
@@ -2258,41 +2262,56 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
spin_unlock_irqrestore(&phba->hbalock,
iflag);
}
- if ((phba->sli_rev == LPFC_SLI_REV4) &&
- (saveq->iocb_flag & LPFC_EXCHANGE_BUSY)) {
- /* Set cmdiocb flag for the exchange
- * busy so sgl (xri) will not be
- * released until the abort xri is
- * received from hba, clear the
- * LPFC_DRIVER_ABORTED bit in case
- * it was driver initiated abort.
- */
- spin_lock_irqsave(&phba->hbalock,
- iflag);
- cmdiocbp->iocb_flag &=
- ~LPFC_DRIVER_ABORTED;
- cmdiocbp->iocb_flag |=
- LPFC_EXCHANGE_BUSY;
- spin_unlock_irqrestore(&phba->hbalock,
- iflag);
- cmdiocbp->iocb.ulpStatus =
- IOSTAT_LOCAL_REJECT;
- cmdiocbp->iocb.un.ulpWord[4] =
- IOERR_ABORT_REQUESTED;
- /*
- * For SLI4, irsiocb contains NO_XRI
- * in sli_xritag, it shall not affect
- * releasing sgl (xri) process.
- */
- saveq->iocb.ulpStatus =
- IOSTAT_LOCAL_REJECT;
- saveq->iocb.un.ulpWord[4] =
- IOERR_SLI_ABORTED;
- spin_lock_irqsave(&phba->hbalock,
- iflag);
- saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
- spin_unlock_irqrestore(&phba->hbalock,
- iflag);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (saveq->iocb_flag &
+ LPFC_EXCHANGE_BUSY) {
+ /* Set cmdiocb flag for the
+ * exchange busy so sgl (xri)
+ * will not be released until
+ * the abort xri is received
+ * from hba.
+ */
+ spin_lock_irqsave(
+ &phba->hbalock, iflag);
+ cmdiocbp->iocb_flag |=
+ LPFC_EXCHANGE_BUSY;
+ spin_unlock_irqrestore(
+ &phba->hbalock, iflag);
+ }
+ if (cmdiocbp->iocb_flag &
+ LPFC_DRIVER_ABORTED) {
+ /*
+ * Clear LPFC_DRIVER_ABORTED
+ * bit in case it was driver
+ * initiated abort.
+ */
+ spin_lock_irqsave(
+ &phba->hbalock, iflag);
+ cmdiocbp->iocb_flag &=
+ ~LPFC_DRIVER_ABORTED;
+ spin_unlock_irqrestore(
+ &phba->hbalock, iflag);
+ cmdiocbp->iocb.ulpStatus =
+ IOSTAT_LOCAL_REJECT;
+ cmdiocbp->iocb.un.ulpWord[4] =
+ IOERR_ABORT_REQUESTED;
+ /*
+ * For SLI4, irsiocb contains
+ * NO_XRI in sli_xritag, it
+ * shall not affect releasing
+ * sgl (xri) process.
+ */
+ saveq->iocb.ulpStatus =
+ IOSTAT_LOCAL_REJECT;
+ saveq->iocb.un.ulpWord[4] =
+ IOERR_SLI_ABORTED;
+ spin_lock_irqsave(
+ &phba->hbalock, iflag);
+ saveq->iocb_flag |=
+ LPFC_DELAY_MEM_FREE;
+ spin_unlock_irqrestore(
+ &phba->hbalock, iflag);
+ }
}
}
(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
@@ -2515,14 +2534,16 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
&rspiocbq);
- if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
- spin_unlock_irqrestore(&phba->hbalock,
- iflag);
- (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
- &rspiocbq);
- spin_lock_irqsave(&phba->hbalock,
- iflag);
- }
+ if (unlikely(!cmdiocbq))
+ break;
+ if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+ cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ if (cmdiocbq->iocb_cmpl) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
+ &rspiocbq);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ }
break;
case LPFC_UNSOL_IOCB:
spin_unlock_irqrestore(&phba->hbalock, iflag);
@@ -3091,6 +3112,12 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
/* Check to see if any errors occurred during init */
if ((status & HS_FFERM) || (i >= 20)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2751 Adapter failed to restart, "
+ "status reg x%x, FW Data: A8 x%x AC x%x\n",
+ status,
+ readl(phba->MBslimaddr + 0xa8),
+ readl(phba->MBslimaddr + 0xac));
phba->link_state = LPFC_HBA_ERROR;
retval = 1;
}
@@ -3278,6 +3305,9 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
if (retval != MBX_SUCCESS) {
if (retval != MBX_BUSY)
mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2752 KILL_BOARD command failed retval %d\n",
+ retval);
spin_lock_irq(&phba->hbalock);
phba->link_flag &= ~LS_IGNORE_ERATT;
spin_unlock_irq(&phba->hbalock);
@@ -4035,7 +4065,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
lpfc_sli_hba_setup_error:
phba->link_state = LPFC_HBA_ERROR;
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0445 Firmware initialization failed\n");
return rc;
}
@@ -4388,7 +4418,13 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
/* Read the port's service parameters. */
- lpfc_read_sparam(phba, mboxq, vport->vpi);
+ rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
+ if (rc) {
+ phba->link_state = LPFC_HBA_ERROR;
+ rc = -ENOMEM;
+ goto out_free_vpd;
+ }
+
mboxq->vport = vport;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
mp = (struct lpfc_dmabuf *) mboxq->context1;
@@ -4483,6 +4519,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Post receive buffers to the device */
lpfc_sli4_rb_setup(phba);
+ /* Reset HBA FCF states after HBA reset */
+ phba->fcf.fcf_flag = 0;
+ phba->fcf.current_rec.flag = 0;
+
/* Start the ELS watchdog timer */
mod_timer(&vport->els_tmofunc,
jiffies + HZ * (phba->fc_ratov * 2));
@@ -7436,6 +7476,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
{
wait_queue_head_t *pdone_q;
unsigned long iflags;
+ struct lpfc_scsi_buf *lpfc_cmd;
spin_lock_irqsave(&phba->hbalock, iflags);
cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
@@ -7443,6 +7484,14 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
&rspiocbq->iocb, sizeof(IOCB_t));
+ /* Set the exchange busy flag for task management commands */
+ if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
+ !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
+ lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
+ cur_iocbq);
+ lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
+ }
+
pdone_q = cmdiocbq->context_un.wait_queue;
if (pdone_q)
wake_up(pdone_q);
@@ -9061,6 +9110,12 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
/* Fake the irspiocb and copy necessary response information */
lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
+ if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ }
+
/* Pass the cmd_iocb and the rsp state to the upper layer */
(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
}
@@ -11941,15 +11996,19 @@ lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
}
/**
- * lpfc_sli4_read_fcf_record - Read the driver's default FCF Record.
+ * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
* @phba: pointer to lpfc hba data structure.
* @fcf_index: FCF table entry offset.
*
- * This routine is invoked to read up to @fcf_num of FCF record from the
- * device starting with the given @fcf_index.
+ * This routine is invoked to scan the entire FCF table by reading FCF
+ * records and processing them one at a time, starting from @fcf_index,
+ * for initial FCF discovery or fast FCF failover rediscovery.
+ *
+ * Return 0 if the mailbox command is submitted successfully, non-zero
+ * otherwise.
**/
int
-lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
+lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
int rc = 0, error;
LPFC_MBOXQ_t *mboxq;
@@ -11961,17 +12020,17 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
"2000 Failed to allocate mbox for "
"READ_FCF cmd\n");
error = -ENOMEM;
- goto fail_fcfscan;
+ goto fail_fcf_scan;
}
/* Construct the read FCF record mailbox command */
- rc = lpfc_sli4_mbx_read_fcf_record(phba, mboxq, fcf_index);
+ rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
if (rc) {
error = -EINVAL;
- goto fail_fcfscan;
+ goto fail_fcf_scan;
}
/* Issue the mailbox command asynchronously */
mboxq->vport = phba->pport;
- mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
+ mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED)
error = -EIO;
@@ -11979,9 +12038,13 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
spin_lock_irq(&phba->hbalock);
phba->hba_flag |= FCF_DISC_INPROGRESS;
spin_unlock_irq(&phba->hbalock);
+ /* Reset FCF round robin index bmask for new scan */
+ if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
+ memset(phba->fcf.fcf_rr_bmask, 0,
+ sizeof(*phba->fcf.fcf_rr_bmask));
error = 0;
}
-fail_fcfscan:
+fail_fcf_scan:
if (error) {
if (mboxq)
lpfc_sli4_mbox_cmd_free(phba, mboxq);
@@ -11994,6 +12057,181 @@ fail_fcfscan:
}
/**
+ * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine is invoked to read an FCF record indicated by @fcf_index
+ * and to use it for FLOGI round robin FCF failover.
+ *
+ * Return 0 if the mailbox command is submitted successfully, non-zero
+ * otherwise.
+ **/
+int
+lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+ int rc = 0, error;
+ LPFC_MBOXQ_t *mboxq;
+
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
+ "2763 Failed to allocate mbox for "
+ "READ_FCF cmd\n");
+ error = -ENOMEM;
+ goto fail_fcf_read;
+ }
+ /* Construct the read FCF record mailbox command */
+ rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
+ if (rc) {
+ error = -EINVAL;
+ goto fail_fcf_read;
+ }
+ /* Issue the mailbox command asynchronously */
+ mboxq->vport = phba->pport;
+ mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ error = -EIO;
+ else
+ error = 0;
+
+fail_fcf_read:
+ if (error && mboxq)
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ return error;
+}
+
+/**
+ * lpfc_sli4_read_fcf_rec - Read hba fcf record to update eligible fcf bmask.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine is invoked to read an FCF record indicated by @fcf_index to
+ * determine whether it's eligible for FLOGI round robin failover list.
+ *
+ * Return 0 if the mailbox command is submitted successfully, non-zero
+ * otherwise.
+ **/
+int
+lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+ int rc = 0, error;
+ LPFC_MBOXQ_t *mboxq;
+
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
+ "2758 Failed to allocate mbox for "
+ "READ_FCF cmd\n");
+ error = -ENOMEM;
+ goto fail_fcf_read;
+ }
+ /* Construct the read FCF record mailbox command */
+ rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
+ if (rc) {
+ error = -EINVAL;
+ goto fail_fcf_read;
+ }
+ /* Issue the mailbox command asynchronously */
+ mboxq->vport = phba->pport;
+ mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ error = -EIO;
+ else
+ error = 0;
+
+fail_fcf_read:
+ if (error && mboxq)
+ lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ return error;
+}
+
+/**
+ * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to get the next eligible FCF record index in a round
+ * robin fashion. If the next eligible FCF record index equals the
+ * initial round robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
+ * shall be returned, otherwise, the next eligible FCF record's index
+ * shall be returned.
+ **/
+uint16_t
+lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
+{
+ uint16_t next_fcf_index;
+
+ /* Search from the currently registered FCF index */
+ next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
+ LPFC_SLI4_FCF_TBL_INDX_MAX,
+ phba->fcf.current_rec.fcf_indx);
+ /* Wrap around condition on phba->fcf.fcf_rr_bmask */
+ if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
+ next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
+ LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
+ /* Round robin failover stop condition */
+ if (next_fcf_index == phba->fcf.fcf_rr_init_indx)
+ return LPFC_FCOE_FCF_NEXT_NONE;
+
+ return next_fcf_index;
+}
+
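A standalone model may make the wrap and stop conditions of the search above easier to follow. In the hypothetical sketch below, next_bit stands in for the kernel's find_next_bit(), a 32-bit word replaces the driver's unsigned-long bmask, and TBL_MAX/NEXT_NONE mirror LPFC_SLI4_FCF_TBL_INDX_MAX and LPFC_FCOE_FCF_NEXT_NONE; it is a model of the logic, not driver code:

```c
#include <stdint.h>
#include <stdio.h>

#define TBL_MAX   32       /* mirrors LPFC_SLI4_FCF_TBL_INDX_MAX */
#define NEXT_NONE 0xFFFF   /* mirrors LPFC_FCOE_FCF_NEXT_NONE */

/* Minimal stand-in for find_next_bit(): first set bit at or after start */
static uint16_t next_bit(uint32_t mask, uint16_t start)
{
	uint16_t i;

	for (i = start; i < TBL_MAX; i++)
		if (mask & (1u << i))
			return i;
	return TBL_MAX;        /* not found */
}

static uint16_t rr_next_index(uint32_t mask, uint16_t cur, uint16_t init)
{
	uint16_t next = next_bit(mask, cur);

	if (next >= TBL_MAX)   /* wrap around to the lowest set bit */
		next = next_bit(mask, 0);
	if (next == init)      /* full cycle: no further candidates */
		return NEXT_NONE;
	return next;
}

int main(void)
{
	uint32_t eligible = (1u << 2) | (1u << 9);

	/* registered at 5 (its bit already cleared), initial index 2 */
	printf("next: 0x%x\n", rr_next_index(eligible, 5, 2));  /* 0x9 */
	printf("next: 0x%x\n", rr_next_index(eligible, 10, 2)); /* 0xffff */
	return 0;
}
```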
+/**
+ * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine sets the FCF record index into the eligible bmask for
+ * round robin failover search. It checks to make sure that the index
+ * does not go beyond the range of the driver allocated bmask dimension
+ * before setting the bit.
+ *
+ * Returns 0 if the index bit is successfully set; otherwise it returns
+ * -EINVAL.
+ **/
+int
+lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+ if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ "2610 HBA FCF index reached driver's "
+ "book keeping dimension: fcf_index:%d, "
+ "driver_bmask_max:%d\n",
+ fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
+ return -EINVAL;
+ }
+ /* Set the eligible FCF record index bmask */
+ set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
+
+ return 0;
+}
+
+/**
+ * lpfc_sli4_fcf_rr_index_clear - Clear eligible fcf record index from bmask
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine clears the FCF record index from the eligible bmask for
+ * round robin failover search. It checks to make sure that the index
+ * does not go beyond the range of the driver allocated bmask dimension
+ * before clearing the bit.
+ **/
+void
+lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+ if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ "2762 HBA FCF index goes beyond driver's "
+ "book keeping dimension: fcf_index:%d, "
+ "driver_bmask_max:%d\n",
+ fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
+ return;
+ }
+ /* Clear the eligible FCF record index bmask */
+ clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
+}
+
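The set/clear pair above validates the index against the bmask dimension before touching the bit. A minimal sketch of the same bounds-checked bookkeeping with a plain C bitmap (the kernel's set_bit()/clear_bit() additionally guarantee atomic bit operations, which this sketch does not):

```c
#include <limits.h>
#include <stdio.h>

#define TBL_MAX 32
#define BPL (CHAR_BIT * sizeof(unsigned long))

static unsigned long bmask[(TBL_MAX + BPL - 1) / BPL];

static int index_set(unsigned int idx)
{
	if (idx >= TBL_MAX)
		return -1;   /* out of bookkeeping range, like -EINVAL */
	bmask[idx / BPL] |= 1ul << (idx % BPL);
	return 0;
}

static void index_clear(unsigned int idx)
{
	if (idx >= TBL_MAX)
		return;
	bmask[idx / BPL] &= ~(1ul << (idx % BPL));
}

int main(void)
{
	printf("set 9: %d, set 40: %d\n", index_set(9), index_set(40));
	index_clear(9);
	return 0;
}
```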
+/**
* lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
* @phba: pointer to lpfc hba data structure.
*
@@ -12014,21 +12252,40 @@ lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
&redisc_fcf->header.cfg_shdr.response);
if (shdr_status || shdr_add_status) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
"2746 Requesting for FCF rediscovery failed "
"status x%x add_status x%x\n",
shdr_status, shdr_add_status);
- /*
- * Request failed, last resort to re-try current
- * registered FCF entry
- */
- lpfc_retry_pport_discovery(phba);
- } else
+ if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
+ spin_unlock_irq(&phba->hbalock);
+ /*
+ * The CVL-event-triggered FCF rediscover request
+ * failed; as a last resort, retry the currently
+ * registered FCF entry.
+ */
+ lpfc_retry_pport_discovery(phba);
+ } else {
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
+ spin_unlock_irq(&phba->hbalock);
+ /*
+ * The FCF-DEAD-event-triggered FCF rediscover
+ * request failed; as a last resort, fail over by
+ * treating this as a link down for FCF registration.
+ */
+ lpfc_sli4_fcf_dead_failthrough(phba);
+ }
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2775 Start FCF rediscovery quiescent period "
+ "wait timer before scaning FCF table\n");
/*
* Start the FCF rediscovery wait timer for the pending
* FCF before rescanning the FCF record table.
*/
lpfc_fcf_redisc_wait_start_timer(phba);
+ }
mempool_free(mbox, phba->mbox_mem_pool);
}
@@ -12047,6 +12304,9 @@ lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
int rc, length;
+ /* Cancel retry delay timers to all vports before FCF rediscover */
+ lpfc_cancel_all_vport_retry_delay_timer(phba);
+
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -12078,6 +12338,31 @@ lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function is the last-resort failover routine for the FCF DEAD
+ * event, used when the driver has failed to perform a fast FCF failover.
+ **/
+void
+lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
+{
+ uint32_t link_state;
+
+ /*
+ * Last resort as FCF DEAD event failover will treat this as
+ * a link down, but save the link state because we don't want
+ * it to be changed to Link Down unless it is already down.
+ */
+ link_state = phba->link_state;
+ lpfc_linkdown(phba);
+ phba->link_state = link_state;
+
+ /* Unregister FCF if no devices connected to it */
+ lpfc_unregister_unused_fcf(phba);
+}
+
+/**
* lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
* @phba: pointer to lpfc hba data structure.
*
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index dfcf5437d1f5..b4a639c47616 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -62,6 +62,7 @@ struct lpfc_iocbq {
#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */
#define LPFC_EXCHANGE_BUSY 0x40 /* SLI4 hba reported XB in response */
#define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */
+#define DSS_SECURITY_OP 0x100 /* security IO */
#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
#define LPFC_FIP_ELS_ID_SHIFT 14
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 86308836600f..4a35e7b9bc5b 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -153,15 +153,27 @@ struct lpfc_fcf {
#define FCF_REGISTERED 0x02 /* FCF registered with FW */
#define FCF_SCAN_DONE 0x04 /* FCF table scan done */
#define FCF_IN_USE 0x08 /* At least one discovery completed */
-#define FCF_REDISC_PEND 0x10 /* FCF rediscovery pending */
-#define FCF_REDISC_EVT 0x20 /* FCF rediscovery event to worker thread */
-#define FCF_REDISC_FOV 0x40 /* Post FCF rediscovery fast failover */
+#define FCF_INIT_DISC 0x10 /* Initial FCF discovery */
+#define FCF_DEAD_DISC 0x20 /* FCF DEAD fast FCF failover discovery */
+#define FCF_ACVL_DISC 0x40 /* All CVL fast FCF failover discovery */
+#define FCF_DISCOVERY (FCF_INIT_DISC | FCF_DEAD_DISC | FCF_ACVL_DISC)
+#define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */
+#define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */
+#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */
uint32_t addr_mode;
+ uint16_t fcf_rr_init_indx;
struct lpfc_fcf_rec current_rec;
struct lpfc_fcf_rec failover_rec;
struct timer_list redisc_wait;
+ unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */
};
+/*
+ * Maximum FCF table index; it is for driver-internal bookkeeping and
+ * just needs to be no less than the supported HBA's FCF table size.
+ */
+#define LPFC_SLI4_FCF_TBL_INDX_MAX 32
+
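FCF_DISCOVERY is the OR of the three discovery states above, so one bitwise test covers "any FCF discovery in progress". A trivial illustration (flag values copied from the definitions above):

```c
#include <stdint.h>
#include <stdio.h>

#define FCF_INIT_DISC 0x10
#define FCF_DEAD_DISC 0x20
#define FCF_ACVL_DISC 0x40
#define FCF_DISCOVERY (FCF_INIT_DISC | FCF_DEAD_DISC | FCF_ACVL_DISC)

int main(void)
{
	uint32_t fcf_flag = FCF_DEAD_DISC;

	/* single test instead of three separate flag checks */
	if (fcf_flag & FCF_DISCOVERY)
		printf("FCF failover/discovery already in progress\n");
	return 0;
}
```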
#define LPFC_REGION23_SIGNATURE "RG23"
#define LPFC_REGION23_VERSION 1
#define LPFC_REGION23_LAST_REC 0xff
@@ -431,11 +443,18 @@ enum lpfc_sge_type {
SCSI_BUFF_TYPE
};
+enum lpfc_sgl_state {
+ SGL_FREED,
+ SGL_ALLOCATED,
+ SGL_XRI_ABORTED
+};
+
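The new state field tracks a simple sglq lifecycle. A hypothetical sketch of the transitions suggested by the hunks above (the exact transition points in the driver, especially where SGL_XRI_ABORTED is set, are assumptions based on this patch alone):

```c
#include <stdio.h>

enum sgl_state { SGL_FREED, SGL_ALLOCATED, SGL_XRI_ABORTED };

static const char *name(enum sgl_state s)
{
	switch (s) {
	case SGL_FREED:       return "FREED";
	case SGL_ALLOCATED:   return "ALLOCATED";
	case SGL_XRI_ABORTED: return "XRI_ABORTED";
	}
	return "?";
}

int main(void)
{
	enum sgl_state s = SGL_FREED;  /* initialized on the free list */

	s = SGL_ALLOCATED;    /* handed out by __lpfc_sli_get_sglq() */
	s = SGL_XRI_ABORTED;  /* exchange busy: parked on the abts list */
	s = SGL_FREED;        /* abort completes: back on the free list */
	printf("final: %s\n", name(s));
	return 0;
}
```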
struct lpfc_sglq {
/* lpfc_sglqs are used in double linked lists */
struct list_head list;
struct list_head clist;
enum lpfc_sge_type buff_type; /* is this a scsi sgl */
+ enum lpfc_sgl_state state;
uint16_t iotag; /* pre-assigned IO tag */
uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
struct sli4_sge *sgl; /* pre-assigned SGL */
@@ -463,8 +482,8 @@ void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
struct lpfc_mbx_sge *);
-int lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *, struct lpfcMboxq *,
- uint16_t);
+int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *,
+ uint16_t);
void lpfc_sli4_hba_reset(struct lpfc_hba *);
struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
@@ -523,8 +542,13 @@ int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t);
uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
-int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t);
-void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_read_fcf_rec(struct lpfc_hba *, uint16_t);
+void lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli4_unregister_fcf(struct lpfc_hba *);
int lpfc_sli4_post_status_check(struct lpfc_hba *);
uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index ac276aa46fba..013deec5dae8 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.9"
+#define LPFC_DRIVER_VERSION "8.3.10"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index dc86e873102a..869f76cbc58a 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -123,7 +123,12 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
}
mb = &pmb->u.mb;
- lpfc_read_sparam(phba, pmb, vport->vpi);
+ rc = lpfc_read_sparam(phba, pmb, vport->vpi);
+ if (rc) {
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -ENOMEM;
+ }
+
/*
* Grab buffer pointer and clear context1 so we can use
* lpfc_sli_issue_box_wait
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 24223473f573..60de85091502 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -1433,6 +1433,10 @@ int osd_finalize_request(struct osd_request *or,
cdbh->command_specific_options |= or->attributes_mode;
if (or->attributes_mode == OSD_CDB_GET_ATTR_PAGE_SET_ONE) {
ret = _osd_req_finalize_attr_page(or);
+ if (ret) {
+ OSD_DEBUG("_osd_req_finalize_attr_page failed\n");
+ return ret;
+ }
} else {
/* TODO: I think that for the GET_ATTR command these 2 should
* be reversed to keep them in execution order (for embedded
diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
index bd88349b8526..2c146b44d95f 100644
--- a/drivers/scsi/raid_class.c
+++ b/drivers/scsi/raid_class.c
@@ -63,6 +63,7 @@ static int raid_match(struct attribute_container *cont, struct device *dev)
* emulated RAID devices, so start with SCSI */
struct raid_internal *i = ac_to_raid_internal(cont);
+#if defined(CONFIG_SCSI) || defined(CONFIG_SCSI_MODULE)
if (scsi_is_sdev_device(dev)) {
struct scsi_device *sdev = to_scsi_device(dev);
@@ -71,6 +72,7 @@ static int raid_match(struct attribute_container *cont, struct device *dev)
return i->f->is_raid(dev);
}
+#endif
/* FIXME: look at other subsystems too */
return 0;
}
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 79660ee3e211..1d5b72173dd8 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -1232,6 +1232,15 @@ store_fc_vport_delete(struct device *dev, struct device_attribute *attr,
{
struct fc_vport *vport = transport_class_to_vport(dev);
struct Scsi_Host *shost = vport_to_shost(vport);
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return -EBUSY;
+ }
+ vport->flags |= FC_VPORT_DELETING;
+ spin_unlock_irqrestore(shost->host_lock, flags);
fc_queue_work(shost, &vport->vport_delete_work);
return count;
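The store handler now tests the busy flags and sets FC_VPORT_DELETING inside one host_lock critical section, so the check and the mark are atomic with respect to concurrent deleters. A userspace model of that check-and-mark pattern (flag values are illustrative; F_DEL is assumed to cover the deleting/deleted states, as the FC_VPORT_DEL test above suggests):

```c
#include <pthread.h>
#include <stdio.h>

#define F_CREATING 0x01
#define F_DELETING 0x02
#define F_DELETED  0x04
#define F_DEL      (F_DELETING | F_DELETED)

struct vport {
	pthread_mutex_t lock;
	unsigned int flags;
};

/* Returns 0 if this caller won the right to delete, -1 otherwise. */
static int try_mark_deleting(struct vport *v)
{
	int ret = -1;

	pthread_mutex_lock(&v->lock);
	if (!(v->flags & (F_DEL | F_CREATING))) {
		v->flags |= F_DELETING;
		ret = 0;
	}
	pthread_mutex_unlock(&v->lock);
	return ret;
}

int main(void)
{
	struct vport v = { PTHREAD_MUTEX_INITIALIZER, 0 };
	int first = try_mark_deleting(&v);
	int second = try_mark_deleting(&v);

	/* first caller wins (0); second sees F_DELETING via F_DEL (-1) */
	printf("first: %d second: %d\n", first, second);
	return 0;
}
```

If the test and the flag set were done under separate lock acquisitions, two racing deleters could both pass the check; doing both under one critical section is what makes the second caller back off with -EBUSY here.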
@@ -1821,6 +1830,9 @@ store_fc_host_vport_delete(struct device *dev, struct device_attribute *attr,
list_for_each_entry(vport, &fc_host->vports, peers) {
if ((vport->channel == 0) &&
(vport->port_name == wwpn) && (vport->node_name == wwnn)) {
+ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
+ break;
+ vport->flags |= FC_VPORT_DELETING;
match = 1;
break;
}
@@ -3370,18 +3382,6 @@ fc_vport_terminate(struct fc_vport *vport)
unsigned long flags;
int stat;
- spin_lock_irqsave(shost->host_lock, flags);
- if (vport->flags & FC_VPORT_CREATING) {
- spin_unlock_irqrestore(shost->host_lock, flags);
- return -EBUSY;
- }
- if (vport->flags & (FC_VPORT_DEL)) {
- spin_unlock_irqrestore(shost->host_lock, flags);
- return -EALREADY;
- }
- vport->flags |= FC_VPORT_DELETING;
- spin_unlock_irqrestore(shost->host_lock, flags);
-
if (i->f->vport_delete)
stat = i->f->vport_delete(vport);
else
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 83881dfb33c0..7b75c8a2a49d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1948,7 +1948,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
{
struct request_queue *q = sdkp->disk->queue;
unsigned int sector_sz = sdkp->device->sector_size;
- const int vpd_len = 32;
+ const int vpd_len = 64;
unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
if (!buffer ||
@@ -1998,7 +1998,7 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
{
unsigned char *buffer;
u16 rot;
- const int vpd_len = 32;
+ const int vpd_len = 64;
buffer = kmalloc(vpd_len, GFP_KERNEL);