Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--   drivers/scsi/lpfc/Makefile           |    8
-rw-r--r--   drivers/scsi/lpfc/lpfc.h             |   42
-rw-r--r--   drivers/scsi/lpfc/lpfc_attr.c        |  139
-rw-r--r--   drivers/scsi/lpfc/lpfc_bsg.c         |   38
-rw-r--r--   drivers/scsi/lpfc/lpfc_crtn.h        |   15
-rw-r--r--   drivers/scsi/lpfc/lpfc_ct.c          |   49
-rw-r--r--   drivers/scsi/lpfc/lpfc_debugfs.c     |  970
-rw-r--r--   drivers/scsi/lpfc/lpfc_debugfs.h     |   60
-rw-r--r--   drivers/scsi/lpfc/lpfc_els.c         |  180
-rw-r--r--   drivers/scsi/lpfc/lpfc_hbadisc.c     |   34
-rw-r--r--   drivers/scsi/lpfc/lpfc_hw.h          |   26
-rw-r--r--   drivers/scsi/lpfc/lpfc_hw4.h         |  217
-rw-r--r--   drivers/scsi/lpfc/lpfc_init.c        |  195
-rw-r--r--   drivers/scsi/lpfc/lpfc_mbox.c        |   21
-rw-r--r--   drivers/scsi/lpfc/lpfc_nl.h          |    2
-rw-r--r--   drivers/scsi/lpfc/lpfc_nportdisc.c   |   13
-rw-r--r--   drivers/scsi/lpfc/lpfc_scsi.c        |  166
-rw-r--r--   drivers/scsi/lpfc/lpfc_scsi.h        |    2
-rw-r--r--   drivers/scsi/lpfc/lpfc_sli.c         |  756
-rw-r--r--   drivers/scsi/lpfc/lpfc_sli4.h        |   10
-rw-r--r--   drivers/scsi/lpfc/lpfc_version.h     |    4
-rw-r--r--   drivers/scsi/lpfc/lpfc_vport.c       |    4
22 files changed, 2360 insertions, 591 deletions
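
A theme that recurs in several hunks below (lpfc.h, lpfc_attr.c, lpfc_bsg.c, lpfc_sli.c) is replacing bare readl() calls with a checked helper, so that an all-ones MMIO read (what the bus returns after a surprise hot-unplug) is reported as -EIO instead of being consumed as register data. As orientation before the raw diff, here is a condensed sketch of that pattern. lpfc_readl() and the HC_R0INT_ENA/LPFC_FCP_RING macros are taken from the patch and the existing driver headers; the example_enable_fcp_ring_intr() wrapper is purely illustrative (it is not part of the patch) and the real call sites additionally hold phba->hbalock.

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Checked MMIO read: an all-ones value means the adapter was unplugged. */
static inline int lpfc_readl(void __iomem *addr, uint32_t *data)
{
	uint32_t temp = readl(addr);

	if (temp == 0xffffffff)
		return -EIO;
	*data = temp;
	return 0;
}

/*
 * Hypothetical call site (not in the patch): read-modify-write of the
 * Host Control register, bailing out instead of acting on bogus data.
 * HC_R0INT_ENA and LPFC_FCP_RING come from the lpfc driver headers.
 */
static int example_enable_fcp_ring_intr(void __iomem *hc_regaddr)
{
	uint32_t creg_val;

	if (lpfc_readl(hc_regaddr, &creg_val))
		return -EIO;
	creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
	writel(creg_val, hc_regaddr);
	readl(hc_regaddr);	/* flush posted write */
	return 0;
}

The hunks in lpfc_attr.c and lpfc_bsg.c below are essentially this conversion applied to each existing readl() call site, with the error path dropping any lock that was taken before the read.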
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile index ad05d6edb8f6..88928f00aa2d 100644 --- a/drivers/scsi/lpfc/Makefile +++ b/drivers/scsi/lpfc/Makefile @@ -1,7 +1,7 @@ #/******************************************************************* # * This file is part of the Emulex Linux Device Driver for * # * Fibre Channel Host Bus Adapters. * -# * Copyright (C) 2004-2006 Emulex. All rights reserved. * +# * Copyright (C) 2004-2011 Emulex. All rights reserved. * # * EMULEX and SLI are trademarks of Emulex. * # * www.emulex.com * # * * @@ -19,10 +19,8 @@ # *******************************************************************/ ###################################################################### -ifneq ($(GCOV),) - EXTRA_CFLAGS += -fprofile-arcs -ftest-coverage - EXTRA_CFLAGS += -O0 -endif +ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage +ccflags-$(GCOV) += -O0 obj-$(CONFIG_SCSI_LPFC) := lpfc.o diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 746dd3d7a092..60e98a62f308 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2010 Emulex. All rights reserved. * + * Copyright (C) 2004-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -325,6 +325,7 @@ struct lpfc_vport { #define FC_VPORT_CVL_RCVD 0x400000 /* VLink failed due to CVL */ #define FC_VFI_REGISTERED 0x800000 /* VFI is registered */ #define FC_FDISC_COMPLETED 0x1000000/* FDISC completed */ +#define FC_DISC_DELAYED 0x2000000/* Delay NPort discovery */ uint32_t ct_flags; #define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */ @@ -348,6 +349,8 @@ struct lpfc_vport { uint32_t fc_myDID; /* fibre channel S_ID */ uint32_t fc_prevDID; /* previous fibre channel S_ID */ + struct lpfc_name fabric_portname; + struct lpfc_name fabric_nodename; int32_t stopped; /* HBA has not been restarted since last ERATT */ uint8_t fc_linkspeed; /* Link speed after last READ_LA */ @@ -372,6 +375,7 @@ struct lpfc_vport { #define WORKER_DISC_TMO 0x1 /* vport: Discovery timeout */ #define WORKER_ELS_TMO 0x2 /* vport: ELS timeout */ #define WORKER_FDMI_TMO 0x4 /* vport: FDMI timeout */ +#define WORKER_DELAYED_DISC_TMO 0x8 /* vport: delayed discovery */ #define WORKER_MBOX_TMO 0x100 /* hba: MBOX timeout */ #define WORKER_HB_TMO 0x200 /* hba: Heart beat timeout */ @@ -382,6 +386,7 @@ struct lpfc_vport { struct timer_list fc_fdmitmo; struct timer_list els_tmofunc; + struct timer_list delayed_disc_tmo; int unreg_vpi_cmpl; @@ -534,6 +539,8 @@ struct lpfc_hba { (struct lpfc_hba *, uint32_t); int (*lpfc_hba_down_link) (struct lpfc_hba *, uint32_t); + int (*lpfc_selective_reset) + (struct lpfc_hba *); /* SLI4 specific HBA data structure */ struct lpfc_sli4_hba sli4_hba; @@ -548,6 +555,8 @@ struct lpfc_hba { #define LPFC_SLI3_CRP_ENABLED 0x08 #define LPFC_SLI3_BG_ENABLED 0x20 #define LPFC_SLI3_DSS_ENABLED 0x40 +#define LPFC_SLI4_PERFH_ENABLED 0x80 +#define LPFC_SLI4_PHWQ_ENABLED 0x100 uint32_t iocb_cmd_size; uint32_t iocb_rsp_size; @@ -655,7 +664,7 @@ struct lpfc_hba { #define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */ #define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */ #define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */ - + uint32_t cfg_enable_dss; lpfc_vpd_t vpd; /* vital 
product data */ struct pci_dev *pcidev; @@ -792,6 +801,10 @@ struct lpfc_hba { struct dentry *debug_slow_ring_trc; struct lpfc_debugfs_trc *slow_ring_trc; atomic_t slow_ring_trc_cnt; + /* iDiag debugfs sub-directory */ + struct dentry *idiag_root; + struct dentry *idiag_pci_cfg; + struct dentry *idiag_que_info; #endif /* Used for deferred freeing of ELS data buffers */ @@ -884,7 +897,18 @@ lpfc_worker_wake_up(struct lpfc_hba *phba) return; } -static inline void +static inline int +lpfc_readl(void __iomem *addr, uint32_t *data) +{ + uint32_t temp; + temp = readl(addr); + if (temp == 0xffffffff) + return -EIO; + *data = temp; + return 0; +} + +static inline int lpfc_sli_read_hs(struct lpfc_hba *phba) { /* @@ -893,15 +917,17 @@ lpfc_sli_read_hs(struct lpfc_hba *phba) */ phba->sli.slistat.err_attn_event++; - /* Save status info */ - phba->work_hs = readl(phba->HSregaddr); - phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); - phba->work_status[1] = readl(phba->MBslimaddr + 0xac); + /* Save status info and check for unplug error */ + if (lpfc_readl(phba->HSregaddr, &phba->work_hs) || + lpfc_readl(phba->MBslimaddr + 0xa8, &phba->work_status[0]) || + lpfc_readl(phba->MBslimaddr + 0xac, &phba->work_status[1])) { + return -EIO; + } /* Clear chip Host Attention error bit */ writel(HA_ERATT, phba->HAregaddr); readl(phba->HAregaddr); /* flush */ phba->pport->stopped = 1; - return; + return 0; } diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 3512abb8a587..17d789325f40 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -623,10 +623,14 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) int status = 0; int cnt = 0; int i; + int rc; init_completion(&online_compl); - lpfc_workq_post_event(phba, &status, &online_compl, + rc = lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_OFFLINE_PREP); + if (rc == 0) + return -ENOMEM; + wait_for_completion(&online_compl); if (status != 0) @@ -652,7 +656,10 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) } init_completion(&online_compl); - lpfc_workq_post_event(phba, &status, &online_compl, type); + rc = lpfc_workq_post_event(phba, &status, &online_compl, type); + if (rc == 0) + return -ENOMEM; + wait_for_completion(&online_compl); if (status != 0) @@ -671,17 +678,19 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) * * Notes: * Assumes any error from lpfc_do_offline() will be negative. + * Do not make this function static. 
* * Returns: * lpfc_do_offline() return code if not zero * -EIO reset not configured or error posting the event * zero for success **/ -static int +int lpfc_selective_reset(struct lpfc_hba *phba) { struct completion online_compl; int status = 0; + int rc; if (!phba->cfg_enable_hba_reset) return -EIO; @@ -692,8 +701,11 @@ lpfc_selective_reset(struct lpfc_hba *phba) return status; init_completion(&online_compl); - lpfc_workq_post_event(phba, &status, &online_compl, + rc = lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_ONLINE); + if (rc == 0) + return -ENOMEM; + wait_for_completion(&online_compl); if (status != 0) @@ -734,7 +746,7 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr, int status = -EINVAL; if (strncmp(buf, "selective", sizeof("selective") - 1) == 0) - status = lpfc_selective_reset(phba); + status = phba->lpfc_selective_reset(phba); if (status == 0) return strlen(buf); @@ -812,14 +824,17 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr, struct lpfc_hba *phba = vport->phba; struct completion online_compl; int status=0; + int rc; if (!phba->cfg_enable_hba_reset) return -EACCES; init_completion(&online_compl); if(strncmp(buf, "online", sizeof("online") - 1) == 0) { - lpfc_workq_post_event(phba, &status, &online_compl, + rc = lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_ONLINE); + if (rc == 0) + return -ENOMEM; wait_for_completion(&online_compl); } else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0) status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); @@ -1209,7 +1224,10 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr, if (val & ENABLE_FCP_RING_POLLING) { if ((val & DISABLE_FCP_RING_INT) && !(old_val & DISABLE_FCP_RING_INT)) { - creg_val = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &creg_val)) { + spin_unlock_irq(&phba->hbalock); + return -EINVAL; + } creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ @@ -1227,7 +1245,10 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr, spin_unlock_irq(&phba->hbalock); del_timer(&phba->fcp_poll_timer); spin_lock_irq(&phba->hbalock); - creg_val = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &creg_val)) { + spin_unlock_irq(&phba->hbalock); + return -EINVAL; + } creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ @@ -1279,6 +1300,28 @@ lpfc_fips_rev_show(struct device *dev, struct device_attribute *attr, } /** + * lpfc_dss_show - Return the current state of dss and the configured state + * @dev: class converted to a Scsi_host structure. + * @attr: device attribute, not used. + * @buf: on return contains the formatted text. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_dss_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + return snprintf(buf, PAGE_SIZE, "%s - %sOperational\n", + (phba->cfg_enable_dss) ? "Enabled" : "Disabled", + (phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ? 
+ "" : "Not "); +} + +/** * lpfc_param_show - Return a cfg attribute value in decimal * * Description: @@ -1597,13 +1640,13 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ #define LPFC_ATTR(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_param_init(name, defval, minval, maxval) #define LPFC_ATTR_R(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_param_show(name)\ lpfc_param_init(name, defval, minval, maxval)\ @@ -1611,7 +1654,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) #define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_param_show(name)\ lpfc_param_init(name, defval, minval, maxval)\ @@ -1622,7 +1665,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ #define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_param_hex_show(name)\ lpfc_param_init(name, defval, minval, maxval)\ @@ -1630,7 +1673,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) #define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_param_hex_show(name)\ lpfc_param_init(name, defval, minval, maxval)\ @@ -1641,13 +1684,13 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ #define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_vport_param_init(name, defval, minval, maxval) #define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_vport_param_show(name)\ lpfc_vport_param_init(name, defval, minval, maxval)\ @@ -1655,7 +1698,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) #define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_vport_param_show(name)\ lpfc_vport_param_init(name, defval, minval, maxval)\ @@ -1666,7 +1709,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ #define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_vport_param_hex_show(name)\ lpfc_vport_param_init(name, defval, minval, maxval)\ @@ -1674,7 +1717,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) #define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, 
S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_vport_param_hex_show(name)\ lpfc_vport_param_init(name, defval, minval, maxval)\ @@ -1718,7 +1761,7 @@ static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL); static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL); static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL); static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL); - +static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL); static char *lpfc_soft_wwn_key = "C99G71SL8032A"; @@ -1813,6 +1856,7 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr, int stat1=0, stat2=0; unsigned int i, j, cnt=count; u8 wwpn[8]; + int rc; if (!phba->cfg_enable_hba_reset) return -EACCES; @@ -1863,7 +1907,11 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr, "0463 lpfc_soft_wwpn attribute set failed to " "reinit adapter - %d\n", stat1); init_completion(&online_compl); - lpfc_workq_post_event(phba, &stat2, &online_compl, LPFC_EVT_ONLINE); + rc = lpfc_workq_post_event(phba, &stat2, &online_compl, + LPFC_EVT_ONLINE); + if (rc == 0) + return -ENOMEM; + wait_for_completion(&online_compl); if (stat2) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -1954,7 +2002,7 @@ static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\ static int lpfc_poll = 0; -module_param(lpfc_poll, int, 0); +module_param(lpfc_poll, int, S_IRUGO); MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:" " 0 - none," " 1 - poll with interrupts enabled" @@ -1964,21 +2012,21 @@ static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR, lpfc_poll_show, lpfc_poll_store); int lpfc_sli_mode = 0; -module_param(lpfc_sli_mode, int, 0); +module_param(lpfc_sli_mode, int, S_IRUGO); MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:" " 0 - auto (SLI-3 if supported)," " 2 - select SLI-2 even on SLI-3 capable HBAs," " 3 - select SLI-3"); int lpfc_enable_npiv = 1; -module_param(lpfc_enable_npiv, int, 0); +module_param(lpfc_enable_npiv, int, S_IRUGO); MODULE_PARM_DESC(lpfc_enable_npiv, "Enable NPIV functionality"); lpfc_param_show(enable_npiv); lpfc_param_init(enable_npiv, 1, 0, 1); static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL); int lpfc_enable_rrq; -module_param(lpfc_enable_rrq, int, 0); +module_param(lpfc_enable_rrq, int, S_IRUGO); MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality"); lpfc_param_show(enable_rrq); lpfc_param_init(enable_rrq, 0, 0, 1); @@ -2040,7 +2088,7 @@ static DEVICE_ATTR(txcmplq_hw, S_IRUGO, lpfc_txcmplq_hw_show, NULL); int lpfc_iocb_cnt = 2; -module_param(lpfc_iocb_cnt, int, 1); +module_param(lpfc_iocb_cnt, int, S_IRUGO); MODULE_PARM_DESC(lpfc_iocb_cnt, "Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs"); lpfc_param_show(iocb_cnt); @@ -2192,7 +2240,7 @@ static DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR, # disappear until the timer expires. Value range is [0,255]. Default # value is 30. */ -module_param(lpfc_devloss_tmo, int, 0); +module_param(lpfc_devloss_tmo, int, S_IRUGO); MODULE_PARM_DESC(lpfc_devloss_tmo, "Seconds driver will hold I/O waiting " "for a device to come back"); @@ -2302,7 +2350,7 @@ LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1, # Default value of this parameter is 1. 
*/ static int lpfc_restrict_login = 1; -module_param(lpfc_restrict_login, int, 0); +module_param(lpfc_restrict_login, int, S_IRUGO); MODULE_PARM_DESC(lpfc_restrict_login, "Restrict virtual ports login to remote initiators."); lpfc_vport_param_show(restrict_login); @@ -2473,7 +2521,7 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr, return -EINVAL; } static int lpfc_topology = 0; -module_param(lpfc_topology, int, 0); +module_param(lpfc_topology, int, S_IRUGO); MODULE_PARM_DESC(lpfc_topology, "Select Fibre Channel topology"); lpfc_param_show(topology) lpfc_param_init(topology, 0, 0, 6) @@ -2915,7 +2963,7 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr, } static int lpfc_link_speed = 0; -module_param(lpfc_link_speed, int, 0); +module_param(lpfc_link_speed, int, S_IRUGO); MODULE_PARM_DESC(lpfc_link_speed, "Select link speed"); lpfc_param_show(link_speed) @@ -3043,7 +3091,7 @@ lpfc_aer_support_store(struct device *dev, struct device_attribute *attr, } static int lpfc_aer_support = 1; -module_param(lpfc_aer_support, int, 1); +module_param(lpfc_aer_support, int, S_IRUGO); MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support"); lpfc_param_show(aer_support) @@ -3155,7 +3203,7 @@ LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1, # The value is set in milliseconds. */ static int lpfc_max_scsicmpl_time; -module_param(lpfc_max_scsicmpl_time, int, 0); +module_param(lpfc_max_scsicmpl_time, int, S_IRUGO); MODULE_PARM_DESC(lpfc_max_scsicmpl_time, "Use command completion time to control queue depth"); lpfc_vport_param_show(max_scsicmpl_time); @@ -3331,7 +3379,7 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); */ unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION; -module_param(lpfc_prot_mask, uint, 0); +module_param(lpfc_prot_mask, uint, S_IRUGO); MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask"); /* @@ -3343,9 +3391,28 @@ MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask"); # */ unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP; -module_param(lpfc_prot_guard, byte, 0); +module_param(lpfc_prot_guard, byte, S_IRUGO); MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type"); +/* + * Delay initial NPort discovery when Clean Address bit is cleared in + * FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed. + * This parameter can have value 0 or 1. + * When this parameter is set to 0, no delay is added to the initial + * discovery. + * When this parameter is set to non-zero value, initial Nport discovery is + * delayed by ra_tov seconds when Clean Address bit is cleared in FLOGI/FDISC + * accept and FCID/Fabric name/Fabric portname is changed. + * Driver always delay Nport discovery for subsequent FLOGI/FDISC completion + * when Clean Address bit is cleared in FLOGI/FDISC + * accept and FCID/Fabric name/Fabric portname is changed. + * Default value is 0. + */ +int lpfc_delay_discovery; +module_param(lpfc_delay_discovery, int, S_IRUGO); +MODULE_PARM_DESC(lpfc_delay_discovery, + "Delay NPort discovery when Clean Address bit is cleared. " + "Allowed values: 0,1."); /* * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count @@ -3437,6 +3504,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_txcmplq_hw, &dev_attr_lpfc_fips_level, &dev_attr_lpfc_fips_rev, + &dev_attr_lpfc_dss, NULL, }; @@ -4447,7 +4515,7 @@ static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL) * Description: * This function is called by the transport after the @fc_vport's symbolic name * has been changed. 
This function re-registers the symbolic name with the - * switch to propogate the change into the fabric if the vport is active. + * switch to propagate the change into the fabric if the vport is active. **/ static void lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport) @@ -4639,6 +4707,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_aer_support_init(phba, lpfc_aer_support); lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up); lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt); + phba->cfg_enable_dss = 1; return; } diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 0dd43bb91618..77b2871d96b7 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2009-2010 Emulex. All rights reserved. * + * Copyright (C) 2009-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -348,7 +348,10 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) dd_data->context_un.iocb.bmp = bmp; if (phba->cfg_poll & DISABLE_FCP_RING_INT) { - creg_val = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &creg_val)) { + rc = -EIO ; + goto free_cmdiocbq; + } creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ @@ -599,7 +602,10 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) dd_data->context_un.iocb.ndlp = ndlp; if (phba->cfg_poll & DISABLE_FCP_RING_INT) { - creg_val = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &creg_val)) { + rc = -EIO; + goto linkdown_err; + } creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ @@ -613,6 +619,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) else rc = -EIO; +linkdown_err: pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, job->request_payload.sg_cnt, DMA_TO_DEVICE); pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, @@ -1357,7 +1364,10 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, dd_data->context_un.iocb.ndlp = ndlp; if (phba->cfg_poll & DISABLE_FCP_RING_INT) { - creg_val = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &creg_val)) { + rc = -IOCB_ERROR; + goto issue_ct_rsp_exit; + } creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ @@ -1929,7 +1939,7 @@ out: * @rxxri: Receive exchange id * @len: Number of data bytes * - * This function allocates and posts a data buffer of sufficient size to recieve + * This function allocates and posts a data buffer of sufficient size to receive * an unsolicted CT command. 
**/ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, @@ -2479,16 +2489,18 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) from = (uint8_t *)dd_data->context_un.mbox.mb; job = dd_data->context_un.mbox.set_job; - size = job->reply_payload.payload_len; - job->reply->reply_payload_rcv_len = - sg_copy_from_buffer(job->reply_payload.sg_list, - job->reply_payload.sg_cnt, - from, size); - job->reply->result = 0; + if (job) { + size = job->reply_payload.payload_len; + job->reply->reply_payload_rcv_len = + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + from, size); + job->reply->result = 0; + job->dd_data = NULL; + job->job_done(job); + } dd_data->context_un.mbox.set_job = NULL; - job->dd_data = NULL; - job->job_done(job); /* need to hold the lock until we call job done to hold off * the timeout handler returning to the midlayer while * we are stillprocessing the job diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 17fde522c84a..f0b332f4eedb 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2010 Emulex. All rights reserved. * + * Copyright (C) 2004-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -53,9 +53,9 @@ void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *); void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *); void lpfc_supported_pages(struct lpfcMboxq *); -void lpfc_sli4_params(struct lpfcMboxq *); +void lpfc_pc_sli4_params(struct lpfcMboxq *); int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *); - +int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *); struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); void lpfc_cleanup_rcv_buffers(struct lpfc_vport *); void lpfc_rcv_seq_check_edtov(struct lpfc_vport *); @@ -167,6 +167,8 @@ int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t); int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int); void lpfc_fdmi_tmo(unsigned long); void lpfc_fdmi_timeout_handler(struct lpfc_vport *); +void lpfc_delayed_disc_tmo(unsigned long); +void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *); int lpfc_config_port_prep(struct lpfc_hba *); int lpfc_config_port_post(struct lpfc_hba *); @@ -252,8 +254,8 @@ uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *); void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t); void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *); - -void lpfc_reset_barrier(struct lpfc_hba * phba); +int lpfc_selective_reset(struct lpfc_hba *); +void lpfc_reset_barrier(struct lpfc_hba *); int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); int lpfc_sli_brdkill(struct lpfc_hba *); int lpfc_sli_brdreset(struct lpfc_hba *); @@ -341,6 +343,7 @@ extern struct fc_function_template lpfc_transport_functions; extern struct fc_function_template lpfc_vport_transport_functions; extern int lpfc_sli_mode; extern int lpfc_enable_npiv; +extern int lpfc_delay_discovery; int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t); int lpfc_vport_symbolic_port_name(struct lpfc_vport *, char *, size_t); @@ -423,6 +426,6 @@ int 
lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *); int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *, uint16_t, uint16_t, uint16_t); void lpfc_cleanup_wt_rrqs(struct lpfc_hba *); -void lpfc_cleanup_vports_rrqs(struct lpfc_vport *); +void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *); struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t, uint32_t); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index c004fa9a681e..d9edfd90d7ff 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1738,6 +1738,55 @@ fdmi_cmd_exit: return 1; } +/** + * lpfc_delayed_disc_tmo - Timeout handler for delayed discovery timer. + * @ptr - Context object of the timer. + * + * This function set the WORKER_DELAYED_DISC_TMO flag and wake up + * the worker thread. + **/ +void +lpfc_delayed_disc_tmo(unsigned long ptr) +{ + struct lpfc_vport *vport = (struct lpfc_vport *)ptr; + struct lpfc_hba *phba = vport->phba; + uint32_t tmo_posted; + unsigned long iflag; + + spin_lock_irqsave(&vport->work_port_lock, iflag); + tmo_posted = vport->work_port_events & WORKER_DELAYED_DISC_TMO; + if (!tmo_posted) + vport->work_port_events |= WORKER_DELAYED_DISC_TMO; + spin_unlock_irqrestore(&vport->work_port_lock, iflag); + + if (!tmo_posted) + lpfc_worker_wake_up(phba); + return; +} + +/** + * lpfc_delayed_disc_timeout_handler - Function called by worker thread to + * handle delayed discovery. + * @vport: pointer to a host virtual N_Port data structure. + * + * This function start nport discovery of the vport. + **/ +void +lpfc_delayed_disc_timeout_handler(struct lpfc_vport *vport) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + spin_lock_irq(shost->host_lock); + if (!(vport->fc_flag & FC_DISC_DELAYED)) { + spin_unlock_irq(shost->host_lock); + return; + } + vport->fc_flag &= ~FC_DISC_DELAYED; + spin_unlock_irq(shost->host_lock); + + lpfc_do_scr_ns_plogi(vport->phba, vport); +} + void lpfc_fdmi_tmo(unsigned long ptr) { diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index a80d938fafc9..3d967741c708 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2007-2009 Emulex. All rights reserved. * + * Copyright (C) 2007-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -57,8 +57,8 @@ * # mount -t debugfs none /sys/kernel/debug * * The lpfc debugfs directory hierarchy is: - * lpfc/lpfcX/vportY - * where X is the lpfc hba unique_id + * /sys/kernel/debug/lpfc/fnX/vportY + * where X is the lpfc hba function unique_id * where Y is the vport VPI on that hba * * Debugging services available per vport: @@ -82,52 +82,34 @@ * the HBA. X MUST also be a power of 2. 
*/ static int lpfc_debugfs_enable = 1; -module_param(lpfc_debugfs_enable, int, 0); +module_param(lpfc_debugfs_enable, int, S_IRUGO); MODULE_PARM_DESC(lpfc_debugfs_enable, "Enable debugfs services"); /* This MUST be a power of 2 */ static int lpfc_debugfs_max_disc_trc; -module_param(lpfc_debugfs_max_disc_trc, int, 0); +module_param(lpfc_debugfs_max_disc_trc, int, S_IRUGO); MODULE_PARM_DESC(lpfc_debugfs_max_disc_trc, "Set debugfs discovery trace depth"); /* This MUST be a power of 2 */ static int lpfc_debugfs_max_slow_ring_trc; -module_param(lpfc_debugfs_max_slow_ring_trc, int, 0); +module_param(lpfc_debugfs_max_slow_ring_trc, int, S_IRUGO); MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc, "Set debugfs slow ring trace depth"); static int lpfc_debugfs_mask_disc_trc; -module_param(lpfc_debugfs_mask_disc_trc, int, 0); +module_param(lpfc_debugfs_mask_disc_trc, int, S_IRUGO); MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc, "Set debugfs discovery trace mask"); #include <linux/debugfs.h> -/* size of output line, for discovery_trace and slow_ring_trace */ -#define LPFC_DEBUG_TRC_ENTRY_SIZE 100 - -/* nodelist output buffer size */ -#define LPFC_NODELIST_SIZE 8192 -#define LPFC_NODELIST_ENTRY_SIZE 120 - -/* dumpHBASlim output buffer size */ -#define LPFC_DUMPHBASLIM_SIZE 4096 - -/* dumpHostSlim output buffer size */ -#define LPFC_DUMPHOSTSLIM_SIZE 4096 - -/* hbqinfo output buffer size */ -#define LPFC_HBQINFO_SIZE 8192 - -struct lpfc_debug { - char *buffer; - int len; -}; - static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0); static unsigned long lpfc_debugfs_start_time = 0L; +/* iDiag */ +static struct lpfc_idiag idiag; + /** * lpfc_debugfs_disc_trc_data - Dump discovery logging to a buffer * @vport: The vport to gather the log info from. @@ -926,7 +908,7 @@ lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file) if (!debug) goto out; - /* Round to page boundry */ + /* Round to page boundary */ printk(KERN_ERR "9059 BLKGRD: %s: _dump_buf_data=0x%p\n", __func__, _dump_buf_data); debug->buffer = _dump_buf_data; @@ -956,7 +938,7 @@ lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file) if (!debug) goto out; - /* Round to page boundry */ + /* Round to page boundary */ printk(KERN_ERR "9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%s\n", __func__, _dump_buf_dif, file->f_dentry->d_name.name); debug->buffer = _dump_buf_dif; @@ -996,8 +978,6 @@ lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf, return nbytes; } - - /** * lpfc_debugfs_nodelist_open - Open the nodelist debugfs file * @inode: The inode pointer that contains a vport pointer. @@ -1099,6 +1079,7 @@ lpfc_debugfs_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; + return simple_read_from_buffer(buf, nbytes, ppos, debug->buffer, debug->len); } @@ -1137,6 +1118,776 @@ lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file) return 0; } +/* + * iDiag debugfs file access methods + */ + +/* + * iDiag PCI config space register access methods: + * + * The PCI config space register accessees of read, write, read-modify-write + * for set bits, and read-modify-write for clear bits to SLI4 PCI functions + * are provided. In the proper SLI4 PCI function's debugfs iDiag directory, + * + * /sys/kernel/debug/lpfc/fn<#>/iDiag + * + * the access is through the debugfs entry pciCfg: + * + * 1. 
For PCI config space register read access, there are two read methods: + * A) read a single PCI config space register in the size of a byte + * (8 bits), a word (16 bits), or a dword (32 bits); or B) browse through + * the 4K extended PCI config space. + * + * A) Read a single PCI config space register consists of two steps: + * + * Step-1: Set up PCI config space register read command, the command + * syntax is, + * + * echo 1 <where> <count> > pciCfg + * + * where, 1 is the iDiag command for PCI config space read, <where> is the + * offset from the beginning of the device's PCI config space to read from, + * and <count> is the size of PCI config space register data to read back, + * it will be 1 for reading a byte (8 bits), 2 for reading a word (16 bits + * or 2 bytes), or 4 for reading a dword (32 bits or 4 bytes). + * + * Setp-2: Perform the debugfs read operation to execute the idiag command + * set up in Step-1, + * + * cat pciCfg + * + * Examples: + * To read PCI device's vendor-id and device-id from PCI config space, + * + * echo 1 0 4 > pciCfg + * cat pciCfg + * + * To read PCI device's currnt command from config space, + * + * echo 1 4 2 > pciCfg + * cat pciCfg + * + * B) Browse through the entire 4K extended PCI config space also consists + * of two steps: + * + * Step-1: Set up PCI config space register browsing command, the command + * syntax is, + * + * echo 1 0 4096 > pciCfg + * + * where, 1 is the iDiag command for PCI config space read, 0 must be used + * as the offset for PCI config space register browse, and 4096 must be + * used as the count for PCI config space register browse. + * + * Step-2: Repeately issue the debugfs read operation to browse through + * the entire PCI config space registers: + * + * cat pciCfg + * cat pciCfg + * cat pciCfg + * ... + * + * When browsing to the end of the 4K PCI config space, the browse method + * shall wrap around to start reading from beginning again, and again... + * + * 2. For PCI config space register write access, it supports a single PCI + * config space register write in the size of a byte (8 bits), a word + * (16 bits), or a dword (32 bits). The command syntax is, + * + * echo 2 <where> <count> <value> > pciCfg + * + * where, 2 is the iDiag command for PCI config space write, <where> is + * the offset from the beginning of the device's PCI config space to write + * into, <count> is the size of data to write into the PCI config space, + * it will be 1 for writing a byte (8 bits), 2 for writing a word (16 bits + * or 2 bytes), or 4 for writing a dword (32 bits or 4 bytes), and <value> + * is the data to be written into the PCI config space register at the + * offset. + * + * Examples: + * To disable PCI device's interrupt assertion, + * + * 1) Read in device's PCI config space register command field <cmd>: + * + * echo 1 4 2 > pciCfg + * cat pciCfg + * + * 2) Set bit 10 (Interrupt Disable bit) in the <cmd>: + * + * <cmd> = <cmd> | (1 < 10) + * + * 3) Write the modified command back: + * + * echo 2 4 2 <cmd> > pciCfg + * + * 3. For PCI config space register set bits access, it supports a single PCI + * config space register set bits in the size of a byte (8 bits), a word + * (16 bits), or a dword (32 bits). 
The command syntax is, + * + * echo 3 <where> <count> <bitmask> > pciCfg + * + * where, 3 is the iDiag command for PCI config space set bits, <where> is + * the offset from the beginning of the device's PCI config space to set + * bits into, <count> is the size of the bitmask to set into the PCI config + * space, it will be 1 for setting a byte (8 bits), 2 for setting a word + * (16 bits or 2 bytes), or 4 for setting a dword (32 bits or 4 bytes), and + * <bitmask> is the bitmask, indicating the bits to be set into the PCI + * config space register at the offset. The logic performed to the content + * of the PCI config space register, regval, is, + * + * regval |= <bitmask> + * + * 4. For PCI config space register clear bits access, it supports a single + * PCI config space register clear bits in the size of a byte (8 bits), + * a word (16 bits), or a dword (32 bits). The command syntax is, + * + * echo 4 <where> <count> <bitmask> > pciCfg + * + * where, 4 is the iDiag command for PCI config space clear bits, <where> + * is the offset from the beginning of the device's PCI config space to + * clear bits from, <count> is the size of the bitmask to set into the PCI + * config space, it will be 1 for setting a byte (8 bits), 2 for setting + * a word(16 bits or 2 bytes), or 4 for setting a dword (32 bits or 4 + * bytes), and <bitmask> is the bitmask, indicating the bits to be cleared + * from the PCI config space register at the offset. the logic performed + * to the content of the PCI config space register, regval, is, + * + * regval &= ~<bitmask> + * + * Note, for all single register read, write, set bits, or clear bits access, + * the offset (<where>) must be aligned with the size of the data: + * + * For data size of byte (8 bits), the offset must be aligned to the byte + * boundary; for data size of word (16 bits), the offset must be aligned + * to the word boundary; while for data size of dword (32 bits), the offset + * must be aligned to the dword boundary. Otherwise, the interface will + * return the error: + * + * "-bash: echo: write error: Invalid argument". + * + * For example: + * + * echo 1 2 4 > pciCfg + * -bash: echo: write error: Invalid argument + * + * Note also, all of the numbers in the command fields for all read, write, + * set bits, and clear bits PCI config space register command fields can be + * either decimal or hex. + * + * For example, + * echo 1 0 4096 > pciCfg + * + * will be the same as + * echo 1 0 0x1000 > pciCfg + * + * And, + * echo 2 155 1 10 > pciCfg + * + * will be + * echo 2 0x9b 1 0xa > pciCfg + */ + +/** + * lpfc_idiag_cmd_get - Get and parse idiag debugfs comands from user space + * @buf: The pointer to the user space buffer. + * @nbytes: The number of bytes in the user space buffer. + * @idiag_cmd: pointer to the idiag command struct. + * + * This routine reads data from debugfs user space buffer and parses the + * buffer for getting the idiag command and arguments. The while space in + * between the set of data is used as the parsing separator. + * + * This routine returns 0 when successful, it returns proper error code + * back to the user space in error conditions. 
+ */ +static int lpfc_idiag_cmd_get(const char __user *buf, size_t nbytes, + struct lpfc_idiag_cmd *idiag_cmd) +{ + char mybuf[64]; + char *pbuf, *step_str; + int bsize, i; + + /* Protect copy from user */ + if (!access_ok(VERIFY_READ, buf, nbytes)) + return -EFAULT; + + memset(mybuf, 0, sizeof(mybuf)); + memset(idiag_cmd, 0, sizeof(*idiag_cmd)); + bsize = min(nbytes, (sizeof(mybuf)-1)); + + if (copy_from_user(mybuf, buf, bsize)) + return -EFAULT; + pbuf = &mybuf[0]; + step_str = strsep(&pbuf, "\t "); + + /* The opcode must present */ + if (!step_str) + return -EINVAL; + + idiag_cmd->opcode = simple_strtol(step_str, NULL, 0); + if (idiag_cmd->opcode == 0) + return -EINVAL; + + for (i = 0; i < LPFC_IDIAG_CMD_DATA_SIZE; i++) { + step_str = strsep(&pbuf, "\t "); + if (!step_str) + return 0; + idiag_cmd->data[i] = simple_strtol(step_str, NULL, 0); + } + return 0; +} + +/** + * lpfc_idiag_open - idiag open debugfs + * @inode: The inode pointer that contains a pointer to phba. + * @file: The file pointer to attach the file operation. + * + * Description: + * This routine is the entry point for the debugfs open file operation. It + * gets the reference to phba from the i_private field in @inode, it then + * allocates buffer for the file operation, performs the necessary PCI config + * space read into the allocated buffer according to the idiag user command + * setup, and then returns a pointer to buffer in the private_data field in + * @file. + * + * Returns: + * This function returns zero if successful. On error it will return an + * negative error value. + **/ +static int +lpfc_idiag_open(struct inode *inode, struct file *file) +{ + struct lpfc_debug *debug; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + return -ENOMEM; + + debug->i_private = inode->i_private; + debug->buffer = NULL; + file->private_data = debug; + + return 0; +} + +/** + * lpfc_idiag_release - Release idiag access file operation + * @inode: The inode pointer that contains a vport pointer. (unused) + * @file: The file pointer that contains the buffer to release. + * + * Description: + * This routine is the generic release routine for the idiag access file + * operation, it frees the buffer that was allocated when the debugfs file + * was opened. + * + * Returns: + * This function returns zero. + **/ +static int +lpfc_idiag_release(struct inode *inode, struct file *file) +{ + struct lpfc_debug *debug = file->private_data; + + /* Free the buffers to the file operation */ + kfree(debug->buffer); + kfree(debug); + + return 0; +} + +/** + * lpfc_idiag_cmd_release - Release idiag cmd access file operation + * @inode: The inode pointer that contains a vport pointer. (unused) + * @file: The file pointer that contains the buffer to release. + * + * Description: + * This routine frees the buffer that was allocated when the debugfs file + * was opened. It also reset the fields in the idiag command struct in the + * case the command is not continuous browsing of the data structure. + * + * Returns: + * This function returns zero. 
+ **/ +static int +lpfc_idiag_cmd_release(struct inode *inode, struct file *file) +{ + struct lpfc_debug *debug = file->private_data; + + /* Read PCI config register, if not read all, clear command fields */ + if ((debug->op == LPFC_IDIAG_OP_RD) && + (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD)) + if ((idiag.cmd.data[1] == sizeof(uint8_t)) || + (idiag.cmd.data[1] == sizeof(uint16_t)) || + (idiag.cmd.data[1] == sizeof(uint32_t))) + memset(&idiag, 0, sizeof(idiag)); + + /* Write PCI config register, clear command fields */ + if ((debug->op == LPFC_IDIAG_OP_WR) && + (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR)) + memset(&idiag, 0, sizeof(idiag)); + + /* Free the buffers to the file operation */ + kfree(debug->buffer); + kfree(debug); + + return 0; +} + +/** + * lpfc_idiag_pcicfg_read - idiag debugfs read pcicfg + * @file: The file pointer to read from. + * @buf: The buffer to copy the data to. + * @nbytes: The number of bytes to read. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine reads data from the @phba pci config space according to the + * idiag command, and copies to user @buf. Depending on the PCI config space + * read command setup, it does either a single register read of a byte + * (8 bits), a word (16 bits), or a dword (32 bits) or browsing through all + * registers from the 4K extended PCI config space. + * + * Returns: + * This function returns the amount of data that was read (this could be less + * than @nbytes if the end of the file was reached) or a negative error value. + **/ +static ssize_t +lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + int offset_label, offset, len = 0, index = LPFC_PCI_CFG_RD_SIZE; + int where, count; + char *pbuffer; + struct pci_dev *pdev; + uint32_t u32val; + uint16_t u16val; + uint8_t u8val; + + pdev = phba->pcidev; + if (!pdev) + return 0; + + /* This is a user read operation */ + debug->op = LPFC_IDIAG_OP_RD; + + if (!debug->buffer) + debug->buffer = kmalloc(LPFC_PCI_CFG_SIZE, GFP_KERNEL); + if (!debug->buffer) + return 0; + pbuffer = debug->buffer; + + if (*ppos) + return 0; + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) { + where = idiag.cmd.data[0]; + count = idiag.cmd.data[1]; + } else + return 0; + + /* Read single PCI config space register */ + switch (count) { + case SIZE_U8: /* byte (8 bits) */ + pci_read_config_byte(pdev, where, &u8val); + len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "%03x: %02x\n", where, u8val); + break; + case SIZE_U16: /* word (16 bits) */ + pci_read_config_word(pdev, where, &u16val); + len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "%03x: %04x\n", where, u16val); + break; + case SIZE_U32: /* double word (32 bits) */ + pci_read_config_dword(pdev, where, &u32val); + len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "%03x: %08x\n", where, u32val); + break; + case LPFC_PCI_CFG_SIZE: /* browse all */ + goto pcicfg_browse; + break; + default: + /* illegal count */ + len = 0; + break; + } + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); + +pcicfg_browse: + + /* Browse all PCI config space registers */ + offset_label = idiag.offset.last_rd; + offset = offset_label; + + /* Read PCI config space */ + len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "%03x: ", offset_label); + while (index > 0) { + pci_read_config_dword(pdev, offset, &u32val); + len += 
snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "%08x ", u32val); + offset += sizeof(uint32_t); + index -= sizeof(uint32_t); + if (!index) + len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "\n"); + else if (!(index % (8 * sizeof(uint32_t)))) { + offset_label += (8 * sizeof(uint32_t)); + len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "\n%03x: ", offset_label); + } + } + + /* Set up the offset for next portion of pci cfg read */ + idiag.offset.last_rd += LPFC_PCI_CFG_RD_SIZE; + if (idiag.offset.last_rd >= LPFC_PCI_CFG_SIZE) + idiag.offset.last_rd = 0; + + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); +} + +/** + * lpfc_idiag_pcicfg_write - Syntax check and set up idiag pcicfg commands + * @file: The file pointer to read from. + * @buf: The buffer to copy the user data from. + * @nbytes: The number of bytes to get. + * @ppos: The position in the file to start reading from. + * + * This routine get the debugfs idiag command struct from user space and + * then perform the syntax check for PCI config space read or write command + * accordingly. In the case of PCI config space read command, it sets up + * the command in the idiag command struct for the debugfs read operation. + * In the case of PCI config space write operation, it executes the write + * operation into the PCI config space accordingly. + * + * It returns the @nbytges passing in from debugfs user space when successful. + * In case of error conditions, it returns proper error code back to the user + * space. + */ +static ssize_t +lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + uint32_t where, value, count; + uint32_t u32val; + uint16_t u16val; + uint8_t u8val; + struct pci_dev *pdev; + int rc; + + pdev = phba->pcidev; + if (!pdev) + return -EFAULT; + + /* This is a user write operation */ + debug->op = LPFC_IDIAG_OP_WR; + + rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd); + if (rc) + return rc; + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) { + /* Read command from PCI config space, set up command fields */ + where = idiag.cmd.data[0]; + count = idiag.cmd.data[1]; + if (count == LPFC_PCI_CFG_SIZE) { + if (where != 0) + goto error_out; + } else if ((count != sizeof(uint8_t)) && + (count != sizeof(uint16_t)) && + (count != sizeof(uint32_t))) + goto error_out; + if (count == sizeof(uint8_t)) { + if (where > LPFC_PCI_CFG_SIZE - sizeof(uint8_t)) + goto error_out; + if (where % sizeof(uint8_t)) + goto error_out; + } + if (count == sizeof(uint16_t)) { + if (where > LPFC_PCI_CFG_SIZE - sizeof(uint16_t)) + goto error_out; + if (where % sizeof(uint16_t)) + goto error_out; + } + if (count == sizeof(uint32_t)) { + if (where > LPFC_PCI_CFG_SIZE - sizeof(uint32_t)) + goto error_out; + if (where % sizeof(uint32_t)) + goto error_out; + } + } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR || + idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST || + idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) { + /* Write command to PCI config space, read-modify-write */ + where = idiag.cmd.data[0]; + count = idiag.cmd.data[1]; + value = idiag.cmd.data[2]; + /* Sanity checks */ + if ((count != sizeof(uint8_t)) && + (count != sizeof(uint16_t)) && + (count != sizeof(uint32_t))) + goto error_out; + if (count == sizeof(uint8_t)) { + if (where > LPFC_PCI_CFG_SIZE - sizeof(uint8_t)) + goto error_out; + if (where % sizeof(uint8_t)) + goto error_out; + if 
(idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR) + pci_write_config_byte(pdev, where, + (uint8_t)value); + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) { + rc = pci_read_config_byte(pdev, where, &u8val); + if (!rc) { + u8val |= (uint8_t)value; + pci_write_config_byte(pdev, where, + u8val); + } + } + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) { + rc = pci_read_config_byte(pdev, where, &u8val); + if (!rc) { + u8val &= (uint8_t)(~value); + pci_write_config_byte(pdev, where, + u8val); + } + } + } + if (count == sizeof(uint16_t)) { + if (where > LPFC_PCI_CFG_SIZE - sizeof(uint16_t)) + goto error_out; + if (where % sizeof(uint16_t)) + goto error_out; + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR) + pci_write_config_word(pdev, where, + (uint16_t)value); + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) { + rc = pci_read_config_word(pdev, where, &u16val); + if (!rc) { + u16val |= (uint16_t)value; + pci_write_config_word(pdev, where, + u16val); + } + } + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) { + rc = pci_read_config_word(pdev, where, &u16val); + if (!rc) { + u16val &= (uint16_t)(~value); + pci_write_config_word(pdev, where, + u16val); + } + } + } + if (count == sizeof(uint32_t)) { + if (where > LPFC_PCI_CFG_SIZE - sizeof(uint32_t)) + goto error_out; + if (where % sizeof(uint32_t)) + goto error_out; + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR) + pci_write_config_dword(pdev, where, value); + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) { + rc = pci_read_config_dword(pdev, where, + &u32val); + if (!rc) { + u32val |= value; + pci_write_config_dword(pdev, where, + u32val); + } + } + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) { + rc = pci_read_config_dword(pdev, where, + &u32val); + if (!rc) { + u32val &= ~value; + pci_write_config_dword(pdev, where, + u32val); + } + } + } + } else + /* All other opecodes are illegal for now */ + goto error_out; + + return nbytes; +error_out: + memset(&idiag, 0, sizeof(idiag)); + return -EINVAL; +} + +/** + * lpfc_idiag_queinfo_read - idiag debugfs read queue information + * @file: The file pointer to read from. + * @buf: The buffer to copy the data to. + * @nbytes: The number of bytes to read. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine reads data from the @phba SLI4 PCI function queue information, + * and copies to user @buf. + * + * Returns: + * This function returns the amount of data that was read (this could be less + * than @nbytes if the end of the file was reached) or a negative error value. 
+ **/ +static ssize_t +lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + int len = 0, fcp_qidx; + char *pbuffer; + + if (!debug->buffer) + debug->buffer = kmalloc(LPFC_QUE_INFO_GET_BUF_SIZE, GFP_KERNEL); + if (!debug->buffer) + return 0; + pbuffer = debug->buffer; + + if (*ppos) + return 0; + + /* Get slow-path event queue information */ + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "Slow-path EQ information:\n"); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], EQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", + phba->sli4_hba.sp_eq->queue_id, + phba->sli4_hba.sp_eq->entry_count, + phba->sli4_hba.sp_eq->host_index, + phba->sli4_hba.sp_eq->hba_index); + + /* Get fast-path event queue information */ + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "Fast-path EQ information:\n"); + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) { + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], EQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", + phba->sli4_hba.fp_eq[fcp_qidx]->queue_id, + phba->sli4_hba.fp_eq[fcp_qidx]->entry_count, + phba->sli4_hba.fp_eq[fcp_qidx]->host_index, + phba->sli4_hba.fp_eq[fcp_qidx]->hba_index); + } + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); + + /* Get mailbox complete queue information */ + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "Mailbox CQ information:\n"); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tAssociated EQ-ID [%02d]:\n", + phba->sli4_hba.mbx_cq->assoc_qid); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], CQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", + phba->sli4_hba.mbx_cq->queue_id, + phba->sli4_hba.mbx_cq->entry_count, + phba->sli4_hba.mbx_cq->host_index, + phba->sli4_hba.mbx_cq->hba_index); + + /* Get slow-path complete queue information */ + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "Slow-path CQ information:\n"); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tAssociated EQ-ID [%02d]:\n", + phba->sli4_hba.els_cq->assoc_qid); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], CQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", + phba->sli4_hba.els_cq->queue_id, + phba->sli4_hba.els_cq->entry_count, + phba->sli4_hba.els_cq->host_index, + phba->sli4_hba.els_cq->hba_index); + + /* Get fast-path complete queue information */ + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "Fast-path CQ information:\n"); + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) { + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tAssociated EQ-ID [%02d]:\n", + phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], EQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", + phba->sli4_hba.fcp_cq[fcp_qidx]->queue_id, + phba->sli4_hba.fcp_cq[fcp_qidx]->entry_count, + phba->sli4_hba.fcp_cq[fcp_qidx]->host_index, + phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index); + } + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); + + /* Get mailbox queue information */ + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "Mailbox MQ information:\n"); + 
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tAssociated CQ-ID [%02d]:\n", + phba->sli4_hba.mbx_wq->assoc_qid); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], MQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", + phba->sli4_hba.mbx_wq->queue_id, + phba->sli4_hba.mbx_wq->entry_count, + phba->sli4_hba.mbx_wq->host_index, + phba->sli4_hba.mbx_wq->hba_index); + + /* Get slow-path work queue information */ + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "Slow-path WQ information:\n"); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tAssociated CQ-ID [%02d]:\n", + phba->sli4_hba.els_wq->assoc_qid); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], WQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", + phba->sli4_hba.els_wq->queue_id, + phba->sli4_hba.els_wq->entry_count, + phba->sli4_hba.els_wq->host_index, + phba->sli4_hba.els_wq->hba_index); + + /* Get fast-path work queue information */ + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "Fast-path WQ information:\n"); + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) { + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tAssociated CQ-ID [%02d]:\n", + phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], WQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", + phba->sli4_hba.fcp_wq[fcp_qidx]->queue_id, + phba->sli4_hba.fcp_wq[fcp_qidx]->entry_count, + phba->sli4_hba.fcp_wq[fcp_qidx]->host_index, + phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index); + } + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); + + /* Get receive queue information */ + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "Slow-path RQ information:\n"); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tAssociated CQ-ID [%02d]:\n", + phba->sli4_hba.hdr_rq->assoc_qid); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], RHQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", + phba->sli4_hba.hdr_rq->queue_id, + phba->sli4_hba.hdr_rq->entry_count, + phba->sli4_hba.hdr_rq->host_index, + phba->sli4_hba.hdr_rq->hba_index); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], RDQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", + phba->sli4_hba.dat_rq->queue_id, + phba->sli4_hba.dat_rq->entry_count, + phba->sli4_hba.dat_rq->host_index, + phba->sli4_hba.dat_rq->hba_index); + + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); +} + #undef lpfc_debugfs_op_disc_trc static const struct file_operations lpfc_debugfs_op_disc_trc = { .owner = THIS_MODULE, @@ -1213,6 +1964,28 @@ static const struct file_operations lpfc_debugfs_op_slow_ring_trc = { static struct dentry *lpfc_debugfs_root = NULL; static atomic_t lpfc_debugfs_hba_count; + +/* + * File operations for the iDiag debugfs + */ +#undef lpfc_idiag_op_pciCfg +static const struct file_operations lpfc_idiag_op_pciCfg = { + .owner = THIS_MODULE, + .open = lpfc_idiag_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_idiag_pcicfg_read, + .write = lpfc_idiag_pcicfg_write, + .release = lpfc_idiag_cmd_release, +}; + +#undef lpfc_idiag_op_queInfo +static const struct file_operations lpfc_idiag_op_queInfo = { + .owner = THIS_MODULE, + .open = lpfc_idiag_open, + .read = lpfc_idiag_queinfo_read, + .release = lpfc_idiag_release, +}; + 
#endif /** @@ -1249,8 +2022,8 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) if (!lpfc_debugfs_start_time) lpfc_debugfs_start_time = jiffies; - /* Setup lpfcX directory for specific HBA */ - snprintf(name, sizeof(name), "lpfc%d", phba->brd_no); + /* Setup funcX directory for specific HBA PCI function */ + snprintf(name, sizeof(name), "fn%d", phba->brd_no); if (!phba->hba_debugfs_root) { phba->hba_debugfs_root = debugfs_create_dir(name, lpfc_debugfs_root); @@ -1275,28 +2048,38 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) } /* Setup dumpHBASlim */ - snprintf(name, sizeof(name), "dumpHBASlim"); - phba->debug_dumpHBASlim = - debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, - phba->hba_debugfs_root, - phba, &lpfc_debugfs_op_dumpHBASlim); - if (!phba->debug_dumpHBASlim) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0413 Cannot create debugfs dumpHBASlim\n"); - goto debug_failed; - } + if (phba->sli_rev < LPFC_SLI_REV4) { + snprintf(name, sizeof(name), "dumpHBASlim"); + phba->debug_dumpHBASlim = + debugfs_create_file(name, + S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dumpHBASlim); + if (!phba->debug_dumpHBASlim) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0413 Cannot create debugfs " + "dumpHBASlim\n"); + goto debug_failed; + } + } else + phba->debug_dumpHBASlim = NULL; /* Setup dumpHostSlim */ - snprintf(name, sizeof(name), "dumpHostSlim"); - phba->debug_dumpHostSlim = - debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, - phba->hba_debugfs_root, - phba, &lpfc_debugfs_op_dumpHostSlim); - if (!phba->debug_dumpHostSlim) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0414 Cannot create debugfs dumpHostSlim\n"); - goto debug_failed; - } + if (phba->sli_rev < LPFC_SLI_REV4) { + snprintf(name, sizeof(name), "dumpHostSlim"); + phba->debug_dumpHostSlim = + debugfs_create_file(name, + S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dumpHostSlim); + if (!phba->debug_dumpHostSlim) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0414 Cannot create debugfs " + "dumpHostSlim\n"); + goto debug_failed; + } + } else + phba->debug_dumpHostSlim = NULL; /* Setup dumpData */ snprintf(name, sizeof(name), "dumpData"); @@ -1322,8 +2105,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) goto debug_failed; } - - /* Setup slow ring trace */ if (lpfc_debugfs_max_slow_ring_trc) { num = lpfc_debugfs_max_slow_ring_trc - 1; @@ -1342,7 +2123,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) } } - snprintf(name, sizeof(name), "slow_ring_trace"); phba->debug_slow_ring_trc = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, @@ -1378,7 +2158,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) debugfs_create_dir(name, phba->hba_debugfs_root); if (!vport->vport_debugfs_root) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0417 Cant create debugfs\n"); + "0417 Can't create debugfs\n"); goto debug_failed; } atomic_inc(&phba->debugfs_vport_count); @@ -1431,9 +2211,56 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) vport, &lpfc_debugfs_op_nodelist); if (!vport->debug_nodelist) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0409 Cant create debugfs nodelist\n"); + "0409 Can't create debugfs nodelist\n"); + goto debug_failed; + } + + /* + * iDiag debugfs root entry points for SLI4 device only + */ + if (phba->sli_rev < LPFC_SLI_REV4) goto debug_failed; + + snprintf(name, sizeof(name), "iDiag"); + if (!phba->idiag_root) { + phba->idiag_root = + debugfs_create_dir(name, phba->hba_debugfs_root); + if (!phba->idiag_root) { +
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "2922 Can't create idiag debugfs\n"); + goto debug_failed; + } + /* Initialize iDiag data structure */ + memset(&idiag, 0, sizeof(idiag)); } + + /* iDiag read PCI config space */ + snprintf(name, sizeof(name), "pciCfg"); + if (!phba->idiag_pci_cfg) { + phba->idiag_pci_cfg = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->idiag_root, phba, &lpfc_idiag_op_pciCfg); + if (!phba->idiag_pci_cfg) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "2923 Can't create idiag debugfs\n"); + goto debug_failed; + } + idiag.offset.last_rd = 0; + } + + /* iDiag get PCI function queue information */ + snprintf(name, sizeof(name), "queInfo"); + if (!phba->idiag_que_info) { + phba->idiag_que_info = + debugfs_create_file(name, S_IFREG|S_IRUGO, + phba->idiag_root, phba, &lpfc_idiag_op_queInfo); + if (!phba->idiag_que_info) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "2924 Can't create idiag debugfs\n"); + goto debug_failed; + } + } + debug_failed: return; #endif @@ -1508,8 +2335,31 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) phba->debug_slow_ring_trc = NULL; } + /* + * iDiag release + */ + if (phba->sli_rev == LPFC_SLI_REV4) { + if (phba->idiag_que_info) { + /* iDiag queInfo */ + debugfs_remove(phba->idiag_que_info); + phba->idiag_que_info = NULL; + } + if (phba->idiag_pci_cfg) { + /* iDiag pciCfg */ + debugfs_remove(phba->idiag_pci_cfg); + phba->idiag_pci_cfg = NULL; + } + + /* Finally remove the iDiag debugfs root */ + if (phba->idiag_root) { + /* iDiag root */ + debugfs_remove(phba->idiag_root); + phba->idiag_root = NULL; + } + } + if (phba->hba_debugfs_root) { - debugfs_remove(phba->hba_debugfs_root); /* lpfcX */ + debugfs_remove(phba->hba_debugfs_root); /* fnX */ phba->hba_debugfs_root = NULL; atomic_dec(&lpfc_debugfs_hba_count); } diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h index 03c7313a1012..91b9a9427cda 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.h +++ b/drivers/scsi/lpfc/lpfc_debugfs.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2007 Emulex. All rights reserved. * + * Copyright (C) 2007-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * @@ -22,6 +44,44 @@ #define _H_LPFC_DEBUG_FS #ifdef CONFIG_SCSI_LPFC_DEBUG_FS + +/* size of output line, for discovery_trace and slow_ring_trace */ +#define LPFC_DEBUG_TRC_ENTRY_SIZE 100 + +/* nodelist output buffer size */ +#define LPFC_NODELIST_SIZE 8192 +#define LPFC_NODELIST_ENTRY_SIZE 120 + +/* dumpHBASlim output buffer size */ +#define LPFC_DUMPHBASLIM_SIZE 4096 + +/* dumpHostSlim output buffer size */ +#define LPFC_DUMPHOSTSLIM_SIZE 4096 + +/* hbqinfo output buffer size */ +#define LPFC_HBQINFO_SIZE 8192 + +/* rdPciConf output buffer size */ +#define LPFC_PCI_CFG_SIZE 4096 +#define LPFC_PCI_CFG_RD_BUF_SIZE (LPFC_PCI_CFG_SIZE/2) +#define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4) + +/* queue info output buffer size */ +#define LPFC_QUE_INFO_GET_BUF_SIZE 2048 + +#define SIZE_U8 sizeof(uint8_t) +#define SIZE_U16 sizeof(uint16_t) +#define SIZE_U32 sizeof(uint32_t) + +struct lpfc_debug { + char *i_private; + char op; +#define LPFC_IDIAG_OP_RD 1 +#define LPFC_IDIAG_OP_WR 2 + char *buffer; + int len; +}; + struct lpfc_debugfs_trc { char *fmt; uint32_t data1; @@ -30,6 +68,26 @@ struct lpfc_debugfs_trc { uint32_t seq_cnt; unsigned long jif; }; + +struct lpfc_idiag_offset { + uint32_t last_rd; +}; + +#define LPFC_IDIAG_CMD_DATA_SIZE 4 +struct lpfc_idiag_cmd { + uint32_t opcode; +#define LPFC_IDIAG_CMD_PCICFG_RD 0x00000001 +#define LPFC_IDIAG_CMD_PCICFG_WR 0x00000002 +#define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003 +#define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004 + uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE]; +}; + +struct lpfc_idiag { + uint32_t active; + struct lpfc_idiag_cmd cmd; + struct lpfc_idiag_offset offset; +}; #endif /* Mask for discovery_trace */ diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index c62d567cc845..d34b69f9cdb1 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -89,7 +89,8 @@ lpfc_els_chk_latt(struct lpfc_vport *vport) return 0; /* Read the HBA Host Attention Register */ - ha_copy = readl(phba->HAregaddr); + if (lpfc_readl(phba->HAregaddr, &ha_copy)) + return 1; if (!(ha_copy & HA_LATT)) return 0; @@ -101,7 +102,7 @@ lpfc_els_chk_latt(struct lpfc_vport *vport) phba->pport->port_state); /* CLEAR_LA should re-enable link attention events and - * we should then imediately take a LATT event. The + * we should then immediately take a LATT event. The * LATT processing should call lpfc_linkdown() which * will cleanup any left over in-progress discovery * events. @@ -485,6 +486,59 @@ fail: } /** + * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean. + * @vport: pointer to a host virtual N_Port data structure. + * @sp: pointer to service parameter data structure. + * + * This routine is called from FLOGI/FDISC completion handler functions. + * lpfc_check_clean_addr_bit returns 1 when the FCID, Fabric portname, or + * Fabric nodename has changed in the completion service parameters, else + * returns 0. 
This function also sets a flag in the vport data structure to delay + * NPort discovery after the FLOGI/FDISC completion if the Clean address bit + * in the FLOGI/FDISC response is cleared and the FCID, Fabric portname, or + * Fabric nodename has changed in the completion service parameters. + * + * Return code + * 0 - FCID and Fabric Nodename and Fabric portname are not changed. + * 1 - FCID or Fabric Nodename or Fabric portname is changed. + * + **/ +static uint8_t +lpfc_check_clean_addr_bit(struct lpfc_vport *vport, + struct serv_parm *sp) +{ + uint8_t fabric_param_changed = 0; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + if ((vport->fc_prevDID != vport->fc_myDID) || + memcmp(&vport->fabric_portname, &sp->portName, + sizeof(struct lpfc_name)) || + memcmp(&vport->fabric_nodename, &sp->nodeName, + sizeof(struct lpfc_name))) + fabric_param_changed = 1; + + /* + * Word 1 Bit 31 in common service parameter is overloaded. + * Word 1 Bit 31 in FLOGI request is multiple NPort request + * Word 1 Bit 31 in FLOGI response is clean address bit + * + * If fabric parameter is changed and clean address bit is + * cleared, delay nport discovery if + * - vport->fc_prevDID != 0 (not initial discovery) OR + * - lpfc_delay_discovery module parameter is set. + */ + if (fabric_param_changed && !sp->cmn.clean_address_bit && + (vport->fc_prevDID || lpfc_delay_discovery)) { + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_DISC_DELAYED; + spin_unlock_irq(shost->host_lock); + } + + return fabric_param_changed; +} + + +/** + * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port + * @vport: pointer to a host virtual N_Port data structure. + * @ndlp: pointer to a node-list data structure. @@ -512,6 +566,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *np; struct lpfc_nodelist *next_np; + uint8_t fabric_param_changed; spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_FABRIC; @@ -544,6 +599,12 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_class_sup |= FC_COS_CLASS4; ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; + + fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); + memcpy(&vport->fabric_portname, &sp->portName, + sizeof(struct lpfc_name)); + memcpy(&vport->fabric_nodename, &sp->nodeName, + sizeof(struct lpfc_name)); memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { @@ -565,7 +626,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } } - if ((vport->fc_prevDID != vport->fc_myDID) && + if (fabric_param_changed && !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { /* If our NportID changed, we need to ensure all @@ -1538,7 +1599,7 @@ out: * This routine is the completion callback function for issuing the Port * Login (PLOGI) command. For PLOGI completion, there must be an active * ndlp on the vport node list that matches the remote node ID from the - * PLOGI reponse IOCB. If such ndlp does not exist, the PLOGI is simply + * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply * ignored and command IOCB released. The PLOGI response IOCB status is * checked for error conditions. If there is error status reported, PLOGI * retry shall be attempted by invoking the lpfc_els_retry() routine. 
@@ -2203,6 +2264,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct Scsi_Host *shost = lpfc_shost_from_vport(vport); IOCB_t *irsp; struct lpfc_sli *psli; + struct lpfcMboxq *mbox; psli = &phba->sli; /* we pass cmdiocb to state machine which needs rspiocb as well */ @@ -2260,6 +2322,21 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, NLP_EVT_CMPL_LOGO); out: lpfc_els_free_iocb(phba, cmdiocb); + /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */ + if ((vport->fc_flag & FC_PT2PT) && + !(vport->fc_flag & FC_PT2PT_PLOGI)) { + phba->pport->fc_myDID = 0; + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (mbox) { + lpfc_config_link(phba, mbox); + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->vport = vport; + if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == + MBX_NOT_FINISHED) { + mempool_free(mbox, phba->mbox_mem_pool); + } + } + } return; } @@ -2745,7 +2822,8 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) } break; case ELS_CMD_FDISC: - lpfc_issue_els_fdisc(vport, ndlp, retry); + if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)) + lpfc_issue_els_fdisc(vport, ndlp, retry); break; } return; @@ -2815,9 +2893,17 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, switch (irsp->ulpStatus) { case IOSTAT_FCP_RSP_ERROR: + break; case IOSTAT_REMOTE_STOP: + if (phba->sli_rev == LPFC_SLI_REV4) { + /* This IO was aborted by the target, we don't + * know the rxid and because we did not send the + * ABTS we cannot generate an RRQ. + */ + lpfc_set_rrq_active(phba, ndlp, + cmdiocb->sli4_xritag, 0, 0); + } break; - case IOSTAT_LOCAL_REJECT: switch ((irsp->un.ulpWord[4] & 0xff)) { case IOERR_LOOP_OPEN_FAILURE: @@ -4013,28 +4099,34 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport, uint8_t *pcmd; struct RRQ *rrq; uint16_t rxid; + uint16_t xri; struct lpfc_node_rrq *prrq; pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt); pcmd += sizeof(uint32_t); rrq = (struct RRQ *)pcmd; - rxid = bf_get(rrq_oxid, rrq); + rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); + rxid = be16_to_cpu(bf_get(rrq_rxid, rrq)); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" " x%x x%x\n", - bf_get(rrq_did, rrq), - bf_get(rrq_oxid, rrq), + be32_to_cpu(bf_get(rrq_did, rrq)), + be16_to_cpu(bf_get(rrq_oxid, rrq)), rxid, iocb->iotag, iocb->iocb.ulpContext); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); - prrq = lpfc_get_active_rrq(vport, rxid, ndlp->nlp_DID); + if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) + xri = be16_to_cpu(bf_get(rrq_oxid, rrq)); + else + xri = rxid; + prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); if (prrq) - lpfc_clr_rrq_active(phba, rxid, prrq); + lpfc_clr_rrq_active(phba, xri, prrq); return; } @@ -6166,6 +6258,11 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, if (vport->load_flag & FC_UNLOADING) goto dropit; + /* If NPort discovery is delayed drop incoming ELS */ + if ((vport->fc_flag & FC_DISC_DELAYED) && + (cmd != ELS_CMD_PLOGI)) + goto dropit; + ndlp = lpfc_findnode_did(vport, did); if (!ndlp) { /* Cannot find existing Fabric ndlp, so allocate a new one */ @@ -6218,6 +6315,12 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); lpfc_send_els_event(vport, ndlp, payload); + + /* If Nport discovery is delayed, reject PLOGIs */ + if 
(vport->fc_flag & FC_DISC_DELAYED) { + rjt_err = LSRJT_UNABLE_TPC; + break; + } if (vport->port_state < LPFC_DISC_AUTH) { if (!(phba->pport->fc_flag & FC_PT2PT) || (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { @@ -6596,6 +6699,21 @@ void lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) { struct lpfc_nodelist *ndlp, *ndlp_fdmi; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + /* + * If lpfc_delay_discovery parameter is set and the clean address + * bit is cleared and fc fabric parameters changed, delay FC NPort + * discovery. + */ + spin_lock_irq(shost->host_lock); + if (vport->fc_flag & FC_DISC_DELAYED) { + spin_unlock_irq(shost->host_lock); + mod_timer(&vport->delayed_disc_tmo, + jiffies + HZ * phba->fc_ratov); + return; + } + spin_unlock_irq(shost->host_lock); ndlp = lpfc_findnode_did(vport, NameServer_DID); if (!ndlp) { @@ -6938,6 +7056,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *next_np; IOCB_t *irsp = &rspiocb->iocb; struct lpfc_iocbq *piocb; + struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; + struct serv_parm *sp; + uint8_t fabric_param_changed; lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0123 FDISC completes. x%x/x%x prevDID: x%x\n", @@ -6981,7 +7102,14 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); - if ((vport->fc_prevDID != vport->fc_myDID) && + prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); + sp = prsp->virt + sizeof(uint32_t); + fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); + memcpy(&vport->fabric_portname, &sp->portName, + sizeof(struct lpfc_name)); + memcpy(&vport->fabric_nodename, &sp->nodeName, + sizeof(struct lpfc_name)); + if (fabric_param_changed && !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { /* If our NportID changed, we need to ensure all * remaining NPORTs get unreg_login'ed so we can @@ -7582,6 +7710,32 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba) } /** + * lpfc_sli4_vport_delete_els_xri_aborted - Remove all ndlp references for vport + * @vport: pointer to lpfc vport data structure. + * + * This routine is invoked by the vport cleanup for deletions and the cleanup + * for an ndlp on removal. + **/ +void +lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; + unsigned long iflag = 0; + + spin_lock_irqsave(&phba->hbalock, iflag); + spin_lock(&phba->sli4_hba.abts_sgl_list_lock); + list_for_each_entry_safe(sglq_entry, sglq_next, + &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { + if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) + sglq_entry->ndlp = NULL; + } + spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); + spin_unlock_irqrestore(&phba->hbalock, iflag); + return; +} + +/** + * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort + * @phba: pointer to lpfc hba data structure. + * @axri: pointer to the els xri abort wcqe structure. 
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index bb015960dbc9..301498301a8f 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -658,6 +658,8 @@ lpfc_work_done(struct lpfc_hba *phba) lpfc_ramp_down_queue_handler(phba); if (work_port_events & WORKER_RAMP_UP_QUEUE) lpfc_ramp_up_queue_handler(phba); + if (work_port_events & WORKER_DELAYED_DISC_TMO) + lpfc_delayed_disc_timeout_handler(vport); } lpfc_destroy_vport_work_array(phba, vports); @@ -737,7 +739,7 @@ lpfc_do_work(void *p) /* * This is only called to handle FC worker events. Since this a rare - * occurance, we allocate a struct lpfc_work_evt structure here instead of + * occurrence, we allocate a struct lpfc_work_evt structure here instead of * embedding it in the IOCB. */ int @@ -838,6 +840,11 @@ lpfc_linkdown_port(struct lpfc_vport *vport) lpfc_port_link_failure(vport); + /* Stop delayed Nport discovery */ + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_DISC_DELAYED; + spin_unlock_irq(shost->host_lock); + del_timer_sync(&vport->delayed_disc_tmo); } int @@ -1341,7 +1348,7 @@ lpfc_register_fcf(struct lpfc_hba *phba) int rc; spin_lock_irq(&phba->hbalock); - /* If the FCF is not availabe do nothing. */ + /* If the FCF is not available do nothing. */ if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); spin_unlock_irq(&phba->hbalock); @@ -1531,7 +1538,7 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba, /* * If user did not specify any addressing mode, or if the - * prefered addressing mode specified by user is not supported + * preferred addressing mode specified by user is not supported * by FCF, allow fabric to pick the addressing mode. */ *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, @@ -1546,7 +1553,7 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba, FCFCNCT_AM_SPMA) ? LPFC_FCF_SPMA : LPFC_FCF_FPMA; /* - * If the user specified a prefered address mode, use the + * If the user specified a preferred address mode, use the * addr mode only if FCF support the addr_mode. */ else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && @@ -3110,7 +3117,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) * back at reg login state so this * mbox needs to be ignored becase * there is another reg login in - * proccess. + * process. 
*/ spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; @@ -3160,7 +3167,7 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) spin_unlock_irq(shost->host_lock); vport->unreg_vpi_cmpl = VPORT_OK; mempool_free(pmb, phba->mbox_mem_pool); - lpfc_cleanup_vports_rrqs(vport); + lpfc_cleanup_vports_rrqs(vport, NULL); /* * This shost reference might have been taken at the beginning of * lpfc_vport_delete() @@ -3900,6 +3907,8 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) return; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); + if (vport->phba->sli_rev == LPFC_SLI_REV4) + lpfc_cleanup_vports_rrqs(vport, ndlp); lpfc_nlp_put(ndlp); return; } @@ -4289,7 +4298,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) list_del_init(&ndlp->els_retry_evt.evt_listp); list_del_init(&ndlp->dev_loss_evt.evt_listp); - + lpfc_cleanup_vports_rrqs(vport, ndlp); lpfc_unreg_rpi(vport, ndlp); return 0; @@ -4426,10 +4435,11 @@ lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp; + unsigned long iflags; - spin_lock_irq(shost->host_lock); + spin_lock_irqsave(shost->host_lock, iflags); ndlp = __lpfc_findnode_did(vport, did); - spin_unlock_irq(shost->host_lock); + spin_unlock_irqrestore(shost->host_lock, iflags); return ndlp; } @@ -4467,7 +4477,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) if ((vport->fc_flag & FC_RSCN_MODE) && !(vport->fc_flag & FC_NDISC_ACTIVE)) { if (lpfc_rscn_payload_check(vport, did)) { - /* If we've already recieved a PLOGI from this NPort + /* If we've already received a PLOGI from this NPort * we don't need to try to discover it again. */ if (ndlp->nlp_flag & NLP_RCV_PLOGI) @@ -4483,7 +4493,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) } else ndlp = NULL; } else { - /* If we've already recieved a PLOGI from this NPort, + /* If we've already received a PLOGI from this NPort, * or we are already in the process of discovery on it, * we don't need to try to discover it again. */ @@ -5746,7 +5756,7 @@ lpfc_read_fcoe_param(struct lpfc_hba *phba, * @size: Size of the data buffer. * @rec_type: Record type to be searched. * - * This function searches config region data to find the begining + * This function searches config region data to find the beginning * of the record specified by record_type. If record found, this * function return pointer to the record else return NULL. */ diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 96ed3ba6ba95..95f11ed79463 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -341,6 +341,12 @@ struct csp { uint8_t bbCreditMsb; uint8_t bbCreditlsb; /* FC Word 0, byte 3 */ +/* + * Word 1 Bit 31 in common service parameter is overloaded. 
+ * Word 1 Bit 31 in FLOGI request is multiple NPort request + * Word 1 Bit 31 in FLOGI response is clean address bit + */ +#define clean_address_bit request_multiple_Nport /* Word 1, bit 31 */ #ifdef __BIG_ENDIAN_BITFIELD uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */ uint16_t randomOffset:1; /* FC Word 1, bit 30 */ @@ -1338,7 +1344,7 @@ typedef struct { /* FireFly BIU registers */ #define HS_FFER1 0x80000000 /* Bit 31 */ #define HS_CRIT_TEMP 0x00000100 /* Bit 8 */ #define HS_FFERM 0xFF000100 /* Mask for error bits 31:24 and 8 */ - +#define UNPLUG_ERR 0x00000001 /* Indicate pci hot unplug */ /* Host Control Register */ #define HC_REG_OFFSET 12 /* Byte offset from register base address */ @@ -1707,6 +1713,17 @@ struct lpfc_pde6 { #define pde6_apptagval_WORD word2 }; +struct lpfc_pde7 { + uint32_t word0; +#define pde7_type_SHIFT 24 +#define pde7_type_MASK 0x000000ff +#define pde7_type_WORD word0 +#define pde7_rsvd0_SHIFT 0 +#define pde7_rsvd0_MASK 0x00ffffff +#define pde7_rsvd0_WORD word0 + uint32_t addrHigh; + uint32_t addrLow; +}; /* Structure for MB Command LOAD_SM and DOWN_LOAD */ @@ -3198,7 +3215,10 @@ typedef struct { #define IOERR_SLER_RRQ_RJT_ERR 0x4C #define IOERR_SLER_RRQ_RETRY_ERR 0x4D #define IOERR_SLER_ABTS_ERR 0x4E - +#define IOERR_ELXSEC_KEY_UNWRAP_ERROR 0xF0 +#define IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR 0xF1 +#define IOERR_ELXSEC_CRYPTO_ERROR 0xF2 +#define IOERR_ELXSEC_CRYPTO_COMPARE_ERROR 0xF3 #define IOERR_DRVR_MASK 0x100 #define IOERR_SLI_DOWN 0x101 /* ulpStatus - Driver defined */ #define IOERR_SLI_BRESET 0x102 @@ -3612,7 +3632,7 @@ typedef struct _IOCB { /* IOCB structure */ ASYNCSTAT_FIELDS asyncstat; /* async_status iocb */ QUE_XRI64_CX_FIELDS quexri64cx; /* que_xri64_cx fields */ struct rcv_seq64 rcvseq64; /* RCV_SEQ64 and RCV_CONT64 */ - struct sli4_bls_acc bls_acc; /* UNSOL ABTS BLS_ACC params */ + struct sli4_bls_rsp bls_rsp; /* UNSOL ABTS BLS_RSP params */ uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */ } un; union { diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 94c1aa1136de..8433ac0d9fb4 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -215,7 +215,7 @@ struct lpfc_sli4_flags { #define lpfc_fip_flag_WORD word0 }; -struct sli4_bls_acc { +struct sli4_bls_rsp { uint32_t word0_rsvd; /* Word0 must be reserved */ uint32_t word1; #define lpfc_abts_orig_SHIFT 0 @@ -231,6 +231,16 @@ struct sli4_bls_acc { #define lpfc_abts_oxid_MASK 0x0000FFFF #define lpfc_abts_oxid_WORD word2 uint32_t word3; +#define lpfc_vndr_code_SHIFT 0 +#define lpfc_vndr_code_MASK 0x000000FF +#define lpfc_vndr_code_WORD word3 +#define lpfc_rsn_expln_SHIFT 8 +#define lpfc_rsn_expln_MASK 0x000000FF +#define lpfc_rsn_expln_WORD word3 +#define lpfc_rsn_code_SHIFT 16 +#define lpfc_rsn_code_MASK 0x000000FF +#define lpfc_rsn_code_WORD word3 + uint32_t word4; uint32_t word5_rsvd; /* Word5 must be reserved */ }; @@ -711,21 +721,27 @@ struct lpfc_sli4_cfg_mhdr { union lpfc_sli4_cfg_shdr { struct { uint32_t word6; -#define lpfc_mbox_hdr_opcode_SHIFT 0 -#define lpfc_mbox_hdr_opcode_MASK 0x000000FF -#define lpfc_mbox_hdr_opcode_WORD word6 -#define lpfc_mbox_hdr_subsystem_SHIFT 8 -#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF -#define lpfc_mbox_hdr_subsystem_WORD word6 -#define lpfc_mbox_hdr_port_number_SHIFT 16 -#define lpfc_mbox_hdr_port_number_MASK 0x000000FF -#define lpfc_mbox_hdr_port_number_WORD word6 -#define lpfc_mbox_hdr_domain_SHIFT 24 -#define lpfc_mbox_hdr_domain_MASK 0x000000FF -#define lpfc_mbox_hdr_domain_WORD 
word6 +#define lpfc_mbox_hdr_opcode_SHIFT 0 +#define lpfc_mbox_hdr_opcode_MASK 0x000000FF +#define lpfc_mbox_hdr_opcode_WORD word6 +#define lpfc_mbox_hdr_subsystem_SHIFT 8 +#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF +#define lpfc_mbox_hdr_subsystem_WORD word6 +#define lpfc_mbox_hdr_port_number_SHIFT 16 +#define lpfc_mbox_hdr_port_number_MASK 0x000000FF +#define lpfc_mbox_hdr_port_number_WORD word6 +#define lpfc_mbox_hdr_domain_SHIFT 24 +#define lpfc_mbox_hdr_domain_MASK 0x000000FF +#define lpfc_mbox_hdr_domain_WORD word6 uint32_t timeout; uint32_t request_length; - uint32_t reserved9; + uint32_t word9; +#define lpfc_mbox_hdr_version_SHIFT 0 +#define lpfc_mbox_hdr_version_MASK 0x000000FF +#define lpfc_mbox_hdr_version_WORD word9 +#define LPFC_Q_CREATE_VERSION_2 2 +#define LPFC_Q_CREATE_VERSION_1 1 +#define LPFC_Q_CREATE_VERSION_0 0 } request; struct { uint32_t word6; @@ -778,6 +794,7 @@ struct mbox_header { #define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A #define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D #define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A +#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5 /* FCoE Opcodes */ #define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01 @@ -916,9 +933,12 @@ struct cq_context { #define LPFC_CQ_CNT_512 0x1 #define LPFC_CQ_CNT_1024 0x2 uint32_t word1; -#define lpfc_cq_eq_id_SHIFT 22 +#define lpfc_cq_eq_id_SHIFT 22 /* Version 0 Only */ #define lpfc_cq_eq_id_MASK 0x000000FF #define lpfc_cq_eq_id_WORD word1 +#define lpfc_cq_eq_id_2_SHIFT 0 /* Version 2 Only */ +#define lpfc_cq_eq_id_2_MASK 0x0000FFFF +#define lpfc_cq_eq_id_2_WORD word1 uint32_t reserved0; uint32_t reserved1; }; @@ -928,6 +948,9 @@ struct lpfc_mbx_cq_create { union { struct { uint32_t word0; +#define lpfc_mbx_cq_create_page_size_SHIFT 16 /* Version 2 Only */ +#define lpfc_mbx_cq_create_page_size_MASK 0x000000FF +#define lpfc_mbx_cq_create_page_size_WORD word0 #define lpfc_mbx_cq_create_num_pages_SHIFT 0 #define lpfc_mbx_cq_create_num_pages_MASK 0x0000FFFF #define lpfc_mbx_cq_create_num_pages_WORD word0 @@ -968,7 +991,7 @@ struct wq_context { struct lpfc_mbx_wq_create { struct mbox_header header; union { - struct { + struct { /* Version 0 Request */ uint32_t word0; #define lpfc_mbx_wq_create_num_pages_SHIFT 0 #define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF @@ -978,6 +1001,23 @@ struct lpfc_mbx_wq_create { #define lpfc_mbx_wq_create_cq_id_WORD word0 struct dma_address page[LPFC_MAX_WQ_PAGE]; } request; + struct { /* Version 1 Request */ + uint32_t word0; /* Word 0 is the same as in v0 */ + uint32_t word1; +#define lpfc_mbx_wq_create_page_size_SHIFT 0 +#define lpfc_mbx_wq_create_page_size_MASK 0x000000FF +#define lpfc_mbx_wq_create_page_size_WORD word1 +#define lpfc_mbx_wq_create_wqe_size_SHIFT 8 +#define lpfc_mbx_wq_create_wqe_size_MASK 0x0000000F +#define lpfc_mbx_wq_create_wqe_size_WORD word1 +#define LPFC_WQ_WQE_SIZE_64 0x5 +#define LPFC_WQ_WQE_SIZE_128 0x6 +#define lpfc_mbx_wq_create_wqe_count_SHIFT 16 +#define lpfc_mbx_wq_create_wqe_count_MASK 0x0000FFFF +#define lpfc_mbx_wq_create_wqe_count_WORD word1 + uint32_t word2; + struct dma_address page[LPFC_MAX_WQ_PAGE-1]; + } request_1; struct { uint32_t word0; #define lpfc_mbx_wq_create_q_id_SHIFT 0 @@ -1006,13 +1046,22 @@ struct lpfc_mbx_wq_destroy { #define LPFC_DATA_BUF_SIZE 2048 struct rq_context { uint32_t word0; -#define lpfc_rq_context_rq_size_SHIFT 16 -#define lpfc_rq_context_rq_size_MASK 0x0000000F -#define lpfc_rq_context_rq_size_WORD word0 +#define lpfc_rq_context_rqe_count_SHIFT 16 /* Version 0 Only */ +#define lpfc_rq_context_rqe_count_MASK 
0x0000000F +#define lpfc_rq_context_rqe_count_WORD word0 #define LPFC_RQ_RING_SIZE_512 9 /* 512 entries */ #define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */ #define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */ #define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */ +#define lpfc_rq_context_rqe_count_1_SHIFT 16 /* Version 1 Only */ +#define lpfc_rq_context_rqe_count_1_MASK 0x0000FFFF +#define lpfc_rq_context_rqe_count_1_WORD word0 +#define lpfc_rq_context_rqe_size_SHIFT 8 /* Version 1 Only */ +#define lpfc_rq_context_rqe_size_MASK 0x0000000F +#define lpfc_rq_context_rqe_size_WORD word0 +#define lpfc_rq_context_page_size_SHIFT 0 /* Version 1 Only */ +#define lpfc_rq_context_page_size_MASK 0x000000FF +#define lpfc_rq_context_page_size_WORD word0 uint32_t reserved1; uint32_t word2; #define lpfc_rq_context_cq_id_SHIFT 16 @@ -1021,7 +1070,7 @@ struct rq_context { #define lpfc_rq_context_buf_size_SHIFT 0 #define lpfc_rq_context_buf_size_MASK 0x0000FFFF #define lpfc_rq_context_buf_size_WORD word2 - uint32_t reserved3; + uint32_t buffer_size; /* Version 1 Only */ }; struct lpfc_mbx_rq_create { @@ -1061,16 +1110,16 @@ struct lpfc_mbx_rq_destroy { struct mq_context { uint32_t word0; -#define lpfc_mq_context_cq_id_SHIFT 22 +#define lpfc_mq_context_cq_id_SHIFT 22 /* Version 0 Only */ #define lpfc_mq_context_cq_id_MASK 0x000003FF #define lpfc_mq_context_cq_id_WORD word0 -#define lpfc_mq_context_count_SHIFT 16 -#define lpfc_mq_context_count_MASK 0x0000000F -#define lpfc_mq_context_count_WORD word0 -#define LPFC_MQ_CNT_16 0x5 -#define LPFC_MQ_CNT_32 0x6 -#define LPFC_MQ_CNT_64 0x7 -#define LPFC_MQ_CNT_128 0x8 +#define lpfc_mq_context_ring_size_SHIFT 16 +#define lpfc_mq_context_ring_size_MASK 0x0000000F +#define lpfc_mq_context_ring_size_WORD word0 +#define LPFC_MQ_RING_SIZE_16 0x5 +#define LPFC_MQ_RING_SIZE_32 0x6 +#define LPFC_MQ_RING_SIZE_64 0x7 +#define LPFC_MQ_RING_SIZE_128 0x8 uint32_t word1; #define lpfc_mq_context_valid_SHIFT 31 #define lpfc_mq_context_valid_MASK 0x00000001 @@ -1104,9 +1153,12 @@ struct lpfc_mbx_mq_create_ext { union { struct { uint32_t word0; -#define lpfc_mbx_mq_create_ext_num_pages_SHIFT 0 -#define lpfc_mbx_mq_create_ext_num_pages_MASK 0x0000FFFF -#define lpfc_mbx_mq_create_ext_num_pages_WORD word0 +#define lpfc_mbx_mq_create_ext_num_pages_SHIFT 0 +#define lpfc_mbx_mq_create_ext_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_mq_create_ext_num_pages_WORD word0 +#define lpfc_mbx_mq_create_ext_cq_id_SHIFT 16 /* Version 1 Only */ +#define lpfc_mbx_mq_create_ext_cq_id_MASK 0x0000FFFF +#define lpfc_mbx_mq_create_ext_cq_id_WORD word0 uint32_t async_evt_bmap; #define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK #define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001 @@ -1852,6 +1904,9 @@ struct lpfc_mbx_request_features { #define lpfc_mbx_rq_ftr_rq_ifip_SHIFT 7 #define lpfc_mbx_rq_ftr_rq_ifip_MASK 0x00000001 #define lpfc_mbx_rq_ftr_rq_ifip_WORD word2 +#define lpfc_mbx_rq_ftr_rq_perfh_SHIFT 11 +#define lpfc_mbx_rq_ftr_rq_perfh_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_perfh_WORD word2 uint32_t word3; #define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0 #define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001 @@ -1877,6 +1932,9 @@ struct lpfc_mbx_request_features { #define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT 7 #define lpfc_mbx_rq_ftr_rsp_ifip_MASK 0x00000001 #define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_perfh_SHIFT 11 +#define lpfc_mbx_rq_ftr_rsp_perfh_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_perfh_WORD word3 }; struct lpfc_mbx_supp_pages { @@ -1935,7 
+1993,7 @@ struct lpfc_mbx_supp_pages { #define LPFC_SLI4_PARAMETERS 2 }; -struct lpfc_mbx_sli4_params { +struct lpfc_mbx_pc_sli4_params { uint32_t word1; #define qs_SHIFT 0 #define qs_MASK 0x00000001 @@ -2051,6 +2109,88 @@ struct lpfc_mbx_sli4_params { uint32_t rsvd_13_63[51]; }; +struct lpfc_sli4_parameters { + uint32_t word0; +#define cfg_prot_type_SHIFT 0 +#define cfg_prot_type_MASK 0x000000FF +#define cfg_prot_type_WORD word0 + uint32_t word1; +#define cfg_ft_SHIFT 0 +#define cfg_ft_MASK 0x00000001 +#define cfg_ft_WORD word1 +#define cfg_sli_rev_SHIFT 4 +#define cfg_sli_rev_MASK 0x0000000f +#define cfg_sli_rev_WORD word1 +#define cfg_sli_family_SHIFT 8 +#define cfg_sli_family_MASK 0x0000000f +#define cfg_sli_family_WORD word1 +#define cfg_if_type_SHIFT 12 +#define cfg_if_type_MASK 0x0000000f +#define cfg_if_type_WORD word1 +#define cfg_sli_hint_1_SHIFT 16 +#define cfg_sli_hint_1_MASK 0x000000ff +#define cfg_sli_hint_1_WORD word1 +#define cfg_sli_hint_2_SHIFT 24 +#define cfg_sli_hint_2_MASK 0x0000001f +#define cfg_sli_hint_2_WORD word1 + uint32_t word2; + uint32_t word3; + uint32_t word4; +#define cfg_cqv_SHIFT 14 +#define cfg_cqv_MASK 0x00000003 +#define cfg_cqv_WORD word4 + uint32_t word5; + uint32_t word6; +#define cfg_mqv_SHIFT 14 +#define cfg_mqv_MASK 0x00000003 +#define cfg_mqv_WORD word6 + uint32_t word7; + uint32_t word8; +#define cfg_wqv_SHIFT 14 +#define cfg_wqv_MASK 0x00000003 +#define cfg_wqv_WORD word8 + uint32_t word9; + uint32_t word10; +#define cfg_rqv_SHIFT 14 +#define cfg_rqv_MASK 0x00000003 +#define cfg_rqv_WORD word10 + uint32_t word11; +#define cfg_rq_db_window_SHIFT 28 +#define cfg_rq_db_window_MASK 0x0000000f +#define cfg_rq_db_window_WORD word11 + uint32_t word12; +#define cfg_fcoe_SHIFT 0 +#define cfg_fcoe_MASK 0x00000001 +#define cfg_fcoe_WORD word12 +#define cfg_phwq_SHIFT 15 +#define cfg_phwq_MASK 0x00000001 +#define cfg_phwq_WORD word12 +#define cfg_loopbk_scope_SHIFT 28 +#define cfg_loopbk_scope_MASK 0x0000000f +#define cfg_loopbk_scope_WORD word12 + uint32_t sge_supp_len; + uint32_t word14; +#define cfg_sgl_page_cnt_SHIFT 0 +#define cfg_sgl_page_cnt_MASK 0x0000000f +#define cfg_sgl_page_cnt_WORD word14 +#define cfg_sgl_page_size_SHIFT 8 +#define cfg_sgl_page_size_MASK 0x000000ff +#define cfg_sgl_page_size_WORD word14 +#define cfg_sgl_pp_align_SHIFT 16 +#define cfg_sgl_pp_align_MASK 0x000000ff +#define cfg_sgl_pp_align_WORD word14 + uint32_t word15; + uint32_t word16; + uint32_t word17; + uint32_t word18; + uint32_t word19; +}; + +struct lpfc_mbx_get_sli4_parameters { + struct mbox_header header; + struct lpfc_sli4_parameters sli4_parameters; +}; + /* Mailbox Completion Queue Error Messages */ #define MB_CQE_STATUS_SUCCESS 0x0 #define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1 @@ -2103,7 +2243,8 @@ struct lpfc_mqe { struct lpfc_mbx_post_hdr_tmpl hdr_tmpl; struct lpfc_mbx_query_fw_cfg query_fw_cfg; struct lpfc_mbx_supp_pages supp_pages; - struct lpfc_mbx_sli4_params sli4_params; + struct lpfc_mbx_pc_sli4_params sli4_params; + struct lpfc_mbx_get_sli4_parameters get_sli4_parameters; struct lpfc_mbx_nop nop; } un; }; @@ -2381,6 +2522,10 @@ struct wqe_common { #define wqe_wqes_SHIFT 15 #define wqe_wqes_MASK 0x00000001 #define wqe_wqes_WORD word10 +/* Note that this field overlaps above fields */ +#define wqe_wqid_SHIFT 1 +#define wqe_wqid_MASK 0x0000007f +#define wqe_wqid_WORD word10 #define wqe_pri_SHIFT 16 #define wqe_pri_MASK 0x00000007 #define wqe_pri_WORD word10 @@ -2599,7 +2744,8 @@ struct fcp_iwrite64_wqe { uint32_t total_xfer_len; uint32_t 
initial_xfer_len; struct wqe_common wqe_com; /* words 6-11 */ - uint32_t rsvd_12_15[4]; /* word 12-15 */ + uint32_t rsrvd12; + struct ulp_bde64 ph_bde; /* words 13-15 */ }; struct fcp_iread64_wqe { @@ -2608,7 +2754,8 @@ struct fcp_iread64_wqe { uint32_t total_xfer_len; /* word 4 */ uint32_t rsrvd5; /* word 5 */ struct wqe_common wqe_com; /* words 6-11 */ - uint32_t rsvd_12_15[4]; /* word 12-15 */ + uint32_t rsrvd12; + struct ulp_bde64 ph_bde; /* words 13-15 */ }; struct fcp_icmnd64_wqe { diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 6d0b36aa3389..505f88443b5c 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2010 Emulex. All rights reserved. * + * Copyright (C) 2004-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -460,7 +460,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb))) { /* Reset link speed to auto */ - lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT, + lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, "1302 Invalid speed for this board: " "Reset link speed to auto: x%x\n", phba->cfg_link_speed); @@ -507,7 +507,10 @@ lpfc_config_port_post(struct lpfc_hba *phba) phba->hba_flag &= ~HBA_ERATT_HANDLED; /* Enable appropriate host interrupts */ - status = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &status)) { + spin_unlock_irq(&phba->hbalock); + return -EIO; + } status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; if (psli->num_rings > 0) status |= HC_R0INT_ENA; @@ -945,17 +948,13 @@ static void lpfc_rrq_timeout(unsigned long ptr) { struct lpfc_hba *phba; - uint32_t tmo_posted; unsigned long iflag; phba = (struct lpfc_hba *)ptr; spin_lock_irqsave(&phba->pport->work_port_lock, iflag); - tmo_posted = phba->hba_flag & HBA_RRQ_ACTIVE; - if (!tmo_posted) - phba->hba_flag |= HBA_RRQ_ACTIVE; + phba->hba_flag |= HBA_RRQ_ACTIVE; spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); - if (!tmo_posted) - lpfc_worker_wake_up(phba); + lpfc_worker_wake_up(phba); } /** @@ -1226,7 +1225,10 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba) /* Wait for the ER1 bit to clear.*/ while (phba->work_hs & HS_FFER1) { msleep(100); - phba->work_hs = readl(phba->HSregaddr); + if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) { + phba->work_hs = UNPLUG_ERR ; + break; + } /* If driver is unloading let the worker thread continue */ if (phba->pport->load_flag & FC_UNLOADING) { phba->work_hs = 0; @@ -2280,6 +2282,7 @@ lpfc_cleanup(struct lpfc_vport *vport) /* Wait for any activity on ndlps to settle */ msleep(10); } + lpfc_cleanup_vports_rrqs(vport, NULL); } /** @@ -2295,6 +2298,7 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport) { del_timer_sync(&vport->els_tmofunc); del_timer_sync(&vport->fc_fdmitmo); + del_timer_sync(&vport->delayed_disc_tmo); lpfc_can_disctmo(vport); return; } @@ -2355,6 +2359,10 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba) del_timer_sync(&phba->fabric_block_timer); del_timer_sync(&phba->eratt_poll); del_timer_sync(&phba->hb_tmofunc); + if (phba->sli_rev == LPFC_SLI_REV4) { + del_timer_sync(&phba->rrq_tmr); + phba->hba_flag &= ~HBA_RRQ_ACTIVE; + } phba->hb_outstanding = 0; switch (phba->pci_dev_grp) { @@ -2732,6 
+2740,11 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) init_timer(&vport->els_tmofunc); vport->els_tmofunc.function = lpfc_els_timeout; vport->els_tmofunc.data = (unsigned long)vport; + + init_timer(&vport->delayed_disc_tmo); + vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo; + vport->delayed_disc_tmo.data = (unsigned long)vport; + error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); if (error) goto out_put_shost; @@ -4283,36 +4296,37 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_free_bsmbx; } - /* Get the Supported Pages. It is always available. */ + /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */ lpfc_supported_pages(mboxq); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); - if (unlikely(rc)) { - rc = -EIO; - mempool_free(mboxq, phba->mbox_mem_pool); - goto out_free_bsmbx; - } - - mqe = &mboxq->u.mqe; - memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), - LPFC_MAX_SUPPORTED_PAGES); - for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { - switch (pn_page[i]) { - case LPFC_SLI4_PARAMETERS: - phba->sli4_hba.pc_sli4_params.supported = 1; - break; - default: - break; + if (!rc) { + mqe = &mboxq->u.mqe; + memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), + LPFC_MAX_SUPPORTED_PAGES); + for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { + switch (pn_page[i]) { + case LPFC_SLI4_PARAMETERS: + phba->sli4_hba.pc_sli4_params.supported = 1; + break; + default: + break; + } + } + /* Read the port's SLI4 Parameters capabilities if supported. */ + if (phba->sli4_hba.pc_sli4_params.supported) + rc = lpfc_pc_sli4_params_get(phba, mboxq); + if (rc) { + mempool_free(mboxq, phba->mbox_mem_pool); + rc = -EIO; + goto out_free_bsmbx; } } - - /* Read the port's SLI4 Parameters capabilities if supported. */ - if (phba->sli4_hba.pc_sli4_params.supported) - rc = lpfc_pc_sli4_params_get(phba, mboxq); + /* + * Get sli4 parameters that override parameters from Port capabilities. + * If this call fails it is not a critical error so continue loading. + */ + lpfc_get_sli4_parameters(phba, mboxq); mempool_free(mboxq, phba->mbox_mem_pool); - if (rc) { - rc = -EIO; - goto out_free_bsmbx; - } /* Create all the SLI4 queues */ rc = lpfc_sli4_queue_create(phba); if (rc) @@ -4452,7 +4466,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) } /** - * lpfc_init_api_table_setup - Set up init api fucntion jump table + * lpfc_init_api_table_setup - Set up init api function jump table * @phba: The hba struct for which this call is being executed. * @dev_grp: The HBA PCI-Device group number. * @@ -4466,6 +4480,7 @@ lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) { phba->lpfc_hba_init_link = lpfc_hba_init_link; phba->lpfc_hba_down_link = lpfc_hba_down_link; + phba->lpfc_selective_reset = lpfc_selective_reset; switch (dev_grp) { case LPFC_PCI_DEV_LP: phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; @@ -4835,7 +4850,7 @@ out_free_mem: * * Return codes * 0 - successful - * -ENOMEM - No availble memory + * -ENOMEM - No available memory * -EIO - The mailbox failed to complete successfully. 
**/ int @@ -5377,13 +5392,16 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba) int i, port_error = 0; uint32_t if_type; + memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); + memset(&reg_data, 0, sizeof(reg_data)); if (!phba->sli4_hba.PSMPHRregaddr) return -ENODEV; /* Wait up to 30 seconds for the SLI Port POST done and ready */ for (i = 0; i < 3000; i++) { - portsmphr_reg.word0 = readl(phba->sli4_hba.PSMPHRregaddr); - if (bf_get(lpfc_port_smphr_perr, &portsmphr_reg)) { + if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, + &portsmphr_reg.word0) || + (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) { /* Port has a fatal POST error, break out */ port_error = -ENODEV; break; @@ -5464,9 +5482,9 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba) break; case LPFC_SLI_INTF_IF_TYPE_2: /* Final checks. The port status should be clean. */ - reg_data.word0 = - readl(phba->sli4_hba.u.if_type2.STATUSregaddr); - if (bf_get(lpfc_sliport_status_err, &reg_data)) { + if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, + &reg_data.word0) || + bf_get(lpfc_sliport_status_err, &reg_data)) { phba->work_status[0] = readl(phba->sli4_hba.u.if_type2. ERR1regaddr); @@ -5712,7 +5730,7 @@ lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) * * Return codes * 0 - successful - * -ENOMEM - No availble memory + * -ENOMEM - No available memory * -EIO - The mailbox failed to complete successfully. **/ static int @@ -5817,7 +5835,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) * * Return codes * 0 - successful - * -ENOMEM - No availble memory + * -ENOMEM - No available memory * -EIO - The mailbox failed to complete successfully. **/ static int @@ -5876,7 +5894,7 @@ lpfc_setup_endian_order(struct lpfc_hba *phba) * * Return codes * 0 - successful - * -ENOMEM - No availble memory + * -ENOMEM - No available memory * -EIO - The mailbox failed to complete successfully. **/ static int @@ -6171,7 +6189,7 @@ out_error: * * Return codes * 0 - successful - * -ENOMEM - No availble memory + * -ENOMEM - No available memory * -EIO - The mailbox failed to complete successfully. **/ static void @@ -6235,7 +6253,7 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba) * * Return codes * 0 - successful - * -ENOMEM - No availble memory + * -ENOMEM - No available memory * -EIO - The mailbox failed to complete successfully. **/ int @@ -6480,7 +6498,7 @@ out_error: * * Return codes * 0 - successful - * -ENOMEM - No availble memory + * -ENOMEM - No available memory * -EIO - The mailbox failed to complete successfully. **/ void @@ -6525,7 +6543,7 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba) * * Return codes * 0 - successful - * -ENOMEM - No availble memory + * -ENOMEM - No available memory **/ static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) @@ -6686,7 +6704,7 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) * * Return codes * 0 - successful - * -ENOMEM - No availble memory + * -ENOMEM - No available memory * -EIO - The mailbox failed to complete successfully. **/ int @@ -6752,9 +6770,11 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) * the loop again. */ for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) { - reg_data.word0 = - readl(phba->sli4_hba.u.if_type2. - STATUSregaddr); + if (lpfc_readl(phba->sli4_hba.u.if_type2. + STATUSregaddr, &reg_data.word0)) { + rc = -ENODEV; + break; + } if (bf_get(lpfc_sliport_status_rdy, &reg_data)) break; if (bf_get(lpfc_sliport_status_rn, &reg_data)) { @@ -6775,8 +6795,11 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) } /* Detect any port errors. */ - reg_data.word0 = readl(phba->sli4_hba.u.if_type2. 
- STATUSregaddr); + if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, + &reg_data.word0)) { + rc = -ENODEV; + break; + } if ((bf_get(lpfc_sliport_status_err, &reg_data)) || (rdy_chk >= 1000)) { phba->work_status[0] = readl( @@ -7810,7 +7833,7 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) mqe = &mboxq->u.mqe; /* Read the port's SLI4 Parameters port capabilities */ - lpfc_sli4_params(mboxq); + lpfc_pc_sli4_params(mboxq); if (!phba->sli4_hba.intr_enable) rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); else { @@ -7854,6 +7877,66 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) } /** + * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. + * @phba: Pointer to HBA context object. + * @mboxq: Pointer to the mailboxq memory for the mailbox command response. + * + * This function is called in the SLI4 code path to read the port's + * sli4 capabilities. + * + * This function may be called from any context that can block-wait + * for the completion. The expectation is that this routine is called + * typically from probe_one or from the online routine. + **/ +int +lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + int rc; + struct lpfc_mqe *mqe = &mboxq->u.mqe; + struct lpfc_pc_sli4_params *sli4_params; + int length; + struct lpfc_sli4_parameters *mbx_sli4_parameters; + + /* Read the port's SLI4 Config Parameters */ + length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, + length, LPFC_SLI4_MBX_EMBED); + if (!phba->sli4_hba.intr_enable) + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + else + rc = lpfc_sli_issue_mbox_wait(phba, mboxq, + lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG)); + if (unlikely(rc)) + return rc; + sli4_params = &phba->sli4_hba.pc_sli4_params; + mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; + sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); + sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); + sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); + sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, + mbx_sli4_parameters); + sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, + mbx_sli4_parameters); + if (bf_get(cfg_phwq, mbx_sli4_parameters)) + phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; + else + phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; + sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; + sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope, + mbx_sli4_parameters); + sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); + sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); + sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); + sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); + sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, + mbx_sli4_parameters); + sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, + mbx_sli4_parameters); + return 0; +} + +/** + * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 
* @pdev: pointer to PCI device * @pid: pointer to PCI device identifier diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 23403c650207..fbab9734e9b4 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -1263,7 +1263,8 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) { if (phba->cfg_enable_bg) mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */ - mb->un.varCfgPort.cdss = 1; /* Configure Security */ + if (phba->cfg_enable_dss) + mb->un.varCfgPort.cdss = 1; /* Configure Security */ mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */ mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count(); @@ -1692,7 +1693,7 @@ lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox) * @mbox: pointer to lpfc mbox command. * @subsystem: The sli4 config sub mailbox subsystem. * @opcode: The sli4 config sub mailbox command opcode. - * @length: Length of the sli4 config mailbox command. + * @length: Length of the sli4 config mailbox command (including sub-header). * * This routine sets up the header fields of SLI4 specific mailbox command * for sending IOCTL command. @@ -1723,14 +1724,14 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox, if (emb) { /* Set up main header fields */ bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1); - sli4_config->header.cfg_mhdr.payload_length = - LPFC_MBX_CMD_HDR_LENGTH + length; + sli4_config->header.cfg_mhdr.payload_length = length; /* Set up sub-header fields following main header */ bf_set(lpfc_mbox_hdr_opcode, &sli4_config->header.cfg_shdr.request, opcode); bf_set(lpfc_mbox_hdr_subsystem, &sli4_config->header.cfg_shdr.request, subsystem); - sli4_config->header.cfg_shdr.request.request_length = length; + sli4_config->header.cfg_shdr.request.request_length = + length - LPFC_MBX_CMD_HDR_LENGTH; return length; } @@ -1833,7 +1834,7 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox) * @fcf_index: index to fcf table. * * This routine routine allocates and constructs non-embedded mailbox command - * for reading a FCF table entry refered by @fcf_index. + * for reading a FCF table entry referred by @fcf_index. * * Return: pointer to the mailbox command constructed if successful, otherwise * NULL. @@ -1902,6 +1903,7 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq) /* Set up host requested features. */ bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1); + bf_set(lpfc_mbx_rq_ftr_rq_perfh, &mboxq->u.mqe.un.req_ftrs, 1); /* Enable DIF (block guard) only if configured to do so. */ if (phba->cfg_enable_bg) @@ -2159,17 +2161,16 @@ lpfc_supported_pages(struct lpfcMboxq *mbox) } /** - * lpfc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params - * mailbox command. + * lpfc_pc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params mbox cmd. * @mbox: pointer to lpfc mbox command to initialize. * * The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to * retrieve the particular SLI4 features supported by the port. 
**/ void -lpfc_sli4_params(struct lpfcMboxq *mbox) +lpfc_pc_sli4_params(struct lpfcMboxq *mbox) { - struct lpfc_mbx_sli4_params *sli4_params; + struct lpfc_mbx_pc_sli4_params *sli4_params; memset(mbox, 0, sizeof(*mbox)); sli4_params = &mbox->u.mqe.un.sli4_params; diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h index f3cfbe2ce986..f2b1bbcb196f 100644 --- a/drivers/scsi/lpfc/lpfc_nl.h +++ b/drivers/scsi/lpfc/lpfc_nl.h @@ -50,7 +50,7 @@ * and subcategory. The event type must come first. * The subcategory further defines the data that follows in the rest * of the payload. Each category will have its own unique header plus - * any addtional data unique to the subcategory. + * any additional data unique to the subcategory. * The payload sent via the fc transport is one-way driver->application. */ diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index d85a7423a694..0d92d4205ea6 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -350,7 +350,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; - /* no need to reg_login if we are already in one of these states */ + /* + * Need to unreg_login if we are already in one of these states and + * change to NPR state. This will block the port until after the ACC + * completes and the reg_login is issued and completed. + */ switch (ndlp->nlp_state) { case NLP_STE_NPR_NODE: if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) @@ -359,8 +363,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, case NLP_STE_PRLI_ISSUE: case NLP_STE_UNMAPPED_NODE: case NLP_STE_MAPPED_NODE: - lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL); - return 1; + lpfc_unreg_rpi(vport, ndlp); + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); } if ((vport->fc_flag & FC_PT2PT) && @@ -653,7 +658,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) return 0; } /** - * lpfc_release_rpi - Release a RPI by issueing unreg_login mailbox cmd. + * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd. * @phba : Pointer to lpfc_hba structure. * @vport: Pointer to lpfc_vport structure. * @rpi : rpi to be release. diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index c97751c95d77..fe7cc84e773b 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -577,7 +577,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) iocb->un.fcpi64.bdl.addrHigh = 0; iocb->ulpBdeCount = 0; iocb->ulpLe = 0; - /* fill in responce BDE */ + /* fill in response BDE */ iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize = @@ -609,6 +609,32 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) } /** + * lpfc_sli4_vport_delete_fcp_xri_aborted -Remove all ndlp references for vport + * @vport: pointer to lpfc vport data structure. 
+ * + * This routine is invoked by the vport cleanup for deletions and the cleanup + * for an ndlp on removal. + **/ +void +lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_scsi_buf *psb, *next_psb; + unsigned long iflag = 0; + + spin_lock_irqsave(&phba->hbalock, iflag); + spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); + list_for_each_entry_safe(psb, next_psb, + &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { + if (psb->rdata && psb->rdata->pnode + && psb->rdata->pnode->vport == vport) + psb->rdata = NULL; + } + spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); + spin_unlock_irqrestore(&phba->hbalock, iflag); +} + +/** * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort * @phba: pointer to lpfc hba data structure. * @axri: pointer to the fcp xri abort wcqe structure. @@ -640,7 +666,11 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, psb->status = IOSTAT_SUCCESS; spin_unlock( &phba->sli4_hba.abts_scsi_buf_list_lock); - ndlp = psb->rdata->pnode; + if (psb->rdata && psb->rdata->pnode) + ndlp = psb->rdata->pnode; + else + ndlp = NULL; + rrq_empty = list_empty(&phba->active_rrq_list); spin_unlock_irqrestore(&phba->hbalock, iflag); if (ndlp) @@ -964,36 +994,29 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) static struct lpfc_scsi_buf* lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { - struct lpfc_scsi_buf *lpfc_cmd = NULL; - struct lpfc_scsi_buf *start_lpfc_cmd = NULL; - struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list; + struct lpfc_scsi_buf *lpfc_cmd ; unsigned long iflag = 0; int found = 0; spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); - list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list); - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); - while (!found && lpfc_cmd) { + list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list, + list) { if (lpfc_test_rrq_active(phba, ndlp, - lpfc_cmd->cur_iocbq.sli4_xritag)) { - lpfc_release_scsi_buf_s4(phba, lpfc_cmd); - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); - list_remove_head(scsi_buf_list, lpfc_cmd, - struct lpfc_scsi_buf, list); - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, - iflag); - if (lpfc_cmd == start_lpfc_cmd) { - lpfc_cmd = NULL; - break; - } else - continue; - } + lpfc_cmd->cur_iocbq.sli4_xritag)) + continue; + list_del(&lpfc_cmd->list); found = 1; lpfc_cmd->seg_cnt = 0; lpfc_cmd->nonsg_phys = 0; lpfc_cmd->prot_seg_cnt = 0; + break; } - return lpfc_cmd; + spin_unlock_irqrestore(&phba->scsi_buf_list_lock, + iflag); + if (!found) + return NULL; + else + return lpfc_cmd; } /** * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA @@ -1194,10 +1217,10 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) (2 * sizeof(struct ulp_bde64))); data_bde->addrHigh = putPaddrHigh(physaddr); data_bde->addrLow = putPaddrLow(physaddr); - /* ebde count includes the responce bde and data bpl */ + /* ebde count includes the response bde and data bpl */ iocb_cmd->unsli3.fcp_ext.ebde_count = 2; } else { - /* ebde count includes the responce bde and data bdes */ + /* ebde count includes the response bde and data bdes */ iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1); } } else { @@ -1491,10 +1514,11 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct scatterlist *sgpe = NULL; /* s/g prot entry */ struct lpfc_pde5 *pde5 = NULL; struct lpfc_pde6 *pde6 = NULL; - struct ulp_bde64 
*prot_bde = NULL; + struct lpfc_pde7 *pde7 = NULL; dma_addr_t dataphysaddr, protphysaddr; unsigned short curr_data = 0, curr_prot = 0; - unsigned int split_offset, protgroup_len; + unsigned int split_offset; + unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; unsigned int protgrp_blks, protgrp_bytes; unsigned int remainder, subtotal; int status; @@ -1562,23 +1586,33 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, bpl++; /* setup the first BDE that points to protection buffer */ - prot_bde = (struct ulp_bde64 *) bpl; - protphysaddr = sg_dma_address(sgpe); - prot_bde->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr)); - prot_bde->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr)); - protgroup_len = sg_dma_len(sgpe); + protphysaddr = sg_dma_address(sgpe) + protgroup_offset; + protgroup_len = sg_dma_len(sgpe) - protgroup_offset; /* must be integer multiple of the DIF block length */ BUG_ON(protgroup_len % 8); + pde7 = (struct lpfc_pde7 *) bpl; + memset(pde7, 0, sizeof(struct lpfc_pde7)); + bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR); + + pde7->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr)); + pde7->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr)); + protgrp_blks = protgroup_len / 8; protgrp_bytes = protgrp_blks * blksize; - prot_bde->tus.f.bdeSize = protgroup_len; - prot_bde->tus.f.bdeFlags = LPFC_PDE7_DESCRIPTOR; - prot_bde->tus.w = le32_to_cpu(bpl->tus.w); + /* check if this pde is crossing the 4K boundary; if so split */ + if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) { + protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff); + protgroup_offset += protgroup_remainder; + protgrp_blks = protgroup_remainder / 8; + protgrp_bytes = protgroup_remainder * blksize; + } else { + protgroup_offset = 0; + curr_prot++; + } - curr_prot++; num_bde++; /* setup BDE's for data blocks associated with DIF data */ @@ -1630,6 +1664,13 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, } + if (protgroup_offset) { + /* update the reference tag */ + reftag += protgrp_blks; + bpl++; + continue; + } + /* are we done ? 
*/ if (curr_prot == protcnt) { alldone = 1; @@ -1652,6 +1693,7 @@ out: return num_bde; } + /* * Given a SCSI command that supports DIF, determine composition of protection * groups involved in setting up buffer lists @@ -1981,12 +2023,14 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) struct scatterlist *sgel = NULL; struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; + struct sli4_sge *first_data_sgl; IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; dma_addr_t physaddr; uint32_t num_bde = 0; uint32_t dma_len; uint32_t dma_offset = 0; int nseg; + struct ulp_bde64 *bde; /* * There are three possibilities here - use scatter-gather segment, use @@ -2011,7 +2055,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) bf_set(lpfc_sli4_sge_last, sgl, 0); sgl->word2 = cpu_to_le32(sgl->word2); sgl += 1; - + first_data_sgl = sgl; lpfc_cmd->seg_cnt = nseg; if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:" @@ -2047,6 +2091,17 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) dma_offset += dma_len; sgl++; } + /* setup the performance hint (first data BDE) if enabled */ + if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) { + bde = (struct ulp_bde64 *) + &(iocb_cmd->unsli3.sli3Words[5]); + bde->addrLow = first_data_sgl->addr_lo; + bde->addrHigh = first_data_sgl->addr_hi; + bde->tus.f.bdeSize = + le32_to_cpu(first_data_sgl->sge_len); + bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; + bde->tus.w = cpu_to_le32(bde->tus.w); + } } else { sgl += 1; /* clear the last flag in the fcp_rsp map entry */ @@ -2325,7 +2380,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, } /* * The cmnd->underflow is the minimum number of bytes that must - * be transfered for this command. Provided a sense condition + * be transferred for this command. Provided a sense condition * is not present, make sure the actual amount transferred is at * least the underflow value or fail. */ @@ -2471,6 +2526,16 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, lpfc_worker_wake_up(phba); break; case IOSTAT_LOCAL_REJECT: + case IOSTAT_REMOTE_STOP: + if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR || + lpfc_cmd->result == + IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR || + lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR || + lpfc_cmd->result == + IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) { + cmd->result = ScsiResult(DID_NO_CONNECT, 0); + break; + } if (lpfc_cmd->result == IOERR_INVALID_RPI || lpfc_cmd->result == IOERR_NO_RESOURCES || lpfc_cmd->result == IOERR_ABORT_REQUESTED || @@ -2478,7 +2543,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, cmd->result = ScsiResult(DID_REQUEUE, 0); break; } - if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || lpfc_cmd->result == IOERR_TX_DMA_FAILED) && pIocbOut->iocb.unsli3.sli3_bg.bgstat) { @@ -2497,7 +2561,17 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, "on unprotected cmd\n"); } } - + if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP) + && (phba->sli_rev == LPFC_SLI_REV4) + && (pnode && NLP_CHK_NODE_ACT(pnode))) { + /* This IO was aborted by the target, we don't + * know the rxid and because we did not send the + * ABTS we cannot generate and RRQ. 
+ */ + lpfc_set_rrq_active(phba, pnode, + lpfc_cmd->cur_iocbq.sli4_xritag, + 0, 0); + } /* else: fall through */ default: cmd->result = ScsiResult(DID_ERROR, 0); @@ -2508,9 +2582,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, SAM_STAT_BUSY); - } else { + } else cmd->result = ScsiResult(DID_OK, 0); - } if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { uint32_t *lp = (uint32_t *)cmd->sense_buffer; @@ -2800,7 +2873,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, } /** - * lpfc_scsi_api_table_setup - Set up scsi api fucntion jump table + * lpfc_scsi_api_table_setup - Set up scsi api function jump table * @phba: The hba struct for which this call is being executed. * @dev_grp: The HBA PCI-Device group number. * @@ -3004,11 +3077,11 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) * transport is still transitioning. */ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { - cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0); + cmnd->result = ScsiResult(DID_IMM_RETRY, 0); goto out_fail_command; } if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) - goto out_host_busy; + goto out_tgt_busy; lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp); if (lpfc_cmd == NULL) { @@ -3125,6 +3198,9 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) out_host_busy: return SCSI_MLQUEUE_HOST_BUSY; + out_tgt_busy: + return SCSI_MLQUEUE_TARGET_BUSY; + out_fail_command: done(cmnd); return 0; diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index 5932273870a5..ce645b20a6ad 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -130,7 +130,7 @@ struct lpfc_scsi_buf { dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */ /* - * data and dma_handle are the kernel virutal and bus address of the + * data and dma_handle are the kernel virtual and bus address of the * dma-able buffer containing the fcp_cmd, fcp_rsp and a scatter * gather bde list that supports the sg_tablesize value. */ diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index a359d2b873ce..dacabbe0a586 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -96,7 +96,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe) /* set consumption flag every once in a while */ if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL)) bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); - + if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED) + bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id); lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size); /* Update the host index before invoking device */ @@ -534,15 +535,35 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint16_t adj_xri; struct lpfc_node_rrq *rrq; int empty; + uint32_t did = 0; + + + if (!ndlp) + return -EINVAL; + + if (!phba->cfg_enable_rrq) + return -EINVAL; + + if (phba->pport->load_flag & FC_UNLOADING) { + phba->hba_flag &= ~HBA_RRQ_ACTIVE; + goto out; + } + did = ndlp->nlp_DID; /* * set the active bit even if there is no mem available. */ adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base; - if (!ndlp) - return -EINVAL; + + if (NLP_CHK_FREE_REQ(ndlp)) + goto out; + + if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING)) + goto out; + if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) - return -EINVAL; + goto out; + rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL); if (rrq) { rrq->send_rrq = send_rrq; @@ -553,14 +574,7 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, rrq->vport = ndlp->vport; rrq->rxid = rxid; empty = list_empty(&phba->active_rrq_list); - if (phba->cfg_enable_rrq && send_rrq) - /* - * We need the xri before we can add this to the - * phba active rrq list. - */ - rrq->send_rrq = send_rrq; - else - rrq->send_rrq = 0; + rrq->send_rrq = send_rrq; list_add_tail(&rrq->list, &phba->active_rrq_list); if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) { phba->hba_flag |= HBA_RRQ_ACTIVE; @@ -569,40 +583,49 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, } return 0; } - return -ENOMEM; +out: + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2921 Can't set rrq active xri:0x%x rxid:0x%x" + " DID:0x%x Send:%d\n", + xritag, rxid, did, send_rrq); + return -EINVAL; } /** - * __lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap. + * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap. * @phba: Pointer to HBA context object. * @xritag: xri used in this exchange. * @rrq: The RRQ to be cleared. * - * This function is called with hbalock held. This function **/ -static void -__lpfc_clr_rrq_active(struct lpfc_hba *phba, - uint16_t xritag, - struct lpfc_node_rrq *rrq) +void +lpfc_clr_rrq_active(struct lpfc_hba *phba, + uint16_t xritag, + struct lpfc_node_rrq *rrq) { uint16_t adj_xri; - struct lpfc_nodelist *ndlp; + struct lpfc_nodelist *ndlp = NULL; - ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID); + if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp)) + ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID); /* The target DID could have been swapped (cable swap) * we should use the ndlp from the findnode if it is * available. 
*/ - if (!ndlp) + if ((!ndlp) && rrq->ndlp) ndlp = rrq->ndlp; + if (!ndlp) + goto out; + adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base; if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) { rrq->send_rrq = 0; rrq->xritag = 0; rrq->rrq_stop_time = 0; } +out: mempool_free(rrq, phba->rrq_pool); } @@ -627,34 +650,34 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba) struct lpfc_node_rrq *nextrrq; unsigned long next_time; unsigned long iflags; + LIST_HEAD(send_rrq); spin_lock_irqsave(&phba->hbalock, iflags); phba->hba_flag &= ~HBA_RRQ_ACTIVE; next_time = jiffies + HZ * (phba->fc_ratov + 1); list_for_each_entry_safe(rrq, nextrrq, - &phba->active_rrq_list, list) { - if (time_after(jiffies, rrq->rrq_stop_time)) { - list_del(&rrq->list); - if (!rrq->send_rrq) - /* this call will free the rrq */ - __lpfc_clr_rrq_active(phba, rrq->xritag, rrq); - else { - /* if we send the rrq then the completion handler - * will clear the bit in the xribitmap. - */ - spin_unlock_irqrestore(&phba->hbalock, iflags); - if (lpfc_send_rrq(phba, rrq)) { - lpfc_clr_rrq_active(phba, rrq->xritag, - rrq); - } - spin_lock_irqsave(&phba->hbalock, iflags); - } - } else if (time_before(rrq->rrq_stop_time, next_time)) + &phba->active_rrq_list, list) { + if (time_after(jiffies, rrq->rrq_stop_time)) + list_move(&rrq->list, &send_rrq); + else if (time_before(rrq->rrq_stop_time, next_time)) next_time = rrq->rrq_stop_time; } spin_unlock_irqrestore(&phba->hbalock, iflags); if (!list_empty(&phba->active_rrq_list)) mod_timer(&phba->rrq_tmr, next_time); + list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) { + list_del(&rrq->list); + if (!rrq->send_rrq) + /* this call will free the rrq */ + lpfc_clr_rrq_active(phba, rrq->xritag, rrq); + else if (lpfc_send_rrq(phba, rrq)) { + /* if we send the rrq then the completion handler + * will clear the bit in the xribitmap. + */ + lpfc_clr_rrq_active(phba, rrq->xritag, + rrq); + } + } } /** @@ -692,29 +715,37 @@ lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did) /** * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport. * @vport: Pointer to vport context object. - * - * Remove all active RRQs for this vport from the phba->active_rrq_list and - * clear the rrq. + * @ndlp: Pointer to the lpfc_node_list structure. + * If ndlp is NULL Remove all active RRQs for this vport from the + * phba->active_rrq_list and clear the rrq. + * If ndlp is not NULL then only remove rrqs for this vport & this ndlp. 
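
The reworked cleanup and timer routines below stop freeing RRQs while hbalock is held: matching entries are first moved onto a local list under the lock, and lpfc_clr_rrq_active() (which frees back to the mempool) runs only after the lock is dropped. A minimal sketch of that idiom, using hypothetical names (my_entry, my_lock, my_active_list are not lpfc symbols):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_entry {
	struct list_head list;
	/* payload omitted */
};

static DEFINE_SPINLOCK(my_lock);
static LIST_HEAD(my_active_list);

static void my_drain_active_list(void)
{
	struct my_entry *e, *next;
	unsigned long flags;
	LIST_HEAD(tmp);

	/* Detach everything while protected, but do no real work yet. */
	spin_lock_irqsave(&my_lock, flags);
	list_splice_init(&my_active_list, &tmp);
	spin_unlock_irqrestore(&my_lock, flags);

	/* Now free (or otherwise process) each entry without the spinlock. */
	list_for_each_entry_safe(e, next, &tmp, list) {
		list_del(&e->list);
		kfree(e);
	}
}

The filtered variant seen in the vport cleanup works the same way, except list_move() pulls over only the entries that match before the lock is released.
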
**/ void -lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport) +lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { struct lpfc_hba *phba = vport->phba; struct lpfc_node_rrq *rrq; struct lpfc_node_rrq *nextrrq; unsigned long iflags; + LIST_HEAD(rrq_list); if (phba->sli_rev != LPFC_SLI_REV4) return; - spin_lock_irqsave(&phba->hbalock, iflags); - list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { - if (rrq->vport == vport) { - list_del(&rrq->list); - __lpfc_clr_rrq_active(phba, rrq->xritag, rrq); - } + if (!ndlp) { + lpfc_sli4_vport_delete_els_xri_aborted(vport); + lpfc_sli4_vport_delete_fcp_xri_aborted(vport); } + spin_lock_irqsave(&phba->hbalock, iflags); + list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) + if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp)) + list_move(&rrq->list, &rrq_list); spin_unlock_irqrestore(&phba->hbalock, iflags); + + list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) { + list_del(&rrq->list); + lpfc_clr_rrq_active(phba, rrq->xritag, rrq); + } } /** @@ -732,24 +763,27 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba) struct lpfc_node_rrq *nextrrq; unsigned long next_time; unsigned long iflags; + LIST_HEAD(rrq_list); if (phba->sli_rev != LPFC_SLI_REV4) return; spin_lock_irqsave(&phba->hbalock, iflags); phba->hba_flag &= ~HBA_RRQ_ACTIVE; next_time = jiffies + HZ * (phba->fc_ratov * 2); - list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { + list_splice_init(&phba->active_rrq_list, &rrq_list); + spin_unlock_irqrestore(&phba->hbalock, iflags); + + list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) { list_del(&rrq->list); - __lpfc_clr_rrq_active(phba, rrq->xritag, rrq); + lpfc_clr_rrq_active(phba, rrq->xritag, rrq); } - spin_unlock_irqrestore(&phba->hbalock, iflags); if (!list_empty(&phba->active_rrq_list)) mod_timer(&phba->rrq_tmr, next_time); } /** - * __lpfc_test_rrq_active - Test RRQ bit in xri_bitmap. + * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap. * @phba: Pointer to HBA context object. * @ndlp: Targets nodelist pointer for this exchange. * @xritag the xri in the bitmap to test. @@ -758,8 +792,8 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba) * returns 0 = rrq not active for this xri * 1 = rrq is valid for this xri. **/ -static int -__lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, +int +lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint16_t xritag) { uint16_t adj_xri; @@ -802,52 +836,6 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, } /** - * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap. - * @phba: Pointer to HBA context object. - * @xritag: xri used in this exchange. - * @rrq: The RRQ to be cleared. - * - * This function is takes the hbalock. - **/ -void -lpfc_clr_rrq_active(struct lpfc_hba *phba, - uint16_t xritag, - struct lpfc_node_rrq *rrq) -{ - unsigned long iflags; - - spin_lock_irqsave(&phba->hbalock, iflags); - __lpfc_clr_rrq_active(phba, xritag, rrq); - spin_unlock_irqrestore(&phba->hbalock, iflags); - return; -} - - - -/** - * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap. - * @phba: Pointer to HBA context object. - * @ndlp: Targets nodelist pointer for this exchange. - * @xritag the xri in the bitmap to test. - * - * This function takes the hbalock. - * returns 0 = rrq not active for this xri - * 1 = rrq is valid for this xri. 
- **/ -int -lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, - uint16_t xritag) -{ - int ret; - unsigned long iflags; - - spin_lock_irqsave(&phba->hbalock, iflags); - ret = __lpfc_test_rrq_active(phba, ndlp, xritag); - spin_unlock_irqrestore(&phba->hbalock, iflags); - return ret; -} - -/** * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool * @phba: Pointer to HBA context object. * @piocb: Pointer to the iocbq. @@ -884,7 +872,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) return NULL; adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base; - if (__lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) { + if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) { /* This xri has an rrq outstanding for this DID. * put it back in the list and get another xri. */ @@ -969,7 +957,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) } else { sglq->state = SGL_FREED; sglq->ndlp = NULL; - list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list); + list_add_tail(&sglq->list, + &phba->sli4_hba.lpfc_sgl_list); /* Check if TXQ queue needs to be serviced */ if (pring->txq_cnt) @@ -2828,7 +2817,7 @@ void lpfc_poll_eratt(unsigned long ptr) * This function is called from the interrupt context when there is a ring * event for the fcp ring. The caller does not hold any lock. * The function processes each response iocb in the response ring until it - * finds an iocb with LE bit set and chains all the iocbs upto the iocb with + * finds an iocb with LE bit set and chains all the iocbs up to the iocb with * LE bit set. The function will call the completion handler of the command iocb * if the response iocb indicates a completion for a command iocb or it is * an abort completion. 
The function will call lpfc_sli_process_unsol_iocb @@ -3488,7 +3477,8 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) int retval = 0; /* Read the HBA Host Status Register */ - status = readl(phba->HSregaddr); + if (lpfc_readl(phba->HSregaddr, &status)) + return 1; /* * Check status register every 100ms for 5 retries, then every @@ -3513,7 +3503,10 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) lpfc_sli_brdrestart(phba); } /* Read the HBA Host Status Register */ - status = readl(phba->HSregaddr); + if (lpfc_readl(phba->HSregaddr, &status)) { + retval = 1; + break; + } } /* Check to see if any errors occurred during init */ @@ -3595,7 +3588,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) uint32_t __iomem *resp_buf; uint32_t __iomem *mbox_buf; volatile uint32_t mbox; - uint32_t hc_copy; + uint32_t hc_copy, ha_copy, resp_data; int i; uint8_t hdrtype; @@ -3612,12 +3605,15 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) resp_buf = phba->MBslimaddr; /* Disable the error attention */ - hc_copy = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &hc_copy)) + return; writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); readl(phba->HCregaddr); /* flush */ phba->link_flag |= LS_IGNORE_ERATT; - if (readl(phba->HAregaddr) & HA_ERATT) { + if (lpfc_readl(phba->HAregaddr, &ha_copy)) + return; + if (ha_copy & HA_ERATT) { /* Clear Chip error bit */ writel(HA_ERATT, phba->HAregaddr); phba->pport->stopped = 1; @@ -3631,11 +3627,18 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) mbox_buf = phba->MBslimaddr; writel(mbox, mbox_buf); - for (i = 0; - readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++) - mdelay(1); - - if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { + for (i = 0; i < 50; i++) { + if (lpfc_readl((resp_buf + 1), &resp_data)) + return; + if (resp_data != ~(BARRIER_TEST_PATTERN)) + mdelay(1); + else + break; + } + resp_data = 0; + if (lpfc_readl((resp_buf + 1), &resp_data)) + return; + if (resp_data != ~(BARRIER_TEST_PATTERN)) { if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || phba->pport->stopped) goto restore_hc; @@ -3644,13 +3647,26 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) } ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; - for (i = 0; readl(resp_buf) != mbox && i < 500; i++) - mdelay(1); + resp_data = 0; + for (i = 0; i < 500; i++) { + if (lpfc_readl(resp_buf, &resp_data)) + return; + if (resp_data != mbox) + mdelay(1); + else + break; + } clear_errat: - while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500) - mdelay(1); + while (++i < 500) { + if (lpfc_readl(phba->HAregaddr, &ha_copy)) + return; + if (!(ha_copy & HA_ERATT)) + mdelay(1); + else + break; + } if (readl(phba->HAregaddr) & HA_ERATT) { writel(HA_ERATT, phba->HAregaddr); @@ -3697,7 +3713,11 @@ lpfc_sli_brdkill(struct lpfc_hba *phba) /* Disable the error attention */ spin_lock_irq(&phba->hbalock); - status = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &status)) { + spin_unlock_irq(&phba->hbalock); + mempool_free(pmb, phba->mbox_mem_pool); + return 1; + } status &= ~HC_ERINT_ENA; writel(status, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ @@ -3731,11 +3751,12 @@ lpfc_sli_brdkill(struct lpfc_hba *phba) * 3 seconds we still set HBA_ERROR state because the status of the * board is now undefined. 
*/ - ha_copy = readl(phba->HAregaddr); - + if (lpfc_readl(phba->HAregaddr, &ha_copy)) + return 1; while ((i++ < 30) && !(ha_copy & HA_ERATT)) { mdelay(100); - ha_copy = readl(phba->HAregaddr); + if (lpfc_readl(phba->HAregaddr, &ha_copy)) + return 1; } del_timer_sync(&psli->mbox_tmo); @@ -4029,7 +4050,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba) uint32_t status, i = 0; /* Read the HBA Host Status Register */ - status = readl(phba->HSregaddr); + if (lpfc_readl(phba->HSregaddr, &status)) + return -EIO; /* Check status register to see what current state is */ i = 0; @@ -4084,7 +4106,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba) lpfc_sli_brdrestart(phba); } /* Read the HBA Host Status Register */ - status = readl(phba->HSregaddr); + if (lpfc_readl(phba->HSregaddr, &status)) + return -EIO; } /* Check to see if any errors occurred during init */ @@ -4817,7 +4840,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) "0378 No support for fcpi mode.\n"); ftr_rsp++; } - + if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs)) + phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED; + else + phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED; /* * If the port cannot support the host's requested features * then turn off the global config parameters to disable the @@ -5004,7 +5030,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) spin_lock_irq(&phba->hbalock); phba->link_state = LPFC_LINK_DOWN; spin_unlock_irq(&phba->hbalock); - rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); + if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) + rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); out_unset_queue: /* Unset all the queues set up in this routine when error out */ if (rc) @@ -5090,7 +5117,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) /* Setting state unknown so lpfc_sli_abort_iocb_ring * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing - * it to fail all oustanding SCSI IO. + * it to fail all outstanding SCSI IO. */ spin_lock_irq(&phba->pport->work_port_lock); phba->pport->work_port_events &= ~WORKER_MBOX_TMO; @@ -5143,7 +5170,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, MAILBOX_t *mb; struct lpfc_sli *psli = &phba->sli; uint32_t status, evtctr; - uint32_t ha_copy; + uint32_t ha_copy, hc_copy; int i; unsigned long timeout; unsigned long drvr_flag = 0; @@ -5209,15 +5236,17 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, goto out_not_finished; } - if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && - !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { - spin_unlock_irqrestore(&phba->hbalock, drvr_flag); - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { + if (lpfc_readl(phba->HCregaddr, &hc_copy) || + !(hc_copy & HC_MBINT_ENA)) { + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "(%d):2528 Mailbox command x%x cannot " "issue Data: x%x x%x\n", pmbox->vport ? 
pmbox->vport->vpi : 0, pmbox->u.mb.mbxCommand, psli->sli_flag, flag); - goto out_not_finished; + goto out_not_finished; + } } if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { @@ -5415,11 +5444,19 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, word0 = le32_to_cpu(word0); } else { /* First read mbox status word */ - word0 = readl(phba->MBslimaddr); + if (lpfc_readl(phba->MBslimaddr, &word0)) { + spin_unlock_irqrestore(&phba->hbalock, + drvr_flag); + goto out_not_finished; + } } /* Read the HBA Host Attention Register */ - ha_copy = readl(phba->HAregaddr); + if (lpfc_readl(phba->HAregaddr, &ha_copy)) { + spin_unlock_irqrestore(&phba->hbalock, + drvr_flag); + goto out_not_finished; + } timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mb->mbxCommand) * 1000) + jiffies; @@ -5470,7 +5507,11 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, word0 = readl(phba->MBslimaddr); } /* Read the HBA Host Attention Register */ - ha_copy = readl(phba->HAregaddr); + if (lpfc_readl(phba->HAregaddr, &ha_copy)) { + spin_unlock_irqrestore(&phba->hbalock, + drvr_flag); + goto out_not_finished; + } } if (psli->sli_flag & LPFC_SLI_ACTIVE) { @@ -5990,7 +6031,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) } /** - * lpfc_mbox_api_table_setup - Set up mbox api fucntion jump table + * lpfc_mbox_api_table_setup - Set up mbox api function jump table * @phba: The hba struct for which this call is being executed. * @dev_grp: The HBA PCI-Device group number. * @@ -6270,7 +6311,6 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, bf_set(lpfc_sli4_sge_last, sgl, 1); else bf_set(lpfc_sli4_sge_last, sgl, 0); - sgl->word2 = cpu_to_le32(sgl->word2); /* swap the size field back to the cpu so we * can assign it to the sgl. */ @@ -6290,6 +6330,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, bf_set(lpfc_sli4_sge_offset, sgl, offset); offset += bde.tus.f.bdeSize; } + sgl->word2 = cpu_to_le32(sgl->word2); bpl++; sgl++; } @@ -6535,9 +6576,9 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / sizeof(struct ulp_bde64); for (i = 0; i < numBdes; i++) { - if (bpl[i].tus.f.bdeFlags != BUFF_TYPE_BDE_64) - break; bde.tus.w = le32_to_cpu(bpl[i].tus.w); + if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) + break; xmit_len += bde.tus.f.bdeSize; } /* word3 iocb=IO_TAG wqe=request_payload_len */ @@ -6627,15 +6668,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, xritag = 0; break; case CMD_XMIT_BLS_RSP64_CX: - /* As BLS ABTS-ACC WQE is very different from other WQEs, + /* As BLS ABTS RSP WQE is very different from other WQEs, * we re-construct this WQE here based on information in * iocbq from scratch. */ memset(wqe, 0, sizeof(union lpfc_wqe)); /* OX_ID is invariable to who sent ABTS to CT exchange */ bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, - bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_acc)); - if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_acc) == + bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp)); + if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) == LPFC_ABTS_UNSOL_INT) { /* ABTS sent by initiator to CT exchange, the * RX_ID field will be filled with the newly @@ -6649,7 +6690,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, * RX_ID from ABTS. 
*/ bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, - bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_acc)); + bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp)); } bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); @@ -6660,6 +6701,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, LPFC_WQE_LENLOC_NONE); /* Overwrite the pre-set comnd type with OTHER_COMMAND */ command_type = OTHER_COMMAND; + if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) { + bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp, + bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp)); + bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp, + bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp)); + bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp, + bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp)); + } + break; case CMD_XRI_ABORTED_CX: case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ @@ -6708,7 +6758,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, if (piocb->sli4_xritag == NO_XRI) { if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || - piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) + piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN || + piocb->iocb.ulpCommand == CMD_XMIT_BLS_RSP64_CX) sglq = NULL; else { if (pring->txq_cnt) { @@ -6796,7 +6847,7 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, } /** - * lpfc_sli_api_table_setup - Set up sli api fucntion jump table + * lpfc_sli_api_table_setup - Set up sli api function jump table * @phba: The hba struct for which this call is being executed. * @dev_grp: The HBA PCI-Device group number. * @@ -7470,7 +7521,7 @@ lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_dmabuf *mp, *next_mp; struct list_head *slp = &pring->postbufq; - /* Search postbufq, from the begining, looking for a match on tag */ + /* Search postbufq, from the beginning, looking for a match on tag */ spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { if (mp->buffer_tag == tag) { @@ -7514,7 +7565,7 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_dmabuf *mp, *next_mp; struct list_head *slp = &pring->postbufq; - /* Search postbufq, from the begining, looking for a match on phys */ + /* Search postbufq, from the beginning, looking for a match on phys */ spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { if (mp->phys == phys) { @@ -8201,7 +8252,8 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, piocb->iocb_flag &= ~LPFC_IO_WAKE; if (phba->cfg_poll & DISABLE_FCP_RING_INT) { - creg_val = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &creg_val)) + return IOCB_ERROR; creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ @@ -8243,7 +8295,8 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, } if (phba->cfg_poll & DISABLE_FCP_RING_INT) { - creg_val = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &creg_val)) + return IOCB_ERROR; creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ @@ -8385,7 +8438,7 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba) * for possible error attention events. The caller must hold the hostlock * with spin_lock_irq(). 
* - * This fucntion returns 1 when there is Error Attention in the Host Attention + * This function returns 1 when there is Error Attention in the Host Attention * Register and returns 0 otherwise. **/ static int @@ -8394,10 +8447,13 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba) uint32_t ha_copy; /* Read chip Host Attention (HA) register */ - ha_copy = readl(phba->HAregaddr); + if (lpfc_readl(phba->HAregaddr, &ha_copy)) + goto unplug_err; + if (ha_copy & HA_ERATT) { /* Read host status register to retrieve error event */ - lpfc_sli_read_hs(phba); + if (lpfc_sli_read_hs(phba)) + goto unplug_err; /* Check if there is a deferred error condition is active */ if ((HS_FFER1 & phba->work_hs) && @@ -8416,6 +8472,15 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba) return 1; } return 0; + +unplug_err: + /* Set the driver HS work bitmap */ + phba->work_hs |= UNPLUG_ERR; + /* Set the driver HA work bitmap */ + phba->work_ha |= HA_ERATT; + /* Indicate polling handles this ERATT */ + phba->hba_flag |= HBA_ERATT_HANDLED; + return 1; } /** @@ -8426,7 +8491,7 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba) * for possible error attention events. The caller must hold the hostlock * with spin_lock_irq(). * - * This fucntion returns 1 when there is Error Attention in the Host Attention + * This function returns 1 when there is Error Attention in the Host Attention * Register and returns 0 otherwise. **/ static int @@ -8443,8 +8508,15 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); switch (if_type) { case LPFC_SLI_INTF_IF_TYPE_0: - uerr_sta_lo = readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); - uerr_sta_hi = readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); + if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr, + &uerr_sta_lo) || + lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr, + &uerr_sta_hi)) { + phba->work_hs |= UNPLUG_ERR; + phba->work_ha |= HA_ERATT; + phba->hba_flag |= HBA_ERATT_HANDLED; + return 1; + } if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -8463,9 +8535,15 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) } break; case LPFC_SLI_INTF_IF_TYPE_2: - portstat_reg.word0 = - readl(phba->sli4_hba.u.if_type2.STATUSregaddr); - portsmphr = readl(phba->sli4_hba.PSMPHRregaddr); + if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, + &portstat_reg.word0) || + lpfc_readl(phba->sli4_hba.PSMPHRregaddr, + &portsmphr)){ + phba->work_hs |= UNPLUG_ERR; + phba->work_ha |= HA_ERATT; + phba->hba_flag |= HBA_ERATT_HANDLED; + return 1; + } if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { phba->work_status[0] = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); @@ -8503,7 +8581,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) * This function is called from timer soft interrupt context to check HBA's * error attention register bit for error attention events. * - * This fucntion returns 1 when there is Error Attention in the Host Attention + * This function returns 1 when there is Error Attention in the Host Attention * Register and returns 0 otherwise. **/ int @@ -8646,7 +8724,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) return IRQ_NONE; /* Need to read HA REG for slow-path events */ spin_lock_irqsave(&phba->hbalock, iflag); - ha_copy = readl(phba->HAregaddr); + if (lpfc_readl(phba->HAregaddr, &ha_copy)) + goto unplug_error; /* If somebody is waiting to handle an eratt don't process it * here. The brdkill function will do this. 
*/ @@ -8672,7 +8751,9 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) } /* Clear up only attention source related to slow-path */ - hc_copy = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &hc_copy)) + goto unplug_error; + writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), phba->HCregaddr); @@ -8695,7 +8776,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) */ spin_lock_irqsave(&phba->hbalock, iflag); phba->sli.sli_flag &= ~LPFC_PROCESS_LA; - control = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &control)) + goto unplug_error; control &= ~HC_LAINT_ENA; writel(control, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ @@ -8715,7 +8797,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) status >>= (4*LPFC_ELS_RING); if (status & HA_RXMASK) { spin_lock_irqsave(&phba->hbalock, iflag); - control = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &control)) + goto unplug_error; lpfc_debugfs_slow_ring_trc(phba, "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", @@ -8748,7 +8831,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) } spin_lock_irqsave(&phba->hbalock, iflag); if (work_ha_copy & HA_ERATT) { - lpfc_sli_read_hs(phba); + if (lpfc_sli_read_hs(phba)) + goto unplug_error; /* * Check if there is a deferred error condition * is active @@ -8879,6 +8963,9 @@ send_current_mbox: lpfc_worker_wake_up(phba); } return IRQ_HANDLED; +unplug_error: + spin_unlock_irqrestore(&phba->hbalock, iflag); + return IRQ_HANDLED; } /* lpfc_sli_sp_intr_handler */ @@ -8926,7 +9013,8 @@ lpfc_sli_fp_intr_handler(int irq, void *dev_id) if (lpfc_intr_state_check(phba)) return IRQ_NONE; /* Need to read HA REG for FCP ring and other ring events */ - ha_copy = readl(phba->HAregaddr); + if (lpfc_readl(phba->HAregaddr, &ha_copy)) + return IRQ_HANDLED; /* Clear up only attention source related to fast-path */ spin_lock_irqsave(&phba->hbalock, iflag); /* @@ -9011,7 +9099,11 @@ lpfc_sli_intr_handler(int irq, void *dev_id) return IRQ_NONE; spin_lock(&phba->hbalock); - phba->ha_copy = readl(phba->HAregaddr); + if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) { + spin_unlock(&phba->hbalock); + return IRQ_HANDLED; + } + if (unlikely(!phba->ha_copy)) { spin_unlock(&phba->hbalock); return IRQ_NONE; @@ -9033,7 +9125,10 @@ lpfc_sli_intr_handler(int irq, void *dev_id) } /* Clear attention sources except link and error attentions */ - hc_copy = readl(phba->HCregaddr); + if (lpfc_readl(phba->HCregaddr, &hc_copy)) { + spin_unlock(&phba->hbalock); + return IRQ_HANDLED; + } writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), phba->HCregaddr); @@ -9589,7 +9684,7 @@ out: * @cq: Pointer to the completion queue. * @wcqe: Pointer to a completion queue entry. * - * This routine process a slow-path work-queue or recieve queue completion queue + * This routine process a slow-path work-queue or receive queue completion queue * entry. * * Return: true if work posted to worker thread, otherwise false. 
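
A single pattern runs through the register-access hunks above: every bare readl() of the HA/HC/HS and SLI4 status registers becomes an lpfc_readl() whose return value is checked, and a failed read is escalated as an unplug/error-attention event (the UNPLUG_ERR handling added earlier). A sketch of what such a guarded read can look like, assuming the helper treats an all-ones read as a surprise-removed device; this is an illustrative shape, not the driver's actual helper body.

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

/*
 * Guarded MMIO read (illustrative): a PCI adapter that has been hot-removed
 * typically reads back as all-ones, so report that as an error instead of
 * letting the interrupt and error-attention paths chew on garbage.
 */
static inline int guarded_readl(void __iomem *addr, u32 *data)
{
	u32 val = readl(addr);

	if (val == 0xffffffff)	/* device looks gone */
		return -EIO;

	*data = val;
	return 0;
}

Call sites then follow the shape used in the interrupt-handler hunks above, e.g. if (guarded_readl(regaddr, &status)) return IRQ_HANDLED; so an unplugged board is reported once rather than being misread as a normal attention condition.
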
@@ -10410,7 +10505,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, if (!phba->sli4_hba.pc_sli4_params.supported) hw_page_size = SLI4_PAGE_SIZE; - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; @@ -10420,11 +10514,22 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, LPFC_MBOX_OPCODE_CQ_CREATE, length, LPFC_SLI4_MBX_EMBED); cq_create = &mbox->u.mqe.un.cq_create; + shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, cq->page_count); bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); - bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id); + bf_set(lpfc_mbox_hdr_version, &shdr->request, + phba->sli4_hba.pc_sli4_params.cqv); + if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { + bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, + (PAGE_SIZE/SLI4_PAGE_SIZE)); + bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, + eq->queue_id); + } else { + bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, + eq->queue_id); + } switch (cq->entry_count) { default: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, @@ -10456,7 +10561,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); /* The IOCTL status is embedded in the mailbox subheader. */ - shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { @@ -10478,6 +10582,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, cq->type = type; cq->subtype = subtype; cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); + cq->assoc_qid = eq->queue_id; cq->host_index = 0; cq->hba_index = 0; @@ -10521,20 +10626,20 @@ lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); switch (mq->entry_count) { case 16: - bf_set(lpfc_mq_context_count, &mq_create->u.request.context, - LPFC_MQ_CNT_16); + bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, + LPFC_MQ_RING_SIZE_16); break; case 32: - bf_set(lpfc_mq_context_count, &mq_create->u.request.context, - LPFC_MQ_CNT_32); + bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, + LPFC_MQ_RING_SIZE_32); break; case 64: - bf_set(lpfc_mq_context_count, &mq_create->u.request.context, - LPFC_MQ_CNT_64); + bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, + LPFC_MQ_RING_SIZE_64); break; case 128: - bf_set(lpfc_mq_context_count, &mq_create->u.request.context, - LPFC_MQ_CNT_128); + bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, + LPFC_MQ_RING_SIZE_128); break; } list_for_each_entry(dmabuf, &mq->page_list, list) { @@ -10592,6 +10697,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, length, LPFC_SLI4_MBX_EMBED); mq_create_ext = &mbox->u.mqe.un.mq_create_ext; + shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; bf_set(lpfc_mbx_mq_create_ext_num_pages, &mq_create_ext->u.request, mq->page_count); bf_set(lpfc_mbx_mq_create_ext_async_evt_link, @@ -10604,9 +10710,15 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, &mq_create_ext->u.request, 1); bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, &mq_create_ext->u.request, 1); - 
bf_set(lpfc_mq_context_cq_id, - &mq_create_ext->u.request.context, cq->queue_id); bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); + bf_set(lpfc_mbox_hdr_version, &shdr->request, + phba->sli4_hba.pc_sli4_params.mqv); + if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1) + bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request, + cq->queue_id); + else + bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, + cq->queue_id); switch (mq->entry_count) { default: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, @@ -10616,20 +10728,24 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, return -EINVAL; /* otherwise default to smallest count (drop through) */ case 16: - bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, - LPFC_MQ_CNT_16); + bf_set(lpfc_mq_context_ring_size, + &mq_create_ext->u.request.context, + LPFC_MQ_RING_SIZE_16); break; case 32: - bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, - LPFC_MQ_CNT_32); + bf_set(lpfc_mq_context_ring_size, + &mq_create_ext->u.request.context, + LPFC_MQ_RING_SIZE_32); break; case 64: - bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, - LPFC_MQ_CNT_64); + bf_set(lpfc_mq_context_ring_size, + &mq_create_ext->u.request.context, + LPFC_MQ_RING_SIZE_64); break; case 128: - bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, - LPFC_MQ_CNT_128); + bf_set(lpfc_mq_context_ring_size, + &mq_create_ext->u.request.context, + LPFC_MQ_RING_SIZE_128); break; } list_for_each_entry(dmabuf, &mq->page_list, list) { @@ -10640,7 +10756,6 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, putPaddrHigh(dmabuf->phys); } rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); - shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create_ext->u.response); if (rc != MBX_SUCCESS) { @@ -10672,6 +10787,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, goto out; } mq->type = LPFC_MQ; + mq->assoc_qid = cq->queue_id; mq->subtype = subtype; mq->host_index = 0; mq->hba_index = 0; @@ -10716,6 +10832,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; + struct dma_address *page; if (!phba->sli4_hba.pc_sli4_params.supported) hw_page_size = SLI4_PAGE_SIZE; @@ -10729,20 +10846,42 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, length, LPFC_SLI4_MBX_EMBED); wq_create = &mbox->u.mqe.un.wq_create; + shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, wq->page_count); bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, cq->queue_id); + bf_set(lpfc_mbox_hdr_version, &shdr->request, + phba->sli4_hba.pc_sli4_params.wqv); + if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) { + bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, + wq->entry_count); + switch (wq->entry_size) { + default: + case 64: + bf_set(lpfc_mbx_wq_create_wqe_size, + &wq_create->u.request_1, + LPFC_WQ_WQE_SIZE_64); + break; + case 128: + bf_set(lpfc_mbx_wq_create_wqe_size, + &wq_create->u.request_1, + LPFC_WQ_WQE_SIZE_128); + break; + } + bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1, + (PAGE_SIZE/SLI4_PAGE_SIZE)); + page = wq_create->u.request_1.page; + } else { + page = wq_create->u.request.page; + } 
list_for_each_entry(dmabuf, &wq->page_list, list) { memset(dmabuf->virt, 0, hw_page_size); - wq_create->u.request.page[dmabuf->buffer_tag].addr_lo = - putPaddrLow(dmabuf->phys); - wq_create->u.request.page[dmabuf->buffer_tag].addr_hi = - putPaddrHigh(dmabuf->phys); + page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); + page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); } rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); /* The IOCTL status is embedded in the mailbox subheader. */ - shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { @@ -10759,6 +10898,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, goto out; } wq->type = LPFC_WQ; + wq->assoc_qid = cq->queue_id; wq->subtype = subtype; wq->host_index = 0; wq->hba_index = 0; @@ -10819,37 +10959,51 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length, LPFC_SLI4_MBX_EMBED); rq_create = &mbox->u.mqe.un.rq_create; - switch (hrq->entry_count) { - default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2535 Unsupported RQ count. (%d)\n", - hrq->entry_count); - if (hrq->entry_count < 512) - return -EINVAL; - /* otherwise default to smallest count (drop through) */ - case 512: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_512); - break; - case 1024: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_1024); - break; - case 2048: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_2048); - break; - case 4096: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_4096); - break; + shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; + bf_set(lpfc_mbox_hdr_version, &shdr->request, + phba->sli4_hba.pc_sli4_params.rqv); + if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { + bf_set(lpfc_rq_context_rqe_count_1, + &rq_create->u.request.context, + hrq->entry_count); + rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; + } else { + switch (hrq->entry_count) { + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2535 Unsupported RQ count. 
(%d)\n", + hrq->entry_count); + if (hrq->entry_count < 512) + return -EINVAL; + /* otherwise default to smallest count (drop through) */ + case 512: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_512); + break; + case 1024: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_1024); + break; + case 2048: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_2048); + break; + case 4096: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_4096); + break; + } + bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, + LPFC_HDR_BUF_SIZE); } bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, cq->queue_id); bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, hrq->page_count); - bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, - LPFC_HDR_BUF_SIZE); list_for_each_entry(dmabuf, &hrq->page_list, list) { memset(dmabuf->virt, 0, hw_page_size); rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = @@ -10859,7 +11013,6 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, } rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); /* The IOCTL status is embedded in the mailbox subheader. */ - shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { @@ -10876,6 +11029,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, goto out; } hrq->type = LPFC_HRQ; + hrq->assoc_qid = cq->queue_id; hrq->subtype = subtype; hrq->host_index = 0; hrq->hba_index = 0; @@ -10884,37 +11038,50 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length, LPFC_SLI4_MBX_EMBED); - switch (drq->entry_count) { - default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2536 Unsupported RQ count. (%d)\n", - drq->entry_count); - if (drq->entry_count < 512) - return -EINVAL; - /* otherwise default to smallest count (drop through) */ - case 512: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_512); - break; - case 1024: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_1024); - break; - case 2048: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_2048); - break; - case 4096: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_4096); - break; + bf_set(lpfc_mbox_hdr_version, &shdr->request, + phba->sli4_hba.pc_sli4_params.rqv); + if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { + bf_set(lpfc_rq_context_rqe_count_1, + &rq_create->u.request.context, + hrq->entry_count); + rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE; + } else { + switch (drq->entry_count) { + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2536 Unsupported RQ count. 
(%d)\n", + drq->entry_count); + if (drq->entry_count < 512) + return -EINVAL; + /* otherwise default to smallest count (drop through) */ + case 512: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_512); + break; + case 1024: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_1024); + break; + case 2048: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_2048); + break; + case 4096: + bf_set(lpfc_rq_context_rqe_count, + &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_4096); + break; + } + bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, + LPFC_DATA_BUF_SIZE); } bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, cq->queue_id); bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, drq->page_count); - bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, - LPFC_DATA_BUF_SIZE); list_for_each_entry(dmabuf, &drq->page_list, list) { rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); @@ -10936,6 +11103,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, goto out; } drq->type = LPFC_DRQ; + drq->assoc_qid = cq->queue_id; drq->subtype = subtype; drq->host_index = 0; drq->hba_index = 0; @@ -11189,7 +11357,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, if (!mbox) return -ENOMEM; length = (sizeof(struct lpfc_mbx_rq_destroy) - - sizeof(struct mbox_header)); + sizeof(struct lpfc_sli4_cfg_mhdr)); lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, length, LPFC_SLI4_MBX_EMBED); @@ -11279,7 +11447,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba, lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, sizeof(struct lpfc_mbx_post_sgl_pages) - - sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED); + sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *) &mbox->u.mqe.un.post_sgl_pages; @@ -11582,6 +11750,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) static char *rctl_names[] = FC_RCTL_NAMES_INIT; char *type_names[] = FC_TYPE_NAMES_INIT; struct fc_vft_header *fc_vft_hdr; + uint32_t *header = (uint32_t *) fc_hdr; switch (fc_hdr->fh_r_ctl) { case FC_RCTL_DD_UNCAT: /* uncategorized information */ @@ -11630,10 +11799,15 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) default: goto drop; } + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "2538 Received frame rctl:%s type:%s\n", + "2538 Received frame rctl:%s type:%s " + "Frame Data:%08x %08x %08x %08x %08x %08x\n", rctl_names[fc_hdr->fh_r_ctl], - type_names[fc_hdr->fh_type]); + type_names[fc_hdr->fh_type], + be32_to_cpu(header[0]), be32_to_cpu(header[1]), + be32_to_cpu(header[2]), be32_to_cpu(header[3]), + be32_to_cpu(header[4]), be32_to_cpu(header[5])); return 0; drop: lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, @@ -11930,17 +12104,17 @@ lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, } /** - * lpfc_sli4_seq_abort_acc_cmpl - Accept seq abort iocb complete handler + * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler * @phba: Pointer to HBA context object. * @cmd_iocbq: pointer to the command iocbq structure. * @rsp_iocbq: pointer to the response iocbq structure. * - * This function handles the sequence abort accept iocb command complete + * This function handles the sequence abort response iocb command complete * event. 
It properly releases the memory allocated to the sequence abort * accept iocb. **/ static void -lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba, +lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmd_iocbq, struct lpfc_iocbq *rsp_iocbq) { @@ -11949,15 +12123,15 @@ lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba, } /** - * lpfc_sli4_seq_abort_acc - Accept sequence abort + * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort * @phba: Pointer to HBA context object. * @fc_hdr: pointer to a FC frame header. * - * This function sends a basic accept to a previous unsol sequence abort + * This function sends a basic response to a previous unsol sequence abort * event after aborting the sequence handling. **/ static void -lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, +lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) { struct lpfc_iocbq *ctiocb = NULL; @@ -11965,6 +12139,7 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, uint16_t oxid, rxid; uint32_t sid, fctl; IOCB_t *icmd; + int rc; if (!lpfc_is_link_up(phba)) return; @@ -11985,7 +12160,7 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, + phba->sli4_hba.max_cfg_param.xri_base)) lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0); - /* Allocate buffer for acc iocb */ + /* Allocate buffer for rsp iocb */ ctiocb = lpfc_sli_get_iocbq(phba); if (!ctiocb) return; @@ -12010,32 +12185,54 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, ctiocb->iocb_cmpl = NULL; ctiocb->vport = phba->pport; - ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_acc_cmpl; + ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; + ctiocb->sli4_xritag = NO_XRI; + + /* If the oxid maps to the FCP XRI range or if it is out of range, + * send a BLS_RJT. The driver no longer has that exchange. + * Override the IOCB for a BA_RJT. + */ + if (oxid > (phba->sli4_hba.max_cfg_param.max_xri + + phba->sli4_hba.max_cfg_param.xri_base) || + oxid > (lpfc_sli4_get_els_iocb_cnt(phba) + + phba->sli4_hba.max_cfg_param.xri_base)) { + icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; + bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); + bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); + bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); + } if (fctl & FC_FC_EX_CTX) { /* ABTS sent by responder to CT exchange, construction * of BA_ACC will use OX_ID from ABTS for the XRI_TAG * field and RX_ID from ABTS for RX_ID field. */ - bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_RSP); - bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, rxid); - ctiocb->sli4_xritag = oxid; + bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); + bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); } else { /* ABTS sent by initiator to CT exchange, construction * of BA_ACC will need to allocate a new XRI as for the * XRI_TAG and RX_ID fields. 
*/ - bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_INT); - bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, NO_XRI); - ctiocb->sli4_xritag = NO_XRI; + bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); + bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, NO_XRI); } - bf_set(lpfc_abts_oxid, &icmd->un.bls_acc, oxid); + bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); - /* Xmit CT abts accept on exchange <xid> */ + /* Xmit CT abts response on exchange <xid> */ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "1200 Xmit CT ABTS ACC on exchange x%x Data: x%x\n", - CMD_XMIT_BLS_RSP64_CX, phba->link_state); - lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); + "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", + icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state); + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); + if (rc == IOCB_ERROR) { + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, + "2925 Failed to issue CT ABTS RSP x%x on " + "xri x%x, Data x%x\n", + icmd->un.xseq64.w5.hcsw.Rctl, oxid, + phba->link_state); + lpfc_sli_release_iocbq(phba, ctiocb); + } } /** @@ -12083,7 +12280,7 @@ lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, lpfc_in_buf_free(phba, &dmabuf->dbuf); } /* Send basic accept (BA_ACC) to the abort requester */ - lpfc_sli4_seq_abort_acc(phba, &fc_hdr); + lpfc_sli4_seq_abort_rsp(phba, &fc_hdr); } /** @@ -12402,7 +12599,8 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, sizeof(struct lpfc_mbx_post_hdr_tmpl) - - sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED); + sizeof(struct lpfc_sli4_cfg_mhdr), + LPFC_SLI4_MBX_EMBED); bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, hdr_tmpl, rpi_page->page_count); bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, @@ -12773,7 +12971,7 @@ lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, * record and processing it one at a time starting from the @fcf_index * for initial FCF discovery or fast FCF failover rediscovery. * - * Return 0 if the mailbox command is submitted sucessfully, none 0 + * Return 0 if the mailbox command is submitted successfully, none 0 * otherwise. **/ int @@ -12834,7 +13032,7 @@ fail_fcf_scan: * This routine is invoked to read an FCF record indicated by @fcf_index * and to use it for FLOGI roundrobin FCF failover. * - * Return 0 if the mailbox command is submitted sucessfully, none 0 + * Return 0 if the mailbox command is submitted successfully, none 0 * otherwise. **/ int @@ -12880,7 +13078,7 @@ fail_fcf_read: * This routine is invoked to read an FCF record indicated by @fcf_index to * determine whether it's eligible for FLOGI roundrobin failover list. * - * Return 0 if the mailbox command is submitted sucessfully, none 0 + * Return 0 if the mailbox command is submitted successfully, none 0 * otherwise. **/ int diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index c7217d579e0f..1a3cbf88f2ce 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2009 Emulex. All rights reserved. * + * Copyright (C) 2009-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * * @@ -125,9 +125,9 @@ struct lpfc_queue { uint32_t entry_count; /* Number of entries to support on the queue */ uint32_t entry_size; /* Size of each queue entry. */ uint32_t queue_id; /* Queue ID assigned by the hardware */ + uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */ struct list_head page_list; uint32_t page_count; /* Number of pages allocated for this queue */ - uint32_t host_index; /* The host's index for putting or getting */ uint32_t hba_index; /* The last known hba index for get or put */ union sli4_qe qe[1]; /* array to index entries (must be last) */ @@ -359,6 +359,10 @@ struct lpfc_pc_sli4_params { uint32_t hdr_pp_align; uint32_t sgl_pages_max; uint32_t sgl_pp_align; + uint8_t cqv; + uint8_t mqv; + uint8_t wqv; + uint8_t rqv; }; /* SLI4 HBA data structure entries */ @@ -562,6 +566,8 @@ void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *, struct sli4_wcqe_xri_aborted *); void lpfc_sli4_els_xri_aborted(struct lpfc_hba *, struct sli4_wcqe_xri_aborted *); +void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *); +void lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *); int lpfc_sli4_brdreset(struct lpfc_hba *); int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *); void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *); diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 386cf92de492..2404d1d65563 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2010 Emulex. All rights reserved. * + * Copyright (C) 2004-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.20" +#define LPFC_DRIVER_VERSION "8.3.22" #define LPFC_DRIVER_NAME "lpfc" #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 6b8d2952e32f..30ba5440c67a 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -464,6 +464,7 @@ disable_vport(struct fc_vport *fc_vport) struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL; long timeout; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); ndlp = lpfc_findnode_did(vport, Fabric_DID); if (ndlp && NLP_CHK_NODE_ACT(ndlp) @@ -498,6 +499,9 @@ disable_vport(struct fc_vport *fc_vport) * scsi_host_put() to release the vport. */ lpfc_mbx_unreg_vpi(vport); + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + spin_unlock_irq(shost->host_lock); lpfc_vport_set_state(vport, FC_VPORT_DISABLED); lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, |
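The lpfc_rq_create() hunks above introduce version-dependent receive-queue setup: when pc_sli4_params.rqv reports LPFC_Q_CREATE_VERSION_1 the entry count is written directly through lpfc_rq_context_rqe_count_1 and the buffer size becomes an explicit context field, while version 0 keeps the legacy encoded ring-size selector, falling back to the smallest supported encoding for unusual counts of 512 or more and rejecting anything smaller. The standalone C sketch below mirrors only that control flow; the struct, the placeholder encodings, and the fill_rq_context()/rq_ctx_sketch names are illustrative stand-ins, not the driver's real definitions (the driver writes these fields with bf_set() on the mailbox context).

/*
 * Standalone sketch of the version-dependent RQ sizing logic from the
 * lpfc_rq_create() hunks above.  The struct and the encoding values are
 * illustrative placeholders, not the driver's definitions; only the
 * control flow (v1 stores the raw count, v0 falls back to the smallest
 * supported encoded size) follows the patch.
 */
#include <stdio.h>

#define Q_CREATE_VERSION_1  1
#define HDR_BUF_SIZE        128   /* placeholder for LPFC_HDR_BUF_SIZE */

/* placeholder encodings standing in for LPFC_RQ_RING_SIZE_* */
enum rqe_count_code { RQ_512, RQ_1024, RQ_2048, RQ_4096 };

struct rq_ctx_sketch {
	unsigned int rqe_count;      /* v0: encoded size, v1: raw count */
	unsigned int buffer_size;    /* explicit field in v1 contexts   */
};

/* Returns 0 on success, -1 for counts the hardware cannot support. */
static int fill_rq_context(struct rq_ctx_sketch *ctx,
			   unsigned int version, unsigned int entry_count)
{
	if (version == Q_CREATE_VERSION_1) {
		/* v1: the count goes in as-is, buffer size is explicit */
		ctx->rqe_count = entry_count;
		ctx->buffer_size = HDR_BUF_SIZE;
		return 0;
	}

	/* v0: map the count onto an encoded ring size */
	switch (entry_count) {
	default:
		fprintf(stderr, "unsupported RQ count (%u)\n", entry_count);
		if (entry_count < 512)
			return -1;
		/* otherwise default to the smallest count (fall through) */
	case 512:
		ctx->rqe_count = RQ_512;
		break;
	case 1024:
		ctx->rqe_count = RQ_1024;
		break;
	case 2048:
		ctx->rqe_count = RQ_2048;
		break;
	case 4096:
		ctx->rqe_count = RQ_4096;
		break;
	}
	ctx->buffer_size = HDR_BUF_SIZE;
	return 0;
}

int main(void)
{
	struct rq_ctx_sketch ctx;

	/* 3000 is not a supported v0 size, so it drops to the 512 code */
	if (fill_rq_context(&ctx, 0, 3000) == 0)
		printf("v0: encoded count %u, buf %u\n",
		       ctx.rqe_count, ctx.buffer_size);

	/* under v1 the raw count is stored directly */
	if (fill_rq_context(&ctx, Q_CREATE_VERSION_1, 3000) == 0)
		printf("v1: raw count %u, buf %u\n",
		       ctx.rqe_count, ctx.buffer_size);
	return 0;
}

In the actual patch the header and data queues use different buffer sizes (LPFC_HDR_BUF_SIZE for the HRQ, LPFC_DATA_BUF_SIZE for the DRQ); the sketch collapses them into a single placeholder for brevity.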