author     James Smart <james.smart@emulex.com>          2011-07-23 02:37:52 +0400
committer  James Bottomley <JBottomley@Parallels.com>    2011-07-27 15:16:05 +0400
commit     7d791df730844932feeac3bbf3ac08b510682174 (patch)
tree       8a1dd1666dc7df7518368327b7eff45719adeae1
parent     b76f2dc91c0fff7a66616affdc039dc2e4b7ff98 (diff)
download   linux-7d791df730844932feeac3bbf3ac08b510682174.tar.xz
[SCSI] lpfc 8.3.25: Add FCF priority failover functionality
This patch implements a new FCF failover policy for the lpfc driver. It allows
the driver to choose which FCF to fail over to based on the FCF priority. The
patch also introduces a new sysfs parameter (fcf_failover_policy) that lets the
user choose which FCF failover policy to use.

Signed-off-by: Alex Iannicelli <alex.iannicelli@emulex.com>
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
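For illustration, here is a minimal standalone sketch of the policy choice this parameter selects between; it is not lpfc code, and the fcf_entry table, next_fcf() helper and main() are invented for the example. Fast failover (LPFC_FCF_FOV) takes the next eligible FCF, while priority failover (LPFC_FCF_PRIORITY) restricts the choice to the record with the best (numerically lowest) FIP priority.

/*
 * Standalone sketch, not lpfc code: the fcf_entry table, next_fcf() helper
 * and main() are invented for illustration.  LPFC_FCF_FOV fails over to the
 * next eligible FCF; LPFC_FCF_PRIORITY picks the best (lowest) FIP priority.
 */
#include <stdio.h>
#include <stdint.h>

#define LPFC_FCF_FOV		1	/* Fast fcf failover */
#define LPFC_FCF_PRIORITY	2	/* Priority fcf failover */

struct fcf_entry {
	uint16_t index;		/* FCF table index */
	uint8_t priority;	/* lower value preferred, like fip_priority */
};

/* Pick the table slot to fail over to under the configured policy. */
static int next_fcf(uint32_t policy, const struct fcf_entry *tbl, int n, int cur)
{
	int i, best = -1;

	for (i = 0; i < n; i++) {
		if (i == cur)
			continue;
		if (policy == LPFC_FCF_FOV)
			return i;		/* first eligible record wins */
		if (best < 0 || tbl[i].priority < tbl[best].priority)
			best = i;		/* keep the best priority seen */
	}
	return best;
}

int main(void)
{
	struct fcf_entry tbl[] = { { 0, 128 }, { 1, 128 }, { 2, 1 } };

	printf("fast failover picks idx %d\n",
	       next_fcf(LPFC_FCF_FOV, tbl, 3, 0));	/* -> 1 */
	printf("priority failover picks idx %d\n",
	       next_fcf(LPFC_FCF_PRIORITY, tbl, 3, 0));	/* -> 2 */
	return 0;
}

In the driver itself the same choice shows up as checks of phba->cfg_fcf_failover_policy against LPFC_FCF_FOV in the lpfc_hbadisc.c hunks below.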
-rw-r--r--  drivers/scsi/lpfc/lpfc.h          |   3
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c     |   5
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h     |   2
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c      |   2
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c  | 220
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c     |   7
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c      | 131
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h     |  27
8 files changed, 377 insertions, 20 deletions
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index b932067bf2d8..c088a36d1f33 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -680,6 +680,9 @@ struct lpfc_hba {
uint32_t cfg_enable_rrq;
uint32_t cfg_topology;
uint32_t cfg_link_speed;
+#define LPFC_FCF_FOV 1 /* Fast fcf failover */
+#define LPFC_FCF_PRIORITY 2 /* Priority fcf failover */
+ uint32_t cfg_fcf_failover_policy;
uint32_t cfg_cr_delay;
uint32_t cfg_cr_count;
uint32_t cfg_multi_ring_support;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index fa5bc146d67b..2542f1f8bf86 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -2193,6 +2193,9 @@ lpfc_param_show(enable_npiv);
lpfc_param_init(enable_npiv, 1, 0, 1);
static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
+LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
+ "FCF Fast failover=1 Priority failover=2");
+
int lpfc_enable_rrq;
module_param(lpfc_enable_rrq, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
@@ -3775,6 +3778,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_fdmi_on,
&dev_attr_lpfc_max_luns,
&dev_attr_lpfc_enable_npiv,
+ &dev_attr_lpfc_fcf_failover_policy,
&dev_attr_lpfc_enable_rrq,
&dev_attr_nport_evt_cnt,
&dev_attr_board_mode,
@@ -4995,6 +4999,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_link_speed_init(phba, lpfc_link_speed);
lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
+ lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
lpfc_use_msi_init(phba, lpfc_use_msi);
lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 1f3cad0b1ce8..a6db6aef1331 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -235,9 +235,11 @@ int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *);
void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *);
void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *);
uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *);
+void lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *, uint16_t);
int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t);
void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t);
int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);
+void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *);
int lpfc_mem_alloc(struct lpfc_hba *, int align);
void lpfc_mem_free(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 1725b81770e9..023da0e00d38 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -874,6 +874,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
phba->fcf.current_rec.fcf_indx,
irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpTimeout);
+ lpfc_sli4_set_fcf_flogi_fail(phba,
+ phba->fcf.current_rec.fcf_indx);
fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
if (rc)
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index bef17e3e419a..0b47adf9fee8 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1109,6 +1109,28 @@ out:
return;
}
+/**
+ * lpfc_sli4_clear_fcf_rr_bmask
+ * @phba: pointer to the struct lpfc_hba for this port.
+ * This function resets the round robin bit mask and clears the
+ * fcf priority list. The list deletions are done while holding the
+ * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
+ * from the lpfc_fcf_pri record.
+ **/
+void
+lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
+{
+ struct lpfc_fcf_pri *fcf_pri;
+ struct lpfc_fcf_pri *next_fcf_pri;
+ memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry_safe(fcf_pri, next_fcf_pri,
+ &phba->fcf.fcf_pri_list, list) {
+ list_del_init(&fcf_pri->list);
+ fcf_pri->fcf_rec.flag = 0;
+ }
+ spin_unlock_irq(&phba->hbalock);
+}
static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
@@ -1130,7 +1152,8 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
spin_unlock_irq(&phba->hbalock);
/* If there is a pending FCoE event, restart FCF table scan. */
- if (lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
+ if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
+ lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
goto fail_out;
/* Mark successful completion of FCF table scan */
@@ -1250,6 +1273,30 @@ lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
}
/**
+ * lpfc_update_fcf_record - Update driver fcf record
+ * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: Index for the lpfc_fcf_record.
+ * @new_fcf_record: pointer to hba fcf record.
+ *
+ * This routine updates the driver FCF priority record from the new HBA FCF
+ * record. This routine is called with the host lock held.
+ **/
+static void
+__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
+ struct fcf_record *new_fcf_record
+ )
+{
+ struct lpfc_fcf_pri *fcf_pri;
+
+ fcf_pri = &phba->fcf.fcf_pri[fcf_index];
+ fcf_pri->fcf_rec.fcf_index = fcf_index;
+ /* FCF record priority */
+ fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
+
+}
+
+/**
* lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
* @fcf: pointer to driver fcf record.
* @new_fcf_record: pointer to fcf record.
@@ -1332,6 +1379,9 @@ __lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
fcf_rec->addr_mode = addr_mode;
fcf_rec->vlan_id = vlan_id;
fcf_rec->flag |= (flag | RECORD_VALID);
+ __lpfc_update_fcf_record_pri(phba,
+ bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
+ new_fcf_record);
}
/**
@@ -1834,6 +1884,8 @@ lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
return false;
if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
return false;
+ if (fcf_rec->priority != new_fcf_record->fip_priority)
+ return false;
return true;
}
@@ -1897,6 +1949,152 @@ stop_flogi_current_fcf:
}
/**
+ * lpfc_sli4_fcf_pri_list_del
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: the index of the fcf record to delete
+ * This routine checks the on-list flag of the fcf_index to be deleted.
+ * If it is on the list then it is removed from the list, and the flag
+ * is cleared. This routine grabs the hbalock before removing the fcf
+ * record from the list.
+ **/
+static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
+ uint16_t fcf_index)
+{
+ struct lpfc_fcf_pri *new_fcf_pri;
+
+ new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "3058 deleting idx x%x pri x%x flg x%x\n",
+ fcf_index, new_fcf_pri->fcf_rec.priority,
+ new_fcf_pri->fcf_rec.flag);
+ spin_lock_irq(&phba->hbalock);
+ if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
+ if (phba->fcf.current_rec.priority ==
+ new_fcf_pri->fcf_rec.priority)
+ phba->fcf.eligible_fcf_cnt--;
+ list_del_init(&new_fcf_pri->list);
+ new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
+ }
+ spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_set_fcf_flogi_fail
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: the index of the fcf record to update
+ * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
+ * flag so that the roundrobin selection for the particular priority level
+ * will try a different fcf record that does not have this bit set.
+ * If the fcf record is re-read for any reason this flag is cleared before
+ * adding it to the priority list.
+ **/
+void
+lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+ struct lpfc_fcf_pri *new_fcf_pri;
+ new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
+ spin_lock_irq(&phba->hbalock);
+ new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
+ spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_fcf_pri_list_add
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: the index of the fcf record to add
+ * @new_fcf_record: pointer to hba fcf record.
+ * This routine checks the priority of the fcf_index to be added.
+ * If it is a lower priority than the current head of the fcf_pri list
+ * then it is added to the list in the right order.
+ * If it is the same priority as the current head of the list then it
+ * is added to the head of the list and its bit in the rr_bmask is set.
+ * If the fcf_index to be added is of a higher priority than the current
+ * head of the list then the rr_bmask is cleared, its bit is set in the
+ * rr_bmask and it is added to the head of the list.
+ * returns:
+ * 0=success 1=failure
+ **/
+int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, uint16_t fcf_index,
+ struct fcf_record *new_fcf_record)
+{
+ uint16_t current_fcf_pri;
+ uint16_t last_index;
+ struct lpfc_fcf_pri *fcf_pri;
+ struct lpfc_fcf_pri *next_fcf_pri;
+ struct lpfc_fcf_pri *new_fcf_pri;
+ int ret;
+
+ new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "3059 adding idx x%x pri x%x flg x%x\n",
+ fcf_index, new_fcf_record->fip_priority,
+ new_fcf_pri->fcf_rec.flag);
+ spin_lock_irq(&phba->hbalock);
+ if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
+ list_del_init(&new_fcf_pri->list);
+ new_fcf_pri->fcf_rec.fcf_index = fcf_index;
+ new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
+ if (list_empty(&phba->fcf.fcf_pri_list)) {
+ list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
+ ret = lpfc_sli4_fcf_rr_index_set(phba,
+ new_fcf_pri->fcf_rec.fcf_index);
+ goto out;
+ }
+
+ last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
+ LPFC_SLI4_FCF_TBL_INDX_MAX);
+ if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+ ret = 0; /* Empty rr list */
+ goto out;
+ }
+ current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
+ if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) {
+ list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
+ if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) {
+ memset(phba->fcf.fcf_rr_bmask, 0,
+ sizeof(*phba->fcf.fcf_rr_bmask));
+ /* fcfs_at_this_priority_level = 1; */
+ phba->fcf.eligible_fcf_cnt = 1;
+ } else
+ /* fcfs_at_this_priority_level++; */
+ phba->fcf.eligible_fcf_cnt++;
+ ret = lpfc_sli4_fcf_rr_index_set(phba,
+ new_fcf_pri->fcf_rec.fcf_index);
+ goto out;
+ }
+
+ list_for_each_entry_safe(fcf_pri, next_fcf_pri,
+ &phba->fcf.fcf_pri_list, list) {
+ if (new_fcf_pri->fcf_rec.priority <=
+ fcf_pri->fcf_rec.priority) {
+ if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
+ list_add(&new_fcf_pri->list,
+ &phba->fcf.fcf_pri_list);
+ else
+ list_add(&new_fcf_pri->list,
+ &((struct lpfc_fcf_pri *)
+ fcf_pri->list.prev)->list);
+ ret = 0;
+ goto out;
+ } else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
+ || new_fcf_pri->fcf_rec.priority <
+ next_fcf_pri->fcf_rec.priority) {
+ list_add(&new_fcf_pri->list, &fcf_pri->list);
+ ret = 0;
+ goto out;
+ }
+ if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
+ continue;
+
+ }
+ ret = 1;
+out:
+ /* we use = instead of |= to clear the FLOGI_FAILED flag. */
+ new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
+ spin_unlock_irq(&phba->hbalock);
+ return ret;
+}
+
+/**
* lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
* @phba: pointer to lpfc hba data structure.
* @mboxq: pointer to mailbox object.
@@ -1958,6 +2156,9 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
* record for roundrobin FCF failover.
*/
if (!rc) {
+ lpfc_sli4_fcf_pri_list_del(phba,
+ bf_get(lpfc_fcf_record_fcf_index,
+ new_fcf_record));
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
"2781 FCF (x%x) failed connection "
"list check: (x%x/x%x)\n",
@@ -2005,7 +2206,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
goto read_next_fcf;
} else {
fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
- rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
+ rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
+ new_fcf_record);
if (rc)
goto read_next_fcf;
}
@@ -2018,7 +2220,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
*/
spin_lock_irq(&phba->hbalock);
if (phba->fcf.fcf_flag & FCF_IN_USE) {
- if (lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
+ if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
+ lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
new_fcf_record, vlan_id)) {
if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
phba->fcf.current_rec.fcf_indx) {
@@ -2232,7 +2435,8 @@ read_next_fcf:
(phba->fcf.fcf_flag & FCF_REDISC_PEND))
return;
- if (phba->fcf.fcf_flag & FCF_IN_USE) {
+ if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
+ phba->fcf.fcf_flag & FCF_IN_USE) {
/*
* In case the current in-use FCF record no
* longer existed during FCF discovery that
@@ -2423,7 +2627,8 @@ lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
/* Update the eligible FCF record index bmask */
fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
- rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
+
+ rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);
out:
lpfc_sli4_mbox_cmd_free(phba, mboxq);
@@ -2893,8 +3098,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
goto out;
}
/* Reset FCF roundrobin bmask for new discovery */
- memset(phba->fcf.fcf_rr_bmask, 0,
- sizeof(*phba->fcf.fcf_rr_bmask));
+ lpfc_sli4_clear_fcf_rr_bmask(phba);
}
return;
@@ -5592,7 +5796,7 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
/* Reset FCF roundrobin bmask for new discovery */
- memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
+ lpfc_sli4_clear_fcf_rr_bmask(phba);
rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
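The ordering rules documented for lpfc_sli4_fcf_pri_list_add() above reduce to one idea: the roundrobin bitmask only ever holds FCF indexes that share the best (numerically lowest) FIP priority seen so far, and a better priority restarts the group. The following standalone sketch shows just that bookkeeping; the rr_state struct and fcf_pri_add() helper are invented for illustration and omit the driver's sorted list and locking.

/*
 * Standalone sketch, not lpfc code: rr_state and fcf_pri_add() are invented.
 * It mirrors only the priority-group bookkeeping of the patch: the roundrobin
 * bitmask holds the indexes sharing the best (lowest) priority seen so far.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define FCF_TBL_MAX 32

struct rr_state {
	uint8_t bmask[FCF_TBL_MAX / 8];	/* indexes eligible for roundrobin */
	uint32_t best_pri;		/* priority shared by the set bits */
	uint32_t eligible_cnt;		/* how many indexes share best_pri */
};

static void set_idx(uint8_t *bmask, int idx)
{
	bmask[idx / 8] |= 1u << (idx % 8);
}

/* Record one scanned FCF, mirroring what the patch does with the mask. */
static void fcf_pri_add(struct rr_state *st, uint16_t idx, uint32_t pri)
{
	if (st->eligible_cnt == 0 || pri < st->best_pri) {
		/* Better (lower) priority: restart the group, like clearing
		 * fcf_rr_bmask and setting eligible_fcf_cnt back to one. */
		memset(st->bmask, 0, sizeof(st->bmask));
		st->best_pri = pri;
		st->eligible_cnt = 0;
	}
	if (pri == st->best_pri) {
		set_idx(st->bmask, idx);
		st->eligible_cnt++;
	}
	/* pri > best_pri: stays on the priority list but not in the mask. */
}

int main(void)
{
	struct rr_state st = { .eligible_cnt = 0 };

	fcf_pri_add(&st, 3, 128);
	fcf_pri_add(&st, 7, 128);
	fcf_pri_add(&st, 9, 1);		/* better priority: group restarts */
	printf("best_pri=%u eligible=%u\n",
	       (unsigned)st.best_pri, (unsigned)st.eligible_cnt);
	return 0;
}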
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 4c58402fa698..a3c820083c36 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3634,8 +3634,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
lpfc_sli4_fcf_dead_failthrough(phba);
} else {
/* Reset FCF roundrobin bmask for new discovery */
- memset(phba->fcf.fcf_rr_bmask, 0,
- sizeof(*phba->fcf.fcf_rr_bmask));
+ lpfc_sli4_clear_fcf_rr_bmask(phba);
/*
* Handling fast FCF failover to a DEAD FCF event is
* considered equalivant to receiving CVL to all vports.
@@ -3721,8 +3720,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
* Reset FCF roundrobin bmask for new
* discovery.
*/
- memset(phba->fcf.fcf_rr_bmask, 0,
- sizeof(*phba->fcf.fcf_rr_bmask));
+ lpfc_sli4_clear_fcf_rr_bmask(phba);
}
break;
default:
@@ -9046,6 +9044,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
}
INIT_LIST_HEAD(&phba->active_rrq_list);
+ INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
/* Set up common device driver resources */
error = lpfc_setup_driver_resource_phase2(phba);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 247c24f438f5..8b799f047a99 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -14635,6 +14635,92 @@ fail_fcf_read:
}
/**
+ * lpfc_check_next_fcf_pri_level
+ * @phba: pointer to the lpfc_hba struct for this port.
+ * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
+ * routine when the rr_bmask is empty. The FCF indices are put into the
+ * rr_bmask based on their priority level, starting from the highest
+ * priority down to the lowest. The most likely FCF candidate will be in
+ * the highest priority group. When this routine is called it searches the
+ * fcf_pri list for the next lowest priority group and repopulates the
+ * rr_bmask with only those fcf indexes.
+ * returns:
+ * 1=success 0=failure
+ **/
+int
+lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
+{
+ uint16_t next_fcf_pri;
+ uint16_t last_index;
+ struct lpfc_fcf_pri *fcf_pri;
+ int rc;
+ int ret = 0;
+
+ last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
+ LPFC_SLI4_FCF_TBL_INDX_MAX);
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "3060 Last IDX %d\n", last_index);
+ if (list_empty(&phba->fcf.fcf_pri_list)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ "3061 Last IDX %d\n", last_index);
+ return 0; /* Empty rr list */
+ }
+ next_fcf_pri = 0;
+ /*
+ * Clear the rr_bmask and set all of the bits that are at this
+ * priority.
+ */
+ memset(phba->fcf.fcf_rr_bmask, 0,
+ sizeof(*phba->fcf.fcf_rr_bmask));
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
+ if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
+ continue;
+ /*
+ * the first priority that has not failed FLOGI
+ * will be the highest.
+ */
+ if (!next_fcf_pri)
+ next_fcf_pri = fcf_pri->fcf_rec.priority;
+ spin_unlock_irq(&phba->hbalock);
+ if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
+ rc = lpfc_sli4_fcf_rr_index_set(phba,
+ fcf_pri->fcf_rec.fcf_index);
+ if (rc)
+ return 0;
+ }
+ spin_lock_irq(&phba->hbalock);
+ }
+ /*
+ * if next_fcf_pri was not set above and the list is not empty then
+ * we have failed flogis on all of them. So reset flogi failed
+ * and start at the beginning.
+ */
+ if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
+ list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
+ fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
+ /*
+ * the first priority that has not failed FLOGI
+ * will be the highest.
+ */
+ if (!next_fcf_pri)
+ next_fcf_pri = fcf_pri->fcf_rec.priority;
+ spin_unlock_irq(&phba->hbalock);
+ if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
+ rc = lpfc_sli4_fcf_rr_index_set(phba,
+ fcf_pri->fcf_rec.fcf_index);
+ if (rc)
+ return 0;
+ }
+ spin_lock_irq(&phba->hbalock);
+ }
+ } else
+ ret = 1;
+ spin_unlock_irq(&phba->hbalock);
+
+ return ret;
+}
+/**
* lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
* @phba: pointer to lpfc hba data structure.
*
@@ -14650,6 +14736,7 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
uint16_t next_fcf_index;
/* Search start from next bit of currently registered FCF index */
+next_priority:
next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) %
LPFC_SLI4_FCF_TBL_INDX_MAX;
next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
@@ -14657,17 +14744,46 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
next_fcf_index);
/* Wrap around condition on phba->fcf.fcf_rr_bmask */
- if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
+ if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+ /*
+ * If we have wrapped then we need to clear the bits that
+ * have been tested so that we can detect when we should
+ * change the priority level.
+ */
next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
+ }
+
/* Check roundrobin failover list empty condition */
- if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+ if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
+ next_fcf_index == phba->fcf.current_rec.fcf_indx) {
+ /*
+ * If the next fcf index is not found, check whether there are lower
+ * priority level fcf's in the fcf_priority list.
+ * Set up the rr_bmask with all of the available fcf bits
+ * at that level and continue the selection process.
+ */
+ if (lpfc_check_next_fcf_pri_level(phba))
+ goto next_priority;
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
"2844 No roundrobin failover FCF available\n");
- return LPFC_FCOE_FCF_NEXT_NONE;
+ if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
+ return LPFC_FCOE_FCF_NEXT_NONE;
+ else {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+ "3063 Only FCF available idx %d, flag %x\n",
+ next_fcf_index,
+ phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
+ return next_fcf_index;
+ }
}
+ if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
+ phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
+ LPFC_FCF_FLOGI_FAILED)
+ goto next_priority;
+
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
"2845 Get next roundrobin failover FCF (x%x)\n",
next_fcf_index);
@@ -14719,6 +14835,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
void
lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
{
+ struct lpfc_fcf_pri *fcf_pri;
if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
"2762 FCF (x%x) reached driver's book "
@@ -14727,6 +14844,14 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
return;
}
/* Clear the eligible FCF record index bmask */
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
+ if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
+ list_del_init(&fcf_pri->list);
+ break;
+ }
+ }
+ spin_unlock_irq(&phba->hbalock);
clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
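The fallback added to lpfc_sli4_fcf_rr_next_index_get() via lpfc_check_next_fcf_pri_level() can be sketched as follows; this is a simplified standalone illustration with an invented flat table and pick_next() helper in place of the driver's priority list and bitmask. Records that have failed FLOGI are skipped, the best remaining priority group is used, and only when every record has failed are the FLOGI_FAILED marks cleared so the scan can start over.

/*
 * Standalone sketch, not lpfc code: the fcf table and pick_next() helper are
 * invented, standing in for the driver's fcf_pri_list and fcf_rr_bmask.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define NFCF 4

struct fcf {
	uint32_t pri;		/* FIP priority, lower is preferred */
	bool flogi_failed;	/* like LPFC_FCF_FLOGI_FAILED */
};

/* Choose the first FCF in the best priority group that has not failed FLOGI;
 * if every record has failed, clear the failure marks and try once more. */
static int pick_next(struct fcf *tbl, int n)
{
	for (int pass = 0; pass < 2; pass++) {
		int best = -1;

		for (int i = 0; i < n; i++) {
			if (tbl[i].flogi_failed)
				continue;
			if (best < 0 || tbl[i].pri < tbl[best].pri)
				best = i;
		}
		if (best >= 0)
			return best;
		for (int i = 0; i < n; i++)	/* all failed: reset marks */
			tbl[i].flogi_failed = false;
	}
	return -1;				/* no FCF records at all */
}

int main(void)
{
	struct fcf tbl[NFCF] = {
		{ 1, true }, { 1, true }, { 128, false }, { 128, false },
	};

	printf("next fcf idx %d\n", pick_next(tbl, NFCF));	/* -> 2 */
	return 0;
}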
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 88387c1c2dcc..19bb87ae8597 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -159,6 +159,25 @@ struct lpfc_fcf_rec {
#define RECORD_VALID 0x02
};
+struct lpfc_fcf_pri_rec {
+ uint16_t fcf_index;
+#define LPFC_FCF_ON_PRI_LIST 0x0001
+#define LPFC_FCF_FLOGI_FAILED 0x0002
+ uint16_t flag;
+ uint32_t priority;
+};
+
+struct lpfc_fcf_pri {
+ struct list_head list;
+ struct lpfc_fcf_pri_rec fcf_rec;
+};
+
+/*
+ * Maximum FCF table index, it is for driver internal book keeping, it
+ * just needs to be no less than the supported HBA's FCF table size.
+ */
+#define LPFC_SLI4_FCF_TBL_INDX_MAX 32
+
struct lpfc_fcf {
uint16_t fcfi;
uint32_t fcf_flag;
@@ -178,15 +197,13 @@ struct lpfc_fcf {
uint32_t eligible_fcf_cnt;
struct lpfc_fcf_rec current_rec;
struct lpfc_fcf_rec failover_rec;
+ struct list_head fcf_pri_list;
+ struct lpfc_fcf_pri fcf_pri[LPFC_SLI4_FCF_TBL_INDX_MAX];
+ uint32_t current_fcf_scan_pri;
struct timer_list redisc_wait;
unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */
};
-/*
- * Maximum FCF table index, it is for driver internal book keeping, it
- * just needs to be no less than the supported HBA's FCF table size.
- */
-#define LPFC_SLI4_FCF_TBL_INDX_MAX 32
#define LPFC_REGION23_SIGNATURE "RG23"
#define LPFC_REGION23_VERSION 1