Diffstat (limited to 'drivers/scsi/lpfc')
 -rw-r--r--  drivers/scsi/lpfc/lpfc.h           |  26
 -rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c      | 138
 -rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c       |   6
 -rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c        |   3
 -rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c   |   1
 -rw-r--r--  drivers/scsi/lpfc/lpfc_els.c       |  35
 -rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c   |  86
 -rw-r--r--  drivers/scsi/lpfc/lpfc_init.c      |  55
 -rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c      |  10
 -rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c     |  10
 -rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c      |  20
 -rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c       |  85
 -rw-r--r--  drivers/scsi/lpfc/lpfc_version.h   |   6
 -rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c     |   6
 14 files changed, 316 insertions(+), 171 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index e5a9c5a323f8..fe4fb67eb50c 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -74,8 +74,7 @@ struct lpfc_sli2_slim;
  * queue depths when there are driver resource error or Firmware
  * resource error.
  */
-/* 1 Second */
-#define QUEUE_RAMP_DOWN_INTERVAL	(msecs_to_jiffies(1000 * 1))
+#define QUEUE_RAMP_DOWN_INTERVAL	(secs_to_jiffies(1))
 
 /* Number of exchanges reserved for discovery to complete */
 #define LPFC_DISC_IOCB_BUFF_COUNT 20
@@ -1715,18 +1714,12 @@ lpfc_phba_elsring(struct lpfc_hba *phba)
  * Note: If no valid cpu found, then nr_cpu_ids is returned.
  *
  **/
-static inline unsigned int
+static __always_inline unsigned int
 lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
 {
-	unsigned int cpu_it;
-
-	for_each_cpu_wrap(cpu_it, mask, start) {
-		if (cpu_online(cpu_it))
-			break;
-	}
-
-	return cpu_it;
+	return cpumask_next_and_wrap(start, mask, cpu_online_mask);
 }
+
 /**
  * lpfc_next_present_cpu - Finds next present CPU after n
  * @n: the cpu prior to search
@@ -1734,16 +1727,9 @@ lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
  * Note: If no next present cpu, then fallback to first present cpu.
  *
  **/
-static inline unsigned int lpfc_next_present_cpu(int n)
+static __always_inline unsigned int lpfc_next_present_cpu(int n)
 {
-	unsigned int cpu;
-
-	cpu = cpumask_next(n, cpu_present_mask);
-
-	if (cpu >= nr_cpu_ids)
-		cpu = cpumask_first(cpu_present_mask);
-
-	return cpu;
+	return cpumask_next_wrap(n, cpu_present_mask);
 }
 
 /**
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 0d0213bba35d..54ee8ecec3b3 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -291,6 +291,138 @@ buffer_done:
 	return len;
 }
 
+static ssize_t
+lpfc_vmid_info_show(struct device *dev, struct device_attribute *attr,
+		    char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_vmid *vmp;
+	int len = 0, i, j, k, cpu;
+	char hxstr[LPFC_MAX_VMID_SIZE * 3] = {0};
+	struct timespec64 curr_tm;
+	struct lpfc_vmid_priority_range *vr;
+	u64 *lta, rct_acc = 0, max_lta = 0;
+	struct tm tm_val;
+
+	ktime_get_ts64(&curr_tm);
+
+	len += scnprintf(buf + len, PAGE_SIZE - len, "Key 'vmid':\n");
+
+	/* if enabled continue, else return */
+	if (lpfc_is_vmid_enabled(phba)) {
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+				 "lpfc VMID Page: ON\n\n");
+	} else {
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+				 "lpfc VMID Page: OFF\n\n");
+		return len;
+	}
+
+	/* if using priority tagging */
+	if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) {
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+				 "VMID priority ranges:\n");
+		vr = vport->vmid_priority.vmid_range;
+		for (i = 0; i < vport->vmid_priority.num_descriptors; ++i) {
+			len += scnprintf(buf + len, PAGE_SIZE - len,
+					 "\t[x%x - x%x], qos: x%x\n",
+					 vr->low, vr->high, vr->qos);
+			vr++;
+		}
+	}
+
+	for (i = 0; i < phba->cfg_max_vmid; i++) {
+		vmp = &vport->vmid[i];
+		max_lta = 0;
+
+		/* only if the slot is used */
+		if (!(vmp->flag & LPFC_VMID_SLOT_USED) ||
+		    !(vmp->flag & LPFC_VMID_REGISTERED))
+			continue;
+
+		/* if using priority tagging */
+		if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) {
+			len += scnprintf(buf + len, PAGE_SIZE - len,
+					 "VEM ID: %02x:%02x:%02x:%02x:"
+					 "%02x:%02x:%02x:%02x:%02x:%02x:"
+					 "%02x:%02x:%02x:%02x:%02x:%02x\n",
+					 vport->lpfc_vmid_host_uuid[0],
+					 vport->lpfc_vmid_host_uuid[1],
+					 vport->lpfc_vmid_host_uuid[2],
+					 vport->lpfc_vmid_host_uuid[3],
+					 vport->lpfc_vmid_host_uuid[4],
+					 vport->lpfc_vmid_host_uuid[5],
+					 vport->lpfc_vmid_host_uuid[6],
+					 vport->lpfc_vmid_host_uuid[7],
+					 vport->lpfc_vmid_host_uuid[8],
+					 vport->lpfc_vmid_host_uuid[9],
+					 vport->lpfc_vmid_host_uuid[10],
+					 vport->lpfc_vmid_host_uuid[11],
+					 vport->lpfc_vmid_host_uuid[12],
+					 vport->lpfc_vmid_host_uuid[13],
+					 vport->lpfc_vmid_host_uuid[14],
+					 vport->lpfc_vmid_host_uuid[15]);
+		}
+
+		/* IO stats */
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+				 "ID00 READs:%llx WRITEs:%llx\n",
+				 vmp->io_rd_cnt,
+				 vmp->io_wr_cnt);
+		for (j = 0, k = 0; j < strlen(vmp->host_vmid); j++, k += 3)
+			sprintf((char *)(hxstr + k), "%2x ", vmp->host_vmid[j]);
+		/* UUIDs */
+		len += scnprintf(buf + len, PAGE_SIZE - len, "UUID:\n");
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%s\n", hxstr);
+
+		len += scnprintf(buf + len, PAGE_SIZE - len, "String (%s)\n",
+				 vmp->host_vmid);
+
+		if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
+			len += scnprintf(buf + len, PAGE_SIZE - len,
+					 "CS_CTL VMID: 0x%x\n",
+					 vmp->un.cs_ctl_vmid);
+		else
+			len += scnprintf(buf + len, PAGE_SIZE - len,
+					 "Application id: 0x%x\n",
+					 vmp->un.app_id);
+
+		/* calculate the last access time */
+		for_each_possible_cpu(cpu) {
+			lta = per_cpu_ptr(vmp->last_io_time, cpu);
+			if (!lta)
+				continue;
+
+			/* if last access time is less than timeout */
+			if (time_after((unsigned long)*lta, jiffies))
+				continue;
+
+			if (*lta > max_lta)
+				max_lta = *lta;
+		}
+
+		rct_acc = jiffies_to_msecs(jiffies - max_lta) / 1000;
+		/* current time */
+		time64_to_tm(ktime_get_real_seconds(),
+			     -(sys_tz.tz_minuteswest * 60) - rct_acc, &tm_val);
+
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+				 "Last Access Time :"
+				 "%ld-%d-%dT%02d:%02d:%02d\n\n",
+				 1900 + tm_val.tm_year, tm_val.tm_mon + 1,
+				 tm_val.tm_mday, tm_val.tm_hour,
+				 tm_val.tm_min, tm_val.tm_sec);
+
+		if (len >= PAGE_SIZE)
+			return len;
+
+		memset(hxstr, 0, LPFC_MAX_VMID_SIZE * 3);
+	}
+	return len;
+}
+
 /**
  * lpfc_drvr_version_show - Return the Emulex driver string with version number
  * @dev: class unused variable.
@@ -2578,7 +2710,7 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
 			(old_val & DISABLE_FCP_RING_INT)) {
 		spin_unlock_irq(&phba->hbalock);
-		del_timer(&phba->fcp_poll_timer);
+		timer_delete(&phba->fcp_poll_timer);
 		spin_lock_irq(&phba->hbalock);
 		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
 			spin_unlock_irq(&phba->hbalock);
@@ -3011,6 +3143,7 @@ static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
 static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
 		   NULL);
 static DEVICE_ATTR(cmf_info, 0444, lpfc_cmf_info_show, NULL);
+static DEVICE_ATTR_RO(lpfc_vmid_info);
 
 #define WWN_SZ 8
 /**
@@ -6117,6 +6250,7 @@ static struct attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_vmid_inactivity_timeout.attr,
 	&dev_attr_lpfc_vmid_app_header.attr,
 	&dev_attr_lpfc_vmid_priority_tagging.attr,
+	&dev_attr_lpfc_vmid_info.attr,
 	NULL,
 };
 
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index c8f8496bbdf8..d61d979f9b77 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2687,8 +2687,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
 	evt->wait_time_stamp = jiffies;
 	time_left = wait_event_interruptible_timeout(
 		evt->wq, !list_empty(&evt->events_to_see),
-		msecs_to_jiffies(1000 *
-			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
+		secs_to_jiffies(phba->fc_ratov * 2 + LPFC_DRVR_TIMEOUT));
 	if (list_empty(&evt->events_to_see))
 		ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
 	else {
@@ -3258,8 +3257,7 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
 	evt->waiting = 1;
 	time_left = wait_event_interruptible_timeout(
 		evt->wq, !list_empty(&evt->events_to_see),
-		msecs_to_jiffies(1000 *
-			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
+		secs_to_jiffies(phba->fc_ratov * 2 + LPFC_DRVR_TIMEOUT));
 	evt->waiting = 0;
 	if (list_empty(&evt->events_to_see)) {
 		rc = (time_left) ? -EINTR : -ETIMEDOUT;
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 12c67cdd7c19..530dddd39bab 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -3433,7 +3433,8 @@ fdmi_cmd_exit:
 void
 lpfc_delayed_disc_tmo(struct timer_list *t)
 {
-	struct lpfc_vport *vport = from_timer(vport, t, delayed_disc_tmo);
+	struct lpfc_vport *vport = timer_container_of(vport, t,
+						      delayed_disc_tmo);
 	struct lpfc_hba *phba = vport->phba;
 	uint32_t tmo_posted;
 	unsigned long iflag;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 3fd1aa5cc78c..1b601e45bc45 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -6289,7 +6289,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
 		}
 		phba->nvmeio_trc_on = 1;
 		phba->nvmeio_trc_output_idx = 0;
-		phba->nvmeio_trc = NULL;
 	} else {
 nvmeio_off:
 		phba->nvmeio_trc_size = 0;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 1d7db49a8fe4..b1a61eca8295 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -4333,7 +4333,7 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
 	if (!test_and_clear_bit(NLP_DELAY_TMO, &nlp->nlp_flag))
 		return;
 
-	del_timer_sync(&nlp->nlp_delayfunc);
+	timer_delete_sync(&nlp->nlp_delayfunc);
 	nlp->nlp_last_elscmd = 0;
 	if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
 		list_del_init(&nlp->els_retry_evt.evt_listp);
@@ -4378,7 +4378,8 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
 void
 lpfc_els_retry_delay(struct timer_list *t)
 {
-	struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc);
+	struct lpfc_nodelist *ndlp = timer_container_of(ndlp, t,
+							nlp_delayfunc);
 	struct lpfc_vport *vport = ndlp->vport;
 	struct lpfc_hba *phba = vport->phba;
 	unsigned long flags;
@@ -4431,7 +4432,7 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
 	 * firing and before processing the timer, cancel the
 	 * nlp_delayfunc.
 	 */
-	del_timer_sync(&ndlp->nlp_delayfunc);
+	timer_delete_sync(&ndlp->nlp_delayfunc);
 	retry = ndlp->nlp_retry;
 	ndlp->nlp_retry = 0;
 
@@ -8045,8 +8046,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 		if (test_bit(FC_DISC_TMO, &vport->fc_flag)) {
 			tmo = ((phba->fc_ratov * 3) + 3);
 			mod_timer(&vport->fc_disctmo,
-				  jiffies +
-				  msecs_to_jiffies(1000 * tmo));
+				  jiffies + secs_to_jiffies(tmo));
 		}
 		return 0;
 	}
@@ -8081,7 +8081,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 	if (test_bit(FC_DISC_TMO, &vport->fc_flag)) {
 		tmo = ((phba->fc_ratov * 3) + 3);
 		mod_timer(&vport->fc_disctmo,
-			  jiffies + msecs_to_jiffies(1000 * tmo));
+			  jiffies + secs_to_jiffies(tmo));
 	}
 	if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
 	    !test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag)) {
@@ -9386,7 +9386,7 @@ out:
 void
 lpfc_els_timeout(struct timer_list *t)
 {
-	struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc);
+	struct lpfc_vport *vport = timer_container_of(vport, t, els_tmofunc);
 	struct lpfc_hba *phba = vport->phba;
 	uint32_t tmo_posted;
 	unsigned long iflag;
@@ -9511,7 +9511,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
 	if (!list_empty(&pring->txcmplq))
 		if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
 			mod_timer(&vport->els_tmofunc,
-				  jiffies + msecs_to_jiffies(1000 * timeout));
+				  jiffies + secs_to_jiffies(timeout));
 }
 
 /**
@@ -9569,18 +9569,16 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
 	mbx_tmo_err = test_bit(MBX_TMO_ERR, &phba->bit_flags);
 	/* First we need to issue aborts to outstanding cmds on txcmpl */
 	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
+		if (piocb->vport != vport)
+			continue;
+
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
 				 "2243 iotag = 0x%x cmd_flag = 0x%x "
-				 "ulp_command = 0x%x this_vport %x "
-				 "sli_flag = 0x%x\n",
+				 "ulp_command = 0x%x sli_flag = 0x%x\n",
 				 piocb->iotag, piocb->cmd_flag,
 				 get_job_cmnd(phba, piocb),
-				 (piocb->vport == vport),
 				 phba->sli.sli_flag);
 
-		if (piocb->vport != vport)
-			continue;
-
 		if ((phba->sli.sli_flag & LPFC_SLI_ACTIVE) && !mbx_tmo_err) {
 			if (piocb->cmd_flag & LPFC_IO_LIBDFC)
 				continue;
@@ -10899,7 +10897,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
 				 "3334 Delay fc port discovery for %d secs\n",
 				 phba->fc_ratov);
 		mod_timer(&vport->delayed_disc_tmo,
-			  jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
+			  jiffies + secs_to_jiffies(phba->fc_ratov));
 		return;
 	}
 
@@ -11156,7 +11154,7 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba)
 	if (!ndlp)
 		return;
 
-	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
+	mod_timer(&ndlp->nlp_delayfunc, jiffies + secs_to_jiffies(1));
 	set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
 	ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
 	phba->pport->port_state = LPFC_FLOGI;
@@ -11597,7 +11595,8 @@ err:
 void
 lpfc_fabric_block_timeout(struct timer_list *t)
 {
-	struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer);
+	struct lpfc_hba *phba = timer_container_of(phba, t,
+						   fabric_block_timer);
 	unsigned long iflags;
 	uint32_t tmo_posted;
 
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 36e66df36a18..3962f07c9140 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -161,7 +161,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 	struct lpfc_hba   *phba;
 	struct lpfc_work_evt *evtp;
 	unsigned long iflags;
-	bool nvme_reg = false;
+	bool drop_initial_node_ref = false;
 
 	ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode;
 	if (!ndlp)
@@ -183,13 +183,19 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 
 	/* Don't schedule a worker thread event if the vport is going down. */
 	if (test_bit(FC_UNLOADING, &vport->load_flag) ||
-	    !test_bit(HBA_SETUP, &phba->hba_flag)) {
+	    (phba->sli_rev == LPFC_SLI_REV4 &&
+	     !test_bit(HBA_SETUP, &phba->hba_flag))) {
 		spin_lock_irqsave(&ndlp->lock, iflags);
 		ndlp->rport = NULL;
 
-		if (ndlp->fc4_xpt_flags & NVME_XPT_REGD)
-			nvme_reg = true;
+		/* Only 1 thread can drop the initial node reference.
+		 * If not registered for NVME and NLP_DROPPED flag is
+		 * clear, remove the initial reference.
+		 */
+		if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
+			if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
+				drop_initial_node_ref = true;
 
 		/* The scsi_transport is done with the rport so lpfc cannot
 		 * call to unregister.
@@ -200,13 +206,16 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 		/* If NLP_XPT_REGD was cleared in lpfc_nlp_unreg_node,
 		 * unregister calls were made to the scsi and nvme
 		 * transports and refcnt was already decremented. Clear
-		 * the NLP_XPT_REGD flag only if the NVME Rport is
+		 * the NLP_XPT_REGD flag only if the NVME nrport is
 		 * confirmed unregistered.
 		 */
-		if (!nvme_reg && ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
-			ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
+		if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
+			if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
+				ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
 			spin_unlock_irqrestore(&ndlp->lock, iflags);
-			lpfc_nlp_put(ndlp); /* may free ndlp */
+
+			/* Release scsi transport reference */
+			lpfc_nlp_put(ndlp);
 		} else {
 			spin_unlock_irqrestore(&ndlp->lock, iflags);
 		}
@@ -214,24 +223,24 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 			spin_unlock_irqrestore(&ndlp->lock, iflags);
 		}
 
-		/* Only 1 thread can drop the initial node reference. If
-		 * another thread has set NLP_DROPPED, this thread is done.
-		 */
-		if (nvme_reg || test_bit(NLP_DROPPED, &ndlp->nlp_flag))
-			return;
-
-		set_bit(NLP_DROPPED, &ndlp->nlp_flag);
-		lpfc_nlp_put(ndlp);
+		if (drop_initial_node_ref)
+			lpfc_nlp_put(ndlp);
 		return;
 	}
 
 	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
 		return;
 
-	/* check for recovered fabric node */
-	if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
-	    ndlp->nlp_DID == Fabric_DID)
+	/* Ignore callback for a mismatched (stale) rport */
+	if (ndlp->rport != rport) {
+		lpfc_vlog_msg(vport, KERN_WARNING, LOG_NODE,
+			      "6788 fc rport mismatch: d_id x%06x ndlp x%px "
+			      "fc rport x%px node rport x%px state x%x "
+			      "refcnt %u\n",
+			      ndlp->nlp_DID, ndlp, rport, ndlp->rport,
+			      ndlp->nlp_state, kref_read(&ndlp->kref));
 		return;
+	}
 
 	if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -1222,7 +1231,7 @@ lpfc_linkdown_port(struct lpfc_vport *vport)
 
 	/* Stop delayed Nport discovery */
 	clear_bit(FC_DISC_DELAYED, &vport->fc_flag);
-	del_timer_sync(&vport->delayed_disc_tmo);
+	timer_delete_sync(&vport->delayed_disc_tmo);
 
 	if (phba->sli_rev == LPFC_SLI_REV4 &&
 	    vport->port_type == LPFC_PHYSICAL_PORT &&
@@ -1412,7 +1421,7 @@ lpfc_linkup(struct lpfc_hba *phba)
 
 	/* Unblock fabric iocbs if they are blocked */
 	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
-	del_timer_sync(&phba->fabric_block_timer);
+	timer_delete_sync(&phba->fabric_block_timer);
 
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
@@ -3518,7 +3527,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
 	if (phba->fc_topology &&
 	    phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
-				"3314 Toplogy changed was 0x%x is 0x%x\n",
+				"3314 Topology changed was 0x%x is 0x%x\n",
 				phba->fc_topology,
 				bf_get(lpfc_mbx_read_top_topology, la));
 		phba->fc_topology_changed = 1;
@@ -4689,9 +4698,7 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) {
 		vport->phba->nport_event_cnt++;
 		if (vport->phba->nvmet_support == 0) {
-			/* Start devloss if target. */
-			if (ndlp->nlp_type & NLP_NVME_TARGET)
-				lpfc_nvme_unregister_port(vport, ndlp);
+			lpfc_nvme_unregister_port(vport, ndlp);
 		} else {
 			/* NVMET has no upcall. */
 			lpfc_nlp_put(ndlp);
 
@@ -4973,7 +4980,7 @@ lpfc_set_disctmo(struct lpfc_vport *vport)
 				 tmo, vport->port_state, vport->fc_flag);
 	}
 
-	mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
+	mod_timer(&vport->fc_disctmo, jiffies + secs_to_jiffies(tmo));
 	set_bit(FC_DISC_TMO, &vport->fc_flag);
 
 	/* Start Discovery Timer state <hba_state> */
@@ -5004,7 +5011,7 @@ lpfc_can_disctmo(struct lpfc_vport *vport)
 	if (test_bit(FC_DISC_TMO, &vport->fc_flag) ||
 	    timer_pending(&vport->fc_disctmo)) {
 		clear_bit(FC_DISC_TMO, &vport->fc_flag);
-		del_timer_sync(&vport->fc_disctmo);
+		timer_delete_sync(&vport->fc_disctmo);
 		spin_lock_irqsave(&vport->work_port_lock, iflags);
 		vport->work_port_events &= ~WORKER_DISC_TMO;
 		spin_unlock_irqrestore(&vport->work_port_lock, iflags);
@@ -5047,7 +5054,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
 	case CMD_GEN_REQUEST64_CR:
 		if (iocb->ndlp == ndlp)
 			return 1;
-		fallthrough;
+		break;
 	case CMD_ELS_REQUEST64_CR:
 		if (remote_id == ndlp->nlp_DID)
 			return 1;
@@ -5495,7 +5502,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	clear_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
 	ndlp->nlp_last_elscmd = 0;
 
-	del_timer_sync(&ndlp->nlp_delayfunc);
+	timer_delete_sync(&ndlp->nlp_delayfunc);
 
 	list_del_init(&ndlp->els_retry_evt.evt_listp);
 	list_del_init(&ndlp->dev_loss_evt.evt_listp);
@@ -5564,6 +5571,7 @@ static struct lpfc_nodelist *
 __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
 {
 	struct lpfc_nodelist *ndlp;
+	struct lpfc_nodelist *np = NULL;
 	uint32_t data1;
 
 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
@@ -5578,14 +5586,20 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
 				 ndlp, ndlp->nlp_DID,
 				 ndlp->nlp_flag, data1, ndlp->nlp_rpi,
 				 ndlp->active_rrqs_xri_bitmap);
-			return ndlp;
+
+			/* Check for new or potentially stale node */
+			if (ndlp->nlp_state != NLP_STE_UNUSED_NODE)
+				return ndlp;
+			np = ndlp;
 		}
 	}
 
-	/* FIND node did <did> NOT FOUND */
-	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
-			 "0932 FIND node did x%x NOT FOUND.\n", did);
-	return NULL;
+	if (!np)
+		/* FIND node did <did> NOT FOUND */
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+				 "0932 FIND node did x%x NOT FOUND.\n", did);
+
+	return np;
 }
 
 struct lpfc_nodelist *
@@ -6046,7 +6060,7 @@ lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
 void
 lpfc_disc_timeout(struct timer_list *t)
 {
-	struct lpfc_vport *vport = from_timer(vport, t, fc_disctmo);
+	struct lpfc_vport *vport = timer_container_of(vport, t, fc_disctmo);
 	struct lpfc_hba   *phba = vport->phba;
 	uint32_t tmo_posted;
 	unsigned long flags = 0;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index bcadf11414c8..20fa450def03 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -595,7 +595,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 	/* Set up ring-0 (ELS) timer */
 	timeout = phba->fc_ratov * 2;
 	mod_timer(&vport->els_tmofunc,
-		  jiffies + msecs_to_jiffies(1000 * timeout));
+		  jiffies + secs_to_jiffies(timeout));
 	/* Set up heart beat (HB) timer */
 	mod_timer(&phba->hb_tmofunc,
 		  jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL));
@@ -604,7 +604,7 @@
 	phba->last_completion_time = jiffies;
 	/* Set up error attention (ERATT) polling timer */
 	mod_timer(&phba->eratt_poll,
-		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
+		  jiffies + secs_to_jiffies(phba->eratt_poll_interval));
 
 	if (test_bit(LINK_DISABLED, &phba->hba_flag)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -1196,7 +1196,7 @@ lpfc_hb_timeout(struct timer_list *t)
 	uint32_t tmo_posted;
 	unsigned long iflag;
 
-	phba = from_timer(phba, t, hb_tmofunc);
+	phba = timer_container_of(phba, t, hb_tmofunc);
 
 	/* Check for heart beat timeout conditions */
 	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
@@ -1228,7 +1228,7 @@ lpfc_rrq_timeout(struct timer_list *t)
 {
 	struct lpfc_hba *phba;
 
-	phba = from_timer(phba, t, rrq_tmr);
+	phba = timer_container_of(phba, t, rrq_tmr);
 	if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
 		clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
 		return;
@@ -1907,6 +1907,9 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
 	uint32_t intr_mode;
 	LPFC_MBOXQ_t *mboxq;
 
+	/* Notifying the transport that the targets are going offline. */
+	lpfc_scsi_dev_block(phba);
+
 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
 	    LPFC_SLI_INTF_IF_TYPE_2) {
 		/*
@@ -3120,8 +3123,8 @@ lpfc_cleanup(struct lpfc_vport *vport)
 void
 lpfc_stop_vport_timers(struct lpfc_vport *vport)
 {
-	del_timer_sync(&vport->els_tmofunc);
-	del_timer_sync(&vport->delayed_disc_tmo);
+	timer_delete_sync(&vport->els_tmofunc);
+	timer_delete_sync(&vport->delayed_disc_tmo);
 	lpfc_can_disctmo(vport);
 	return;
 }
@@ -3140,7 +3143,7 @@ __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
 	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
 
 	/* Now, try to stop the timer */
-	del_timer(&phba->fcf.redisc_wait);
+	timer_delete(&phba->fcf.redisc_wait);
 }
 
 /**
@@ -3302,12 +3305,12 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
 		lpfc_stop_vport_timers(phba->pport);
 	cancel_delayed_work_sync(&phba->eq_delay_work);
 	cancel_delayed_work_sync(&phba->idle_stat_delay_work);
-	del_timer_sync(&phba->sli.mbox_tmo);
-	del_timer_sync(&phba->fabric_block_timer);
-	del_timer_sync(&phba->eratt_poll);
-	del_timer_sync(&phba->hb_tmofunc);
+	timer_delete_sync(&phba->sli.mbox_tmo);
+	timer_delete_sync(&phba->fabric_block_timer);
+	timer_delete_sync(&phba->eratt_poll);
+	timer_delete_sync(&phba->hb_tmofunc);
 	if (phba->sli_rev == LPFC_SLI_REV4) {
-		del_timer_sync(&phba->rrq_tmr);
+		timer_delete_sync(&phba->rrq_tmr);
 		clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
 	}
 	clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
@@ -3316,7 +3319,7 @@
 	switch (phba->pci_dev_grp) {
 	case LPFC_PCI_DEV_LP:
 		/* Stop any LightPulse device specific driver timers */
-		del_timer_sync(&phba->fcp_poll_timer);
+		timer_delete_sync(&phba->fcp_poll_timer);
 		break;
 	case LPFC_PCI_DEV_OC:
 		/* Stop any OneConnect device specific driver timers */
@@ -3361,8 +3364,8 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
 		/* Determine how long we might wait for the active mailbox
 		 * command to be gracefully completed by firmware.
 		 */
-		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
-				phba->sli.mbox_active) * 1000) + jiffies;
+		timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba,
+				phba->sli.mbox_active)) + jiffies;
 	}
 	spin_unlock_irqrestore(&phba->hbalock, iflag);
 
@@ -5128,7 +5131,7 @@ lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
 static void
 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
 {
-	struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
+	struct lpfc_hba *phba = timer_container_of(phba, t, fcf.redisc_wait);
 
 	/* Don't send FCF rediscovery event if timer cancelled */
 	spin_lock_irq(&phba->hbalock);
@@ -5159,7 +5162,8 @@
 static void
 lpfc_vmid_poll(struct timer_list *t)
 {
-	struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
+	struct lpfc_hba *phba = timer_container_of(phba, t,
+						   inactive_vmid_poll);
 	u32 wake_up = 0;
 
 	/* check if there is a need to issue QFPA */
@@ -6909,7 +6913,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
 			 * re-instantiate the Vlink using FDISC.
 			 */
 			mod_timer(&ndlp->nlp_delayfunc,
-				  jiffies + msecs_to_jiffies(1000));
+				  jiffies + secs_to_jiffies(1));
 			set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
 			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
 			vport->port_state = LPFC_FDISC;
@@ -7952,11 +7956,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
 
 	/* CMF congestion timer */
-	hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	phba->cmf_timer.function = lpfc_cmf_timer;
+	hrtimer_setup(&phba->cmf_timer, lpfc_cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	/* CMF 1 minute stats collection timer */
-	hrtimer_init(&phba->cmf_stats_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	phba->cmf_stats_timer.function = lpfc_cmf_stats_timer;
+	hrtimer_setup(&phba->cmf_stats_timer, lpfc_cmf_stats_timer, CLOCK_MONOTONIC,
+		      HRTIMER_MODE_REL);
 
 	/*
 	 * Control structure for handling external multi-buffer mailbox
@@ -12762,7 +12765,7 @@ static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
 	 * timer. Wait for the poll timer to retire.
 	 */
 	synchronize_rcu();
-	del_timer_sync(&phba->cpuhp_poll_timer);
+	timer_delete_sync(&phba->cpuhp_poll_timer);
 }
 
 static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
@@ -12873,7 +12876,7 @@ lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
 
 	if (offline) {
 		/* Find next online CPU on original mask */
-		cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
+		cpu_next = cpumask_next_wrap(cpu, orig_mask);
 		cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
 
 		/* Found a valid CPU */
@@ -13170,6 +13173,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
 	eqhdl = lpfc_get_eq_hdl(0);
 	rc = pci_irq_vector(phba->pcidev, 0);
 	if (rc < 0) {
+		free_irq(phba->pcidev->irq, phba);
 		pci_free_irq_vectors(phba->pcidev);
 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
 			"0496 MSI pci_irq_vec failed (%d)\n", rc);
@@ -13250,6 +13254,7 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
 		eqhdl = lpfc_get_eq_hdl(0);
 		retval = pci_irq_vector(phba->pcidev, 0);
 		if (retval < 0) {
+			free_irq(phba->pcidev->irq, phba);
 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
 				"0502 INTR pci_irq_vec failed (%d)\n", retval);
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index b1adb9f59097..a6647dd360d1 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -2508,7 +2508,10 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 				 "6031 RemotePort Registration failed "
 				 "err: %d, DID x%06x ref %u\n",
 				 ret, ndlp->nlp_DID, kref_read(&ndlp->kref));
-		lpfc_nlp_put(ndlp);
+
+		/* Only release reference if one was taken for this request */
+		if (!oldrport)
+			lpfc_nlp_put(ndlp);
 	}
 
 	return ret;
@@ -2614,7 +2617,8 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	 * clear any rport state until the transport calls back.
 	 */
 
-	if (ndlp->nlp_type & NLP_NVME_TARGET) {
+	if ((ndlp->nlp_type & NLP_NVME_TARGET) ||
+	    (remoteport->port_role & FC_PORT_ROLE_NVME_TARGET)) {
 		/* No concern about the role change on the nvme remoteport.
 		 * The transport will update it.
 		 */
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index fba2e62027b7..4cfc928bcf2d 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1243,7 +1243,7 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct lpfc_async_xchg_ctx *ctxp =
 		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
-	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
+	struct rqb_dmabuf *nvmebuf;
 	struct lpfc_hba *phba = ctxp->phba;
 	unsigned long iflag;
 
@@ -1251,13 +1251,18 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
 	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
 			 ctxp->oxid, ctxp->size, raw_smp_processor_id());
 
+	spin_lock_irqsave(&ctxp->ctxlock, iflag);
+	nvmebuf = ctxp->rqb_buffer;
 	if (!nvmebuf) {
+		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
 				"6425 Defer rcv: no buffer oxid x%x: "
 				"flg %x ste %x\n",
 				ctxp->oxid, ctxp->flag, ctxp->state);
 		return;
 	}
+	ctxp->rqb_buffer = NULL;
+	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 
 	tgtp = phba->targetport->private;
 	if (tgtp)
@@ -1265,9 +1270,6 @@
 
 	/* Free the nvmebuf since a new buffer already replaced it */
 	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
-	spin_lock_irqsave(&ctxp->ctxlock, iflag);
-	ctxp->rqb_buffer = NULL;
-	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 }
 
 /**
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 055ed632c14d..31a9f142bcb9 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -390,6 +390,10 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
 	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
 		return;
 
+	/* may be called before queues established if hba_setup fails */
+	if (!phba->sli4_hba.hdwq)
+		return;
+
 	spin_lock_irqsave(&phba->hbalock, iflag);
 	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
 		qp = &phba->sli4_hba.hdwq[idx];
@@ -5190,7 +5194,7 @@ void lpfc_poll_start_timer(struct lpfc_hba * phba)
 **/
 void lpfc_poll_timeout(struct timer_list *t)
 {
-	struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
+	struct lpfc_hba *phba = timer_container_of(phba, t, fcp_poll_timer);
 
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
 		lpfc_sli_handle_fast_ring_event(phba,
@@ -5488,7 +5492,7 @@ void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport)
 	struct lpfc_vmid *cur;
 
 	if (vport->port_type == LPFC_PHYSICAL_PORT)
-		del_timer_sync(&vport->phba->inactive_vmid_poll);
+		timer_delete_sync(&vport->phba->inactive_vmid_poll);
 
 	kfree(vport->qfpa_res);
 	kfree(vport->vmid_priority.vmid_range);
@@ -5645,9 +5649,8 @@ wait_for_cmpl:
 	 * cmd_flag is set to LPFC_DRIVER_ABORTED before we wait
	 * for abort to complete.
 	 */
-	wait_event_timeout(waitq,
-			   (lpfc_cmd->pCmd != cmnd),
-			   msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
+	wait_event_timeout(waitq, (lpfc_cmd->pCmd != cmnd),
+			   secs_to_jiffies(2*vport->cfg_devloss_tmo));
 
 	spin_lock(&lpfc_cmd->buf_lock);
 
@@ -5911,7 +5914,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct fc_rport *rport)
 	 * If target is not in a MAPPED state, delay until
 	 * target is rediscovered or devloss timeout expires.
 	 */
-	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+	later = secs_to_jiffies(2 * vport->cfg_devloss_tmo) + jiffies;
 	while (time_after(later, jiffies)) {
 		if (!pnode)
 			return FAILED;
@@ -5957,7 +5960,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
 		lpfc_sli_abort_taskmgmt(vport,
 					&phba->sli.sli3_ring[LPFC_FCP_RING],
 					tgt_id, lun_id, context);
-	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+	later = secs_to_jiffies(2 * vport->cfg_devloss_tmo) + jiffies;
 	while (time_after(later, jiffies) && cnt) {
 		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
 		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
@@ -6137,8 +6140,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
 		wait_event_timeout(waitq,
 				   !test_bit(NLP_WAIT_FOR_LOGO,
 					     &pnode->save_flags),
-				   msecs_to_jiffies(dev_loss_tmo *
-						    1000));
+				   secs_to_jiffies(dev_loss_tmo));
 
 		if (test_and_clear_bit(NLP_WAIT_FOR_LOGO,
 				       &pnode->save_flags))
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 92f3d4423729..47bbcb78fb4d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -1025,7 +1025,7 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
 	LIST_HEAD(send_rrq);
 
 	clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
-	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
+	next_time = jiffies + secs_to_jiffies(phba->fc_ratov + 1);
 	spin_lock_irqsave(&phba->rrq_list_lock, iflags);
 	list_for_each_entry_safe(rrq, nextrrq,
 				 &phba->active_rrq_list, list) {
@@ -1208,8 +1208,7 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 	else
 		rrq->send_rrq = 0;
 	rrq->xritag = xritag;
-	rrq->rrq_stop_time = jiffies +
-				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
+	rrq->rrq_stop_time = jiffies + secs_to_jiffies(phba->fc_ratov + 1);
 	rrq->nlp_DID = ndlp->nlp_DID;
 	rrq->vport = ndlp->vport;
 	rrq->rxid = rxid;
@@ -1736,8 +1735,7 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		BUG_ON(!piocb->vport);
 		if (!test_bit(FC_UNLOADING, &piocb->vport->load_flag))
 			mod_timer(&piocb->vport->els_tmofunc,
-				  jiffies +
-				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
+				  jiffies + secs_to_jiffies(phba->fc_ratov << 1));
 	}
 
 	return 0;
@@ -3927,13 +3925,20 @@ void lpfc_poll_eratt(struct timer_list *t)
 	uint32_t eratt = 0;
 	uint64_t sli_intr, cnt;
 
-	phba = from_timer(phba, t, eratt_poll);
-	if (!test_bit(HBA_SETUP, &phba->hba_flag))
-		return;
+	phba = timer_container_of(phba, t, eratt_poll);
 
 	if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
 		return;
 
+	if (phba->sli_rev == LPFC_SLI_REV4 &&
+	    !test_bit(HBA_SETUP, &phba->hba_flag)) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"0663 HBA still initializing 0x%lx, restart "
+				"timer\n",
+				phba->hba_flag);
+		goto restart_timer;
+	}
+
 	/* Here we will also keep track of interrupts per sec of the hba */
 	sli_intr = phba->sli.slistat.sli_intr;
 
@@ -3952,14 +3957,16 @@ void lpfc_poll_eratt(struct timer_list *t)
 
 	/* Check chip HA register for error event */
 	eratt = lpfc_sli_check_eratt(phba);
-	if (eratt)
+	if (eratt) {
 		/* Tell the worker thread there is work to do */
 		lpfc_worker_wake_up(phba);
-	else
-		/* Restart the timer for next eratt poll */
-		mod_timer(&phba->eratt_poll,
-			  jiffies +
-			  msecs_to_jiffies(1000 * phba->eratt_poll_interval));
+		return;
+	}
+
+restart_timer:
+	/* Restart the timer for next eratt poll */
+	mod_timer(&phba->eratt_poll,
+		  jiffies + secs_to_jiffies(phba->eratt_poll_interval));
 
 	return;
 }
@@ -5044,7 +5051,7 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
 		return 1;
 	}
 
-	del_timer_sync(&psli->mbox_tmo);
+	timer_delete_sync(&psli->mbox_tmo);
 	if (ha_copy & HA_ERATT) {
 		writel(HA_ERATT, phba->HAregaddr);
 		phba->pport->stopped = 1;
@@ -6006,9 +6013,9 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
 	phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
 	phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
 
-	memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
-	strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
+	memcpy(phba->BIOSVersion, cntl_attr->bios_ver_str,
 		sizeof(phba->BIOSVersion));
+	phba->BIOSVersion[sizeof(phba->BIOSVersion) - 1] = '\0';
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
 			"3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
@@ -9010,7 +9017,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 
 	/* Start the ELS watchdog timer */
 	mod_timer(&vport->els_tmofunc,
-		  jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
+		  jiffies + secs_to_jiffies(phba->fc_ratov * 2));
 
 	/* Start heart beat timer */
 	mod_timer(&phba->hb_tmofunc,
@@ -9029,7 +9036,7 @@
 
 	/* Start error attention (ERATT) polling timer */
 	mod_timer(&phba->eratt_poll,
-		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
+		  jiffies + secs_to_jiffies(phba->eratt_poll_interval));
 
 	/*
 	 * The port is ready, set the host's link state to LINK_DOWN
@@ -9118,7 +9125,7 @@ out_free_mbox:
 void
 lpfc_mbox_timeout(struct timer_list *t)
 {
-	struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
+	struct lpfc_hba *phba = timer_container_of(phba, t, sli.mbox_tmo);
 	unsigned long iflag;
 	uint32_t tmo_posted;
 
@@ -9506,8 +9513,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
 			goto out_not_finished;
 		}
 		/* timeout active mbox command */
-		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
-					   1000);
+		timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox));
 		mod_timer(&psli->mbox_tmo, jiffies + timeout);
 	}
 
@@ -9631,8 +9637,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
 						       drvr_flag);
 			goto out_not_finished;
 		}
-		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
-					   1000) + jiffies;
+		timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox)) + jiffies;
 		i = 0;
 		/* Wait for command to complete */
 		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
@@ -9758,9 +9763,8 @@ lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
 	 * command to be gracefully completed by firmware.
 	 */
 	if (phba->sli.mbox_active)
-		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
-						phba->sli.mbox_active) *
-					   1000) + jiffies;
+		timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba,
+					  phba->sli.mbox_active)) + jiffies;
 	spin_unlock_irq(&phba->hbalock);
 
 	/* Make sure the mailbox is really active */
@@ -9883,8 +9887,7 @@ lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 		}
 	}
 
-	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
-				   * 1000) + jiffies;
+	timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)) + jiffies;
 
 	do {
 		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
@@ -10232,7 +10235,7 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
 
 	/* Start timer for the mbox_tmo and log some mailbox post messages */
 	mod_timer(&psli->mbox_tmo, (jiffies +
-		  msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
+		  secs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq))));
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
 			"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
@@ -12083,7 +12086,7 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
 	local_bh_enable();
 
 	/* Return any active mbox cmds */
-	del_timer_sync(&psli->mbox_tmo);
+	timer_delete_sync(&psli->mbox_tmo);
 
 	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
 	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
@@ -13161,7 +13164,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
 	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
 				     SLI_IOCB_RET_IOCB);
 	if (retval == IOCB_SUCCESS) {
-		timeout_req = msecs_to_jiffies(timeout * 1000);
+		timeout_req = secs_to_jiffies(timeout);
 		timeleft = wait_event_timeout(done_q,
 				lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
 				timeout_req);
@@ -13277,8 +13280,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
 	/* now issue the command */
 	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
-		wait_for_completion_timeout(&mbox_done,
-					    msecs_to_jiffies(timeout * 1000));
+		wait_for_completion_timeout(&mbox_done, secs_to_jiffies(timeout));
 
 		spin_lock_irqsave(&phba->hbalock, flag);
 		pmboxq->ctx_u.mbox_wait = NULL;
@@ -13338,9 +13340,8 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
 	 * command to be gracefully completed by firmware.
 	 */
 	if (phba->sli.mbox_active)
-		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
-						phba->sli.mbox_active) *
-					   1000) + jiffies;
+		timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba,
+					  phba->sli.mbox_active)) + jiffies;
 	spin_unlock_irq(&phba->hbalock);
 
 	/* Enable softirqs again, done with phba->hbalock */
@@ -13811,7 +13812,7 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
 				phba->sli.mbox_active = NULL;
 				spin_unlock_irqrestore(&phba->hbalock, iflag);
 				phba->last_completion_time = jiffies;
-				del_timer(&phba->sli.mbox_tmo);
+				timer_delete(&phba->sli.mbox_tmo);
 				if (pmb->mbox_cmpl) {
 					lpfc_sli_pcimem_bcopy(mbox, pmbox,
 							      MAILBOX_CMD_SIZE);
@@ -14311,7 +14312,7 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
 
 	/* Reset heartbeat timer */
 	phba->last_completion_time = jiffies;
-	del_timer(&phba->sli.mbox_tmo);
+	timer_delete(&phba->sli.mbox_tmo);
 
 	/* Move mbox data to caller's mailbox region, do endian swapping */
 	if (pmb->mbox_cmpl && mbox)
@@ -15660,7 +15661,7 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
 
 void lpfc_sli4_poll_hbtimer(struct timer_list *t)
 {
-	struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
+	struct lpfc_hba *phba = timer_container_of(phba, t, cpuhp_poll_timer);
 	struct lpfc_queue *eq;
 
 	rcu_read_lock();
@@ -15698,7 +15699,7 @@ static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
 	synchronize_rcu();
 
 	if (list_empty(&phba->poll_list))
-		del_timer_sync(&phba->cpuhp_poll_timer);
+		timer_delete_sync(&phba->cpuhp_poll_timer);
 }
 
 void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c35f7225058e..749688aa8a82 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -20,7 +20,7 @@
  * included with this package.                                     *
 *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "14.4.0.7"
+#define LPFC_DRIVER_VERSION "14.4.0.9"
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */
@@ -32,6 +32,6 @@
 #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
 		LPFC_DRIVER_VERSION
 
-#define LPFC_COPYRIGHT "Copyright (C) 2017-2024 Broadcom. All Rights " \
+#define LPFC_COPYRIGHT "Copyright (C) 2017-2025 Broadcom. All Rights " \
 		"Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \
 		"and/or its subsidiaries."
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 3d70cc517573..2797aa75a689 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -246,7 +246,7 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
 	 * fabric RA_TOV value and dev_loss tmo.  The driver's
 	 * devloss_tmo is 10 giving this loop a 3x multiplier minimally.
 	 */
-	wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000);
+	wait_time_max = secs_to_jiffies((phba->fc_ratov * 3) + 3);
 	wait_time_max += jiffies;
 	start_time = jiffies;
 	while (time_before(jiffies, wait_time_max)) {
@@ -505,7 +505,7 @@ lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 		wait_event_timeout(waitq,
 				   !test_bit(NLP_WAIT_FOR_LOGO,
 					     &ndlp->save_flags),
-				   msecs_to_jiffies(phba->fc_ratov * 2000));
+				   secs_to_jiffies(phba->fc_ratov * 2));
 
 	if (!test_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags))
 		goto logo_cmpl;
@@ -703,7 +703,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
 			wait_event_timeout(waitq,
 					   !test_bit(NLP_WAIT_FOR_DA_ID,
 						     &ndlp->save_flags),
-					   msecs_to_jiffies(phba->fc_ratov * 2000));
+					   secs_to_jiffies(phba->fc_ratov * 2));
 		}
 
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT | LOG_ELS,