author | Jakub Kicinski <kuba@kernel.org> | 2023-11-29 07:10:30 +0300
committer | Jakub Kicinski <kuba@kernel.org> | 2023-11-29 07:10:30 +0300
commit | f1be1e04c76bb9c44789d3575bba4418cf0ea359 (patch)
tree | 11bbdea7fb6f2319b31fd2aa84a93b4449fc6100 /drivers/net/ethernet/intel
parent | cd04b44bf055c4cd6bcee2ebfa6932fb20ef369d (diff)
parent | 95260816b489509c1f5b0141d0ae7b65be3c1d39 (diff)
download | linux-f1be1e04c76bb9c44789d3575bba4418cf0ea359.tar.xz
Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue
Tony Nguyen says:
====================
Intel Wired LAN Driver Updates 2023-11-27 (i40e, iavf)
This series contains updates to i40e and iavf drivers.
Ivan Vecera performs more cleanups on the i40e and iavf drivers,
removing unused fields, unused defines, and unneeded queue tracking fields.
Petr Oros utilizes the iavf_schedule_aq_request() helper to replace
open-coded equivalents.
* '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
iavf: use iavf_schedule_aq_request() helper
iavf: Remove queue tracking fields from iavf_adminq_ring
i40e: Remove queue tracking fields from i40e_adminq_ring
i40e: Remove AQ register definitions for VF types
i40e: Delete unused and useless i40e_pf fields
====================
Link: https://lore.kernel.org/r/20231127211037.1135403-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
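The iavf conversions in this series all follow one pattern: the open-coded "set an aq_required flag, then kick the watchdog" sequence is collapsed into a single helper call. A minimal sketch of the before/after shape, taken from the iavf_up_complete() hunk below and assuming iavf_schedule_aq_request() simply wraps those two steps (its body is not part of this diff):

```c
/* Before: flag and watchdog kick open-coded at every call site */
adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);

/* After: one call per request; the helper is assumed to set the
 * aq_required flag and schedule the watchdog task exactly as above.
 */
iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ENABLE_QUEUES);
```

Centralizing the sequence keeps the flag update and the watchdog scheduling from drifting apart at individual call sites.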
Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r-- | drivers/net/ethernet/intel/i40e/i40e.h | 16
-rw-r--r-- | drivers/net/ethernet/intel/i40e/i40e_adminq.c | 86
-rw-r--r-- | drivers/net/ethernet/intel/i40e/i40e_adminq.h | 7
-rw-r--r-- | drivers/net/ethernet/intel/i40e/i40e_common.c | 8
-rw-r--r-- | drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 3
-rw-r--r-- | drivers/net/ethernet/intel/i40e/i40e_main.c | 26
-rw-r--r-- | drivers/net/ethernet/intel/i40e/i40e_register.h | 10
-rw-r--r-- | drivers/net/ethernet/intel/iavf/iavf_adminq.c | 86
-rw-r--r-- | drivers/net/ethernet/intel/iavf/iavf_adminq.h | 7
-rw-r--r-- | drivers/net/ethernet/intel/iavf/iavf_common.c | 8
-rw-r--r-- | drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 10
-rw-r--r-- | drivers/net/ethernet/intel/iavf/iavf_main.c | 23
12 files changed, 90 insertions, 200 deletions
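Most of the adminq changes in the diff below are equally mechanical: the head/tail/len/bal/bah register offsets cached in struct i40e_adminq_ring and struct iavf_adminq_ring are dropped, and the fixed PF/VF register defines are used directly. A minimal sketch of that pattern, using only names that appear in the hunks:

```c
/* Before: offsets stashed in the ring struct at init time and
 * dereferenced on every access.
 */
hw->aq.asq.head = I40E_PF_ATQH;   /* done once in i40e_adminq_init_regs() */
wr32(hw, hw->aq.asq.head, 0);     /* later register accesses go through the copy */

/* After: the PF admin queue registers sit at fixed offsets, so the
 * cached copies (and the init helper that filled them) can go away.
 */
wr32(hw, I40E_PF_ATQH, 0);
```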
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 6022944d04f8..9b701615c7c6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -468,9 +468,7 @@ struct i40e_pf {
 struct i40e_hw hw;
 DECLARE_BITMAP(state, __I40E_STATE_SIZE__);
 struct msix_entry *msix_entries;
- bool fc_autoneg_status;
- u16 eeprom_version;
 u16 num_vmdq_vsis; /* num vmdq vsis this PF has set up */
 u16 num_vmdq_qps; /* num queue pairs per vmdq pool */
 u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
@@ -486,7 +484,6 @@ struct i40e_pf {
 u16 rss_size_max; /* HW defined max RSS queues */
 u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */
 u16 num_alloc_vsi; /* num VSIs this driver supports */
- u8 atr_sample_rate;
 bool wol_en;
 struct hlist_head fdir_filter_list;
@@ -524,12 +521,10 @@ struct i40e_pf {
 struct hlist_head cloud_filter_list;
 u16 num_cloud_filters;
- enum i40e_interrupt_policy int_policy;
 u16 rx_itr_default;
 u16 tx_itr_default;
 u32 msg_enable;
 char int_name[I40E_INT_NAME_STR_LEN];
- u16 adminq_work_limit; /* num of admin receive queue desc to process */
 unsigned long service_timer_period;
 unsigned long service_timer_previous;
 struct timer_list service_timer;
@@ -543,7 +538,6 @@ struct i40e_pf {
 u32 tx_timeout_count;
 u32 tx_timeout_recovery_level;
 unsigned long tx_timeout_last_recovery;
- u32 tx_sluggish_count;
 u32 hw_csum_rx_error;
 u32 led_status;
 u16 corer_count; /* Core reset count */
@@ -565,17 +559,13 @@ struct i40e_pf {
 struct i40e_lump_tracking *irq_pile;
 /* switch config info */
- u16 pf_seid;
 u16 main_vsi_seid;
 u16 mac_seid;
- struct kobject *switch_kobj;
 #ifdef CONFIG_DEBUG_FS
 struct dentry *i40e_dbg_pf;
 #endif /* CONFIG_DEBUG_FS */
 bool cur_promisc;
- u16 instance; /* A unique number per i40e_pf instance in the system */
-
 /* sr-iov config info */
 struct i40e_vf *vf;
 int num_alloc_vfs; /* actual number of VFs allocated */
@@ -669,9 +659,7 @@ struct i40e_pf {
 unsigned long ptp_tx_start;
 struct hwtstamp_config tstamp_config;
 struct timespec64 ptp_prev_hw_time;
- struct work_struct ptp_pps_work;
 struct work_struct ptp_extts0_work;
- struct work_struct ptp_extts1_work;
 ktime_t ptp_reset_start;
 struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */
 u32 ptp_adj_mult;
@@ -679,10 +667,7 @@ struct i40e_pf {
 u32 tx_hwtstamp_skipped;
 u32 rx_hwtstamp_cleared;
 u32 latch_event_flags;
- u64 ptp_pps_start;
- u32 pps_delay;
 spinlock_t ptp_rx_lock; /* Used to protect Rx timestamp registers. */
- struct ptp_pin_desc ptp_pin[3];
 unsigned long latch_events[4];
 bool ptp_tx;
 bool ptp_rx;
@@ -695,7 +680,6 @@ struct i40e_pf {
 u32 fd_inv;
 u16 phy_led_val;
- u16 override_q_count;
 u16 last_sw_conf_flags;
 u16 last_sw_conf_valid_flags;
 /* List to keep previous DDP profiles to be rolled back in the future */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 896c43905309..f73f5930fc58 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -9,27 +9,6 @@ static void i40e_resume_aq(struct i40e_hw *hw);
 /**
- * i40e_adminq_init_regs - Initialize AdminQ registers
- * @hw: pointer to the hardware structure
- *
- * This assumes the alloc_asq and alloc_arq functions have already been called
- **/
-static void i40e_adminq_init_regs(struct i40e_hw *hw)
-{
- /* set head and tail registers in our local struct */
- hw->aq.asq.tail = I40E_PF_ATQT;
- hw->aq.asq.head = I40E_PF_ATQH;
- hw->aq.asq.len = I40E_PF_ATQLEN;
- hw->aq.asq.bal = I40E_PF_ATQBAL;
- hw->aq.asq.bah = I40E_PF_ATQBAH;
- hw->aq.arq.tail = I40E_PF_ARQT;
- hw->aq.arq.head = I40E_PF_ARQH;
- hw->aq.arq.len = I40E_PF_ARQLEN;
- hw->aq.arq.bal = I40E_PF_ARQBAL;
- hw->aq.arq.bah = I40E_PF_ARQBAH;
-}
-
-/**
 * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 * @hw: pointer to the hardware structure
 **/
@@ -254,17 +233,17 @@ static int i40e_config_asq_regs(struct i40e_hw *hw)
 u32 reg = 0;
 /* Clear Head and Tail */
- wr32(hw, hw->aq.asq.head, 0);
- wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, I40E_PF_ATQH, 0);
+ wr32(hw, I40E_PF_ATQT, 0);
 /* set starting point */
- wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
 I40E_PF_ATQLEN_ATQENABLE_MASK));
- wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
- wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.desc_buf.pa));
 /* Check one register to verify that config was applied */
- reg = rd32(hw, hw->aq.asq.bal);
+ reg = rd32(hw, I40E_PF_ATQBAL);
 if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
 ret_code = -EIO;
@@ -283,20 +262,20 @@ static int i40e_config_arq_regs(struct i40e_hw *hw)
 u32 reg = 0;
 /* Clear Head and Tail */
- wr32(hw, hw->aq.arq.head, 0);
- wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, I40E_PF_ARQH, 0);
+ wr32(hw, I40E_PF_ARQT, 0);
 /* set starting point */
- wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
 I40E_PF_ARQLEN_ARQENABLE_MASK));
- wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
- wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.desc_buf.pa));
 /* Update tail in the HW to post pre-allocated buffers */
- wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+ wr32(hw, I40E_PF_ARQT, hw->aq.num_arq_entries - 1);
 /* Check one register to verify that config was applied */
- reg = rd32(hw, hw->aq.arq.bal);
+ reg = rd32(hw, I40E_PF_ARQBAL);
 if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
 ret_code = -EIO;
@@ -439,11 +418,11 @@ static int i40e_shutdown_asq(struct i40e_hw *hw)
 }
 /* Stop firmware AdminQ processing */
- wr32(hw, hw->aq.asq.head, 0);
- wr32(hw, hw->aq.asq.tail, 0);
- wr32(hw, hw->aq.asq.len, 0);
- wr32(hw, hw->aq.asq.bal, 0);
- wr32(hw, hw->aq.asq.bah, 0);
+ wr32(hw, I40E_PF_ATQH, 0);
+ wr32(hw, I40E_PF_ATQT, 0);
+ wr32(hw, I40E_PF_ATQLEN, 0);
+ wr32(hw, I40E_PF_ATQBAL, 0);
+ wr32(hw, I40E_PF_ATQBAH, 0);
 hw->aq.asq.count = 0; /* to indicate uninitialized queue */
@@ -473,11 +452,11 @@ static int i40e_shutdown_arq(struct i40e_hw *hw)
 }
 /* Stop firmware AdminQ processing */
- wr32(hw, hw->aq.arq.head, 0);
- wr32(hw, hw->aq.arq.tail, 0);
- wr32(hw, hw->aq.arq.len, 0);
- wr32(hw, hw->aq.arq.bal, 0);
- wr32(hw, hw->aq.arq.bah, 0);
+ wr32(hw, I40E_PF_ARQH, 0);
+ wr32(hw, I40E_PF_ARQT, 0);
+ wr32(hw, I40E_PF_ARQLEN, 0);
+ wr32(hw, I40E_PF_ARQBAL, 0);
+ wr32(hw, I40E_PF_ARQBAH, 0);
 hw->aq.arq.count = 0; /* to indicate uninitialized queue */
@@ -608,9 +587,6 @@ int i40e_init_adminq(struct i40e_hw *hw)
 goto init_adminq_exit;
 }
- /* Set up register offsets */
- i40e_adminq_init_regs(hw);
-
 /* setup ASQ command write back timeout */
 hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
@@ -720,9 +696,9 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
 desc = I40E_ADMINQ_DESC(*asq, ntc);
 details = I40E_ADMINQ_DETAILS(*asq, ntc);
- while (rd32(hw, hw->aq.asq.head) != ntc) {
+ while (rd32(hw, I40E_PF_ATQH) != ntc) {
 i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
- "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
+ "ntc %d head %d.\n", ntc, rd32(hw, I40E_PF_ATQH));
 if (details->callback) {
 I40E_ADMINQ_CALLBACK cb_func =
@@ -756,7 +732,7 @@ static bool i40e_asq_done(struct i40e_hw *hw)
 /* AQ designers suggest use of head for better
 * timing reliability than DD bit
 */
- return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+ return rd32(hw, I40E_PF_ATQH) == hw->aq.asq.next_to_use;
 }
@@ -797,7 +773,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
 hw->aq.asq_last_status = I40E_AQ_RC_OK;
- val = rd32(hw, hw->aq.asq.head);
+ val = rd32(hw, I40E_PF_ATQH);
 if (val >= hw->aq.num_asq_entries) {
 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
 "AQTX: head overrun at %d\n", val);
@@ -889,7 +865,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
 if (hw->aq.asq.next_to_use == hw->aq.asq.count)
 hw->aq.asq.next_to_use = 0;
 if (!details->postpone)
- wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+ wr32(hw, I40E_PF_ATQT, hw->aq.asq.next_to_use);
 /* if cmd_details are not defined or async flag is not set,
 * we need to wait for desc write back
@@ -949,7 +925,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
 /* update the error if time out occurred */
 if ((!cmd_completed) &&
 (!details->async && !details->postpone)) {
- if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
+ if (rd32(hw, I40E_PF_ATQLEN) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
 "AQTX: AQ Critical error.\n");
 status = -EIO;
@@ -1103,7 +1079,7 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
 }
 /* set next_to_use to head */
- ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
+ ntu = rd32(hw, I40E_PF_ARQH) & I40E_PF_ARQH_ARQH_MASK;
 if (ntu == ntc) {
 /* nothing to do - shouldn't need to update ring's values */
 ret_code = -EALREADY;
@@ -1151,7 +1127,7 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
 desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
 /* set tail = the last cleaned desc index. */
- wr32(hw, hw->aq.arq.tail, ntc);
+ wr32(hw, I40E_PF_ARQT, ntc);
 /* ntc is updated to tail + 1 */
 ntc++;
 if (ntc == hw->aq.num_arq_entries)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 80125bea80a2..ee86d2c53079 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -29,13 +29,6 @@ struct i40e_adminq_ring {
 /* used for interrupt processing */
 u16 next_to_use;
 u16 next_to_clean;
-
- /* used for queue tracking */
- u32 head;
- u32 tail;
- u32 len;
- u32 bah;
- u32 bal;
 };
 /* ASQ transaction details */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index e171f4814e21..bd52b73cf61f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -195,11 +195,11 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
 **/
 bool i40e_check_asq_alive(struct i40e_hw *hw)
 {
- if (hw->aq.asq.len)
- return !!(rd32(hw, hw->aq.asq.len) &
- I40E_PF_ATQLEN_ATQENABLE_MASK);
- else
+ /* Check if the queue is initialized */
+ if (!hw->aq.asq.count)
 return false;
+
+ return !!(rd32(hw, I40E_PF_ATQLEN) & I40E_PF_ATQLEN_ATQENABLE_MASK);
 }
 /**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 88240571721a..ef70ddbe9c2f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1028,9 +1028,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 "emp reset count: %d\n", pf->empr_count);
 dev_info(&pf->pdev->dev,
 "pf reset count: %d\n", pf->pfr_count);
- dev_info(&pf->pdev->dev,
- "pf tx sluggish count: %d\n",
- pf->tx_sluggish_count);
 } else if (strncmp(&cmd_buf[5], "port", 4) == 0) {
 struct i40e_aqc_query_port_ets_config_resp *bw_data;
 struct i40e_dcbx_config *cfg =
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 7ded598a68e6..9eeea8d9ab67 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3465,7 +3465,7 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
 /* some ATR related tx ring init */
 if (test_bit(I40E_FLAG_FD_ATR_ENA, vsi->back->flags)) {
- ring->atr_sample_rate = vsi->back->atr_sample_rate;
+ ring->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
 ring->atr_count = 0;
 } else {
 ring->atr_sample_rate = 0;
 }
@@ -10127,7 +10127,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
 return;
 /* check for error indications */
- val = rd32(&pf->hw, pf->hw.aq.arq.len);
+ val = rd32(&pf->hw, I40E_PF_ARQLEN);
 oldval = val;
 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
 if (hw->debug_mask & I40E_DEBUG_AQ)
@@ -10146,9 +10146,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
 }
 if (oldval != val)
- wr32(&pf->hw, pf->hw.aq.arq.len, val);
+ wr32(&pf->hw, I40E_PF_ARQLEN, val);
 
- val = rd32(&pf->hw, pf->hw.aq.asq.len);
+ val = rd32(&pf->hw, I40E_PF_ATQLEN);
 oldval = val;
 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
@@ -10166,7 +10166,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
 }
 if (oldval != val)
- wr32(&pf->hw, pf->hw.aq.asq.len, val);
+ wr32(&pf->hw, I40E_PF_ATQLEN, val);
 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
@@ -10226,9 +10226,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
 opcode);
 break;
 }
- } while (i++ < pf->adminq_work_limit);
+ } while (i++ < I40E_AQ_WORK_LIMIT);
 
- if (i < pf->adminq_work_limit)
+ if (i < I40E_AQ_WORK_LIMIT)
 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
 /* re-enable Admin queue interrupt cause */
@@ -12769,7 +12769,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
 set_bit(I40E_FLAG_FD_ATR_ENA, pf->flags);
- pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
 if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) &&
 pf->hw.num_partitions > 1)
 dev_info(&pf->pdev->dev,
@@ -12815,7 +12814,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
 I40E_MAX_VF_COUNT);
 }
 #endif /* CONFIG_PCI_IOV */
- pf->eeprom_version = 0xDEAD;
 pf->lan_veb = I40E_NO_VEB;
 pf->lan_vsi = I40E_NO_VSI;
@@ -14976,12 +14974,11 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
 * the PF's VSI
 */
 pf->mac_seid = uplink_seid;
- pf->pf_seid = downlink_seid;
 pf->main_vsi_seid = seid;
 if (printconfig)
 dev_info(&pf->pdev->dev, "pf_seid=%d main_vsi_seid=%d\n",
- pf->pf_seid, pf->main_vsi_seid);
+ downlink_seid, pf->main_vsi_seid);
 break;
 case I40E_SWITCH_ELEMENT_TYPE_PF:
 case I40E_SWITCH_ELEMENT_TYPE_VF:
@@ -15160,10 +15157,6 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
 /* fill in link information and enable LSE reporting */
 i40e_link_event(pf);
- /* Initialize user-specific link properties */
- pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
- I40E_AQ_AN_COMPLETED) ? true : false);
-
 i40e_ptp_init(pf);
 if (!lock_acquired)
@@ -15637,7 +15630,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 #endif /* CONFIG_I40E_DCB */
 struct i40e_pf *pf;
 struct i40e_hw *hw;
- static u16 pfs_found;
 u16 wol_nvm_bits;
 char nvm_ver[32];
 u16 link_status;
@@ -15715,7 +15707,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 hw->bus.device = PCI_SLOT(pdev->devfn);
 hw->bus.func = PCI_FUNC(pdev->devfn);
 hw->bus.bus_id = pdev->bus->number;
- pf->instance = pfs_found;
 /* Select something other than the 802.1ad ethertype for the
 * switch to use internally and drop on ingress.
@@ -15777,7 +15768,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 }
 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
- pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
 "%s-%s:misc",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index d561687303ea..2e1eaca44343 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -863,16 +863,6 @@
 #define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */
 #define I40E_PFPM_WUFC_MAG_SHIFT 1
 #define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT)
-#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
-#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
-#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */
-#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
-#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
-#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
-#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
-#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */
-#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
-#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
 #define I40E_VFQF_HLUT_MAX_INDEX 15
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
index 9ffbd24d83cb..82fcd18ad660 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
@@ -8,27 +8,6 @@
 #include "iavf_prototype.h"
 /**
- * iavf_adminq_init_regs - Initialize AdminQ registers
- * @hw: pointer to the hardware structure
- *
- * This assumes the alloc_asq and alloc_arq functions have already been called
- **/
-static void iavf_adminq_init_regs(struct iavf_hw *hw)
-{
- /* set head and tail registers in our local struct */
- hw->aq.asq.tail = IAVF_VF_ATQT1;
- hw->aq.asq.head = IAVF_VF_ATQH1;
- hw->aq.asq.len = IAVF_VF_ATQLEN1;
- hw->aq.asq.bal = IAVF_VF_ATQBAL1;
- hw->aq.asq.bah = IAVF_VF_ATQBAH1;
- hw->aq.arq.tail = IAVF_VF_ARQT1;
- hw->aq.arq.head = IAVF_VF_ARQH1;
- hw->aq.arq.len = IAVF_VF_ARQLEN1;
- hw->aq.arq.bal = IAVF_VF_ARQBAL1;
- hw->aq.arq.bah = IAVF_VF_ARQBAH1;
-}
-
-/**
 * iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 * @hw: pointer to the hardware structure
 **/
@@ -259,17 +238,17 @@ static enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
 u32 reg = 0;
 /* Clear Head and Tail */
- wr32(hw, hw->aq.asq.head, 0);
- wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, IAVF_VF_ATQH1, 0);
+ wr32(hw, IAVF_VF_ATQT1, 0);
 /* set starting point */
- wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ wr32(hw, IAVF_VF_ATQLEN1, (hw->aq.num_asq_entries |
 IAVF_VF_ATQLEN1_ATQENABLE_MASK));
- wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
- wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, IAVF_VF_ATQBAL1, lower_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, IAVF_VF_ATQBAH1, upper_32_bits(hw->aq.asq.desc_buf.pa));
 /* Check one register to verify that config was applied */
- reg = rd32(hw, hw->aq.asq.bal);
+ reg = rd32(hw, IAVF_VF_ATQBAL1);
 if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
 ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
@@ -288,20 +267,20 @@ static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
 u32 reg = 0;
 /* Clear Head and Tail */
- wr32(hw, hw->aq.arq.head, 0);
- wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, IAVF_VF_ARQH1, 0);
+ wr32(hw, IAVF_VF_ARQT1, 0);
 /* set starting point */
- wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ wr32(hw, IAVF_VF_ARQLEN1, (hw->aq.num_arq_entries |
 IAVF_VF_ARQLEN1_ARQENABLE_MASK));
- wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
- wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, IAVF_VF_ARQBAL1, lower_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, IAVF_VF_ARQBAH1, upper_32_bits(hw->aq.arq.desc_buf.pa));
 /* Update tail in the HW to post pre-allocated buffers */
- wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+ wr32(hw, IAVF_VF_ARQT1, hw->aq.num_arq_entries - 1);
 /* Check one register to verify that config was applied */
- reg = rd32(hw, hw->aq.arq.bal);
+ reg = rd32(hw, IAVF_VF_ARQBAL1);
 if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
 ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
@@ -455,11 +434,11 @@ static enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
 }
 /* Stop firmware AdminQ processing */
- wr32(hw, hw->aq.asq.head, 0);
- wr32(hw, hw->aq.asq.tail, 0);
- wr32(hw, hw->aq.asq.len, 0);
- wr32(hw, hw->aq.asq.bal, 0);
- wr32(hw, hw->aq.asq.bah, 0);
+ wr32(hw, IAVF_VF_ATQH1, 0);
+ wr32(hw, IAVF_VF_ATQT1, 0);
+ wr32(hw, IAVF_VF_ATQLEN1, 0);
+ wr32(hw, IAVF_VF_ATQBAL1, 0);
+ wr32(hw, IAVF_VF_ATQBAH1, 0);
 hw->aq.asq.count = 0; /* to indicate uninitialized queue */
@@ -489,11 +468,11 @@ static enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
 }
 /* Stop firmware AdminQ processing */
- wr32(hw, hw->aq.arq.head, 0);
- wr32(hw, hw->aq.arq.tail, 0);
- wr32(hw, hw->aq.arq.len, 0);
- wr32(hw, hw->aq.arq.bal, 0);
- wr32(hw, hw->aq.arq.bah, 0);
+ wr32(hw, IAVF_VF_ARQH1, 0);
+ wr32(hw, IAVF_VF_ARQT1, 0);
+ wr32(hw, IAVF_VF_ARQLEN1, 0);
+ wr32(hw, IAVF_VF_ARQBAL1, 0);
+ wr32(hw, IAVF_VF_ARQBAH1, 0);
 hw->aq.arq.count = 0; /* to indicate uninitialized queue */
@@ -529,9 +508,6 @@ enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
 goto init_adminq_exit;
 }
- /* Set up register offsets */
- iavf_adminq_init_regs(hw);
-
 /* setup ASQ command write back timeout */
 hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;
@@ -587,9 +563,9 @@ static u16 iavf_clean_asq(struct iavf_hw *hw)
 desc = IAVF_ADMINQ_DESC(*asq, ntc);
 details = IAVF_ADMINQ_DETAILS(*asq, ntc);
- while (rd32(hw, hw->aq.asq.head) != ntc) {
+ while (rd32(hw, IAVF_VF_ATQH1) != ntc) {
 iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
- "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
+ "ntc %d head %d.\n", ntc, rd32(hw, IAVF_VF_ATQH1));
 if (details->callback) {
 IAVF_ADMINQ_CALLBACK cb_func =
@@ -624,7 +600,7 @@ bool iavf_asq_done(struct iavf_hw *hw)
 /* AQ designers suggest use of head for better
 * timing reliability than DD bit
 */
- return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+ return rd32(hw, IAVF_VF_ATQH1) == hw->aq.asq.next_to_use;
 }
 /**
@@ -663,7 +639,7 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
 hw->aq.asq_last_status = IAVF_AQ_RC_OK;
- val = rd32(hw, hw->aq.asq.head);
+ val = rd32(hw, IAVF_VF_ATQH1);
 if (val >= hw->aq.num_asq_entries) {
 iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
 "AQTX: head overrun at %d\n", val);
@@ -755,7 +731,7 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
 if (hw->aq.asq.next_to_use == hw->aq.asq.count)
 hw->aq.asq.next_to_use = 0;
 if (!details->postpone)
- wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+ wr32(hw, IAVF_VF_ATQT1, hw->aq.asq.next_to_use);
 /* if cmd_details are not defined or async flag is not set,
 * we need to wait for desc write back
@@ -810,7 +786,7 @@ enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
 /* update the error if time out occurred */
 if ((!cmd_completed) &&
 (!details->async && !details->postpone)) {
- if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
+ if (rd32(hw, IAVF_VF_ATQLEN1) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
 iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
 "AQTX: AQ Critical error.\n");
 status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
@@ -878,7 +854,7 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
 }
 /* set next_to_use to head */
- ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
+ ntu = rd32(hw, IAVF_VF_ARQH1) & IAVF_VF_ARQH1_ARQH_MASK;
 if (ntu == ntc) {
 /* nothing to do - shouldn't need to update ring's values */
 ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
@@ -926,7 +902,7 @@ enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
 desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
 /* set tail = the last cleaned desc index. */
- wr32(hw, hw->aq.arq.tail, ntc);
+ wr32(hw, IAVF_VF_ARQT1, ntc);
 /* ntc is updated to tail + 1 */
 ntc++;
 if (ntc == hw->aq.num_arq_entries)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.h b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
index 1f60518eb0e5..406506f64bdd 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
@@ -29,13 +29,6 @@ struct iavf_adminq_ring {
 /* used for interrupt processing */
 u16 next_to_use;
 u16 next_to_clean;
-
- /* used for queue tracking */
- u32 head;
- u32 tail;
- u32 len;
- u32 bah;
- u32 bal;
 };
 /* ASQ transaction details */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
index 8091e6feca01..89d2bce529ae 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_common.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
@@ -279,11 +279,11 @@ void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc,
 **/
 bool iavf_check_asq_alive(struct iavf_hw *hw)
 {
- if (hw->aq.asq.len)
- return !!(rd32(hw, hw->aq.asq.len) &
- IAVF_VF_ATQLEN1_ATQENABLE_MASK);
- else
+ /* Check if the queue is initialized */
+ if (!hw->aq.asq.count)
 return false;
+
+ return !!(rd32(hw, IAVF_VF_ATQLEN1) & IAVF_VF_ATQLEN1_ATQENABLE_MASK);
 }
 /**
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 6f236d1a6444..6b3d3e54b8b7 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -1445,10 +1445,9 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
 iavf_fdir_list_add_fltr(adapter, fltr);
 adapter->fdir_active_fltr++;
 fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
- adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
 spin_unlock_bh(&adapter->fdir_fltr_lock);
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_FDIR_FILTER);
 ret:
 if (err && fltr)
@@ -1479,7 +1478,6 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
 if (fltr) {
 if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
 fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
- adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
 } else {
 err = -EBUSY;
 }
@@ -1489,7 +1487,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
 spin_unlock_bh(&adapter->fdir_fltr_lock);
 if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_FDIR_FILTER);
 return err;
 }
@@ -1658,7 +1656,6 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
 rss_old->hash_flds = hash_flds;
 memcpy(&rss_old->cfg_msg, &rss_new->cfg_msg,
 sizeof(rss_new->cfg_msg));
- adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
 } else {
 err = -EEXIST;
 }
@@ -1668,12 +1665,11 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
 rss_new->packet_hdrs = hdrs;
 rss_new->hash_flds = hash_flds;
 list_add_tail(&rss_new->list, &adapter->adv_rss_list_head);
- adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
 }
 spin_unlock_bh(&adapter->adv_rss_lock);
 if (!err)
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
 mutex_unlock(&adapter->crit_lock);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index c862ebcd2e39..06a87030c163 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1059,13 +1059,12 @@ static int iavf_replace_primary_mac(struct iavf_adapter *adapter,
 */
 new_f->is_primary = true;
 new_f->add = true;
- adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
 ether_addr_copy(hw->mac.addr, new_mac);
 spin_unlock_bh(&adapter->mac_vlan_list_lock);
 /* schedule the watchdog task to immediately process the request */
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_MAC_FILTER);
 return 0;
 }
@@ -1284,8 +1283,7 @@ static void iavf_up_complete(struct iavf_adapter *adapter)
 iavf_napi_enable_all(adapter);
- adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ENABLE_QUEUES);
 }
 /**
@@ -1439,8 +1437,7 @@ void iavf_down(struct iavf_adapter *adapter)
 adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
 }
- adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DISABLE_QUEUES);
 }
 /**
@@ -2337,10 +2334,8 @@ iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
 }
 }
- if (aq_required) {
- adapter->aq_required |= aq_required;
- mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
- }
+ if (aq_required)
+ iavf_schedule_aq_request(adapter, aq_required);
 }
 /**
@@ -3253,7 +3248,7 @@ static void iavf_adminq_task(struct work_struct *work)
 goto freedom;
 /* check for error indications */
- val = rd32(hw, hw->aq.arq.len);
+ val = rd32(hw, IAVF_VF_ARQLEN1);
 if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
 goto freedom;
 oldval = val;
@@ -3270,9 +3265,9 @@ static void iavf_adminq_task(struct work_struct *work)
 val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
 }
 if (oldval != val)
- wr32(hw, hw->aq.arq.len, val);
+ wr32(hw, IAVF_VF_ARQLEN1, val);
 
- val = rd32(hw, hw->aq.asq.len);
+ val = rd32(hw, IAVF_VF_ATQLEN1);
 oldval = val;
 if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
@@ -3287,7 +3282,7 @@ static void iavf_adminq_task(struct work_struct *work)
 val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
 }
 if (oldval != val)
- wr32(hw, hw->aq.asq.len, val);
+ wr32(hw, IAVF_VF_ATQLEN1, val);
 freedom:
 kfree(event.msg_buf);