From 38e97a98e371c615f67d144ee9e4f7087c0a41e8 Mon Sep 17 00:00:00 2001 From: Piotr Raczynski Date: Mon, 15 May 2023 21:03:12 +0200 Subject: ice: move interrupt related code to separate file Keep interrupt handling code in a dedicated file. This helps keep driver structured better and prepares for more functionality added to this file. Reviewed-by: Jacob Keller Reviewed-by: Simon Horman Tested-by: Pucha Himasekhar Reddy (A Contingent worker at Intel) Signed-off-by: Piotr Raczynski Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_main.c | 218 ------------------------------ 1 file changed, 218 deletions(-) (limited to 'drivers/net/ethernet/intel/ice/ice_main.c') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index a1f7c8edc22f..c377bacc5e2e 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3900,224 +3900,6 @@ static int ice_init_pf(struct ice_pf *pf) return 0; } -/** - * ice_reduce_msix_usage - Reduce usage of MSI-X vectors - * @pf: board private structure - * @v_remain: number of remaining MSI-X vectors to be distributed - * - * Reduce the usage of MSI-X vectors when entire request cannot be fulfilled. - * pf->num_lan_msix and pf->num_rdma_msix values are set based on number of - * remaining vectors. - */ -static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain) -{ - int v_rdma; - - if (!ice_is_rdma_ena(pf)) { - pf->num_lan_msix = v_remain; - return; - } - - /* RDMA needs at least 1 interrupt in addition to AEQ MSIX */ - v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1; - - if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) { - dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n"); - clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); - - pf->num_rdma_msix = 0; - pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX; - } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) || - (v_remain - v_rdma < v_rdma)) { - /* Support minimum RDMA and give remaining vectors to LAN MSIX */ - pf->num_rdma_msix = ICE_MIN_RDMA_MSIX; - pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX; - } else { - /* Split remaining MSIX with RDMA after accounting for AEQ MSIX - */ - pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 + - ICE_RDMA_NUM_AEQ_MSIX; - pf->num_lan_msix = v_remain - pf->num_rdma_msix; - } -} - -/** - * ice_ena_msix_range - Request a range of MSIX vectors from the OS - * @pf: board private structure - * - * Compute the number of MSIX vectors wanted and request from the OS. Adjust - * device usage if there are not enough vectors. Return the number of vectors - * reserved or negative on failure. - */ -static int ice_ena_msix_range(struct ice_pf *pf) -{ - int num_cpus, hw_num_msix, v_other, v_wanted, v_actual; - struct device *dev = ice_pf_to_dev(pf); - int err, i; - - hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors; - num_cpus = num_online_cpus(); - - /* LAN miscellaneous handler */ - v_other = ICE_MIN_LAN_OICR_MSIX; - - /* Flow Director */ - if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) - v_other += ICE_FDIR_MSIX; - - /* switchdev */ - v_other += ICE_ESWITCH_MSIX; - - v_wanted = v_other; - - /* LAN traffic */ - pf->num_lan_msix = num_cpus; - v_wanted += pf->num_lan_msix; - - /* RDMA auxiliary driver */ - if (ice_is_rdma_ena(pf)) { - pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX; - v_wanted += pf->num_rdma_msix; - } - - if (v_wanted > hw_num_msix) { - int v_remain; - - dev_warn(dev, "not enough device MSI-X vectors. 
wanted = %d, available = %d\n", - v_wanted, hw_num_msix); - - if (hw_num_msix < ICE_MIN_MSIX) { - err = -ERANGE; - goto exit_err; - } - - v_remain = hw_num_msix - v_other; - if (v_remain < ICE_MIN_LAN_TXRX_MSIX) { - v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX; - v_remain = ICE_MIN_LAN_TXRX_MSIX; - } - - ice_reduce_msix_usage(pf, v_remain); - v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other; - - dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n", - pf->num_lan_msix); - if (ice_is_rdma_ena(pf)) - dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n", - pf->num_rdma_msix); - } - - pf->msix_entries = devm_kcalloc(dev, v_wanted, - sizeof(*pf->msix_entries), GFP_KERNEL); - if (!pf->msix_entries) { - err = -ENOMEM; - goto exit_err; - } - - for (i = 0; i < v_wanted; i++) - pf->msix_entries[i].entry = i; - - /* actually reserve the vectors */ - v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries, - ICE_MIN_MSIX, v_wanted); - if (v_actual < 0) { - dev_err(dev, "unable to reserve MSI-X vectors\n"); - err = v_actual; - goto msix_err; - } - - if (v_actual < v_wanted) { - dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n", - v_wanted, v_actual); - - if (v_actual < ICE_MIN_MSIX) { - /* error if we can't get minimum vectors */ - pci_disable_msix(pf->pdev); - err = -ERANGE; - goto msix_err; - } else { - int v_remain = v_actual - v_other; - - if (v_remain < ICE_MIN_LAN_TXRX_MSIX) - v_remain = ICE_MIN_LAN_TXRX_MSIX; - - ice_reduce_msix_usage(pf, v_remain); - - dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n", - pf->num_lan_msix); - - if (ice_is_rdma_ena(pf)) - dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n", - pf->num_rdma_msix); - } - } - - return v_actual; - -msix_err: - devm_kfree(dev, pf->msix_entries); - -exit_err: - pf->num_rdma_msix = 0; - pf->num_lan_msix = 0; - return err; -} - -/** - * ice_dis_msix - Disable MSI-X interrupt setup in OS - * @pf: board private structure - */ -static void ice_dis_msix(struct ice_pf *pf) -{ - pci_disable_msix(pf->pdev); - devm_kfree(ice_pf_to_dev(pf), pf->msix_entries); - pf->msix_entries = NULL; -} - -/** - * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme - * @pf: board private structure - */ -static void ice_clear_interrupt_scheme(struct ice_pf *pf) -{ - ice_dis_msix(pf); - - if (pf->irq_tracker) { - devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker); - pf->irq_tracker = NULL; - } -} - -/** - * ice_init_interrupt_scheme - Determine proper interrupt scheme - * @pf: board private structure to initialize - */ -static int ice_init_interrupt_scheme(struct ice_pf *pf) -{ - int vectors; - - vectors = ice_ena_msix_range(pf); - - if (vectors < 0) - return vectors; - - /* set up vector assignment tracking */ - pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf), - struct_size(pf->irq_tracker, list, vectors), - GFP_KERNEL); - if (!pf->irq_tracker) { - ice_dis_msix(pf); - return -ENOMEM; - } - - /* populate SW interrupts pool with number of OS granted IRQs. 
*/ - pf->num_avail_sw_msix = (u16)vectors; - pf->irq_tracker->num_entries = (u16)vectors; - pf->irq_tracker->end = pf->irq_tracker->num_entries; - - return 0; -} - /** * ice_is_wol_supported - check if WoL is supported * @hw: pointer to hardware info -- cgit v1.2.3 From afe87cfe820e74cc5fc59359445bcfd93ff0ab07 Mon Sep 17 00:00:00 2001 From: Piotr Raczynski Date: Mon, 15 May 2023 21:03:13 +0200 Subject: ice: use pci_irq_vector helper function Currently, driver gets interrupt number directly from ice_pf::msix_entries array. Use helper function dedicated to do just that. While at it use a variable to store interrupt number in ice_free_irq_msix_misc instead of calling the helper function twice. Reviewed-by: Michal Swiatkowski Reviewed-by: Simon Horman Tested-by: Pucha Himasekhar Reddy (A Contingent worker at Intel) Signed-off-by: Piotr Raczynski Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_arfs.c | 2 +- drivers/net/ethernet/intel/ice/ice_lib.c | 4 ++-- drivers/net/ethernet/intel/ice/ice_main.c | 12 ++++++------ drivers/net/ethernet/intel/ice/ice_ptp.c | 2 +- drivers/net/ethernet/intel/ice/ice_xsk.c | 2 +- 5 files changed, 11 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet/intel/ice/ice_main.c') diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c index fba178e07600..e81797344f5e 100644 --- a/drivers/net/ethernet/intel/ice/ice_arfs.c +++ b/drivers/net/ethernet/intel/ice/ice_arfs.c @@ -616,7 +616,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi) base_idx = vsi->base_vector; ice_for_each_q_vector(vsi, i) if (irq_cpu_rmap_add(netdev->rx_cpu_rmap, - pf->msix_entries[base_idx + i].vector)) { + pci_irq_vector(pf->pdev, base_idx + i))) { ice_free_cpu_rx_rmap(vsi); return -EINVAL; } diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 450317dfcca7..79e1557f77e8 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -3056,7 +3056,7 @@ void ice_vsi_free_irq(struct ice_vsi *vsi) u16 vector = i + base; int irq_num; - irq_num = pf->msix_entries[vector].vector; + irq_num = pci_irq_vector(pf->pdev, vector); /* free only the irqs that were actually requested */ if (!vsi->q_vectors[i] || @@ -3235,7 +3235,7 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi) return; ice_for_each_q_vector(vsi, i) - synchronize_irq(pf->msix_entries[i + base].vector); + synchronize_irq(pci_irq_vector(pf->pdev, i + base)); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index c377bacc5e2e..c103be660a9c 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2501,7 +2501,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) for (vector = 0; vector < q_vectors; vector++) { struct ice_q_vector *q_vector = vsi->q_vectors[vector]; - irq_num = pf->msix_entries[base + vector].vector; + irq_num = pci_irq_vector(pf->pdev, base + vector); if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) { snprintf(q_vector->name, sizeof(q_vector->name) - 1, @@ -2557,7 +2557,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) free_q_irqs: while (vector) { vector--; - irq_num = pf->msix_entries[base + vector].vector; + irq_num = pci_irq_vector(pf->pdev, base + vector); if (!IS_ENABLED(CONFIG_RFS_ACCEL)) irq_set_affinity_notifier(irq_num, NULL); irq_set_affinity_hint(irq_num, NULL); @@ -3234,6 +3234,7 @@ static void 
ice_dis_ctrlq_interrupts(struct ice_hw *hw) */ static void ice_free_irq_msix_misc(struct ice_pf *pf) { + int misc_irq_num = pci_irq_vector(pf->pdev, pf->oicr_idx); struct ice_hw *hw = &pf->hw; ice_dis_ctrlq_interrupts(hw); @@ -3243,9 +3244,8 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf) ice_flush(hw); if (pf->msix_entries) { - synchronize_irq(pf->msix_entries[pf->oicr_idx].vector); - devm_free_irq(ice_pf_to_dev(pf), - pf->msix_entries[pf->oicr_idx].vector, pf); + synchronize_irq(misc_irq_num); + devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf); } pf->num_avail_sw_msix += 1; @@ -3317,7 +3317,7 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) pf->oicr_idx = (u16)oicr_idx; err = devm_request_threaded_irq(dev, - pf->msix_entries[pf->oicr_idx].vector, + pci_irq_vector(pf->pdev, pf->oicr_idx), ice_misc_intr, ice_misc_intr_thread_fn, 0, pf->int_name, pf); if (err) { diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index ac6f06f9a2ed..972d4f6fd615 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -911,7 +911,7 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) spin_unlock(&tx->lock); /* wait for potentially outstanding interrupt to complete */ - synchronize_irq(pf->msix_entries[pf->oicr_idx].vector); + synchronize_irq(pci_irq_vector(pf->pdev, pf->oicr_idx)); ice_ptp_flush_tx_tracker(pf, tx); diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index d1e489da7363..4102416d7a41 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -107,7 +107,7 @@ ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring, wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0); ice_flush(hw); - synchronize_irq(pf->msix_entries[v_idx + base].vector); + synchronize_irq(pci_irq_vector(pf->pdev, v_idx + base)); } } -- cgit v1.2.3 From 05018936a1fe07b92a612d1fd1511224ba8a1a34 Mon Sep 17 00:00:00 2001 From: Piotr Raczynski Date: Mon, 15 May 2023 21:03:14 +0200 Subject: ice: use preferred MSIX allocation api Move away from using pci_enable_msix_range/pci_disable_msix and use pci_alloc_irq_vectors/pci_free_irq_vectors instead. As a result stop tracking msix_entries since with newer API entries are handled by MSIX core. However, due to current design of communication with RDMA driver which accesses ice_pf::msix_entries directly, keep using the array just for RDMA driver use. 
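As a quick illustration of the API swap described above, here is a minimal, self-contained sketch of the pci_alloc_irq_vectors()/pci_irq_vector()/pci_free_irq_vectors() pattern. The example_* names and the plain request_irq() usage are hypothetical and are not ice code; they only show why no struct msix_entry array is needed once the PCI core owns the vector table:

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t example_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

/* Allocate between 1 and 'want' MSI-X vectors and request each one. */
static int example_setup_msix(struct pci_dev *pdev, int want)
{
	int nvec, i, err;

	nvec = pci_alloc_irq_vectors(pdev, 1, want, PCI_IRQ_MSIX);
	if (nvec < 0)
		return nvec;		/* nothing was allocated */

	for (i = 0; i < nvec; i++) {
		/* Linux IRQ number comes from the PCI core, not a local array */
		err = request_irq(pci_irq_vector(pdev, i), example_handler,
				  0, "example", pdev);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (i--)
		free_irq(pci_irq_vector(pdev, i), pdev);
	pci_free_irq_vectors(pdev);
	return err;
}
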
Reviewed-by: Michal Swiatkowski Reviewed-by: Simon Horman Tested-by: Pucha Himasekhar Reddy (A Contingent worker at Intel) Signed-off-by: Piotr Raczynski Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_idc.c | 29 ++++++++++++++++++++-- drivers/net/ethernet/intel/ice/ice_irq.c | 40 +++++++------------------------ drivers/net/ethernet/intel/ice/ice_main.c | 6 ++--- 3 files changed, 37 insertions(+), 38 deletions(-) (limited to 'drivers/net/ethernet/intel/ice/ice_main.c') diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c index e6bc2285071e..1000759505d7 100644 --- a/drivers/net/ethernet/intel/ice/ice_idc.c +++ b/drivers/net/ethernet/intel/ice/ice_idc.c @@ -235,14 +235,33 @@ EXPORT_SYMBOL_GPL(ice_get_qos_params); static int ice_reserve_rdma_qvector(struct ice_pf *pf) { if (ice_is_rdma_ena(pf)) { - int index; + int index, i; index = ice_get_res(pf, pf->irq_tracker, pf->num_rdma_msix, ICE_RES_RDMA_VEC_ID); if (index < 0) return index; + + pf->msix_entries = kcalloc(pf->num_rdma_msix, + sizeof(*pf->msix_entries), + GFP_KERNEL); + if (!pf->msix_entries) { + ice_free_res(pf->irq_tracker, pf->rdma_base_vector, + ICE_RES_RDMA_VEC_ID); + return -ENOMEM; + } + pf->num_avail_sw_msix -= pf->num_rdma_msix; - pf->rdma_base_vector = (u16)index; + + /* RDMA is the only user of pf->msix_entries array */ + pf->rdma_base_vector = 0; + + for (i = 0; i < pf->num_rdma_msix; i++, index++) { + struct msix_entry *entry = &pf->msix_entries[i]; + + entry->entry = index; + entry->vector = pci_irq_vector(pf->pdev, index); + } } return 0; } @@ -253,6 +272,12 @@ static int ice_reserve_rdma_qvector(struct ice_pf *pf) */ static void ice_free_rdma_qvector(struct ice_pf *pf) { + if (!pf->msix_entries) + return; + + kfree(pf->msix_entries); + pf->msix_entries = NULL; + pf->num_avail_sw_msix -= pf->num_rdma_msix; ice_free_res(pf->irq_tracker, pf->rdma_base_vector, ICE_RES_RDMA_VEC_ID); diff --git a/drivers/net/ethernet/intel/ice/ice_irq.c b/drivers/net/ethernet/intel/ice/ice_irq.c index 1fc7daec9732..f61be5d76373 100644 --- a/drivers/net/ethernet/intel/ice/ice_irq.c +++ b/drivers/net/ethernet/intel/ice/ice_irq.c @@ -59,7 +59,7 @@ static int ice_ena_msix_range(struct ice_pf *pf) { int num_cpus, hw_num_msix, v_other, v_wanted, v_actual; struct device *dev = ice_pf_to_dev(pf); - int err, i; + int err; hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors; num_cpus = num_online_cpus(); @@ -113,23 +113,13 @@ static int ice_ena_msix_range(struct ice_pf *pf) pf->num_rdma_msix); } - pf->msix_entries = devm_kcalloc(dev, v_wanted, - sizeof(*pf->msix_entries), GFP_KERNEL); - if (!pf->msix_entries) { - err = -ENOMEM; - goto exit_err; - } - - for (i = 0; i < v_wanted; i++) - pf->msix_entries[i].entry = i; - /* actually reserve the vectors */ - v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries, - ICE_MIN_MSIX, v_wanted); + v_actual = pci_alloc_irq_vectors(pf->pdev, ICE_MIN_MSIX, v_wanted, + PCI_IRQ_MSIX); if (v_actual < 0) { dev_err(dev, "unable to reserve MSI-X vectors\n"); err = v_actual; - goto msix_err; + goto exit_err; } if (v_actual < v_wanted) { @@ -138,9 +128,9 @@ static int ice_ena_msix_range(struct ice_pf *pf) if (v_actual < ICE_MIN_MSIX) { /* error if we can't get minimum vectors */ - pci_disable_msix(pf->pdev); + pci_free_irq_vectors(pf->pdev); err = -ERANGE; - goto msix_err; + goto exit_err; } else { int v_remain = v_actual - v_other; @@ -160,33 +150,19 @@ static int ice_ena_msix_range(struct ice_pf *pf) return v_actual; -msix_err: - devm_kfree(dev, 
pf->msix_entries); - exit_err: pf->num_rdma_msix = 0; pf->num_lan_msix = 0; return err; } -/** - * ice_dis_msix - Disable MSI-X interrupt setup in OS - * @pf: board private structure - */ -static void ice_dis_msix(struct ice_pf *pf) -{ - pci_disable_msix(pf->pdev); - devm_kfree(ice_pf_to_dev(pf), pf->msix_entries); - pf->msix_entries = NULL; -} - /** * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme * @pf: board private structure */ void ice_clear_interrupt_scheme(struct ice_pf *pf) { - ice_dis_msix(pf); + pci_free_irq_vectors(pf->pdev); if (pf->irq_tracker) { devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker); @@ -213,7 +189,7 @@ int ice_init_interrupt_scheme(struct ice_pf *pf) vectors), GFP_KERNEL); if (!pf->irq_tracker) { - ice_dis_msix(pf); + pci_free_irq_vectors(pf->pdev); return -ENOMEM; } diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index c103be660a9c..ce8cd49ae10c 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3243,10 +3243,8 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf) wr32(hw, PFINT_OICR_ENA, 0); ice_flush(hw); - if (pf->msix_entries) { - synchronize_irq(misc_irq_num); - devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf); - } + synchronize_irq(misc_irq_num); + devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf); pf->num_avail_sw_msix += 1; ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID); -- cgit v1.2.3 From 4aad5335969f25c4dc966a15c5497db3718538bb Mon Sep 17 00:00:00 2001 From: Piotr Raczynski Date: Mon, 15 May 2023 21:03:17 +0200 Subject: ice: add individual interrupt allocation Currently interrupt allocations, depending on a feature are distributed in batches. Also, after allocation there is a series of operations that distributes per irq settings through that batch of interrupts. Although driver does not yet support dynamic interrupt allocation, keep allocated interrupts in a pool and add allocation abstraction logic to make code more flexible. Keep per interrupt information in the ice_q_vector structure, which yields ice_vsi::base_vector redundant. Also, as a result there are a few functions that can be removed. 
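To make the allocation abstraction above concrete, the following is a rough, hypothetical sketch (not the ice tracker itself) of handing out one vector at a time from a pool and returning both the pool index and the Linux IRQ number in a struct msi_map, which is what callers such as the q_vector setup then store per interrupt:

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/msi_api.h>
#include <linux/pci.h>

struct example_irq_pool {
	unsigned long *used;		/* one bit per enabled MSI-X vector */
	unsigned int size;
	struct pci_dev *pdev;
};

static struct msi_map example_alloc_irq(struct example_irq_pool *pool)
{
	struct msi_map map = { .index = -ENOENT };
	unsigned int idx;

	idx = find_first_zero_bit(pool->used, pool->size);
	if (idx >= pool->size)
		return map;		/* caller checks map.index < 0 */

	set_bit(idx, pool->used);
	map.index = idx;
	map.virq = pci_irq_vector(pool->pdev, idx);
	return map;
}

static void example_free_irq(struct example_irq_pool *pool, struct msi_map map)
{
	clear_bit(map.index, pool->used);
}

The real tracker in ice_irq.c keeps more state than this, but the contract visible to callers is the same: check map.index for failure, use map.virq as the IRQ number.
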
Reviewed-by: Jacob Keller Reviewed-by: Michal Swiatkowski Reviewed-by: Simon Horman Tested-by: Pucha Himasekhar Reddy (A Contingent worker at Intel) Signed-off-by: Piotr Raczynski Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice.h | 11 +- drivers/net/ethernet/intel/ice/ice_arfs.c | 5 +- drivers/net/ethernet/intel/ice/ice_base.c | 50 +++++- drivers/net/ethernet/intel/ice/ice_ethtool.c | 2 +- drivers/net/ethernet/intel/ice/ice_idc.c | 45 +++--- drivers/net/ethernet/intel/ice/ice_irq.c | 46 +++++- drivers/net/ethernet/intel/ice/ice_irq.h | 3 + drivers/net/ethernet/intel/ice/ice_lib.c | 228 +++------------------------ drivers/net/ethernet/intel/ice/ice_lib.h | 4 +- drivers/net/ethernet/intel/ice/ice_main.c | 44 +++--- drivers/net/ethernet/intel/ice/ice_ptp.c | 2 +- drivers/net/ethernet/intel/ice/ice_sriov.c | 2 +- drivers/net/ethernet/intel/ice/ice_xsk.c | 5 +- 13 files changed, 165 insertions(+), 282 deletions(-) (limited to 'drivers/net/ethernet/intel/ice/ice_main.c') diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 4dc4c6bec081..d8dde291491e 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -105,10 +105,6 @@ #define ICE_Q_WAIT_MAX_RETRY (5 * ICE_Q_WAIT_RETRY_LIMIT) #define ICE_MAX_LG_RSS_QS 256 #define ICE_RES_VALID_BIT 0x8000 -#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1) -#define ICE_RES_RDMA_VEC_ID (ICE_RES_MISC_VEC_ID - 1) -/* All VF control VSIs share the same IRQ, so assign a unique ID for them */ -#define ICE_RES_VF_CTRL_VEC_ID (ICE_RES_RDMA_VEC_ID - 1) #define ICE_INVAL_Q_INDEX 0xffff #define ICE_MAX_RXQS_PER_TC 256 /* Used when setting VSI context per TC Rx queues */ @@ -349,7 +345,6 @@ struct ice_vsi { u32 rx_buf_failed; u32 rx_page_failed; u16 num_q_vectors; - u16 base_vector; /* IRQ base for OS reserved vectors */ enum ice_vsi_type type; u16 vsi_num; /* HW (absolute) index of this VSI */ u16 idx; /* software index in pf->vsi[] */ @@ -480,6 +475,7 @@ struct ice_q_vector { char name[ICE_INT_NAME_STR_LEN]; u16 total_events; /* net_dim(): number of interrupts processed */ + struct msi_map irq; } ____cacheline_internodealigned_in_smp; enum ice_pf_flags { @@ -584,8 +580,7 @@ struct ice_pf { u32 hw_csum_rx_error; u32 oicr_err_reg; - u16 oicr_idx; /* Other interrupt cause MSIX vector index */ - u16 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */ + struct msi_map oicr_irq; /* Other interrupt cause MSIX vector */ u16 max_pf_txqs; /* Total Tx queues PF wide */ u16 max_pf_rxqs; /* Total Rx queues PF wide */ u16 num_lan_msix; /* Total MSIX vectors for base driver */ @@ -671,7 +666,7 @@ ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi, struct ice_q_vector *q_vector) { u32 vector = (vsi && q_vector) ? 
q_vector->reg_idx : - ((struct ice_pf *)hw->back)->oicr_idx; + ((struct ice_pf *)hw->back)->oicr_irq.index; int itr = ICE_ITR_NONE; u32 val; diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c index e81797344f5e..cca0e753f38f 100644 --- a/drivers/net/ethernet/intel/ice/ice_arfs.c +++ b/drivers/net/ethernet/intel/ice/ice_arfs.c @@ -596,7 +596,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi) { struct net_device *netdev; struct ice_pf *pf; - int base_idx, i; + int i; if (!vsi || vsi->type != ICE_VSI_PF) return 0; @@ -613,10 +613,9 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi) if (unlikely(!netdev->rx_cpu_rmap)) return -EINVAL; - base_idx = vsi->base_vector; ice_for_each_q_vector(vsi, i) if (irq_cpu_rmap_add(netdev->rx_cpu_rmap, - pci_irq_vector(pf->pdev, base_idx + i))) { + vsi->q_vectors[i]->irq.virq)) { ice_free_cpu_rx_rmap(vsi); return -EINVAL; } diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 1911d644dfa8..cb0913cb9741 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -103,10 +103,10 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx) { struct ice_pf *pf = vsi->back; struct ice_q_vector *q_vector; + int err; /* allocate q_vector */ - q_vector = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*q_vector), - GFP_KERNEL); + q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL); if (!q_vector) return -ENOMEM; @@ -118,9 +118,34 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx) q_vector->rx.itr_mode = ITR_DYNAMIC; q_vector->tx.type = ICE_TX_CONTAINER; q_vector->rx.type = ICE_RX_CONTAINER; + q_vector->irq.index = -ENOENT; - if (vsi->type == ICE_VSI_VF) + if (vsi->type == ICE_VSI_VF) { + q_vector->reg_idx = ice_calc_vf_reg_idx(vsi->vf, q_vector); goto out; + } else if (vsi->type == ICE_VSI_CTRL && vsi->vf) { + struct ice_vsi *ctrl_vsi = ice_get_vf_ctrl_vsi(pf, vsi); + + if (ctrl_vsi) { + if (unlikely(!ctrl_vsi->q_vectors)) { + err = -ENOENT; + goto err_free_q_vector; + } + + q_vector->irq = ctrl_vsi->q_vectors[0]->irq; + goto skip_alloc; + } + } + + q_vector->irq = ice_alloc_irq(pf); + if (q_vector->irq.index < 0) { + err = -ENOMEM; + goto err_free_q_vector; + } + +skip_alloc: + q_vector->reg_idx = q_vector->irq.index; + /* only set affinity_mask if the CPU is online */ if (cpu_online(v_idx)) cpumask_set_cpu(v_idx, &q_vector->affinity_mask); @@ -137,6 +162,11 @@ out: vsi->q_vectors[v_idx] = q_vector; return 0; + +err_free_q_vector: + kfree(q_vector); + + return err; } /** @@ -168,7 +198,19 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx) if (vsi->netdev) netif_napi_del(&q_vector->napi); - devm_kfree(dev, q_vector); + /* release MSIX interrupt if q_vector had interrupt allocated */ + if (q_vector->irq.index < 0) + goto free_q_vector; + + /* only free last VF ctrl vsi interrupt */ + if (vsi->type == ICE_VSI_CTRL && vsi->vf && + ice_get_vf_ctrl_vsi(pf, vsi)) + goto free_q_vector; + + ice_free_irq(pf, q_vector->irq); + +free_q_vector: + kfree(q_vector); vsi->q_vectors[v_idx] = NULL; } diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index f86e814354a3..8407c7175cf6 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -956,7 +956,7 @@ static u64 ice_intr_test(struct net_device *netdev) netdev_info(netdev, "interrupt test\n"); - wr32(&pf->hw, GLINT_DYN_CTL(pf->oicr_idx), + wr32(&pf->hw, 
GLINT_DYN_CTL(pf->oicr_irq.index), GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M | GLINT_DYN_CTL_SWINT_TRIG_M); diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c index 1000759505d7..bc016bb4440c 100644 --- a/drivers/net/ethernet/intel/ice/ice_idc.c +++ b/drivers/net/ethernet/intel/ice/ice_idc.c @@ -229,38 +229,33 @@ void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos) EXPORT_SYMBOL_GPL(ice_get_qos_params); /** - * ice_reserve_rdma_qvector - Reserve vector resources for RDMA driver + * ice_alloc_rdma_qvectors - Allocate vector resources for RDMA driver * @pf: board private structure to initialize */ -static int ice_reserve_rdma_qvector(struct ice_pf *pf) +static int ice_alloc_rdma_qvectors(struct ice_pf *pf) { if (ice_is_rdma_ena(pf)) { - int index, i; - - index = ice_get_res(pf, pf->irq_tracker, pf->num_rdma_msix, - ICE_RES_RDMA_VEC_ID); - if (index < 0) - return index; + int i; pf->msix_entries = kcalloc(pf->num_rdma_msix, sizeof(*pf->msix_entries), GFP_KERNEL); - if (!pf->msix_entries) { - ice_free_res(pf->irq_tracker, pf->rdma_base_vector, - ICE_RES_RDMA_VEC_ID); + if (!pf->msix_entries) return -ENOMEM; - } - - pf->num_avail_sw_msix -= pf->num_rdma_msix; /* RDMA is the only user of pf->msix_entries array */ pf->rdma_base_vector = 0; - for (i = 0; i < pf->num_rdma_msix; i++, index++) { + for (i = 0; i < pf->num_rdma_msix; i++) { struct msix_entry *entry = &pf->msix_entries[i]; + struct msi_map map; - entry->entry = index; - entry->vector = pci_irq_vector(pf->pdev, index); + map = ice_alloc_irq(pf); + if (map.index < 0) + break; + + entry->entry = map.index; + entry->vector = map.virq; } } return 0; @@ -272,15 +267,21 @@ static int ice_reserve_rdma_qvector(struct ice_pf *pf) */ static void ice_free_rdma_qvector(struct ice_pf *pf) { + int i; + if (!pf->msix_entries) return; + for (i = 0; i < pf->num_rdma_msix; i++) { + struct msi_map map; + + map.index = pf->msix_entries[i].entry; + map.virq = pf->msix_entries[i].vector; + ice_free_irq(pf, map); + } + kfree(pf->msix_entries); pf->msix_entries = NULL; - - pf->num_avail_sw_msix -= pf->num_rdma_msix; - ice_free_res(pf->irq_tracker, pf->rdma_base_vector, - ICE_RES_RDMA_VEC_ID); } /** @@ -382,7 +383,7 @@ int ice_init_rdma(struct ice_pf *pf) } /* Reserve vector resources */ - ret = ice_reserve_rdma_qvector(pf); + ret = ice_alloc_rdma_qvectors(pf); if (ret < 0) { dev_err(dev, "failed to reserve vectors for RDMA\n"); goto err_reserve_rdma_qvector; diff --git a/drivers/net/ethernet/intel/ice/ice_irq.c b/drivers/net/ethernet/intel/ice/ice_irq.c index f61be5d76373..ca1a1de26766 100644 --- a/drivers/net/ethernet/intel/ice/ice_irq.c +++ b/drivers/net/ethernet/intel/ice/ice_irq.c @@ -194,9 +194,53 @@ int ice_init_interrupt_scheme(struct ice_pf *pf) } /* populate SW interrupts pool with number of OS granted IRQs. */ - pf->num_avail_sw_msix = (u16)vectors; pf->irq_tracker->num_entries = (u16)vectors; pf->irq_tracker->end = pf->irq_tracker->num_entries; return 0; } + +/** + * ice_alloc_irq - Allocate new interrupt vector + * @pf: board private structure + * + * Allocate new interrupt vector for a given owner id. + * return struct msi_map with interrupt details and track + * allocated interrupt appropriately. + * + * This function mimics individual interrupt allocation, + * even interrupts are actually already allocated with + * pci_alloc_irq_vectors. Individual allocation helps + * to track interrupts and simplifies interrupt related + * handling. 
+ * + * On failure, return map with negative .index. The caller + * is expected to check returned map index. + * + */ +struct msi_map ice_alloc_irq(struct ice_pf *pf) +{ + struct msi_map map = { .index = -ENOENT }; + int entry; + + entry = ice_get_res(pf, pf->irq_tracker); + if (entry < 0) + return map; + + map.index = entry; + map.virq = pci_irq_vector(pf->pdev, map.index); + + return map; +} + +/** + * ice_free_irq - Free interrupt vector + * @pf: board private structure + * @map: map with interrupt details + * + * Remove allocated interrupt from the interrupt tracker + */ +void ice_free_irq(struct ice_pf *pf, struct msi_map map) +{ + ice_free_res(pf->irq_tracker, map.index); +} diff --git a/drivers/net/ethernet/intel/ice/ice_irq.h b/drivers/net/ethernet/intel/ice/ice_irq.h index 82475162ab70..26e80dfe22b5 100644 --- a/drivers/net/ethernet/intel/ice/ice_irq.h +++ b/drivers/net/ethernet/intel/ice/ice_irq.h @@ -7,4 +7,7 @@ int ice_init_interrupt_scheme(struct ice_pf *pf); void ice_clear_interrupt_scheme(struct ice_pf *pf); +struct msi_map ice_alloc_irq(struct ice_pf *pf); +void ice_free_irq(struct ice_pf *pf, struct msi_map map); + #endif diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 25511240685d..fe908cf6da6a 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1374,162 +1374,45 @@ out: * ice_free_res - free a block of resources * @res: pointer to the resource * @index: starting index previously returned by ice_get_res - * @id: identifier to track owner * * Returns number of resources freed */ -int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id) +int ice_free_res(struct ice_res_tracker *res, u16 index) { - int count = 0; - int i; - if (!res || index >= res->end) return -EINVAL; - id |= ICE_RES_VALID_BIT; - for (i = index; i < res->end && res->list[i] == id; i++) { - res->list[i] = 0; - count++; - } - - return count; -} - -/** - * ice_search_res - Search the tracker for a block of resources - * @res: pointer to the resource - * @needed: size of the block needed - * @id: identifier to track owner - * - * Returns the base item index of the block, or -ENOMEM for error - */ -static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id) -{ - u16 start = 0, end = 0; - - if (needed > res->end) - return -ENOMEM; - - id |= ICE_RES_VALID_BIT; - - do { - /* skip already allocated entries */ - if (res->list[end++] & ICE_RES_VALID_BIT) { - start = end; - if ((start + needed) > res->end) - break; - } - - if (end == (start + needed)) { - int i = start; - - /* there was enough, so assign it to the requestor */ - while (i != end) - res->list[i++] = id; - - return start; - } - } while (end < res->end); - - return -ENOMEM; -} - -/** - * ice_get_free_res_count - Get free count from a resource tracker - * @res: Resource tracker instance - */ -static u16 ice_get_free_res_count(struct ice_res_tracker *res) -{ - u16 i, count = 0; + res->list[index] = 0; - for (i = 0; i < res->end; i++) - if (!(res->list[i] & ICE_RES_VALID_BIT)) - count++; - - return count; + return 0; } /** - * ice_get_res - get a block of resources + * ice_get_res - get a resource from the tracker * @pf: board private structure * @res: pointer to the resource - * @needed: size of the block needed - * @id: identifier to track owner * - * Returns the base item index of the block, or negative for error + * Returns the item index, or negative for error */ int -ice_get_res(struct ice_pf *pf, struct ice_res_tracker 
*res, u16 needed, u16 id) +ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res) { - if (!res || !pf) - return -EINVAL; + u16 i; - if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) { - dev_err(ice_pf_to_dev(pf), "param err: needed=%d, num_entries = %d id=0x%04x\n", - needed, res->num_entries, id); + if (!res || !pf) return -EINVAL; - } - - return ice_search_res(res, needed, id); -} - -/** - * ice_vsi_setup_vector_base - Set up the base vector for the given VSI - * @vsi: ptr to the VSI - * - * This should only be called after ice_vsi_alloc_def() which allocates the - * corresponding SW VSI structure and initializes num_queue_pairs for the - * newly allocated VSI. - * - * Returns 0 on success or negative on failure - */ -static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - u16 num_q_vectors, id; - struct device *dev; - int base; - dev = ice_pf_to_dev(pf); - /* SRIOV doesn't grab irq_tracker entries for each VSI */ - if (vsi->type == ICE_VSI_VF) - return 0; - if (vsi->type == ICE_VSI_CHNL) - return 0; - - if (vsi->base_vector) { - dev_dbg(dev, "VSI %d has non-zero base vector %d\n", - vsi->vsi_num, vsi->base_vector); - return -EEXIST; - } - - num_q_vectors = vsi->num_q_vectors; - /* reserve slots from OS requested IRQs */ - if (vsi->type == ICE_VSI_CTRL && vsi->vf) { - struct ice_vsi *ctrl_vsi = ice_get_vf_ctrl_vsi(pf, vsi); - - /* reuse VF control VSI interrupt vector */ - if (ctrl_vsi) { - vsi->base_vector = ctrl_vsi->base_vector; - return 0; - } + /* skip already allocated entries */ + for (i = 0; i < res->end; i++) + if (!(res->list[i] & ICE_RES_VALID_BIT)) + break; - id = ICE_RES_VF_CTRL_VEC_ID; + if (i < res->end) { + res->list[i] = ICE_RES_VALID_BIT; + return i; } else { - id = vsi->idx; - } - - base = ice_get_res(pf, pf->irq_tracker, num_q_vectors, id); - if (base < 0) { - dev_err(dev, "%d MSI-X interrupts available. 
%s %d failed to get %d MSI-X vectors\n", - ice_get_free_res_count(pf->irq_tracker), - ice_vsi_type_str(vsi->type), vsi->idx, num_q_vectors); - return -ENOENT; + return -ENOMEM; } - vsi->base_vector = (u16)base; - pf->num_avail_sw_msix -= num_q_vectors; - - return 0; } /** @@ -2387,50 +2270,6 @@ static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi) ice_vsi_set_dcb_tc_cfg(vsi); } -/** - * ice_vsi_set_q_vectors_reg_idx - set the HW register index for all q_vectors - * @vsi: VSI to set the q_vectors register index on - */ -static int -ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi) -{ - u16 i; - - if (!vsi || !vsi->q_vectors) - return -EINVAL; - - ice_for_each_q_vector(vsi, i) { - struct ice_q_vector *q_vector = vsi->q_vectors[i]; - - if (!q_vector) { - dev_err(ice_pf_to_dev(vsi->back), "Failed to set reg_idx on q_vector %d VSI %d\n", - i, vsi->vsi_num); - goto clear_reg_idx; - } - - if (vsi->type == ICE_VSI_VF) { - struct ice_vf *vf = vsi->vf; - - q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector); - } else { - q_vector->reg_idx = - q_vector->v_idx + vsi->base_vector; - } - } - - return 0; - -clear_reg_idx: - ice_for_each_q_vector(vsi, i) { - struct ice_q_vector *q_vector = vsi->q_vectors[i]; - - if (q_vector) - q_vector->reg_idx = 0; - } - - return -EINVAL; -} - /** * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling * @vsi: the VSI being configured @@ -2675,14 +2514,6 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) if (ret) goto unroll_vsi_init; - ret = ice_vsi_setup_vector_base(vsi); - if (ret) - goto unroll_alloc_q_vector; - - ret = ice_vsi_set_q_vectors_reg_idx(vsi); - if (ret) - goto unroll_vector_base; - ret = ice_vsi_alloc_rings(vsi); if (ret) goto unroll_vector_base; @@ -2733,10 +2564,6 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) if (ret) goto unroll_alloc_q_vector; - ret = ice_vsi_set_q_vectors_reg_idx(vsi); - if (ret) - goto unroll_vector_base; - ret = ice_vsi_alloc_ring_stats(vsi); if (ret) goto unroll_vector_base; @@ -2769,8 +2596,6 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) unroll_vector_base: /* reclaim SW interrupts back to the common pool */ - ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); - pf->num_avail_sw_msix += vsi->num_q_vectors; unroll_alloc_q_vector: ice_vsi_free_q_vectors(vsi); unroll_vsi_init: @@ -2862,20 +2687,6 @@ void ice_vsi_decfg(struct ice_vsi *vsi) * many interrupts each VF needs. SR-IOV MSIX resources are also * cleared in the same manner. 
*/ - if (vsi->type == ICE_VSI_CTRL && vsi->vf) { - struct ice_vsi *ctrl_vsi = ice_get_vf_ctrl_vsi(pf, vsi); - - if (!ctrl_vsi) { - ice_free_res(pf->irq_tracker, vsi->base_vector, - ICE_RES_VF_CTRL_VEC_ID); - pf->num_avail_sw_msix += vsi->num_q_vectors; - } - } else if (vsi->type != ICE_VSI_VF) { - /* reclaim SW interrupts back to the common pool */ - ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx); - pf->num_avail_sw_msix += vsi->num_q_vectors; - vsi->base_vector = 0; - } if (vsi->type == ICE_VSI_VF && vsi->agg_node && vsi->agg_node->valid) @@ -2992,7 +2803,6 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi) void ice_vsi_free_irq(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; - int base = vsi->base_vector; int i; if (!vsi->q_vectors || !vsi->irqs_ready) @@ -3006,10 +2816,9 @@ void ice_vsi_free_irq(struct ice_vsi *vsi) ice_free_cpu_rx_rmap(vsi); ice_for_each_q_vector(vsi, i) { - u16 vector = i + base; int irq_num; - irq_num = pci_irq_vector(pf->pdev, vector); + irq_num = vsi->q_vectors[i]->irq.virq; /* free only the irqs that were actually requested */ if (!vsi->q_vectors[i] || @@ -3141,7 +2950,6 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked) */ void ice_vsi_dis_irq(struct ice_vsi *vsi) { - int base = vsi->base_vector; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; u32 val; @@ -3188,7 +2996,7 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi) return; ice_for_each_q_vector(vsi, i) - synchronize_irq(pci_irq_vector(pf->pdev, i + base)); + synchronize_irq(vsi->q_vectors[i]->irq.virq); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 75221478f2dc..2f52f9e32858 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -104,10 +104,10 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked); void ice_vsi_decfg(struct ice_vsi *vsi); void ice_dis_vsi(struct ice_vsi *vsi, bool locked); -int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id); +int ice_free_res(struct ice_res_tracker *res, u16 index); int -ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id); +ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res); int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags); int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index ce8cd49ae10c..efc621c0bd6c 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2490,7 +2490,6 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) { int q_vectors = vsi->num_q_vectors; struct ice_pf *pf = vsi->back; - int base = vsi->base_vector; struct device *dev; int rx_int_idx = 0; int tx_int_idx = 0; @@ -2501,7 +2500,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) for (vector = 0; vector < q_vectors; vector++) { struct ice_q_vector *q_vector = vsi->q_vectors[vector]; - irq_num = pci_irq_vector(pf->pdev, base + vector); + irq_num = q_vector->irq.virq; if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) { snprintf(q_vector->name, sizeof(q_vector->name) - 1, @@ -2555,9 +2554,8 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) return 0; free_q_irqs: - while (vector) { - vector--; - irq_num = pci_irq_vector(pf->pdev, base + vector); + while (vector--) { + irq_num = vsi->q_vectors[vector]->irq.virq; if (!IS_ENABLED(CONFIG_RFS_ACCEL)) 
irq_set_affinity_notifier(irq_num, NULL); irq_set_affinity_hint(irq_num, NULL); @@ -3047,7 +3045,7 @@ static void ice_ena_misc_vector(struct ice_pf *pf) wr32(hw, PFINT_OICR_ENA, val); /* SW_ITR_IDX = 0, but don't change INTENA */ - wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), + wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index), GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M); } @@ -3234,7 +3232,7 @@ static void ice_dis_ctrlq_interrupts(struct ice_hw *hw) */ static void ice_free_irq_msix_misc(struct ice_pf *pf) { - int misc_irq_num = pci_irq_vector(pf->pdev, pf->oicr_idx); + int misc_irq_num = pf->oicr_irq.virq; struct ice_hw *hw = &pf->hw; ice_dis_ctrlq_interrupts(hw); @@ -3246,8 +3244,7 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf) synchronize_irq(misc_irq_num); devm_free_irq(ice_pf_to_dev(pf), misc_irq_num, pf); - pf->num_avail_sw_msix += 1; - ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID); + ice_free_irq(pf, pf->oicr_irq); } /** @@ -3293,7 +3290,8 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - int oicr_idx, err = 0; + struct msi_map oicr_irq; + int err = 0; if (!pf->int_name[0]) snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", @@ -3307,30 +3305,26 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) goto skip_req_irq; /* reserve one vector in irq_tracker for misc interrupts */ - oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); - if (oicr_idx < 0) - return oicr_idx; - - pf->num_avail_sw_msix -= 1; - pf->oicr_idx = (u16)oicr_idx; - - err = devm_request_threaded_irq(dev, - pci_irq_vector(pf->pdev, pf->oicr_idx), - ice_misc_intr, ice_misc_intr_thread_fn, - 0, pf->int_name, pf); + oicr_irq = ice_alloc_irq(pf); + if (oicr_irq.index < 0) + return oicr_irq.index; + + pf->oicr_irq = oicr_irq; + err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr, + ice_misc_intr_thread_fn, 0, + pf->int_name, pf); if (err) { dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n", pf->int_name, err); - ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); - pf->num_avail_sw_msix += 1; + ice_free_irq(pf, pf->oicr_irq); return err; } skip_req_irq: ice_ena_misc_vector(pf); - ice_ena_ctrlq_interrupts(hw, pf->oicr_idx); - wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx), + ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index); + wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index), ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S); ice_flush(hw); diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 972d4f6fd615..d4b6c997141d 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -911,7 +911,7 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) spin_unlock(&tx->lock); /* wait for potentially outstanding interrupt to complete */ - synchronize_irq(pci_irq_vector(pf->pdev, pf->oicr_idx)); + synchronize_irq(pf->oicr_irq.virq); ice_ptp_flush_tx_tracker(pf, tx); diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index 65f971b74717..0fc2b26a2fa6 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -835,7 +835,7 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) int ret; /* Disable global interrupt 0 so we don't try to handle the VFLR. 
*/ - wr32(hw, GLINT_DYN_CTL(pf->oicr_idx), + wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index), ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); set_bit(ICE_OICR_INTR_DIS, pf->state); ice_flush(hw); diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 4102416d7a41..a7fe2b4ce655 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -90,7 +90,6 @@ ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring, { struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - int base = vsi->base_vector; u16 reg; u32 val; @@ -103,11 +102,9 @@ ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring, wr32(hw, QINT_RQCTL(reg), val); if (q_vector) { - u16 v_idx = q_vector->v_idx; - wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0); ice_flush(hw); - synchronize_irq(pci_irq_vector(pf->pdev, v_idx + base)); + synchronize_irq(q_vector->irq.virq); } } -- cgit v1.2.3 From 011670cc340cbc1131677fe233b1a52acee969ee Mon Sep 17 00:00:00 2001 From: Piotr Raczynski Date: Mon, 15 May 2023 21:03:19 +0200 Subject: ice: add dynamic interrupt allocation Currently driver can only allocate interrupt vectors during init phase by calling pci_alloc_irq_vectors. Change that and make use of new pci_msix_alloc_irq_at/pci_msix_free_irq API and enable to allocate and free more interrupts after MSIX has been enabled. Since not all platforms supports dynamic allocation, check it with pci_msix_can_alloc_dyn. Extend the tracker to keep track how many interrupts are allocated initially so when all such vectors are already used, additional interrupts are automatically allocated dynamically. Remember each interrupt allocation method to then free appropriately. Since some features may require interrupts allocated dynamically add appropriate VSI flag and take it into account when allocating new interrupt. 
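As a rough, hypothetical illustration of the dynamic path described above (not the ice implementation), growing the MSI-X set after pci_alloc_irq_vectors() has already enabled it looks roughly like this; MSI_ANY_INDEX asks the core to pick a free table entry, while the ice code instead passes an explicit index from its tracker:

#include <linux/errno.h>
#include <linux/msi_api.h>
#include <linux/pci.h>

/* Hypothetical helper: allocate one extra MSI-X vector at runtime. */
static struct msi_map example_alloc_extra_vector(struct pci_dev *pdev)
{
	struct msi_map map = { .index = -ENOENT, .virq = -ENOENT };

	if (!pci_msix_can_alloc_dyn(pdev))
		return map;	/* device/platform cannot grow the MSI-X set */

	return pci_msix_alloc_irq_at(pdev, MSI_ANY_INDEX, NULL);
}

/* Hypothetical teardown: only dynamically allocated entries are freed this way. */
static void example_free_extra_vector(struct pci_dev *pdev, struct msi_map map)
{
	if (map.index >= 0)
		pci_msix_free_irq(pdev, map);
}
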
Reviewed-by: Michal Swiatkowski Reviewed-by: Simon Horman Tested-by: Pucha Himasekhar Reddy (A Contingent worker at Intel) Signed-off-by: Piotr Raczynski Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice.h | 3 + drivers/net/ethernet/intel/ice/ice_base.c | 2 +- drivers/net/ethernet/intel/ice/ice_idc.c | 2 +- drivers/net/ethernet/intel/ice/ice_irq.c | 109 +++++++++++++++++++++++++---- drivers/net/ethernet/intel/ice/ice_irq.h | 5 +- drivers/net/ethernet/intel/ice/ice_main.c | 2 +- drivers/net/ethernet/intel/ice/ice_sriov.c | 5 +- 7 files changed, 107 insertions(+), 21 deletions(-) (limited to 'drivers/net/ethernet/intel/ice/ice_main.c') diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 8541d986ec7f..d637032c8139 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -338,6 +338,9 @@ struct ice_vsi { u32 rx_buf_failed; u32 rx_page_failed; u16 num_q_vectors; + /* tell if only dynamic irq allocation is allowed */ + bool irq_dyn_alloc; + enum ice_vsi_type type; u16 vsi_num; /* HW (absolute) index of this VSI */ u16 idx; /* software index in pf->vsi[] */ diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index cb0913cb9741..4a12316f7b46 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -137,7 +137,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx) } } - q_vector->irq = ice_alloc_irq(pf); + q_vector->irq = ice_alloc_irq(pf, vsi->irq_dyn_alloc); if (q_vector->irq.index < 0) { err = -ENOMEM; goto err_free_q_vector; diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c index bc016bb4440c..145b27f2a4ce 100644 --- a/drivers/net/ethernet/intel/ice/ice_idc.c +++ b/drivers/net/ethernet/intel/ice/ice_idc.c @@ -250,7 +250,7 @@ static int ice_alloc_rdma_qvectors(struct ice_pf *pf) struct msix_entry *entry = &pf->msix_entries[i]; struct msi_map map; - map = ice_alloc_irq(pf); + map = ice_alloc_irq(pf, false); if (map.index < 0) break; diff --git a/drivers/net/ethernet/intel/ice/ice_irq.c b/drivers/net/ethernet/intel/ice/ice_irq.c index 1713347c577f..ad82ff7d1995 100644 --- a/drivers/net/ethernet/intel/ice/ice_irq.c +++ b/drivers/net/ethernet/intel/ice/ice_irq.c @@ -9,11 +9,14 @@ * ice_init_irq_tracker - initialize interrupt tracker * @pf: board private structure * @max_vectors: maximum number of vectors that tracker can hold + * @num_static: number of preallocated interrupts */ static void -ice_init_irq_tracker(struct ice_pf *pf, unsigned int max_vectors) +ice_init_irq_tracker(struct ice_pf *pf, unsigned int max_vectors, + unsigned int num_static) { pf->irq_tracker.num_entries = max_vectors; + pf->irq_tracker.num_static = num_static; xa_init_flags(&pf->irq_tracker.entries, XA_FLAGS_ALLOC); } @@ -42,6 +45,7 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index) /** * ice_get_irq_res - get an interrupt resource * @pf: board private structure + * @dyn_only: force entry to be dynamically allocated * * Allocate new irq entry in the free slot of the tracker. Since xarray * is used, always allocate new entry at the lowest possible index. Set @@ -49,10 +53,11 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index) * * Returns allocated irq entry or NULL on failure. 
*/ -static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf) +static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only) { struct xa_limit limit = { .max = pf->irq_tracker.num_entries, .min = 0 }; + unsigned int num_static = pf->irq_tracker.num_static; struct ice_irq_entry *entry; unsigned int index; int ret; @@ -61,6 +66,10 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf) if (!entry) return NULL; + /* skip preallocated entries if the caller says so */ + if (dyn_only) + limit.min = num_static; + ret = xa_alloc(&pf->irq_tracker.entries, &index, entry, limit, GFP_KERNEL); @@ -69,6 +78,7 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf) entry = NULL; } else { entry->index = index; + entry->dynamic = index >= num_static; } return entry; @@ -241,14 +251,20 @@ void ice_clear_interrupt_scheme(struct ice_pf *pf) */ int ice_init_interrupt_scheme(struct ice_pf *pf) { - int vectors; + int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; + int vectors, max_vectors; vectors = ice_ena_msix_range(pf); if (vectors < 0) - return vectors; + return -ENOMEM; + + if (pci_msix_can_alloc_dyn(pf->pdev)) + max_vectors = total_vectors; + else + max_vectors = vectors; - ice_init_irq_tracker(pf, vectors); + ice_init_irq_tracker(pf, max_vectors, vectors); return 0; } @@ -256,33 +272,55 @@ int ice_init_interrupt_scheme(struct ice_pf *pf) /** * ice_alloc_irq - Allocate new interrupt vector * @pf: board private structure + * @dyn_only: force dynamic allocation of the interrupt * * Allocate new interrupt vector for a given owner id. * return struct msi_map with interrupt details and track * allocated interrupt appropriately. * - * This function mimics individual interrupt allocation, - * even interrupts are actually already allocated with - * pci_alloc_irq_vectors. Individual allocation helps - * to track interrupts and simplifies interrupt related - * handling. + * This function reserves new irq entry from the irq_tracker. + * if according to the tracker information all interrupts that + * were allocated with ice_pci_alloc_irq_vectors are already used + * and dynamically allocated interrupts are supported then new + * interrupt will be allocated with pci_msix_alloc_irq_at. + * + * Some callers may only support dynamically allocated interrupts. + * This is indicated with dyn_only flag. * * On failure, return map with negative .index. The caller * is expected to check returned map index. 
* */ -struct msi_map ice_alloc_irq(struct ice_pf *pf) +struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only) { + int sriov_base_vector = pf->sriov_base_vector; struct msi_map map = { .index = -ENOENT }; + struct device *dev = ice_pf_to_dev(pf); struct ice_irq_entry *entry; - entry = ice_get_irq_res(pf); + entry = ice_get_irq_res(pf, dyn_only); if (!entry) return map; - map.index = entry->index; - map.virq = pci_irq_vector(pf->pdev, map.index); + /* fail if we're about to violate SRIOV vectors space */ + if (sriov_base_vector && entry->index >= sriov_base_vector) + goto exit_free_res; + + if (pci_msix_can_alloc_dyn(pf->pdev) && entry->dynamic) { + map = pci_msix_alloc_irq_at(pf->pdev, entry->index, NULL); + if (map.index < 0) + goto exit_free_res; + dev_dbg(dev, "allocated new irq at index %d\n", map.index); + } else { + map.index = entry->index; + map.virq = pci_irq_vector(pf->pdev, map.index); + } + + return map; +exit_free_res: + dev_err(dev, "Could not allocate irq at idx %d\n", entry->index); + ice_free_irq_res(pf, entry->index); return map; } @@ -291,9 +329,50 @@ struct msi_map ice_alloc_irq(struct ice_pf *pf) * @pf: board private structure * @map: map with interrupt details * - * Remove allocated interrupt from the interrupt tracker. + * Remove allocated interrupt from the interrupt tracker. If interrupt was + * allocated dynamically, free respective interrupt vector. */ void ice_free_irq(struct ice_pf *pf, struct msi_map map) { + struct ice_irq_entry *entry; + + entry = xa_load(&pf->irq_tracker.entries, map.index); + + if (!entry) { + dev_err(ice_pf_to_dev(pf), "Failed to get MSIX interrupt entry at index %d", + map.index); + return; + } + + dev_dbg(ice_pf_to_dev(pf), "Free irq at index %d\n", map.index); + + if (entry->dynamic) + pci_msix_free_irq(pf->pdev, map); + ice_free_irq_res(pf, map.index); } + +/** + * ice_get_max_used_msix_vector - Get the max used interrupt vector + * @pf: board private structure + * + * Return index of maximum used interrupt vectors with respect to the + * beginning of the MSIX table. Take into account that some interrupts + * may have been dynamically allocated after MSIX was initially enabled. 
+ */ +int ice_get_max_used_msix_vector(struct ice_pf *pf) +{ + unsigned long start, index, max_idx; + void *entry; + + /* Treat all preallocated interrupts as used */ + start = pf->irq_tracker.num_static; + max_idx = start - 1; + + xa_for_each_start(&pf->irq_tracker.entries, index, entry, start) { + if (index > max_idx) + max_idx = index; + } + + return max_idx; +} diff --git a/drivers/net/ethernet/intel/ice/ice_irq.h b/drivers/net/ethernet/intel/ice/ice_irq.h index da5cdb1f0d3a..f35efc08575e 100644 --- a/drivers/net/ethernet/intel/ice/ice_irq.h +++ b/drivers/net/ethernet/intel/ice/ice_irq.h @@ -6,17 +6,20 @@ struct ice_irq_entry { unsigned int index; + bool dynamic; /* allocation type flag */ }; struct ice_irq_tracker { struct xarray entries; u16 num_entries; /* total vectors available */ + u16 num_static; /* preallocated entries */ }; int ice_init_interrupt_scheme(struct ice_pf *pf); void ice_clear_interrupt_scheme(struct ice_pf *pf); -struct msi_map ice_alloc_irq(struct ice_pf *pf); +struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only); void ice_free_irq(struct ice_pf *pf, struct msi_map map); +int ice_get_max_used_msix_vector(struct ice_pf *pf); #endif diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index efc621c0bd6c..62e91512aeab 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3305,7 +3305,7 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) goto skip_req_irq; /* reserve one vector in irq_tracker for misc interrupts */ - oicr_irq = ice_alloc_irq(pf); + oicr_irq = ice_alloc_irq(pf, false); if (oicr_irq.index < 0) return oicr_irq.index; diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index 195105ce9039..80c643fb9f2f 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -418,7 +418,7 @@ int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector) static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed) { u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; - int vectors_used = pf->irq_tracker.num_entries; + int vectors_used = ice_get_max_used_msix_vector(pf); int sriov_base_vector; sriov_base_vector = total_vectors - num_msix_needed; @@ -458,6 +458,7 @@ static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed) */ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs) { + int vectors_used = ice_get_max_used_msix_vector(pf); u16 num_msix_per_vf, num_txq, num_rxq, avail_qs; int msix_avail_per_vf, msix_avail_for_sriov; struct device *dev = ice_pf_to_dev(pf); @@ -470,7 +471,7 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs) /* determine MSI-X resources per VF */ msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors - - pf->irq_tracker.num_entries; + vectors_used; msix_avail_per_vf = msix_avail_for_sriov / num_vfs; if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) { num_msix_per_vf = ICE_NUM_VF_MSIX_MED; -- cgit v1.2.3 From 6e8b2c88fc8cf95ed09de25946b20b7536c88cd5 Mon Sep 17 00:00:00 2001 From: Karol Kolacinski Date: Thu, 1 Jun 2023 14:15:03 -0700 Subject: ice: handle extts in the miscellaneous interrupt thread The ice_ptp_extts_work() and ice_ptp_periodic_work() functions are both scheduled on the same kthread worker, pf.ptp.kworker. The ice_ptp_periodic_work() function sends to the firmware to interact with the PHY, and must block to wait for responses. 
This can cause delay in responding to the PFINT_OICR_TSYN_EVNT interrupt cause, ultimately resulting in disruption to processing an input signal of the frequency is high enough. In our testing, even 100 Hz signals get disrupted. Fix this by instead processing the signal inside the miscellaneous interrupt thread prior to handling Tx timestamps. Use atomic bits in a new pf->misc_thread bitmap in order to safely communicate which tasks require processing within the ice_misc_intr_thread_fn(). This ensures the communication of desired tasks from the ice_misc_intr() are correctly processed without racing even in the event that the interrupt triggers again before the thread function exits. Fixes: 172db5f91d5f ("ice: add support for auxiliary input/output pins") Signed-off-by: Karol Kolacinski Signed-off-by: Jacob Keller Tested-by: Arpana Arland (A Contingent worker at Intel) Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice.h | 7 +++++++ drivers/net/ethernet/intel/ice/ice_main.c | 29 +++++++++++++++++++++-------- drivers/net/ethernet/intel/ice/ice_ptp.c | 12 +++--------- drivers/net/ethernet/intel/ice/ice_ptp.h | 4 ++-- 4 files changed, 33 insertions(+), 19 deletions(-) (limited to 'drivers/net/ethernet/intel/ice/ice_main.c') diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index b4bca1d964a9..4ba3d99439a0 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -508,6 +508,12 @@ enum ice_pf_flags { ICE_PF_FLAGS_NBITS /* must be last */ }; +enum ice_misc_thread_tasks { + ICE_MISC_THREAD_EXTTS_EVENT, + ICE_MISC_THREAD_TX_TSTAMP, + ICE_MISC_THREAD_NBITS /* must be last */ +}; + struct ice_switchdev_info { struct ice_vsi *control_vsi; struct ice_vsi *uplink_vsi; @@ -550,6 +556,7 @@ struct ice_pf { DECLARE_BITMAP(features, ICE_F_MAX); DECLARE_BITMAP(state, ICE_STATE_NBITS); DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS); + DECLARE_BITMAP(misc_thread, ICE_MISC_THREAD_NBITS); unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */ unsigned long *avail_rxqs; /* bitmap to track PF Rx queue usage */ unsigned long serv_tmr_period; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 62e91512aeab..314a42808e39 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3139,20 +3139,28 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) if (oicr & PFINT_OICR_TSYN_TX_M) { ena_mask &= ~PFINT_OICR_TSYN_TX_M; - if (!hw->reset_ongoing) + if (!hw->reset_ongoing) { + set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread); ret = IRQ_WAKE_THREAD; + } } if (oicr & PFINT_OICR_TSYN_EVNT_M) { u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx)); - /* Save EVENTs from GTSYN register */ - pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M | - GLTSYN_STAT_EVENT1_M | - GLTSYN_STAT_EVENT2_M); ena_mask &= ~PFINT_OICR_TSYN_EVNT_M; - kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work); + + if (hw->func_caps.ts_func_info.src_tmr_owned) { + /* Save EVENTs from GLTSYN register */ + pf->ptp.ext_ts_irq |= gltsyn_stat & + (GLTSYN_STAT_EVENT0_M | + GLTSYN_STAT_EVENT1_M | + GLTSYN_STAT_EVENT2_M); + + set_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread); + ret = IRQ_WAKE_THREAD; + } } #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M) @@ -3196,8 +3204,13 @@ static irqreturn_t ice_misc_intr_thread_fn(int 
__always_unused irq, void *data) if (ice_is_reset_in_progress(pf->state)) return IRQ_HANDLED; - while (!ice_ptp_process_ts(pf)) - usleep_range(50, 100); + if (test_and_clear_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread)) + ice_ptp_extts_event(pf); + + if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) { + while (!ice_ptp_process_ts(pf)) + usleep_range(50, 100); + } return IRQ_HANDLED; } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index d4b6c997141d..6f51ebaf1d70 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -1458,15 +1458,11 @@ static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm) } /** - * ice_ptp_extts_work - Workqueue task function - * @work: external timestamp work structure - * - * Service for PTP external clock event + * ice_ptp_extts_event - Process PTP external clock event + * @pf: Board private structure */ -static void ice_ptp_extts_work(struct kthread_work *work) +void ice_ptp_extts_event(struct ice_pf *pf) { - struct ice_ptp *ptp = container_of(work, struct ice_ptp, extts_work); - struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp); struct ptp_clock_event event; struct ice_hw *hw = &pf->hw; u8 chan, tmr_idx; @@ -2558,7 +2554,6 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf) ice_ptp_cfg_timestamp(pf, false); kthread_cancel_delayed_work_sync(&ptp->work); - kthread_cancel_work_sync(&ptp->extts_work); if (test_bit(ICE_PFR_REQ, pf->state)) return; @@ -2656,7 +2651,6 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp) /* Initialize work functions */ kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work); - kthread_init_work(&ptp->extts_work, ice_ptp_extts_work); /* Allocate a kworker for handling work required for the ports * connected to the PTP hardware clock. 
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index 9cda2f43e0e5..9f8902c1e743 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -169,7 +169,6 @@ struct ice_ptp_port { * struct ice_ptp - data used for integrating with CONFIG_PTP_1588_CLOCK * @port: data for the PHY port initialization procedure * @work: delayed work function for periodic tasks - * @extts_work: work function for handling external Tx timestamps * @cached_phc_time: a cached copy of the PHC time for timestamp extension * @cached_phc_jiffies: jiffies when cached_phc_time was last updated * @ext_ts_chan: the external timestamp channel in use @@ -190,7 +189,6 @@ struct ice_ptp_port { struct ice_ptp { struct ice_ptp_port port; struct kthread_delayed_work work; - struct kthread_work extts_work; u64 cached_phc_time; unsigned long cached_phc_jiffies; u8 ext_ts_chan; @@ -256,6 +254,7 @@ int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr); void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena); int ice_get_ptp_clock_index(struct ice_pf *pf); +void ice_ptp_extts_event(struct ice_pf *pf); s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb); bool ice_ptp_process_ts(struct ice_pf *pf); @@ -284,6 +283,7 @@ static inline int ice_get_ptp_clock_index(struct ice_pf *pf) return -1; } +static inline void ice_ptp_extts_event(struct ice_pf *pf) { } static inline s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb) { -- cgit v1.2.3 From d578e618f192f453baf4fd7e32fec88ed7e678b8 Mon Sep 17 00:00:00 2001 From: Karol Kolacinski Date: Thu, 1 Jun 2023 14:15:04 -0700 Subject: ice: always return IRQ_WAKE_THREAD in ice_misc_intr() Refactor the ice_misc_intr() function to always return IRQ_WAKE_THREAD, and schedule the service task during the soft IRQ thread function instead of at the end of the hard IRQ handler. Remove the duplicate call to ice_service_task_schedule() that happened when we got a PCI exception. 
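For readers less familiar with the threaded-IRQ contract this change relies on, a minimal sketch of the split follows. The demo_* names and types are hypothetical illustrations rather than ice driver code; only request_threaded_irq(), the atomic bitops, and the IRQ_* return values are real kernel interfaces, and the real handlers do considerably more.

	/* Hypothetical sketch of a hard handler that only latches work and defers. */
	enum demo_work_bits { DEMO_WORK_SLOW, DEMO_WORK_NBITS };

	struct demo_priv {
		DECLARE_BITMAP(work_bits, DEMO_WORK_NBITS);
	};

	static void demo_do_slow_work(struct demo_priv *priv);	/* may sleep */

	static irqreturn_t demo_hard_irq(int irq, void *data)
	{
		struct demo_priv *priv = data;

		/* Hard IRQ context: ack the cause, record pending work as atomic bits. */
		set_bit(DEMO_WORK_SLOW, priv->work_bits);

		/* Always wake the thread; it decides whether anything needs doing. */
		return IRQ_WAKE_THREAD;
	}

	static irqreturn_t demo_thread_fn(int irq, void *data)
	{
		struct demo_priv *priv = data;

		/* Process context: sleeping (e.g. waiting on firmware) is allowed here. */
		if (test_and_clear_bit(DEMO_WORK_SLOW, priv->work_bits))
			demo_do_slow_work(priv);

		return IRQ_HANDLED;
	}

	static int demo_request(struct demo_priv *priv, unsigned int irq)
	{
		/* Both handlers share the same dev_id cookie (priv). */
		return request_threaded_irq(irq, demo_hard_irq, demo_thread_fn, 0,
					    "demo", priv);
	}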
Signed-off-by: Karol Kolacinski Signed-off-by: Jacob Keller Tested-by: Arpana Arland (A Contingent worker at Intel) Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_main.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet/intel/ice/ice_main.c') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 314a42808e39..45dab7f62198 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3058,7 +3058,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) { struct ice_pf *pf = (struct ice_pf *)data; struct ice_hw *hw = &pf->hw; - irqreturn_t ret = IRQ_NONE; struct device *dev; u32 oicr, ena_mask; @@ -3139,10 +3138,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) if (oicr & PFINT_OICR_TSYN_TX_M) { ena_mask &= ~PFINT_OICR_TSYN_TX_M; - if (!hw->reset_ongoing) { + if (!hw->reset_ongoing) set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread); - ret = IRQ_WAKE_THREAD; - } } if (oicr & PFINT_OICR_TSYN_EVNT_M) { @@ -3159,7 +3156,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) GLTSYN_STAT_EVENT2_M); set_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread); - ret = IRQ_WAKE_THREAD; } } @@ -3180,16 +3176,12 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) if (oicr & (PFINT_OICR_PCI_EXCEPTION_M | PFINT_OICR_ECC_ERR_M)) { set_bit(ICE_PFR_REQ, pf->state); - ice_service_task_schedule(pf); } } - if (!ret) - ret = IRQ_HANDLED; - ice_service_task_schedule(pf); ice_irq_dynamic_ena(hw, NULL, NULL); - return ret; + return IRQ_WAKE_THREAD; } /** @@ -3204,6 +3196,8 @@ static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data) if (ice_is_reset_in_progress(pf->state)) return IRQ_HANDLED; + ice_service_task_schedule(pf); + if (test_and_clear_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread)) ice_ptp_extts_event(pf); -- cgit v1.2.3 From ae39eb42dd06e058215d0f782365b84039d686d4 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 1 Jun 2023 14:15:05 -0700 Subject: ice: introduce ICE_TX_TSTAMP_WORK enumeration The ice_ptp_process_ts() function and its various helper functions return a boolean value indicating whether any work is remaining. This use of a boolean has grown confusing as we have multiple helpers that pass status between each other. Readers must be aware of what "true" and "false" mean, and it is very easy to get their meaning inverted. The names of the functions are not standard "yes/no" questions, which is the best practice for boolean returns. Replace this use of an enumeration with a custom type, enum ice_tx_tstamp_work. This enumeration clearly indicates whether all work is done, or if more work is pending. To aid in readability, factor the actual list iteration and processing out into ice_ptp_process_tx_tstamp(), making it void. Then call this in ice_ptp_tx_tstamp() ensuring that we always check the Tracker list at the end when determining the appropriate return value. Now the return value is an explicit name instead of the true or false value. This is easier to follow and makes reading the resulting callers much simpler. In addition, this paves the way for future work to allow E822 hardware to process timestamps for all functions using a single interrupt on the clock owning PF. 
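The readability argument is easy to see outside the driver as well. The following self-contained, hypothetical example (not ice code) contrasts a named status enum with the bare boolean it replaces: the caller's loop condition now states what "pending" means instead of relying on the reader to remember which way true points.

	#include <stdio.h>

	/* Named status instead of a bare bool return. */
	enum work_status {
		WORK_DONE = 0,
		WORK_PENDING,
	};

	static int remaining = 3;

	/* With a bool this would read "return !more_work", inverting the meaning. */
	static enum work_status process_once(void)
	{
		if (remaining > 0)
			remaining--;

		return remaining ? WORK_PENDING : WORK_DONE;
	}

	int main(void)
	{
		int passes = 0;

		while (process_once() == WORK_PENDING)
			passes++;

		printf("finished after %d extra passes\n", passes);
		return 0;
	}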
Signed-off-by: Jacob Keller Tested-by: Arpana Arland (A Contingent worker at Intel) Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_main.c | 2 +- drivers/net/ethernet/intel/ice/ice_ptp.c | 50 ++++++++++++++++++------------- drivers/net/ethernet/intel/ice/ice_ptp.h | 12 +++++++- 3 files changed, 42 insertions(+), 22 deletions(-) (limited to 'drivers/net/ethernet/intel/ice/ice_main.c') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 45dab7f62198..6811e2a3c154 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3202,7 +3202,7 @@ static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data) ice_ptp_extts_event(pf); if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) { - while (!ice_ptp_process_ts(pf)) + while (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) usleep_range(50, 100); } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 6f51ebaf1d70..81d96a40d5a7 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -617,7 +617,7 @@ ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx) } /** - * ice_ptp_tx_tstamp - Process Tx timestamps for a port + * ice_ptp_process_tx_tstamp - Process Tx timestamps for a port * @tx: the PTP Tx timestamp tracker * * Process timestamps captured by the PHY associated with this port. To do @@ -633,15 +633,6 @@ ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx) * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value * 7) send this 64 bit timestamp to the stack * - * Returns true if all timestamps were handled, and false if any slots remain - * without a timestamp. - * - * After looping, if we still have waiting SKBs, return false. This may cause - * us effectively poll even when not strictly necessary. We do this because - * it's possible a new timestamp was requested around the same time as the - * interrupt. In some cases hardware might not interrupt us again when the - * timestamp is captured. - * * Note that we do not hold the tracking lock while reading the Tx timestamp. * This is because reading the timestamp requires taking a mutex that might * sleep. @@ -673,10 +664,9 @@ ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx) * the packet will never be sent by hardware and discard it without reading * the timestamp register. */ -static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx) +static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx) { struct ice_ptp_port *ptp_port; - bool more_timestamps; struct ice_pf *pf; struct ice_hw *hw; u64 tstamp_ready; @@ -685,7 +675,7 @@ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx) u8 idx; if (!tx->init) - return true; + return; ptp_port = container_of(tx, struct ice_ptp_port, tx); pf = ptp_port_to_pf(ptp_port); @@ -694,7 +684,7 @@ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx) /* Read the Tx ready status first */ err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready); if (err) - return false; + return; /* Drop packets if the link went down */ link_up = ptp_port->link_up; @@ -782,15 +772,34 @@ skip_ts_read: skb_tstamp_tx(skb, &shhwtstamps); dev_kfree_skb_any(skb); } +} - /* Check if we still have work to do. If so, re-queue this task to - * poll for remaining timestamps. - */ +/** + * ice_ptp_tx_tstamp - Process Tx timestamps for this function. 
+ * @tx: Tx tracking structure to initialize + * + * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding incomplete + * Tx timestamps, or ICE_TX_TSTAMP_WORK_DONE otherwise. + */ +static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx) +{ + bool more_timestamps; + + if (!tx->init) + return ICE_TX_TSTAMP_WORK_DONE; + + /* Process the Tx timestamp tracker */ + ice_ptp_process_tx_tstamp(tx); + + /* Check if there are outstanding Tx timestamps */ spin_lock(&tx->lock); more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len); spin_unlock(&tx->lock); - return !more_timestamps; + if (more_timestamps) + return ICE_TX_TSTAMP_WORK_PENDING; + + return ICE_TX_TSTAMP_WORK_DONE; } /** @@ -2426,9 +2435,10 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb) * ice_ptp_process_ts - Process the PTP Tx timestamps * @pf: Board private structure * - * Returns true if timestamps are processed. + * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding Tx + * timestamps that need processing, and ICE_TX_TSTAMP_WORK_DONE otherwise. */ -bool ice_ptp_process_ts(struct ice_pf *pf) +enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf) { return ice_ptp_tx_tstamp(&pf->ptp.port.tx); } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index 9f8902c1e743..995a57019ba7 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -108,6 +108,16 @@ struct ice_tx_tstamp { u64 cached_tstamp; }; +/** + * enum ice_tx_tstamp_work - Status of Tx timestamp work function + * @ICE_TX_TSTAMP_WORK_DONE: Tx timestamp processing is complete + * @ICE_TX_TSTAMP_WORK_PENDING: More Tx timestamps are pending + */ +enum ice_tx_tstamp_work { + ICE_TX_TSTAMP_WORK_DONE = 0, + ICE_TX_TSTAMP_WORK_PENDING, +}; + /** * struct ice_ptp_tx - Tracking structure for all Tx timestamp requests on a port * @lock: lock to prevent concurrent access to fields of this struct @@ -256,7 +266,7 @@ int ice_get_ptp_clock_index(struct ice_pf *pf); void ice_ptp_extts_event(struct ice_pf *pf); s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb); -bool ice_ptp_process_ts(struct ice_pf *pf); +enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf); void ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring, -- cgit v1.2.3 From 78c50d6961fc05491ebbc71c35d87324b1a4f49a Mon Sep 17 00:00:00 2001 From: Kamil Maziarz Date: Tue, 6 Jun 2023 12:33:58 +0200 Subject: ice: Fix XDP memory leak when NIC is brought up and down Fix the buffer leak that occurs while switching the port up and down with traffic and XDP by checking for an active XDP program and freeing all empty TX buffers. 
Fixes: efc2214b6047 ("ice: Add support for XDP") Signed-off-by: Kamil Maziarz Tested-by: Chandan Kumar Rout (A Contingent Worker at Intel) Acked-by: Maciej Fijalkowski Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_main.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/net/ethernet/intel/ice/ice_main.c') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index a1f7c8edc22f..03513d4871ab 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -7056,6 +7056,10 @@ int ice_down(struct ice_vsi *vsi) ice_for_each_txq(vsi, i) ice_clean_tx_ring(vsi->tx_rings[i]); + if (ice_is_xdp_ena_vsi(vsi)) + ice_for_each_xdp_txq(vsi, i) + ice_clean_tx_ring(vsi->xdp_rings[i]); + ice_for_each_rxq(vsi, i) ice_clean_rx_ring(vsi->rx_rings[i]); -- cgit v1.2.3 From 9a8648cce8d8a4a7770b45239912e20edb9736ad Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 1 Jun 2023 14:15:06 -0700 Subject: ice: trigger PFINT_OICR_TSYN_TX interrupt instead of polling In ice_misc_intr_thread_fn(), if we do not complete all Tx timestamp work, the thread function will poll continuously forever. For E822 hardware, this wastes time as the return value from ice_ptp_process_ts() is accurate and always reports correctly that the PHY actually has new timestamp data. In addition, if we receive enough timestamps with the right pacing, we may never exit this polling. Should this occur, other tasks handled by the ice_misc_intr_thread_fn() will never be processed. Fix this by instead writing to PFINT_OICR, causing an emulated interrupt to be triggered immediately. This does take slightly more processing than just re-checking the timestamps. However, it allows all of the other interrupt causes a chance to be processed first in the hard IRQ function. Note that the OICR interrupt is configured to be throttled to no more than once every 124 microseconds. This gives an effective interrupt rate of ~8000 interrupts per second. This should thus not cause a significant increase in overall CPU usage when compared to sleeping. Signed-off-by: Jacob Keller Tested-by: Arpana Arland (A Contingent worker at Intel) Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_main.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/intel/ice/ice_main.c') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 6811e2a3c154..2665f72b5461 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3202,8 +3202,15 @@ static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data) ice_ptp_extts_event(pf); if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) { - while (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) - usleep_range(50, 100); + struct ice_hw *hw = &pf->hw; + + /* Process outstanding Tx timestamps. If there is more work, + * re-arm the interrupt to trigger again. + */ + if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) { + wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); + ice_flush(hw); + } } return IRQ_HANDLED; -- cgit v1.2.3 From 0ec38df36ea1cc4f21bf7cd61a89942b034883c5 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 1 Jun 2023 14:15:07 -0700 Subject: ice: do not re-enable miscellaneous interrupt until thread_fn completes The ice driver uses threaded IRQ for managing Tx timestamps via the devm_request_threaded_irq() interface. 
The ice_misc_intr() handler function is responsible for processing the hard interrupt context, and can wake the ice_misc_intr_thread_fn() by returning IRQ_WAKE_THREAD. The request_threaded_irq() function comment says: @handler is still called in hard interrupt context and has to check whether the interrupt originates from the device. If yes, it needs to disable the interrupt on the device and return IRQ_WAKE_THREAD which will wake up the handler thread and run the @thread_fn. We currently re-enable the Other Interrupt Cause Register (OCIR) at the end of ice_misc_intr(). In practice, this seems to be ok, but it can make communicating between the handler function and the thread function difficult. This is because the interrupt can trigger again while the thread function is still processing. Move the OICR update to the end of the thread function, leaving the other interrupt cause disabled in hardware until we complete one pass of the thread function. This prevents the miscellaneous interrupt from firing until after we finish the thread function. Signed-off-by: Jacob Keller Tested-by: Arpana Arland (A Contingent worker at Intel) Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_main.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet/intel/ice/ice_main.c') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 2665f72b5461..aa57d26a0ac7 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3179,8 +3179,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) } } - ice_irq_dynamic_ena(hw, NULL, NULL); - return IRQ_WAKE_THREAD; } @@ -3192,6 +3190,9 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data) { struct ice_pf *pf = data; + struct ice_hw *hw; + + hw = &pf->hw; if (ice_is_reset_in_progress(pf->state)) return IRQ_HANDLED; @@ -3202,8 +3203,6 @@ static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data) ice_ptp_extts_event(pf); if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) { - struct ice_hw *hw = &pf->hw; - /* Process outstanding Tx timestamps. If there is more work, * re-arm the interrupt to trigger again. */ @@ -3213,6 +3212,8 @@ static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data) } } + ice_irq_dynamic_ena(hw, NULL, NULL); + return IRQ_HANDLED; } -- cgit v1.2.3 From 24b454bc354ab7b1aa918a4fe3d7696516f592d4 Mon Sep 17 00:00:00 2001 From: Jakub Buchocki Date: Mon, 12 Jun 2023 10:14:21 -0700 Subject: ice: Fix ice module unload Clearing the interrupt scheme before PFR reset, during the removal routine, could cause the hardware errors and possibly lead to system reboot, as the PF reset can cause the interrupt to be generated. Place the call for PFR reset inside ice_deinit_dev(), wait until reset and all pending transactions are done, then call ice_clear_interrupt_scheme(). This introduces a PFR reset to multiple error paths. Additionally, remove the call for the reset from ice_load() - it will be a part of ice_unload() now. 
Error example: [ 75.229328] ice 0000:ca:00.1: Failed to read Tx Scheduler Tree - User Selection data from flash [ 77.571315] {1}[Hardware Error]: Hardware error from APEI Generic Hardware Error Source: 1 [ 77.571418] {1}[Hardware Error]: event severity: recoverable [ 77.571459] {1}[Hardware Error]: Error 0, type: recoverable [ 77.571500] {1}[Hardware Error]: section_type: PCIe error [ 77.571540] {1}[Hardware Error]: port_type: 4, root port [ 77.571580] {1}[Hardware Error]: version: 3.0 [ 77.571615] {1}[Hardware Error]: command: 0x0547, status: 0x4010 [ 77.571661] {1}[Hardware Error]: device_id: 0000:c9:02.0 [ 77.571703] {1}[Hardware Error]: slot: 25 [ 77.571736] {1}[Hardware Error]: secondary_bus: 0xca [ 77.571773] {1}[Hardware Error]: vendor_id: 0x8086, device_id: 0x347a [ 77.571821] {1}[Hardware Error]: class_code: 060400 [ 77.571858] {1}[Hardware Error]: bridge: secondary_status: 0x2800, control: 0x0013 [ 77.572490] pcieport 0000:c9:02.0: AER: aer_status: 0x00200000, aer_mask: 0x00100020 [ 77.572870] pcieport 0000:c9:02.0: [21] ACSViol (First) [ 77.573222] pcieport 0000:c9:02.0: AER: aer_layer=Transaction Layer, aer_agent=Receiver ID [ 77.573554] pcieport 0000:c9:02.0: AER: aer_uncor_severity: 0x00463010 [ 77.691273] {2}[Hardware Error]: Hardware error from APEI Generic Hardware Error Source: 1 [ 77.691738] {2}[Hardware Error]: event severity: recoverable [ 77.691971] {2}[Hardware Error]: Error 0, type: recoverable [ 77.692192] {2}[Hardware Error]: section_type: PCIe error [ 77.692403] {2}[Hardware Error]: port_type: 4, root port [ 77.692616] {2}[Hardware Error]: version: 3.0 [ 77.692825] {2}[Hardware Error]: command: 0x0547, status: 0x4010 [ 77.693032] {2}[Hardware Error]: device_id: 0000:c9:02.0 [ 77.693238] {2}[Hardware Error]: slot: 25 [ 77.693440] {2}[Hardware Error]: secondary_bus: 0xca [ 77.693641] {2}[Hardware Error]: vendor_id: 0x8086, device_id: 0x347a [ 77.693853] {2}[Hardware Error]: class_code: 060400 [ 77.694054] {2}[Hardware Error]: bridge: secondary_status: 0x0800, control: 0x0013 [ 77.719115] pci 0000:ca:00.1: AER: can't recover (no error_detected callback) [ 77.719140] pcieport 0000:c9:02.0: AER: device recovery failed [ 77.719216] pcieport 0000:c9:02.0: AER: aer_status: 0x00200000, aer_mask: 0x00100020 [ 77.719390] pcieport 0000:c9:02.0: [21] ACSViol (First) [ 77.719557] pcieport 0000:c9:02.0: AER: aer_layer=Transaction Layer, aer_agent=Receiver ID [ 77.719723] pcieport 0000:c9:02.0: AER: aer_uncor_severity: 0x00463010 Fixes: 5b246e533d01 ("ice: split probe into smaller functions") Signed-off-by: Jakub Buchocki Reviewed-by: Przemek Kitszel Tested-by: Pucha Himasekhar Reddy (A Contingent worker at Intel) Signed-off-by: Tony Nguyen Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230612171421.21570-1-anthony.l.nguyen@intel.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/intel/ice/ice_main.c | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet/intel/ice/ice_main.c') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 03513d4871ab..42c318ceff61 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -4802,9 +4802,13 @@ err_init_pf: static void ice_deinit_dev(struct ice_pf *pf) { ice_free_irq_msix_misc(pf); - ice_clear_interrupt_scheme(pf); ice_deinit_pf(pf); ice_deinit_hw(&pf->hw); + + /* Service task is already stopped, so call reset directly. 
*/ + ice_reset(&pf->hw, ICE_RESET_PFR); + pci_wait_for_pending_transaction(pf->pdev); + ice_clear_interrupt_scheme(pf); } static void ice_init_features(struct ice_pf *pf) @@ -5094,10 +5098,6 @@ int ice_load(struct ice_pf *pf) struct ice_vsi *vsi; int err; - err = ice_reset(&pf->hw, ICE_RESET_PFR); - if (err) - return err; - err = ice_init_dev(pf); if (err) return err; @@ -5354,12 +5354,6 @@ static void ice_remove(struct pci_dev *pdev) ice_setup_mc_magic_wake(pf); ice_set_wake(pf); - /* Issue a PFR as part of the prescribed driver unload flow. Do not - * do it via ice_schedule_reset() since there is no need to rebuild - * and the service task is already stopped. - */ - ice_reset(&pf->hw, ICE_RESET_PFR); - pci_wait_for_pending_transaction(pdev); pci_disable_device(pdev); } -- cgit v1.2.3 From 96868cca7971a5a3887717fdacd44b281fb87cc9 Mon Sep 17 00:00:00 2001 From: Piotr Gardocki Date: Wed, 14 Jun 2023 16:53:02 +0200 Subject: ice: remove unnecessary check for old MAC == new MAC The check has been moved to core. The ndo_set_mac_address callback is not being called with new MAC address equal to the old one anymore. Signed-off-by: Piotr Gardocki Reviewed-by: Simon Horman Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/intel/ice/ice_main.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'drivers/net/ethernet/intel/ice/ice_main.c') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index a0283b5bf65f..65bf399a0efc 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -5624,11 +5624,6 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) if (!is_valid_ether_addr(mac)) return -EADDRNOTAVAIL; - if (ether_addr_equal(netdev->dev_addr, mac)) { - netdev_dbg(netdev, "already using mac %pM\n", mac); - return 0; - } - if (test_bit(ICE_DOWN, pf->state) || ice_is_reset_in_progress(pf->state)) { netdev_err(netdev, "can't set mac %pM. device not ready\n", -- cgit v1.2.3 From 469748429ac81f0a6a344637fc9d3b1d16a9f3d8 Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Thu, 15 Jun 2023 13:33:26 +0200 Subject: ice: allow hot-swapping XDP programs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently ice driver's .ndo_bpf callback brings interface down and up independently of XDP resources' presence. This is only needed when either these resources have to be configured or removed. It means that if one is switching XDP programs on-the-fly with running traffic, packets will be dropped. To avoid this, compare early on ice_xdp_setup_prog() state of incoming bpf_prog pointer vs the bpf_prog pointer that is already assigned to VSI. Do the swap in case VSI has bpf_prog and incoming one are non-NULL. Lastly, while at it, put old bpf_prog *after* the update of Rx ring's bpf_prog pointer. In theory previous code could expose us to a state where Rx ring's bpf_prog would still be referring to old_prog that got released with earlier bpf_prog_put(). 
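The reordering in ice_vsi_assign_bpf_prog() is an instance of a more general rule: republish the new object to every consumer before dropping the reference to the old one. A hypothetical sketch of that pattern follows; the demo_* names and structures are illustrative only, while xchg(), WRITE_ONCE() and the put-style release mirror real kernel idioms.

	/* Hypothetical publish-then-release helper; not ice code. */
	struct demo_obj;	/* refcounted object, e.g. a BPF program */

	struct demo_queue {
		struct demo_obj *obj;
	};

	struct demo_ctx {
		struct demo_obj *cur;
		struct demo_queue *queues;
		int num_queues;
	};

	static void demo_obj_put(struct demo_obj *obj);

	static void demo_swap_shared(struct demo_ctx *ctx, struct demo_obj *new_obj)
	{
		struct demo_obj *old_obj;
		int i;

		/* Atomically install the new object for future lookups. */
		old_obj = xchg(&ctx->cur, new_obj);

		/* Publish the new pointer to every per-queue consumer first... */
		for (i = 0; i < ctx->num_queues; i++)
			WRITE_ONCE(ctx->queues[i].obj, ctx->cur);

		/* ...and only then drop the reference the old consumers relied on. */
		if (old_obj)
			demo_obj_put(old_obj);
	}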
Signed-off-by: Maciej Fijalkowski Acked-by: Toke Høiland-Jørgensen Reviewed-by: Alexander Lobakin Tested-by: Chandan Kumar Rout (A Contingent Worker at Intel) Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_main.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet/intel/ice/ice_main.c') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 65bf399a0efc..5dd88611141e 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2633,11 +2633,11 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog) int i; old_prog = xchg(&vsi->xdp_prog, prog); - if (old_prog) - bpf_prog_put(old_prog); - ice_for_each_rxq(vsi, i) WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); + + if (old_prog) + bpf_prog_put(old_prog); } /** @@ -2922,6 +2922,12 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, } } + /* hot swap progs and avoid toggling link */ + if (ice_is_xdp_ena_vsi(vsi) == !!prog) { + ice_vsi_assign_bpf_prog(vsi, prog); + return 0; + } + /* need to stop netdev while setting up the program for Rx rings */ if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { ret = ice_down(vsi); @@ -2954,13 +2960,6 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, xdp_ring_err = ice_realloc_zc_buf(vsi, false); if (xdp_ring_err) NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed"); - } else { - /* safe to call even when prog == vsi->xdp_prog as - * dev_xdp_install in net/core/dev.c incremented prog's - * refcount so corresponding bpf_prog_put won't cause - * underflow - */ - ice_vsi_assign_bpf_prog(vsi, prog); } if (if_running) -- cgit v1.2.3 From b7a0345723385c3cc0438cf4266ccc110dc7b583 Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Tue, 13 Jun 2023 13:35:52 +0200 Subject: ice: use ice_down_up() where applicable ice_change_mtu() is currently using a separate ice_down() and ice_up() calls to reflect changed MTU. ice_down_up() serves this purpose, so do the refactoring here. 
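For context, ice_down_up() folds the removed down-then-up sequence into a single helper. Its implementation is not shown in this series; a plausible shape, assuming it mirrors the code removed from ice_change_mtu(), is:

	/* Assumed shape of the helper; the in-tree implementation may differ. */
	static int ice_down_up(struct ice_vsi *vsi)
	{
		int err;

		/* If the VSI is already down there is nothing to cycle. */
		if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
			return 0;

		err = ice_down(vsi);
		if (err) {
			netdev_err(vsi->netdev, "ice_down failed: %d\n", err);
			return err;
		}

		err = ice_up(vsi);
		if (err)
			netdev_err(vsi->netdev, "ice_up failed: %d\n", err);

		return err;
	}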
Signed-off-by: Maciej Fijalkowski Reviewed-by: Przemek Kitszel Reviewed-by: Simon Horman Tested-by: Pucha Himasekhar Reddy (A Contingent worker at Intel) Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_main.c | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) (limited to 'drivers/net/ethernet/intel/ice/ice_main.c') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 5dd88611141e..93979ab18bc1 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -7412,21 +7412,9 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) } netdev->mtu = (unsigned int)new_mtu; - - /* if VSI is up, bring it down and then back up */ - if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { - err = ice_down(vsi); - if (err) { - netdev_err(netdev, "change MTU if_down err %d\n", err); - return err; - } - - err = ice_up(vsi); - if (err) { - netdev_err(netdev, "change MTU if_up err %d\n", err); - return err; - } - } + err = ice_down_up(vsi); + if (err) + return err; netdev_dbg(netdev, "changed MTU to %d\n", new_mtu); set_bit(ICE_FLAG_MTU_CHANGED, pf->flags); -- cgit v1.2.3 From 5f16da6ee6ac32e6c8098bc4cfcc4f170694f9da Mon Sep 17 00:00:00 2001 From: Sridhar Samudrala Date: Fri, 9 Jun 2023 17:40:23 -0700 Subject: ice: Fix max_rate check while configuring TX rate limits Remove incorrect check in ice_validate_mqprio_opt() that limits filter configuration when sum of max_rates of all TCs exceeds the link speed. The max rate of each TC is unrelated to value used by other TCs and is valid as long as it is less than link speed. Fixes: fbc7b27af0f9 ("ice: enable ndo_setup_tc support for mqprio_qdisc") Signed-off-by: Sridhar Samudrala Signed-off-by: Sudheer Mogilappagari Tested-by: Bharathi Sreenivas Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_main.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet/intel/ice/ice_main.c') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 93979ab18bc1..64efe4c83a3e 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -7872,10 +7872,10 @@ static int ice_validate_mqprio_qopt(struct ice_vsi *vsi, struct tc_mqprio_qopt_offload *mqprio_qopt) { - u64 sum_max_rate = 0, sum_min_rate = 0; int non_power_of_2_qcount = 0; struct ice_pf *pf = vsi->back; int max_rss_q_cnt = 0; + u64 sum_min_rate = 0; struct device *dev; int i, speed; u8 num_tc; @@ -7891,6 +7891,7 @@ ice_validate_mqprio_qopt(struct ice_vsi *vsi, dev = ice_pf_to_dev(pf); vsi->ch_rss_size = 0; num_tc = mqprio_qopt->qopt.num_tc; + speed = ice_get_link_speed_kbps(vsi); for (i = 0; num_tc; i++) { int qcount = mqprio_qopt->qopt.count[i]; @@ -7931,7 +7932,6 @@ ice_validate_mqprio_qopt(struct ice_vsi *vsi, */ max_rate = mqprio_qopt->max_rate[i]; max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR); - sum_max_rate += max_rate; /* min_rate is minimum guaranteed rate and it can't be zero */ min_rate = mqprio_qopt->min_rate[i]; @@ -7944,6 +7944,12 @@ ice_validate_mqprio_qopt(struct ice_vsi *vsi, return -EINVAL; } + if (max_rate && max_rate > speed) { + dev_err(dev, "TC%d: max_rate(%llu Kbps) > link speed of %u Kbps\n", + i, max_rate, speed); + return -EINVAL; + } + iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem); if (rem) { dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps", @@ -7981,12 +7987,6 @@ 
ice_validate_mqprio_qopt(struct ice_vsi *vsi, (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) return -EINVAL; - speed = ice_get_link_speed_kbps(vsi); - if (sum_max_rate && sum_max_rate > (u64)speed) { - dev_err(dev, "Invalid max Tx rate(%llu) Kbps > speed(%u) Kbps specified\n", - sum_max_rate, speed); - return -EINVAL; - } if (sum_min_rate && sum_min_rate > (u64)speed) { dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n", sum_min_rate, speed); -- cgit v1.2.3 From 479cdfe388a04a16fdd127f3e9e9e019e45e5573 Mon Sep 17 00:00:00 2001 From: Sridhar Samudrala Date: Fri, 9 Jun 2023 17:40:24 -0700 Subject: ice: Fix tx queue rate limit when TCs are configured Configuring tx_maxrate via sysfs interface /sys/class/net/eth0/queues/tx-1/tx_maxrate was not working when TCs are configured because always main VSI was being used. Fix by using correct VSI in ice_set_tx_maxrate when TCs are configured. Fixes: 1ddef455f4a8 ("ice: Add NDO callback to set the maximum per-queue bitrate") Signed-off-by: Sridhar Samudrala Signed-off-by: Sudheer Mogilappagari Tested-by: Bharathi Sreenivas Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_main.c | 7 +++++++ drivers/net/ethernet/intel/ice/ice_tc_lib.c | 22 +++++++++++----------- drivers/net/ethernet/intel/ice/ice_tc_lib.h | 1 + 3 files changed, 19 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet/intel/ice/ice_main.c') diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 64efe4c83a3e..19a5e7f3a075 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -5739,6 +5739,13 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) q_handle = vsi->tx_rings[queue_index]->q_handle; tc = ice_dcb_get_tc(vsi, queue_index); + vsi = ice_locate_vsi_using_queue(vsi, queue_index); + if (!vsi) { + netdev_err(netdev, "Invalid VSI for given queue %d\n", + queue_index); + return -EINVAL; + } + /* Set BW back to default, when user set maxrate to 0 */ if (!maxrate) status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc, diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index b54052ef6050..4a34ef5f58d3 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -750,17 +750,16 @@ exit: /** * ice_locate_vsi_using_queue - locate VSI using queue (forward to queue action) * @vsi: Pointer to VSI - * @tc_fltr: Pointer to tc_flower_filter + * @queue: Queue index * - * Locate the VSI using specified queue. When ADQ is not enabled, always - * return input VSI, otherwise locate corresponding VSI based on per channel - * offset and qcount + * Locate the VSI using specified "queue". 
When ADQ is not enabled, + * always return input VSI, otherwise locate corresponding + * VSI based on per channel "offset" and "qcount" */ -static struct ice_vsi * -ice_locate_vsi_using_queue(struct ice_vsi *vsi, - struct ice_tc_flower_fltr *tc_fltr) +struct ice_vsi * +ice_locate_vsi_using_queue(struct ice_vsi *vsi, int queue) { - int num_tc, tc, queue; + int num_tc, tc; /* if ADQ is not active, passed VSI is the candidate VSI */ if (!ice_is_adq_active(vsi->back)) @@ -770,7 +769,6 @@ ice_locate_vsi_using_queue(struct ice_vsi *vsi, * upon queue number) */ num_tc = vsi->mqprio_qopt.qopt.num_tc; - queue = tc_fltr->action.fwd.q.queue; for (tc = 0; tc < num_tc; tc++) { int qcount = vsi->mqprio_qopt.qopt.count[tc]; @@ -812,6 +810,7 @@ ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr) struct ice_pf *pf = vsi->back; struct device *dev; u32 tc_class; + int q; dev = ice_pf_to_dev(pf); @@ -840,7 +839,8 @@ ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr) /* Determine destination VSI even though the action is * FWD_TO_QUEUE, because QUEUE is associated with VSI */ - dest_vsi = tc_fltr->dest_vsi; + q = tc_fltr->action.fwd.q.queue; + dest_vsi = ice_locate_vsi_using_queue(vsi, q); break; default: dev_err(dev, @@ -1716,7 +1716,7 @@ ice_tc_forward_to_queue(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr, /* If ADQ is configured, and the queue belongs to ADQ VSI, then prepare * ADQ switch filter */ - ch_vsi = ice_locate_vsi_using_queue(vsi, fltr); + ch_vsi = ice_locate_vsi_using_queue(vsi, fltr->action.fwd.q.queue); if (!ch_vsi) return -EINVAL; fltr->dest_vsi = ch_vsi; diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h index 8bbc1a62bdb1..65d387163a46 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h @@ -204,6 +204,7 @@ static inline int ice_chnl_dmac_fltr_cnt(struct ice_pf *pf) return pf->num_dmac_chnl_fltrs; } +struct ice_vsi *ice_locate_vsi_using_queue(struct ice_vsi *vsi, int queue); int ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi, struct flow_cls_offload *cls_flower); -- cgit v1.2.3
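To make the per-channel lookup in ice_locate_vsi_using_queue() concrete: with mqprio/ADQ active, the queue index is resolved by finding the traffic class whose [offset, offset + count) range contains it, and that TC's channel VSI becomes the destination. The small, self-contained program below illustrates only that range arithmetic with made-up values; it is not driver code.

	#include <stdio.h>

	/* Return the TC whose queue range contains @queue, or -1 if none does. */
	static int locate_tc(const int *offset, const int *count, int num_tc, int queue)
	{
		for (int tc = 0; tc < num_tc; tc++) {
			if (queue >= offset[tc] && queue < offset[tc] + count[tc])
				return tc;
		}

		return -1;
	}

	int main(void)
	{
		/* Example layout: TC0 owns queues 0-3, TC1 owns 4-7, TC2 owns 8-9. */
		const int offset[] = { 0, 4, 8 };
		const int count[]  = { 4, 4, 2 };

		for (int q = 0; q < 10; q += 3)
			printf("queue %d -> TC %d\n", q, locate_tc(offset, count, 3, q));

		return 0;
	}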