author    | Brett Creeley <brett.creeley@intel.com> | 2020-05-12 04:01:46 +0300
committer | Jeff Kirsher <jeffrey.t.kirsher@intel.com> | 2020-05-23 08:02:34 +0300
commit    | 28bf26724fdb0e02267d19e280d6717ee810a10d (patch)
tree      | 01f86d0cd766635172c7afe408408928f8202c14 /drivers/net/ethernet/intel/ice/ice_main.c
parent    | 83af00395190bc2db05a67a417a2ea0d5967b74d (diff)
download  | linux-28bf26724fdb0e02267d19e280d6717ee810a10d.tar.xz
ice: Implement aRFS
Enable accelerated Receive Flow Steering (aRFS), which is used to steer Rx
flows to a specific queue. This functionality is triggered by the network
stack through ndo_rx_flow_steer and requires Flow Director (ntuple on) to
function.
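For context, a steering hook registered through ndo_rx_flow_steer has the
signature sketched below. This is only an illustrative skeleton:
example_rx_flow_steer() is a made-up name and the body is an outline, not
the driver's ice_rx_flow_steer().

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative skeleton of an ndo_rx_flow_steer handler; not the ice
 * implementation. The stack calls this hook (CONFIG_RFS_ACCEL built in,
 * ntuple on) with the received SKB, the desired Rx queue index and a
 * flow ID it assigns.
 */
static int example_rx_flow_steer(struct net_device *netdev,
                                 const struct sk_buff *skb,
                                 u16 rxq_idx, u32 flow_id)
{
        /* 1. Parse the flow (IP protocol, addresses, ports) from the SKB.
         * 2. Add or update a software filter entry keyed by flow_id,
         *    recording rxq_idx as the target queue.
         * 3. Return a filter identifier, or a negative errno.
         */
        return -EOPNOTSUPP;
}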
Each aRFS filter entry tracks three things: the fltr_info is used to
add/remove/update flow rules in HW, the fltr_state determines what to do
with the filter with respect to HW and/or SW, and the flow_id is used in
coordination with the network stack.
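A rough sketch of what such an entry might look like follows. The example_*
names and the rxq_idx field are illustrative, chosen to match the wording
above rather than taken from the driver's headers; only struct ice_fdir_fltr
and struct hlist_node are real types.

#include <linux/types.h>
#include <linux/list.h>

/* Hypothetical aRFS bookkeeping entry, modeled on the description above. */
enum example_arfs_state {
        EXAMPLE_ARFS_INACTIVE,  /* needs to be programmed into HW */
        EXAMPLE_ARFS_ACTIVE,    /* currently programmed in HW */
        EXAMPLE_ARFS_TODEL,     /* removed from HW; SW entry can be freed */
};

struct example_arfs_entry {
        struct ice_fdir_fltr fltr_info; /* rule added/removed/updated in HW */
        struct hlist_node list_entry;   /* hash table linkage */
        u32 flow_id;                    /* ID handed to us by the stack */
        u16 rxq_idx;                    /* current target Rx queue */
        u8 fltr_state;                  /* enum example_arfs_state */
};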
The work for aRFS is split into two paths: the ndo_rx_flow_steer
operation and the ice_service_task. The former is where the kernel hands
us an Rx SKB, among other items, to set up aRFS, and the latter is where
the driver adds/updates/removes filter rules in HW and updates the filter
state.
In the Rx path the following things can happen (see the sketch after this list):
1. New aRFS entries are added to the hash table and the state is
set to ICE_ARFS_INACTIVE so the filter can be updated in HW
by the ice_service_task path.
2. aRFS entries have their Rx Queue updated if we receive a
pre-existing flow_id and the filter state is ICE_ARFS_ACTIVE.
The state is set to ICE_ARFS_INACTIVE so the filter can be
updated in HW by the ice_service_task path.
3. aRFS entries marked as ICE_ARFS_TODEL are deleted.
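Putting these Rx-path cases together, the flow is roughly the following.
The example_* helpers, the rxq_idx field, and returning flow_id as the
filter ID are illustrative assumptions; ice_service_task_schedule() is the
real function this patch makes non-static (see the diff below) so other
files can call it.

/* Illustrative only: rough shape of the ndo_rx_flow_steer path. */
static int example_arfs_steer(struct ice_pf *pf, u32 flow_id, u16 rxq_idx)
{
        struct example_arfs_entry *e;

        e = example_arfs_lookup(pf, flow_id);   /* hypothetical hash lookup */
        if (!e) {
                /* case 1: new flow -> add an entry and let the service
                 * task program it into HW
                 */
                e = example_arfs_add(pf, flow_id, rxq_idx);
                if (!e)
                        return -ENOMEM;
                e->fltr_state = EXAMPLE_ARFS_INACTIVE;
        } else if (e->fltr_state == EXAMPLE_ARFS_ACTIVE &&
                   e->rxq_idx != rxq_idx) {
                /* case 2: known flow moved to another queue -> record the
                 * new queue and mark the entry INACTIVE so the service
                 * task reprograms HW
                 */
                e->rxq_idx = rxq_idx;
                e->fltr_state = EXAMPLE_ARFS_INACTIVE;
        }
        /* case 3: entries already marked TODEL are freed in this path as
         * well (not shown here)
         */

        ice_service_task_schedule(pf);
        return flow_id; /* illustrative: reuse flow_id as the filter ID */
}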
In the ice_service_task path the following things can happen (see the sketch after this list):
1. aRFS entries marked as ICE_ARFS_INACTIVE are added to or updated
in HW, and their state is updated to ICE_ARFS_ACTIVE.
2. aRFS entries are deleted from HW and their state is updated
to ICE_ARFS_TODEL.
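The service-task side, entered via the ice_sync_arfs_fltrs(pf) call added in
the diff below, then has roughly this shape. The example_* iteration and HW
helpers are illustrative assumptions, not driver APIs.

/* Illustrative only: rough shape of the service-task aRFS sync. */
static void example_arfs_sync(struct ice_pf *pf)
{
        struct example_arfs_entry *e;

        /* 1. Program every INACTIVE entry into HW, then mark it ACTIVE. */
        example_arfs_for_each_inactive(pf, e) {
                if (!example_arfs_program_hw(pf, &e->fltr_info))
                        e->fltr_state = EXAMPLE_ARFS_ACTIVE;
        }

        /* 2. Remove entries that should no longer steer traffic from HW
         *    and mark them TODEL (ICE_ARFS_TODEL in the driver) so the Rx
         *    path can free the SW entry.
         */
        example_arfs_for_each_expiring(pf, e) {
                example_arfs_remove_hw(pf, &e->fltr_info);
                e->fltr_state = EXAMPLE_ARFS_TODEL;
        }
}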
Signed-off-by: Brett Creeley <brett.creeley@intel.com>
Signed-off-by: Madhu Chittim <madhu.chittim@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_main.c')
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_main.c | 48
1 file changed, 37 insertions, 11 deletions
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index d06a3311a2dc..c69567210584 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1113,7 +1113,7 @@ static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
  *
  * If not already scheduled, this puts the task into the work queue.
  */
-static void ice_service_task_schedule(struct ice_pf *pf)
+void ice_service_task_schedule(struct ice_pf *pf)
 {
         if (!test_bit(__ICE_SERVICE_DIS, pf->state) &&
             !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) &&
@@ -1483,7 +1483,7 @@ static void ice_service_task(struct work_struct *work)
         ice_process_vflr_event(pf);
         ice_clean_mailboxq_subtask(pf);
-
+        ice_sync_arfs_fltrs(pf);
         /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
         ice_service_task_complete(pf);
@@ -1642,9 +1642,14 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
                 }
 
                 /* register for affinity change notifications */
-                q_vector->affinity_notify.notify = ice_irq_affinity_notify;
-                q_vector->affinity_notify.release = ice_irq_affinity_release;
-                irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
+                if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
+                        struct irq_affinity_notify *affinity_notify;
+
+                        affinity_notify = &q_vector->affinity_notify;
+                        affinity_notify->notify = ice_irq_affinity_notify;
+                        affinity_notify->release = ice_irq_affinity_release;
+                        irq_set_affinity_notifier(irq_num, affinity_notify);
+                }
 
                 /* assign the mask for this irq */
                 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
@@ -1656,8 +1661,9 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
 free_q_irqs:
         while (vector) {
                 vector--;
-                irq_num = pf->msix_entries[base + vector].vector,
-                irq_set_affinity_notifier(irq_num, NULL);
+                irq_num = pf->msix_entries[base + vector].vector;
+                if (!IS_ENABLED(CONFIG_RFS_ACCEL))
+                        irq_set_affinity_notifier(irq_num, NULL);
                 irq_set_affinity_hint(irq_num, NULL);
                 devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
         }
@@ -2611,12 +2617,22 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
          */
         ice_napi_add(vsi);
 
+        status = ice_set_cpu_rx_rmap(vsi);
+        if (status) {
+                dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n",
+                        vsi->vsi_num, status);
+                status = -EINVAL;
+                goto unroll_napi_add;
+        }
         status = ice_init_mac_fltr(pf);
         if (status)
-                goto unroll_napi_add;
+                goto free_cpu_rx_map;
 
         return status;
 
+free_cpu_rx_map:
+        ice_free_cpu_rx_rmap(vsi);
+
 unroll_napi_add:
         if (vsi) {
                 ice_napi_del(vsi);
@@ -3519,6 +3535,8 @@ static void ice_remove(struct pci_dev *pdev)
         ice_service_task_stop(pf);
 
         mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
+        if (!ice_is_safe_mode(pf))
+                ice_remove_arfs(pf);
         ice_devlink_destroy_port(pf);
         ice_vsi_release_all(pf);
         ice_free_irq_msix_misc(pf);
@@ -4036,11 +4054,14 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
                 ret = ice_cfg_vlan_pruning(vsi, false, false);
 
         if ((features & NETIF_F_NTUPLE) &&
-            !(netdev->features & NETIF_F_NTUPLE))
+            !(netdev->features & NETIF_F_NTUPLE)) {
                 ice_vsi_manage_fdir(vsi, true);
-        else if (!(features & NETIF_F_NTUPLE) &&
-                 (netdev->features & NETIF_F_NTUPLE))
+                ice_init_arfs(vsi);
+        } else if (!(features & NETIF_F_NTUPLE) &&
+                 (netdev->features & NETIF_F_NTUPLE)) {
                 ice_vsi_manage_fdir(vsi, false);
+                ice_clear_arfs(vsi);
+        }
 
         return ret;
 }
@@ -4942,6 +4963,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
 
                 /* replay Flow Director filters */
                 ice_fdir_replay_fltrs(pf);
+
+                ice_rebuild_arfs(pf);
         }
 
         ice_update_pf_netdev_link(pf);
@@ -5721,6 +5744,9 @@ static const struct net_device_ops ice_netdev_ops = {
         .ndo_bridge_setlink = ice_bridge_setlink,
         .ndo_fdb_add = ice_fdb_add,
         .ndo_fdb_del = ice_fdb_del,
+#ifdef CONFIG_RFS_ACCEL
+        .ndo_rx_flow_steer = ice_rx_flow_steer,
+#endif
         .ndo_tx_timeout = ice_tx_timeout,
         .ndo_bpf = ice_xdp,
         .ndo_xdp_xmit = ice_xdp_xmit,