author     David S. Miller <davem@davemloft.net>    2023-07-28 13:03:57 +0300
committer  David S. Miller <davem@davemloft.net>    2023-07-28 13:03:57 +0300
commit     f5fbd3246c068d4c67e4259f39c1bd2f27110d7a (patch)
tree       a5a56f03a3a93787e2bc1dd2a7abe6a5f3440e21 /drivers/net/ethernet/intel/ice/ice_sched.c
parent     7f6c40391a048c5d0f593f285bee45f7f98a3ca4 (diff)
parent     3579aa86fb4046a378f7d68583cfb7f62e0feabd (diff)
download   linux-f5fbd3246c068d4c67e4259f39c1bd2f27110d7a.tar.xz
Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue
Tony Nguyen says:
====================
ice: Implement support for SRIOV + LAG
Dave Ertman says:
Implement support for SRIOV VFs on interfaces that are part of an
aggregate interface.
The first interface added to the aggregate is flagged as the primary
interface, and this primary interface is responsible for managing the
VFs' resources. VFs created on the primary interface are the only VFs
supported on the aggregate.
Only active-backup mode is supported, and only for aggregates whose
primary interface is in switchdev mode.
The ice-lag DDP package must be loaded to support this feature.
Additional restrictions on which interfaces can be added to the aggregate
while still supporting SRIOV VFs (an illustrative sketch of such a check
follows this list):
- all interfaces must be on the same physical NIC
- all interfaces must have the same QoS settings
- all interfaces must have the FW LLDP agent disabled
- only the primary interface may be put into switchdev mode
- the aggregate may contain no more than two interfaces
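
The sketch below is purely illustrative: it condenses the restrictions above
into a single compatibility check so the rules read concretely. The structure
fields and helper names (struct lag_iface, lag_supports_sriov(), and so on)
are invented for this example and are not the ice driver's API.

#include <stdbool.h>

#define MAX_LAG_MEMBERS	2

struct lag_iface {
	bool is_primary;       /* first interface added to the aggregate */
	bool switchdev_mode;   /* eswitch mode == switchdev */
	bool fw_lldp_enabled;  /* FW LLDP agent state */
	int  phys_nic_id;      /* identifies the physical NIC */
	int  qos_profile_id;   /* stand-in for "same QoS settings" */
};

static bool lag_supports_sriov(const struct lag_iface *members, int count)
{
	const struct lag_iface *primary = NULL;
	int i;

	/* no more than two interfaces in the aggregate */
	if (count < 1 || count > MAX_LAG_MEMBERS)
		return false;

	for (i = 0; i < count; i++) {
		if (members[i].is_primary)
			primary = &members[i];
		/* every member must have the FW LLDP agent disabled */
		if (members[i].fw_lldp_enabled)
			return false;
	}

	/* only aggregates whose primary is in switchdev mode qualify */
	if (!primary || !primary->switchdev_mode)
		return false;

	for (i = 0; i < count; i++) {
		/* all members must share one physical NIC and QoS config */
		if (members[i].phys_nic_id != primary->phys_nic_id ||
		    members[i].qos_profile_id != primary->qos_profile_id)
			return false;
		/* only the primary interface may be in switchdev mode */
		if (!members[i].is_primary && members[i].switchdev_mode)
			return false;
	}

	return true;
}

In the driver these rules are enforced as bonding/netdev events arrive; the
single-function form here only exists to make the list above concrete.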
---
v2:
- Move NULL check for q_ctx in ice_lag_qbuf_recfg() earlier (patch 6)
v1: https://lore.kernel.org/netdev/20230726182141.3797928-1-anthony.l.nguyen@intel.com/
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_sched.c')
-rw-r--r-- | drivers/net/ethernet/intel/ice/ice_sched.c | 37 |
1 file changed, 25 insertions(+), 12 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index b664d60fd037..f4677704b95e 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -447,7 +447,7 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
  *
  * Move scheduling elements (0x0408)
  */
-static int
+int
 ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
                         struct ice_aqc_move_elem *buf, u16 buf_size,
                         u16 *grps_movd, struct ice_sq_cd *cd)
@@ -526,7 +526,7 @@ ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
  *
  * This function suspends or resumes HW nodes
  */
-static int
+int
 ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
                                bool suspend)
 {
@@ -569,18 +569,24 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
 {
         struct ice_vsi_ctx *vsi_ctx;
         struct ice_q_ctx *q_ctx;
+        u16 idx;
 
         vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
         if (!vsi_ctx)
                 return -EINVAL;
         /* allocate LAN queue contexts */
         if (!vsi_ctx->lan_q_ctx[tc]) {
-                vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
-                                                      new_numqs,
-                                                      sizeof(*q_ctx),
-                                                      GFP_KERNEL);
-                if (!vsi_ctx->lan_q_ctx[tc])
+                q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
+                                     sizeof(*q_ctx), GFP_KERNEL);
+                if (!q_ctx)
                         return -ENOMEM;
+
+                for (idx = 0; idx < new_numqs; idx++) {
+                        q_ctx[idx].q_handle = ICE_INVAL_Q_HANDLE;
+                        q_ctx[idx].q_teid = ICE_INVAL_TEID;
+                }
+
+                vsi_ctx->lan_q_ctx[tc] = q_ctx;
                 vsi_ctx->num_lan_q_entries[tc] = new_numqs;
                 return 0;
         }
@@ -592,9 +598,16 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
                                      sizeof(*q_ctx), GFP_KERNEL);
                 if (!q_ctx)
                         return -ENOMEM;
+
                 memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
                        prev_num * sizeof(*q_ctx));
                 devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
+
+                for (idx = prev_num; idx < new_numqs; idx++) {
+                        q_ctx[idx].q_handle = ICE_INVAL_Q_HANDLE;
+                        q_ctx[idx].q_teid = ICE_INVAL_TEID;
+                }
+
                 vsi_ctx->lan_q_ctx[tc] = q_ctx;
                 vsi_ctx->num_lan_q_entries[tc] = new_numqs;
         }
@@ -1044,7 +1057,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
  *
  * This function add nodes to a given layer.
  */
-static int
+int
 ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
                              struct ice_sched_node *tc_node,
                              struct ice_sched_node *parent, u8 layer,
@@ -1119,7 +1132,7 @@ static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
  *
  * This function returns the current VSI layer number
  */
-static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
+u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
 {
         /* Num Layers       VSI layer
          *     9               6
@@ -1142,7 +1155,7 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
  *
  * This function returns the current aggregator layer number
  */
-static u8 ice_sched_get_agg_layer(struct ice_hw *hw)
+u8 ice_sched_get_agg_layer(struct ice_hw *hw)
 {
         /* Num Layers       aggregator layer
          *     9               4
@@ -1577,7 +1590,7 @@ ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
  * This function retrieves an aggregator node for a given aggregator ID from
  * a given TC branch
  */
-static struct ice_sched_node *
+struct ice_sched_node *
 ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
                        u32 agg_id)
 {
@@ -2139,7 +2152,7 @@ ice_get_agg_info(struct ice_hw *hw, u32 agg_id)
  * This function walks through the aggregator subtree to find a free parent
  * node
  */
-static struct ice_sched_node *
+struct ice_sched_node *
 ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
                               u16 *num_nodes)
 {
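
Two things happen in the hunks above. First, several scheduler helpers
(ice_aq_move_sched_elems(), ice_sched_suspend_resume_elems(),
ice_sched_add_nodes_to_layer(), ice_sched_get_vsi_layer(),
ice_sched_get_agg_layer(), ice_sched_get_agg_node(),
ice_sched_get_free_vsi_parent()) lose their static qualifier so the new LAG
code elsewhere in the driver can call them; their prototypes would then be
exposed through a driver header such as ice_sched.h. Second,
ice_alloc_lan_q_ctx() now follows a grow-and-copy pattern: allocate a larger
queue-context array, copy the entries already in use, and set every newly
added slot to sentinel values (ICE_INVAL_Q_HANDLE / ICE_INVAL_TEID) so later
lookups can tell an unconfigured queue apart from a configured one. The
stand-alone sketch below shows only that pattern, using plain calloc()/free()
instead of the devm_* helpers; the names and sentinel values are placeholders,
not the driver's definitions.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define INVAL_Q_HANDLE  0xFFFF       /* stand-in for ICE_INVAL_Q_HANDLE */
#define INVAL_TEID      0xFFFFFFFFu  /* stand-in for ICE_INVAL_TEID */

struct q_ctx {
        uint16_t q_handle;
        uint32_t q_teid;
};

/* Grow @ctx from @prev_num to @new_num entries.  Newly added slots are
 * initialized to "invalid" so callers can detect unconfigured queues.
 * Returns the new array, or NULL on allocation failure (old array kept).
 */
static struct q_ctx *grow_q_ctx(struct q_ctx *ctx, uint16_t prev_num,
                                uint16_t new_num)
{
        struct q_ctx *new_ctx;
        uint16_t idx;

        new_ctx = calloc(new_num, sizeof(*new_ctx));
        if (!new_ctx)
                return NULL;

        /* keep the contexts that are already in use */
        memcpy(new_ctx, ctx, (size_t)prev_num * sizeof(*new_ctx));
        free(ctx);

        /* mark the tail as "not configured yet" */
        for (idx = prev_num; idx < new_num; idx++) {
                new_ctx[idx].q_handle = INVAL_Q_HANDLE;
                new_ctx[idx].q_teid = INVAL_TEID;
        }

        return new_ctx;
}

A caller would grow the table with something like
new = grow_q_ctx(old, 4, 8); and treat any entry whose q_handle equals
INVAL_Q_HANDLE as not yet configured, which is the property the driver change
relies on when reconfiguring queues during LAG failover.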