author		Sudheer Mogilappagari <sudheer.mogilappagari@intel.com>	2024-10-09 11:10:00 +0300
committer	Jakub Kicinski <kuba@kernel.org>	2024-10-10 18:30:23 +0300
commit		ef490bbb2267023f3ce60aaf07df10b3a031fb59 (patch)
tree		99ee89a2c9d55480a768ec53dbd8ef2891782bdf /drivers/net/ethernet/intel/iavf/iavf_main.c
parent		015307754a19832dd665295f6c123289b0f37ba6 (diff)
download	linux-ef490bbb2267023f3ce60aaf07df10b3a031fb59.tar.xz
iavf: Add net_shaper_ops support
Implement net_shaper_ops support for IAVF. This enables rate-limit
configuration on a per-queue basis, so that a user can enforce a
bandwidth limit on Tx traffic steered to a given queue.

To set a rate limit on a queue, update the shaper object of the given
queue in the driver and send VIRTCHNL_OP_CONFIG_QUEUE_BW to the PF to
update the HW configuration.

Deleting the shaper configured for a queue is equivalent to configuring
the shaper with bw_max 0; the PF restores the default rate-limiting
configuration when bw_max is zero.

Reviewed-by: Jiri Pirko <jiri@nvidia.com>
Signed-off-by: Sudheer Mogilappagari <sudheer.mogilappagari@intel.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Link: https://patch.msgid.link/5a882cb51998c4c2c3d21fed521498eba1c8f079.1728460186.git.pabeni@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
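For context, below is a minimal sketch of the kind of payload the AQ handler
(iavf_cfg_queues_bw(), added in the companion iavf_virtchnl.c patch) is
expected to hand to the PF for VIRTCHNL_OP_CONFIG_QUEUE_BW. The struct and
field names (virtchnl_queues_bw_cfg, virtchnl_shaper_bw) and the helper
iavf_send_pf_msg() are assumptions based on the companion virtchnl patches
in this series, not definitions made by this patch:

	/* Hedged sketch only: configure a 100 Mbit/s cap on Tx queue 3.
	 * Struct layout and helper names are assumed from the companion
	 * virtchnl changes; units on the wire are Kbps, hence the
	 * div_u64(bw, 1000) conversions done in iavf_shaper_set() below.
	 */
	struct virtchnl_queues_bw_cfg *bw_cfg;
	size_t len = struct_size(bw_cfg, cfg, 1);

	bw_cfg = kzalloc(len, GFP_KERNEL);
	if (!bw_cfg)
		return -ENOMEM;

	bw_cfg->vsi_id = adapter->vsi.id;
	bw_cfg->num_queues = 1;
	bw_cfg->cfg[0].queue_id = 3;		/* Tx queue to limit */
	bw_cfg->cfg[0].shaper.peak = 100000;	/* bw_max in Kbps */
	bw_cfg->cfg[0].shaper.committed = 0;	/* no bw_min guarantee */

	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_QUEUE_BW,
			 (u8 *)bw_cfg, len);
	kfree(bw_cfg);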
Diffstat (limited to 'drivers/net/ethernet/intel/iavf/iavf_main.c')
 drivers/net/ethernet/intel/iavf/iavf_main.c | 112 ++++++++++++++++++++++++++-
 1 file changed, 111 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index f782402cd789..7764d8ce7f4e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1972,8 +1972,11 @@ static void iavf_finish_config(struct work_struct *work)
 	adapter = container_of(work, struct iavf_adapter, finish_config);
 
-	/* Always take RTNL first to prevent circular lock dependency */
+	/* Always take RTNL first to prevent circular lock dependency;
+	 * the dev->lock is needed to update the queue number.
+	 */
 	rtnl_lock();
+	mutex_lock(&adapter->netdev->lock);
 	mutex_lock(&adapter->crit_lock);
 
 	if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
@@ -2017,6 +2020,7 @@ static void iavf_finish_config(struct work_struct *work)
 out:
 	mutex_unlock(&adapter->crit_lock);
+	mutex_unlock(&adapter->netdev->lock);
 	rtnl_unlock();
 }
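The finish_config, reset-task and queue-reconfiguration hunks below all
follow the same lock ordering. As a reading aid, here is a sketch of the
ordering this patch establishes (not driver code; the names are taken from
the diff itself, and RTNL is only held on the paths that already take it):

	rtnl_lock();				/* 1. RTNL, when held at all */
	mutex_lock(&adapter->netdev->lock);	/* 2. guards queue-number updates */
	mutex_lock(&adapter->crit_lock);	/* 3. iavf critical section */

	/* ... update queue count / shaper state ... */

	mutex_unlock(&adapter->crit_lock);
	mutex_unlock(&adapter->netdev->lock);
	rtnl_unlock();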
@@ -2085,6 +2089,11 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
 		return 0;
 	}
 
+	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW) {
+		iavf_cfg_queues_bw(adapter);
+		return 0;
+	}
+
 	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
 		iavf_configure_queues(adapter);
 		return 0;
@@ -2919,6 +2928,30 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
 }
 
 /**
+ * iavf_reconfig_qs_bw - mark queue shapers for replay after a reset
+ * @adapter: board private structure
+ *
+ * After a reset, the shaper parameters of the queues need to be replayed.
+ * Since the net_shaper objects inside the Tx rings persist across the reset,
+ * set the update flag for all queues that have a shaper configured so that
+ * the virtchnl message is triggered for each of them.
+ **/
+static void iavf_reconfig_qs_bw(struct iavf_adapter *adapter)
+{
+	int i, num = 0;
+
+	for (i = 0; i < adapter->num_active_queues; i++)
+		if (adapter->tx_rings[i].q_shaper.bw_min ||
+		    adapter->tx_rings[i].q_shaper.bw_max) {
+			adapter->tx_rings[i].q_shaper_update = true;
+			num++;
+		}
+
+	if (num)
+		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
+}
+
+/**
  * iavf_reset_task - Call-back task to handle hardware reset
  * @work: pointer to work_struct
 *
@@ -2944,10 +2977,12 @@ static void iavf_reset_task(struct work_struct *work)
 	/* When device is being removed it doesn't make sense to run the reset
 	 * task, just return in such a case.
 	 */
+	mutex_lock(&netdev->lock);
 	if (!mutex_trylock(&adapter->crit_lock)) {
 		if (adapter->state != __IAVF_REMOVE)
 			queue_work(adapter->wq, &adapter->reset_task);
 
+		mutex_unlock(&netdev->lock);
 		return;
 	}
 
@@ -2995,6 +3030,7 @@ static void iavf_reset_task(struct work_struct *work)
 			reg_val);
 		iavf_disable_vf(adapter);
 		mutex_unlock(&adapter->crit_lock);
+		mutex_unlock(&netdev->lock);
 		return; /* Do not attempt to reinit. It's dead, Jim. */
 	}
@@ -3124,6 +3160,8 @@ continue_reset:
 		iavf_up_complete(adapter);
 
 		iavf_irq_enable(adapter, true);
+
+		iavf_reconfig_qs_bw(adapter);
 	} else {
 		iavf_change_state(adapter, __IAVF_DOWN);
 		wake_up(&adapter->down_waitqueue);
@@ -3133,6 +3171,7 @@ continue_reset:
 	wake_up(&adapter->reset_waitqueue);
 	mutex_unlock(&adapter->crit_lock);
+	mutex_unlock(&netdev->lock);
 
 	return;
 
 reset_err:
@@ -3143,6 +3182,7 @@ reset_err:
 	iavf_disable_vf(adapter);
 	mutex_unlock(&adapter->crit_lock);
+	mutex_unlock(&netdev->lock);
 	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
 }
@@ -3614,8 +3654,10 @@ exit:
 	if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
 		return 0;
 
+	mutex_lock(&netdev->lock);
 	netif_set_real_num_rx_queues(netdev, total_qps);
 	netif_set_real_num_tx_queues(netdev, total_qps);
+	mutex_unlock(&netdev->lock);
 
 	return ret;
 }
@@ -4893,6 +4935,73 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev,
 	return iavf_fix_strip_features(adapter, features);
 }
 
+static int
+iavf_shaper_set(struct net_shaper_binding *binding,
+		const struct net_shaper *shaper,
+		struct netlink_ext_ack *extack)
+{
+	struct iavf_adapter *adapter = netdev_priv(binding->netdev);
+	const struct net_shaper_handle *handle = &shaper->handle;
+	struct iavf_ring *tx_ring;
+
+	mutex_lock(&adapter->crit_lock);
+	if (handle->id >= adapter->num_active_queues)
+		goto unlock;
+
+	tx_ring = &adapter->tx_rings[handle->id];
+
+	tx_ring->q_shaper.bw_min = div_u64(shaper->bw_min, 1000);
+	tx_ring->q_shaper.bw_max = div_u64(shaper->bw_max, 1000);
+	tx_ring->q_shaper_update = true;
+
+	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
+
+unlock:
+	mutex_unlock(&adapter->crit_lock);
+	return 0;
+}
+
+static int iavf_shaper_del(struct net_shaper_binding *binding,
+			   const struct net_shaper_handle *handle,
+			   struct netlink_ext_ack *extack)
+{
+	struct iavf_adapter *adapter = netdev_priv(binding->netdev);
+	struct iavf_ring *tx_ring;
+
+	mutex_lock(&adapter->crit_lock);
+	if (handle->id >= adapter->num_active_queues)
+		goto unlock;
+
+	tx_ring = &adapter->tx_rings[handle->id];
+	tx_ring->q_shaper.bw_min = 0;
+	tx_ring->q_shaper.bw_max = 0;
+	tx_ring->q_shaper_update = true;
+
+	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
+
+unlock:
+	mutex_unlock(&adapter->crit_lock);
+	return 0;
+}
+
+static void iavf_shaper_cap(struct net_shaper_binding *binding,
+			    enum net_shaper_scope scope,
+			    unsigned long *flags)
+{
+	if (scope != NET_SHAPER_SCOPE_QUEUE)
+		return;
+
+	*flags = BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MIN) |
+		 BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX) |
+		 BIT(NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS);
+}
+
+static const struct net_shaper_ops iavf_shaper_ops = {
+	.set		= iavf_shaper_set,
+	.delete		= iavf_shaper_del,
+	.capabilities	= iavf_shaper_cap,
+};
+
 static const struct net_device_ops iavf_netdev_ops = {
 	.ndo_open		= iavf_open,
 	.ndo_stop		= iavf_close,
@@ -4908,6 +5017,7 @@ static const struct net_device_ops iavf_netdev_ops = {
 	.ndo_fix_features	= iavf_fix_features,
 	.ndo_set_features	= iavf_set_features,
 	.ndo_setup_tc		= iavf_setup_tc,
+	.net_shaper_ops		= &iavf_shaper_ops,
 };
 
 /**
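As a usage illustration, here is a hedged sketch of what reaches
iavf_shaper_set() when userspace issues a net-shaper "set" on queue 3 of
this device. The binding/handle/shaper types come from include/net/net_shaper.h
added earlier in this series; the field names used here are assumptions, and
in real operation the call is made by the net-shaper core through
iavf_shaper_ops.set, never directly. Note that bw_min/bw_max arrive in bits
per second and the driver converts them to the Kbps units of the virtchnl
message via div_u64(..., 1000).

	/* Hedged sketch: emulate the net-shaper core calling ops->set for
	 * queue 3 with a 100 Mbit/s cap. Type and field names are assumed
	 * from include/net/net_shaper.h in this series.
	 */
	struct net_shaper_binding binding = {
		.type	= NET_SHAPER_BINDING_TYPE_NETDEV,
		.netdev	= netdev,
	};
	struct net_shaper shaper = {
		.handle	= {
			.scope	= NET_SHAPER_SCOPE_QUEUE,
			.id	= 3,
		},
		.metric	= NET_SHAPER_METRIC_BPS,
		.bw_max	= 100000000ULL,	/* 100 Mbit/s; stored as 100000 Kbps */
	};

	iavf_shaper_set(&binding, &shaper, NULL);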