Diffstat (limited to 'drivers/net/ethernet/mellanox')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/catas.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/crdump.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 40
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dev.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 55
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 46
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 44
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c | 72
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/htb.c | 722
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/htb.h | 46
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 45
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/qos.c | 813
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/qos.h | 37
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 117
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/selq.c | 51
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/selq.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c | 153
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c | 579
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h | 74
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c | 209
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h | 29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c | 32
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 515
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 58
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 554
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 318
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 208
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c | 408
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/debugfs.c | 182
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 123
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 170
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c | 433
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h | 87
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c | 53
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 99
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c | 99
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c | 56
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Makefile | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/cmd.h | 106
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 123
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h | 63
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_env.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c | 183
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_linecards.c | 405
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/minimal.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c | 138
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | 81
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/port.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 1298
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/resources.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 298
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 69
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c | 82
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c | 62
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 92
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c | 1072
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c | 346
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c | 32
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c | 718
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h | 60
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 1052
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 77
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router_xm.c | 812
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 842
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/trap.h | 4
124 files changed, 10300 insertions(+), 4977 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 5b11557f1ae4..0eb7b83637d8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -204,9 +204,13 @@ out:
static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
{
+ struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
int err = 0;
mlx4_enter_error_state(persist);
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP &&
!(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) {
@@ -215,6 +219,7 @@ static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
err);
}
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
}
static void dump_err_buf(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c
index ac5468b77488..82a07a31cde7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/crdump.c
+++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c
@@ -226,10 +226,10 @@ int mlx4_crdump_init(struct mlx4_dev *dev)
/* Create cr-space region */
crdump->region_crspace =
- devlink_region_create(devlink,
- &region_cr_space_ops,
- MAX_NUM_OF_DUMPS_TO_STORE,
- pci_resource_len(pdev, 0));
+ devl_region_create(devlink,
+ &region_cr_space_ops,
+ MAX_NUM_OF_DUMPS_TO_STORE,
+ pci_resource_len(pdev, 0));
if (IS_ERR(crdump->region_crspace))
mlx4_warn(dev, "crdump: create devlink region %s err %ld\n",
region_cr_space_str,
@@ -237,10 +237,10 @@ int mlx4_crdump_init(struct mlx4_dev *dev)
/* Create fw-health region */
crdump->region_fw_health =
- devlink_region_create(devlink,
- &region_fw_health_ops,
- MAX_NUM_OF_DUMPS_TO_STORE,
- HEALTH_BUFFER_SIZE);
+ devl_region_create(devlink,
+ &region_fw_health_ops,
+ MAX_NUM_OF_DUMPS_TO_STORE,
+ HEALTH_BUFFER_SIZE);
if (IS_ERR(crdump->region_fw_health))
mlx4_warn(dev, "crdump: create devlink region %s err %ld\n",
region_fw_health_str,
@@ -253,6 +253,6 @@ void mlx4_crdump_end(struct mlx4_dev *dev)
{
struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
- devlink_region_destroy(crdump->region_fw_health);
- devlink_region_destroy(crdump->region_crspace);
+ devl_region_destroy(crdump->region_fw_health);
+ devl_region_destroy(crdump->region_crspace);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index af3b2b59a2a6..43a4102e9c09 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -645,7 +645,7 @@ static int get_real_size(const struct sk_buff *skb,
*inline_ok = false;
*hopbyhop = 0;
if (skb->encapsulation) {
- *lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb);
+ *lso_header_size = skb_inner_tcp_all_headers(skb);
} else {
/* Detects large IPV6 TCP packets and prepares for removal of
* HBH header that has been pushed by ip6_xmit(),
@@ -653,7 +653,7 @@ static int get_real_size(const struct sk_buff *skb,
*/
if (ipv6_has_hopopt_jumbo(skb))
*hopbyhop = sizeof(struct hop_jumbo_hdr);
- *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ *lso_header_size = skb_tcp_all_headers(skb);
}
real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE +
ALIGN(*lso_header_size - *hopbyhop + 4, DS_SIZE);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index b187c210d4d6..78c5f40382c9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3033,7 +3033,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
int err;
- err = devlink_port_register(devlink, &info->devlink_port, port);
+ err = devl_port_register(devlink, &info->devlink_port, port);
if (err)
return err;
@@ -3071,7 +3071,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
if (err) {
mlx4_err(dev, "Failed to create file for port %d\n", port);
- devlink_port_unregister(&info->devlink_port);
+ devl_port_unregister(&info->devlink_port);
info->port = -1;
return err;
}
@@ -3093,7 +3093,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_attr);
- devlink_port_unregister(&info->devlink_port);
+ devl_port_unregister(&info->devlink_port);
info->port = -1;
return err;
}
@@ -3109,7 +3109,7 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_mtu_attr);
- devlink_port_unregister(&info->devlink_port);
+ devl_port_unregister(&info->devlink_port);
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(info->rmap);
@@ -3333,6 +3333,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
int total_vfs, int *nvfs, struct mlx4_priv *priv,
int reset_flow)
{
+ struct devlink *devlink = priv_to_devlink(priv);
struct mlx4_dev *dev;
unsigned sum = 0;
int err;
@@ -3341,6 +3342,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
struct mlx4_dev_cap *dev_cap = NULL;
int existing_vfs = 0;
+ devl_assert_locked(devlink);
dev = &priv->dev;
INIT_LIST_HEAD(&priv->ctx_list);
@@ -3999,6 +4001,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv), &pdev->dev);
if (!devlink)
return -ENOMEM;
+ devl_lock(devlink);
priv = devlink_priv(devlink);
dev = &priv->dev;
@@ -4026,6 +4029,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
pci_save_state(pdev);
devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devl_unlock(devlink);
devlink_register(devlink);
return 0;
@@ -4035,6 +4039,7 @@ err_params_unregister:
err_devlink_unregister:
kfree(dev->persist);
err_devlink_free:
+ devl_unlock(devlink);
devlink_free(devlink);
return ret;
}
@@ -4056,8 +4061,11 @@ static void mlx4_unload_one(struct pci_dev *pdev)
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
int pci_dev_data;
+ struct devlink *devlink;
int p, i;
+ devlink = priv_to_devlink(priv);
+ devl_assert_locked(devlink);
if (priv->removed)
return;
@@ -4137,6 +4145,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
devlink_unregister(devlink);
+ devl_lock(devlink);
if (mlx4_is_slave(dev))
persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
@@ -4172,6 +4181,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
devlink_params_unregister(devlink, mlx4_devlink_params,
ARRAY_SIZE(mlx4_devlink_params));
kfree(dev->persist);
+ devl_unlock(devlink);
devlink_free(devlink);
}
@@ -4292,15 +4302,20 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+ struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
mlx4_enter_error_state(persist);
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
@@ -4333,6 +4348,7 @@ static void mlx4_pci_resume(struct pci_dev *pdev)
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+ struct devlink *devlink;
int total_vfs;
int err;
@@ -4340,6 +4356,8 @@ static void mlx4_pci_resume(struct pci_dev *pdev)
total_vfs = dev->persist->num_vfs;
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
+ devlink = priv_to_devlink(priv);
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
@@ -4358,19 +4376,23 @@ static void mlx4_pci_resume(struct pci_dev *pdev)
}
end:
mutex_unlock(&persist->interface_state_mutex);
-
+ devl_unlock(devlink);
}
static void mlx4_shutdown(struct pci_dev *pdev)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
mlx4_info(persist->dev, "mlx4_shutdown was called\n");
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
mlx4_pci_disable_device(dev);
}
@@ -4385,12 +4407,16 @@ static int __maybe_unused mlx4_suspend(struct device *dev_d)
struct pci_dev *pdev = to_pci_dev(dev_d);
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
mlx4_err(dev, "suspend was called\n");
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
return 0;
}
@@ -4402,6 +4428,7 @@ static int __maybe_unused mlx4_resume(struct device *dev_d)
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+ struct devlink *devlink;
int total_vfs;
int ret = 0;
@@ -4409,6 +4436,8 @@ static int __maybe_unused mlx4_resume(struct device *dev_d)
total_vfs = dev->persist->num_vfs;
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
+ devlink = priv_to_devlink(priv);
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs,
@@ -4422,6 +4451,7 @@ static int __maybe_unused mlx4_resume(struct device *dev_d)
}
}
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 9ea867a45764..a3773a8177ed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -17,7 +17,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
fs_counters.o fs_ft_pool.o rl.o lag/debugfs.o lag/lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o \
- fw_reset.o qos.o lib/tout.o
+ fw_reset.o qos.o lib/tout.o lib/aso.o
#
# Netdev basic
@@ -28,7 +28,8 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/rqt.o en/tir.o en/rss.o en/rx_res.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
- en/qos.o en/trap.o en/fs_tt_redirect.o en/selq.o lib/crypto.o
+ en/qos.o en/htb.o en/trap.o en/fs_tt_redirect.o en/selq.o \
+ lib/crypto.o
#
# Netdev extra
@@ -45,7 +46,8 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
esw/indir_table.o en/tc_tun_encap.o \
en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o \
- en/tc/post_act.o en/tc/int_port.o
+ en/tc/post_act.o en/tc/int_port.o en/tc/meter.o \
+ en/tc/post_meter.o
mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/act/trap.o \
en/tc/act/accept.o en/tc/act/mark.o en/tc/act/goto.o \
@@ -53,7 +55,7 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/a
en/tc/act/vlan.o en/tc/act/vlan_mangle.o en/tc/act/mpls.o \
en/tc/act/mirred.o en/tc/act/mirred_nic.o \
en/tc/act/ct.o en/tc/act/sample.o en/tc/act/ptype.o \
- en/tc/act/redirect_ingress.o
+ en/tc/act/redirect_ingress.o en/tc/act/police.o
ifneq ($(CONFIG_MLX5_TC_CT),)
mlx5_core-y += en/tc_ct.o en/tc/ct_fs_dmfs.o
@@ -67,7 +69,7 @@ mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
#
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
ecpf.o rdma.o esw/legacy.o \
- esw/devlink_port.o esw/vporttbl.o esw/qos.o
+ esw/debugfs.o esw/devlink_port.o esw/vporttbl.o esw/qos.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 9caa1b52321b..3e232a65a0c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -166,6 +166,28 @@ static const struct file_operations stats_fops = {
.write = average_write,
};
+static ssize_t slots_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *pos)
+{
+ struct mlx5_cmd *cmd;
+ char tbuf[6];
+ int weight;
+ int field;
+ int ret;
+
+ cmd = filp->private_data;
+ weight = bitmap_weight(&cmd->bitmask, cmd->max_reg_cmds);
+ field = cmd->max_reg_cmds - weight;
+ ret = snprintf(tbuf, sizeof(tbuf), "%d\n", field);
+ return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+}
+
+static const struct file_operations slots_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = slots_read,
+};
+
void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_stats *stats;
@@ -176,6 +198,8 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
cmd = &dev->priv.dbg.cmdif_debugfs;
*cmd = debugfs_create_dir("commands", dev->priv.dbg.dbg_root);
+ debugfs_create_file("slots_inuse", 0400, *cmd, &dev->cmd, &slots_fops);
+
for (i = 0; i < MLX5_CMD_OP_MAX; i++) {
stats = &dev->cmd.stats[i];
namep = mlx5_command_str(i);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 50422b56a64d..0571e40c6ee5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -340,8 +340,10 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
struct auxiliary_driver *adrv;
int ret = 0, i;
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
+ priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
if (!priv->adev[i]) {
bool is_supported = false;
@@ -389,6 +391,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
break;
}
}
+ priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
mutex_unlock(&mlx5_intf_mutex);
return ret;
}
@@ -401,7 +404,9 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
pm_message_t pm = {};
int i;
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
+ priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
if (!priv->adev[i])
continue;
@@ -430,6 +435,7 @@ skip_suspend:
del_adev(&priv->adev[i]->adev);
priv->adev[i] = NULL;
}
+ priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
priv->flags |= MLX5_PRIV_FLAGS_DETACH;
mutex_unlock(&mlx5_intf_mutex);
}
@@ -438,6 +444,7 @@ int mlx5_register_device(struct mlx5_core_dev *dev)
{
int ret;
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
ret = mlx5_rescan_drivers_locked(dev);
@@ -450,6 +457,7 @@ int mlx5_register_device(struct mlx5_core_dev *dev)
void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
dev->priv.flags = MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
mlx5_rescan_drivers_locked(dev);
@@ -526,16 +534,22 @@ del_adev:
int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
+ int err = 0;
lockdep_assert_held(&mlx5_intf_mutex);
if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
return 0;
+ priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
delete_drivers(dev);
if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
- return 0;
+ goto out;
+
+ err = add_drivers(dev);
- return add_drivers(dev);
+out:
+ priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
+ return err;
}
bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index f85166e587f2..66c6a7017695 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -104,7 +104,16 @@ static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netli
if (err)
return err;
- return mlx5_fw_reset_wait_reset_done(dev);
+ err = mlx5_fw_reset_wait_reset_done(dev);
+ if (err)
+ return err;
+
+ mlx5_unload_one_devl_locked(dev);
+ err = mlx5_health_wait_pci_up(dev);
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack, "FW activate aborted, PCI reads fail after reset");
+
+ return err;
}
static int mlx5_devlink_trigger_fw_live_patch(struct devlink *devlink,
@@ -134,6 +143,7 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct pci_dev *pdev = dev->pdev;
bool sf_dev_allocated;
+ int ret = 0;
sf_dev_allocated = mlx5_sf_dev_allocated(dev);
if (sf_dev_allocated) {
@@ -156,17 +166,21 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
- mlx5_unload_one(dev);
- return 0;
+ mlx5_unload_one_devl_locked(dev);
+ break;
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
- return mlx5_devlink_trigger_fw_live_patch(devlink, extack);
- return mlx5_devlink_reload_fw_activate(devlink, extack);
+ ret = mlx5_devlink_trigger_fw_live_patch(devlink, extack);
+ else
+ ret = mlx5_devlink_reload_fw_activate(devlink, extack);
+ break;
default:
/* Unsupported action should not get to this function */
WARN_ON(1);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
}
+
+ return ret;
}
static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_action action,
@@ -174,24 +188,27 @@ static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
+ int ret = 0;
*actions_performed = BIT(action);
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
- return mlx5_load_one(dev, false);
+ ret = mlx5_load_one_devl_locked(dev, false);
+ break;
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
break;
/* On fw_activate action, also driver is reloaded and reinit performed */
*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
- return mlx5_load_one(dev, false);
+ ret = mlx5_load_one_devl_locked(dev, false);
+ break;
default:
/* Unsupported action should not get to this function */
WARN_ON(1);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
}
- return 0;
+ return ret;
}
static struct mlx5_devlink_trap *mlx5_find_trap_by_id(struct mlx5_core_dev *dev, int trap_id)
@@ -828,28 +845,28 @@ static int mlx5_devlink_traps_register(struct devlink *devlink)
struct mlx5_core_dev *core_dev = devlink_priv(devlink);
int err;
- err = devlink_trap_groups_register(devlink, mlx5_trap_groups_arr,
- ARRAY_SIZE(mlx5_trap_groups_arr));
+ err = devl_trap_groups_register(devlink, mlx5_trap_groups_arr,
+ ARRAY_SIZE(mlx5_trap_groups_arr));
if (err)
return err;
- err = devlink_traps_register(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr),
- &core_dev->priv);
+ err = devl_traps_register(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr),
+ &core_dev->priv);
if (err)
goto err_trap_group;
return 0;
err_trap_group:
- devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
- ARRAY_SIZE(mlx5_trap_groups_arr));
+ devl_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
+ ARRAY_SIZE(mlx5_trap_groups_arr));
return err;
}
static void mlx5_devlink_traps_unregister(struct devlink *devlink)
{
- devlink_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr));
- devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
- ARRAY_SIZE(mlx5_trap_groups_arr));
+ devl_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr));
+ devl_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
+ ARRAY_SIZE(mlx5_trap_groups_arr));
}
int mlx5_devlink_register(struct devlink *devlink)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index b6c15efe92ad..a560df446bac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -109,7 +109,7 @@ struct page_pool;
#define MLX5E_REQUIRED_WQE_MTTS (MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1))
#define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS)
#define MLX5E_MAX_RQ_NUM_MTTS \
- ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
+ (ALIGN_DOWN(U16_MAX, 4) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \
(ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
@@ -174,8 +174,8 @@ struct page_pool;
ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT)
#define MLX5E_MAX_KLM_PER_WQE(mdev) \
- MLX5E_KLM_ENTRIES_PER_WQE(mlx5e_get_sw_max_sq_mpw_wqebbs(mlx5e_get_max_sq_wqebbs(mdev)) \
- << MLX5_MKEY_BSF_OCTO_SIZE)
+ MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * \
+ mlx5e_get_sw_max_sq_mpw_wqebbs(mlx5e_get_max_sq_wqebbs(mdev)))
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
@@ -233,7 +233,7 @@ static inline u16 mlx5e_get_max_sq_wqebbs(struct mlx5_core_dev *mdev)
MLX5_CAP_GEN(mdev, max_wqe_sz_sq) / MLX5_SEND_WQE_BB);
}
-static inline u16 mlx5e_get_sw_max_sq_mpw_wqebbs(u16 max_sq_wqebbs)
+static inline u8 mlx5e_get_sw_max_sq_mpw_wqebbs(u8 max_sq_wqebbs)
{
/* The return value will be multiplied by MLX5_SEND_WQEBB_NUM_DS.
* Since max_sq_wqebbs may be up to MLX5_SEND_WQE_MAX_WQEBBS == 16,
@@ -242,11 +242,12 @@ static inline u16 mlx5e_get_sw_max_sq_mpw_wqebbs(u16 max_sq_wqebbs)
* than MLX5_SEND_WQE_MAX_WQEBBS to let a full-session WQE be
* cache-aligned.
*/
-#if L1_CACHE_BYTES < 128
- return min_t(u16, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1);
-#else
- return min_t(u16, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 2);
+ u8 wqebbs = min_t(u8, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1);
+
+#if L1_CACHE_BYTES >= 128
+ wqebbs = ALIGN_DOWN(wqebbs, 2);
#endif
+ return wqebbs;
}
struct mlx5e_tx_wqe {
@@ -321,7 +322,8 @@ struct mlx5e_params {
u8 num_tc;
struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
struct {
- struct mlx5e_mqprio_rl *rl;
+ u64 max_rate[TC_MAX_QUEUE];
+ u32 hw_id[TC_MAX_QUEUE];
} channel;
} mqprio;
bool rx_cqe_compress_def;
@@ -455,7 +457,7 @@ struct mlx5e_txqsq {
struct netdev_queue *txq;
u32 sqn;
u16 stop_room;
- u16 max_sq_mpw_wqebbs;
+ u8 max_sq_mpw_wqebbs;
u8 min_inline_mode;
struct device *pdev;
__be32 mkey_be;
@@ -570,7 +572,7 @@ struct mlx5e_xdpsq {
struct device *pdev;
__be32 mkey_be;
u16 stop_room;
- u16 max_sq_mpw_wqebbs;
+ u8 max_sq_mpw_wqebbs;
u8 min_inline_mode;
unsigned long state;
unsigned int hw_mtu;
@@ -898,16 +900,8 @@ struct mlx5e_scratchpad {
cpumask_var_t cpumask;
};
-struct mlx5e_htb {
- DECLARE_HASHTABLE(qos_tc2node, order_base_2(MLX5E_QOS_MAX_LEAF_NODES));
- DECLARE_BITMAP(qos_used_qids, MLX5E_QOS_MAX_LEAF_NODES);
- struct mlx5e_sq_stats **qos_sq_stats;
- u16 max_qos_sqs;
- u16 maj_id;
- u16 defcls;
-};
-
struct mlx5e_trap;
+struct mlx5e_htb;
struct mlx5e_priv {
/* priv data path fields - start */
@@ -928,7 +922,7 @@ struct mlx5e_priv {
struct mlx5e_rx_res *rx_res;
u32 *tx_rates;
- struct mlx5e_flow_steering fs;
+ struct mlx5e_flow_steering *fs;
struct workqueue_struct *wq;
struct work_struct update_carrier_work;
@@ -945,6 +939,8 @@ struct mlx5e_priv {
struct mlx5e_channel_stats **channel_stats;
struct mlx5e_channel_stats trap_stats;
struct mlx5e_ptp_stats ptp_stats;
+ struct mlx5e_sq_stats **htb_qos_sq_stats;
+ u16 htb_max_qos_sqs;
u16 stats_nch;
u16 max_nch;
u8 max_opened_tc;
@@ -976,7 +972,7 @@ struct mlx5e_priv {
struct mlx5e_hv_vhca_stats_agent stats_agent;
#endif
struct mlx5e_scratchpad scratchpad;
- struct mlx5e_htb htb;
+ struct mlx5e_htb *htb;
struct mlx5e_mqprio_rl *mqprio_rl;
};
@@ -992,6 +988,8 @@ enum mlx5e_profile_feature {
MLX5E_PROFILE_FEATURE_PTP_RX,
MLX5E_PROFILE_FEATURE_PTP_TX,
MLX5E_PROFILE_FEATURE_QOS_HTB,
+ MLX5E_PROFILE_FEATURE_FS_VLAN,
+ MLX5E_PROFILE_FEATURE_FS_TC,
};
struct mlx5e_profile {
@@ -1027,7 +1025,6 @@ void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
-void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
@@ -1181,7 +1178,8 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
struct ethtool_stats *stats, u64 *data);
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
- struct ethtool_ringparam *param);
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param);
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
struct ethtool_ringparam *param);
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
index ae52e7f38306..b69f9d10ccbd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -21,6 +21,7 @@ int mlx5e_devlink_port_register(struct mlx5e_priv *priv)
struct netdev_phys_item_id ppid = {};
struct devlink_port *dl_port;
unsigned int dl_port_index;
+ int ret;
if (mlx5_core_is_pf(priv->mdev)) {
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
@@ -41,7 +42,13 @@ int mlx5e_devlink_port_register(struct mlx5e_priv *priv)
memset(dl_port, 0, sizeof(*dl_port));
devlink_port_attrs_set(dl_port, &attrs);
- return devlink_port_register(devlink, dl_port, dl_port_index);
+ if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW))
+ devl_lock(devlink);
+ ret = devl_port_register(devlink, dl_port, dl_port_index);
+ if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW))
+ devl_unlock(devlink);
+
+ return ret;
}
void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv)
@@ -54,8 +61,13 @@ void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv)
void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv)
{
struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
+ struct devlink *devlink = priv_to_devlink(priv->mdev);
- devlink_port_unregister(dl_port);
+ if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW))
+ devl_lock(devlink);
+ devl_port_unregister(dl_port);
+ if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW))
+ devl_unlock(devlink);
}
struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 6e3a90a959e9..9b8cdf2e68ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -15,29 +15,6 @@ enum {
MLX5E_TC_MISS_LEVEL,
};
-struct mlx5e_tc_table {
- /* Protects the dynamic assignment of the t parameter
- * which is the nic tc root table.
- */
- struct mutex t_lock;
- struct mlx5_flow_table *t;
- struct mlx5_flow_table *miss_t;
- struct mlx5_fs_chains *chains;
- struct mlx5e_post_act *post_act;
-
- struct rhashtable ht;
-
- struct mod_hdr_tbl mod_hdr;
- struct mutex hairpin_tbl_lock; /* protects hairpin_tbl */
- DECLARE_HASHTABLE(hairpin_tbl, 8);
-
- struct notifier_block netdevice_nb;
- struct netdev_net_notifier netdevice_nn;
-
- struct mlx5_tc_ct_priv *ct;
- struct mapping_ctx *mapping;
-};
-
struct mlx5e_flow_table {
int num_groups;
struct mlx5_flow_table *t;
@@ -160,16 +137,20 @@ static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) { return -EOPNOTSU
struct mlx5e_accel_fs_tcp;
#endif
+struct mlx5e_profile;
struct mlx5e_fs_udp;
struct mlx5e_fs_any;
struct mlx5e_ptp_fs;
struct mlx5e_flow_steering {
+ bool state_destroy;
+ bool vlan_strip_disable;
+ struct mlx5_core_dev *mdev;
struct mlx5_flow_namespace *ns;
#ifdef CONFIG_MLX5_EN_RXNFC
struct mlx5e_ethtool_steering ethtool;
#endif
- struct mlx5e_tc_table tc;
+ struct mlx5e_tc_table *tc;
struct mlx5e_promisc_table promisc;
struct mlx5e_vlan_table *vlan;
struct mlx5e_l2_table l2;
@@ -200,13 +181,22 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
-int mlx5e_fs_init(struct mlx5e_priv *priv);
-void mlx5e_fs_cleanup(struct mlx5e_priv *priv);
+struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
+ struct mlx5_core_dev *mdev,
+ bool state_destroy);
+void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs);
int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
void mlx5e_remove_mac_trap(struct mlx5e_priv *priv);
-
+void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering *fs, struct net_device *netdev);
+int mlx5e_fs_vlan_rx_add_vid(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
+ __be16 proto, u16 vid);
+int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
+ __be16 proto, u16 vid);
+void mlx5e_fs_init_l2_addr(struct mlx5e_flow_steering *fs, struct net_device *netdev);
#endif /* __MLX5E_FLOW_STEER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
index 7aa25a5e29d7..e153d6119e02 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
@@ -94,7 +94,7 @@ mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
if (!spec)
return ERR_PTR(-ENOMEM);
- fs_udp = priv->fs.udp;
+ fs_udp = priv->fs->udp;
ft = fs_udp->tables[type].t;
fs_udp_set_dport_flow(spec, type, d_port);
@@ -121,10 +121,10 @@ static int fs_udp_add_default_rule(struct mlx5e_priv *priv, enum fs_udp_type typ
struct mlx5e_fs_udp *fs_udp;
int err;
- fs_udp = priv->fs.udp;
+ fs_udp = priv->fs->udp;
fs_udp_t = &fs_udp->tables[type];
- dest = mlx5_ttc_get_default_dest(priv->fs.ttc, fs_udp2tt(type));
+ dest = mlx5_ttc_get_default_dest(priv->fs->ttc, fs_udp2tt(type));
rule = mlx5_add_flow_rules(fs_udp_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -208,7 +208,7 @@ out:
static int fs_udp_create_table(struct mlx5e_priv *priv, enum fs_udp_type type)
{
- struct mlx5e_flow_table *ft = &priv->fs.udp->tables[type];
+ struct mlx5e_flow_table *ft = &priv->fs->udp->tables[type];
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -218,7 +218,7 @@ static int fs_udp_create_table(struct mlx5e_priv *priv, enum fs_udp_type type)
ft_attr.level = MLX5E_FS_TT_UDP_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -259,7 +259,7 @@ static int fs_udp_disable(struct mlx5e_priv *priv)
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
/* Modify ttc rules destination to point back to the indir TIRs */
- err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_udp2tt(i));
+ err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_udp2tt(i));
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -278,10 +278,10 @@ static int fs_udp_enable(struct mlx5e_priv *priv)
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
- dest.ft = priv->fs.udp->tables[i].t;
+ dest.ft = priv->fs->udp->tables[i].t;
/* Modify ttc rules destination to point on the accel_fs FTs */
- err = mlx5_ttc_fwd_dest(priv->fs.ttc, fs_udp2tt(i), &dest);
+ err = mlx5_ttc_fwd_dest(priv->fs->ttc, fs_udp2tt(i), &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] destination to accel failed, err(%d)\n",
@@ -294,7 +294,7 @@ static int fs_udp_enable(struct mlx5e_priv *priv)
void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv)
{
- struct mlx5e_fs_udp *fs_udp = priv->fs.udp;
+ struct mlx5e_fs_udp *fs_udp = priv->fs->udp;
int i;
if (!fs_udp)
@@ -309,20 +309,20 @@ void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv)
fs_udp_destroy_table(fs_udp, i);
kfree(fs_udp);
- priv->fs.udp = NULL;
+ priv->fs->udp = NULL;
}
int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv)
{
int i, err;
- if (priv->fs.udp) {
- priv->fs.udp->ref_cnt++;
+ if (priv->fs->udp) {
+ priv->fs->udp->ref_cnt++;
return 0;
}
- priv->fs.udp = kzalloc(sizeof(*priv->fs.udp), GFP_KERNEL);
- if (!priv->fs.udp)
+ priv->fs->udp = kzalloc(sizeof(*priv->fs->udp), GFP_KERNEL);
+ if (!priv->fs->udp)
return -ENOMEM;
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
@@ -335,16 +335,16 @@ int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv)
if (err)
goto err_destroy_tables;
- priv->fs.udp->ref_cnt = 1;
+ priv->fs->udp->ref_cnt = 1;
return 0;
err_destroy_tables:
while (--i >= 0)
- fs_udp_destroy_table(priv->fs.udp, i);
+ fs_udp_destroy_table(priv->fs->udp, i);
- kfree(priv->fs.udp);
- priv->fs.udp = NULL;
+ kfree(priv->fs->udp);
+ priv->fs->udp = NULL;
return err;
}
@@ -371,7 +371,7 @@ mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_priv *priv,
if (!spec)
return ERR_PTR(-ENOMEM);
- fs_any = priv->fs.any;
+ fs_any = priv->fs->any;
ft = fs_any->table.t;
fs_any_set_ethertype_flow(spec, ether_type);
@@ -398,10 +398,10 @@ static int fs_any_add_default_rule(struct mlx5e_priv *priv)
struct mlx5e_fs_any *fs_any;
int err;
- fs_any = priv->fs.any;
+ fs_any = priv->fs->any;
fs_any_t = &fs_any->table;
- dest = mlx5_ttc_get_default_dest(priv->fs.ttc, MLX5_TT_ANY);
+ dest = mlx5_ttc_get_default_dest(priv->fs->ttc, MLX5_TT_ANY);
rule = mlx5_add_flow_rules(fs_any_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -474,7 +474,7 @@ err:
static int fs_any_create_table(struct mlx5e_priv *priv)
{
- struct mlx5e_flow_table *ft = &priv->fs.any->table;
+ struct mlx5e_flow_table *ft = &priv->fs->any->table;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -484,7 +484,7 @@ static int fs_any_create_table(struct mlx5e_priv *priv)
ft_attr.level = MLX5E_FS_TT_ANY_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -514,7 +514,7 @@ static int fs_any_disable(struct mlx5e_priv *priv)
int err;
/* Modify ttc rules destination to point back to the indir TIRs */
- err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, MLX5_TT_ANY);
+ err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, MLX5_TT_ANY);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -530,10 +530,10 @@ static int fs_any_enable(struct mlx5e_priv *priv)
int err;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = priv->fs.any->table.t;
+ dest.ft = priv->fs->any->table.t;
/* Modify ttc rules destination to point on the accel_fs FTs */
- err = mlx5_ttc_fwd_dest(priv->fs.ttc, MLX5_TT_ANY, &dest);
+ err = mlx5_ttc_fwd_dest(priv->fs->ttc, MLX5_TT_ANY, &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] destination to accel failed, err(%d)\n",
@@ -555,7 +555,7 @@ static void fs_any_destroy_table(struct mlx5e_fs_any *fs_any)
void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv)
{
- struct mlx5e_fs_any *fs_any = priv->fs.any;
+ struct mlx5e_fs_any *fs_any = priv->fs->any;
if (!fs_any)
return;
@@ -568,20 +568,20 @@ void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv)
fs_any_destroy_table(fs_any);
kfree(fs_any);
- priv->fs.any = NULL;
+ priv->fs->any = NULL;
}
int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv)
{
int err;
- if (priv->fs.any) {
- priv->fs.any->ref_cnt++;
+ if (priv->fs->any) {
+ priv->fs->any->ref_cnt++;
return 0;
}
- priv->fs.any = kzalloc(sizeof(*priv->fs.any), GFP_KERNEL);
- if (!priv->fs.any)
+ priv->fs->any = kzalloc(sizeof(*priv->fs->any), GFP_KERNEL);
+ if (!priv->fs->any)
return -ENOMEM;
err = fs_any_create_table(priv);
@@ -592,14 +592,14 @@ int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv)
if (err)
goto err_destroy_table;
- priv->fs.any->ref_cnt = 1;
+ priv->fs->any->ref_cnt = 1;
return 0;
err_destroy_table:
- fs_any_destroy_table(priv->fs.any);
+ fs_any_destroy_table(priv->fs->any);
- kfree(priv->fs.any);
- priv->fs.any = NULL;
+ kfree(priv->fs->any);
+ priv->fs->any = NULL;
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c b/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
new file mode 100644
index 000000000000..6dac76fa58a3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
@@ -0,0 +1,722 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <net/pkt_cls.h>
+#include "htb.h"
+#include "en.h"
+#include "../qos.h"
+
+struct mlx5e_qos_node {
+ struct hlist_node hnode;
+ struct mlx5e_qos_node *parent;
+ u64 rate;
+ u32 bw_share;
+ u32 max_average_bw;
+ u32 hw_id;
+ u32 classid; /* 16-bit, except root. */
+ u16 qid;
+};
+
+struct mlx5e_htb {
+ DECLARE_HASHTABLE(qos_tc2node, order_base_2(MLX5E_QOS_MAX_LEAF_NODES));
+ DECLARE_BITMAP(qos_used_qids, MLX5E_QOS_MAX_LEAF_NODES);
+ struct mlx5_core_dev *mdev;
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+ struct mlx5e_selq *selq;
+};
+
+#define MLX5E_QOS_QID_INNER 0xffff
+#define MLX5E_HTB_CLASSID_ROOT 0xffffffff
+
+/* Software representation of the QoS tree */
+
+int mlx5e_htb_enumerate_leaves(struct mlx5e_htb *htb, mlx5e_fp_htb_enumerate callback, void *data)
+{
+ struct mlx5e_qos_node *node = NULL;
+ int bkt, err;
+
+ hash_for_each(htb->qos_tc2node, bkt, node, hnode) {
+ if (node->qid == MLX5E_QOS_QID_INNER)
+ continue;
+ err = callback(data, node->qid, node->hw_id);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+int mlx5e_htb_cur_leaf_nodes(struct mlx5e_htb *htb)
+{
+ int last;
+
+ last = find_last_bit(htb->qos_used_qids, mlx5e_qos_max_leaf_nodes(htb->mdev));
+ return last == mlx5e_qos_max_leaf_nodes(htb->mdev) ? 0 : last + 1;
+}
+
+static int mlx5e_htb_find_unused_qos_qid(struct mlx5e_htb *htb)
+{
+ int size = mlx5e_qos_max_leaf_nodes(htb->mdev);
+ struct mlx5e_priv *priv = htb->priv;
+ int res;
+
+ WARN_ONCE(!mutex_is_locked(&priv->state_lock), "%s: state_lock is not held\n", __func__);
+ res = find_first_zero_bit(htb->qos_used_qids, size);
+
+ return res == size ? -ENOSPC : res;
+}
+
+static struct mlx5e_qos_node *
+mlx5e_htb_node_create_leaf(struct mlx5e_htb *htb, u16 classid, u16 qid,
+ struct mlx5e_qos_node *parent)
+{
+ struct mlx5e_qos_node *node;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ node->parent = parent;
+
+ node->qid = qid;
+ __set_bit(qid, htb->qos_used_qids);
+
+ node->classid = classid;
+ hash_add_rcu(htb->qos_tc2node, &node->hnode, classid);
+
+ mlx5e_update_tx_netdev_queues(htb->priv);
+
+ return node;
+}
+
+static struct mlx5e_qos_node *mlx5e_htb_node_create_root(struct mlx5e_htb *htb)
+{
+ struct mlx5e_qos_node *node;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ node->qid = MLX5E_QOS_QID_INNER;
+ node->classid = MLX5E_HTB_CLASSID_ROOT;
+ hash_add_rcu(htb->qos_tc2node, &node->hnode, node->classid);
+
+ return node;
+}
+
+static struct mlx5e_qos_node *mlx5e_htb_node_find(struct mlx5e_htb *htb, u32 classid)
+{
+ struct mlx5e_qos_node *node = NULL;
+
+ hash_for_each_possible(htb->qos_tc2node, node, hnode, classid) {
+ if (node->classid == classid)
+ break;
+ }
+
+ return node;
+}
+
+static struct mlx5e_qos_node *mlx5e_htb_node_find_rcu(struct mlx5e_htb *htb, u32 classid)
+{
+ struct mlx5e_qos_node *node = NULL;
+
+ hash_for_each_possible_rcu(htb->qos_tc2node, node, hnode, classid) {
+ if (node->classid == classid)
+ break;
+ }
+
+ return node;
+}
+
+static void mlx5e_htb_node_delete(struct mlx5e_htb *htb, struct mlx5e_qos_node *node)
+{
+ hash_del_rcu(&node->hnode);
+ if (node->qid != MLX5E_QOS_QID_INNER) {
+ __clear_bit(node->qid, htb->qos_used_qids);
+ mlx5e_update_tx_netdev_queues(htb->priv);
+ }
+ /* Make sure this qid is no longer selected by mlx5e_select_queue, so
+ * that mlx5e_reactivate_qos_sq can safely restart the netdev TX queue.
+ */
+ synchronize_net();
+ kfree(node);
+}
+
+/* TX datapath API */
+
+int mlx5e_htb_get_txq_by_classid(struct mlx5e_htb *htb, u16 classid)
+{
+ struct mlx5e_qos_node *node;
+ u16 qid;
+ int res;
+
+ rcu_read_lock();
+
+ node = mlx5e_htb_node_find_rcu(htb, classid);
+ if (!node) {
+ res = -ENOENT;
+ goto out;
+ }
+ qid = READ_ONCE(node->qid);
+ if (qid == MLX5E_QOS_QID_INNER) {
+ res = -EINVAL;
+ goto out;
+ }
+ res = mlx5e_qid_from_qos(&htb->priv->channels, qid);
+
+out:
+ rcu_read_unlock();
+ return res;
+}
+
+/* HTB TC handlers */
+
+static int
+mlx5e_htb_root_add(struct mlx5e_htb *htb, u16 htb_maj_id, u16 htb_defcls,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = htb->priv;
+ struct mlx5e_qos_node *root;
+ bool opened;
+ int err;
+
+ qos_dbg(htb->mdev, "TC_HTB_CREATE handle %04x:, default :%04x\n", htb_maj_id, htb_defcls);
+
+ mlx5e_selq_prepare_htb(htb->selq, htb_maj_id, htb_defcls);
+
+ opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (opened) {
+ err = mlx5e_qos_alloc_queues(priv, &priv->channels);
+ if (err)
+ goto err_cancel_selq;
+ }
+
+ root = mlx5e_htb_node_create_root(htb);
+ if (IS_ERR(root)) {
+ err = PTR_ERR(root);
+ goto err_free_queues;
+ }
+
+ err = mlx5_qos_create_root_node(htb->mdev, &root->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error. Try upgrading firmware.");
+ goto err_sw_node_delete;
+ }
+
+ mlx5e_selq_apply(htb->selq);
+
+ return 0;
+
+err_sw_node_delete:
+ mlx5e_htb_node_delete(htb, root);
+
+err_free_queues:
+ if (opened)
+ mlx5e_qos_close_all_queues(&priv->channels);
+err_cancel_selq:
+ mlx5e_selq_cancel(htb->selq);
+ return err;
+}
+
+static int mlx5e_htb_root_del(struct mlx5e_htb *htb)
+{
+ struct mlx5e_priv *priv = htb->priv;
+ struct mlx5e_qos_node *root;
+ int err;
+
+ qos_dbg(htb->mdev, "TC_HTB_DESTROY\n");
+
+ /* Wait until real_num_tx_queues is updated for mlx5e_select_queue,
+ * so that we can safely switch to its non-HTB non-PTP fastpath.
+ */
+ synchronize_net();
+
+ mlx5e_selq_prepare_htb(htb->selq, 0, 0);
+ mlx5e_selq_apply(htb->selq);
+
+ root = mlx5e_htb_node_find(htb, MLX5E_HTB_CLASSID_ROOT);
+ if (!root) {
+ qos_err(htb->mdev, "Failed to find the root node in the QoS tree\n");
+ return -ENOENT;
+ }
+ err = mlx5_qos_destroy_node(htb->mdev, root->hw_id);
+ if (err)
+ qos_err(htb->mdev, "Failed to destroy root node %u, err = %d\n",
+ root->hw_id, err);
+ mlx5e_htb_node_delete(htb, root);
+
+ mlx5e_qos_deactivate_all_queues(&priv->channels);
+ mlx5e_qos_close_all_queues(&priv->channels);
+
+ return err;
+}
+
+static int mlx5e_htb_convert_rate(struct mlx5e_htb *htb, u64 rate,
+ struct mlx5e_qos_node *parent, u32 *bw_share)
+{
+ u64 share = 0;
+
+ while (parent->classid != MLX5E_HTB_CLASSID_ROOT && !parent->max_average_bw)
+ parent = parent->parent;
+
+ if (parent->max_average_bw)
+ share = div64_u64(div_u64(rate * 100, BYTES_IN_MBIT),
+ parent->max_average_bw);
+ else
+ share = 101;
+
+ *bw_share = share == 0 ? 1 : share > 100 ? 0 : share;
+
+ qos_dbg(htb->mdev, "Convert: rate %llu, parent ceil %llu -> bw_share %u\n",
+ rate, (u64)parent->max_average_bw * BYTES_IN_MBIT, *bw_share);
+
+ return 0;
+}
+
+static void mlx5e_htb_convert_ceil(struct mlx5e_htb *htb, u64 ceil, u32 *max_average_bw)
+{
+ /* Hardware treats 0 as "unlimited", set at least 1. */
+ *max_average_bw = max_t(u32, div_u64(ceil, BYTES_IN_MBIT), 1);
+
+ qos_dbg(htb->mdev, "Convert: ceil %llu -> max_average_bw %u\n",
+ ceil, *max_average_bw);
+}
+
+int
+mlx5e_htb_leaf_alloc_queue(struct mlx5e_htb *htb, u16 classid,
+ u32 parent_classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *node, *parent;
+ struct mlx5e_priv *priv = htb->priv;
+ int qid;
+ int err;
+
+ qos_dbg(htb->mdev, "TC_HTB_LEAF_ALLOC_QUEUE classid %04x, parent %04x, rate %llu, ceil %llu\n",
+ classid, parent_classid, rate, ceil);
+
+ qid = mlx5e_htb_find_unused_qos_qid(htb);
+ if (qid < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Maximum amount of leaf classes is reached.");
+ return qid;
+ }
+
+ parent = mlx5e_htb_node_find(htb, parent_classid);
+ if (!parent)
+ return -EINVAL;
+
+ node = mlx5e_htb_node_create_leaf(htb, classid, qid, parent);
+ if (IS_ERR(node))
+ return PTR_ERR(node);
+
+ node->rate = rate;
+ mlx5e_htb_convert_rate(htb, rate, node->parent, &node->bw_share);
+ mlx5e_htb_convert_ceil(htb, ceil, &node->max_average_bw);
+
+ err = mlx5_qos_create_leaf_node(htb->mdev, node->parent->hw_id,
+ node->bw_share, node->max_average_bw,
+ &node->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
+ qos_err(htb->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
+ classid, err);
+ mlx5e_htb_node_delete(htb, node);
+ return err;
+ }
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, node->qid, node->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(htb->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
+ classid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, node->qid, node->hw_id);
+ }
+ }
+
+ return mlx5e_qid_from_qos(&priv->channels, node->qid);
+}
+
+int
+mlx5e_htb_leaf_to_inner(struct mlx5e_htb *htb, u16 classid, u16 child_classid,
+ u64 rate, u64 ceil, struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *node, *child;
+ struct mlx5e_priv *priv = htb->priv;
+ int err, tmp_err;
+ u32 new_hw_id;
+ u16 qid;
+
+ qos_dbg(htb->mdev, "TC_HTB_LEAF_TO_INNER classid %04x, upcoming child %04x, rate %llu, ceil %llu\n",
+ classid, child_classid, rate, ceil);
+
+ node = mlx5e_htb_node_find(htb, classid);
+ if (!node)
+ return -ENOENT;
+
+ err = mlx5_qos_create_inner_node(htb->mdev, node->parent->hw_id,
+ node->bw_share, node->max_average_bw,
+ &new_hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating an inner node.");
+ qos_err(htb->mdev, "Failed to create an inner node (class %04x), err = %d\n",
+ classid, err);
+ return err;
+ }
+
+ /* Intentionally reuse the qid for the upcoming first child. */
+ child = mlx5e_htb_node_create_leaf(htb, child_classid, node->qid, node);
+ if (IS_ERR(child)) {
+ err = PTR_ERR(child);
+ goto err_destroy_hw_node;
+ }
+
+ child->rate = rate;
+ mlx5e_htb_convert_rate(htb, rate, node, &child->bw_share);
+ mlx5e_htb_convert_ceil(htb, ceil, &child->max_average_bw);
+
+ err = mlx5_qos_create_leaf_node(htb->mdev, new_hw_id, child->bw_share,
+ child->max_average_bw, &child->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
+ qos_err(htb->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
+ classid, err);
+ goto err_delete_sw_node;
+ }
+
+ /* No fail point. */
+
+ qid = node->qid;
+ /* Pairs with mlx5e_htb_get_txq_by_classid. */
+ WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ mlx5e_deactivate_qos_sq(priv, qid);
+ mlx5e_close_qos_sq(priv, qid);
+ }
+
+ err = mlx5_qos_destroy_node(htb->mdev, node->hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(htb->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, classid, err);
+
+ node->hw_id = new_hw_id;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, child->qid, child->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(htb->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
+ classid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, child->qid, child->hw_id);
+ }
+ }
+
+ return 0;
+
+err_delete_sw_node:
+ child->qid = MLX5E_QOS_QID_INNER;
+ mlx5e_htb_node_delete(htb, child);
+
+err_destroy_hw_node:
+ tmp_err = mlx5_qos_destroy_node(htb->mdev, new_hw_id);
+ if (tmp_err) /* Not fatal. */
+ qos_warn(htb->mdev, "Failed to roll back creation of an inner node %u (class %04x), err = %d\n",
+ new_hw_id, classid, tmp_err);
+ return err;
+}
+
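+/* Linear scan over the classid hash table; only used on the control path,
+ * when a deleted leaf leaves a hole and the node owning the highest qid has
+ * to be moved into the freed slot.
+ */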
+static struct mlx5e_qos_node *mlx5e_htb_node_find_by_qid(struct mlx5e_htb *htb, u16 qid)
+{
+ struct mlx5e_qos_node *node = NULL;
+ int bkt;
+
+ hash_for_each(htb->qos_tc2node, bkt, node, hnode)
+ if (node->qid == qid)
+ break;
+
+ return node;
+}
+
+int mlx5e_htb_leaf_del(struct mlx5e_htb *htb, u16 *classid,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = htb->priv;
+ struct mlx5e_qos_node *node;
+ struct netdev_queue *txq;
+ u16 qid, moved_qid;
+ bool opened;
+ int err;
+
+ qos_dbg(htb->mdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid);
+
+ node = mlx5e_htb_node_find(htb, *classid);
+ if (!node)
+ return -ENOENT;
+
+ /* Store qid for reuse. */
+ qid = node->qid;
+
+ opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (opened) {
+ txq = netdev_get_tx_queue(htb->netdev,
+ mlx5e_qid_from_qos(&priv->channels, qid));
+ mlx5e_deactivate_qos_sq(priv, qid);
+ mlx5e_close_qos_sq(priv, qid);
+ }
+
+ err = mlx5_qos_destroy_node(htb->mdev, node->hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(htb->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, *classid, err);
+
+ mlx5e_htb_node_delete(htb, node);
+
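+	/* QoS qids map to a contiguous range of netdev TX queues, so if deleting
+	 * this leaf created a hole, move the node owning the highest qid into
+	 * the freed slot to keep the range dense.
+	 */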
+ moved_qid = mlx5e_htb_cur_leaf_nodes(htb);
+
+ if (moved_qid == 0) {
+ /* The last QoS SQ was just destroyed. */
+ if (opened)
+ mlx5e_reactivate_qos_sq(priv, qid, txq);
+ return 0;
+ }
+ moved_qid--;
+
+ if (moved_qid < qid) {
+ /* The highest QoS SQ was just destroyed. */
+		WARN(moved_qid != qid - 1, "Gaps in queue numbering: destroyed queue %u, the highest queue is %u",
+ qid, moved_qid);
+ if (opened)
+ mlx5e_reactivate_qos_sq(priv, qid, txq);
+ return 0;
+ }
+
+ WARN(moved_qid == qid, "Can't move node with qid %u to itself", qid);
+ qos_dbg(htb->mdev, "Moving QoS SQ %u to %u\n", moved_qid, qid);
+
+ node = mlx5e_htb_node_find_by_qid(htb, moved_qid);
+ WARN(!node, "Could not find a node with qid %u to move to queue %u",
+ moved_qid, qid);
+
+ /* Stop traffic to the old queue. */
+ WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
+ __clear_bit(moved_qid, priv->htb->qos_used_qids);
+
+ if (opened) {
+ txq = netdev_get_tx_queue(htb->netdev,
+ mlx5e_qid_from_qos(&priv->channels, moved_qid));
+ mlx5e_deactivate_qos_sq(priv, moved_qid);
+ mlx5e_close_qos_sq(priv, moved_qid);
+ }
+
+ /* Prevent packets from the old class from getting into the new one. */
+ mlx5e_reset_qdisc(htb->netdev, moved_qid);
+
+ __set_bit(qid, htb->qos_used_qids);
+ WRITE_ONCE(node->qid, qid);
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, node->qid, node->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(htb->mdev, "Failed to create a QoS SQ (class %04x) while moving qid %u to %u, err = %d\n",
+ node->classid, moved_qid, qid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, node->qid, node->hw_id);
+ }
+ }
+
+ mlx5e_update_tx_netdev_queues(priv);
+ if (opened)
+ mlx5e_reactivate_qos_sq(priv, moved_qid, txq);
+
+ *classid = node->classid;
+ return 0;
+}
+
+int
+mlx5e_htb_leaf_del_last(struct mlx5e_htb *htb, u16 classid, bool force,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *node, *parent;
+ struct mlx5e_priv *priv = htb->priv;
+ u32 old_hw_id, new_hw_id;
+ int err, saved_err = 0;
+ u16 qid;
+
+ qos_dbg(htb->mdev, "TC_HTB_LEAF_DEL_LAST%s classid %04x\n",
+ force ? "_FORCE" : "", classid);
+
+ node = mlx5e_htb_node_find(htb, classid);
+ if (!node)
+ return -ENOENT;
+
+ err = mlx5_qos_create_leaf_node(htb->mdev, node->parent->parent->hw_id,
+ node->parent->bw_share,
+ node->parent->max_average_bw,
+ &new_hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
+ qos_err(htb->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
+ classid, err);
+ if (!force)
+ return err;
+ saved_err = err;
+ }
+
+ /* Store qid for reuse and prevent clearing the bit. */
+ qid = node->qid;
+ /* Pairs with mlx5e_htb_get_txq_by_classid. */
+ WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ mlx5e_deactivate_qos_sq(priv, qid);
+ mlx5e_close_qos_sq(priv, qid);
+ }
+
+ /* Prevent packets from the old class from getting into the new one. */
+ mlx5e_reset_qdisc(htb->netdev, qid);
+
+ err = mlx5_qos_destroy_node(htb->mdev, node->hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(htb->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, classid, err);
+
+ parent = node->parent;
+ mlx5e_htb_node_delete(htb, node);
+
+ node = parent;
+ WRITE_ONCE(node->qid, qid);
+
+ /* Early return on error in force mode. Parent will still be an inner
+ * node to be deleted by a following delete operation.
+ */
+ if (saved_err)
+ return saved_err;
+
+ old_hw_id = node->hw_id;
+ node->hw_id = new_hw_id;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, node->qid, node->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(htb->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
+ classid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, node->qid, node->hw_id);
+ }
+ }
+
+ err = mlx5_qos_destroy_node(htb->mdev, old_hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(htb->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, classid, err);
+
+ return 0;
+}
+
+static int
+mlx5e_htb_update_children(struct mlx5e_htb *htb, struct mlx5e_qos_node *node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *child;
+ int err = 0;
+ int bkt;
+
+ hash_for_each(htb->qos_tc2node, bkt, child, hnode) {
+ u32 old_bw_share = child->bw_share;
+ int err_one;
+
+ if (child->parent != node)
+ continue;
+
+ mlx5e_htb_convert_rate(htb, child->rate, node, &child->bw_share);
+ if (child->bw_share == old_bw_share)
+ continue;
+
+ err_one = mlx5_qos_update_node(htb->mdev, child->hw_id, child->bw_share,
+ child->max_average_bw, child->hw_id);
+ if (!err && err_one) {
+ err = err_one;
+
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a child node.");
+ qos_err(htb->mdev, "Failed to modify a child node (class %04x), err = %d\n",
+ node->classid, err);
+ }
+ }
+
+ return err;
+}
+
+int
+mlx5e_htb_node_modify(struct mlx5e_htb *htb, u16 classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack)
+{
+ u32 bw_share, max_average_bw;
+ struct mlx5e_qos_node *node;
+ bool ceil_changed = false;
+ int err;
+
+ qos_dbg(htb->mdev, "TC_HTB_LEAF_MODIFY classid %04x, rate %llu, ceil %llu\n",
+ classid, rate, ceil);
+
+ node = mlx5e_htb_node_find(htb, classid);
+ if (!node)
+ return -ENOENT;
+
+ node->rate = rate;
+ mlx5e_htb_convert_rate(htb, rate, node->parent, &bw_share);
+ mlx5e_htb_convert_ceil(htb, ceil, &max_average_bw);
+
+ err = mlx5_qos_update_node(htb->mdev, node->parent->hw_id, bw_share,
+ max_average_bw, node->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a node.");
+ qos_err(htb->mdev, "Failed to modify a node (class %04x), err = %d\n",
+ classid, err);
+ return err;
+ }
+
+ if (max_average_bw != node->max_average_bw)
+ ceil_changed = true;
+
+ node->bw_share = bw_share;
+ node->max_average_bw = max_average_bw;
+
+ if (ceil_changed)
+ err = mlx5e_htb_update_children(htb, node, extack);
+
+ return err;
+}
+
+struct mlx5e_htb *mlx5e_htb_alloc(void)
+{
+ return kvzalloc(sizeof(struct mlx5e_htb), GFP_KERNEL);
+}
+
+void mlx5e_htb_free(struct mlx5e_htb *htb)
+{
+ kvfree(htb);
+}
+
+int mlx5e_htb_init(struct mlx5e_htb *htb, struct tc_htb_qopt_offload *htb_qopt,
+ struct net_device *netdev, struct mlx5_core_dev *mdev,
+ struct mlx5e_selq *selq, struct mlx5e_priv *priv)
+{
+ htb->mdev = mdev;
+ htb->netdev = netdev;
+ htb->selq = selq;
+ htb->priv = priv;
+ hash_init(htb->qos_tc2node);
+ return mlx5e_htb_root_add(htb, htb_qopt->parent_classid, htb_qopt->classid,
+ htb_qopt->extack);
+}
+
+void mlx5e_htb_cleanup(struct mlx5e_htb *htb)
+{
+ mlx5e_htb_root_del(htb);
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/htb.h b/drivers/net/ethernet/mellanox/mlx5/core/en/htb.h
new file mode 100644
index 000000000000..8386f1ea4559
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/htb.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5E_EN_HTB_H_
+#define __MLX5E_EN_HTB_H_
+
+#include "qos.h"
+
+#define MLX5E_QOS_MAX_LEAF_NODES 256
+
+struct mlx5e_selq;
+struct mlx5e_htb;
+
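+/* Called once per leaf node with its qid and hardware scheduling element id;
+ * an error returned by the callback is propagated to the caller.
+ */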
+typedef int (*mlx5e_fp_htb_enumerate)(void *data, u16 qid, u32 hw_id);
+int mlx5e_htb_enumerate_leaves(struct mlx5e_htb *htb, mlx5e_fp_htb_enumerate callback, void *data);
+
+int mlx5e_htb_cur_leaf_nodes(struct mlx5e_htb *htb);
+
+/* TX datapath API */
+int mlx5e_htb_get_txq_by_classid(struct mlx5e_htb *htb, u16 classid);
+
+/* HTB TC handlers */
+
+int
+mlx5e_htb_leaf_alloc_queue(struct mlx5e_htb *htb, u16 classid,
+ u32 parent_classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack);
+int
+mlx5e_htb_leaf_to_inner(struct mlx5e_htb *htb, u16 classid, u16 child_classid,
+ u64 rate, u64 ceil, struct netlink_ext_ack *extack);
+int mlx5e_htb_leaf_del(struct mlx5e_htb *htb, u16 *classid,
+ struct netlink_ext_ack *extack);
+int
+mlx5e_htb_leaf_del_last(struct mlx5e_htb *htb, u16 classid, bool force,
+ struct netlink_ext_ack *extack);
+int
+mlx5e_htb_node_modify(struct mlx5e_htb *htb, u16 classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack);
+struct mlx5e_htb *mlx5e_htb_alloc(void);
+void mlx5e_htb_free(struct mlx5e_htb *htb);
+int mlx5e_htb_init(struct mlx5e_htb *htb, struct tc_htb_qopt_offload *htb_qopt,
+ struct net_device *netdev, struct mlx5_core_dev *mdev,
+ struct mlx5e_selq *selq, struct mlx5e_priv *priv);
+void mlx5e_htb_cleanup(struct mlx5e_htb *htb);
+#endif
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 3c1edfa33aa7..e025040350ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -790,8 +790,20 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
wqebbs = MLX5E_UMR_WQEBBS * BIT(mlx5e_get_rq_log_wq_sz(rqp->rqc));
+
+ /* If XDP program is attached, XSK may be turned on at any time without
+ * restarting the channel. ICOSQ must be big enough to fit UMR WQEs of
+ * both regular RQ and XSK RQ.
+ * Although mlx5e_mpwqe_get_log_rq_size accepts mlx5e_xsk_param, it
+ * doesn't affect its return value, as long as params->xdp_prog != NULL,
+ * so we can just multiply by 2.
+ */
+ if (params->xdp_prog)
+ wqebbs *= 2;
+
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp);
+
return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, order_base_2(wqebbs));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 047f88f09203..903de88bab53 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -79,19 +79,49 @@ void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
}
+#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
+
+static bool mlx5e_ptp_ts_cqe_drop(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb_id)
+{
+ return (ptpsq->ts_cqe_ctr_mask && (skb_cc != skb_id));
+}
+
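+/* A mismatch between the WQE counter carried in the CQE and the skb FIFO
+ * consumer index means some port timestamp CQEs were missed: complete the
+ * skipped skbs with the timestamp already recorded from their regular SQ
+ * completion.
+ */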
+static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb_id)
+{
+ struct skb_shared_hwtstamps hwts = {};
+ struct sk_buff *skb;
+
+ ptpsq->cq_stats->resync_event++;
+
+ while (skb_cc != skb_id) {
+ skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+ hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;
+ skb_tstamp_tx(skb, &hwts);
+ ptpsq->cq_stats->resync_cqe++;
+ skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
+ }
+}
+
static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
struct mlx5_cqe64 *cqe,
int budget)
{
- struct sk_buff *skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+ u16 skb_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
+ u16 skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
struct mlx5e_txqsq *sq = &ptpsq->txqsq;
+ struct sk_buff *skb;
ktime_t hwtstamp;
if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
ptpsq->cq_stats->err_cqe++;
goto out;
}
+ if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_cc, skb_id))
+ mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_cc, skb_id);
+
+ skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
hwtstamp, ptpsq->cq_stats);
@@ -241,6 +271,7 @@ static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq);
+ struct mlx5_core_dev *mdev = ptpsq->txqsq.mdev;
ptpsq->skb_fifo.fifo = kvzalloc_node(array_size(wq_sz, sizeof(*ptpsq->skb_fifo.fifo)),
GFP_KERNEL, numa);
@@ -250,7 +281,9 @@ static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
ptpsq->skb_fifo.pc = &ptpsq->skb_fifo_pc;
ptpsq->skb_fifo.cc = &ptpsq->skb_fifo_cc;
ptpsq->skb_fifo.mask = wq_sz - 1;
-
+ if (MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter))
+ ptpsq->ts_cqe_ctr_mask =
+ (1 << MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter)) - 1;
return 0;
}
@@ -591,7 +624,7 @@ static int mlx5e_ptp_set_state(struct mlx5e_ptp *c, struct mlx5e_params *params)
static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv)
{
- struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+ struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
if (!ptp_fs->valid)
return;
@@ -608,7 +641,7 @@ static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv)
static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
{
u32 tirn = mlx5e_rx_res_get_tirn_ptp(priv->rx_res);
- struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+ struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
struct mlx5_flow_handle *rule;
int err;
@@ -775,13 +808,13 @@ int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv)
if (!ptp_fs)
return -ENOMEM;
- priv->fs.ptp_fs = ptp_fs;
+ priv->fs->ptp_fs = ptp_fs;
return 0;
}
void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv)
{
- struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
+ struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index a71a32e00ebb..92dbbec472ec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -17,6 +17,7 @@ struct mlx5e_ptpsq {
u16 skb_fifo_pc;
struct mlx5e_skb_fifo skb_fifo;
struct mlx5e_ptp_cq_stats *cq_stats;
+ u16 ts_cqe_ctr_mask;
};
enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 9db677e9ca9c..2842195ee548 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -2,11 +2,16 @@
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#include <net/sch_generic.h>
+#include <net/pkt_cls.h>
#include "en.h"
#include "params.h"
#include "../qos.h"
+#include "en/htb.h"
-#define BYTES_IN_MBIT 125000
+struct qos_sq_callback_params {
+ struct mlx5e_priv *priv;
+ struct mlx5e_channels *chs;
+};
int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes)
{
@@ -28,124 +33,14 @@ int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev)
return min(MLX5E_QOS_MAX_LEAF_NODES, mlx5_qos_max_leaf_nodes(mdev));
}
-int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv)
-{
- int last = find_last_bit(priv->htb.qos_used_qids, mlx5e_qos_max_leaf_nodes(priv->mdev));
-
- return last == mlx5e_qos_max_leaf_nodes(priv->mdev) ? 0 : last + 1;
-}
-
-/* Software representation of the QoS tree (internal to this file) */
-
-static int mlx5e_find_unused_qos_qid(struct mlx5e_priv *priv)
-{
- int size = mlx5e_qos_max_leaf_nodes(priv->mdev);
- int res;
-
- WARN_ONCE(!mutex_is_locked(&priv->state_lock), "%s: state_lock is not held\n", __func__);
- res = find_first_zero_bit(priv->htb.qos_used_qids, size);
-
- return res == size ? -ENOSPC : res;
-}
-
-struct mlx5e_qos_node {
- struct hlist_node hnode;
- struct mlx5e_qos_node *parent;
- u64 rate;
- u32 bw_share;
- u32 max_average_bw;
- u32 hw_id;
- u32 classid; /* 16-bit, except root. */
- u16 qid;
-};
-
-#define MLX5E_QOS_QID_INNER 0xffff
-#define MLX5E_HTB_CLASSID_ROOT 0xffffffff
-
-static struct mlx5e_qos_node *
-mlx5e_sw_node_create_leaf(struct mlx5e_priv *priv, u16 classid, u16 qid,
- struct mlx5e_qos_node *parent)
-{
- struct mlx5e_qos_node *node;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return ERR_PTR(-ENOMEM);
-
- node->parent = parent;
-
- node->qid = qid;
- __set_bit(qid, priv->htb.qos_used_qids);
-
- node->classid = classid;
- hash_add_rcu(priv->htb.qos_tc2node, &node->hnode, classid);
-
- mlx5e_update_tx_netdev_queues(priv);
-
- return node;
-}
-
-static struct mlx5e_qos_node *mlx5e_sw_node_create_root(struct mlx5e_priv *priv)
-{
- struct mlx5e_qos_node *node;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return ERR_PTR(-ENOMEM);
-
- node->qid = MLX5E_QOS_QID_INNER;
- node->classid = MLX5E_HTB_CLASSID_ROOT;
- hash_add_rcu(priv->htb.qos_tc2node, &node->hnode, node->classid);
-
- return node;
-}
-
-static struct mlx5e_qos_node *mlx5e_sw_node_find(struct mlx5e_priv *priv, u32 classid)
-{
- struct mlx5e_qos_node *node = NULL;
-
- hash_for_each_possible(priv->htb.qos_tc2node, node, hnode, classid) {
- if (node->classid == classid)
- break;
- }
-
- return node;
-}
-
-static struct mlx5e_qos_node *mlx5e_sw_node_find_rcu(struct mlx5e_priv *priv, u32 classid)
-{
- struct mlx5e_qos_node *node = NULL;
-
- hash_for_each_possible_rcu(priv->htb.qos_tc2node, node, hnode, classid) {
- if (node->classid == classid)
- break;
- }
-
- return node;
-}
-
-static void mlx5e_sw_node_delete(struct mlx5e_priv *priv, struct mlx5e_qos_node *node)
-{
- hash_del_rcu(&node->hnode);
- if (node->qid != MLX5E_QOS_QID_INNER) {
- __clear_bit(node->qid, priv->htb.qos_used_qids);
- mlx5e_update_tx_netdev_queues(priv);
- }
- /* Make sure this qid is no longer selected by mlx5e_select_queue, so
- * that mlx5e_reactivate_qos_sq can safely restart the netdev TX queue.
- */
- synchronize_net();
- kfree(node);
-}
-
/* TX datapath API */
-static u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
+u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
{
/* These channel params are safe to access from the datapath, because:
- * 1. This function is called only after checking priv->htb.maj_id != 0,
+ * 1. This function is called only after checking selq->htb_maj_id != 0,
* and the number of queues can't change while HTB offload is active.
- * 2. When priv->htb.maj_id becomes 0, synchronize_rcu waits for
+ * 2. When selq->htb_maj_id becomes 0, synchronize_rcu waits for
* mlx5e_select_queue to finish while holding priv->state_lock,
* preventing other code from changing the number of queues.
*/
@@ -154,30 +49,7 @@ static u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
return (chs->params.num_channels + is_ptp) * mlx5e_get_dcb_num_tc(&chs->params) + qid;
}
-int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid)
-{
- struct mlx5e_qos_node *node;
- u16 qid;
- int res;
-
- rcu_read_lock();
-
- node = mlx5e_sw_node_find_rcu(priv, classid);
- if (!node) {
- res = -ENOENT;
- goto out;
- }
- qid = READ_ONCE(node->qid);
- if (qid == MLX5E_QOS_QID_INNER) {
- res = -EINVAL;
- goto out;
- }
- res = mlx5e_qid_from_qos(&priv->channels, qid);
-
-out:
- rcu_read_unlock();
- return res;
-}
+/* SQ lifecycle */
static struct mlx5e_txqsq *mlx5e_get_qos_sq(struct mlx5e_priv *priv, int qid)
{
@@ -194,10 +66,8 @@ static struct mlx5e_txqsq *mlx5e_get_qos_sq(struct mlx5e_priv *priv, int qid)
return mlx5e_state_dereference(priv, qos_sqs[qid]);
}
-/* SQ lifecycle */
-
-static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
- struct mlx5e_qos_node *node)
+int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
+ u16 node_qid, u32 hw_id)
{
struct mlx5e_create_cq_param ccp = {};
struct mlx5e_txqsq __rcu **qos_sqs;
@@ -210,13 +80,13 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs
params = &chs->params;
- txq_ix = mlx5e_qid_from_qos(chs, node->qid);
+ txq_ix = mlx5e_qid_from_qos(chs, node_qid);
- WARN_ON(node->qid > priv->htb.max_qos_sqs);
- if (node->qid == priv->htb.max_qos_sqs) {
+ WARN_ON(node_qid > priv->htb_max_qos_sqs);
+ if (node_qid == priv->htb_max_qos_sqs) {
struct mlx5e_sq_stats *stats, **stats_list = NULL;
- if (priv->htb.max_qos_sqs == 0) {
+ if (priv->htb_max_qos_sqs == 0) {
stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
sizeof(*stats_list),
GFP_KERNEL);
@@ -229,16 +99,16 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs
return -ENOMEM;
}
if (stats_list)
- WRITE_ONCE(priv->htb.qos_sq_stats, stats_list);
- WRITE_ONCE(priv->htb.qos_sq_stats[node->qid], stats);
- /* Order max_qos_sqs increment after writing the array pointer.
+ WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
+ WRITE_ONCE(priv->htb_qos_sq_stats[node_qid], stats);
+ /* Order htb_max_qos_sqs increment after writing the array pointer.
* Pairs with smp_load_acquire in en_stats.c.
*/
- smp_store_release(&priv->htb.max_qos_sqs, priv->htb.max_qos_sqs + 1);
+ smp_store_release(&priv->htb_max_qos_sqs, priv->htb_max_qos_sqs + 1);
}
- ix = node->qid % params->num_channels;
- qid = node->qid / params->num_channels;
+ ix = node_qid % params->num_channels;
+ qid = node_qid / params->num_channels;
c = chs->c[ix];
qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
@@ -257,8 +127,8 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs
if (err)
goto err_free_sq;
err = mlx5e_open_txqsq(c, priv->tisn[c->lag_port][0], txq_ix, params,
- &param_sq, sq, 0, node->hw_id,
- priv->htb.qos_sq_stats[node->qid]);
+ &param_sq, sq, 0, hw_id,
+ priv->htb_qos_sq_stats[node_qid]);
if (err)
goto err_close_cq;
@@ -273,14 +143,22 @@ err_free_sq:
return err;
}
-static void mlx5e_activate_qos_sq(struct mlx5e_priv *priv, struct mlx5e_qos_node *node)
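+/* Adapter matching the mlx5e_fp_htb_enumerate callback signature, so that
+ * QoS SQs can be opened by walking the HTB leaves with
+ * mlx5e_htb_enumerate_leaves().
+ */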
+static int mlx5e_open_qos_sq_cb_wrapper(void *data, u16 node_qid, u32 hw_id)
+{
+ struct qos_sq_callback_params *cb_params = data;
+
+ return mlx5e_open_qos_sq(cb_params->priv, cb_params->chs, node_qid, hw_id);
+}
+
+int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id)
{
+ struct mlx5e_priv *priv = data;
struct mlx5e_txqsq *sq;
u16 qid;
- sq = mlx5e_get_qos_sq(priv, node->qid);
+ sq = mlx5e_get_qos_sq(priv, node_qid);
- qid = mlx5e_qid_from_qos(&priv->channels, node->qid);
+ qid = mlx5e_qid_from_qos(&priv->channels, node_qid);
/* If it's a new queue, it will be marked as started at this point.
* Stop it before updating txq2sq.
@@ -295,11 +173,13 @@ static void mlx5e_activate_qos_sq(struct mlx5e_priv *priv, struct mlx5e_qos_node
*/
smp_wmb();
- qos_dbg(priv->mdev, "Activate QoS SQ qid %u\n", node->qid);
+ qos_dbg(priv->mdev, "Activate QoS SQ qid %u\n", node_qid);
mlx5e_activate_txqsq(sq);
+
+ return 0;
}
-static void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
+void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
{
struct mlx5e_txqsq *sq;
@@ -319,7 +199,7 @@ static void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
smp_wmb();
}
-static void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid)
+void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid)
{
struct mlx5e_txqsq __rcu **qos_sqs;
struct mlx5e_params *params;
@@ -369,7 +249,7 @@ void mlx5e_qos_close_queues(struct mlx5e_channel *c)
kvfree(qos_sqs);
}
-static void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs)
+void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs)
{
int i;
@@ -377,7 +257,7 @@ static void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs)
mlx5e_qos_close_queues(chs->c[i]);
}
-static int mlx5e_qos_alloc_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
+int mlx5e_qos_alloc_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
u16 qos_sqs_size;
int i;
@@ -413,24 +293,20 @@ err_free:
int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
- struct mlx5e_qos_node *node = NULL;
- int bkt, err;
-
- if (!priv->htb.maj_id)
- return 0;
+ struct qos_sq_callback_params callback_params;
+ int err;
err = mlx5e_qos_alloc_queues(priv, chs);
if (err)
return err;
- hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode) {
- if (node->qid == MLX5E_QOS_QID_INNER)
- continue;
- err = mlx5e_open_qos_sq(priv, chs, node);
- if (err) {
- mlx5e_qos_close_all_queues(chs);
- return err;
- }
+ callback_params.priv = priv;
+ callback_params.chs = chs;
+
+ err = mlx5e_htb_enumerate_leaves(priv->htb, mlx5e_open_qos_sq_cb_wrapper, &callback_params);
+ if (err) {
+ mlx5e_qos_close_all_queues(chs);
+ return err;
}
return 0;
@@ -438,14 +314,7 @@ int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
void mlx5e_qos_activate_queues(struct mlx5e_priv *priv)
{
- struct mlx5e_qos_node *node = NULL;
- int bkt;
-
- hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode) {
- if (node->qid == MLX5E_QOS_QID_INNER)
- continue;
- mlx5e_activate_qos_sq(priv, node);
- }
+ mlx5e_htb_enumerate_leaves(priv->htb, mlx5e_activate_qos_sq, priv);
}
void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c)
@@ -474,7 +343,7 @@ void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c)
}
}
-static void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs)
+void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs)
{
int i;
@@ -482,293 +351,14 @@ static void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs)
mlx5e_qos_deactivate_queues(chs->c[i]);
}
-/* HTB API */
-
-int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls,
- struct netlink_ext_ack *extack)
-{
- struct mlx5e_qos_node *root;
- bool opened;
- int err;
-
- qos_dbg(priv->mdev, "TC_HTB_CREATE handle %04x:, default :%04x\n", htb_maj_id, htb_defcls);
-
- if (!mlx5_qos_is_supported(priv->mdev)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
- return -EOPNOTSUPP;
- }
-
- opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (opened) {
- mlx5e_selq_prepare(&priv->selq, &priv->channels.params, true);
-
- err = mlx5e_qos_alloc_queues(priv, &priv->channels);
- if (err)
- goto err_cancel_selq;
- }
-
- root = mlx5e_sw_node_create_root(priv);
- if (IS_ERR(root)) {
- err = PTR_ERR(root);
- goto err_free_queues;
- }
-
- err = mlx5_qos_create_root_node(priv->mdev, &root->hw_id);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Firmware error. Try upgrading firmware.");
- goto err_sw_node_delete;
- }
-
- WRITE_ONCE(priv->htb.defcls, htb_defcls);
- /* Order maj_id after defcls - pairs with
- * mlx5e_select_queue/mlx5e_select_htb_queues.
- */
- smp_store_release(&priv->htb.maj_id, htb_maj_id);
-
- if (opened)
- mlx5e_selq_apply(&priv->selq);
-
- return 0;
-
-err_sw_node_delete:
- mlx5e_sw_node_delete(priv, root);
-
-err_free_queues:
- if (opened)
- mlx5e_qos_close_all_queues(&priv->channels);
-err_cancel_selq:
- mlx5e_selq_cancel(&priv->selq);
- return err;
-}
-
-int mlx5e_htb_root_del(struct mlx5e_priv *priv)
-{
- struct mlx5e_qos_node *root;
- int err;
-
- qos_dbg(priv->mdev, "TC_HTB_DESTROY\n");
-
- /* Wait until real_num_tx_queues is updated for mlx5e_select_queue,
- * so that we can safely switch to its non-HTB non-PTP fastpath.
- */
- synchronize_net();
-
- mlx5e_selq_prepare(&priv->selq, &priv->channels.params, false);
- mlx5e_selq_apply(&priv->selq);
-
- WRITE_ONCE(priv->htb.maj_id, 0);
-
- root = mlx5e_sw_node_find(priv, MLX5E_HTB_CLASSID_ROOT);
- if (!root) {
- qos_err(priv->mdev, "Failed to find the root node in the QoS tree\n");
- return -ENOENT;
- }
- err = mlx5_qos_destroy_node(priv->mdev, root->hw_id);
- if (err)
- qos_err(priv->mdev, "Failed to destroy root node %u, err = %d\n",
- root->hw_id, err);
- mlx5e_sw_node_delete(priv, root);
-
- mlx5e_qos_deactivate_all_queues(&priv->channels);
- mlx5e_qos_close_all_queues(&priv->channels);
-
- return err;
-}
-
-static int mlx5e_htb_convert_rate(struct mlx5e_priv *priv, u64 rate,
- struct mlx5e_qos_node *parent, u32 *bw_share)
-{
- u64 share = 0;
-
- while (parent->classid != MLX5E_HTB_CLASSID_ROOT && !parent->max_average_bw)
- parent = parent->parent;
-
- if (parent->max_average_bw)
- share = div64_u64(div_u64(rate * 100, BYTES_IN_MBIT),
- parent->max_average_bw);
- else
- share = 101;
-
- *bw_share = share == 0 ? 1 : share > 100 ? 0 : share;
-
- qos_dbg(priv->mdev, "Convert: rate %llu, parent ceil %llu -> bw_share %u\n",
- rate, (u64)parent->max_average_bw * BYTES_IN_MBIT, *bw_share);
-
- return 0;
-}
-
-static void mlx5e_htb_convert_ceil(struct mlx5e_priv *priv, u64 ceil, u32 *max_average_bw)
-{
- /* Hardware treats 0 as "unlimited", set at least 1. */
- *max_average_bw = max_t(u32, div_u64(ceil, BYTES_IN_MBIT), 1);
-
- qos_dbg(priv->mdev, "Convert: ceil %llu -> max_average_bw %u\n",
- ceil, *max_average_bw);
-}
-
-int mlx5e_htb_leaf_alloc_queue(struct mlx5e_priv *priv, u16 classid,
- u32 parent_classid, u64 rate, u64 ceil,
- struct netlink_ext_ack *extack)
-{
- struct mlx5e_qos_node *node, *parent;
- int qid;
- int err;
-
- qos_dbg(priv->mdev, "TC_HTB_LEAF_ALLOC_QUEUE classid %04x, parent %04x, rate %llu, ceil %llu\n",
- classid, parent_classid, rate, ceil);
-
- qid = mlx5e_find_unused_qos_qid(priv);
- if (qid < 0) {
- NL_SET_ERR_MSG_MOD(extack, "Maximum amount of leaf classes is reached.");
- return qid;
- }
-
- parent = mlx5e_sw_node_find(priv, parent_classid);
- if (!parent)
- return -EINVAL;
-
- node = mlx5e_sw_node_create_leaf(priv, classid, qid, parent);
- if (IS_ERR(node))
- return PTR_ERR(node);
-
- node->rate = rate;
- mlx5e_htb_convert_rate(priv, rate, node->parent, &node->bw_share);
- mlx5e_htb_convert_ceil(priv, ceil, &node->max_average_bw);
-
- err = mlx5_qos_create_leaf_node(priv->mdev, node->parent->hw_id,
- node->bw_share, node->max_average_bw,
- &node->hw_id);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
- qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
- classid, err);
- mlx5e_sw_node_delete(priv, node);
- return err;
- }
-
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- err = mlx5e_open_qos_sq(priv, &priv->channels, node);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
- qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
- classid, err);
- } else {
- mlx5e_activate_qos_sq(priv, node);
- }
- }
-
- return mlx5e_qid_from_qos(&priv->channels, node->qid);
-}
-
-int mlx5e_htb_leaf_to_inner(struct mlx5e_priv *priv, u16 classid, u16 child_classid,
- u64 rate, u64 ceil, struct netlink_ext_ack *extack)
-{
- struct mlx5e_qos_node *node, *child;
- int err, tmp_err;
- u32 new_hw_id;
- u16 qid;
-
- qos_dbg(priv->mdev, "TC_HTB_LEAF_TO_INNER classid %04x, upcoming child %04x, rate %llu, ceil %llu\n",
- classid, child_classid, rate, ceil);
-
- node = mlx5e_sw_node_find(priv, classid);
- if (!node)
- return -ENOENT;
-
- err = mlx5_qos_create_inner_node(priv->mdev, node->parent->hw_id,
- node->bw_share, node->max_average_bw,
- &new_hw_id);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating an inner node.");
- qos_err(priv->mdev, "Failed to create an inner node (class %04x), err = %d\n",
- classid, err);
- return err;
- }
-
- /* Intentionally reuse the qid for the upcoming first child. */
- child = mlx5e_sw_node_create_leaf(priv, child_classid, node->qid, node);
- if (IS_ERR(child)) {
- err = PTR_ERR(child);
- goto err_destroy_hw_node;
- }
-
- child->rate = rate;
- mlx5e_htb_convert_rate(priv, rate, node, &child->bw_share);
- mlx5e_htb_convert_ceil(priv, ceil, &child->max_average_bw);
-
- err = mlx5_qos_create_leaf_node(priv->mdev, new_hw_id, child->bw_share,
- child->max_average_bw, &child->hw_id);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
- qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
- classid, err);
- goto err_delete_sw_node;
- }
-
- /* No fail point. */
-
- qid = node->qid;
- /* Pairs with mlx5e_get_txq_by_classid. */
- WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
-
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- mlx5e_deactivate_qos_sq(priv, qid);
- mlx5e_close_qos_sq(priv, qid);
- }
-
- err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
- if (err) /* Not fatal. */
- qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
- node->hw_id, classid, err);
-
- node->hw_id = new_hw_id;
-
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- err = mlx5e_open_qos_sq(priv, &priv->channels, child);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
- qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
- classid, err);
- } else {
- mlx5e_activate_qos_sq(priv, child);
- }
- }
-
- return 0;
-
-err_delete_sw_node:
- child->qid = MLX5E_QOS_QID_INNER;
- mlx5e_sw_node_delete(priv, child);
-
-err_destroy_hw_node:
- tmp_err = mlx5_qos_destroy_node(priv->mdev, new_hw_id);
- if (tmp_err) /* Not fatal. */
- qos_warn(priv->mdev, "Failed to roll back creation of an inner node %u (class %04x), err = %d\n",
- new_hw_id, classid, tmp_err);
- return err;
-}
-
-static struct mlx5e_qos_node *mlx5e_sw_node_find_by_qid(struct mlx5e_priv *priv, u16 qid)
-{
- struct mlx5e_qos_node *node = NULL;
- int bkt;
-
- hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode)
- if (node->qid == qid)
- break;
-
- return node;
-}
-
-static void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq)
+void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq)
{
qos_dbg(priv->mdev, "Reactivate QoS SQ qid %u\n", qid);
netdev_tx_reset_queue(txq);
netif_tx_start_queue(txq);
}
-static void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
+void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
{
struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid);
struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
@@ -781,251 +371,65 @@ static void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
spin_unlock_bh(qdisc_lock(qdisc));
}
-int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 *classid,
- struct netlink_ext_ack *extack)
+int mlx5e_htb_setup_tc(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb_qopt)
{
- struct mlx5e_qos_node *node;
- struct netdev_queue *txq;
- u16 qid, moved_qid;
- bool opened;
- int err;
-
- qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid);
-
- node = mlx5e_sw_node_find(priv, *classid);
- if (!node)
- return -ENOENT;
-
- /* Store qid for reuse. */
- qid = node->qid;
-
- opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (opened) {
- txq = netdev_get_tx_queue(priv->netdev,
- mlx5e_qid_from_qos(&priv->channels, qid));
- mlx5e_deactivate_qos_sq(priv, qid);
- mlx5e_close_qos_sq(priv, qid);
- }
-
- err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
- if (err) /* Not fatal. */
- qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
- node->hw_id, *classid, err);
-
- mlx5e_sw_node_delete(priv, node);
-
- moved_qid = mlx5e_qos_cur_leaf_nodes(priv);
-
- if (moved_qid == 0) {
- /* The last QoS SQ was just destroyed. */
- if (opened)
- mlx5e_reactivate_qos_sq(priv, qid, txq);
- return 0;
- }
- moved_qid--;
-
- if (moved_qid < qid) {
- /* The highest QoS SQ was just destroyed. */
- WARN(moved_qid != qid - 1, "Gaps in queue numeration: destroyed queue %u, the highest queue is %u",
- qid, moved_qid);
- if (opened)
- mlx5e_reactivate_qos_sq(priv, qid, txq);
- return 0;
- }
-
- WARN(moved_qid == qid, "Can't move node with qid %u to itself", qid);
- qos_dbg(priv->mdev, "Moving QoS SQ %u to %u\n", moved_qid, qid);
-
- node = mlx5e_sw_node_find_by_qid(priv, moved_qid);
- WARN(!node, "Could not find a node with qid %u to move to queue %u",
- moved_qid, qid);
-
- /* Stop traffic to the old queue. */
- WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
- __clear_bit(moved_qid, priv->htb.qos_used_qids);
-
- if (opened) {
- txq = netdev_get_tx_queue(priv->netdev,
- mlx5e_qid_from_qos(&priv->channels, moved_qid));
- mlx5e_deactivate_qos_sq(priv, moved_qid);
- mlx5e_close_qos_sq(priv, moved_qid);
- }
-
- /* Prevent packets from the old class from getting into the new one. */
- mlx5e_reset_qdisc(priv->netdev, moved_qid);
-
- __set_bit(qid, priv->htb.qos_used_qids);
- WRITE_ONCE(node->qid, qid);
-
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- err = mlx5e_open_qos_sq(priv, &priv->channels, node);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
- qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x) while moving qid %u to %u, err = %d\n",
- node->classid, moved_qid, qid, err);
- } else {
- mlx5e_activate_qos_sq(priv, node);
- }
- }
-
- mlx5e_update_tx_netdev_queues(priv);
- if (opened)
- mlx5e_reactivate_qos_sq(priv, moved_qid, txq);
-
- *classid = node->classid;
- return 0;
-}
-
-int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
- struct netlink_ext_ack *extack)
-{
- struct mlx5e_qos_node *node, *parent;
- u32 old_hw_id, new_hw_id;
- int err, saved_err = 0;
- u16 qid;
-
- qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL_LAST%s classid %04x\n",
- force ? "_FORCE" : "", classid);
-
- node = mlx5e_sw_node_find(priv, classid);
- if (!node)
- return -ENOENT;
-
- err = mlx5_qos_create_leaf_node(priv->mdev, node->parent->parent->hw_id,
- node->parent->bw_share,
- node->parent->max_average_bw,
- &new_hw_id);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
- qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
- classid, err);
- if (!force)
- return err;
- saved_err = err;
- }
-
- /* Store qid for reuse and prevent clearing the bit. */
- qid = node->qid;
- /* Pairs with mlx5e_get_txq_by_classid. */
- WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
-
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- mlx5e_deactivate_qos_sq(priv, qid);
- mlx5e_close_qos_sq(priv, qid);
- }
-
- /* Prevent packets from the old class from getting into the new one. */
- mlx5e_reset_qdisc(priv->netdev, qid);
-
- err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
- if (err) /* Not fatal. */
- qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
- node->hw_id, classid, err);
-
- parent = node->parent;
- mlx5e_sw_node_delete(priv, node);
+ struct mlx5e_htb *htb = priv->htb;
+ int res;
- node = parent;
- WRITE_ONCE(node->qid, qid);
+ if (!htb && htb_qopt->command != TC_HTB_CREATE)
+ return -EINVAL;
- /* Early return on error in force mode. Parent will still be an inner
- * node to be deleted by a following delete operation.
- */
- if (saved_err)
- return saved_err;
-
- old_hw_id = node->hw_id;
- node->hw_id = new_hw_id;
-
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- err = mlx5e_open_qos_sq(priv, &priv->channels, node);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
- qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
- classid, err);
- } else {
- mlx5e_activate_qos_sq(priv, node);
+ switch (htb_qopt->command) {
+ case TC_HTB_CREATE:
+ if (!mlx5_qos_is_supported(priv->mdev)) {
+ NL_SET_ERR_MSG_MOD(htb_qopt->extack,
+ "Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
+ return -EOPNOTSUPP;
}
- }
-
- err = mlx5_qos_destroy_node(priv->mdev, old_hw_id);
- if (err) /* Not fatal. */
- qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
- node->hw_id, classid, err);
-
- return 0;
-}
-
-static int mlx5e_qos_update_children(struct mlx5e_priv *priv, struct mlx5e_qos_node *node,
- struct netlink_ext_ack *extack)
-{
- struct mlx5e_qos_node *child;
- int err = 0;
- int bkt;
-
- hash_for_each(priv->htb.qos_tc2node, bkt, child, hnode) {
- u32 old_bw_share = child->bw_share;
- int err_one;
-
- if (child->parent != node)
- continue;
-
- mlx5e_htb_convert_rate(priv, child->rate, node, &child->bw_share);
- if (child->bw_share == old_bw_share)
- continue;
-
- err_one = mlx5_qos_update_node(priv->mdev, child->hw_id, child->bw_share,
- child->max_average_bw, child->hw_id);
- if (!err && err_one) {
- err = err_one;
-
- NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a child node.");
- qos_err(priv->mdev, "Failed to modify a child node (class %04x), err = %d\n",
- node->classid, err);
+ priv->htb = mlx5e_htb_alloc();
+ htb = priv->htb;
+ if (!htb)
+ return -ENOMEM;
+ res = mlx5e_htb_init(htb, htb_qopt, priv->netdev, priv->mdev, &priv->selq, priv);
+ if (res) {
+ mlx5e_htb_free(htb);
+ priv->htb = NULL;
}
+ return res;
+ case TC_HTB_DESTROY:
+ mlx5e_htb_cleanup(htb);
+ mlx5e_htb_free(htb);
+ priv->htb = NULL;
+ return 0;
+ case TC_HTB_LEAF_ALLOC_QUEUE:
+ res = mlx5e_htb_leaf_alloc_queue(htb, htb_qopt->classid, htb_qopt->parent_classid,
+ htb_qopt->rate, htb_qopt->ceil, htb_qopt->extack);
+ if (res < 0)
+ return res;
+ htb_qopt->qid = res;
+ return 0;
+ case TC_HTB_LEAF_TO_INNER:
+ return mlx5e_htb_leaf_to_inner(htb, htb_qopt->parent_classid, htb_qopt->classid,
+ htb_qopt->rate, htb_qopt->ceil, htb_qopt->extack);
+ case TC_HTB_LEAF_DEL:
+ return mlx5e_htb_leaf_del(htb, &htb_qopt->classid, htb_qopt->extack);
+ case TC_HTB_LEAF_DEL_LAST:
+ case TC_HTB_LEAF_DEL_LAST_FORCE:
+ return mlx5e_htb_leaf_del_last(htb, htb_qopt->classid,
+ htb_qopt->command == TC_HTB_LEAF_DEL_LAST_FORCE,
+ htb_qopt->extack);
+ case TC_HTB_NODE_MODIFY:
+ return mlx5e_htb_node_modify(htb, htb_qopt->classid, htb_qopt->rate, htb_qopt->ceil,
+ htb_qopt->extack);
+ case TC_HTB_LEAF_QUERY_QUEUE:
+ res = mlx5e_htb_get_txq_by_classid(htb, htb_qopt->classid);
+ if (res < 0)
+ return res;
+ htb_qopt->qid = res;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
}
-
- return err;
-}
-
-int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
- struct netlink_ext_ack *extack)
-{
- u32 bw_share, max_average_bw;
- struct mlx5e_qos_node *node;
- bool ceil_changed = false;
- int err;
-
- qos_dbg(priv->mdev, "TC_HTB_LEAF_MODIFY classid %04x, rate %llu, ceil %llu\n",
- classid, rate, ceil);
-
- node = mlx5e_sw_node_find(priv, classid);
- if (!node)
- return -ENOENT;
-
- node->rate = rate;
- mlx5e_htb_convert_rate(priv, rate, node->parent, &bw_share);
- mlx5e_htb_convert_ceil(priv, ceil, &max_average_bw);
-
- err = mlx5_qos_update_node(priv->mdev, node->parent->hw_id, bw_share,
- max_average_bw, node->hw_id);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a node.");
- qos_err(priv->mdev, "Failed to modify a node (class %04x), err = %d\n",
- classid, err);
- return err;
- }
-
- if (max_average_bw != node->max_average_bw)
- ceil_changed = true;
-
- node->bw_share = bw_share;
- node->max_average_bw = max_average_bw;
-
- if (ceil_changed)
- err = mlx5e_qos_update_children(priv, node, extack);
-
- return err;
}
struct mlx5e_mqprio_rl {
@@ -1111,3 +515,4 @@ int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_i
*hw_id = rl->leaves_id[tc];
return 0;
}
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
index 5d9bd91d86c2..4947afa23b73 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
@@ -6,40 +6,39 @@
#include <linux/mlx5/driver.h>
-#define MLX5E_QOS_MAX_LEAF_NODES 256
+#define BYTES_IN_MBIT 125000
struct mlx5e_priv;
+struct mlx5e_htb;
struct mlx5e_channels;
struct mlx5e_channel;
+struct tc_htb_qopt_offload;
int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes);
int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev);
-int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv);
-
-/* TX datapath API */
-int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid);
/* SQ lifecycle */
+int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
+ u16 node_qid, u32 hw_id);
+int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id);
+void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid);
+void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid);
+void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq);
+void mlx5e_reset_qdisc(struct net_device *dev, u16 qid);
+
int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs);
void mlx5e_qos_activate_queues(struct mlx5e_priv *priv);
void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c);
+void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs);
void mlx5e_qos_close_queues(struct mlx5e_channel *c);
+void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs);
+int mlx5e_qos_alloc_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs);
+
+/* TX datapath API */
+u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid);
/* HTB API */
-int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls,
- struct netlink_ext_ack *extack);
-int mlx5e_htb_root_del(struct mlx5e_priv *priv);
-int mlx5e_htb_leaf_alloc_queue(struct mlx5e_priv *priv, u16 classid,
- u32 parent_classid, u64 rate, u64 ceil,
- struct netlink_ext_ack *extack);
-int mlx5e_htb_leaf_to_inner(struct mlx5e_priv *priv, u16 classid, u16 child_classid,
- u64 rate, u64 ceil, struct netlink_ext_ack *extack);
-int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 *classid,
- struct netlink_ext_ack *extack);
-int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
- struct netlink_ext_ack *extack);
-int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
- struct netlink_ext_ack *extack);
+int mlx5e_htb_setup_tc(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb);
/* MQPRIO TX rate limit */
struct mlx5e_mqprio_rl;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 48dc121b2cb4..39ef2a2561a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -269,6 +269,12 @@ mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
err = mlx5_esw_bridge_vlan_filtering_set(vport_num, esw_owner_vhca_id,
attr->u.vlan_filtering, br_offloads);
break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
+ err = mlx5_esw_bridge_vlan_proto_set(vport_num,
+ esw_owner_vhca_id,
+ attr->u.vlan_protocol,
+ br_offloads);
+ break;
default:
err = -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index 86fa0bdbee36..fac7e3ff2674 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -21,6 +21,7 @@
#include "en/tc/sample.h"
#include "en_accel/ipsec_rxtx.h"
#include "en/tc/int_port.h"
+#include "en/tc/act/act.h"
struct mlx5e_rep_indr_block_priv {
struct net_device *netdev;
@@ -511,6 +512,120 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
return 0;
}
+static int
+mlx5e_rep_indr_replace_act(struct mlx5e_rep_priv *rpriv,
+ struct flow_offload_action *fl_act)
+{
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ enum mlx5_flow_namespace_type ns_type;
+ struct flow_action_entry *action;
+ struct mlx5e_tc_act *act;
+ bool add = false;
+ int i;
+
+	/* There is currently no use case for more than one action (e.g. pedit).
+	 * When one arises, cleanup of multiple actions on error will need to be
+	 * handled.
+	 */
+ if (!flow_offload_has_one_action(&fl_act->action))
+ return -EOPNOTSUPP;
+
+ if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
+ ns_type = MLX5_FLOW_NAMESPACE_FDB;
+ else
+ ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
+
+ flow_action_for_each(i, action, &fl_act->action) {
+ act = mlx5e_tc_act_get(action->id, ns_type);
+ if (!act)
+ continue;
+
+ if (!act->offload_action)
+ continue;
+
+ if (!act->offload_action(priv, fl_act, action))
+ add = true;
+ }
+
+ return add ? 0 : -EOPNOTSUPP;
+}
+
+static int
+mlx5e_rep_indr_destroy_act(struct mlx5e_rep_priv *rpriv,
+ struct flow_offload_action *fl_act)
+{
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5e_tc_act *act;
+
+ if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
+ ns_type = MLX5_FLOW_NAMESPACE_FDB;
+ else
+ ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
+
+ act = mlx5e_tc_act_get(fl_act->id, ns_type);
+ if (!act || !act->destroy_action)
+ return -EOPNOTSUPP;
+
+ return act->destroy_action(priv, fl_act);
+}
+
+static int
+mlx5e_rep_indr_stats_act(struct mlx5e_rep_priv *rpriv,
+ struct flow_offload_action *fl_act)
+{
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5e_tc_act *act;
+
+ if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
+ ns_type = MLX5_FLOW_NAMESPACE_FDB;
+ else
+ ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
+
+ act = mlx5e_tc_act_get(fl_act->id, ns_type);
+ if (!act || !act->stats_action)
+ return -EOPNOTSUPP;
+
+ return act->stats_action(priv, fl_act);
+}
+
+static int
+mlx5e_rep_indr_setup_act(struct mlx5e_rep_priv *rpriv,
+ struct flow_offload_action *fl_act)
+{
+ switch (fl_act->command) {
+ case FLOW_ACT_REPLACE:
+ return mlx5e_rep_indr_replace_act(rpriv, fl_act);
+ case FLOW_ACT_DESTROY:
+ return mlx5e_rep_indr_destroy_act(rpriv, fl_act);
+ case FLOW_ACT_STATS:
+ return mlx5e_rep_indr_stats_act(rpriv, fl_act);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+mlx5e_rep_indr_no_dev_setup(struct mlx5e_rep_priv *rpriv,
+ enum tc_setup_type type,
+ void *data)
+{
+ if (!data)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_ACT:
+ return mlx5e_rep_indr_setup_act(rpriv, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static
int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
enum tc_setup_type type, void *type_data,
@@ -518,7 +633,7 @@ int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *
void (*cleanup)(struct flow_block_cb *block_cb))
{
if (!netdev)
- return -EOPNOTSUPP;
+ return mlx5e_rep_indr_no_dev_setup(cb_priv, type, data);
switch (type) {
case TC_SETUP_BLOCK:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
index d98a277eb7f8..f675b1926340 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
@@ -7,6 +7,7 @@
#include <linux/rcupdate.h>
#include "en.h"
#include "en/ptp.h"
+#include "en/htb.h"
struct mlx5e_selq_params {
unsigned int num_regular_queues;
@@ -19,6 +20,8 @@ struct mlx5e_selq_params {
bool is_ptp : 1;
};
};
+ u16 htb_maj_id;
+ u16 htb_defcls;
};
int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
@@ -44,6 +47,8 @@ int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
.num_tcs = 1,
.is_htb = false,
.is_ptp = false,
+ .htb_maj_id = 0,
+ .htb_defcls = 0,
};
rcu_assign_pointer(selq->active, init_params);
@@ -64,21 +69,50 @@ void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
selq->standby = NULL;
}
-void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bool htb)
+void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params)
{
+ struct mlx5e_selq_params *selq_active;
+
lockdep_assert_held(selq->state_lock);
WARN_ON_ONCE(selq->is_prepared);
selq->is_prepared = true;
+ selq_active = rcu_dereference_protected(selq->active,
+ lockdep_is_held(selq->state_lock));
+ *selq->standby = *selq_active;
selq->standby->num_channels = params->num_channels;
selq->standby->num_tcs = mlx5e_get_dcb_num_tc(params);
selq->standby->num_regular_queues =
selq->standby->num_channels * selq->standby->num_tcs;
- selq->standby->is_htb = htb;
selq->standby->is_ptp = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS);
}
+bool mlx5e_selq_is_htb_enabled(struct mlx5e_selq *selq)
+{
+ struct mlx5e_selq_params *selq_active =
+ rcu_dereference_protected(selq->active, lockdep_is_held(selq->state_lock));
+
+ return selq_active->htb_maj_id;
+}
+
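+/* Stage the HTB state in the standby selq parameters; a zero htb_maj_id
+ * disables HTB classification in mlx5e_select_queue once applied.
+ */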
+void mlx5e_selq_prepare_htb(struct mlx5e_selq *selq, u16 htb_maj_id, u16 htb_defcls)
+{
+ struct mlx5e_selq_params *selq_active;
+
+ lockdep_assert_held(selq->state_lock);
+ WARN_ON_ONCE(selq->is_prepared);
+
+ selq->is_prepared = true;
+
+ selq_active = rcu_dereference_protected(selq->active,
+ lockdep_is_held(selq->state_lock));
+ *selq->standby = *selq_active;
+ selq->standby->is_htb = htb_maj_id;
+ selq->standby->htb_maj_id = htb_maj_id;
+ selq->standby->htb_defcls = htb_defcls;
+}
+
void mlx5e_selq_apply(struct mlx5e_selq *selq)
{
struct mlx5e_selq_params *old_params;
@@ -137,20 +171,21 @@ static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb,
return selq->num_regular_queues + up;
}
-static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb)
+static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5e_selq_params *selq)
{
u16 classid;
/* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
- if ((TC_H_MAJ(skb->priority) >> 16) == smp_load_acquire(&priv->htb.maj_id))
+ if ((TC_H_MAJ(skb->priority) >> 16) == selq->htb_maj_id)
classid = TC_H_MIN(skb->priority);
else
- classid = READ_ONCE(priv->htb.defcls);
+ classid = selq->htb_defcls;
if (!classid)
return 0;
- return mlx5e_get_txq_by_classid(priv, classid);
+ return mlx5e_htb_get_txq_by_classid(priv->htb, classid);
}
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
@@ -187,10 +222,10 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
up * selq->num_channels;
}
- if (unlikely(selq->is_htb)) {
+ if (unlikely(selq->htb_maj_id)) {
/* num_tcs == 1, shortcut for PTP */
- txq_ix = mlx5e_select_htb_queue(priv, skb);
+ txq_ix = mlx5e_select_htb_queue(priv, skb, selq);
if (txq_ix > 0)
return txq_ix;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.h b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.h
index 6c070141d8f1..fd590f80e4d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.h
@@ -21,7 +21,9 @@ struct sk_buff;
int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock);
void mlx5e_selq_cleanup(struct mlx5e_selq *selq);
-void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bool htb);
+void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params);
+void mlx5e_selq_prepare_htb(struct mlx5e_selq *selq, u16 htb_maj_id, u16 htb_defcls);
+bool mlx5e_selq_is_htb_enabled(struct mlx5e_selq *selq);
void mlx5e_selq_apply(struct mlx5e_selq *selq);
void mlx5e_selq_cancel(struct mlx5e_selq *selq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
index 2755c25ba324..305fde62a78d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
@@ -30,7 +30,7 @@ static struct mlx5e_tc_act *tc_acts_fdb[NUM_FLOW_ACTIONS] = {
NULL, /* FLOW_ACTION_WAKE, */
NULL, /* FLOW_ACTION_QUEUE, */
&mlx5e_tc_act_sample,
- NULL, /* FLOW_ACTION_POLICE, */
+ &mlx5e_tc_act_police,
&mlx5e_tc_act_ct,
NULL, /* FLOW_ACTION_CT_METADATA, */
&mlx5e_tc_act_mpls_push,
@@ -106,8 +106,8 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
{
memset(parse_state, 0, sizeof(*parse_state));
parse_state->flow = flow;
- parse_state->num_actions = flow_action->num_entries;
parse_state->extack = extack;
+ parse_state->flow_action = flow_action;
}
void
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
index f34714c5ddd4..e1570ff056ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
@@ -13,7 +13,7 @@
struct mlx5_flow_attr;
struct mlx5e_tc_act_parse_state {
- unsigned int num_actions;
+ struct flow_action *flow_action;
struct mlx5e_tc_flow *flow;
struct netlink_ext_ack *extack;
u32 actions;
@@ -50,6 +50,16 @@ struct mlx5e_tc_act {
bool (*is_multi_table_act)(struct mlx5e_priv *priv,
const struct flow_action_entry *act,
struct mlx5_flow_attr *attr);
+
+ int (*offload_action)(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act,
+ struct flow_action_entry *act);
+
+ int (*destroy_action)(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act);
+
+ int (*stats_action)(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act);
};
struct mlx5e_tc_flow_action {
@@ -76,6 +86,7 @@ extern struct mlx5e_tc_act mlx5e_tc_act_ct;
extern struct mlx5e_tc_act mlx5e_tc_act_sample;
extern struct mlx5e_tc_act mlx5e_tc_act_ptype;
extern struct mlx5e_tc_act mlx5e_tc_act_redirect_ingress;
+extern struct mlx5e_tc_act mlx5e_tc_act_police;
struct mlx5e_tc_act *
mlx5e_tc_act_get(enum flow_action_id act_id,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
index 4726bcb46eec..69949ab830b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
@@ -21,7 +21,7 @@ validate_goto_chain(struct mlx5e_priv *priv,
u32 max_chain;
esw = priv->mdev->priv.eswitch;
- chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(priv);
+ chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(priv->fs->tc);
max_chain = mlx5_chains_get_chain_range(chains);
reformat_and_fwd = is_esw ?
MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
new file mode 100644
index 000000000000..37522352e4b2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "act.h"
+#include "en/tc_priv.h"
+
+static bool
+tc_act_can_offload_police(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index,
+ struct mlx5_flow_attr *attr)
+{
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(parse_state->extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return false;
+ }
+ if (mlx5e_policer_validate(parse_state->flow_action, act,
+ parse_state->extack))
+ return false;
+
+ return !!mlx5e_get_flow_meters(parse_state->flow->priv->mdev);
+}
+
+static int
+fill_meter_params_from_act(const struct flow_action_entry *act,
+ struct mlx5e_flow_meter_params *params)
+{
+ params->index = act->hw_index;
+ if (act->police.rate_bytes_ps) {
+ params->mode = MLX5_RATE_LIMIT_BPS;
+ /* change rate to bits per second */
+ params->rate = act->police.rate_bytes_ps << 3;
+ params->burst = act->police.burst;
+ } else if (act->police.rate_pkt_ps) {
+ params->mode = MLX5_RATE_LIMIT_PPS;
+ params->rate = act->police.rate_pkt_ps;
+ params->burst = act->police.burst_pkt;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+tc_act_parse_police(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ int err;
+
+ err = fill_meter_params_from_act(act, &attr->meter_attr.params);
+ if (err)
+ return err;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO;
+ attr->exe_aso_type = MLX5_EXE_ASO_FLOW_METER;
+
+ return 0;
+}
+
+static bool
+tc_act_is_multi_table_act_police(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act,
+ struct mlx5_flow_attr *attr)
+{
+ return true;
+}
+
+static int
+tc_act_police_offload(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act,
+ struct flow_action_entry *act)
+{
+ struct mlx5e_flow_meter_params params = {};
+ struct mlx5e_flow_meter_handle *meter;
+ int err = 0;
+
+ err = fill_meter_params_from_act(act, &params);
+ if (err)
+ return err;
+
+ meter = mlx5e_tc_meter_get(priv->mdev, &params);
+ if (IS_ERR(meter) && PTR_ERR(meter) == -ENOENT) {
+ meter = mlx5e_tc_meter_replace(priv->mdev, &params);
+ } else if (!IS_ERR(meter)) {
+ err = mlx5e_tc_meter_update(meter, &params);
+ mlx5e_tc_meter_put(meter);
+ }
+
+ if (IS_ERR(meter)) {
+ NL_SET_ERR_MSG_MOD(fl_act->extack, "Failed to get flow meter");
+ mlx5_core_err(priv->mdev, "Failed to get flow meter %d\n", params.index);
+ err = PTR_ERR(meter);
+ }
+
+ return err;
+}
+
+static int
+tc_act_police_destroy(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act)
+{
+ struct mlx5e_flow_meter_params params = {};
+ struct mlx5e_flow_meter_handle *meter;
+
+ params.index = fl_act->index;
+ meter = mlx5e_tc_meter_get(priv->mdev, &params);
+ if (IS_ERR(meter)) {
+ NL_SET_ERR_MSG_MOD(fl_act->extack, "Failed to get flow meter");
+ mlx5_core_err(priv->mdev, "Failed to get flow meter %d\n", params.index);
+ return PTR_ERR(meter);
+ }
+ /* the first put balances the get above, the second releases the action's reference */
+ mlx5e_tc_meter_put(meter);
+ mlx5e_tc_meter_put(meter);
+ return 0;
+}
+
+static int
+tc_act_police_stats(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act)
+{
+ struct mlx5e_flow_meter_params params = {};
+ struct mlx5e_flow_meter_handle *meter;
+ u64 bytes, packets, drops, lastuse;
+
+ params.index = fl_act->index;
+ meter = mlx5e_tc_meter_get(priv->mdev, &params);
+ if (IS_ERR(meter)) {
+ NL_SET_ERR_MSG_MOD(fl_act->extack, "Failed to get flow meter");
+ mlx5_core_err(priv->mdev, "Failed to get flow meter %d\n", params.index);
+ return PTR_ERR(meter);
+ }
+
+ mlx5e_tc_meter_get_stats(meter, &bytes, &packets, &drops, &lastuse);
+ flow_stats_update(&fl_act->stats, bytes, packets, drops, lastuse,
+ FLOW_ACTION_HW_STATS_DELAYED);
+ mlx5e_tc_meter_put(meter);
+ return 0;
+}
+
+struct mlx5e_tc_act mlx5e_tc_act_police = {
+ .can_offload = tc_act_can_offload_police,
+ .parse_action = tc_act_parse_police,
+ .is_multi_table_act = tc_act_is_multi_table_act_police,
+ .offload_action = tc_act_police_offload,
+ .destroy_action = tc_act_police_destroy,
+ .stats_action = tc_act_police_stats,
+};
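
As an illustration of the parameter mapping done by fill_meter_params_from_act() (a standalone sketch, not part of the patch; fill_params() and the demo rates are hypothetical): a byte-rate policer is converted to bits per second before it reaches the meter, while a packet-rate policer keeps its packet units; the 128-bytes-per-packet scaling for PPS mode is applied later, in mlx5e_tc_meter_modify().

/* Hypothetical stand-in for the BPS/PPS parameter mapping. */
#include <stdint.h>
#include <stdio.h>

enum rate_mode { RATE_LIMIT_BPS, RATE_LIMIT_PPS };

static void fill_params(uint64_t rate_bytes_ps, uint64_t burst_bytes,
			uint64_t rate_pkt_ps, uint64_t burst_pkt,
			enum rate_mode *mode, uint64_t *rate, uint64_t *burst)
{
	if (rate_bytes_ps) {
		*mode = RATE_LIMIT_BPS;
		*rate = rate_bytes_ps << 3;	/* bytes/s -> bits/s */
		*burst = burst_bytes;
	} else {
		*mode = RATE_LIMIT_PPS;
		*rate = rate_pkt_ps;		/* packets/s, scaled later by HW */
		*burst = burst_pkt;
	}
}

int main(void)
{
	enum rate_mode mode;
	uint64_t rate, burst;

	/* a 125 MB/s byte-rate policer becomes a 1 Gbit/s meter */
	fill_params(125000000ULL, 100000ULL, 0, 0, &mode, &rate, &burst);
	printf("mode=%d rate=%llu bit/s burst=%llu bytes\n",
	       mode, (unsigned long long)rate, (unsigned long long)burst);
	return 0;
}
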
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
index a7d9eab19e4a..53b270f652b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
@@ -12,7 +12,7 @@ tc_act_can_offload_trap(struct mlx5e_tc_act_parse_state *parse_state,
{
struct netlink_ext_ack *extack = parse_state->extack;
- if (parse_state->num_actions != 1) {
+ if (parse_state->flow_action->num_entries != 1) {
NL_SET_ERR_MSG_MOD(extack, "action trap is supported as a sole action only");
return false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
new file mode 100644
index 000000000000..a53e205f4a89
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
@@ -0,0 +1,579 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/math64.h>
+#include "lib/aso.h"
+#include "en/tc/post_act.h"
+#include "meter.h"
+#include "en/tc_priv.h"
+
+#define MLX5_START_COLOR_SHIFT 28
+#define MLX5_METER_MODE_SHIFT 24
+#define MLX5_CBS_EXP_SHIFT 24
+#define MLX5_CBS_MAN_SHIFT 16
+#define MLX5_CIR_EXP_SHIFT 8
+
+/* cir = 8*(10^9)*cir_mantissa/(2^cir_exponent) bits/s */
+#define MLX5_CONST_CIR 8000000000ULL
+#define MLX5_CALC_CIR(m, e) ((MLX5_CONST_CIR * (m)) >> (e))
+#define MLX5_MAX_CIR ((MLX5_CONST_CIR * 0x100) - 1)
+
+/* cbs = cbs_mantissa*2^cbs_exponent */
+#define MLX5_CALC_CBS(m, e) ((m) << (e))
+#define MLX5_MAX_CBS ((0x100ULL << 0x1F) - 1)
+#define MLX5_MAX_HW_CBS 0x7FFFFFFF
+
+struct mlx5e_flow_meter_aso_obj {
+ struct list_head entry;
+ int base_id;
+ int total_meters;
+
+ unsigned long meters_map[0]; /* must be at the end of this struct */
+};
+
+struct mlx5e_flow_meters {
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5_aso *aso;
+ struct mutex aso_lock; /* Protects aso operations */
+ int log_granularity;
+ u32 pdn;
+
+ DECLARE_HASHTABLE(hashtbl, 8);
+
+ struct mutex sync_lock; /* protect flow meter operations */
+ struct list_head partial_list;
+ struct list_head full_list;
+
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_post_act *post_act;
+};
+
+static void
+mlx5e_flow_meter_cir_calc(u64 cir, u8 *man, u8 *exp)
+{
+ s64 _cir, _delta, delta = S64_MAX;
+ u8 e, _man = 0, _exp = 0;
+ u64 m;
+
+ for (e = 0; e <= 0x1F; e++) { /* exp width 5bit */
+ m = cir << e;
+ if ((s64)m < 0) /* overflow */
+ break;
+ m = div64_u64(m, MLX5_CONST_CIR);
+ if (m > 0xFF) /* man width 8 bit */
+ continue;
+ _cir = MLX5_CALC_CIR(m, e);
+ _delta = cir - _cir;
+ if (_delta < delta) {
+ _man = m;
+ _exp = e;
+ if (!_delta)
+ goto found;
+ delta = _delta;
+ }
+ }
+
+found:
+ *man = _man;
+ *exp = _exp;
+}
+
+static void
+mlx5e_flow_meter_cbs_calc(u64 cbs, u8 *man, u8 *exp)
+{
+ s64 _cbs, _delta, delta = S64_MAX;
+ u8 e, _man = 0, _exp = 0;
+ u64 m;
+
+ for (e = 0; e <= 0x1F; e++) { /* exp width 5bit */
+ m = cbs >> e;
+ if (m > 0xFF) /* man width 8 bit */
+ continue;
+ _cbs = MLX5_CALC_CBS(m, e);
+ _delta = cbs - _cbs;
+ if (_delta < delta) {
+ _man = m;
+ _exp = e;
+ if (!_delta)
+ goto found;
+ delta = _delta;
+ }
+ }
+
+found:
+ *man = _man;
+ *exp = _exp;
+}
+
+int
+mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev,
+ struct mlx5e_flow_meter_handle *meter,
+ struct mlx5e_flow_meter_params *meter_params)
+{
+ struct mlx5_wqe_aso_ctrl_seg *aso_ctrl;
+ struct mlx5_wqe_aso_data_seg *aso_data;
+ struct mlx5e_flow_meters *flow_meters;
+ u8 cir_man, cir_exp, cbs_man, cbs_exp;
+ struct mlx5_aso_wqe *aso_wqe;
+ struct mlx5_aso *aso;
+ u64 rate, burst;
+ u8 ds_cnt;
+ int err;
+
+ rate = meter_params->rate;
+ burst = meter_params->burst;
+
+ /* HW treats each packet as 128 bytes in PPS mode */
+ if (meter_params->mode == MLX5_RATE_LIMIT_PPS) {
+ rate <<= 10;
+ burst <<= 7;
+ }
+
+ if (!rate || rate > MLX5_MAX_CIR || !burst || burst > MLX5_MAX_CBS)
+ return -EINVAL;
+
+ /* HW has limitation of total 31 bits for cbs */
+ if (burst > MLX5_MAX_HW_CBS) {
+ mlx5_core_warn(mdev,
+ "burst(%lld) is too large, use HW allowed value(%d)\n",
+ burst, MLX5_MAX_HW_CBS);
+ burst = MLX5_MAX_HW_CBS;
+ }
+
+ mlx5_core_dbg(mdev, "meter mode=%d\n", meter_params->mode);
+ mlx5e_flow_meter_cir_calc(rate, &cir_man, &cir_exp);
+ mlx5_core_dbg(mdev, "rate=%lld, cir=%lld, exp=%d, man=%d\n",
+ rate, MLX5_CALC_CIR(cir_man, cir_exp), cir_exp, cir_man);
+ mlx5e_flow_meter_cbs_calc(burst, &cbs_man, &cbs_exp);
+ mlx5_core_dbg(mdev, "burst=%lld, cbs=%lld, exp=%d, man=%d\n",
+ burst, MLX5_CALC_CBS((u64)cbs_man, cbs_exp), cbs_exp, cbs_man);
+
+ if (!cir_man || !cbs_man)
+ return -EINVAL;
+
+ flow_meters = meter->flow_meters;
+ aso = flow_meters->aso;
+
+ mutex_lock(&flow_meters->aso_lock);
+ aso_wqe = mlx5_aso_get_wqe(aso);
+ ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe_data), MLX5_SEND_WQE_DS);
+ mlx5_aso_build_wqe(aso, ds_cnt, aso_wqe, meter->obj_id,
+ MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER);
+
+ aso_ctrl = &aso_wqe->aso_ctrl;
+ memset(aso_ctrl, 0, sizeof(*aso_ctrl));
+ aso_ctrl->data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BYTEWISE_64BYTE << 6;
+ aso_ctrl->condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE |
+ MLX5_ASO_ALWAYS_TRUE << 4;
+ aso_ctrl->data_offset_condition_operand = MLX5_ASO_LOGICAL_OR << 6;
+ aso_ctrl->data_mask = cpu_to_be64(0x80FFFFFFULL << (meter->idx ? 0 : 32));
+
+ aso_data = (struct mlx5_wqe_aso_data_seg *)(aso_wqe + 1);
+ memset(aso_data, 0, sizeof(*aso_data));
+ aso_data->bytewise_data[meter->idx * 8] = cpu_to_be32((0x1 << 31) | /* valid */
+ (MLX5_FLOW_METER_COLOR_GREEN << MLX5_START_COLOR_SHIFT));
+ if (meter_params->mode == MLX5_RATE_LIMIT_PPS)
+ aso_data->bytewise_data[meter->idx * 8] |=
+ cpu_to_be32(MLX5_FLOW_METER_MODE_NUM_PACKETS << MLX5_METER_MODE_SHIFT);
+ else
+ aso_data->bytewise_data[meter->idx * 8] |=
+ cpu_to_be32(MLX5_FLOW_METER_MODE_BYTES_IP_LENGTH << MLX5_METER_MODE_SHIFT);
+
+ aso_data->bytewise_data[meter->idx * 8 + 2] = cpu_to_be32((cbs_exp << MLX5_CBS_EXP_SHIFT) |
+ (cbs_man << MLX5_CBS_MAN_SHIFT) |
+ (cir_exp << MLX5_CIR_EXP_SHIFT) |
+ cir_man);
+
+ mlx5_aso_post_wqe(aso, true, &aso_wqe->ctrl);
+
+ /* With newer FW, the wait for the first ASO WQE can exceed 2us, so allow up to 10ms. */
+ err = mlx5_aso_poll_cq(aso, true, 10);
+ mutex_unlock(&flow_meters->aso_lock);
+
+ return err;
+}
+
+static int
+mlx5e_flow_meter_create_aso_obj(struct mlx5e_flow_meters *flow_meters, u32 *obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(create_flow_meter_aso_obj_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ struct mlx5_core_dev *mdev = flow_meters->mdev;
+ void *obj;
+ int err;
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO);
+ MLX5_SET(general_obj_in_cmd_hdr, in, log_obj_range, flow_meters->log_granularity);
+
+ obj = MLX5_ADDR_OF(create_flow_meter_aso_obj_in, in, flow_meter_aso_obj);
+ MLX5_SET(flow_meter_aso_obj, obj, meter_aso_access_pd, flow_meters->pdn);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (!err) {
+ *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ mlx5_core_dbg(mdev, "flow meter aso obj(0x%x) created\n", *obj_id);
+ }
+
+ return err;
+}
+
+static void
+mlx5e_flow_meter_destroy_aso_obj(struct mlx5_core_dev *mdev, u32 obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
+
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ mlx5_core_dbg(mdev, "flow meter aso obj(0x%x) destroyed\n", obj_id);
+}
+
+static struct mlx5e_flow_meter_handle *
+__mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters)
+{
+ struct mlx5_core_dev *mdev = flow_meters->mdev;
+ struct mlx5e_flow_meter_aso_obj *meters_obj;
+ struct mlx5e_flow_meter_handle *meter;
+ struct mlx5_fc *counter;
+ int err, pos, total;
+ u32 id;
+
+ meter = kzalloc(sizeof(*meter), GFP_KERNEL);
+ if (!meter)
+ return ERR_PTR(-ENOMEM);
+
+ counter = mlx5_fc_create(mdev, true);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_red_counter;
+ }
+ meter->red_counter = counter;
+
+ counter = mlx5_fc_create(mdev, true);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_green_counter;
+ }
+ meter->green_counter = counter;
+
+ meters_obj = list_first_entry_or_null(&flow_meters->partial_list,
+ struct mlx5e_flow_meter_aso_obj,
+ entry);
+ /* 2 meters in one object */
+ total = 1 << (flow_meters->log_granularity + 1);
+ if (!meters_obj) {
+ err = mlx5e_flow_meter_create_aso_obj(flow_meters, &id);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create flow meter ASO object\n");
+ goto err_create;
+ }
+
+ meters_obj = kzalloc(sizeof(*meters_obj) + BITS_TO_BYTES(total),
+ GFP_KERNEL);
+ if (!meters_obj) {
+ err = -ENOMEM;
+ goto err_mem;
+ }
+
+ meters_obj->base_id = id;
+ meters_obj->total_meters = total;
+ list_add(&meters_obj->entry, &flow_meters->partial_list);
+ pos = 0;
+ } else {
+ pos = find_first_zero_bit(meters_obj->meters_map, total);
+ if (bitmap_weight(meters_obj->meters_map, total) == total - 1) {
+ list_del(&meters_obj->entry);
+ list_add(&meters_obj->entry, &flow_meters->full_list);
+ }
+ }
+
+ bitmap_set(meters_obj->meters_map, pos, 1);
+ meter->flow_meters = flow_meters;
+ meter->meters_obj = meters_obj;
+ meter->obj_id = meters_obj->base_id + pos / 2;
+ meter->idx = pos % 2;
+
+ mlx5_core_dbg(mdev, "flow meter allocated, obj_id=0x%x, index=%d\n",
+ meter->obj_id, meter->idx);
+
+ return meter;
+
+err_mem:
+ mlx5e_flow_meter_destroy_aso_obj(mdev, id);
+err_create:
+ mlx5_fc_destroy(mdev, meter->green_counter);
+err_green_counter:
+ mlx5_fc_destroy(mdev, meter->red_counter);
+err_red_counter:
+ kfree(meter);
+ return ERR_PTR(err);
+}
+
+static void
+__mlx5e_flow_meter_free(struct mlx5e_flow_meter_handle *meter)
+{
+ struct mlx5e_flow_meters *flow_meters = meter->flow_meters;
+ struct mlx5_core_dev *mdev = flow_meters->mdev;
+ struct mlx5e_flow_meter_aso_obj *meters_obj;
+ int n, pos;
+
+ mlx5_fc_destroy(mdev, meter->green_counter);
+ mlx5_fc_destroy(mdev, meter->red_counter);
+
+ meters_obj = meter->meters_obj;
+ pos = (meter->obj_id - meters_obj->base_id) * 2 + meter->idx;
+ bitmap_clear(meters_obj->meters_map, pos, 1);
+ n = bitmap_weight(meters_obj->meters_map, meters_obj->total_meters);
+ if (n == 0) {
+ list_del(&meters_obj->entry);
+ mlx5e_flow_meter_destroy_aso_obj(mdev, meters_obj->base_id);
+ kfree(meters_obj);
+ } else if (n == meters_obj->total_meters - 1) {
+ list_del(&meters_obj->entry);
+ list_add(&meters_obj->entry, &flow_meters->partial_list);
+ }
+
+ mlx5_core_dbg(mdev, "flow meter freed, obj_id=0x%x, index=%d\n",
+ meter->obj_id, meter->idx);
+ kfree(meter);
+}
+
+static struct mlx5e_flow_meter_handle *
+__mlx5e_tc_meter_get(struct mlx5e_flow_meters *flow_meters, u32 index)
+{
+ struct mlx5e_flow_meter_handle *meter;
+
+ hash_for_each_possible(flow_meters->hashtbl, meter, hlist, index)
+ if (meter->params.index == index)
+ goto add_ref;
+
+ return ERR_PTR(-ENOENT);
+
+add_ref:
+ meter->refcnt++;
+
+ return meter;
+}
+
+struct mlx5e_flow_meter_handle *
+mlx5e_tc_meter_get(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params)
+{
+ struct mlx5e_flow_meters *flow_meters;
+ struct mlx5e_flow_meter_handle *meter;
+
+ flow_meters = mlx5e_get_flow_meters(mdev);
+ if (!flow_meters)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mutex_lock(&flow_meters->sync_lock);
+ meter = __mlx5e_tc_meter_get(flow_meters, params->index);
+ mutex_unlock(&flow_meters->sync_lock);
+
+ return meter;
+}
+
+static void
+__mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter)
+{
+ if (--meter->refcnt == 0) {
+ hash_del(&meter->hlist);
+ __mlx5e_flow_meter_free(meter);
+ }
+}
+
+void
+mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter)
+{
+ struct mlx5e_flow_meters *flow_meters = meter->flow_meters;
+
+ mutex_lock(&flow_meters->sync_lock);
+ __mlx5e_tc_meter_put(meter);
+ mutex_unlock(&flow_meters->sync_lock);
+}
+
+static struct mlx5e_flow_meter_handle *
+mlx5e_tc_meter_alloc(struct mlx5e_flow_meters *flow_meters,
+ struct mlx5e_flow_meter_params *params)
+{
+ struct mlx5e_flow_meter_handle *meter;
+
+ meter = __mlx5e_flow_meter_alloc(flow_meters);
+ if (IS_ERR(meter))
+ return meter;
+
+ hash_add(flow_meters->hashtbl, &meter->hlist, params->index);
+ meter->params.index = params->index;
+ meter->refcnt++;
+
+ return meter;
+}
+
+static int
+__mlx5e_tc_meter_update(struct mlx5e_flow_meter_handle *meter,
+ struct mlx5e_flow_meter_params *params)
+{
+ struct mlx5_core_dev *mdev = meter->flow_meters->mdev;
+ int err = 0;
+
+ if (meter->params.mode != params->mode || meter->params.rate != params->rate ||
+ meter->params.burst != params->burst) {
+ err = mlx5e_tc_meter_modify(mdev, meter, params);
+ if (err)
+ goto out;
+
+ meter->params.mode = params->mode;
+ meter->params.rate = params->rate;
+ meter->params.burst = params->burst;
+ }
+
+out:
+ return err;
+}
+
+int
+mlx5e_tc_meter_update(struct mlx5e_flow_meter_handle *meter,
+ struct mlx5e_flow_meter_params *params)
+{
+ struct mlx5_core_dev *mdev = meter->flow_meters->mdev;
+ struct mlx5e_flow_meters *flow_meters;
+ int err;
+
+ flow_meters = mlx5e_get_flow_meters(mdev);
+ if (!flow_meters)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&flow_meters->sync_lock);
+ err = __mlx5e_tc_meter_update(meter, params);
+ mutex_unlock(&flow_meters->sync_lock);
+ return err;
+}
+
+struct mlx5e_flow_meter_handle *
+mlx5e_tc_meter_replace(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params)
+{
+ struct mlx5e_flow_meters *flow_meters;
+ struct mlx5e_flow_meter_handle *meter;
+ int err;
+
+ flow_meters = mlx5e_get_flow_meters(mdev);
+ if (!flow_meters)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mutex_lock(&flow_meters->sync_lock);
+ meter = __mlx5e_tc_meter_get(flow_meters, params->index);
+ if (IS_ERR(meter)) {
+ meter = mlx5e_tc_meter_alloc(flow_meters, params);
+ if (IS_ERR(meter)) {
+ err = PTR_ERR(meter);
+ goto err_get;
+ }
+ }
+
+ err = __mlx5e_tc_meter_update(meter, params);
+ if (err)
+ goto err_update;
+
+ mutex_unlock(&flow_meters->sync_lock);
+ return meter;
+
+err_update:
+ __mlx5e_tc_meter_put(meter);
+err_get:
+ mutex_unlock(&flow_meters->sync_lock);
+ return ERR_PTR(err);
+}
+
+enum mlx5_flow_namespace_type
+mlx5e_tc_meter_get_namespace(struct mlx5e_flow_meters *flow_meters)
+{
+ return flow_meters->ns_type;
+}
+
+struct mlx5e_flow_meters *
+mlx5e_flow_meters_init(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_act)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_flow_meters *flow_meters;
+ int err;
+
+ if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_FLOW_METER_ASO))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (IS_ERR_OR_NULL(post_act)) {
+ netdev_dbg(priv->netdev,
+ "flow meter offload is not supported, post action is missing\n");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ flow_meters = kzalloc(sizeof(*flow_meters), GFP_KERNEL);
+ if (!flow_meters)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5_core_alloc_pd(mdev, &flow_meters->pdn);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to alloc pd for flow meter aso, err=%d\n", err);
+ goto err_out;
+ }
+
+ flow_meters->aso = mlx5_aso_create(mdev, flow_meters->pdn);
+ if (IS_ERR(flow_meters->aso)) {
+ mlx5_core_warn(mdev, "Failed to create aso wqe for flow meter\n");
+ err = PTR_ERR(flow_meters->aso);
+ goto err_sq;
+ }
+
+ mutex_init(&flow_meters->sync_lock);
+ INIT_LIST_HEAD(&flow_meters->partial_list);
+ INIT_LIST_HEAD(&flow_meters->full_list);
+
+ flow_meters->ns_type = ns_type;
+ flow_meters->mdev = mdev;
+ flow_meters->post_act = post_act;
+ mutex_init(&flow_meters->aso_lock);
+ flow_meters->log_granularity = min_t(int, 6,
+ MLX5_CAP_QOS(mdev, log_meter_aso_max_alloc));
+
+ return flow_meters;
+
+err_sq:
+ mlx5_core_dealloc_pd(mdev, flow_meters->pdn);
+err_out:
+ kfree(flow_meters);
+ return ERR_PTR(err);
+}
+
+void
+mlx5e_flow_meters_cleanup(struct mlx5e_flow_meters *flow_meters)
+{
+ if (IS_ERR_OR_NULL(flow_meters))
+ return;
+
+ mlx5_aso_destroy(flow_meters->aso);
+ mlx5_core_dealloc_pd(flow_meters->mdev, flow_meters->pdn);
+ kfree(flow_meters);
+}
+
+void
+mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter,
+ u64 *bytes, u64 *packets, u64 *drops, u64 *lastuse)
+{
+ u64 bytes1, packets1, lastuse1;
+ u64 bytes2, packets2, lastuse2;
+
+ mlx5_fc_query_cached(meter->green_counter, &bytes1, &packets1, &lastuse1);
+ mlx5_fc_query_cached(meter->red_counter, &bytes2, &packets2, &lastuse2);
+
+ *bytes = bytes1 + bytes2;
+ *packets = packets1 + packets2;
+ *drops = packets2;
+ *lastuse = max_t(u64, lastuse1, lastuse2);
+}
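
For reference, a standalone sketch (not part of the patch; cir_calc() and the demo main() are hypothetical) of the bounded mantissa/exponent search performed by mlx5e_flow_meter_cir_calc(): it walks the 5-bit exponent range and keeps the pair whose decoded rate, cir = 8*10^9 * mantissa / 2^exponent bits/s, is closest to (and not above) the requested rate.

/* Hypothetical, self-contained version of the CIR encoding search. */
#include <stdint.h>
#include <stdio.h>

#define CONST_CIR 8000000000ULL                    /* 8 * 10^9 */
#define CALC_CIR(m, e) ((CONST_CIR * (m)) >> (e))  /* decoded rate in bits/s */

static void cir_calc(uint64_t cir, uint8_t *man, uint8_t *exp)
{
	int64_t delta = INT64_MAX;
	uint8_t best_man = 0, best_exp = 0;

	for (uint8_t e = 0; e <= 0x1F; e++) {	/* 5-bit exponent */
		uint64_t m = cir << e;
		if ((int64_t)m < 0)		/* shifted value overflowed */
			break;
		m /= CONST_CIR;
		if (m > 0xFF)			/* 8-bit mantissa */
			continue;
		int64_t d = (int64_t)(cir - CALC_CIR(m, e));
		if (d < delta) {
			best_man = (uint8_t)m;
			best_exp = e;
			if (!d)
				break;		/* exact encoding found */
			delta = d;
		}
	}
	*man = best_man;
	*exp = best_exp;
}

int main(void)
{
	uint8_t man, exp;

	cir_calc(1000000000ULL, &man, &exp);	/* request 1 Gbit/s */
	printf("man=%u exp=%u decoded=%llu bit/s\n",
	       man, exp, (unsigned long long)CALC_CIR(man, exp));
	return 0;
}

The burst size goes through the analogous search (cbs = mantissa * 2^exponent), and per-meter statistics are later assembled in mlx5e_tc_meter_get_stats() by summing the green and red counters, with drops taken from the red counter's packet count.
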
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
new file mode 100644
index 000000000000..6de6e8a16327
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_FLOW_METER_H__
+#define __MLX5_EN_FLOW_METER_H__
+
+struct mlx5e_post_meter_priv;
+struct mlx5e_flow_meter_aso_obj;
+struct mlx5e_flow_meters;
+struct mlx5_flow_attr;
+
+enum mlx5e_flow_meter_mode {
+ MLX5_RATE_LIMIT_BPS,
+ MLX5_RATE_LIMIT_PPS,
+};
+
+struct mlx5e_flow_meter_params {
+ enum mlx5e_flow_meter_mode mode;
+ /* police action index */
+ u32 index;
+ u64 rate;
+ u64 burst;
+};
+
+struct mlx5e_flow_meter_handle {
+ struct mlx5e_flow_meters *flow_meters;
+ struct mlx5e_flow_meter_aso_obj *meters_obj;
+ u32 obj_id;
+ u8 idx;
+
+ int refcnt;
+ struct hlist_node hlist;
+ struct mlx5e_flow_meter_params params;
+
+ struct mlx5_fc *green_counter;
+ struct mlx5_fc *red_counter;
+};
+
+struct mlx5e_meter_attr {
+ struct mlx5e_flow_meter_params params;
+ struct mlx5e_flow_meter_handle *meter;
+ struct mlx5e_post_meter_priv *post_meter;
+};
+
+int
+mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev,
+ struct mlx5e_flow_meter_handle *meter,
+ struct mlx5e_flow_meter_params *meter_params);
+
+struct mlx5e_flow_meter_handle *
+mlx5e_tc_meter_get(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params);
+void
+mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter);
+int
+mlx5e_tc_meter_update(struct mlx5e_flow_meter_handle *meter,
+ struct mlx5e_flow_meter_params *params);
+struct mlx5e_flow_meter_handle *
+mlx5e_tc_meter_replace(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params);
+
+enum mlx5_flow_namespace_type
+mlx5e_tc_meter_get_namespace(struct mlx5e_flow_meters *flow_meters);
+
+struct mlx5e_flow_meters *
+mlx5e_flow_meters_init(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_action);
+void
+mlx5e_flow_meters_cleanup(struct mlx5e_flow_meters *flow_meters);
+
+void
+mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter,
+ u64 *bytes, u64 *packets, u64 *drops, u64 *lastuse);
+
+#endif /* __MLX5_EN_FLOW_METER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
index dea137dd744b..4e48946c4c2a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
@@ -22,9 +22,9 @@ struct mlx5e_post_act_handle {
u32 id;
};
-#define MLX5_POST_ACTION_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen)
-#define MLX5_POST_ACTION_MAX GENMASK(MLX5_POST_ACTION_BITS - 1, 0)
-#define MLX5_POST_ACTION_MASK MLX5_POST_ACTION_MAX
+#define MLX5_POST_ACTION_BITS MLX5_REG_MAPPING_MBITS(FTEID_TO_REG)
+#define MLX5_POST_ACTION_MASK MLX5_REG_MAPPING_MASK(FTEID_TO_REG)
+#define MLX5_POST_ACTION_MAX MLX5_POST_ACTION_MASK
struct mlx5e_post_act *
mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
@@ -36,7 +36,7 @@ mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
int err;
if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ignore_flow_level, table_type)) {
- if (priv->mdev->coredev_type != MLX5_COREDEV_VF)
+ if (priv->mdev->coredev_type == MLX5_COREDEV_PF)
mlx5_core_warn(priv->mdev, "firmware level support is missing\n");
err = -EOPNOTSUPP;
goto err_check;
@@ -128,6 +128,7 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at
post_attr->inner_match_level = MLX5_MATCH_NONE;
post_attr->outer_match_level = MLX5_MATCH_NONE;
post_attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ post_attr->flags |= MLX5_ATTR_FLAG_NO_IN_PORT;
handle->ns_type = post_act->ns_type;
/* Splits were handled before post action */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
new file mode 100644
index 000000000000..8b77e822810e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "en/tc_priv.h"
+#include "post_meter.h"
+#include "en/tc/post_act.h"
+
+#define MLX5_PACKET_COLOR_BITS MLX5_REG_MAPPING_MBITS(PACKET_COLOR_TO_REG)
+#define MLX5_PACKET_COLOR_MASK MLX5_REG_MAPPING_MASK(PACKET_COLOR_TO_REG)
+
+struct mlx5e_post_meter_priv {
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ struct mlx5_flow_handle *fwd_green_rule;
+ struct mlx5_flow_handle *drop_red_rule;
+};
+
+struct mlx5_flow_table *
+mlx5e_post_meter_get_ft(struct mlx5e_post_meter_priv *post_meter)
+{
+ return post_meter->ft;
+}
+
+static int
+mlx5e_post_meter_table_create(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_meter_priv *post_meter)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *root_ns;
+
+ root_ns = mlx5_get_flow_namespace(priv->mdev, ns_type);
+ if (!root_ns) {
+ mlx5_core_warn(priv->mdev, "Failed to get namespace for flow meter\n");
+ return -EOPNOTSUPP;
+ }
+
+ ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
+ ft_attr.prio = FDB_SLOW_PATH;
+ ft_attr.max_fte = 2;
+ ft_attr.level = 1;
+
+ post_meter->ft = mlx5_create_flow_table(root_ns, &ft_attr);
+ if (IS_ERR(post_meter->ft)) {
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter table\n");
+ return PTR_ERR(post_meter->ft);
+ }
+
+ return 0;
+}
+
+static int
+mlx5e_post_meter_fg_create(struct mlx5e_priv *priv,
+ struct mlx5e_post_meter_priv *post_meter)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ void *misc2, *match_criteria;
+ u32 *flow_group_in;
+ int err = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS_2);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
+ match_criteria);
+ misc2 = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_5, MLX5_PACKET_COLOR_MASK);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+
+ post_meter->fg = mlx5_create_flow_group(post_meter->ft, flow_group_in);
+ if (IS_ERR(post_meter->fg)) {
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter flow group\n");
+ err = PTR_ERR(post_meter->fg);
+ }
+
+ kvfree(flow_group_in);
+ return err;
+}
+
+static int
+mlx5e_post_meter_rules_create(struct mlx5e_priv *priv,
+ struct mlx5e_post_meter_priv *post_meter,
+ struct mlx5e_post_act *post_act,
+ struct mlx5_fc *green_counter,
+ struct mlx5_fc *red_counter)
+{
+ struct mlx5_flow_destination dest[2] = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ mlx5e_tc_match_to_reg_match(spec, PACKET_COLOR_TO_REG,
+ MLX5_FLOW_METER_COLOR_RED, MLX5_PACKET_COLOR_MASK);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[0].counter_id = mlx5_fc_id(red_counter);
+
+ rule = mlx5_add_flow_rules(post_meter->ft, spec, &flow_act, dest, 1);
+ if (IS_ERR(rule)) {
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter flow drop rule\n");
+ err = PTR_ERR(rule);
+ goto err_red;
+ }
+ post_meter->drop_red_rule = rule;
+
+ mlx5e_tc_match_to_reg_match(spec, PACKET_COLOR_TO_REG,
+ MLX5_FLOW_METER_COLOR_GREEN, MLX5_PACKET_COLOR_MASK);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[0].ft = mlx5e_tc_post_act_get_ft(post_act);
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter_id = mlx5_fc_id(green_counter);
+
+ rule = mlx5_add_flow_rules(post_meter->ft, spec, &flow_act, dest, 2);
+ if (IS_ERR(rule)) {
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter flow fwd rule\n");
+ err = PTR_ERR(rule);
+ goto err_green;
+ }
+ post_meter->fwd_green_rule = rule;
+
+ kvfree(spec);
+ return 0;
+
+err_green:
+ mlx5_del_flow_rules(post_meter->drop_red_rule);
+err_red:
+ kvfree(spec);
+ return err;
+}
+
+static void
+mlx5e_post_meter_rules_destroy(struct mlx5e_post_meter_priv *post_meter)
+{
+ mlx5_del_flow_rules(post_meter->drop_red_rule);
+ mlx5_del_flow_rules(post_meter->fwd_green_rule);
+}
+
+static void
+mlx5e_post_meter_fg_destroy(struct mlx5e_post_meter_priv *post_meter)
+{
+ mlx5_destroy_flow_group(post_meter->fg);
+}
+
+static void
+mlx5e_post_meter_table_destroy(struct mlx5e_post_meter_priv *post_meter)
+{
+ mlx5_destroy_flow_table(post_meter->ft);
+}
+
+struct mlx5e_post_meter_priv *
+mlx5e_post_meter_init(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_act,
+ struct mlx5_fc *green_counter,
+ struct mlx5_fc *red_counter)
+{
+ struct mlx5e_post_meter_priv *post_meter;
+ int err;
+
+ post_meter = kzalloc(sizeof(*post_meter), GFP_KERNEL);
+ if (!post_meter)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5e_post_meter_table_create(priv, ns_type, post_meter);
+ if (err)
+ goto err_ft;
+
+ err = mlx5e_post_meter_fg_create(priv, post_meter);
+ if (err)
+ goto err_fg;
+
+ err = mlx5e_post_meter_rules_create(priv, post_meter, post_act, green_counter,
+ red_counter);
+ if (err)
+ goto err_rules;
+
+ return post_meter;
+
+err_rules:
+ mlx5e_post_meter_fg_destroy(post_meter);
+err_fg:
+ mlx5e_post_meter_table_destroy(post_meter);
+err_ft:
+ kfree(post_meter);
+ return ERR_PTR(err);
+}
+
+void
+mlx5e_post_meter_cleanup(struct mlx5e_post_meter_priv *post_meter)
+{
+ mlx5e_post_meter_rules_destroy(post_meter);
+ mlx5e_post_meter_fg_destroy(post_meter);
+ mlx5e_post_meter_table_destroy(post_meter);
+ kfree(post_meter);
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h
new file mode 100644
index 000000000000..34d0e4b9fc7a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_POST_METER_H__
+#define __MLX5_EN_POST_METER_H__
+
+#define packet_color_to_reg { \
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_5, \
+ .moffset = 0, \
+ .mlen = 8, \
+ .soffset = MLX5_BYTE_OFF(fte_match_param, \
+ misc_parameters_2.metadata_reg_c_5), \
+}
+
+struct mlx5e_post_meter_priv;
+
+struct mlx5_flow_table *
+mlx5e_post_meter_get_ft(struct mlx5e_post_meter_priv *post_meter);
+
+struct mlx5e_post_meter_priv *
+mlx5e_post_meter_init(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_act,
+ struct mlx5_fc *green_counter,
+ struct mlx5_fc *red_counter);
+void
+mlx5e_post_meter_cleanup(struct mlx5e_post_meter_priv *post_meter);
+
+#endif /* __MLX5_EN_POST_METER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index ba171c7f0a67..864ce0c393e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -36,8 +36,8 @@
#define MLX5_CT_STATE_RELATED_BIT BIT(5)
#define MLX5_CT_STATE_INVALID_BIT BIT(6)
-#define MLX5_CT_LABELS_BITS (mlx5e_tc_attr_to_reg_mappings[LABELS_TO_REG].mlen)
-#define MLX5_CT_LABELS_MASK GENMASK(MLX5_CT_LABELS_BITS - 1, 0)
+#define MLX5_CT_LABELS_BITS MLX5_REG_MAPPING_MBITS(LABELS_TO_REG)
+#define MLX5_CT_LABELS_MASK MLX5_REG_MAPPING_MASK(LABELS_TO_REG)
/* Statically allocate modify actions for
* ipv6 and port nat (5) + tuple fields (4) + nic mode zone restore (1) = 10.
@@ -2062,7 +2062,7 @@ mlx5_tc_ct_init_check_support(struct mlx5e_priv *priv,
/* Ignore_flow_level support isn't supported by default for VFs and so post_act
* won't be supported. Skip showing error msg.
*/
- if (priv->mdev->coredev_type != MLX5_COREDEV_VF)
+ if (priv->mdev->coredev_type == MLX5_COREDEV_PF)
err_msg = "post action is missing";
err = -EOPNOTSUPP;
goto out_err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index 00a3ba862afb..5bbd6b92840f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -62,10 +62,11 @@ struct mlx5_ct_attr {
misc_parameters_2.metadata_reg_c_4),\
}
+/* 8 LSB of metadata C5 are reserved for packet color */
#define fteid_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_5,\
- .moffset = 0,\
- .mlen = 32,\
+ .moffset = 8,\
+ .mlen = 24,\
.soffset = MLX5_BYTE_OFF(fte_match_param,\
misc_parameters_2.metadata_reg_c_5),\
}
@@ -84,10 +85,8 @@ struct mlx5_ct_attr {
.mlen = ESW_ZONE_ID_BITS,\
}
-#define REG_MAPPING_MLEN(reg) (mlx5e_tc_attr_to_reg_mappings[reg].mlen)
-#define REG_MAPPING_MOFFSET(reg) (mlx5e_tc_attr_to_reg_mappings[reg].moffset)
-#define MLX5_CT_ZONE_BITS (mlx5e_tc_attr_to_reg_mappings[ZONE_TO_REG].mlen)
-#define MLX5_CT_ZONE_MASK GENMASK(MLX5_CT_ZONE_BITS - 1, 0)
+#define MLX5_CT_ZONE_BITS MLX5_REG_MAPPING_MBITS(ZONE_TO_REG)
+#define MLX5_CT_ZONE_MASK MLX5_REG_MAPPING_MASK(ZONE_TO_REG)
#if IS_ENABLED(CONFIG_MLX5_TC_CT)
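
To visualize the new register C5 layout (a standalone sketch, not part of the patch; GENMASK_U32() stands in for the kernel's GENMASK(), and the mask is assumed to be GENMASK(mlen - 1, 0) as the old macro definitions computed): the packet color now owns the 8 least significant bits while fte_id is squeezed into the remaining 24 bits at offset 8.

/* Hypothetical illustration of the reg_c_5 partitioning. */
#include <stdio.h>

#define GENMASK_U32(h, l) (((~0U) >> (31 - (h))) & (~0U << (l)))

int main(void)
{
	unsigned int color_moffset = 0, color_mlen = 8;		/* packet_color_to_reg */
	unsigned int fteid_moffset = 8, fteid_mlen = 24;	/* fteid_to_reg_ct */

	printf("color:  bits %u..%u, mask 0x%x\n",
	       color_moffset, color_moffset + color_mlen - 1,
	       GENMASK_U32(color_mlen - 1, 0));
	printf("fte_id: bits %u..%u, mask 0x%x\n",
	       fteid_moffset, fteid_moffset + fteid_mlen - 1,
	       GENMASK_U32(fteid_mlen - 1, 0));
	return 0;
}
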
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index 3b74a6fd5c43..10c9a8a79d00 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -11,7 +11,6 @@
#define MLX5E_TC_MAX_SPLITS 1
-#define mlx5e_nic_chains(priv) ((priv)->fs.tc.chains)
enum {
MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT,
@@ -44,6 +43,8 @@ struct mlx5e_tc_flow_parse_attr {
struct mlx5e_tc_act_parse_state parse_state;
};
+struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc);
+
/* Helper struct for accessing a struct containing list_head array.
* Containing struct
* |- Helper array
@@ -203,7 +204,13 @@ struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow);
struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv);
+struct mlx5e_flow_meters *mlx5e_get_flow_meters(struct mlx5_core_dev *dev);
+
void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec);
void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec);
+int mlx5e_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack);
+
#endif /* __MLX5_EN_TC_PRIV_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
index a8cfab4a393c..cc18d97d8ee0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
@@ -7,6 +7,8 @@
#include "en.h"
#include <net/xdp_sock_drv.h>
+#define MLX5E_MTT_PTAG_MASK 0xfffffffffffffff8ULL
+
/* RX data path */
struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
@@ -21,6 +23,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info)
{
+retry:
dma_info->xsk = xsk_buff_alloc(rq->xsk_pool);
if (!dma_info->xsk)
return -ENOMEM;
@@ -32,6 +35,17 @@ static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq,
*/
dma_info->addr = xsk_buff_xdp_get_frame_dma(dma_info->xsk);
+ /* MTT page mapping has alignment requirements. If they are not
+ * satisfied, leak the descriptor so that it won't come again, and try
+ * to allocate a new one.
+ */
+ if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+ if (unlikely(dma_info->addr & ~MLX5E_MTT_PTAG_MASK)) {
+ xsk_buff_discard(dma_info->xsk);
+ goto retry;
+ }
+ }
+
return 0;
}
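
A minimal sketch (not part of the patch; the demo addresses are made up) of the alignment test used above: MLX5E_MTT_PTAG_MASK keeps all but the three low address bits, so addr & ~MLX5E_MTT_PTAG_MASK is non-zero exactly when the frame's DMA address is not 8-byte aligned, in which case the descriptor is dropped and a new one is allocated.

/* Hypothetical demo of the MTT alignment check. */
#include <stdint.h>
#include <stdio.h>

#define MLX5E_MTT_PTAG_MASK 0xfffffffffffffff8ULL

int main(void)
{
	uint64_t aligned = 0x1000;	/* multiple of 8 -> accepted */
	uint64_t misaligned = 0x1004;	/* low bits set -> retried */

	printf("0x%llx misaligned? %d\n", (unsigned long long)aligned,
	       (aligned & ~MLX5E_MTT_PTAG_MASK) != 0);
	printf("0x%llx misaligned? %d\n", (unsigned long long)misaligned,
	       (misaligned & ~MLX5E_MTT_PTAG_MASK) != 0);
	return 0;
}
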
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 04c0a5e1c89a..1839f1ab1ddd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -194,4 +194,14 @@ static inline void mlx5e_accel_cleanup_rx(struct mlx5e_priv *priv)
{
mlx5e_ktls_cleanup_rx(priv);
}
+
+static inline int mlx5e_accel_init_tx(struct mlx5e_priv *priv)
+{
+ return mlx5e_ktls_init_tx(priv);
+}
+
+static inline void mlx5e_accel_cleanup_tx(struct mlx5e_priv *priv)
+{
+ mlx5e_ktls_cleanup_tx(priv);
+}
#endif /* __MLX5E_EN_ACCEL_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
index 3ae6067c7e6b..20a4f1e585af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
@@ -86,7 +86,7 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
if (!spec)
return ERR_PTR(-ENOMEM);
- fs_tcp = priv->fs.accel_tcp;
+ fs_tcp = priv->fs->accel_tcp;
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
@@ -158,10 +158,10 @@ static int accel_fs_tcp_add_default_rule(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule;
int err = 0;
- fs_tcp = priv->fs.accel_tcp;
+ fs_tcp = priv->fs->accel_tcp;
accel_fs_t = &fs_tcp->tables[type];
- dest = mlx5_ttc_get_default_dest(priv->fs.ttc, fs_accel2tt(type));
+ dest = mlx5_ttc_get_default_dest(priv->fs->ttc, fs_accel2tt(type));
rule = mlx5_add_flow_rules(accel_fs_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -267,7 +267,7 @@ out:
static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_type type)
{
- struct mlx5e_flow_table *ft = &priv->fs.accel_tcp->tables[type];
+ struct mlx5e_flow_table *ft = &priv->fs->accel_tcp->tables[type];
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -277,7 +277,7 @@ static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_
ft_attr.level = MLX5E_ACCEL_FS_TCP_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -307,7 +307,7 @@ static int accel_fs_tcp_disable(struct mlx5e_priv *priv)
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
/* Modify ttc rules destination to point back to the indir TIRs */
- err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_accel2tt(i));
+ err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_accel2tt(i));
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -326,10 +326,10 @@ static int accel_fs_tcp_enable(struct mlx5e_priv *priv)
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
- dest.ft = priv->fs.accel_tcp->tables[i].t;
+ dest.ft = priv->fs->accel_tcp->tables[i].t;
/* Modify ttc rules destination to point on the accel_fs FTs */
- err = mlx5_ttc_fwd_dest(priv->fs.ttc, fs_accel2tt(i), &dest);
+ err = mlx5_ttc_fwd_dest(priv->fs->ttc, fs_accel2tt(i), &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] destination to accel failed, err(%d)\n",
@@ -344,7 +344,7 @@ static void accel_fs_tcp_destroy_table(struct mlx5e_priv *priv, int i)
{
struct mlx5e_accel_fs_tcp *fs_tcp;
- fs_tcp = priv->fs.accel_tcp;
+ fs_tcp = priv->fs->accel_tcp;
if (IS_ERR_OR_NULL(fs_tcp->tables[i].t))
return;
@@ -357,7 +357,7 @@ void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv)
{
int i;
- if (!priv->fs.accel_tcp)
+ if (!priv->fs->accel_tcp)
return;
accel_fs_tcp_disable(priv);
@@ -365,8 +365,8 @@ void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv)
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++)
accel_fs_tcp_destroy_table(priv, i);
- kfree(priv->fs.accel_tcp);
- priv->fs.accel_tcp = NULL;
+ kfree(priv->fs->accel_tcp);
+ priv->fs->accel_tcp = NULL;
}
int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
@@ -376,8 +376,8 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version))
return -EOPNOTSUPP;
- priv->fs.accel_tcp = kzalloc(sizeof(*priv->fs.accel_tcp), GFP_KERNEL);
- if (!priv->fs.accel_tcp)
+ priv->fs->accel_tcp = kzalloc(sizeof(*priv->fs->accel_tcp), GFP_KERNEL);
+ if (!priv->fs->accel_tcp)
return -ENOMEM;
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
@@ -396,7 +396,7 @@ err_destroy_tables:
while (--i >= 0)
accel_fs_tcp_destroy_table(priv, i);
- kfree(priv->fs.accel_tcp);
- priv->fs.accel_tcp = NULL;
+ kfree(priv->fs->accel_tcp);
+ priv->fs->accel_tcp = NULL;
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 8315e8f603d7..f8113fd23265 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -184,13 +184,13 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
fs_prot = &accel_esp->fs_prot[type];
fs_prot->default_dest =
- mlx5_ttc_get_default_dest(priv->fs.ttc, fs_esp2tt(type));
+ mlx5_ttc_get_default_dest(priv->fs->ttc, fs_esp2tt(type));
ft_attr.max_fte = 1;
ft_attr.autogroup.max_num_groups = 1;
ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
+ ft = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft))
return PTR_ERR(ft);
@@ -205,7 +205,7 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
ft_attr.prio = MLX5E_NIC_PRIO;
ft_attr.autogroup.num_reserved_entries = 1;
ft_attr.autogroup.max_num_groups = 1;
- ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
+ ft = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft;
@@ -249,7 +249,7 @@ static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
/* connect */
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = fs_prot->ft;
- mlx5_ttc_fwd_dest(priv->fs.ttc, fs_esp2tt(type), &dest);
+ mlx5_ttc_fwd_dest(priv->fs->ttc, fs_esp2tt(type), &dest);
skip:
fs_prot->refcnt++;
@@ -271,7 +271,7 @@ static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
goto out;
/* disconnect */
- mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_esp2tt(type));
+ mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_esp2tt(type));
/* remove FT */
rx_destroy(priv, type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h
deleted file mode 100644
index e4eeb2ba21c7..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
-
-#ifndef __MLX5_IPSEC_STEERING_H__
-#define __MLX5_IPSEC_STEERING_H__
-
-#include "en.h"
-#include "ipsec.h"
-#include "ipsec_offload.h"
-#include "en/fs.h"
-
-void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec);
-int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec);
-int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
- struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 ipsec_obj_id,
- struct mlx5e_ipsec_rule *ipsec_rule);
-void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
- struct mlx5_accel_esp_xfrm_attrs *attrs,
- struct mlx5e_ipsec_rule *ipsec_rule);
-#endif /* __MLX5_IPSEC_STEERING_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index 814f2a56f633..30a70d139046 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -54,7 +54,7 @@ static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
struct mlx5_core_dev *mdev = priv->mdev;
int err;
- if (WARN_ON(!mlx5e_ktls_type_check(mdev, crypto_info)))
+ if (!mlx5e_ktls_type_check(mdev, crypto_info))
return -EOPNOTSUPP;
if (direction == TLS_OFFLOAD_CTX_DIR_TX)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index d016624fbc9d..948400dee525 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -42,6 +42,8 @@ static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
}
void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
+int mlx5e_ktls_init_tx(struct mlx5e_priv *priv);
+void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv);
int mlx5e_ktls_init_rx(struct mlx5e_priv *priv);
void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv);
int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable);
@@ -62,6 +64,8 @@ static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
struct mlx5e_tls_sw_stats {
atomic64_t tx_tls_ctx;
atomic64_t tx_tls_del;
+ atomic64_t tx_tls_pool_alloc;
+ atomic64_t tx_tls_pool_free;
atomic64_t rx_tls_ctx;
atomic64_t rx_tls_del;
};
@@ -69,6 +73,7 @@ struct mlx5e_tls_sw_stats {
struct mlx5e_tls {
struct mlx5e_tls_sw_stats sw_stats;
struct workqueue_struct *rx_wq;
+ struct mlx5e_tls_tx_pool *tx_pool;
};
int mlx5e_ktls_init(struct mlx5e_priv *priv);
@@ -83,6 +88,15 @@ static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
{
}
+static inline int mlx5e_ktls_init_tx(struct mlx5e_priv *priv)
+{
+ return 0;
+}
+
+static inline void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv)
+{
+}
+
static inline int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
{
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
index 2ab46c4247ff..7c1c0eb16787 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
@@ -41,6 +41,8 @@
static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) },
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_del) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_pool_alloc) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_pool_free) },
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_ctx) },
{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_del) },
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index f239fb2e832f..6b6c7044b64a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -35,30 +35,70 @@ u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
stop_room += num_dumps * mlx5e_stop_room_for_wqe(mdev, MLX5E_KTLS_DUMP_WQEBBS);
+ stop_room += 1; /* fence nop */
return stop_room;
}
+static void mlx5e_ktls_set_tisc(struct mlx5_core_dev *mdev, void *tisc)
+{
+ MLX5_SET(tisc, tisc, tls_en, 1);
+ MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.hw_objs.pdn);
+ MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
+}
+
static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
{
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
- void *tisc;
- tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+ mlx5e_ktls_set_tisc(mdev, MLX5_ADDR_OF(create_tis_in, in, ctx));
- MLX5_SET(tisc, tisc, tls_en, 1);
+ return mlx5_core_create_tis(mdev, in, tisn);
+}
+
+static int mlx5e_ktls_create_tis_cb(struct mlx5_core_dev *mdev,
+ struct mlx5_async_ctx *async_ctx,
+ u32 *out, int outlen,
+ mlx5_async_cbk_t callback,
+ struct mlx5_async_work *context)
+{
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
+
+ mlx5e_ktls_set_tisc(mdev, MLX5_ADDR_OF(create_tis_in, in, ctx));
+ MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
+
+ return mlx5_cmd_exec_cb(async_ctx, in, sizeof(in),
+ out, outlen, callback, context);
+}
+
+static int mlx5e_ktls_destroy_tis_cb(struct mlx5_core_dev *mdev, u32 tisn,
+ struct mlx5_async_ctx *async_ctx,
+ u32 *out, int outlen,
+ mlx5_async_cbk_t callback,
+ struct mlx5_async_work *context)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};
+
+ MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
+ MLX5_SET(destroy_tis_in, in, tisn, tisn);
- return mlx5e_create_tis(mdev, in, tisn);
+ return mlx5_cmd_exec_cb(async_ctx, in, sizeof(in),
+ out, outlen, callback, context);
}
struct mlx5e_ktls_offload_context_tx {
- struct tls_offload_context_tx *tx_ctx;
- struct tls12_crypto_info_aes_gcm_128 crypto_info;
- struct mlx5e_tls_sw_stats *sw_stats;
+ /* fast path */
u32 expected_seq;
u32 tisn;
- u32 key_id;
bool ctx_post_pending;
+ /* control / resync */
+ struct list_head list_node; /* member of the pool */
+ struct tls12_crypto_info_aes_gcm_128 crypto_info;
+ struct tls_offload_context_tx *tx_ctx;
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_tls_sw_stats *sw_stats;
+ u32 key_id;
+ u8 create_err : 1;
};
static void
@@ -82,28 +122,368 @@ mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
return *ctx;
}
+/* struct for callback API management */
+struct mlx5e_async_ctx {
+ struct mlx5_async_work context;
+ struct mlx5_async_ctx async_ctx;
+ struct work_struct work;
+ struct mlx5e_ktls_offload_context_tx *priv_tx;
+ struct completion complete;
+ int err;
+ union {
+ u32 out_create[MLX5_ST_SZ_DW(create_tis_out)];
+ u32 out_destroy[MLX5_ST_SZ_DW(destroy_tis_out)];
+ };
+};
+
+static struct mlx5e_async_ctx *mlx5e_bulk_async_init(struct mlx5_core_dev *mdev, int n)
+{
+ struct mlx5e_async_ctx *bulk_async;
+ int i;
+
+ bulk_async = kvcalloc(n, sizeof(struct mlx5e_async_ctx), GFP_KERNEL);
+ if (!bulk_async)
+ return NULL;
+
+ for (i = 0; i < n; i++) {
+ struct mlx5e_async_ctx *async = &bulk_async[i];
+
+ mlx5_cmd_init_async_ctx(mdev, &async->async_ctx);
+ init_completion(&async->complete);
+ }
+
+ return bulk_async;
+}
+
+static void mlx5e_bulk_async_cleanup(struct mlx5e_async_ctx *bulk_async, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++) {
+ struct mlx5e_async_ctx *async = &bulk_async[i];
+
+ mlx5_cmd_cleanup_async_ctx(&async->async_ctx);
+ }
+ kvfree(bulk_async);
+}
+
+static void create_tis_callback(int status, struct mlx5_async_work *context)
+{
+ struct mlx5e_async_ctx *async =
+ container_of(context, struct mlx5e_async_ctx, context);
+ struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;
+
+ if (status) {
+ async->err = status;
+ priv_tx->create_err = 1;
+ goto out;
+ }
+
+ priv_tx->tisn = MLX5_GET(create_tis_out, async->out_create, tisn);
+out:
+ complete(&async->complete);
+}
+
+static void destroy_tis_callback(int status, struct mlx5_async_work *context)
+{
+ struct mlx5e_async_ctx *async =
+ container_of(context, struct mlx5e_async_ctx, context);
+ struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;
+
+ complete(&async->complete);
+ kfree(priv_tx);
+}
+
+static struct mlx5e_ktls_offload_context_tx *
+mlx5e_tls_priv_tx_init(struct mlx5_core_dev *mdev, struct mlx5e_tls_sw_stats *sw_stats,
+ struct mlx5e_async_ctx *async)
+{
+ struct mlx5e_ktls_offload_context_tx *priv_tx;
+ int err;
+
+ priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL);
+ if (!priv_tx)
+ return ERR_PTR(-ENOMEM);
+
+ priv_tx->mdev = mdev;
+ priv_tx->sw_stats = sw_stats;
+
+ if (!async) {
+ err = mlx5e_ktls_create_tis(mdev, &priv_tx->tisn);
+ if (err)
+ goto err_out;
+ } else {
+ async->priv_tx = priv_tx;
+ err = mlx5e_ktls_create_tis_cb(mdev, &async->async_ctx,
+ async->out_create, sizeof(async->out_create),
+ create_tis_callback, &async->context);
+ if (err)
+ goto err_out;
+ }
+
+ return priv_tx;
+
+err_out:
+ kfree(priv_tx);
+ return ERR_PTR(err);
+}
+
+static void mlx5e_tls_priv_tx_cleanup(struct mlx5e_ktls_offload_context_tx *priv_tx,
+ struct mlx5e_async_ctx *async)
+{
+ if (priv_tx->create_err) {
+ complete(&async->complete);
+ kfree(priv_tx);
+ return;
+ }
+ async->priv_tx = priv_tx;
+ mlx5e_ktls_destroy_tis_cb(priv_tx->mdev, priv_tx->tisn,
+ &async->async_ctx,
+ async->out_destroy, sizeof(async->out_destroy),
+ destroy_tis_callback, &async->context);
+}
+
+static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
+ struct list_head *list, int size)
+{
+ struct mlx5e_ktls_offload_context_tx *obj, *n;
+ struct mlx5e_async_ctx *bulk_async;
+ int i;
+
+ bulk_async = mlx5e_bulk_async_init(mdev, size);
+ if (!bulk_async)
+ return;
+
+ i = 0;
+ /* cleanup may free obj (on create_err or via the destroy callback), use the _safe iterator */
+ list_for_each_entry_safe(obj, n, list, list_node) {
+ mlx5e_tls_priv_tx_cleanup(obj, &bulk_async[i]);
+ i++;
+ }
+
+ for (i = 0; i < size; i++) {
+ struct mlx5e_async_ctx *async = &bulk_async[i];
+
+ wait_for_completion(&async->complete);
+ }
+ mlx5e_bulk_async_cleanup(bulk_async, size);
+}
+
+/* Recycling pool API */
+
+#define MLX5E_TLS_TX_POOL_BULK (16)
+#define MLX5E_TLS_TX_POOL_HIGH (4 * 1024)
+#define MLX5E_TLS_TX_POOL_LOW (MLX5E_TLS_TX_POOL_HIGH / 4)
+
+struct mlx5e_tls_tx_pool {
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_tls_sw_stats *sw_stats;
+ struct mutex lock; /* Protects access to the pool */
+ struct list_head list;
+ size_t size;
+
+ struct workqueue_struct *wq;
+ struct work_struct create_work;
+ struct work_struct destroy_work;
+};
+
+static void create_work(struct work_struct *work)
+{
+ struct mlx5e_tls_tx_pool *pool =
+ container_of(work, struct mlx5e_tls_tx_pool, create_work);
+ struct mlx5e_ktls_offload_context_tx *obj;
+ struct mlx5e_async_ctx *bulk_async;
+ LIST_HEAD(local_list);
+ int i, j, err = 0;
+
+ bulk_async = mlx5e_bulk_async_init(pool->mdev, MLX5E_TLS_TX_POOL_BULK);
+ if (!bulk_async)
+ return;
+
+ for (i = 0; i < MLX5E_TLS_TX_POOL_BULK; i++) {
+ obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, &bulk_async[i]);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ break;
+ }
+ list_add(&obj->list_node, &local_list);
+ }
+
+ for (j = 0; j < i; j++) {
+ struct mlx5e_async_ctx *async = &bulk_async[j];
+
+ wait_for_completion(&async->complete);
+ if (!err && async->err)
+ err = async->err;
+ }
+ atomic64_add(i, &pool->sw_stats->tx_tls_pool_alloc);
+ mlx5e_bulk_async_cleanup(bulk_async, MLX5E_TLS_TX_POOL_BULK);
+ if (err)
+ goto err_out;
+
+ mutex_lock(&pool->lock);
+ if (pool->size + MLX5E_TLS_TX_POOL_BULK >= MLX5E_TLS_TX_POOL_HIGH) {
+ mutex_unlock(&pool->lock);
+ goto err_out;
+ }
+ list_splice(&local_list, &pool->list);
+ pool->size += MLX5E_TLS_TX_POOL_BULK;
+ if (pool->size <= MLX5E_TLS_TX_POOL_LOW)
+ queue_work(pool->wq, work);
+ mutex_unlock(&pool->lock);
+ return;
+
+err_out:
+ mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, i);
+ atomic64_add(i, &pool->sw_stats->tx_tls_pool_free);
+}
+
+static void destroy_work(struct work_struct *work)
+{
+ struct mlx5e_tls_tx_pool *pool =
+ container_of(work, struct mlx5e_tls_tx_pool, destroy_work);
+ struct mlx5e_ktls_offload_context_tx *obj;
+ LIST_HEAD(local_list);
+ int i = 0;
+
+ mutex_lock(&pool->lock);
+ if (pool->size < MLX5E_TLS_TX_POOL_HIGH) {
+ mutex_unlock(&pool->lock);
+ return;
+ }
+
+ list_for_each_entry(obj, &pool->list, list_node)
+ if (++i == MLX5E_TLS_TX_POOL_BULK)
+ break;
+
+ list_cut_position(&local_list, &pool->list, &obj->list_node);
+ pool->size -= MLX5E_TLS_TX_POOL_BULK;
+ if (pool->size >= MLX5E_TLS_TX_POOL_HIGH)
+ queue_work(pool->wq, work);
+ mutex_unlock(&pool->lock);
+
+ mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, MLX5E_TLS_TX_POOL_BULK);
+ atomic64_add(MLX5E_TLS_TX_POOL_BULK, &pool->sw_stats->tx_tls_pool_free);
+}
+
+static struct mlx5e_tls_tx_pool *mlx5e_tls_tx_pool_init(struct mlx5_core_dev *mdev,
+ struct mlx5e_tls_sw_stats *sw_stats)
+{
+ struct mlx5e_tls_tx_pool *pool;
+
+ BUILD_BUG_ON(MLX5E_TLS_TX_POOL_LOW + MLX5E_TLS_TX_POOL_BULK >= MLX5E_TLS_TX_POOL_HIGH);
+
+ pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ pool->wq = create_singlethread_workqueue("mlx5e_tls_tx_pool");
+ if (!pool->wq)
+ goto err_free;
+
+ INIT_LIST_HEAD(&pool->list);
+ mutex_init(&pool->lock);
+
+ INIT_WORK(&pool->create_work, create_work);
+ INIT_WORK(&pool->destroy_work, destroy_work);
+
+ pool->mdev = mdev;
+ pool->sw_stats = sw_stats;
+
+ return pool;
+
+err_free:
+ kvfree(pool);
+ return NULL;
+}
+
+static void mlx5e_tls_tx_pool_list_cleanup(struct mlx5e_tls_tx_pool *pool)
+{
+ while (pool->size > MLX5E_TLS_TX_POOL_BULK) {
+ struct mlx5e_ktls_offload_context_tx *obj;
+ LIST_HEAD(local_list);
+ int i = 0;
+
+ list_for_each_entry(obj, &pool->list, list_node)
+ if (++i == MLX5E_TLS_TX_POOL_BULK)
+ break;
+
+ list_cut_position(&local_list, &pool->list, &obj->list_node);
+ mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, MLX5E_TLS_TX_POOL_BULK);
+ atomic64_add(MLX5E_TLS_TX_POOL_BULK, &pool->sw_stats->tx_tls_pool_free);
+ pool->size -= MLX5E_TLS_TX_POOL_BULK;
+ }
+ if (pool->size) {
+ mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &pool->list, pool->size);
+ atomic64_add(pool->size, &pool->sw_stats->tx_tls_pool_free);
+ }
+}
+
+static void mlx5e_tls_tx_pool_cleanup(struct mlx5e_tls_tx_pool *pool)
+{
+ mlx5e_tls_tx_pool_list_cleanup(pool);
+ destroy_workqueue(pool->wq);
+ kvfree(pool);
+}
+
+static void pool_push(struct mlx5e_tls_tx_pool *pool, struct mlx5e_ktls_offload_context_tx *obj)
+{
+ mutex_lock(&pool->lock);
+ list_add(&obj->list_node, &pool->list);
+ if (++pool->size == MLX5E_TLS_TX_POOL_HIGH)
+ queue_work(pool->wq, &pool->destroy_work);
+ mutex_unlock(&pool->lock);
+}
+
+static struct mlx5e_ktls_offload_context_tx *pool_pop(struct mlx5e_tls_tx_pool *pool)
+{
+ struct mlx5e_ktls_offload_context_tx *obj;
+
+ mutex_lock(&pool->lock);
+ if (unlikely(pool->size == 0)) {
+ /* pool is empty:
+ * - trigger the populating work, and
+ * - serve the current context via the regular blocking api.
+ */
+ queue_work(pool->wq, &pool->create_work);
+ mutex_unlock(&pool->lock);
+ obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, NULL);
+ if (!IS_ERR(obj))
+ atomic64_inc(&pool->sw_stats->tx_tls_pool_alloc);
+ return obj;
+ }
+
+ obj = list_first_entry(&pool->list, struct mlx5e_ktls_offload_context_tx,
+ list_node);
+ list_del(&obj->list_node);
+ if (--pool->size == MLX5E_TLS_TX_POOL_LOW)
+ queue_work(pool->wq, &pool->create_work);
+ mutex_unlock(&pool->lock);
+ return obj;
+}
+
+/* End of pool API */
+
int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn)
{
struct mlx5e_ktls_offload_context_tx *priv_tx;
+ struct mlx5e_tls_tx_pool *pool;
struct tls_context *tls_ctx;
- struct mlx5_core_dev *mdev;
struct mlx5e_priv *priv;
int err;
tls_ctx = tls_get_ctx(sk);
priv = netdev_priv(netdev);
- mdev = priv->mdev;
+ pool = priv->tls->tx_pool;
- priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL);
- if (!priv_tx)
- return -ENOMEM;
+ priv_tx = pool_pop(pool);
+ if (IS_ERR(priv_tx))
+ return PTR_ERR(priv_tx);
- err = mlx5_ktls_create_key(mdev, crypto_info, &priv_tx->key_id);
+ err = mlx5_ktls_create_key(pool->mdev, crypto_info, &priv_tx->key_id);
if (err)
goto err_create_key;
- priv_tx->sw_stats = &priv->tls->sw_stats;
priv_tx->expected_seq = start_offload_tcp_sn;
priv_tx->crypto_info =
*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
@@ -111,36 +491,29 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx);
- err = mlx5e_ktls_create_tis(mdev, &priv_tx->tisn);
- if (err)
- goto err_create_tis;
-
priv_tx->ctx_post_pending = true;
atomic64_inc(&priv_tx->sw_stats->tx_tls_ctx);
return 0;
-err_create_tis:
- mlx5_ktls_destroy_key(mdev, priv_tx->key_id);
err_create_key:
- kfree(priv_tx);
+ pool_push(pool, priv_tx);
return err;
}
void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx)
{
struct mlx5e_ktls_offload_context_tx *priv_tx;
- struct mlx5_core_dev *mdev;
+ struct mlx5e_tls_tx_pool *pool;
struct mlx5e_priv *priv;
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
priv = netdev_priv(netdev);
- mdev = priv->mdev;
+ pool = priv->tls->tx_pool;
atomic64_inc(&priv_tx->sw_stats->tx_tls_del);
- mlx5e_destroy_tis(mdev, priv_tx->tisn);
- mlx5_ktls_destroy_key(mdev, priv_tx->key_id);
- kfree(priv_tx);
+ mlx5_ktls_destroy_key(priv_tx->mdev, priv_tx->key_id);
+ pool_push(pool, priv_tx);
}
static void tx_fill_wi(struct mlx5e_txqsq *sq,
@@ -201,6 +574,16 @@ post_progress_params(struct mlx5e_txqsq *sq,
sq->pc += num_wqebbs;
}
+static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+
+ tx_fill_wi(sq, pi, 1, 0, NULL);
+
+ mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
+}
+
static void
mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
struct mlx5e_ktls_offload_context_tx *priv_tx,
@@ -212,6 +595,7 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
post_static_params(sq, priv_tx, fence_first_post);
post_progress_params(sq, priv_tx, progress_fence);
+ tx_post_fence_nop(sq);
}
struct tx_sync_info {
@@ -304,7 +688,7 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
}
static int
-tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first)
+tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn)
{
struct mlx5_wqe_ctrl_seg *cseg;
struct mlx5_wqe_data_seg *dseg;
@@ -326,7 +710,6 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP);
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
cseg->tis_tir_num = cpu_to_be32(tisn << 8);
- cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
fsz = skb_frag_size(frag);
dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
@@ -361,67 +744,39 @@ void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
stats->tls_dump_bytes += wi->num_bytes;
}
-static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
-{
- struct mlx5_wq_cyc *wq = &sq->wq;
- u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
-
- tx_fill_wi(sq, pi, 1, 0, NULL);
-
- mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
-}
-
static enum mlx5e_ktls_sync_retval
mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
struct mlx5e_txqsq *sq,
int datalen,
u32 seq)
{
- struct mlx5e_sq_stats *stats = sq->stats;
enum mlx5e_ktls_sync_retval ret;
struct tx_sync_info info = {};
- int i = 0;
+ int i;
ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
- if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
- if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
- stats->tls_skip_no_sync_data++;
- return MLX5E_KTLS_SYNC_SKIP_NO_DATA;
- }
- /* We might get here if a retransmission reaches the driver
- * after the relevant record is acked.
+ if (unlikely(ret != MLX5E_KTLS_SYNC_DONE))
+ /* We might get here with ret == FAIL if a retransmission
+ * reaches the driver after the relevant record is acked.
* It should be safe to drop the packet in this case
*/
- stats->tls_drop_no_sync_data++;
- goto err_out;
- }
-
- stats->tls_ooo++;
+ return ret;
tx_post_resync_params(sq, priv_tx, info.rcd_sn);
- /* If no dump WQE was sent, we need to have a fence NOP WQE before the
- * actual data xmit.
- */
- if (!info.nr_frags) {
- tx_post_fence_nop(sq);
- return MLX5E_KTLS_SYNC_DONE;
- }
-
- for (; i < info.nr_frags; i++) {
+ for (i = 0; i < info.nr_frags; i++) {
unsigned int orig_fsz, frag_offset = 0, n = 0;
skb_frag_t *f = &info.frags[i];
orig_fsz = skb_frag_size(f);
do {
- bool fence = !(i || frag_offset);
unsigned int fsz;
n++;
fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
skb_frag_size_set(f, fsz);
- if (tx_post_resync_dump(sq, f, priv_tx->tisn, fence)) {
+ if (tx_post_resync_dump(sq, f, priv_tx->tisn)) {
page_ref_add(skb_frag_page(f), n - 1);
goto err_out;
}
@@ -457,7 +812,7 @@ bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
int datalen;
u32 seq;
- datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ datalen = skb->len - skb_tcp_all_headers(skb);
if (!datalen)
return true;
@@ -469,24 +824,27 @@ bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
- if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
+ if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx)))
mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
- }
seq = ntohl(tcp_hdr(skb)->seq);
if (unlikely(priv_tx->expected_seq != seq)) {
enum mlx5e_ktls_sync_retval ret =
mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
+ stats->tls_ooo++;
+
switch (ret) {
case MLX5E_KTLS_SYNC_DONE:
break;
case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
+ stats->tls_skip_no_sync_data++;
if (likely(!skb->decrypted))
goto out;
WARN_ON_ONCE(1);
- fallthrough;
+ goto err_out;
case MLX5E_KTLS_SYNC_FAIL:
+ stats->tls_drop_no_sync_data++;
goto err_out;
}
}
@@ -505,3 +863,24 @@ err_out:
dev_kfree_skb_any(skb);
return false;
}
+
+int mlx5e_ktls_init_tx(struct mlx5e_priv *priv)
+{
+ if (!mlx5e_is_ktls_tx(priv->mdev))
+ return 0;
+
+ priv->tls->tx_pool = mlx5e_tls_tx_pool_init(priv->mdev, &priv->tls->sw_stats);
+ if (!priv->tls->tx_pool)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv)
+{
+ if (!mlx5e_is_ktls_tx(priv->mdev))
+ return;
+
+ mlx5e_tls_tx_pool_cleanup(priv->tls->tx_pool);
+ priv->tls->tx_pool = NULL;
+}
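
The recycling pool added above is a watermark-driven object pool: pool_pop() falls back to a blocking TIS allocation and kicks create_work when the pool is empty, a refill is queued again once the size drops back to MLX5E_TLS_TX_POOL_LOW, and pool_push() schedules destroy_work when the size reaches MLX5E_TLS_TX_POOL_HIGH, with create_work/destroy_work moving objects in batches of MLX5E_TLS_TX_POOL_BULK over the async command contexts shown earlier. A minimal sketch of that control flow follows; the names (obj_pool, obj_alloc_sync, POOL_*) are illustrative only, not the driver's types.

/* Self-contained sketch of the watermark-driven pool pattern used by
 * the kTLS TX code above. All identifiers here are hypothetical.
 */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#define POOL_BULK 16
#define POOL_HIGH (4 * 1024)
#define POOL_LOW  (POOL_HIGH / 4)

struct obj { struct list_head node; };

struct obj_pool {
	struct mutex lock;		/* protects list and size */
	struct list_head list;
	size_t size;
	struct workqueue_struct *wq;
	struct work_struct create_work;	/* bulk refill */
	struct work_struct destroy_work;	/* bulk drain */
};

static struct obj *obj_alloc_sync(void)	/* placeholder for the blocking slow path */
{
	return kzalloc(sizeof(struct obj), GFP_KERNEL);
}

static struct obj *pool_pop(struct obj_pool *pool)
{
	struct obj *o;

	mutex_lock(&pool->lock);
	if (!pool->size) {
		/* Empty: start an async refill and serve this caller
		 * through the blocking path, as the driver does.
		 */
		queue_work(pool->wq, &pool->create_work);
		mutex_unlock(&pool->lock);
		return obj_alloc_sync();
	}
	o = list_first_entry(&pool->list, struct obj, node);
	list_del(&o->node);
	if (--pool->size == POOL_LOW)	/* crossed the low watermark */
		queue_work(pool->wq, &pool->create_work);
	mutex_unlock(&pool->lock);
	return o;
}

static void pool_push(struct obj_pool *pool, struct obj *o)
{
	mutex_lock(&pool->lock);
	list_add(&o->node, &pool->list);
	if (++pool->size == POOL_HIGH)	/* crossed the high watermark */
		queue_work(pool->wq, &pool->destroy_work);
	mutex_unlock(&pool->lock);
}
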
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 49cca6bd49a1..cd7f245dcf14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -120,7 +120,7 @@ static int arfs_disable(struct mlx5e_priv *priv)
for (i = 0; i < ARFS_NUM_TYPES; i++) {
/* Modify ttc rules destination back to their default */
- err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, arfs_get_tt(i));
+ err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, arfs_get_tt(i));
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
@@ -147,9 +147,9 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv)
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
- dest.ft = priv->fs.arfs->arfs_tables[i].ft.t;
+ dest.ft = priv->fs->arfs->arfs_tables[i].ft.t;
/* Modify ttc rules destination to point on the aRFS FTs */
- err = mlx5_ttc_fwd_dest(priv->fs.ttc, arfs_get_tt(i), &dest);
+ err = mlx5_ttc_fwd_dest(priv->fs->ttc, arfs_get_tt(i), &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] dest to arfs, failed err(%d)\n",
@@ -172,10 +172,10 @@ static void _mlx5e_cleanup_tables(struct mlx5e_priv *priv)
int i;
arfs_del_rules(priv);
- destroy_workqueue(priv->fs.arfs->wq);
+ destroy_workqueue(priv->fs->arfs->wq);
for (i = 0; i < ARFS_NUM_TYPES; i++) {
- if (!IS_ERR_OR_NULL(priv->fs.arfs->arfs_tables[i].ft.t))
- arfs_destroy_table(&priv->fs.arfs->arfs_tables[i]);
+ if (!IS_ERR_OR_NULL(priv->fs->arfs->arfs_tables[i].ft.t))
+ arfs_destroy_table(&priv->fs->arfs->arfs_tables[i]);
}
}
@@ -185,13 +185,13 @@ void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
return;
_mlx5e_cleanup_tables(priv);
- kvfree(priv->fs.arfs);
+ kvfree(priv->fs->arfs);
}
static int arfs_add_default_rule(struct mlx5e_priv *priv,
enum arfs_type type)
{
- struct arfs_table *arfs_t = &priv->fs.arfs->arfs_tables[type];
+ struct arfs_table *arfs_t = &priv->fs->arfs->arfs_tables[type];
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
enum mlx5_traffic_types tt;
@@ -321,7 +321,7 @@ out:
static int arfs_create_table(struct mlx5e_priv *priv,
enum arfs_type type)
{
- struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
+ struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -332,7 +332,7 @@ static int arfs_create_table(struct mlx5e_priv *priv,
ft_attr.level = MLX5E_ARFS_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -361,14 +361,14 @@ int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
return 0;
- priv->fs.arfs = kvzalloc(sizeof(*priv->fs.arfs), GFP_KERNEL);
- if (!priv->fs.arfs)
+ priv->fs->arfs = kvzalloc(sizeof(*priv->fs->arfs), GFP_KERNEL);
+ if (!priv->fs->arfs)
return -ENOMEM;
- spin_lock_init(&priv->fs.arfs->arfs_lock);
- INIT_LIST_HEAD(&priv->fs.arfs->rules);
- priv->fs.arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
- if (!priv->fs.arfs->wq)
+ spin_lock_init(&priv->fs->arfs->arfs_lock);
+ INIT_LIST_HEAD(&priv->fs->arfs->rules);
+ priv->fs->arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
+ if (!priv->fs->arfs->wq)
goto err;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
@@ -381,7 +381,7 @@ int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
err_des:
_mlx5e_cleanup_tables(priv);
err:
- kvfree(priv->fs.arfs);
+ kvfree(priv->fs->arfs);
return err;
}
@@ -396,8 +396,8 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
int i;
int j;
- spin_lock_bh(&priv->fs.arfs->arfs_lock);
- mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
+ spin_lock_bh(&priv->fs->arfs->arfs_lock);
+ mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs->arfs->arfs_tables, i, j) {
if (!work_pending(&arfs_rule->arfs_work) &&
rps_may_expire_flow(priv->netdev,
arfs_rule->rxq, arfs_rule->flow_id,
@@ -408,7 +408,7 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
break;
}
}
- spin_unlock_bh(&priv->fs.arfs->arfs_lock);
+ spin_unlock_bh(&priv->fs->arfs->arfs_lock);
hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
if (arfs_rule->rule)
mlx5_del_flow_rules(arfs_rule->rule);
@@ -425,12 +425,12 @@ static void arfs_del_rules(struct mlx5e_priv *priv)
int i;
int j;
- spin_lock_bh(&priv->fs.arfs->arfs_lock);
- mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
+ spin_lock_bh(&priv->fs->arfs->arfs_lock);
+ mlx5e_for_each_arfs_rule(rule, htmp, priv->fs->arfs->arfs_tables, i, j) {
hlist_del_init(&rule->hlist);
hlist_add_head(&rule->hlist, &del_list);
}
- spin_unlock_bh(&priv->fs.arfs->arfs_lock);
+ spin_unlock_bh(&priv->fs->arfs->arfs_lock);
hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
cancel_work_sync(&rule->arfs_work);
@@ -474,7 +474,7 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
struct arfs_rule *arfs_rule)
{
- struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
+ struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
struct arfs_tuple *tuple = &arfs_rule->tuple;
struct mlx5_flow_handle *rule = NULL;
struct mlx5_flow_destination dest = {};
@@ -592,9 +592,9 @@ static void arfs_handle_work(struct work_struct *work)
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- spin_lock_bh(&priv->fs.arfs->arfs_lock);
+ spin_lock_bh(&priv->fs->arfs->arfs_lock);
hlist_del(&arfs_rule->hlist);
- spin_unlock_bh(&priv->fs.arfs->arfs_lock);
+ spin_unlock_bh(&priv->fs->arfs->arfs_lock);
mutex_unlock(&priv->state_lock);
kfree(arfs_rule);
@@ -647,7 +647,7 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
tuple->dst_port = fk->ports.dst;
rule->flow_id = flow_id;
- rule->filter_id = priv->fs.arfs->last_filter_id++ % RPS_NO_FILTER;
+ rule->filter_id = priv->fs->arfs->last_filter_id++ % RPS_NO_FILTER;
hlist_add_head(&rule->hlist,
arfs_hash_bucket(arfs_t, tuple->src_port,
@@ -691,7 +691,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
+ struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
struct arfs_table *arfs_t;
struct arfs_rule *arfs_rule;
struct flow_keys fk;
@@ -725,7 +725,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
return -ENOMEM;
}
}
- queue_work(priv->fs.arfs->wq, &arfs_rule->arfs_work);
+ queue_work(priv->fs->arfs->wq, &arfs_rule->arfs_work);
spin_unlock_bh(&arfs->arfs_lock);
return arfs_rule->filter_id;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 6e80585d731f..b811207fe5ed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -30,6 +30,8 @@
* SOFTWARE.
*/
+#include <linux/ethtool_netlink.h>
+
#include "en.h"
#include "en/port.h"
#include "en/params.h"
@@ -305,12 +307,18 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
}
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param)
{
param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
param->rx_pending = 1 << priv->channels.params.log_rq_mtu_frames;
param->tx_pending = 1 << priv->channels.params.log_sq_size;
+
+ kernel_param->tcp_data_split =
+ (priv->channels.params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) ?
+ ETHTOOL_TCP_DATA_SPLIT_ENABLED :
+ ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
static void mlx5e_get_ringparam(struct net_device *dev,
@@ -320,7 +328,7 @@ static void mlx5e_get_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- mlx5e_ethtool_get_ringparam(priv, param);
+ mlx5e_ethtool_get_ringparam(priv, param, kernel_param);
}
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
@@ -451,7 +459,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
* because the numeration of the QoS SQs will change, while per-queue
* qdiscs are attached.
*/
- if (priv->htb.maj_id) {
+ if (mlx5e_selq_is_htb_enabled(&priv->selq)) {
err = -EINVAL;
netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the number of channels\n",
__func__);
@@ -2067,7 +2075,7 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
* the numeration of the QoS SQs will change, while per-queue qdiscs are
* attached.
*/
- if (priv->htb.maj_id) {
+ if (mlx5e_selq_is_htb_enabled(&priv->selq)) {
netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the PTP state\n",
__func__);
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index d2f0773f95c6..e2a9b9be5c1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -37,13 +37,13 @@
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mpfs.h>
#include "en.h"
-#include "en_rep.h"
+#include "en_tc.h"
#include "lib/mpfs.h"
#include "en/ptp.h"
-static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
+static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_rule *ai, int type);
-static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
+static void mlx5e_del_l2_flow_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_rule *ai);
enum {
@@ -132,9 +132,8 @@ struct mlx5_flow_table *mlx5e_vlan_get_flowtable(struct mlx5e_vlan_table *vlan)
return vlan->ft.t;
}
-static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
+static int mlx5e_vport_context_update_vlans(struct mlx5e_flow_steering *fs)
{
- struct net_device *ndev = priv->netdev;
int max_list_size;
int list_size;
u16 *vlans;
@@ -143,15 +142,15 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
int i;
list_size = 0;
- for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID)
+ for_each_set_bit(vlan, fs->vlan->active_cvlans, VLAN_N_VID)
list_size++;
- max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
+ max_list_size = 1 << MLX5_CAP_GEN(fs->mdev, log_max_vlan_list);
if (list_size > max_list_size) {
- netdev_warn(ndev,
- "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
- list_size, max_list_size);
+ mlx5_core_warn(fs->mdev,
+ "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
+ list_size, max_list_size);
list_size = max_list_size;
}
@@ -160,16 +159,16 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
return -ENOMEM;
i = 0;
- for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
+ for_each_set_bit(vlan, fs->vlan->active_cvlans, VLAN_N_VID) {
if (i >= list_size)
break;
vlans[i++] = vlan;
}
- err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
+ err = mlx5_modify_nic_vport_vlans(fs->mdev, vlans, list_size);
if (err)
- netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
- err);
+ mlx5_core_err(fs->mdev, "Failed to modify vport vlans list err(%d)\n",
+ err);
kvfree(vlans);
return err;
@@ -183,18 +182,18 @@ enum mlx5e_vlan_rule_type {
MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID,
};
-static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
+static int __mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs,
enum mlx5e_vlan_rule_type rule_type,
u16 vid, struct mlx5_flow_spec *spec)
{
- struct mlx5_flow_table *ft = priv->fs.vlan->ft.t;
+ struct mlx5_flow_table *ft = fs->vlan->ft.t;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle **rule_p;
MLX5_DECLARE_FLOW_ACT(flow_act);
int err = 0;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = priv->fs.l2.ft.t;
+ dest.ft = fs->l2.ft.t;
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
@@ -204,24 +203,24 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
* disabled in match value means both S & C tags
* don't exist (untagged of both)
*/
- rule_p = &priv->fs.vlan->untagged_rule;
+ rule_p = &fs->vlan->untagged_rule;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
break;
case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
- rule_p = &priv->fs.vlan->any_cvlan_rule;
+ rule_p = &fs->vlan->any_cvlan_rule;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
break;
case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
- rule_p = &priv->fs.vlan->any_svlan_rule;
+ rule_p = &fs->vlan->any_svlan_rule;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.svlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
- rule_p = &priv->fs.vlan->active_svlans_rule[vid];
+ rule_p = &fs->vlan->active_svlans_rule[vid];
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.svlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
@@ -231,7 +230,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
vid);
break;
default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
- rule_p = &priv->fs.vlan->active_cvlans_rule[vid];
+ rule_p = &fs->vlan->active_cvlans_rule[vid];
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
@@ -250,13 +249,13 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
*rule_p = NULL;
- netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
+ mlx5_core_err(fs->mdev, "%s: add rule failed\n", __func__);
}
return err;
}
-static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
+static int mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs,
enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
struct mlx5_flow_spec *spec;
@@ -267,68 +266,68 @@ static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
return -ENOMEM;
if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID)
- mlx5e_vport_context_update_vlans(priv);
+ mlx5e_vport_context_update_vlans(fs);
- err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);
+ err = __mlx5e_add_vlan_rule(fs, rule_type, vid, spec);
kvfree(spec);
return err;
}
-static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
- enum mlx5e_vlan_rule_type rule_type, u16 vid)
+static void mlx5e_fs_del_vlan_rule(struct mlx5e_flow_steering *fs,
+ enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
- if (priv->fs.vlan->untagged_rule) {
- mlx5_del_flow_rules(priv->fs.vlan->untagged_rule);
- priv->fs.vlan->untagged_rule = NULL;
+ if (fs->vlan->untagged_rule) {
+ mlx5_del_flow_rules(fs->vlan->untagged_rule);
+ fs->vlan->untagged_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
- if (priv->fs.vlan->any_cvlan_rule) {
- mlx5_del_flow_rules(priv->fs.vlan->any_cvlan_rule);
- priv->fs.vlan->any_cvlan_rule = NULL;
+ if (fs->vlan->any_cvlan_rule) {
+ mlx5_del_flow_rules(fs->vlan->any_cvlan_rule);
+ fs->vlan->any_cvlan_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
- if (priv->fs.vlan->any_svlan_rule) {
- mlx5_del_flow_rules(priv->fs.vlan->any_svlan_rule);
- priv->fs.vlan->any_svlan_rule = NULL;
+ if (fs->vlan->any_svlan_rule) {
+ mlx5_del_flow_rules(fs->vlan->any_svlan_rule);
+ fs->vlan->any_svlan_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
- if (priv->fs.vlan->active_svlans_rule[vid]) {
- mlx5_del_flow_rules(priv->fs.vlan->active_svlans_rule[vid]);
- priv->fs.vlan->active_svlans_rule[vid] = NULL;
+ if (fs->vlan->active_svlans_rule[vid]) {
+ mlx5_del_flow_rules(fs->vlan->active_svlans_rule[vid]);
+ fs->vlan->active_svlans_rule[vid] = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
- if (priv->fs.vlan->active_cvlans_rule[vid]) {
- mlx5_del_flow_rules(priv->fs.vlan->active_cvlans_rule[vid]);
- priv->fs.vlan->active_cvlans_rule[vid] = NULL;
+ if (fs->vlan->active_cvlans_rule[vid]) {
+ mlx5_del_flow_rules(fs->vlan->active_cvlans_rule[vid]);
+ fs->vlan->active_cvlans_rule[vid] = NULL;
}
- mlx5e_vport_context_update_vlans(priv);
+ mlx5e_vport_context_update_vlans(fs);
break;
}
}
-static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
+static void mlx5e_fs_del_any_vid_rules(struct mlx5e_flow_steering *fs)
{
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
-static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
+static int mlx5e_fs_add_any_vid_rules(struct mlx5e_flow_steering *fs)
{
int err;
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
if (err)
return err;
- return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
+ return mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
static struct mlx5_flow_handle *
@@ -354,101 +353,101 @@ mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num)
int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
{
- struct mlx5_flow_table *ft = priv->fs.vlan->ft.t;
+ struct mlx5_flow_table *ft = priv->fs->vlan->ft.t;
struct mlx5_flow_handle *rule;
int err;
rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- priv->fs.vlan->trap_rule = NULL;
- netdev_err(priv->netdev, "%s: add VLAN trap rule failed, err %d\n",
- __func__, err);
+ priv->fs->vlan->trap_rule = NULL;
+ mlx5_core_err(priv->fs->mdev, "%s: add VLAN trap rule failed, err %d\n",
+ __func__, err);
return err;
}
- priv->fs.vlan->trap_rule = rule;
+ priv->fs->vlan->trap_rule = rule;
return 0;
}
void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv)
{
- if (priv->fs.vlan->trap_rule) {
- mlx5_del_flow_rules(priv->fs.vlan->trap_rule);
- priv->fs.vlan->trap_rule = NULL;
+ if (priv->fs->vlan->trap_rule) {
+ mlx5_del_flow_rules(priv->fs->vlan->trap_rule);
+ priv->fs->vlan->trap_rule = NULL;
}
}
int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
{
- struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
+ struct mlx5_flow_table *ft = priv->fs->l2.ft.t;
struct mlx5_flow_handle *rule;
int err;
rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- priv->fs.l2.trap_rule = NULL;
- netdev_err(priv->netdev, "%s: add MAC trap rule failed, err %d\n",
- __func__, err);
+ priv->fs->l2.trap_rule = NULL;
+ mlx5_core_err(priv->fs->mdev, "%s: add MAC trap rule failed, err %d\n",
+ __func__, err);
return err;
}
- priv->fs.l2.trap_rule = rule;
+ priv->fs->l2.trap_rule = rule;
return 0;
}
void mlx5e_remove_mac_trap(struct mlx5e_priv *priv)
{
- if (priv->fs.l2.trap_rule) {
- mlx5_del_flow_rules(priv->fs.l2.trap_rule);
- priv->fs.l2.trap_rule = NULL;
+ if (priv->fs->l2.trap_rule) {
+ mlx5_del_flow_rules(priv->fs->l2.trap_rule);
+ priv->fs->l2.trap_rule = NULL;
}
}
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
{
- if (!priv->fs.vlan->cvlan_filter_disabled)
+ if (!priv->fs->vlan->cvlan_filter_disabled)
return;
- priv->fs.vlan->cvlan_filter_disabled = false;
+ priv->fs->vlan->cvlan_filter_disabled = false;
if (priv->netdev->flags & IFF_PROMISC)
return;
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
{
- if (priv->fs.vlan->cvlan_filter_disabled)
+ if (priv->fs->vlan->cvlan_filter_disabled)
return;
- priv->fs.vlan->cvlan_filter_disabled = true;
+ priv->fs->vlan->cvlan_filter_disabled = true;
if (priv->netdev->flags & IFF_PROMISC)
return;
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ mlx5e_add_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
-static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid)
+static int mlx5e_vlan_rx_add_cvid(struct mlx5e_flow_steering *fs, u16 vid)
{
int err;
- set_bit(vid, priv->fs.vlan->active_cvlans);
+ set_bit(vid, fs->vlan->active_cvlans);
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
+ err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
if (err)
- clear_bit(vid, priv->fs.vlan->active_cvlans);
+ clear_bit(vid, fs->vlan->active_cvlans);
return err;
}
-static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
+static int mlx5e_vlan_rx_add_svid(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev, u16 vid)
{
- struct net_device *netdev = priv->netdev;
int err;
- set_bit(vid, priv->fs.vlan->active_svlans);
+ set_bit(vid, fs->vlan->active_svlans);
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
+ err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
if (err) {
- clear_bit(vid, priv->fs.vlan->active_svlans);
+ clear_bit(vid, fs->vlan->active_svlans);
return err;
}
@@ -457,86 +456,91 @@ static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
return err;
}
-int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+int mlx5e_fs_vlan_rx_add_vid(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
+ __be16 proto, u16 vid)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
- if (mlx5e_is_uplink_rep(priv))
- return 0; /* no vlan table for uplink rep */
+ if (!fs->vlan) {
+ mlx5_core_err(fs->mdev, "Vlan doesn't exist\n");
+ return -EINVAL;
+ }
if (be16_to_cpu(proto) == ETH_P_8021Q)
- return mlx5e_vlan_rx_add_cvid(priv, vid);
+ return mlx5e_vlan_rx_add_cvid(fs, vid);
else if (be16_to_cpu(proto) == ETH_P_8021AD)
- return mlx5e_vlan_rx_add_svid(priv, vid);
+ return mlx5e_vlan_rx_add_svid(fs, netdev, vid);
return -EOPNOTSUPP;
}
-int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
+ __be16 proto, u16 vid)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
-
- if (mlx5e_is_uplink_rep(priv))
- return 0; /* no vlan table for uplink rep */
+ if (!fs->vlan) {
+ mlx5_core_err(fs->mdev, "Vlan doesn't exist\n");
+ return -EINVAL;
+ }
if (be16_to_cpu(proto) == ETH_P_8021Q) {
- clear_bit(vid, priv->fs.vlan->active_cvlans);
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
+ clear_bit(vid, fs->vlan->active_cvlans);
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
} else if (be16_to_cpu(proto) == ETH_P_8021AD) {
- clear_bit(vid, priv->fs.vlan->active_svlans);
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
- netdev_update_features(dev);
+ clear_bit(vid, fs->vlan->active_svlans);
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
+ netdev_update_features(netdev);
}
return 0;
}
-static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
+static void mlx5e_fs_add_vlan_rules(struct mlx5e_flow_steering *fs)
{
int i;
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+ mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
+ for_each_set_bit(i, fs->vlan->active_cvlans, VLAN_N_VID) {
+ mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
}
- for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID)
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
+ for_each_set_bit(i, fs->vlan->active_svlans, VLAN_N_VID)
+ mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
- if (priv->fs.vlan->cvlan_filter_disabled)
- mlx5e_add_any_vid_rules(priv);
+ if (fs->vlan->cvlan_filter_disabled)
+ mlx5e_fs_add_any_vid_rules(fs);
}
static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
{
int i;
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+ mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) {
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
+ for_each_set_bit(i, priv->fs->vlan->active_cvlans, VLAN_N_VID) {
+ mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
}
- for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID)
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
+ for_each_set_bit(i, priv->fs->vlan->active_svlans, VLAN_N_VID)
+ mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
- WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state)));
+ WARN_ON_ONCE(priv->fs->state_destroy);
mlx5e_remove_vlan_trap(priv);
/* must be called after DESTROY bit is set and
* set_rx_mode is called and flushed
*/
- if (priv->fs.vlan->cvlan_filter_disabled)
- mlx5e_del_any_vid_rules(priv);
+ if (priv->fs->vlan->cvlan_filter_disabled)
+ mlx5e_fs_del_any_vid_rules(priv->fs);
}
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
-static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
+static void mlx5e_execute_l2_action(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_hash_node *hn)
{
u8 action = hn->action;
@@ -547,9 +551,9 @@ static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
switch (action) {
case MLX5E_ACTION_ADD:
- mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
+ mlx5e_add_l2_flow_rule(fs, &hn->ai, MLX5E_FULLMATCH);
if (!is_multicast_ether_addr(mac_addr)) {
- l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr);
+ l2_err = mlx5_mpfs_add_mac(fs->mdev, mac_addr);
hn->mpfs = !l2_err;
}
hn->action = MLX5E_ACTION_NONE;
@@ -557,52 +561,50 @@ static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
case MLX5E_ACTION_DEL:
if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
- l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr);
- mlx5e_del_l2_flow_rule(priv, &hn->ai);
+ l2_err = mlx5_mpfs_del_mac(fs->mdev, mac_addr);
+ mlx5e_del_l2_flow_rule(fs, &hn->ai);
mlx5e_del_l2_from_hash(hn);
break;
}
if (l2_err)
- netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n",
- action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
+ mlx5_core_warn(fs->mdev, "MPFS, failed to %s mac %pM, err(%d)\n",
+ action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
}
-static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
+static void mlx5e_sync_netdev_addr(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev)
{
- struct net_device *netdev = priv->netdev;
struct netdev_hw_addr *ha;
netif_addr_lock_bh(netdev);
- mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
- priv->netdev->dev_addr);
-
+ mlx5e_add_l2_to_hash(fs->l2.netdev_uc, netdev->dev_addr);
netdev_for_each_uc_addr(ha, netdev)
- mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);
+ mlx5e_add_l2_to_hash(fs->l2.netdev_uc, ha->addr);
netdev_for_each_mc_addr(ha, netdev)
- mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);
+ mlx5e_add_l2_to_hash(fs->l2.netdev_mc, ha->addr);
netif_addr_unlock_bh(netdev);
}
-static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
+static void mlx5e_fill_addr_array(struct mlx5e_flow_steering *fs, int list_type,
+ struct net_device *ndev,
u8 addr_array[][ETH_ALEN], int size)
{
bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
- struct net_device *ndev = priv->netdev;
struct mlx5e_l2_hash_node *hn;
struct hlist_head *addr_list;
struct hlist_node *tmp;
int i = 0;
int hi;
- addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
+ addr_list = is_uc ? fs->l2.netdev_uc : fs->l2.netdev_mc;
if (is_uc) /* Make sure our own address is pushed first */
ether_addr_copy(addr_array[i++], ndev->dev_addr);
- else if (priv->fs.l2.broadcast_enabled)
+ else if (fs->l2.broadcast_enabled)
ether_addr_copy(addr_array[i++], ndev->broadcast);
mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
@@ -614,7 +616,8 @@ static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
}
}
-static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
+static void mlx5e_vport_context_update_addr_list(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
int list_type)
{
bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
@@ -627,19 +630,19 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
int err;
int hi;
- size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
+ size = is_uc ? 0 : (fs->l2.broadcast_enabled ? 1 : 0);
max_size = is_uc ?
- 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
- 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
+ 1 << MLX5_CAP_GEN(fs->mdev, log_max_current_uc_list) :
+ 1 << MLX5_CAP_GEN(fs->mdev, log_max_current_mc_list);
- addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
+ addr_list = is_uc ? fs->l2.netdev_uc : fs->l2.netdev_mc;
mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
size++;
if (size > max_size) {
- netdev_warn(priv->netdev,
- "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
- is_uc ? "UC" : "MC", size, max_size);
+ mlx5_core_warn(fs->mdev,
+ "mdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
+ is_uc ? "UC" : "MC", size, max_size);
size = max_size;
}
@@ -649,65 +652,67 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
err = -ENOMEM;
goto out;
}
- mlx5e_fill_addr_array(priv, list_type, addr_array, size);
+ mlx5e_fill_addr_array(fs, list_type, netdev, addr_array, size);
}
- err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
+ err = mlx5_modify_nic_vport_mac_list(fs->mdev, list_type, addr_array, size);
out:
if (err)
- netdev_err(priv->netdev,
- "Failed to modify vport %s list err(%d)\n",
- is_uc ? "UC" : "MC", err);
+ mlx5_core_err(fs->mdev,
+ "Failed to modify vport %s list err(%d)\n",
+ is_uc ? "UC" : "MC", err);
kfree(addr_array);
}
-static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
+static void mlx5e_vport_context_update(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev)
{
- struct mlx5e_l2_table *ea = &priv->fs.l2;
+ struct mlx5e_l2_table *ea = &fs->l2;
- mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
- mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
- mlx5_modify_nic_vport_promisc(priv->mdev, 0,
+ mlx5e_vport_context_update_addr_list(fs, netdev, MLX5_NVPRT_LIST_TYPE_UC);
+ mlx5e_vport_context_update_addr_list(fs, netdev, MLX5_NVPRT_LIST_TYPE_MC);
+ mlx5_modify_nic_vport_promisc(fs->mdev, 0,
ea->allmulti_enabled,
ea->promisc_enabled);
}
-static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
+static void mlx5e_apply_netdev_addr(struct mlx5e_flow_steering *fs)
{
struct mlx5e_l2_hash_node *hn;
struct hlist_node *tmp;
int i;
- mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
- mlx5e_execute_l2_action(priv, hn);
+ mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_uc, i)
+ mlx5e_execute_l2_action(fs, hn);
- mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
- mlx5e_execute_l2_action(priv, hn);
+ mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_mc, i)
+ mlx5e_execute_l2_action(fs, hn);
}
-static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
+static void mlx5e_handle_netdev_addr(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev)
{
struct mlx5e_l2_hash_node *hn;
struct hlist_node *tmp;
int i;
- mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
+ mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_uc, i)
hn->action = MLX5E_ACTION_DEL;
- mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
+ mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_mc, i)
hn->action = MLX5E_ACTION_DEL;
- if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
- mlx5e_sync_netdev_addr(priv);
+ if (fs->state_destroy)
+ mlx5e_sync_netdev_addr(fs, netdev);
- mlx5e_apply_netdev_addr(priv);
+ mlx5e_apply_netdev_addr(fs);
}
#define MLX5E_PROMISC_GROUP0_SIZE BIT(0)
#define MLX5E_PROMISC_TABLE_SIZE MLX5E_PROMISC_GROUP0_SIZE
-static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv)
+static int mlx5e_add_promisc_rule(struct mlx5e_flow_steering *fs)
{
- struct mlx5_flow_table *ft = priv->fs.promisc.ft.t;
+ struct mlx5_flow_table *ft = fs->promisc.ft.t;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle **rule_p;
MLX5_DECLARE_FLOW_ACT(flow_act);
@@ -718,22 +723,22 @@ static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv)
if (!spec)
return -ENOMEM;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
+ dest.ft = mlx5_get_ttc_flow_table(fs->ttc);
- rule_p = &priv->fs.promisc.rule;
+ rule_p = &fs->promisc.rule;
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
*rule_p = NULL;
- netdev_err(priv->netdev, "%s: add promiscuous rule failed\n", __func__);
+ mlx5_core_err(fs->mdev, "%s: add promiscuous rule failed\n", __func__);
}
kvfree(spec);
return err;
}
-static int mlx5e_create_promisc_table(struct mlx5e_priv *priv)
+static int mlx5e_create_promisc_table(struct mlx5e_flow_steering *fs)
{
- struct mlx5e_flow_table *ft = &priv->fs.promisc.ft;
+ struct mlx5e_flow_table *ft = &fs->promisc.ft;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -742,14 +747,14 @@ static int mlx5e_create_promisc_table(struct mlx5e_priv *priv)
ft_attr.level = MLX5E_PROMISC_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_auto_grouped_flow_table(fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
- netdev_err(priv->netdev, "fail to create promisc table err=%d\n", err);
+ mlx5_core_err(fs->mdev, "fail to create promisc table err=%d\n", err);
return err;
}
- err = mlx5e_add_promisc_rule(priv);
+ err = mlx5e_add_promisc_rule(fs);
if (err)
goto err_destroy_promisc_table;
@@ -762,34 +767,31 @@ err_destroy_promisc_table:
return err;
}
-static void mlx5e_del_promisc_rule(struct mlx5e_priv *priv)
+static void mlx5e_del_promisc_rule(struct mlx5e_flow_steering *fs)
{
- if (WARN(!priv->fs.promisc.rule, "Trying to remove non-existing promiscuous rule"))
+ if (WARN(!fs->promisc.rule, "Trying to remove non-existing promiscuous rule"))
return;
- mlx5_del_flow_rules(priv->fs.promisc.rule);
- priv->fs.promisc.rule = NULL;
+ mlx5_del_flow_rules(fs->promisc.rule);
+ fs->promisc.rule = NULL;
}
-static void mlx5e_destroy_promisc_table(struct mlx5e_priv *priv)
+static void mlx5e_destroy_promisc_table(struct mlx5e_flow_steering *fs)
{
- if (WARN(!priv->fs.promisc.ft.t, "Trying to remove non-existing promiscuous table"))
+ if (WARN(!fs->promisc.ft.t, "Trying to remove non-existing promiscuous table"))
return;
- mlx5e_del_promisc_rule(priv);
- mlx5_destroy_flow_table(priv->fs.promisc.ft.t);
- priv->fs.promisc.ft.t = NULL;
+ mlx5e_del_promisc_rule(fs);
+ mlx5_destroy_flow_table(fs->promisc.ft.t);
+ fs->promisc.ft.t = NULL;
}
-void mlx5e_set_rx_mode_work(struct work_struct *work)
+void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev)
{
- struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
- set_rx_mode_work);
+ struct mlx5e_l2_table *ea = &fs->l2;
- struct mlx5e_l2_table *ea = &priv->fs.l2;
- struct net_device *ndev = priv->netdev;
-
- bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
- bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
- bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
+ bool rx_mode_enable = fs->state_destroy;
+ bool promisc_enabled = rx_mode_enable && (netdev->flags & IFF_PROMISC);
+ bool allmulti_enabled = rx_mode_enable && (netdev->flags & IFF_ALLMULTI);
bool broadcast_enabled = rx_mode_enable;
bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
@@ -801,32 +803,32 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
int err;
if (enable_promisc) {
- err = mlx5e_create_promisc_table(priv);
+ err = mlx5e_create_promisc_table(fs);
if (err)
enable_promisc = false;
- if (!priv->channels.params.vlan_strip_disable && !err)
- netdev_warn_once(ndev,
- "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
+ if (!fs->vlan_strip_disable && !err)
+ mlx5_core_warn_once(fs->mdev,
+ "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
}
if (enable_allmulti)
- mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
+ mlx5e_add_l2_flow_rule(fs, &ea->allmulti, MLX5E_ALLMULTI);
if (enable_broadcast)
- mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
+ mlx5e_add_l2_flow_rule(fs, &ea->broadcast, MLX5E_FULLMATCH);
- mlx5e_handle_netdev_addr(priv);
+ mlx5e_handle_netdev_addr(fs, netdev);
if (disable_broadcast)
- mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
+ mlx5e_del_l2_flow_rule(fs, &ea->broadcast);
if (disable_allmulti)
- mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
+ mlx5e_del_l2_flow_rule(fs, &ea->allmulti);
if (disable_promisc)
- mlx5e_destroy_promisc_table(priv);
+ mlx5e_destroy_promisc_table(fs);
ea->promisc_enabled = promisc_enabled;
ea->allmulti_enabled = allmulti_enabled;
ea->broadcast_enabled = broadcast_enabled;
- mlx5e_vport_context_update(priv);
+ mlx5e_vport_context_update(fs, netdev);
}
static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
@@ -841,9 +843,9 @@ static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
ft->num_groups = 0;
}
-void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
+void mlx5e_fs_init_l2_addr(struct mlx5e_flow_steering *fs, struct net_device *netdev)
{
- ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
+ ether_addr_copy(fs->l2.broadcast.addr, netdev->broadcast);
}
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
@@ -861,7 +863,7 @@ static void mlx5e_set_inner_ttc_params(struct mlx5e_priv *priv,
int tt;
memset(ttc_params, 0, sizeof(*ttc_params));
- ttc_params->ns = mlx5_get_flow_namespace(priv->mdev,
+ ttc_params->ns = mlx5_get_flow_namespace(priv->fs->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
@@ -884,7 +886,7 @@ void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
int tt;
memset(ttc_params, 0, sizeof(*ttc_params));
- ttc_params->ns = mlx5_get_flow_namespace(priv->mdev,
+ ttc_params->ns = mlx5_get_flow_namespace(priv->fs->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
ft_attr->level = MLX5E_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
@@ -898,18 +900,18 @@ void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
}
ttc_params->inner_ttc = tunnel;
- if (!tunnel || !mlx5_tunnel_inner_ft_supported(priv->mdev))
+ if (!tunnel || !mlx5_tunnel_inner_ft_supported(priv->fs->mdev))
return;
for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
ttc_params->tunnel_dests[tt].type =
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
ttc_params->tunnel_dests[tt].ft =
- mlx5_get_ttc_flow_table(priv->fs.inner_ttc);
+ mlx5_get_ttc_flow_table(priv->fs->inner_ttc);
}
}
-static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
+static void mlx5e_del_l2_flow_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_rule *ai)
{
if (!IS_ERR_OR_NULL(ai->rule)) {
@@ -918,10 +920,10 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
}
}
-static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
+static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_rule *ai, int type)
{
- struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
+ struct mlx5_flow_table *ft = fs->l2.ft.t;
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_spec *spec;
@@ -939,7 +941,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
outer_headers.dmac_47_16);
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
+ dest.ft = mlx5_get_ttc_flow_table(fs->ttc);
switch (type) {
case MLX5E_FULLMATCH:
@@ -957,8 +959,8 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(ai->rule)) {
- netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
- __func__, mv_dmac);
+ mlx5_core_err(fs->mdev, "%s: add l2 rule(mac:%pM) failed\n",
+ __func__, mv_dmac);
err = PTR_ERR(ai->rule);
ai->rule = NULL;
}
@@ -1044,12 +1046,12 @@ err_destroy_groups:
static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
- mlx5e_destroy_flow_table(&priv->fs.l2.ft);
+ mlx5e_destroy_flow_table(&priv->fs->l2.ft);
}
static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
- struct mlx5e_l2_table *l2_table = &priv->fs.l2;
+ struct mlx5e_l2_table *l2_table = &priv->fs->l2;
struct mlx5e_flow_table *ft = &l2_table->ft;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -1060,7 +1062,7 @@ static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
ft_attr.level = MLX5E_L2_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -1180,20 +1182,20 @@ static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
return err;
}
-static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
+static int mlx5e_fs_create_vlan_table(struct mlx5e_flow_steering *fs)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_flow_table *ft;
int err;
- ft = &priv->fs.vlan->ft;
+ ft = &fs->vlan->ft;
ft->num_groups = 0;
ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
ft_attr.level = MLX5E_VLAN_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(fs->ns, &ft_attr);
if (IS_ERR(ft->t))
return PTR_ERR(ft->t);
@@ -1207,7 +1209,7 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
if (err)
goto err_free_g;
- mlx5e_add_vlan_rules(priv);
+ mlx5e_fs_add_vlan_rules(fs);
return 0;
@@ -1222,33 +1224,33 @@ err_destroy_vlan_table:
static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
mlx5e_del_vlan_rules(priv);
- mlx5e_destroy_flow_table(&priv->fs.vlan->ft);
+ mlx5e_destroy_flow_table(&priv->fs->vlan->ft);
}
static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
{
- if (!mlx5_tunnel_inner_ft_supported(priv->mdev))
+ if (!mlx5_tunnel_inner_ft_supported(priv->fs->mdev))
return;
- mlx5_destroy_ttc_table(priv->fs.inner_ttc);
+ mlx5_destroy_ttc_table(priv->fs->inner_ttc);
}
void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
{
- mlx5_destroy_ttc_table(priv->fs.ttc);
+ mlx5_destroy_ttc_table(priv->fs->ttc);
}
static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
{
struct ttc_params ttc_params = {};
- if (!mlx5_tunnel_inner_ft_supported(priv->mdev))
+ if (!mlx5_tunnel_inner_ft_supported(priv->fs->mdev))
return 0;
mlx5e_set_inner_ttc_params(priv, &ttc_params);
- priv->fs.inner_ttc = mlx5_create_inner_ttc_table(priv->mdev,
- &ttc_params);
- if (IS_ERR(priv->fs.inner_ttc))
- return PTR_ERR(priv->fs.inner_ttc);
+ priv->fs->inner_ttc = mlx5_create_inner_ttc_table(priv->fs->mdev,
+ &ttc_params);
+ if (IS_ERR(priv->fs->inner_ttc))
+ return PTR_ERR(priv->fs->inner_ttc);
return 0;
}
@@ -1257,9 +1259,9 @@ int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
struct ttc_params ttc_params = {};
mlx5e_set_ttc_params(priv, &ttc_params, true);
- priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
- if (IS_ERR(priv->fs.ttc))
- return PTR_ERR(priv->fs.ttc);
+ priv->fs->ttc = mlx5_create_ttc_table(priv->fs->mdev, &ttc_params);
+ if (IS_ERR(priv->fs->ttc))
+ return PTR_ERR(priv->fs->ttc);
return 0;
}
@@ -1267,45 +1269,44 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
int err;
- priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
+ priv->fs->ns = mlx5_get_flow_namespace(priv->fs->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
- if (!priv->fs.ns)
+ if (!priv->fs->ns)
return -EOPNOTSUPP;
err = mlx5e_arfs_create_tables(priv);
if (err) {
- netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
- err);
+ mlx5_core_err(priv->fs->mdev, "Failed to create arfs tables, err=%d\n",
+ err);
priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
}
err = mlx5e_create_inner_ttc_table(priv);
if (err) {
- netdev_err(priv->netdev,
- "Failed to create inner ttc table, err=%d\n",
- err);
+ mlx5_core_err(priv->fs->mdev,
+ "Failed to create inner ttc table, err=%d\n", err);
goto err_destroy_arfs_tables;
}
err = mlx5e_create_ttc_table(priv);
if (err) {
- netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
- err);
+ mlx5_core_err(priv->fs->mdev, "Failed to create ttc table, err=%d\n",
+ err);
goto err_destroy_inner_ttc_table;
}
err = mlx5e_create_l2_table(priv);
if (err) {
- netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
- err);
+ mlx5_core_err(priv->fs->mdev, "Failed to create l2 table, err=%d\n",
+ err);
goto err_destroy_ttc_table;
}
- err = mlx5e_create_vlan_table(priv);
+ err = mlx5e_fs_create_vlan_table(priv->fs);
if (err) {
- netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
- err);
+ mlx5_core_err(priv->fs->mdev, "Failed to create vlan table, err=%d\n",
+ err);
goto err_destroy_l2_table;
}
@@ -1342,16 +1343,69 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
mlx5e_ethtool_cleanup_steering(priv);
}
-int mlx5e_fs_init(struct mlx5e_priv *priv)
+static int mlx5e_fs_vlan_alloc(struct mlx5e_flow_steering *fs)
+{
+ fs->vlan = kvzalloc(sizeof(*fs->vlan), GFP_KERNEL);
+ if (!fs->vlan)
+ return -ENOMEM;
+ return 0;
+}
+
+static void mlx5e_fs_vlan_free(struct mlx5e_flow_steering *fs)
{
- priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
- if (!priv->fs.vlan)
+ kvfree(fs->vlan);
+}
+
+static int mlx5e_fs_tc_alloc(struct mlx5e_flow_steering *fs)
+{
+ fs->tc = mlx5e_tc_table_alloc();
+ if (IS_ERR(fs->tc))
return -ENOMEM;
return 0;
}
-void mlx5e_fs_cleanup(struct mlx5e_priv *priv)
+static void mlx5e_fs_tc_free(struct mlx5e_flow_steering *fs)
+{
+ mlx5e_tc_table_free(fs->tc);
+}
+
+struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
+ struct mlx5_core_dev *mdev,
+ bool state_destroy)
+{
+ struct mlx5e_flow_steering *fs;
+ int err;
+
+ fs = kvzalloc(sizeof(*fs), GFP_KERNEL);
+ if (!fs)
+ goto err;
+
+ fs->mdev = mdev;
+ fs->state_destroy = state_destroy;
+ if (mlx5e_profile_feature_cap(profile, FS_VLAN)) {
+ err = mlx5e_fs_vlan_alloc(fs);
+ if (err)
+ goto err_free_fs;
+ }
+
+ if (mlx5e_profile_feature_cap(profile, FS_TC)) {
+ err = mlx5e_fs_tc_alloc(fs);
+ if (err)
+ goto err_free_vlan;
+ }
+
+ return fs;
+err_free_vlan:
+ mlx5e_fs_vlan_free(fs);
+err_free_fs:
+ kvfree(fs);
+err:
+ return NULL;
+}
+
+void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs)
{
- kvfree(priv->fs.vlan);
- priv->fs.vlan = NULL;
+ mlx5e_fs_tc_free(fs);
+ mlx5e_fs_vlan_free(fs);
+ kvfree(fs);
}
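
The new mlx5e_fs_init() allocates the flow-steering context as a standalone object, allocates the VLAN and TC sub-objects only when the profile advertises the matching feature bit, and unwinds in strict reverse allocation order on failure. Below is a minimal userspace sketch of that feature-gated allocation/unwind pattern; the names, feature bits and plain malloc/calloc calls are illustrative stand-ins, not the driver's API.

	/* Illustrative analogue of feature-gated allocation with
	 * reverse-order unwind; names and feature bits are hypothetical.
	 */
	#include <stdlib.h>

	#define FEAT_VLAN (1u << 0)
	#define FEAT_TC   (1u << 1)

	struct fs_ctx {
		void *vlan;
		void *tc;
	};

	static struct fs_ctx *fs_init(unsigned int features)
	{
		struct fs_ctx *fs = calloc(1, sizeof(*fs));

		if (!fs)
			goto err;

		if (features & FEAT_VLAN) {
			fs->vlan = calloc(1, 64);
			if (!fs->vlan)
				goto err_free_fs;
		}

		if (features & FEAT_TC) {
			fs->tc = calloc(1, 64);
			if (!fs->tc)
				goto err_free_vlan;
		}

		return fs;

	err_free_vlan:		/* unwind strictly in reverse allocation order */
		free(fs->vlan);
	err_free_fs:
		free(fs);
	err:
		return NULL;
	}

	int main(void)
	{
		struct fs_ctx *fs = fs_init(FEAT_VLAN | FEAT_TC);

		if (!fs)
			return 1;
		free(fs->tc);
		free(fs->vlan);
		free(fs);
		return 0;
	}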
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index ad0d234632a3..3e4bc7836ef4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -81,18 +81,18 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
case UDP_V6_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
- eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
+ eth_ft = &priv->fs->ethtool.l3_l4_ft[prio];
break;
case IP_USER_FLOW:
case IPV6_USER_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
- eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
+ eth_ft = &priv->fs->ethtool.l3_l4_ft[prio];
break;
case ETHER_FLOW:
max_tuples = ETHTOOL_NUM_L2_FTS;
prio = max_tuples - num_tuples;
- eth_ft = &priv->fs.ethtool.l2_ft[prio];
+ eth_ft = &priv->fs->ethtool.l2_ft[prio];
prio += MLX5E_ETHTOOL_L2_PRIO;
break;
default:
@@ -383,14 +383,14 @@ static void add_rule_to_list(struct mlx5e_priv *priv,
struct mlx5e_ethtool_rule *rule)
{
struct mlx5e_ethtool_rule *iter;
- struct list_head *head = &priv->fs.ethtool.rules;
+ struct list_head *head = &priv->fs->ethtool.rules;
- list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
+ list_for_each_entry(iter, &priv->fs->ethtool.rules, list) {
if (iter->flow_spec.location > rule->flow_spec.location)
break;
head = &iter->list;
}
- priv->fs.ethtool.tot_num_rules++;
+ priv->fs->ethtool.tot_num_rules++;
list_add(&rule->list, head);
}
@@ -507,7 +507,7 @@ static void del_ethtool_rule(struct mlx5e_priv *priv,
if (eth_rule->rss)
mlx5e_rss_refcnt_dec(eth_rule->rss);
list_del(&eth_rule->list);
- priv->fs.ethtool.tot_num_rules--;
+ priv->fs->ethtool.tot_num_rules--;
put_flow_table(eth_rule->eth_ft);
kfree(eth_rule);
}
@@ -517,7 +517,7 @@ static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
{
struct mlx5e_ethtool_rule *iter;
- list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
+ list_for_each_entry(iter, &priv->fs->ethtool.rules, list) {
if (iter->flow_spec.location == location)
return iter;
}
@@ -742,10 +742,7 @@ mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
eth_rule->flow_spec = *fs;
eth_rule->eth_ft = eth_ft;
- if (!eth_ft->ft) {
- err = -EINVAL;
- goto del_ethtool_rule;
- }
+
rule = add_ethtool_flow_rule(priv, eth_rule, eth_ft->ft, fs, rss_context);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -791,7 +788,7 @@ mlx5e_ethtool_get_flow(struct mlx5e_priv *priv,
if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
return -EINVAL;
- list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) {
+ list_for_each_entry(eth_rule, &priv->fs->ethtool.rules, list) {
int index;
if (eth_rule->flow_spec.location != location)
@@ -834,13 +831,13 @@ void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv)
struct mlx5e_ethtool_rule *iter;
struct mlx5e_ethtool_rule *temp;
- list_for_each_entry_safe(iter, temp, &priv->fs.ethtool.rules, list)
+ list_for_each_entry_safe(iter, temp, &priv->fs->ethtool.rules, list)
del_ethtool_rule(priv, iter);
}
void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
{
- INIT_LIST_HEAD(&priv->fs.ethtool.rules);
+ INIT_LIST_HEAD(&priv->fs->ethtool.rules);
}
static int flow_type_to_traffic_type(u32 flow_type)
@@ -966,7 +963,7 @@ int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
switch (info->cmd) {
case ETHTOOL_GRXCLSRLCNT:
- info->rule_cnt = priv->fs.ethtool.tot_num_rules;
+ info->rule_cnt = priv->fs->ethtool.tot_num_rules;
break;
case ETHTOOL_GRXCLSRULE:
err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
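
The add_rule_to_list() hunk above keeps ethtool rules ordered by flow_spec.location: it walks the existing list, remembers the last node whose location does not exceed the new rule's, and links the new rule after it. A simplified userspace sketch of that ordered insertion follows; it uses a hypothetical singly linked rule struct rather than the kernel's list_head.

	#include <stdio.h>
	#include <stdlib.h>

	struct rule {
		unsigned int location;
		struct rule *next;
	};

	/* Insert nrule so the list stays sorted by ascending location,
	 * mirroring the walk-then-link-after pattern in the driver.
	 */
	static void add_rule_sorted(struct rule **head, struct rule *nrule)
	{
		struct rule **pos = head;

		while (*pos && (*pos)->location <= nrule->location)
			pos = &(*pos)->next;

		nrule->next = *pos;
		*pos = nrule;
	}

	int main(void)
	{
		struct rule *head = NULL, *r;
		unsigned int locs[] = { 5, 1, 3 };

		for (int i = 0; i < 3; i++) {
			r = calloc(1, sizeof(*r));
			r->location = locs[i];
			add_rule_sorted(&head, r);
		}
		for (r = head; r; r = r->next)
			printf("%u\n", r->location);	/* 1 3 5 */
		while (head) {
			r = head;
			head = head->next;
			free(r);
		}
		return 0;
	}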
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 087952b84ccb..d858667736a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -31,7 +31,6 @@
*/
#include <net/tc_act/tc_gact.h>
-#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <net/geneve.h>
@@ -64,6 +63,7 @@
#include "en/devlink.h"
#include "lib/mlx5.h"
#include "en/ptp.h"
+#include "en/htb.h"
#include "qos.h"
#include "en/trap.h"
@@ -1912,8 +1912,7 @@ static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix,
{
int tc;
- if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL ||
- !params->mqprio.channel.rl) {
+ if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL) {
*hw_id = 0;
return 0;
}
@@ -1922,7 +1921,14 @@ static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix,
if (tc < 0)
return tc;
- return mlx5e_mqprio_rl_get_node_hw_id(params->mqprio.channel.rl, tc, hw_id);
+ if (tc >= params->mqprio.num_tc) {
+ WARN(1, "Unexpected TCs configuration. tc %d is out of range of %u",
+ tc, params->mqprio.num_tc);
+ return -EINVAL;
+ }
+
+ *hw_id = params->mqprio.channel.hw_id[tc];
+ return 0;
}
static int mlx5e_open_sqs(struct mlx5e_channel *c,
@@ -2383,9 +2389,11 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
goto err_close_channels;
}
- err = mlx5e_qos_open_queues(priv, chs);
- if (err)
- goto err_close_ptp;
+ if (priv->htb) {
+ err = mlx5e_qos_open_queues(priv, chs);
+ if (err)
+ goto err_close_ptp;
+ }
mlx5e_health_channels_update(priv);
kvfree(cparam);
@@ -2567,9 +2575,11 @@ static int mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc,
int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
{
- int qos_queues, nch, ntc, num_txqs, err;
+ int nch, ntc, num_txqs, err;
+ int qos_queues = 0;
- qos_queues = mlx5e_qos_cur_leaf_nodes(priv);
+ if (priv->htb)
+ qos_queues = mlx5e_htb_cur_leaf_nodes(priv->htb);
nch = priv->channels.params.num_channels;
ntc = mlx5e_get_dcb_num_tc(&priv->channels.params);
@@ -2615,13 +2625,6 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
goto err_txqs;
}
- if (priv->mqprio_rl != priv->channels.params.mqprio.channel.rl) {
- if (priv->mqprio_rl) {
- mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
- mlx5e_mqprio_rl_free(priv->mqprio_rl);
- }
- priv->mqprio_rl = priv->channels.params.mqprio.channel.rl;
- }
return 0;
@@ -2724,7 +2727,8 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
mlx5e_build_txq_maps(priv);
mlx5e_activate_channels(&priv->channels);
- mlx5e_qos_activate_queues(priv);
+ if (priv->htb)
+ mlx5e_qos_activate_queues(priv);
mlx5e_xdp_tx_enable(priv);
/* dev_watchdog() wants all TX queues to be started when the carrier is
@@ -2841,7 +2845,7 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
new_chs.params = *params;
- mlx5e_selq_prepare(&priv->selq, &new_chs.params, !!priv->htb.maj_id);
+ mlx5e_selq_prepare_params(&priv->selq, &new_chs.params);
err = mlx5e_open_channels(priv, &new_chs);
if (err)
@@ -2897,7 +2901,7 @@ int mlx5e_open_locked(struct net_device *netdev)
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
- mlx5e_selq_prepare(&priv->selq, &priv->channels.params, !!priv->htb.maj_id);
+ mlx5e_selq_prepare_params(&priv->selq, &priv->channels.params);
set_bit(MLX5E_STATE_OPENED, &priv->state);
@@ -3135,6 +3139,12 @@ err_close_tises:
static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
+ if (priv->mqprio_rl) {
+ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+ mlx5e_mqprio_rl_free(priv->mqprio_rl);
+ priv->mqprio_rl = NULL;
+ }
+ mlx5e_accel_cleanup_tx(priv);
mlx5e_destroy_tises(priv);
}
@@ -3203,19 +3213,38 @@ static void mlx5e_params_mqprio_dcb_set(struct mlx5e_params *params, u8 num_tc)
{
params->mqprio.mode = TC_MQPRIO_MODE_DCB;
params->mqprio.num_tc = num_tc;
- params->mqprio.channel.rl = NULL;
mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc,
params->num_channels);
}
+static void mlx5e_mqprio_rl_update_params(struct mlx5e_params *params,
+ struct mlx5e_mqprio_rl *rl)
+{
+ int tc;
+
+ for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
+ u32 hw_id = 0;
+
+ if (rl)
+ mlx5e_mqprio_rl_get_node_hw_id(rl, tc, &hw_id);
+ params->mqprio.channel.hw_id[tc] = hw_id;
+ }
+}
+
static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params,
- struct tc_mqprio_qopt *qopt,
+ struct tc_mqprio_qopt_offload *mqprio,
struct mlx5e_mqprio_rl *rl)
{
+ int tc;
+
params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
- params->mqprio.num_tc = qopt->num_tc;
- params->mqprio.channel.rl = rl;
- mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, qopt);
+ params->mqprio.num_tc = mqprio->qopt.num_tc;
+
+ for (tc = 0; tc < TC_MAX_QUEUE; tc++)
+ params->mqprio.channel.max_rate[tc] = mqprio->max_rate[tc];
+
+ mlx5e_mqprio_rl_update_params(params, rl);
+ mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, &mqprio->qopt);
}
static void mlx5e_params_mqprio_reset(struct mlx5e_params *params)
@@ -3241,6 +3270,12 @@ static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
err = mlx5e_safe_switch_params(priv, &new_params,
mlx5e_num_channels_changed_ctx, NULL, true);
+ if (!err && priv->mqprio_rl) {
+ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+ mlx5e_mqprio_rl_free(priv->mqprio_rl);
+ priv->mqprio_rl = NULL;
+ }
+
priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
mlx5e_get_dcb_num_tc(&priv->channels.params));
return err;
@@ -3299,16 +3334,38 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
return 0;
}
-static bool mlx5e_mqprio_rate_limit(struct tc_mqprio_qopt_offload *mqprio)
+static bool mlx5e_mqprio_rate_limit(u8 num_tc, u64 max_rate[])
{
int tc;
- for (tc = 0; tc < mqprio->qopt.num_tc; tc++)
- if (mqprio->max_rate[tc])
+ for (tc = 0; tc < num_tc; tc++)
+ if (max_rate[tc])
return true;
return false;
}
+static struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_create(struct mlx5_core_dev *mdev,
+ u8 num_tc, u64 max_rate[])
+{
+ struct mlx5e_mqprio_rl *rl;
+ int err;
+
+ if (!mlx5e_mqprio_rate_limit(num_tc, max_rate))
+ return NULL;
+
+ rl = mlx5e_mqprio_rl_alloc();
+ if (!rl)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5e_mqprio_rl_init(rl, mdev, num_tc, max_rate);
+ if (err) {
+ mlx5e_mqprio_rl_free(rl);
+ return ERR_PTR(err);
+ }
+
+ return rl;
+}
+
static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
struct tc_mqprio_qopt_offload *mqprio)
{
@@ -3322,32 +3379,32 @@ static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
if (err)
return err;
- rl = NULL;
- if (mlx5e_mqprio_rate_limit(mqprio)) {
- rl = mlx5e_mqprio_rl_alloc();
- if (!rl)
- return -ENOMEM;
- err = mlx5e_mqprio_rl_init(rl, priv->mdev, mqprio->qopt.num_tc,
- mqprio->max_rate);
- if (err) {
- mlx5e_mqprio_rl_free(rl);
- return err;
- }
- }
+ rl = mlx5e_mqprio_rl_create(priv->mdev, mqprio->qopt.num_tc, mqprio->max_rate);
+ if (IS_ERR(rl))
+ return PTR_ERR(rl);
new_params = priv->channels.params;
- mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt, rl);
+ mlx5e_params_mqprio_channel_set(&new_params, mqprio, rl);
nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
mlx5e_update_netdev_queues_ctx;
err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
- if (err && rl) {
- mlx5e_mqprio_rl_cleanup(rl);
- mlx5e_mqprio_rl_free(rl);
+ if (err) {
+ if (rl) {
+ mlx5e_mqprio_rl_cleanup(rl);
+ mlx5e_mqprio_rl_free(rl);
+ }
+ return err;
}
- return err;
+ if (priv->mqprio_rl) {
+ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+ mlx5e_mqprio_rl_free(priv->mqprio_rl);
+ }
+ priv->mqprio_rl = rl;
+
+ return 0;
}
static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
@@ -3356,7 +3413,7 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
/* MQPRIO is another toplevel qdisc that can't be attached
* simultaneously with the offloaded HTB.
*/
- if (WARN_ON(priv->htb.maj_id))
+ if (WARN_ON(mlx5e_selq_is_htb_enabled(&priv->selq)))
return -EINVAL;
switch (mqprio->mode) {
@@ -3369,47 +3426,6 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
}
}
-static int mlx5e_setup_tc_htb(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb)
-{
- int res;
-
- switch (htb->command) {
- case TC_HTB_CREATE:
- return mlx5e_htb_root_add(priv, htb->parent_classid, htb->classid,
- htb->extack);
- case TC_HTB_DESTROY:
- return mlx5e_htb_root_del(priv);
- case TC_HTB_LEAF_ALLOC_QUEUE:
- res = mlx5e_htb_leaf_alloc_queue(priv, htb->classid, htb->parent_classid,
- htb->rate, htb->ceil, htb->extack);
- if (res < 0)
- return res;
- htb->qid = res;
- return 0;
- case TC_HTB_LEAF_TO_INNER:
- return mlx5e_htb_leaf_to_inner(priv, htb->parent_classid, htb->classid,
- htb->rate, htb->ceil, htb->extack);
- case TC_HTB_LEAF_DEL:
- return mlx5e_htb_leaf_del(priv, &htb->classid, htb->extack);
- case TC_HTB_LEAF_DEL_LAST:
- case TC_HTB_LEAF_DEL_LAST_FORCE:
- return mlx5e_htb_leaf_del_last(priv, htb->classid,
- htb->command == TC_HTB_LEAF_DEL_LAST_FORCE,
- htb->extack);
- case TC_HTB_NODE_MODIFY:
- return mlx5e_htb_node_modify(priv, htb->classid, htb->rate, htb->ceil,
- htb->extack);
- case TC_HTB_LEAF_QUERY_QUEUE:
- res = mlx5e_get_txq_by_classid(priv, htb->classid);
- if (res < 0)
- return res;
- htb->qid = res;
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
-
static LIST_HEAD(mlx5e_block_cb_list);
static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
@@ -3443,7 +3459,7 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
return err;
case TC_SETUP_QDISC_HTB:
mutex_lock(&priv->state_lock);
- err = mlx5e_setup_tc_htb(priv, type_data);
+ err = mlx5e_htb_setup_tc(priv, type_data);
mutex_unlock(&priv->state_lock);
return err;
default:
@@ -3594,20 +3610,7 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
mutex_lock(&priv->state_lock);
- if (enable && priv->xsk.refcnt) {
- netdev_warn(netdev, "LRO is incompatible with AF_XDP (%u XSKs are active)\n",
- priv->xsk.refcnt);
- err = -EINVAL;
- goto out;
- }
-
cur_params = &priv->channels.params;
- if (enable && !MLX5E_GET_PFLAG(cur_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
- netdev_warn(netdev, "can't set LRO with legacy RQ\n");
- err = -EINVAL;
- goto out;
- }
-
new_params = *cur_params;
if (enable)
@@ -3676,6 +3679,7 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
static int set_feature_hw_tc(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err = 0;
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
@@ -3685,12 +3689,14 @@ static int set_feature_hw_tc(struct net_device *netdev, bool enable)
}
#endif
- if (!enable && priv->htb.maj_id) {
+ mutex_lock(&priv->state_lock);
+ if (!enable && mlx5e_selq_is_htb_enabled(&priv->selq)) {
netdev_err(netdev, "Active HTB offload, can't turn hw_tc_offload off\n");
- return -EINVAL;
+ err = -EINVAL;
}
+ mutex_unlock(&priv->state_lock);
- return 0;
+ return err;
}
static int set_feature_rx_all(struct net_device *netdev, bool enable)
@@ -3772,20 +3778,45 @@ static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
mutex_lock(&priv->state_lock);
+ priv->fs->vlan_strip_disable = !enable;
priv->channels.params.vlan_strip_disable = !enable;
+
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
- if (err)
+ if (err) {
+ priv->fs->vlan_strip_disable = enable;
priv->channels.params.vlan_strip_disable = enable;
-
+ }
unlock:
mutex_unlock(&priv->state_lock);
return err;
}
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_flow_steering *fs = priv->fs;
+
+ if (mlx5e_is_uplink_rep(priv))
+ return 0; /* no vlan table for uplink rep */
+
+ return mlx5e_fs_vlan_rx_add_vid(fs, dev, proto, vid);
+}
+
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_flow_steering *fs = priv->fs;
+
+ if (mlx5e_is_uplink_rep(priv))
+ return 0; /* no vlan table for uplink rep */
+
+ return mlx5e_fs_vlan_rx_kill_vid(fs, dev, proto, vid);
+}
+
#ifdef CONFIG_MLX5_EN_ARFS
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
@@ -3883,8 +3914,8 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
mutex_lock(&priv->state_lock);
params = &priv->channels.params;
- if (!priv->fs.vlan ||
- !bitmap_empty(mlx5e_vlan_get_active_svlans(priv->fs.vlan), VLAN_N_VID)) {
+ if (!priv->fs->vlan ||
+ !bitmap_empty(mlx5e_vlan_get_active_svlans(priv->fs->vlan), VLAN_N_VID)) {
/* HW strips the outer C-tag header, this is a problem
* for S-tag traffic.
*/
@@ -3916,6 +3947,11 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
}
if (priv->xsk.refcnt) {
+ if (features & NETIF_F_LRO) {
+ netdev_warn(netdev, "LRO is incompatible with AF_XDP (%u XSKs are active)\n",
+ priv->xsk.refcnt);
+ features &= ~NETIF_F_LRO;
+ }
if (features & NETIF_F_GRO_HW) {
netdev_warn(netdev, "HW GRO is incompatible with AF_XDP (%u XSKs are active)\n",
priv->xsk.refcnt);
@@ -5002,6 +5038,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_flow_steering *fs;
int err;
mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu);
@@ -5009,11 +5046,14 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
mlx5e_timestamp_init(priv);
- err = mlx5e_fs_init(priv);
- if (err) {
+ fs = mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ if (!fs) {
+ err = -ENOMEM;
mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
return err;
}
+ priv->fs = fs;
err = mlx5e_ipsec_init(priv);
if (err)
@@ -5032,7 +5072,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
mlx5e_health_destroy_reporters(priv);
mlx5e_ktls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
- mlx5e_fs_cleanup(priv);
+ mlx5e_fs_cleanup(priv->fs);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
@@ -5110,6 +5150,23 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
priv->rx_res = NULL;
}
+static void mlx5e_set_mqprio_rl(struct mlx5e_priv *priv)
+{
+ struct mlx5e_params *params;
+ struct mlx5e_mqprio_rl *rl;
+
+ params = &priv->channels.params;
+ if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL)
+ return;
+
+ rl = mlx5e_mqprio_rl_create(priv->mdev, params->mqprio.num_tc,
+ params->mqprio.channel.max_rate);
+ if (IS_ERR(rl))
+ rl = NULL;
+ priv->mqprio_rl = rl;
+ mlx5e_mqprio_rl_update_params(params, rl);
+}
+
static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
int err;
@@ -5120,8 +5177,17 @@ static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
return err;
}
+ err = mlx5e_accel_init_tx(priv);
+ if (err)
+ goto err_destroy_tises;
+
+ mlx5e_set_mqprio_rl(priv);
mlx5e_dcbnl_initialize(priv);
return 0;
+
+err_destroy_tises:
+ mlx5e_destroy_tises(priv);
+ return err;
}
static void mlx5e_nic_enable(struct mlx5e_priv *priv)
@@ -5129,7 +5195,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
- mlx5e_init_l2_addr(priv);
+ mlx5e_fs_init_l2_addr(priv->fs, netdev);
/* Marking the link as currently not needed by the Driver */
if (!netif_running(netdev))
@@ -5214,7 +5280,9 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.stats_grps_num = mlx5e_nic_stats_grps_num,
.features = BIT(MLX5E_PROFILE_FEATURE_PTP_RX) |
BIT(MLX5E_PROFILE_FEATURE_PTP_TX) |
- BIT(MLX5E_PROFILE_FEATURE_QOS_HTB),
+ BIT(MLX5E_PROFILE_FEATURE_QOS_HTB) |
+ BIT(MLX5E_PROFILE_FEATURE_FS_VLAN) |
+ BIT(MLX5E_PROFILE_FEATURE_FS_TC),
};
static int mlx5e_profile_max_num_channels(struct mlx5_core_dev *mdev,
@@ -5264,6 +5332,14 @@ int mlx5e_get_pf_num_tirs(struct mlx5_core_dev *mdev)
+ mlx5e_profile_max_num_channels(mdev, &mlx5e_nic_profile);
}
+void mlx5e_set_rx_mode_work(struct work_struct *work)
+{
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ set_rx_mode_work);
+
+ return mlx5e_fs_set_rx_mode_work(priv->fs, priv->netdev);
+}
+
/* mlx5e generic netdev management API (move to en_common.c) */
int mlx5e_priv_init(struct mlx5e_priv *priv,
const struct mlx5e_profile *profile,
@@ -5293,7 +5369,6 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
if (err)
goto err_free_cpumask;
- hash_init(priv->htb.qos_tc2node);
INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
@@ -5350,14 +5425,9 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
mutex_unlock(&priv->state_lock);
free_cpumask_var(priv->scratchpad.cpumask);
- for (i = 0; i < priv->htb.max_qos_sqs; i++)
- kfree(priv->htb.qos_sq_stats[i]);
- kvfree(priv->htb.qos_sq_stats);
-
- if (priv->mqprio_rl) {
- mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
- mlx5e_mqprio_rl_free(priv->mqprio_rl);
- }
+ for (i = 0; i < priv->htb_max_qos_sqs; i++)
+ kfree(priv->htb_qos_sq_stats[i]);
+ kvfree(priv->htb_qos_sq_stats);
memset(priv, 0, sizeof(*priv));
}
@@ -5447,6 +5517,8 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
int err;
clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ if (priv->fs)
+ priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
/* max number of channels may have changed */
max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
@@ -5506,6 +5578,8 @@ err_cleanup_tx:
out:
mlx5e_reset_channels(priv->netdev);
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ if (priv->fs)
+ priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
cancel_work_sync(&priv->update_stats_work);
return err;
}
@@ -5515,6 +5589,8 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
const struct mlx5e_profile *profile = priv->profile;
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ if (priv->fs)
+ priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
if (profile->disable)
profile->disable(priv);
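
In the en_main.c hunks above, mlx5e_setup_tc_mqprio_channel() builds the new rate limiter first, attempts the parameter switch, and only on success retires the old limiter and installs the new one; on failure the new limiter is discarded and the old one stays active. A hedged userspace sketch of that replace-on-success pattern (types and the stand-in apply_params() are hypothetical):

	#include <stdlib.h>

	struct rl {
		unsigned long rate;
	};

	static struct rl *active_rl;		/* currently installed limiter */

	static int apply_params(struct rl *candidate)
	{
		/* stand-in for the safe parameter switch; may fail */
		return candidate ? 0 : -1;
	}

	static int replace_rate_limiter(unsigned long rate)
	{
		struct rl *new_rl = malloc(sizeof(*new_rl));
		int err;

		if (!new_rl)
			return -1;
		new_rl->rate = rate;

		err = apply_params(new_rl);
		if (err) {
			free(new_rl);	/* failure: old limiter untouched */
			return err;
		}

		free(active_rl);	/* success: retire the old limiter */
		active_rl = new_rl;
		return 0;
	}

	int main(void)
	{
		replace_rate_limiter(100000);
		replace_rate_limiter(250000);
		free(active_rl);
		return 0;
	}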
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index f797fd97d305..4c1599de652c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -229,7 +229,7 @@ mlx5e_rep_get_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- mlx5e_ethtool_get_ringparam(priv, param);
+ mlx5e_ethtool_get_ringparam(priv, param, kernel_param);
}
static int
@@ -718,6 +718,7 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
+ mlx5e_fs_cleanup(priv->fs);
mlx5e_ipsec_cleanup(priv);
}
@@ -728,8 +729,8 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
struct ttc_params ttc_params = {};
int err;
- priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
- MLX5_FLOW_NAMESPACE_KERNEL);
+ priv->fs->ns = mlx5_get_flow_namespace(priv->mdev,
+ MLX5_FLOW_NAMESPACE_KERNEL);
/* The inner_ttc in the ttc params is intentionally not set */
mlx5e_set_ttc_params(priv, &ttc_params, false);
@@ -738,9 +739,9 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
/* To give uplink rep TTC a lower level for chaining from root ft */
ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;
- priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
- if (IS_ERR(priv->fs.ttc)) {
- err = PTR_ERR(priv->fs.ttc);
+ priv->fs->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
+ if (IS_ERR(priv->fs->ttc)) {
+ err = PTR_ERR(priv->fs->ttc);
netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n",
err);
return err;
@@ -760,7 +761,7 @@ static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
/* non-uplink reps will skip any bypass tables and go directly to
* their own ttc
*/
- rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
+ rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs->ttc);
return 0;
}
@@ -835,11 +836,20 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
int err;
- priv->rx_res = mlx5e_rx_res_alloc();
- if (!priv->rx_res)
+ priv->fs = mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ if (!priv->fs) {
+ netdev_err(priv->netdev, "FS allocation failed\n");
return -ENOMEM;
+ }
+
+ priv->rx_res = mlx5e_rx_res_alloc();
+ if (!priv->rx_res) {
+ err = -ENOMEM;
+ goto err_free_fs;
+ }
- mlx5e_init_l2_addr(priv);
+ mlx5e_fs_init_l2_addr(priv->fs, priv->netdev);
err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
if (err) {
@@ -873,13 +883,15 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
err_destroy_root_ft:
mlx5e_destroy_rep_root_ft(priv);
err_destroy_ttc_table:
- mlx5_destroy_ttc_table(priv->fs.ttc);
+ mlx5_destroy_ttc_table(priv->fs->ttc);
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
+err_free_fs:
+ mlx5e_fs_cleanup(priv->fs);
return err;
}
@@ -888,7 +900,7 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
mlx5e_ethtool_cleanup_steering(priv);
rep_vport_rx_rule_destroy(priv);
mlx5e_destroy_rep_root_ft(priv);
- mlx5_destroy_ttc_table(priv->fs.ttc);
+ mlx5_destroy_ttc_table(priv->fs->ttc);
mlx5e_rx_res_destroy(priv->rx_res);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_rx_res_free(priv->rx_res);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index adf5cc6a7b8c..dec183ccd4ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -62,6 +62,7 @@ struct mlx5_tc_int_port_priv;
struct mlx5e_rep_bond;
struct mlx5e_tc_tun_encap;
struct mlx5e_post_act;
+struct mlx5e_flow_meters;
struct mlx5_rep_uplink_priv {
/* indirect block callbacks are invoked on bind/unbind events
@@ -97,6 +98,8 @@ struct mlx5_rep_uplink_priv {
/* OVS internal port support */
struct mlx5e_tc_int_port_priv *int_port_priv;
+
+ struct mlx5e_flow_meters *flow_meters;
};
struct mlx5e_rep_priv {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 1e87bb2b7541..7409829d1201 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -474,8 +474,8 @@ static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
int i;
/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
- max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
- stats = READ_ONCE(priv->htb.qos_sq_stats);
+ max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
+ stats = READ_ONCE(priv->htb_qos_sq_stats);
for (i = 0; i < max_qos_sqs; i++) {
mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));
@@ -2100,6 +2100,8 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
+ { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_cqe) },
+ { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_event) },
};
static const struct counter_desc ptp_rq_stats_desc[] = {
@@ -2184,13 +2186,13 @@ static const struct counter_desc qos_sq_stats_desc[] = {
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
{
/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
- return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb.max_qos_sqs);
+ return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb_max_qos_sqs);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
{
/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
- u16 max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
+ u16 max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
int i, qid;
for (qid = 0; qid < max_qos_sqs; qid++)
@@ -2208,8 +2210,8 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
int i, qid;
/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
- max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
- stats = READ_ONCE(priv->htb.qos_sq_stats);
+ max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
+ stats = READ_ONCE(priv->htb_qos_sq_stats);
for (qid = 0; qid < max_qos_sqs; qid++) {
struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);
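
The QoS statistics readers above pair smp_load_acquire() on htb_max_qos_sqs with the smp_store_release() performed when a QoS SQ is opened, so a reader that observes the enlarged count is guaranteed to also observe the stats pointers published before it. A small C11 userspace analogue of that publish/consume ordering, with made-up names and a fixed-size array in place of the kernel's allocated stats table:

	#include <stdatomic.h>
	#include <stdio.h>

	#define MAX_SQS 8

	static long *stats[MAX_SQS];		/* published entries     */
	static _Atomic unsigned int max_sqs;	/* publication counter   */

	static void publish_sq(unsigned int idx, long *s)
	{
		stats[idx] = s;			/* write the data first  */
		/* release: the store above is visible to an acquire reader */
		atomic_store_explicit(&max_sqs, idx + 1, memory_order_release);
	}

	static long sum_stats(void)
	{
		/* acquire pairs with the release store in publish_sq() */
		unsigned int n = atomic_load_explicit(&max_sqs,
						      memory_order_acquire);
		long total = 0;

		for (unsigned int i = 0; i < n; i++)
			total += *stats[i];
		return total;
	}

	int main(void)
	{
		static long a = 1, b = 2;

		publish_sq(0, &a);
		publish_sq(1, &b);
		printf("%ld\n", sum_stats());	/* 3 */
		return 0;
	}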
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index e48b15b55b6f..ed4fc940e4ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -453,6 +453,8 @@ struct mlx5e_ptp_cq_stats {
u64 err_cqe;
u64 abort;
u64 abort_abs_diff_ns;
+ u64 resync_cqe;
+ u64 resync_event;
};
struct mlx5e_stats {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9ca2c8763237..f154bda668ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -59,6 +59,7 @@
#include "en/tc_tun_encap.h"
#include "en/tc/sample.h"
#include "en/tc/act/act.h"
+#include "en/tc/post_meter.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
@@ -70,6 +71,30 @@
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
+struct mlx5e_tc_table {
+ /* Protects the dynamic assignment of the t parameter
+ * which is the nic tc root table.
+ */
+ struct mutex t_lock;
+ struct mlx5e_priv *priv;
+ struct mlx5_flow_table *t;
+ struct mlx5_flow_table *miss_t;
+ struct mlx5_fs_chains *chains;
+ struct mlx5e_post_act *post_act;
+
+ struct rhashtable ht;
+
+ struct mod_hdr_tbl mod_hdr;
+ struct mutex hairpin_tbl_lock; /* protects hairpin_tbl */
+ DECLARE_HASHTABLE(hairpin_tbl, 8);
+
+ struct notifier_block netdevice_nb;
+ struct netdev_net_notifier netdevice_nn;
+
+ struct mlx5_tc_ct_priv *ct;
+ struct mapping_ctx *mapping;
+};
+
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
[CHAIN_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
@@ -104,8 +129,27 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
.mlen = 16,
},
[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
+ [PACKET_COLOR_TO_REG] = packet_color_to_reg,
};
+struct mlx5e_tc_table *mlx5e_tc_table_alloc(void)
+{
+ struct mlx5e_tc_table *tc;
+
+ tc = kvzalloc(sizeof(*tc), GFP_KERNEL);
+ return tc ? tc : ERR_PTR(-ENOMEM);
+}
+
+void mlx5e_tc_table_free(struct mlx5e_tc_table *tc)
+{
+ kvfree(tc);
+}
+
+struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc)
+{
+ return tc->chains;
+}
+
/* To avoid false lock dependency warning set the tc_ht lock
* class different than the lock class of the ht being used when deleting
* last flow from a group and then deleting a group, we get into del_sw_flow_group()
@@ -240,6 +284,30 @@ mlx5e_get_int_port_priv(struct mlx5e_priv *priv)
return NULL;
}
+struct mlx5e_flow_meters *
+mlx5e_get_flow_meters(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct mlx5e_priv *priv;
+
+ if (is_mdev_switchdev_mode(dev)) {
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+ priv = netdev_priv(uplink_rpriv->netdev);
+ if (!uplink_priv->flow_meters)
+ uplink_priv->flow_meters =
+ mlx5e_flow_meters_init(priv,
+ MLX5_FLOW_NAMESPACE_FDB,
+ uplink_priv->post_act);
+ if (!IS_ERR(uplink_priv->flow_meters))
+ return uplink_priv->flow_meters;
+ }
+
+ return NULL;
+}
+
static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
@@ -254,7 +322,7 @@ get_ct_priv(struct mlx5e_priv *priv)
return uplink_priv->ct_priv;
}
- return priv->fs.tc.ct;
+ return priv->fs->tc->ct;
}
static struct mlx5e_tc_psample *
@@ -288,7 +356,7 @@ get_post_action(struct mlx5e_priv *priv)
return uplink_priv->post_act;
}
- return priv->fs.tc.post_act;
+ return priv->fs->tc->post_act;
}
struct mlx5_flow_handle *
@@ -319,12 +387,62 @@ mlx5_tc_rule_delete(struct mlx5e_priv *priv,
mlx5e_del_offloaded_nic_rule(priv, rule, attr);
}
+static bool
+is_flow_meter_action(struct mlx5_flow_attr *attr)
+{
+ return ((attr->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
+ (attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER));
+}
+
+static int
+mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5e_post_act *post_act = get_post_action(priv);
+ struct mlx5e_post_meter_priv *post_meter;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5e_flow_meter_handle *meter;
+
+ meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params);
+ if (IS_ERR(meter)) {
+ mlx5_core_err(priv->mdev, "Failed to get flow meter\n");
+ return PTR_ERR(meter);
+ }
+
+ ns_type = mlx5e_tc_meter_get_namespace(meter->flow_meters);
+ post_meter = mlx5e_post_meter_init(priv, ns_type, post_act, meter->green_counter,
+ meter->red_counter);
+ if (IS_ERR(post_meter)) {
+ mlx5_core_err(priv->mdev, "Failed to init post meter\n");
+ goto err_meter_init;
+ }
+
+ attr->meter_attr.meter = meter;
+ attr->meter_attr.post_meter = post_meter;
+ attr->dest_ft = mlx5e_post_meter_get_ft(post_meter);
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
+ return 0;
+
+err_meter_init:
+ mlx5e_tc_meter_put(meter);
+ return PTR_ERR(post_meter);
+}
+
+static void
+mlx5e_tc_del_flow_meter(struct mlx5_flow_attr *attr)
+{
+ mlx5e_post_meter_cleanup(attr->meter_attr.post_meter);
+ mlx5e_tc_meter_put(attr->meter_attr.meter);
+}
+
struct mlx5_flow_handle *
mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ int err;
if (attr->flags & MLX5_ATTR_FLAG_CT) {
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts =
@@ -341,6 +459,12 @@ mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
if (attr->flags & MLX5_ATTR_FLAG_SAMPLE)
return mlx5e_tc_sample_offload(get_sample_priv(priv), spec, attr);
+ if (is_flow_meter_action(attr)) {
+ err = mlx5e_tc_add_flow_meter(priv, attr);
+ if (err)
+ return ERR_PTR(err);
+ }
+
return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}
@@ -367,6 +491,9 @@ mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
}
mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
+
+ if (attr->meter_attr.meter)
+ mlx5e_tc_del_flow_meter(attr);
}
int
@@ -484,7 +611,7 @@ get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
&esw->offloads.mod_hdr :
- &priv->fs.tc.mod_hdr;
+ &priv->fs->tc->mod_hdr;
}
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
@@ -702,7 +829,7 @@ static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
hp->num_channels,
- mlx5_get_ttc_flow_table(priv->fs.ttc)->id);
+ mlx5_get_ttc_flow_table(priv->fs->ttc)->id);
return 0;
@@ -792,7 +919,7 @@ static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
struct mlx5e_hairpin_entry *hpe;
u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);
- hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
+ hash_for_each_possible(priv->fs->tc->hairpin_tbl, hpe,
hairpin_hlist, hash_key) {
if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
refcount_inc(&hpe->refcnt);
@@ -807,10 +934,10 @@ static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
struct mlx5e_hairpin_entry *hpe)
{
/* no more hairpin flows for us, release the hairpin pair */
- if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
+ if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs->tc->hairpin_tbl_lock))
return;
hash_del(&hpe->hairpin_hlist);
- mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
if (!IS_ERR_OR_NULL(hpe->hp)) {
netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
@@ -894,10 +1021,10 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
if (err)
return err;
- mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_lock(&priv->fs->tc->hairpin_tbl_lock);
hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
if (hpe) {
- mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
wait_for_completion(&hpe->res_ready);
if (IS_ERR(hpe->hp)) {
@@ -909,7 +1036,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
if (!hpe) {
- mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
return -ENOMEM;
}
@@ -921,9 +1048,9 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
refcount_set(&hpe->refcnt, 1);
init_completion(&hpe->res_ready);
- hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
+ hash_add(priv->fs->tc->hairpin_tbl, &hpe->hairpin_hlist,
hash_hairpin_info(peer_id, match_prio));
- mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
params.log_data_size = 16;
params.log_data_size = min_t(u8, params.log_data_size,
@@ -999,10 +1126,10 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5_flow_context *flow_context = &spec->flow_context;
- struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv);
struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
- struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5e_tc_table *tc = priv->fs->tc;
struct mlx5_flow_destination dest[2] = {};
+ struct mlx5_fs_chains *nic_chains;
struct mlx5_flow_act flow_act = {
.action = attr->action,
.flags = FLOW_ACT_NO_APPEND,
@@ -1011,6 +1138,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_table *ft;
int dest_ix = 0;
+ nic_chains = mlx5e_nic_chains(tc);
flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
flow_context->flow_tag = nic_attr->flow_tag;
@@ -1035,7 +1163,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
if (IS_ERR(dest[dest_ix].ft))
return ERR_CAST(dest[dest_ix].ft);
} else {
- dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
+ dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs->vlan);
}
dest_ix++;
}
@@ -1063,7 +1191,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
mutex_unlock(&tc->t_lock);
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
- rule = ERR_CAST(priv->fs.tc.t);
+ rule = ERR_CAST(priv->fs->tc->t);
goto err_ft_get;
}
}
@@ -1165,7 +1293,7 @@ void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule,
struct mlx5_flow_attr *attr)
{
- struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv);
+ struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv->fs->tc);
mlx5_del_flow_rules(rule);
@@ -1182,7 +1310,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_flow_attr *attr = flow->attr;
- struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5e_tc_table *tc = priv->fs->tc;
flow_flag_clear(flow, OFFLOADED);
@@ -1194,13 +1322,13 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
/* Remove root table if no rules are left to avoid
* extra steering hops.
*/
- mutex_lock(&priv->fs.tc.t_lock);
+ mutex_lock(&priv->fs->tc->t_lock);
if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
!IS_ERR_OR_NULL(tc->t)) {
- mlx5_chains_put_table(mlx5e_nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL);
- priv->fs.tc.t = NULL;
+ mlx5_chains_put_table(mlx5e_nic_chains(tc), 0, 1, MLX5E_TC_FT_LEVEL);
+ priv->fs->tc->t = NULL;
}
- mutex_unlock(&priv->fs.tc.t_lock);
+ mutex_unlock(&priv->fs->tc->t_lock);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
@@ -3936,7 +4064,7 @@ static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
rpriv = priv->ppriv;
return &rpriv->tc_ht;
} else /* NIC offload */
- return &priv->fs.tc.ht;
+ return &priv->fs->tc->ht;
}
static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
@@ -4519,9 +4647,9 @@ static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
return err;
}
-static int mlx5e_policer_validate(const struct flow_action *action,
- const struct flow_action_entry *act,
- struct netlink_ext_ack *extack)
+int mlx5e_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
{
if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
NL_SET_ERR_MSG_MOD(extack,
@@ -4655,11 +4783,11 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
- mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
- hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
+ mutex_lock(&priv->fs->tc->hairpin_tbl_lock);
+ hash_for_each(priv->fs->tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
if (refcount_inc_not_zero(&hpe->refcnt))
list_add(&hpe->dead_peer_wait_list, &init_wait_list);
- mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
wait_for_completion(&hpe->res_ready);
@@ -4674,7 +4802,6 @@ static int mlx5e_tc_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
- struct mlx5e_flow_steering *fs;
struct mlx5e_priv *peer_priv;
struct mlx5e_tc_table *tc;
struct mlx5e_priv *priv;
@@ -4685,8 +4812,7 @@ static int mlx5e_tc_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
- fs = container_of(tc, struct mlx5e_flow_steering, tc);
- priv = container_of(fs, struct mlx5e_priv, fs);
+ priv = tc->priv;
peer_priv = netdev_priv(ndev);
if (priv == peer_priv ||
!(priv->netdev->features & NETIF_F_HW_TC))
@@ -4715,7 +4841,7 @@ static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
{
- struct mlx5_flow_table **ft = &priv->fs.tc.miss_t;
+ struct mlx5_flow_table **ft = &priv->fs->tc->miss_t;
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *ns;
int err = 0;
@@ -4737,12 +4863,12 @@ static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv)
{
- mlx5_destroy_flow_table(priv->fs.tc.miss_t);
+ mlx5_destroy_flow_table(priv->fs->tc->miss_t);
}
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
- struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5e_tc_table *tc = priv->fs->tc;
struct mlx5_core_dev *dev = priv->mdev;
struct mapping_ctx *chains_mapping;
struct mlx5_chains_attr attr = {};
@@ -4753,6 +4879,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
mutex_init(&tc->t_lock);
mutex_init(&tc->hairpin_tbl_lock);
hash_init(tc->hairpin_tbl);
+ tc->priv = priv;
err = rhashtable_init(&tc->ht, &tc_ht_params);
if (err)
@@ -4782,7 +4909,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
- attr.default_ft = priv->fs.tc.miss_t;
+ attr.default_ft = priv->fs->tc->miss_t;
attr.mapping = chains_mapping;
tc->chains = mlx5_chains_create(dev, &attr);
@@ -4792,7 +4919,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
}
tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL);
- tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr,
+ tc->ct = mlx5_tc_ct_init(priv, tc->chains, &tc->mod_hdr,
MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act);
tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
@@ -4831,7 +4958,7 @@ static void _mlx5e_tc_del_flow(void *ptr, void *arg)
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
- struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5e_tc_table *tc = priv->fs->tc;
if (tc->netdevice_nb.notifier_call)
unregister_netdevice_notifier_dev_net(priv->netdev,
@@ -4955,6 +5082,7 @@ void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
mlx5_tc_ct_clean(uplink_priv->ct_priv);
+ mlx5e_flow_meters_cleanup(uplink_priv->flow_meters);
mlx5e_tc_post_act_destroy(uplink_priv->post_act);
}
@@ -5035,7 +5163,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
u32 chain = 0, chain_tag, reg_b, zone_restore_id;
struct mlx5e_priv *priv = netdev_priv(skb->dev);
- struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5e_tc_table *tc = priv->fs->tc;
struct mlx5_mapped_obj mapped_obj;
struct tc_skb_ext *tc_skb_ext;
int err;
@@ -5060,7 +5188,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
tc_skb_ext->chain = chain;
- zone_restore_id = (reg_b >> REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
+ zone_restore_id = (reg_b >> MLX5_REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
ESW_ZONE_ID_MASK;
if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
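
The hairpin hunks in this en_tc.c diff combine a per-entry refcount with hairpin_tbl_lock: lookups take the lock, bump the refcount and drop the lock, while mlx5e_hairpin_put() relies on refcount_dec_and_mutex_lock() so the hash_del() only runs with the lock held when the final reference is dropped. A simplified userspace sketch of that dec-and-lock idiom follows, using pthreads and C11 atomics; the entry type and helpers are hypothetical.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct entry {
		_Atomic int refcnt;
	};

	static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Rough analogue of refcount_dec_and_mutex_lock(): drop a reference,
	 * and only when it was the last one return true with the lock held so
	 * the caller can unlink the entry before unlocking.
	 */
	static bool dec_and_lock(struct entry *e)
	{
		int old = atomic_load(&e->refcnt);

		while (old > 1)		/* fast path: not the last reference */
			if (atomic_compare_exchange_weak(&e->refcnt, &old, old - 1))
				return false;

		pthread_mutex_lock(&tbl_lock);
		if (atomic_fetch_sub(&e->refcnt, 1) != 1) {
			pthread_mutex_unlock(&tbl_lock);
			return false;
		}
		return true;
	}

	static void put_entry(struct entry *e)
	{
		if (!dec_and_lock(e))
			return;
		/* last reference: unlink from the table while still locked */
		pthread_mutex_unlock(&tbl_lock);
		free(e);
	}

	int main(void)
	{
		struct entry *e = malloc(sizeof(*e));

		atomic_init(&e->refcnt, 2);	/* two users hold the entry */
		put_entry(e);			/* first put: entry stays   */
		put_entry(e);			/* last put: entry is freed */
		puts("done");
		return 0;
	}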
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index e2a1250aeca1..6ce1ab6b86b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -39,6 +39,7 @@
#include "en/tc_ct.h"
#include "en/tc_tun.h"
#include "en/tc/int_port.h"
+#include "en/tc/meter.h"
#include "en_rep.h"
#define MLX5E_TC_FLOW_ID_MASK 0x0000ffff
@@ -71,6 +72,7 @@ struct mlx5_flow_attr {
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_ct_attr ct_attr;
struct mlx5e_sample_attr sample_attr;
+ struct mlx5e_meter_attr meter_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
u32 chain;
u16 prio;
@@ -83,6 +85,7 @@ struct mlx5_flow_attr {
u8 tun_ip_version;
int tunnel_id; /* mapped tunnel id */
u32 flags;
+ u32 exe_aso_type;
struct list_head list;
struct mlx5e_post_act_handle *post_act_handle;
struct {
@@ -229,6 +232,7 @@ enum mlx5e_tc_attr_to_reg {
FTEID_TO_REG,
NIC_CHAIN_TO_REG,
NIC_ZONE_RESTORE_TO_REG,
+ PACKET_COLOR_TO_REG,
};
struct mlx5e_tc_attr_to_reg_mapping {
@@ -241,6 +245,10 @@ struct mlx5e_tc_attr_to_reg_mapping {
extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[];
+#define MLX5_REG_MAPPING_MOFFSET(reg_id) (mlx5e_tc_attr_to_reg_mappings[reg_id].moffset)
+#define MLX5_REG_MAPPING_MBITS(reg_id) (mlx5e_tc_attr_to_reg_mappings[reg_id].mlen)
+#define MLX5_REG_MAPPING_MASK(reg_id) (GENMASK(mlx5e_tc_attr_to_reg_mappings[reg_id].mlen - 1, 0))
+
bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
struct net_device *out_dev);
@@ -348,6 +356,8 @@ mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
#endif
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+struct mlx5e_tc_table *mlx5e_tc_table_alloc(void);
+void mlx5e_tc_table_free(struct mlx5e_tc_table *tc);
static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
@@ -368,6 +378,8 @@ static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb);
#else /* CONFIG_MLX5_CLS_ACT */
+static inline struct mlx5e_tc_table *mlx5e_tc_table_alloc(void) { return NULL; }
+static inline void mlx5e_tc_table_free(struct mlx5e_tc_table *tc) {}
static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
{ return false; }
static inline bool
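
The new MLX5_REG_MAPPING_MOFFSET/MBITS/MASK helpers added in this header centralize how a mapped attribute (chain, zone-restore id, packet color) is carved out of a metadata register: shift by the mapping's bit offset, then mask to its width. A small self-contained C illustration of the same extract/insert pair; the mapping table contents below are made up for the example.

	#include <stdint.h>
	#include <stdio.h>

	struct reg_mapping {
		unsigned int moffset;	/* bit offset inside the register */
		unsigned int mlen;	/* field width in bits            */
	};

	/* hypothetical mapping table, indexed by attribute id */
	enum { ATTR_CHAIN, ATTR_ZONE, ATTR_MAX };

	static const struct reg_mapping mappings[ATTR_MAX] = {
		[ATTR_CHAIN] = { .moffset = 0,  .mlen = 16 },
		[ATTR_ZONE]  = { .moffset = 16, .mlen = 8  },
	};

	#define MAPPING_MASK(id)  ((1u << mappings[id].mlen) - 1)

	static uint32_t reg_set(uint32_t reg, int id, uint32_t val)
	{
		reg &= ~(MAPPING_MASK(id) << mappings[id].moffset);
		return reg | ((val & MAPPING_MASK(id)) << mappings[id].moffset);
	}

	static uint32_t reg_get(uint32_t reg, int id)
	{
		return (reg >> mappings[id].moffset) & MAPPING_MASK(id);
	}

	int main(void)
	{
		uint32_t reg = 0;

		reg = reg_set(reg, ATTR_CHAIN, 0x1234);
		reg = reg_set(reg, ATTR_ZONE, 0x5a);
		printf("chain=%#x zone=%#x\n",
		       reg_get(reg, ATTR_CHAIN), reg_get(reg, ATTR_ZONE));
		return 0;
	}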
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 9a7250be229f..27f791feb517 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -152,14 +152,14 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
*hopbyhop = 0;
if (skb->encapsulation) {
- ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+ ihs = skb_inner_tcp_all_headers(skb);
stats->tso_inner_packets++;
stats->tso_inner_bytes += skb->len - ihs;
} else {
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
} else {
- ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ ihs = skb_tcp_all_headers(skb);
if (ipv6_has_hopopt_jumbo(skb)) {
*hopbyhop = sizeof(struct hop_jumbo_hdr);
ihs -= sizeof(struct hop_jumbo_hdr);
@@ -631,12 +631,22 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
mlx5e_tx_mpwqe_session_complete(sq);
}
+static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ if (ptpsq->ts_cqe_ctr_mask && unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ eseg->flow_table_metadata = cpu_to_be32(ptpsq->skb_fifo_pc &
+ ptpsq->ts_cqe_ctr_mask);
+}
+
static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
mlx5e_accel_tx_eseg(priv, skb, eseg, ihs);
mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
+ if (unlikely(sq->ptpsq))
+ mlx5e_cqe_ts_id_eseg(sq->ptpsq, skb, eseg);
}
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
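
The PTP hunk above tags each timestamped WQE with the low bits of the skb FIFO producer counter (skb_fifo_pc & ts_cqe_ctr_mask), so a completion can later be matched back to the right skb even when CQEs are reordered or lost. A toy C sketch of matching completions against a wrapping producer counter; FIFO size, names and the loss scenario are invented for illustration.

	#include <stdint.h>
	#include <stdio.h>
	#include <stdbool.h>

	#define FIFO_SIZE 8			/* power of two */
	#define CTR_MASK  (FIFO_SIZE - 1)

	static const char *fifo[FIFO_SIZE];	/* pending packets   */
	static uint32_t pc, cc;			/* producer/consumer */

	static uint32_t post(const char *pkt)
	{
		uint32_t tag = pc & CTR_MASK;	/* tag stored in the WQE */

		fifo[tag] = pkt;
		pc++;
		return tag;
	}

	/* On completion, skip (and report) entries whose tag does not match,
	 * instead of mis-attributing the timestamp.
	 */
	static bool complete(uint32_t tag, const char **pkt)
	{
		while (cc != pc) {
			uint32_t cur = cc & CTR_MASK;

			cc++;
			if (cur == tag) {
				*pkt = fifo[cur];
				return true;
			}
			printf("dropping stale entry %u\n", cur);
		}
		return false;
	}

	int main(void)
	{
		const char *pkt;
		uint32_t t0 = post("sync");
		uint32_t t1 = post("delay_req");

		(void)t0;			/* pretend this CQE was lost */
		if (complete(t1, &pkt))
			printf("matched %s\n", pkt);
		return 0;
	}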
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index 05e08cec5a8c..4fbff7bcc155 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021 Mellanox Technologies. */
+#include <linux/build_bug.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <net/netevent.h>
@@ -12,26 +13,57 @@
#define CREATE_TRACE_POINTS
#include "diag/bridge_tracepoint.h"
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE 64000
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE 12000
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_UNTAGGED_GRP_SIZE 16000
#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM 0
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE / 4 - 1)
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_FROM \
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_FROM \
(MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO \
- (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE / 2 - 1)
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM \
- (MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO + 1)
-#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE - 1)
-
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE 64000
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_UNTAGGED_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO + 1)
+static_assert(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE == 64000);
+
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_SIZE 16000
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_SIZE (32000 - 1)
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM 0
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE / 2 - 1)
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM \
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_FROM \
(MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 2)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_SIZE - 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM + \
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_SIZE - 1)
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM \
(MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO + 1)
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO \
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO + 1)
+static_assert(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE == 64000);
#define MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE 0
@@ -63,12 +95,14 @@ struct mlx5_esw_bridge {
struct mlx5_flow_table *egress_ft;
struct mlx5_flow_group *egress_vlan_fg;
+ struct mlx5_flow_group *egress_qinq_fg;
struct mlx5_flow_group *egress_mac_fg;
struct mlx5_flow_group *egress_miss_fg;
struct mlx5_pkt_reformat *egress_miss_pkt_reformat;
struct mlx5_flow_handle *egress_miss_handle;
unsigned long ageing_time;
u32 flags;
+ u16 vlan_proto;
};
static void
@@ -138,7 +172,9 @@ mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw)
}
static struct mlx5_flow_group *
-mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
+mlx5_esw_bridge_ingress_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
+ struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *fg;
@@ -154,30 +190,53 @@ mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flo
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
- MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
+ if (vlan_proto == ETH_P_8021Q)
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
+ else if (vlan_proto == ETH_P_8021AD)
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);
MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_mask());
- MLX5_SET(create_flow_group_in, in, start_flow_index,
- MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM);
- MLX5_SET(create_flow_group_in, in, end_flow_index,
- MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO);
+ MLX5_SET(create_flow_group_in, in, start_flow_index, from);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, to);
fg = mlx5_create_flow_group(ingress_ft, in);
kvfree(in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create VLAN flow group for bridge ingress table (err=%ld)\n",
- PTR_ERR(fg));
+ "Failed to create VLAN(proto=%x) flow group for bridge ingress table (err=%ld)\n",
+ vlan_proto, PTR_ERR(fg));
return fg;
}
static struct mlx5_flow_group *
-mlx5_esw_bridge_ingress_filter_fg_create(struct mlx5_eswitch *esw,
- struct mlx5_flow_table *ingress_ft)
+mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
+{
+ unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM;
+ unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO;
+
+ return mlx5_esw_bridge_ingress_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, ingress_ft);
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_ingress_qinq_fg_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
+{
+ unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_FROM;
+ unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_GRP_IDX_TO;
+
+ return mlx5_esw_bridge_ingress_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw,
+ ingress_ft);
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(unsigned int from, unsigned int to,
+ u16 vlan_proto, struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *fg;
@@ -193,27 +252,48 @@ mlx5_esw_bridge_ingress_filter_fg_create(struct mlx5_eswitch *esw,
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
- MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
-
+ if (vlan_proto == ETH_P_8021Q)
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
+ else if (vlan_proto == ETH_P_8021AD)
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_mask());
- MLX5_SET(create_flow_group_in, in, start_flow_index,
- MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_FROM);
- MLX5_SET(create_flow_group_in, in, end_flow_index,
- MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO);
+ MLX5_SET(create_flow_group_in, in, start_flow_index, from);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, to);
fg = mlx5_create_flow_group(ingress_ft, in);
if (IS_ERR(fg))
esw_warn(esw->dev,
"Failed to create bridge ingress table VLAN filter flow group (err=%ld)\n",
PTR_ERR(fg));
-
kvfree(in);
return fg;
}
static struct mlx5_flow_group *
+mlx5_esw_bridge_ingress_vlan_filter_fg_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
+{
+ unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_FROM;
+ unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_FILTER_GRP_IDX_TO;
+
+ return mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(from, to, ETH_P_8021Q, esw,
+ ingress_ft);
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_ingress_qinq_filter_fg_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
+{
+ unsigned int from = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_FROM;
+ unsigned int to = MLX5_ESW_BRIDGE_INGRESS_TABLE_QINQ_FILTER_GRP_IDX_TO;
+
+ return mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(from, to, ETH_P_8021AD, esw,
+ ingress_ft);
+}
+
+static struct mlx5_flow_group *
mlx5_esw_bridge_ingress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -250,7 +330,9 @@ mlx5_esw_bridge_ingress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow
}
static struct mlx5_flow_group *
-mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
+mlx5_esw_bridge_egress_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
+ struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *egress_ft)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *fg;
@@ -265,13 +347,14 @@ mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);
- MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
+ if (vlan_proto == ETH_P_8021Q)
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
+ else if (vlan_proto == ETH_P_8021AD)
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);
- MLX5_SET(create_flow_group_in, in, start_flow_index,
- MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM);
- MLX5_SET(create_flow_group_in, in, end_flow_index,
- MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO);
+ MLX5_SET(create_flow_group_in, in, start_flow_index, from);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, to);
fg = mlx5_create_flow_group(egress_ft, in);
if (IS_ERR(fg))
@@ -283,6 +366,25 @@ mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow
}
static struct mlx5_flow_group *
+mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
+{
+ unsigned int from = MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM;
+ unsigned int to = MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO;
+
+ return mlx5_esw_bridge_egress_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, egress_ft);
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_egress_qinq_fg_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *egress_ft)
+{
+ unsigned int from = MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_FROM;
+ unsigned int to = MLX5_ESW_BRIDGE_EGRESS_TABLE_QINQ_GRP_IDX_TO;
+
+ return mlx5_esw_bridge_egress_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw, egress_ft);
+}
+
+static struct mlx5_flow_group *
mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -346,7 +448,7 @@ mlx5_esw_bridge_egress_miss_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow
static int
mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
{
- struct mlx5_flow_group *mac_fg, *filter_fg, *vlan_fg;
+ struct mlx5_flow_group *mac_fg, *qinq_filter_fg, *qinq_fg, *vlan_filter_fg, *vlan_fg;
struct mlx5_flow_table *ingress_ft, *skip_ft;
struct mlx5_eswitch *esw = br_offloads->esw;
int err;
@@ -374,10 +476,22 @@ mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
goto err_vlan_fg;
}
- filter_fg = mlx5_esw_bridge_ingress_filter_fg_create(esw, ingress_ft);
- if (IS_ERR(filter_fg)) {
- err = PTR_ERR(filter_fg);
- goto err_filter_fg;
+ vlan_filter_fg = mlx5_esw_bridge_ingress_vlan_filter_fg_create(esw, ingress_ft);
+ if (IS_ERR(vlan_filter_fg)) {
+ err = PTR_ERR(vlan_filter_fg);
+ goto err_vlan_filter_fg;
+ }
+
+ qinq_fg = mlx5_esw_bridge_ingress_qinq_fg_create(esw, ingress_ft);
+ if (IS_ERR(qinq_fg)) {
+ err = PTR_ERR(qinq_fg);
+ goto err_qinq_fg;
+ }
+
+ qinq_filter_fg = mlx5_esw_bridge_ingress_qinq_filter_fg_create(esw, ingress_ft);
+ if (IS_ERR(qinq_filter_fg)) {
+ err = PTR_ERR(qinq_filter_fg);
+ goto err_qinq_filter_fg;
}
mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(esw, ingress_ft);
@@ -389,13 +503,19 @@ mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
br_offloads->ingress_ft = ingress_ft;
br_offloads->skip_ft = skip_ft;
br_offloads->ingress_vlan_fg = vlan_fg;
- br_offloads->ingress_filter_fg = filter_fg;
+ br_offloads->ingress_vlan_filter_fg = vlan_filter_fg;
+ br_offloads->ingress_qinq_fg = qinq_fg;
+ br_offloads->ingress_qinq_filter_fg = qinq_filter_fg;
br_offloads->ingress_mac_fg = mac_fg;
return 0;
err_mac_fg:
- mlx5_destroy_flow_group(filter_fg);
-err_filter_fg:
+ mlx5_destroy_flow_group(qinq_filter_fg);
+err_qinq_filter_fg:
+ mlx5_destroy_flow_group(qinq_fg);
+err_qinq_fg:
+ mlx5_destroy_flow_group(vlan_filter_fg);
+err_vlan_filter_fg:
mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
mlx5_destroy_flow_table(skip_ft);
@@ -409,8 +529,12 @@ mlx5_esw_bridge_ingress_table_cleanup(struct mlx5_esw_bridge_offloads *br_offloa
{
mlx5_destroy_flow_group(br_offloads->ingress_mac_fg);
br_offloads->ingress_mac_fg = NULL;
- mlx5_destroy_flow_group(br_offloads->ingress_filter_fg);
- br_offloads->ingress_filter_fg = NULL;
+ mlx5_destroy_flow_group(br_offloads->ingress_qinq_filter_fg);
+ br_offloads->ingress_qinq_filter_fg = NULL;
+ mlx5_destroy_flow_group(br_offloads->ingress_qinq_fg);
+ br_offloads->ingress_qinq_fg = NULL;
+ mlx5_destroy_flow_group(br_offloads->ingress_vlan_filter_fg);
+ br_offloads->ingress_vlan_filter_fg = NULL;
mlx5_destroy_flow_group(br_offloads->ingress_vlan_fg);
br_offloads->ingress_vlan_fg = NULL;
mlx5_destroy_flow_table(br_offloads->skip_ft);
@@ -428,7 +552,7 @@ static int
mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
struct mlx5_esw_bridge *bridge)
{
- struct mlx5_flow_group *miss_fg = NULL, *mac_fg, *vlan_fg;
+ struct mlx5_flow_group *miss_fg = NULL, *mac_fg, *vlan_fg, *qinq_fg;
struct mlx5_pkt_reformat *miss_pkt_reformat = NULL;
struct mlx5_flow_handle *miss_handle = NULL;
struct mlx5_eswitch *esw = br_offloads->esw;
@@ -447,6 +571,12 @@ mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
goto err_vlan_fg;
}
+ qinq_fg = mlx5_esw_bridge_egress_qinq_fg_create(esw, egress_ft);
+ if (IS_ERR(qinq_fg)) {
+ err = PTR_ERR(qinq_fg);
+ goto err_qinq_fg;
+ }
+
mac_fg = mlx5_esw_bridge_egress_mac_fg_create(esw, egress_ft);
if (IS_ERR(mac_fg)) {
err = PTR_ERR(mac_fg);
@@ -491,6 +621,7 @@ skip_miss_flow:
bridge->egress_ft = egress_ft;
bridge->egress_vlan_fg = vlan_fg;
+ bridge->egress_qinq_fg = qinq_fg;
bridge->egress_mac_fg = mac_fg;
bridge->egress_miss_fg = miss_fg;
bridge->egress_miss_pkt_reformat = miss_pkt_reformat;
@@ -498,6 +629,8 @@ skip_miss_flow:
return 0;
err_mac_fg:
+ mlx5_destroy_flow_group(qinq_fg);
+err_qinq_fg:
mlx5_destroy_flow_group(vlan_fg);
err_vlan_fg:
mlx5_destroy_flow_table(egress_ft);
@@ -515,6 +648,7 @@ mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
if (bridge->egress_miss_fg)
mlx5_destroy_flow_group(bridge->egress_miss_fg);
mlx5_destroy_flow_group(bridge->egress_mac_fg);
+ mlx5_destroy_flow_group(bridge->egress_qinq_fg);
mlx5_destroy_flow_group(bridge->egress_vlan_fg);
mlx5_destroy_flow_table(bridge->egress_ft);
}
@@ -559,10 +693,17 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char
flow_act.pkt_reformat = vlan->pkt_reformat_push;
flow_act.modify_hdr = vlan->pkt_mod_hdr_push_mark;
} else if (vlan) {
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
- outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
- outer_headers.cvlan_tag);
+ if (bridge->vlan_proto == ETH_P_8021Q) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.cvlan_tag);
+ } else if (bridge->vlan_proto == ETH_P_8021AD) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.svlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.svlan_tag);
+ }
MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
outer_headers.first_vid);
MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
@@ -645,10 +786,17 @@ mlx5_esw_bridge_ingress_filter_flow_create(u16 vport_num, const unsigned char *a
MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_for_match(br_offloads->esw, vport_num));
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
- outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
- outer_headers.cvlan_tag);
+ if (bridge->vlan_proto == ETH_P_8021Q) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.cvlan_tag);
+ } else if (bridge->vlan_proto == ETH_P_8021AD) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.svlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.svlan_tag);
+ }
handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, &dest, 1);
@@ -696,10 +844,17 @@ mlx5_esw_bridge_egress_flow_create(u16 vport_num, u16 esw_owner_vhca_id, const u
flow_act.pkt_reformat = vlan->pkt_reformat_pop;
}
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
- outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
- outer_headers.cvlan_tag);
+ if (bridge->vlan_proto == ETH_P_8021Q) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.cvlan_tag);
+ } else if (bridge->vlan_proto == ETH_P_8021AD) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.svlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.svlan_tag);
+ }
MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
outer_headers.first_vid);
MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
@@ -774,6 +929,7 @@ static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
bridge->ifindex = ifindex;
bridge->refcnt = 1;
bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
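+ /* Linux bridges default to 802.1Q; changed later via mlx5_esw_bridge_vlan_proto_set() */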
+ bridge->vlan_proto = ETH_P_8021Q;
list_add(&bridge->list, &br_offloads->bridges);
return bridge;
@@ -911,12 +1067,13 @@ mlx5_esw_bridge_vlan_lookup(u16 vid, struct mlx5_esw_bridge_port *port)
}
static int
-mlx5_esw_bridge_vlan_push_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
+mlx5_esw_bridge_vlan_push_create(u16 vlan_proto, struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_eswitch *esw)
{
struct {
__be16 h_vlan_proto;
__be16 h_vlan_TCI;
- } vlan_hdr = { htons(ETH_P_8021Q), htons(vlan->vid) };
+ } vlan_hdr = { htons(vlan_proto), htons(vlan->vid) };
struct mlx5_pkt_reformat_params reformat_params = {};
struct mlx5_pkt_reformat *pkt_reformat;
@@ -1008,36 +1165,58 @@ mlx5_esw_bridge_vlan_push_mark_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct
vlan->pkt_mod_hdr_push_mark = NULL;
}
-static struct mlx5_esw_bridge_vlan *
-mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *port,
- struct mlx5_eswitch *esw)
+static int
+mlx5_esw_bridge_vlan_push_pop_create(u16 vlan_proto, u16 flags, struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_eswitch *esw)
{
- struct mlx5_esw_bridge_vlan *vlan;
int err;
- vlan = kvzalloc(sizeof(*vlan), GFP_KERNEL);
- if (!vlan)
- return ERR_PTR(-ENOMEM);
-
- vlan->vid = vid;
- vlan->flags = flags;
- INIT_LIST_HEAD(&vlan->fdb_list);
-
if (flags & BRIDGE_VLAN_INFO_PVID) {
- err = mlx5_esw_bridge_vlan_push_create(vlan, esw);
+ err = mlx5_esw_bridge_vlan_push_create(vlan_proto, vlan, esw);
if (err)
- goto err_vlan_push;
+ return err;
err = mlx5_esw_bridge_vlan_push_mark_create(vlan, esw);
if (err)
goto err_vlan_push_mark;
}
+
if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
err = mlx5_esw_bridge_vlan_pop_create(vlan, esw);
if (err)
goto err_vlan_pop;
}
+ return 0;
+
+err_vlan_pop:
+ if (vlan->pkt_mod_hdr_push_mark)
+ mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
+err_vlan_push_mark:
+ if (vlan->pkt_reformat_push)
+ mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
+ return err;
+}
+
+static struct mlx5_esw_bridge_vlan *
+mlx5_esw_bridge_vlan_create(u16 vlan_proto, u16 vid, u16 flags, struct mlx5_esw_bridge_port *port,
+ struct mlx5_eswitch *esw)
+{
+ struct mlx5_esw_bridge_vlan *vlan;
+ int err;
+
+ vlan = kvzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ return ERR_PTR(-ENOMEM);
+
+ vlan->vid = vid;
+ vlan->flags = flags;
+ INIT_LIST_HEAD(&vlan->fdb_list);
+
+ err = mlx5_esw_bridge_vlan_push_pop_create(vlan_proto, flags, vlan, esw);
+ if (err)
+ goto err_vlan_push_pop;
+
err = xa_insert(&port->vlans, vid, vlan, GFP_KERNEL);
if (err)
goto err_xa_insert;
@@ -1048,13 +1227,11 @@ mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *por
err_xa_insert:
if (vlan->pkt_reformat_pop)
mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
-err_vlan_pop:
if (vlan->pkt_mod_hdr_push_mark)
mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
-err_vlan_push_mark:
if (vlan->pkt_reformat_push)
mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
-err_vlan_push:
+err_vlan_push_pop:
kvfree(vlan);
return ERR_PTR(err);
}
@@ -1102,6 +1279,50 @@ static void mlx5_esw_bridge_port_vlans_flush(struct mlx5_esw_bridge_port *port,
mlx5_esw_bridge_vlan_cleanup(port, vlan, bridge);
}
+static int mlx5_esw_bridge_port_vlans_recreate(struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
+ struct mlx5_esw_bridge_vlan *vlan;
+ unsigned long i;
+ int err;
+
+ xa_for_each(&port->vlans, i, vlan) {
+ mlx5_esw_bridge_vlan_flush(vlan, bridge);
+ err = mlx5_esw_bridge_vlan_push_pop_create(bridge->vlan_proto, vlan->flags, vlan,
+ br_offloads->esw);
+ if (err) {
+ esw_warn(br_offloads->esw->dev,
+ "Failed to create VLAN=%u(proto=%x) push/pop actions (vport=%u,err=%d)\n",
+ vlan->vid, bridge->vlan_proto, port->vport_num,
+ err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int
+mlx5_esw_bridge_vlans_recreate(struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
+ struct mlx5_esw_bridge_port *port;
+ unsigned long i;
+ int err;
+
+ xa_for_each(&br_offloads->ports, i, port) {
+ if (port->bridge != bridge)
+ continue;
+
+ err = mlx5_esw_bridge_port_vlans_recreate(port, bridge);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, u16 esw_owner_vhca_id,
struct mlx5_esw_bridge *bridge, struct mlx5_eswitch *esw)
@@ -1287,6 +1508,32 @@ int mlx5_esw_bridge_vlan_filtering_set(u16 vport_num, u16 esw_owner_vhca_id, boo
return 0;
}
+int mlx5_esw_bridge_vlan_proto_set(u16 vport_num, u16 esw_owner_vhca_id, u16 proto,
+ struct mlx5_esw_bridge_offloads *br_offloads)
+{
+ struct mlx5_esw_bridge_port *port;
+ struct mlx5_esw_bridge *bridge;
+
+ port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id,
+ br_offloads);
+ if (!port)
+ return -EINVAL;
+
+ bridge = port->bridge;
+ if (bridge->vlan_proto == proto)
+ return 0;
+ if (proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
+ esw_warn(br_offloads->esw->dev, "Can't set unsupported VLAN protocol %x", proto);
+ return -EOPNOTSUPP;
+ }
+
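+ /* The new ethertype invalidates existing offloads: flush FDB entries and
+ * rebuild per-port VLAN push/pop actions for the new protocol.
+ */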
+ mlx5_esw_bridge_fdb_flush(bridge);
+ bridge->vlan_proto = proto;
+ mlx5_esw_bridge_vlans_recreate(bridge);
+
+ return 0;
+}
+
static int mlx5_esw_bridge_vport_init(u16 vport_num, u16 esw_owner_vhca_id, u16 flags,
struct mlx5_esw_bridge_offloads *br_offloads,
struct mlx5_esw_bridge *bridge)
@@ -1434,7 +1681,8 @@ int mlx5_esw_bridge_port_vlan_add(u16 vport_num, u16 esw_owner_vhca_id, u16 vid,
mlx5_esw_bridge_vlan_cleanup(port, vlan, port->bridge);
}
- vlan = mlx5_esw_bridge_vlan_create(vid, flags, port, br_offloads->esw);
+ vlan = mlx5_esw_bridge_vlan_create(port->bridge->vlan_proto, vid, flags, port,
+ br_offloads->esw);
if (IS_ERR(vlan)) {
NL_SET_ERR_MSG_MOD(extack, "Failed to create VLAN entry");
return PTR_ERR(vlan);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
index efc39975226e..10851a515bca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
@@ -26,7 +26,9 @@ struct mlx5_esw_bridge_offloads {
struct mlx5_flow_table *ingress_ft;
struct mlx5_flow_group *ingress_vlan_fg;
- struct mlx5_flow_group *ingress_filter_fg;
+ struct mlx5_flow_group *ingress_vlan_filter_fg;
+ struct mlx5_flow_group *ingress_qinq_fg;
+ struct mlx5_flow_group *ingress_qinq_filter_fg;
struct mlx5_flow_group *ingress_mac_fg;
struct mlx5_flow_table *skip_ft;
@@ -60,6 +62,8 @@ int mlx5_esw_bridge_ageing_time_set(u16 vport_num, u16 esw_owner_vhca_id, unsign
struct mlx5_esw_bridge_offloads *br_offloads);
int mlx5_esw_bridge_vlan_filtering_set(u16 vport_num, u16 esw_owner_vhca_id, bool enable,
struct mlx5_esw_bridge_offloads *br_offloads);
+int mlx5_esw_bridge_vlan_proto_set(u16 vport_num, u16 esw_owner_vhca_id, u16 proto,
+ struct mlx5_esw_bridge_offloads *br_offloads);
int mlx5_esw_bridge_port_vlan_add(u16 vport_num, u16 esw_owner_vhca_id, u16 vid, u16 flags,
struct mlx5_esw_bridge_offloads *br_offloads,
struct netlink_ext_ack *extack);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/debugfs.c
new file mode 100644
index 000000000000..2db13c71e88c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/debugfs.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/debugfs.h>
+#include "eswitch.h"
+
+enum vnic_diag_counter {
+ MLX5_VNIC_DIAG_TOTAL_Q_UNDER_PROCESSOR_HANDLE,
+ MLX5_VNIC_DIAG_SEND_QUEUE_PRIORITY_UPDATE_FLOW,
+ MLX5_VNIC_DIAG_COMP_EQ_OVERRUN,
+ MLX5_VNIC_DIAG_ASYNC_EQ_OVERRUN,
+ MLX5_VNIC_DIAG_CQ_OVERRUN,
+ MLX5_VNIC_DIAG_INVALID_COMMAND,
+ MLX5_VNIC_DIAG_QUOTA_EXCEEDED_COMMAND,
+};
+
+static int mlx5_esw_query_vnic_diag(struct mlx5_vport *vport, enum vnic_diag_counter counter,
+ u32 *val)
+{
+ u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
+ struct mlx5_core_dev *dev = vport->dev;
+ u16 vport_num = vport->vport;
+ void *vnic_diag_out;
+ int err;
+
+ MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
+ MLX5_SET(query_vnic_env_in, in, vport_number, vport_num);
+ if (!mlx5_esw_is_manager_vport(dev->priv.eswitch, vport_num))
+ MLX5_SET(query_vnic_env_in, in, other_vport, 1);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ vnic_diag_out = MLX5_ADDR_OF(query_vnic_env_out, out, vport_env);
+ switch (counter) {
+ case MLX5_VNIC_DIAG_TOTAL_Q_UNDER_PROCESSOR_HANDLE:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, total_error_queues);
+ break;
+ case MLX5_VNIC_DIAG_SEND_QUEUE_PRIORITY_UPDATE_FLOW:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out,
+ send_queue_priority_update_flow);
+ break;
+ case MLX5_VNIC_DIAG_COMP_EQ_OVERRUN:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, comp_eq_overrun);
+ break;
+ case MLX5_VNIC_DIAG_ASYNC_EQ_OVERRUN:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, async_eq_overrun);
+ break;
+ case MLX5_VNIC_DIAG_CQ_OVERRUN:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, cq_overrun);
+ break;
+ case MLX5_VNIC_DIAG_INVALID_COMMAND:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, invalid_command);
+ break;
+ case MLX5_VNIC_DIAG_QUOTA_EXCEEDED_COMMAND:
+ *val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, quota_exceeded_command);
+ break;
+ }
+
+ return 0;
+}
+
+static int __show_vnic_diag(struct seq_file *file, struct mlx5_vport *vport,
+ enum vnic_diag_counter type)
+{
+ u32 val = 0;
+ int ret;
+
+ ret = mlx5_esw_query_vnic_diag(vport, type, &val);
+ if (ret)
+ return ret;
+
+ seq_printf(file, "%d\n", val);
+ return 0;
+}
+
+static int total_q_under_processor_handle_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_TOTAL_Q_UNDER_PROCESSOR_HANDLE);
+}
+
+static int send_queue_priority_update_flow_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private,
+ MLX5_VNIC_DIAG_SEND_QUEUE_PRIORITY_UPDATE_FLOW);
+}
+
+static int comp_eq_overrun_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_COMP_EQ_OVERRUN);
+}
+
+static int async_eq_overrun_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_ASYNC_EQ_OVERRUN);
+}
+
+static int cq_overrun_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_CQ_OVERRUN);
+}
+
+static int invalid_command_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_INVALID_COMMAND);
+}
+
+static int quota_exceeded_command_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_QUOTA_EXCEEDED_COMMAND);
+}
+
+DEFINE_SHOW_ATTRIBUTE(total_q_under_processor_handle);
+DEFINE_SHOW_ATTRIBUTE(send_queue_priority_update_flow);
+DEFINE_SHOW_ATTRIBUTE(comp_eq_overrun);
+DEFINE_SHOW_ATTRIBUTE(async_eq_overrun);
+DEFINE_SHOW_ATTRIBUTE(cq_overrun);
+DEFINE_SHOW_ATTRIBUTE(invalid_command);
+DEFINE_SHOW_ATTRIBUTE(quota_exceeded_command);
+
+void mlx5_esw_vport_debugfs_destroy(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+
+ debugfs_remove_recursive(vport->dbgfs);
+ vport->dbgfs = NULL;
+}
+
+/* vnic diag dir name is "pf", "ecpf" or "{vf/sf}_xxxx" */
+#define VNIC_DIAG_DIR_NAME_MAX_LEN 8
+
+void mlx5_esw_vport_debugfs_create(struct mlx5_eswitch *esw, u16 vport_num, bool is_sf, u16 sf_num)
+{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+ struct dentry *vnic_diag;
+ char dir_name[VNIC_DIAG_DIR_NAME_MAX_LEN];
+ int err;
+
+ if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
+ return;
+
+ if (vport_num == MLX5_VPORT_PF) {
+ strcpy(dir_name, "pf");
+ } else if (vport_num == MLX5_VPORT_ECPF) {
+ strcpy(dir_name, "ecpf");
+ } else {
+ err = snprintf(dir_name, VNIC_DIAG_DIR_NAME_MAX_LEN, "%s_%d", is_sf ? "sf" : "vf",
+ is_sf ? sf_num : vport_num - MLX5_VPORT_FIRST_VF);
+ if (WARN_ON(err < 0))
+ return;
+ }
+
+ vport->dbgfs = debugfs_create_dir(dir_name, esw->dbgfs);
+ vnic_diag = debugfs_create_dir("vnic_diag", vport->dbgfs);
+
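+ /* Expose each counter only when the firmware reports the matching capability */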
+ if (MLX5_CAP_GEN(esw->dev, vnic_env_queue_counters)) {
+ debugfs_create_file("total_q_under_processor_handle", 0444, vnic_diag, vport,
+ &total_q_under_processor_handle_fops);
+ debugfs_create_file("send_queue_priority_update_flow", 0444, vnic_diag, vport,
+ &send_queue_priority_update_flow_fops);
+ }
+
+ if (MLX5_CAP_GEN(esw->dev, eq_overrun_count)) {
+ debugfs_create_file("comp_eq_overrun", 0444, vnic_diag, vport,
+ &comp_eq_overrun_fops);
+ debugfs_create_file("async_eq_overrun", 0444, vnic_diag, vport,
+ &async_eq_overrun_fops);
+ }
+
+ if (MLX5_CAP_GEN(esw->dev, vnic_env_cq_overrun))
+ debugfs_create_file("cq_overrun", 0444, vnic_diag, vport, &cq_overrun_fops);
+
+ if (MLX5_CAP_GEN(esw->dev, invalid_command_count))
+ debugfs_create_file("invalid_command", 0444, vnic_diag, vport,
+ &invalid_command_fops);
+
+ if (MLX5_CAP_GEN(esw->dev, quota_exceeded_count))
+ debugfs_create_file("quota_exceeded_command", 0444, vnic_diag, vport,
+ &quota_exceeded_command_fops);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index 7f9b96d9537e..9bc7be95db54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -87,11 +87,11 @@ int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_
devlink = priv_to_devlink(dev);
dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
- err = devlink_port_register(devlink, dl_port, dl_port_index);
+ err = devl_port_register(devlink, dl_port, dl_port_index);
if (err)
goto reg_err;
- err = devlink_rate_leaf_create(dl_port, vport);
+ err = devl_rate_leaf_create(dl_port, vport);
if (err)
goto rate_err;
@@ -99,7 +99,7 @@ int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_
return 0;
rate_err:
- devlink_port_unregister(dl_port);
+ devl_port_unregister(dl_port);
reg_err:
mlx5_esw_dl_port_free(dl_port);
return err;
@@ -118,10 +118,10 @@ void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vpo
if (vport->dl_port->devlink_rate) {
mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL);
- devlink_rate_leaf_destroy(vport->dl_port);
+ devl_rate_leaf_destroy(vport->dl_port);
}
- devlink_port_unregister(vport->dl_port);
+ devl_port_unregister(vport->dl_port);
mlx5_esw_dl_port_free(vport->dl_port);
vport->dl_port = NULL;
}
@@ -156,11 +156,11 @@ int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_p
devlink_port_attrs_pci_sf_set(dl_port, controller, pfnum, sfnum, !!controller);
devlink = priv_to_devlink(dev);
dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
- err = devlink_port_register(devlink, dl_port, dl_port_index);
+ err = devl_port_register(devlink, dl_port, dl_port_index);
if (err)
return err;
- err = devlink_rate_leaf_create(dl_port, vport);
+ err = devl_rate_leaf_create(dl_port, vport);
if (err)
goto rate_err;
@@ -168,7 +168,7 @@ int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_p
return 0;
rate_err:
- devlink_port_unregister(dl_port);
+ devl_port_unregister(dl_port);
return err;
}
@@ -182,9 +182,9 @@ void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num
if (vport->dl_port->devlink_rate) {
mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL);
- devlink_rate_leaf_destroy(vport->dl_port);
+ devl_rate_leaf_destroy(vport->dl_port);
}
- devlink_port_unregister(vport->dl_port);
+ devl_port_unregister(vport->dl_port);
vport->dl_port = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 719ef26d23c0..6aa58044b949 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -36,6 +36,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mpfs.h>
+#include <linux/debugfs.h>
#include "esw/acl/lgcy.h"
#include "esw/legacy.h"
#include "esw/qos.h"
@@ -1002,6 +1003,7 @@ int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
if (err)
return err;
+ mlx5_esw_vport_debugfs_create(esw, vport_num, false, 0);
err = esw_offloads_load_rep(esw, vport_num);
if (err)
goto err_rep;
@@ -1009,6 +1011,7 @@ int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
return err;
err_rep:
+ mlx5_esw_vport_debugfs_destroy(esw, vport_num);
mlx5_esw_vport_disable(esw, vport_num);
return err;
}
@@ -1016,6 +1019,7 @@ err_rep:
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
esw_offloads_unload_rep(esw, vport_num);
+ mlx5_esw_vport_debugfs_destroy(esw, vport_num);
mlx5_esw_vport_disable(esw, vport_num);
}
@@ -1152,8 +1156,6 @@ mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
{
const u32 *out;
- WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);
-
if (num_vfs < 0)
return;
@@ -1186,6 +1188,9 @@ static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
int total_vports;
int err;
+ if (esw->flags & MLX5_ESWITCH_VPORT_ACL_NS_CREATED)
+ return 0;
+
total_vports = mlx5_eswitch_get_total_vports(dev);
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
@@ -1203,6 +1208,7 @@ static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
} else {
esw_warn(dev, "ingress ACL is not supported by FW\n");
}
+ esw->flags |= MLX5_ESWITCH_VPORT_ACL_NS_CREATED;
return 0;
err:
@@ -1215,6 +1221,7 @@ static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
{
struct mlx5_core_dev *dev = esw->dev;
+ esw->flags &= ~MLX5_ESWITCH_VPORT_ACL_NS_CREATED;
if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
mlx5_fs_ingress_acls_cleanup(dev);
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
@@ -1224,7 +1231,6 @@ static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
/**
* mlx5_eswitch_enable_locked - Enable eswitch
* @esw: Pointer to eswitch
- * @mode: Eswitch mode to enable
* @num_vfs: Enable eswitch for given number of VFs. This is optional.
* Valid value are 0, > 0 and MLX5_ESWITCH_IGNORE_NUM_VFS.
* Caller should pass num_vfs > 0 when enabling eswitch for
@@ -1238,7 +1244,7 @@ static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
* mode. If num_vfs >=0 is provided, it setup VF related eswitch vports.
* It returns 0 on success or error code on failure.
*/
-int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
+int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
{
int err;
@@ -1257,9 +1263,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
mlx5_eswitch_update_num_of_vfs(esw, num_vfs);
- esw->mode = mode;
-
- if (mode == MLX5_ESWITCH_LEGACY) {
+ if (esw->mode == MLX5_ESWITCH_LEGACY) {
err = esw_legacy_enable(esw);
} else {
mlx5_rescan_drivers(esw->dev);
@@ -1269,22 +1273,19 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
if (err)
goto abort;
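+ /* Mark the FDB tables as created; mlx5_esw_is_fdb_created() keys off this flag. */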
+ esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED;
+
mlx5_eswitch_event_handlers_register(esw);
esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
- mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
+ esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
esw->esw_funcs.num_vfs, esw->enabled_vports);
- mlx5_esw_mode_change_notify(esw, mode);
+ mlx5_esw_mode_change_notify(esw, esw->mode);
return 0;
abort:
- esw->mode = MLX5_ESWITCH_NONE;
-
- if (mode == MLX5_ESWITCH_OFFLOADS)
- mlx5_rescan_drivers(esw->dev);
-
mlx5_esw_acls_ns_cleanup(esw);
return err;
}
@@ -1305,14 +1306,16 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
if (!mlx5_esw_allowed(esw))
return 0;
- toggle_lag = esw->mode == MLX5_ESWITCH_NONE;
+ devl_assert_locked(priv_to_devlink(esw->dev));
+
+ toggle_lag = !mlx5_esw_is_fdb_created(esw);
if (toggle_lag)
mlx5_lag_disable_change(esw->dev);
down_write(&esw->mode_lock);
- if (esw->mode == MLX5_ESWITCH_NONE) {
- ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
+ if (!mlx5_esw_is_fdb_created(esw)) {
+ ret = mlx5_eswitch_enable_locked(esw, num_vfs);
} else {
enum mlx5_eswitch_vport_event vport_events;
@@ -1330,55 +1333,81 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
return ret;
}
-void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
+/* When disabling SR-IOV, free driver-level resources. */
+void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
{
- struct devlink *devlink = priv_to_devlink(esw->dev);
- int old_mode;
-
- lockdep_assert_held_write(&esw->mode_lock);
-
- if (esw->mode == MLX5_ESWITCH_NONE)
+ if (!mlx5_esw_allowed(esw))
return;
- esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
+ devl_assert_locked(priv_to_devlink(esw->dev));
+ down_write(&esw->mode_lock);
+ /* If the driver is unloaded, this function is called twice: by remove_one()
+ * and by mlx5_unload(). Make the second call a no-op.
+ */
+ if (!esw->esw_funcs.num_vfs && !clear_vf)
+ goto unlock;
+
+ esw_info(esw->dev, "Unload vfs: mode(%s), nvfs(%d), active vports(%d)\n",
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
esw->esw_funcs.num_vfs, esw->enabled_vports);
- /* Notify eswitch users that it is exiting from current mode.
- * So that it can do necessary cleanup before the eswitch is disabled.
+ mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
+ if (clear_vf)
+ mlx5_eswitch_clear_vf_vports_info(esw);
+ /* If disabling SR-IOV in switchdev mode, free the meta rules here
+ * because they depend on num_vfs.
*/
- mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_NONE);
+ if (esw->mode == MLX5_ESWITCH_OFFLOADS) {
+ struct devlink *devlink = priv_to_devlink(esw->dev);
- mlx5_eswitch_event_handlers_unregister(esw);
+ esw_offloads_del_send_to_vport_meta_rules(esw);
+ devl_rate_nodes_destroy(devlink);
+ }
- if (esw->mode == MLX5_ESWITCH_LEGACY)
- esw_legacy_disable(esw);
- else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
- esw_offloads_disable(esw);
+ esw->esw_funcs.num_vfs = 0;
- old_mode = esw->mode;
- esw->mode = MLX5_ESWITCH_NONE;
+unlock:
+ up_write(&esw->mode_lock);
+}
- if (old_mode == MLX5_ESWITCH_OFFLOADS)
- mlx5_rescan_drivers(esw->dev);
+/* Free resources for the current eswitch mode. It is called by devlink
+ * when changing the eswitch mode and when unloading the driver.
+ */
+void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw)
+{
+ struct devlink *devlink = priv_to_devlink(esw->dev);
+
+ /* Notify eswitch users that it is exiting the current mode so they can
+ * do the necessary cleanup before the eswitch is disabled.
+ */
+ mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_LEGACY);
- devlink_rate_nodes_destroy(devlink);
+ mlx5_eswitch_event_handlers_unregister(esw);
+ esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
+ esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
+ esw->esw_funcs.num_vfs, esw->enabled_vports);
+
+ esw->fdb_table.flags &= ~MLX5_ESW_FDB_CREATED;
+ if (esw->mode == MLX5_ESWITCH_OFFLOADS)
+ esw_offloads_disable(esw);
+ else if (esw->mode == MLX5_ESWITCH_LEGACY)
+ esw_legacy_disable(esw);
mlx5_esw_acls_ns_cleanup(esw);
- if (clear_vf)
- mlx5_eswitch_clear_vf_vports_info(esw);
+ if (esw->mode == MLX5_ESWITCH_OFFLOADS)
+ devl_rate_nodes_destroy(devlink);
}
-void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
+void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
{
if (!mlx5_esw_allowed(esw))
return;
+ devl_assert_locked(priv_to_devlink(esw->dev));
mlx5_lag_disable_change(esw->dev);
down_write(&esw->mode_lock);
- mlx5_eswitch_disable_locked(esw, clear_vf);
- esw->esw_funcs.num_vfs = 0;
+ mlx5_eswitch_disable_locked(esw);
up_write(&esw->mode_lock);
mlx5_lag_enable_change(esw->dev);
}
@@ -1573,7 +1602,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
refcount_set(&esw->qos.refcnt, 0);
esw->enabled_vports = 0;
- esw->mode = MLX5_ESWITCH_NONE;
+ esw->mode = MLX5_ESWITCH_LEGACY;
esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
@@ -1587,6 +1616,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
dev->priv.eswitch = esw;
BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
+ esw->dbgfs = debugfs_create_dir("esw", mlx5_debugfs_get_dev_root(esw->dev));
esw_info(dev,
"Total vports %d, per vport: max uc(%d) max mc(%d)\n",
esw->total_vports,
@@ -1610,6 +1640,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw_info(esw->dev, "cleanup\n");
+ debugfs_remove_recursive(esw->dbgfs);
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
WARN_ON(refcount_read(&esw->qos.refcnt));
@@ -1875,7 +1906,7 @@ u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
- return mlx5_esw_allowed(esw) ? esw->mode : MLX5_ESWITCH_NONE;
+ return mlx5_esw_allowed(esw) ? esw->mode : MLX5_ESWITCH_LEGACY;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
@@ -1995,8 +2026,6 @@ int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
*/
void mlx5_esw_unlock(struct mlx5_eswitch *esw)
{
- if (!mlx5_esw_allowed(esw))
- return;
up_write(&esw->mode_lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 2754a732914d..87ce5a208cb5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -191,6 +191,7 @@ struct mlx5_vport {
enum mlx5_eswitch_vport_event enabled_events;
int index;
struct devlink_port *dl_port;
+ struct dentry *dbgfs;
};
struct mlx5_esw_indir_table;
@@ -282,10 +283,15 @@ struct mlx5_esw_functions {
enum {
MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
+ MLX5_ESWITCH_VPORT_ACL_NS_CREATED = BIT(2),
};
struct mlx5_esw_bridge_offloads;
+enum {
+ MLX5_ESW_FDB_CREATED = BIT(0),
+};
+
struct mlx5_eswitch {
struct mlx5_core_dev *dev;
struct mlx5_nb nb;
@@ -331,12 +337,14 @@ struct mlx5_eswitch {
u32 large_group_num;
} params;
struct blocking_notifier_head n_head;
+ struct dentry *dbgfs;
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
+void esw_offloads_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw);
bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable);
@@ -350,10 +358,11 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
-int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs);
+int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
-void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf);
-void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf);
+void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
+void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
+void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
@@ -575,6 +584,11 @@ mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
return dl_port_index & 0xffff;
}
+static inline bool mlx5_esw_is_fdb_created(struct mlx5_eswitch *esw)
+{
+ return esw->fdb_table.flags & MLX5_ESW_FDB_CREATED;
+}
+
/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
@@ -672,6 +686,9 @@ int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);
+void mlx5_esw_vport_debugfs_create(struct mlx5_eswitch *esw, u16 vport_num, bool is_sf, u16 sf_num);
+void mlx5_esw_vport_debugfs_destroy(struct mlx5_eswitch *esw, u16 vport_num);
+
int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
u16 vport_num, u32 controller, u32 sfnum);
void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
@@ -719,7 +736,8 @@ int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
-static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
+static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
+static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 2ce3728576d1..ed73132129aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -230,10 +230,8 @@ esw_setup_ft_dest(struct mlx5_flow_destination *dest,
}
static void
-esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
- struct mlx5_flow_act *flow_act,
- struct mlx5_fs_chains *chains,
- int i)
+esw_setup_accept_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+ struct mlx5_fs_chains *chains, int i)
{
if (mlx5_chains_ignore_flow_level_supported(chains))
flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
@@ -241,6 +239,16 @@ esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
}
+static void
+esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw, int i)
+{
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[i].ft = esw->fdb_table.offloads.slow_fdb;
+}
+
static int
esw_setup_chain_dest(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
@@ -475,8 +483,11 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
} else if (attr->dest_ft) {
esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
(*i)++;
- } else if (mlx5e_tc_attr_flags_skip(attr->flags)) {
- esw_setup_slow_path_dest(dest, flow_act, chains, *i);
+ } else if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
+ esw_setup_slow_path_dest(dest, flow_act, esw, *i);
+ (*i)++;
+ } else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) {
+ esw_setup_accept_dest(dest, flow_act, chains, *i);
(*i)++;
} else if (attr->dest_chain) {
err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
@@ -512,6 +523,20 @@ esw_cleanup_dests(struct mlx5_eswitch *esw,
}
}
+static void
+esw_setup_meter(struct mlx5_flow_attr *attr, struct mlx5_flow_act *flow_act)
+{
+ struct mlx5e_flow_meter_handle *meter;
+
+ meter = attr->meter_attr.meter;
+ flow_act->exe_aso.type = attr->exe_aso_type;
+ flow_act->exe_aso.object_id = meter->obj_id;
+ flow_act->exe_aso.flow_meter.meter_idx = meter->idx;
+ flow_act->exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN;
+ /* use metadata reg 5 for packet color */
+ flow_act->exe_aso.return_reg_id = 5;
+}
+
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
@@ -579,6 +604,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_hdr = attr->modify_hdr;
+ if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
+ attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)
+ esw_setup_meter(attr, &flow_act);
+
if (split) {
fwd_attr.chain = attr->chain;
fwd_attr.prio = attr->prio;
@@ -1040,6 +1069,15 @@ static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
mlx5_del_flow_rules(flows[i]);
kvfree(flows);
+ /* When changing eswitch mode from switchdev to legacy while num_vfs is not 0,
+ * the meta rules could be freed twice. Set the pointer to NULL to avoid that.
+ */
+ esw->fdb_table.offloads.send_to_vport_meta_rules = NULL;
+}
+
+void esw_offloads_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
+{
+ mlx5_eswitch_del_send_to_vport_meta_rules(esw);
}
static int
@@ -2034,7 +2072,7 @@ static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
if (!MLX5_CAP_GEN(dev, vport_group_manager))
return -EOPNOTSUPP;
- if (esw->mode == MLX5_ESWITCH_NONE)
+ if (!mlx5_esw_is_fdb_created(esw))
return -EOPNOTSUPP;
switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
@@ -2170,18 +2208,18 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
{
int err, err1;
- mlx5_eswitch_disable_locked(esw, false);
- err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
- esw->dev->priv.sriov.num_vfs);
+ esw->mode = MLX5_ESWITCH_OFFLOADS;
+ err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch to offloads");
- err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
- MLX5_ESWITCH_IGNORE_NUM_VFS);
+ esw->mode = MLX5_ESWITCH_LEGACY;
+ err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
if (err1) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch back to legacy");
}
+ mlx5_rescan_drivers(esw->dev);
}
if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
if (mlx5_eswitch_inline_mode_get(esw,
@@ -2894,7 +2932,7 @@ int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable)
int err = 0;
down_write(&esw->mode_lock);
- if (esw->mode != MLX5_ESWITCH_NONE) {
+ if (mlx5_esw_is_fdb_created(esw)) {
err = -EBUSY;
goto done;
}
@@ -3055,6 +3093,7 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
+ struct devlink *devlink;
bool host_pf_disabled;
u16 new_num_vfs;
@@ -3066,6 +3105,8 @@ esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
return;
+ devlink = priv_to_devlink(esw->dev);
+ devl_lock(devlink);
/* Number of VFs can only change from "0 to x" or "x to 0". */
if (esw->esw_funcs.num_vfs > 0) {
mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
@@ -3078,6 +3119,7 @@ esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
return;
}
esw->esw_funcs.num_vfs = new_num_vfs;
+ devl_unlock(devlink);
}
static void esw_functions_changed_event_handler(struct work_struct *work)
@@ -3229,13 +3271,12 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
{
int err, err1;
- mlx5_eswitch_disable_locked(esw, false);
- err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
- MLX5_ESWITCH_IGNORE_NUM_VFS);
+ esw->mode = MLX5_ESWITCH_LEGACY;
+ err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
- err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
- MLX5_ESWITCH_IGNORE_NUM_VFS);
+ esw->mode = MLX5_ESWITCH_OFFLOADS;
+ err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
if (err1) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch back to offloads");
@@ -3334,36 +3375,6 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
return 0;
}
-static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw)
-{
- /* devlink commands in NONE eswitch mode are currently supported only
- * on ECPF.
- */
- return (esw->mode == MLX5_ESWITCH_NONE &&
- !mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0;
-}
-
-/* FIXME: devl_unlock() followed by devl_lock() inside driver callback
- * is never correct and prone to races. It's a transitional workaround,
- * never repeat this pattern.
- *
- * This code MUST be fixed before removing devlink_mutex as it is safe
- * to do only because of that mutex.
- */
-static void mlx5_eswtich_mode_callback_enter(struct devlink *devlink,
- struct mlx5_eswitch *esw)
-{
- devl_unlock(devlink);
- down_write(&esw->mode_lock);
-}
-
-static void mlx5_eswtich_mode_callback_exit(struct devlink *devlink,
- struct mlx5_eswitch *esw)
-{
- up_write(&esw->mode_lock);
- devl_lock(devlink);
-}
-
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
@@ -3378,15 +3389,6 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
- /* FIXME: devl_unlock() followed by devl_lock() inside driver callback
- * is never correct and prone to races. It's a transitional workaround,
- * never repeat this pattern.
- *
- * This code MUST be fixed before removing devlink_mutex as it is safe
- * to do only because of that mutex.
- */
- devl_unlock(devlink);
-
mlx5_lag_disable_change(esw->dev);
err = mlx5_esw_try_lock(esw);
if (err < 0) {
@@ -3399,6 +3401,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (cur_mlx5_mode == mlx5_mode)
goto unlock;
+ mlx5_eswitch_disable_locked(esw);
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
if (mlx5_devlink_trap_get_num_active(esw->dev)) {
NL_SET_ERR_MSG_MOD(extack,
@@ -3409,6 +3412,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
err = esw_offloads_start(esw, extack);
} else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
err = esw_offloads_stop(esw, extack);
+ mlx5_rescan_drivers(esw->dev);
} else {
err = -EINVAL;
}
@@ -3417,7 +3421,6 @@ unlock:
mlx5_esw_unlock(esw);
enable_lag:
mlx5_lag_enable_change(esw->dev);
- devl_lock(devlink);
return err;
}
@@ -3430,14 +3433,9 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
if (IS_ERR(esw))
return PTR_ERR(esw);
- mlx5_eswtich_mode_callback_enter(devlink, esw);
- err = eswitch_devlink_esw_mode_check(esw);
- if (err)
- goto unlock;
-
+ down_write(&esw->mode_lock);
err = esw_mode_to_devlink(esw->mode, mode);
-unlock:
- mlx5_eswtich_mode_callback_exit(devlink, esw);
+ up_write(&esw->mode_lock);
return err;
}
@@ -3484,10 +3482,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
if (IS_ERR(esw))
return PTR_ERR(esw);
- mlx5_eswtich_mode_callback_enter(devlink, esw);
- err = eswitch_devlink_esw_mode_check(esw);
- if (err)
- goto out;
+ down_write(&esw->mode_lock);
switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
@@ -3521,11 +3516,11 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
goto out;
esw->offloads.inline_mode = mlx5_mode;
- mlx5_eswtich_mode_callback_exit(devlink, esw);
+ up_write(&esw->mode_lock);
return 0;
out:
- mlx5_eswtich_mode_callback_exit(devlink, esw);
+ up_write(&esw->mode_lock);
return err;
}
@@ -3538,14 +3533,9 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
if (IS_ERR(esw))
return PTR_ERR(esw);
- mlx5_eswtich_mode_callback_enter(devlink, esw);
- err = eswitch_devlink_esw_mode_check(esw);
- if (err)
- goto unlock;
-
+ down_write(&esw->mode_lock);
err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
-unlock:
- mlx5_eswtich_mode_callback_exit(devlink, esw);
+ up_write(&esw->mode_lock);
return err;
}
@@ -3555,16 +3545,13 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw;
- int err;
+ int err = 0;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
- mlx5_eswtich_mode_callback_enter(devlink, esw);
- err = eswitch_devlink_esw_mode_check(esw);
- if (err)
- goto unlock;
+ down_write(&esw->mode_lock);
if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
(!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
@@ -3607,7 +3594,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
}
unlock:
- mlx5_eswtich_mode_callback_exit(devlink, esw);
+ up_write(&esw->mode_lock);
return err;
}
@@ -3615,21 +3602,15 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
enum devlink_eswitch_encap_mode *encap)
{
struct mlx5_eswitch *esw;
- int err;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
- mlx5_eswtich_mode_callback_enter(devlink, esw);
- err = eswitch_devlink_esw_mode_check(esw);
- if (err)
- goto unlock;
-
+ down_write(&esw->mode_lock);
*encap = esw->offloads.encap;
-unlock:
- mlx5_eswtich_mode_callback_exit(devlink, esw);
- return err;
+ up_write(&esw->mode_lock);
+ return 0;
}
static bool
@@ -3752,12 +3733,14 @@ int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_p
if (err)
goto devlink_err;
+ mlx5_esw_vport_debugfs_create(esw, vport_num, true, sfnum);
err = mlx5_esw_offloads_rep_load(esw, vport_num);
if (err)
goto rep_err;
return 0;
rep_err:
+ mlx5_esw_vport_debugfs_destroy(esw, vport_num);
mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
devlink_err:
mlx5_esw_vport_disable(esw, vport_num);
@@ -3767,6 +3750,7 @@ devlink_err:
void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
{
mlx5_esw_offloads_rep_unload(esw, vport_num);
+ mlx5_esw_vport_debugfs_destroy(esw, vport_num);
mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
mlx5_esw_vport_disable(esw, vport_num);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 2ccf7bef9b05..735dc805dad7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -479,6 +479,30 @@ static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
return 0;
}
+
+static void
+mlx5_cmd_set_fte_flow_meter(struct fs_fte *fte, void *in_flow_context)
+{
+ void *exe_aso_ctrl;
+ void *execute_aso;
+
+ execute_aso = MLX5_ADDR_OF(flow_context, in_flow_context,
+ execute_aso[0]);
+ MLX5_SET(execute_aso, execute_aso, valid, 1);
+ MLX5_SET(execute_aso, execute_aso, aso_object_id,
+ fte->action.exe_aso.object_id);
+
+ exe_aso_ctrl = MLX5_ADDR_OF(execute_aso, execute_aso, exe_aso_ctrl);
+ MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, return_reg_id,
+ fte->action.exe_aso.return_reg_id);
+ MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, aso_type,
+ fte->action.exe_aso.type);
+ MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, init_color,
+ fte->action.exe_aso.flow_meter.init_color);
+ MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, meter_id,
+ fte->action.exe_aso.flow_meter.meter_idx);
+}
+
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
int opmod, int modify_mask,
struct mlx5_flow_table *ft,
@@ -663,6 +687,15 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
list_size);
}
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
+ if (fte->action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) {
+ mlx5_cmd_set_fte_flow_meter(fte, in_flow_context);
+ } else {
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+ }
+
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
kvfree(in);
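
[Editor note: a minimal sketch of how a steering caller might request the EXECUTE_ASO flow-meter action that mlx5_cmd_set_fte() consumes above. The helper name and the reg_c_5 choice are assumptions of this illustration, not part of the patch; only the field names follow the diff.]

static void example_fill_exe_aso_flow_meter(struct mlx5_flow_act *act,
					    u32 obj_id, u8 meter_idx,
					    u8 init_color)
{
	/* Request EXECUTE_ASO and describe the flow meter with the same
	 * fields mlx5_cmd_set_fte_flow_meter() reads from fte->action.
	 */
	act->action |= MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO;
	act->exe_aso.type = MLX5_EXE_ASO_FLOW_METER;
	act->exe_aso.object_id = obj_id;
	act->exe_aso.return_reg_id = 5;	/* reg_c_5; an assumption here */
	act->exe_aso.flow_meter.init_color = init_color;
	act->exe_aso.flow_meter.meter_idx = meter_idx;
}
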
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 21e5c709b2d3..f1b908d40fa6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -2895,6 +2895,14 @@ static int create_fdb_bypass(struct mlx5_flow_steering *steering)
return 0;
}
+static void cleanup_fdb_root_ns(struct mlx5_flow_steering *steering)
+{
+ cleanup_root_ns(steering->fdb_root_ns);
+ steering->fdb_root_ns = NULL;
+ kfree(steering->fdb_sub_ns);
+ steering->fdb_sub_ns = NULL;
+}
+
static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
struct fs_prio *maj_prio;
@@ -2945,10 +2953,7 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
return 0;
out_err:
- cleanup_root_ns(steering->fdb_root_ns);
- kfree(steering->fdb_sub_ns);
- steering->fdb_sub_ns = NULL;
- steering->fdb_root_ns = NULL;
+ cleanup_fdb_root_ns(steering);
return err;
}
@@ -3108,10 +3113,7 @@ void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
struct mlx5_flow_steering *steering = dev->priv.steering;
cleanup_root_ns(steering->root_ns);
- cleanup_root_ns(steering->fdb_root_ns);
- steering->fdb_root_ns = NULL;
- kfree(steering->fdb_sub_ns);
- steering->fdb_sub_ns = NULL;
+ cleanup_fdb_root_ns(steering);
cleanup_root_ns(steering->port_sel_root_ns);
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index cfb8bedba512..079fa44ada71 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -289,6 +289,10 @@ int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id)
sw_owner_id[i]);
}
+ if (MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) &&
+ dev->priv.sw_vhca_id > 0)
+ MLX5_SET(init_hca_in, in, sw_vhca_id, dev->priv.sw_vhca_id);
+
return mlx5_cmd_exec_in(dev, init_hca, in);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 052af4901c0b..e8896f368362 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -149,6 +149,9 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
complete(&fw_reset->done);
} else {
+ mlx5_unload_one(dev);
+ if (mlx5_health_wait_pci_up(dev))
+ mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
mlx5_load_one(dev, false);
devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
@@ -183,15 +186,9 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work)
struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
reset_reload_work);
struct mlx5_core_dev *dev = fw_reset->dev;
- int err;
mlx5_sync_reset_clear_reset_requested(dev, false);
mlx5_enter_error_state(dev, true);
- mlx5_unload_one(dev);
- err = mlx5_health_wait_pci_up(dev);
- if (err)
- mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
- fw_reset->ret = err;
mlx5_fw_reset_complete_reload(dev);
}
@@ -395,7 +392,6 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
}
mlx5_enter_error_state(dev, true);
- mlx5_unload_one(dev);
done:
fw_reset->ret = err;
mlx5_fw_reset_complete_reload(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 659021c31cbd..2cf2c9948446 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -666,16 +666,20 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
struct mlx5_fw_reporter_ctx fw_reporter_ctx;
struct mlx5_core_health *health;
struct mlx5_core_dev *dev;
+ struct devlink *devlink;
struct mlx5_priv *priv;
health = container_of(work, struct mlx5_core_health, fatal_report_work);
priv = container_of(health, struct mlx5_priv, health);
dev = container_of(priv, struct mlx5_core_dev, priv);
+ devlink = priv_to_devlink(dev);
enter_error_state(dev, false);
if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) {
+ devl_lock(devlink);
if (mlx5_health_try_recover(dev))
mlx5_core_err(dev, "health recovery failed\n");
+ devl_unlock(devlink);
return;
}
fw_reporter_ctx.err_synd = health->synd;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 8da73ef5680f..ac3757beaea2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -83,7 +83,7 @@ static void mlx5i_get_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
- mlx5e_ethtool_get_ringparam(priv, param);
+ mlx5e_ethtool_get_ringparam(priv, param, kernel_param);
}
static int mlx5i_set_channels(struct net_device *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 0a99a020a3b2..c02b7b08fb4c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -322,10 +322,10 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
{
int err;
- priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
+ priv->fs->ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
- if (!priv->fs.ns)
+ if (!priv->fs->ns)
return -EINVAL;
err = mlx5e_arfs_create_tables(priv);
@@ -364,9 +364,18 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
int err;
- priv->rx_res = mlx5e_rx_res_alloc();
- if (!priv->rx_res)
+ priv->fs = mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ if (!priv->fs) {
+ netdev_err(priv->netdev, "FS allocation failed\n");
return -ENOMEM;
+ }
+
+ priv->rx_res = mlx5e_rx_res_alloc();
+ if (!priv->rx_res) {
+ err = -ENOMEM;
+ goto err_free_fs;
+ }
mlx5e_create_q_counters(priv);
@@ -397,6 +406,8 @@ err_destroy_q_counters:
mlx5e_destroy_q_counters(priv);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
+err_free_fs:
+ mlx5e_fs_cleanup(priv->fs);
return err;
}
@@ -408,6 +419,7 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
mlx5e_destroy_q_counters(priv);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
+ mlx5e_fs_cleanup(priv->fs);
}
/* The stats groups order is opposite to the update_stats() order calls */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 5d41e19378e0..0f34e3c80d1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -638,6 +638,7 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
#ifdef CONFIG_MLX5_ESWITCH
+ struct mlx5_core_dev *dev;
u8 mode;
#endif
int i;
@@ -647,11 +648,11 @@ static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
return false;
#ifdef CONFIG_MLX5_ESWITCH
- mode = mlx5_eswitch_mode(ldev->pf[MLX5_LAG_P1].dev);
-
- if (mode != MLX5_ESWITCH_NONE && mode != MLX5_ESWITCH_OFFLOADS)
+ dev = ldev->pf[MLX5_LAG_P1].dev;
+ if ((mlx5_sriov_is_enabled(dev)) && !is_mdev_switchdev_mode(dev))
return false;
+ mode = mlx5_eswitch_mode(dev);
for (i = 0; i < ldev->ports; i++)
if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode)
return false;
@@ -766,8 +767,7 @@ static bool mlx5_lag_is_roce_lag(struct mlx5_lag *ldev)
#ifdef CONFIG_MLX5_ESWITCH
for (i = 0; i < ldev->ports; i++)
- roce_lag = roce_lag &&
- ldev->pf[i].dev->priv.eswitch->mode == MLX5_ESWITCH_NONE;
+ roce_lag = roce_lag && is_mdev_legacy_mode(ldev->pf[i].dev);
#endif
return roce_lag;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
new file mode 100644
index 000000000000..21e14507ff5c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/mlx5/device.h>
+#include <linux/mlx5/transobj.h>
+#include "aso.h"
+#include "wq.h"
+
+struct mlx5_aso_cq {
+ /* data path - accessed per cqe */
+ struct mlx5_cqwq wq;
+
+ /* data path - accessed per napi poll */
+ struct mlx5_core_cq mcq;
+
+ /* control */
+ struct mlx5_core_dev *mdev;
+ struct mlx5_wq_ctrl wq_ctrl;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5_aso {
+ /* data path */
+ u16 cc;
+ u16 pc;
+
+ struct mlx5_wqe_ctrl_seg *doorbell_cseg;
+ struct mlx5_aso_cq cq;
+
+ /* read only */
+ struct mlx5_wq_cyc wq;
+ void __iomem *uar_map;
+ u32 sqn;
+
+ /* control path */
+ struct mlx5_wq_ctrl wq_ctrl;
+
+} ____cacheline_aligned_in_smp;
+
+static void mlx5_aso_free_cq(struct mlx5_aso_cq *cq)
+{
+ mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static int mlx5_aso_alloc_cq(struct mlx5_core_dev *mdev, int numa_node,
+ void *cqc_data, struct mlx5_aso_cq *cq)
+{
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ struct mlx5_wq_param param;
+ int err;
+ u32 i;
+
+ param.buf_numa_node = numa_node;
+ param.db_numa_node = numa_node;
+
+ err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl);
+ if (err)
+ return err;
+
+ mcq->cqe_sz = 64;
+ mcq->set_ci_db = cq->wq_ctrl.db.db;
+ mcq->arm_db = cq->wq_ctrl.db.db + 1;
+
+ for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+
+ cqe->op_own = 0xf1;
+ }
+
+ cq->mdev = mdev;
+
+ return 0;
+}
+
+static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
+{
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ struct mlx5_core_dev *mdev = cq->mdev;
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ void *in, *cqc;
+ int inlen, eqn;
+ int err;
+
+ err = mlx5_vector2eqn(mdev, 0, &eqn);
+ if (err)
+ return err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ sizeof(u64) * cq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+
+ memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));
+
+ mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+ MLX5_SET(cqc, cqc, cq_period_mode, DIM_CQ_PERIOD_MODE_START_FROM_EQE);
+ MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
+
+ err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5_aso_destroy_cq(struct mlx5_aso_cq *cq)
+{
+ mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
+ mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static int mlx5_aso_create_cq(struct mlx5_core_dev *mdev, int numa_node,
+ struct mlx5_aso_cq *cq)
+{
+ void *cqc_data;
+ int err;
+
+ cqc_data = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
+ if (!cqc_data)
+ return -ENOMEM;
+
+ MLX5_SET(cqc, cqc_data, log_cq_size, 1);
+ MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
+ if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
+ MLX5_SET(cqc, cqc_data, cqe_sz, CQE_STRIDE_128_PAD);
+
+ err = mlx5_aso_alloc_cq(mdev, numa_node, cqc_data, cq);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to alloc aso wq cq, err=%d\n", err);
+ goto err_out;
+ }
+
+ err = create_aso_cq(cq, cqc_data);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create aso wq cq, err=%d\n", err);
+ goto err_free_cq;
+ }
+
+ kvfree(cqc_data);
+ return 0;
+
+err_free_cq:
+ mlx5_aso_free_cq(cq);
+err_out:
+ kvfree(cqc_data);
+ return err;
+}
+
+static int mlx5_aso_alloc_sq(struct mlx5_core_dev *mdev, int numa_node,
+ void *sqc_data, struct mlx5_aso *sq)
+{
+ void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5_wq_param param;
+ int err;
+
+ sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+
+ param.db_numa_node = numa_node;
+ param.buf_numa_node = numa_node;
+ err = mlx5_wq_cyc_create(mdev, &param, sqc_wq, wq, &sq->wq_ctrl);
+ if (err)
+ return err;
+ wq->db = &wq->db[MLX5_SND_DBR];
+
+ return 0;
+}
+
+static int create_aso_sq(struct mlx5_core_dev *mdev, int pdn,
+ void *sqc_data, struct mlx5_aso *sq)
+{
+ void *in, *sqc, *wq;
+ int inlen, err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+ sizeof(u64) * sq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
+ MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
+
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
+ MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
+
+ mlx5_fill_page_frag_array(&sq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+ err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5_aso_set_sq_rdy(struct mlx5_core_dev *mdev, u32 sqn)
+{
+ void *in, *sqc;
+ int inlen, err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
+
+ err = mlx5_core_modify_sq(mdev, sqn, in);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5_aso_create_sq_rdy(struct mlx5_core_dev *mdev, u32 pdn,
+ void *sqc_data, struct mlx5_aso *sq)
+{
+ int err;
+
+ err = create_aso_sq(mdev, pdn, sqc_data, sq);
+ if (err)
+ return err;
+
+ err = mlx5_aso_set_sq_rdy(mdev, sq->sqn);
+ if (err)
+ mlx5_core_destroy_sq(mdev, sq->sqn);
+
+ return err;
+}
+
+static void mlx5_aso_free_sq(struct mlx5_aso *sq)
+{
+ mlx5_wq_destroy(&sq->wq_ctrl);
+}
+
+static void mlx5_aso_destroy_sq(struct mlx5_aso *sq)
+{
+ mlx5_core_destroy_sq(sq->cq.mdev, sq->sqn);
+ mlx5_aso_free_sq(sq);
+}
+
+static int mlx5_aso_create_sq(struct mlx5_core_dev *mdev, int numa_node,
+ u32 pdn, struct mlx5_aso *sq)
+{
+ void *sqc_data, *wq;
+ int err;
+
+ sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
+ if (!sqc_data)
+ return -ENOMEM;
+
+ wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+ MLX5_SET(wq, wq, pd, pdn);
+ MLX5_SET(wq, wq, log_wq_sz, 1);
+
+ err = mlx5_aso_alloc_sq(mdev, numa_node, sqc_data, sq);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to alloc aso wq sq, err=%d\n", err);
+ goto err_out;
+ }
+
+ err = mlx5_aso_create_sq_rdy(mdev, pdn, sqc_data, sq);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to open aso wq sq, err=%d\n", err);
+ goto err_free_asosq;
+ }
+
+ mlx5_core_dbg(mdev, "aso sq->sqn = 0x%x\n", sq->sqn);
+
+ kvfree(sqc_data);
+ return 0;
+
+err_free_asosq:
+ mlx5_aso_free_sq(sq);
+err_out:
+ kvfree(sqc_data);
+ return err;
+}
+
+struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn)
+{
+ int numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
+ struct mlx5_aso *aso;
+ int err;
+
+ aso = kzalloc(sizeof(*aso), GFP_KERNEL);
+ if (!aso)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5_aso_create_cq(mdev, numa_node, &aso->cq);
+ if (err)
+ goto err_cq;
+
+ err = mlx5_aso_create_sq(mdev, numa_node, pdn, aso);
+ if (err)
+ goto err_sq;
+
+ return aso;
+
+err_sq:
+ mlx5_aso_destroy_cq(&aso->cq);
+err_cq:
+ kfree(aso);
+ return ERR_PTR(err);
+}
+
+void mlx5_aso_destroy(struct mlx5_aso *aso)
+{
+ if (IS_ERR_OR_NULL(aso))
+ return;
+
+ mlx5_aso_destroy_sq(aso);
+ mlx5_aso_destroy_cq(&aso->cq);
+ kfree(aso);
+}
+
+void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
+ struct mlx5_aso_wqe *aso_wqe,
+ u32 obj_id, u32 opc_mode)
+{
+ struct mlx5_wqe_ctrl_seg *cseg = &aso_wqe->ctrl;
+
+ cseg->opmod_idx_opcode = cpu_to_be32((opc_mode << MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT) |
+ (aso->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+ MLX5_OPCODE_ACCESS_ASO);
+ cseg->qpn_ds = cpu_to_be32((aso->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | ds_cnt);
+ cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ cseg->general_id = cpu_to_be32(obj_id);
+}
+
+void *mlx5_aso_get_wqe(struct mlx5_aso *aso)
+{
+ u16 pi;
+
+ pi = mlx5_wq_cyc_ctr2ix(&aso->wq, aso->pc);
+ return mlx5_wq_cyc_get_wqe(&aso->wq, pi);
+}
+
+void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
+ struct mlx5_wqe_ctrl_seg *doorbell_cseg)
+{
+ doorbell_cseg->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
+ /* ensure wqe is visible to device before updating doorbell record */
+ dma_wmb();
+
+ if (with_data)
+ aso->pc += MLX5_ASO_WQEBBS_DATA;
+ else
+ aso->pc += MLX5_ASO_WQEBBS;
+ *aso->wq.db = cpu_to_be32(aso->pc);
+
+ /* ensure doorbell record is visible to device before ringing the
+ * doorbell
+ */
+ wmb();
+
+ mlx5_write64((__be32 *)doorbell_cseg, aso->uar_map);
+
+ /* Ensure doorbell is written on uar_page before poll_cq */
+ WRITE_ONCE(doorbell_cseg, NULL);
+}
+
+int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data, u32 interval_ms)
+{
+ struct mlx5_aso_cq *cq = &aso->cq;
+ struct mlx5_cqe64 *cqe;
+ unsigned long expires;
+
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
+
+ expires = jiffies + msecs_to_jiffies(interval_ms);
+ while (!cqe && time_is_after_jiffies(expires)) {
+ usleep_range(2, 10);
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
+ }
+
+ if (!cqe)
+ return -ETIMEDOUT;
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ mlx5_cqwq_pop(&cq->wq);
+
+ if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
+ struct mlx5_err_cqe *err_cqe;
+
+ mlx5_core_err(cq->mdev, "Bad OP in ASOSQ CQE: 0x%x\n",
+ get_cqe_opcode(cqe));
+
+ err_cqe = (struct mlx5_err_cqe *)cqe;
+ mlx5_core_err(cq->mdev, "vendor_err_synd=%x\n",
+ err_cqe->vendor_err_synd);
+ mlx5_core_err(cq->mdev, "syndrome=%x\n",
+ err_cqe->syndrome);
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
+ 16, 1, err_cqe,
+ sizeof(*err_cqe), false);
+ }
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ if (with_data)
+ aso->cc += MLX5_ASO_WQEBBS_DATA;
+ else
+ aso->cc += MLX5_ASO_WQEBBS;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
new file mode 100644
index 000000000000..b3bbf284fe71
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_LIB_ASO_H__
+#define __MLX5_LIB_ASO_H__
+
+#include <linux/mlx5/qp.h>
+#include "mlx5_core.h"
+
+#define MLX5_ASO_WQEBBS \
+ (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_BB))
+#define MLX5_ASO_WQEBBS_DATA \
+ (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe_data), MLX5_SEND_WQE_BB))
+#define MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT 24
+
+struct mlx5_wqe_aso_ctrl_seg {
+ __be32 va_h;
+ __be32 va_l; /* include read_enable */
+ __be32 l_key;
+ u8 data_mask_mode;
+ u8 condition_1_0_operand;
+ u8 condition_1_0_offset;
+ u8 data_offset_condition_operand;
+ __be32 condition_0_data;
+ __be32 condition_0_mask;
+ __be32 condition_1_data;
+ __be32 condition_1_mask;
+ __be64 bitwise_data;
+ __be64 data_mask;
+};
+
+struct mlx5_wqe_aso_data_seg {
+ __be32 bytewise_data[16];
+};
+
+struct mlx5_aso_wqe {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_aso_ctrl_seg aso_ctrl;
+};
+
+struct mlx5_aso_wqe_data {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_aso_ctrl_seg aso_ctrl;
+ struct mlx5_wqe_aso_data_seg aso_data;
+};
+
+enum {
+ MLX5_ASO_LOGICAL_AND,
+ MLX5_ASO_LOGICAL_OR,
+};
+
+enum {
+ MLX5_ASO_ALWAYS_FALSE,
+ MLX5_ASO_ALWAYS_TRUE,
+ MLX5_ASO_EQUAL,
+ MLX5_ASO_NOT_EQUAL,
+ MLX5_ASO_GREATER_OR_EQUAL,
+ MLX5_ASO_LESSER_OR_EQUAL,
+ MLX5_ASO_LESSER,
+ MLX5_ASO_GREATER,
+ MLX5_ASO_CYCLIC_GREATER,
+ MLX5_ASO_CYCLIC_LESSER,
+};
+
+enum {
+ MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT,
+ MLX5_ASO_DATA_MASK_MODE_BYTEWISE_64BYTE,
+ MLX5_ASO_DATA_MASK_MODE_CALCULATED_64BYTE,
+};
+
+enum {
+ MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER = 0x2,
+};
+
+struct mlx5_aso;
+
+void *mlx5_aso_get_wqe(struct mlx5_aso *aso);
+void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
+ struct mlx5_aso_wqe *aso_wqe,
+ u32 obj_id, u32 opc_mode);
+void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
+ struct mlx5_wqe_ctrl_seg *doorbell_cseg);
+int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data, u32 interval_ms);
+
+struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn);
+void mlx5_aso_destroy(struct mlx5_aso *aso);
+#endif /* __MLX5_LIB_ASO_H__ */
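
[Editor note: a minimal usage sketch of the ASO send-queue API declared above, assuming the caller already holds an mlx5_core_dev and a PD number. The function name, the ds_cnt computation and the 10ms poll budget are illustrative choices, not values taken from the patch.]

static int example_aso_roundtrip(struct mlx5_core_dev *mdev, u32 pdn, u32 obj_id)
{
	struct mlx5_aso_wqe *wqe;
	struct mlx5_aso *aso;
	u8 ds_cnt;
	int err;

	aso = mlx5_aso_create(mdev, pdn);
	if (IS_ERR(aso))
		return PTR_ERR(aso);

	/* Grab the next WQE slot and fill its control segment. */
	wqe = mlx5_aso_get_wqe(aso);
	ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
	mlx5_aso_build_wqe(aso, ds_cnt, wqe, obj_id,
			   MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER);

	/* Ring the doorbell and wait (up to 10ms here) for the completion. */
	mlx5_aso_post_wqe(aso, false, &wqe->ctrl);
	err = mlx5_aso_poll_cq(aso, false, 10);

	mlx5_aso_destroy(aso);
	return err;
}
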
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
index 3d5e57ff558c..9482e51ac82a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
@@ -12,13 +12,16 @@ struct mlx5_dm {
spinlock_t lock;
unsigned long *steering_sw_icm_alloc_blocks;
unsigned long *header_modify_sw_icm_alloc_blocks;
+ unsigned long *header_modify_pattern_sw_icm_alloc_blocks;
};
struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
{
+ u64 header_modify_pattern_icm_blocks = 0;
u64 header_modify_icm_blocks = 0;
u64 steering_icm_blocks = 0;
struct mlx5_dm *dm;
+ bool support_v2;
if (!(MLX5_CAP_GEN_64(dev, general_obj_types) & MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM))
return NULL;
@@ -35,8 +38,7 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
dm->steering_sw_icm_alloc_blocks =
- kcalloc(BITS_TO_LONGS(steering_icm_blocks),
- sizeof(unsigned long), GFP_KERNEL);
+ bitmap_zalloc(steering_icm_blocks, GFP_KERNEL);
if (!dm->steering_sw_icm_alloc_blocks)
goto err_steering;
}
@@ -47,16 +49,33 @@ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
dm->header_modify_sw_icm_alloc_blocks =
- kcalloc(BITS_TO_LONGS(header_modify_icm_blocks),
- sizeof(unsigned long), GFP_KERNEL);
+ bitmap_zalloc(header_modify_icm_blocks, GFP_KERNEL);
if (!dm->header_modify_sw_icm_alloc_blocks)
goto err_modify_hdr;
}
+ support_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) &&
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2) &&
+ MLX5_CAP64_DEV_MEM(dev, header_modify_pattern_sw_icm_start_address);
+
+ if (support_v2) {
+ header_modify_pattern_icm_blocks =
+ BIT(MLX5_CAP_DEV_MEM(dev, log_header_modify_pattern_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
+
+ dm->header_modify_pattern_sw_icm_alloc_blocks =
+ bitmap_zalloc(header_modify_pattern_icm_blocks, GFP_KERNEL);
+ if (!dm->header_modify_pattern_sw_icm_alloc_blocks)
+ goto err_pattern;
+ }
+
return dm;
+err_pattern:
+ bitmap_free(dm->header_modify_sw_icm_alloc_blocks);
+
err_modify_hdr:
- kfree(dm->steering_sw_icm_alloc_blocks);
+ bitmap_free(dm->steering_sw_icm_alloc_blocks);
err_steering:
kfree(dm);
@@ -75,7 +94,7 @@ void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
WARN_ON(!bitmap_empty(dm->steering_sw_icm_alloc_blocks,
BIT(MLX5_CAP_DEV_MEM(dev, log_steering_sw_icm_size) -
MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
- kfree(dm->steering_sw_icm_alloc_blocks);
+ bitmap_free(dm->steering_sw_icm_alloc_blocks);
}
if (dm->header_modify_sw_icm_alloc_blocks) {
@@ -83,7 +102,15 @@ void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
BIT(MLX5_CAP_DEV_MEM(dev,
log_header_modify_sw_icm_size) -
MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
- kfree(dm->header_modify_sw_icm_alloc_blocks);
+ bitmap_free(dm->header_modify_sw_icm_alloc_blocks);
+ }
+
+ if (dm->header_modify_pattern_sw_icm_alloc_blocks) {
+ WARN_ON(!bitmap_empty(dm->header_modify_pattern_sw_icm_alloc_blocks,
+ BIT(MLX5_CAP_DEV_MEM(dev,
+ log_header_modify_pattern_sw_icm_size) -
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
+ bitmap_free(dm->header_modify_pattern_sw_icm_alloc_blocks);
}
kfree(dm);
@@ -130,6 +157,13 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
log_header_modify_sw_icm_size);
block_map = dm->header_modify_sw_icm_alloc_blocks;
break;
+ case MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN:
+ icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
+ header_modify_pattern_sw_icm_start_address);
+ log_icm_size = MLX5_CAP_DEV_MEM(dev,
+ log_header_modify_pattern_sw_icm_size);
+ block_map = dm->header_modify_pattern_sw_icm_alloc_blocks;
+ break;
default:
return -EINVAL;
}
@@ -203,6 +237,11 @@ int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type
icm_start_addr = MLX5_CAP64_DEV_MEM(dev, header_modify_sw_icm_start_address);
block_map = dm->header_modify_sw_icm_alloc_blocks;
break;
+ case MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN:
+ icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
+ header_modify_pattern_sw_icm_start_address);
+ block_map = dm->header_modify_pattern_sw_icm_alloc_blocks;
+ break;
default:
return -EINVAL;
}
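
[Editor note: a worked example of the bitmap sizing used above; the capability values are assumptions, only the formula comes from the patch.]

/* blocks = BIT(log_header_modify_pattern_sw_icm_size -
 *              MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
 * e.g. with log size 20 and block-size log 12:
 *   blocks = BIT(20 - 12) = 256 allocatable ICM blocks,
 * which is what bitmap_zalloc() above is asked to track.
 */
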
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
index d758848d34d0..696e45e2bd06 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
@@ -32,20 +32,17 @@ static void tout_set(struct mlx5_core_dev *dev, u64 val, enum mlx5_timeouts_type
dev->timeouts->to[type] = val;
}
-void mlx5_tout_set_def_val(struct mlx5_core_dev *dev)
+int mlx5_tout_init(struct mlx5_core_dev *dev)
{
int i;
- for (i = 0; i < MAX_TIMEOUT_TYPES; i++)
- tout_set(dev, tout_def_sw_val[i], i);
-}
-
-int mlx5_tout_init(struct mlx5_core_dev *dev)
-{
dev->timeouts = kmalloc(sizeof(*dev->timeouts), GFP_KERNEL);
if (!dev->timeouts)
return -ENOMEM;
+ for (i = 0; i < MAX_TIMEOUT_TYPES; i++)
+ tout_set(dev, tout_def_sw_val[i], i);
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
index 257c03eeab36..bc9e9aeda847 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
@@ -35,7 +35,6 @@ int mlx5_tout_init(struct mlx5_core_dev *dev);
void mlx5_tout_cleanup(struct mlx5_core_dev *dev);
void mlx5_tout_query_iseg(struct mlx5_core_dev *dev);
int mlx5_tout_query_dtor(struct mlx5_core_dev *dev);
-void mlx5_tout_set_def_val(struct mlx5_core_dev *dev);
u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type);
#define mlx5_tout_ms(dev, type) _mlx5_tout_ms(dev, MLX5_TO_##type##_MS)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c9b4e50a593e..bec8d6d0b5f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -90,6 +90,8 @@ module_param_named(prof_sel, prof_sel, uint, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
static u32 sw_owner_id[4];
+#define MAX_SW_VHCA_ID (BIT(__mlx5_bit_sz(cmd_hca_cap_2, sw_vhca_id)) - 1)
+static DEFINE_IDA(sw_vhca_ida);
enum {
MLX5_ATOMIC_REQ_MODE_BE = 0x0,
@@ -314,13 +316,6 @@ struct mlx5_reg_host_endianness {
u8 rsvd[15];
};
-#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))
-
-enum {
- MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
- MLX5_DEV_CAP_FLAG_DCT,
-};
-
static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
{
switch (size) {
@@ -499,6 +494,31 @@ static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev)
return err;
}
+static int handle_hca_cap_2(struct mlx5_core_dev *dev, void *set_ctx)
+{
+ void *set_hca_cap;
+ int err;
+
+ if (!MLX5_CAP_GEN_MAX(dev, hca_cap_2))
+ return 0;
+
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL_2);
+ if (err)
+ return err;
+
+ if (!MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) ||
+ !(dev->priv.sw_vhca_id > 0))
+ return 0;
+
+ set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
+ capability);
+ memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_GENERAL_2]->cur,
+ MLX5_ST_SZ_BYTES(cmd_hca_cap_2));
+ MLX5_SET(cmd_hca_cap_2, set_hca_cap, sw_vhca_id_valid, 1);
+
+ return set_caps(dev, set_ctx, MLX5_CAP_GENERAL_2);
+}
+
static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
{
struct mlx5_profile *prof = &dev->profile;
@@ -524,7 +544,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
/* Check log_max_qp from HCA caps to set in current profile */
if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) {
- prof->log_max_qp = min_t(u8, 17, MLX5_CAP_GEN_MAX(dev, log_max_qp));
+ prof->log_max_qp = min_t(u8, 18, MLX5_CAP_GEN_MAX(dev, log_max_qp));
} else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
prof->log_max_qp,
@@ -669,6 +689,13 @@ static int set_hca_cap(struct mlx5_core_dev *dev)
goto out;
}
+ memset(set_ctx, 0, set_sz);
+ err = handle_hca_cap_2(dev, set_ctx);
+ if (err) {
+ mlx5_core_err(dev, "handle_hca_cap_2 failed\n");
+ goto out;
+ }
+
out:
kfree(set_ctx);
return err;
@@ -1023,8 +1050,6 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, u64 timeout)
if (mlx5_core_is_pf(dev))
pcie_print_link_status(dev->pdev);
- mlx5_tout_set_def_val(dev);
-
/* wait for firmware to accept initialization segments configurations
*/
err = wait_fw_init(dev, timeout,
@@ -1257,6 +1282,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
{
mlx5_sf_dev_table_destroy(dev);
mlx5_sriov_detach(dev);
+ mlx5_eswitch_disable(dev->priv.eswitch);
mlx5_lag_remove_mdev(dev);
mlx5_ec_cleanup(dev);
mlx5_sf_hw_table_destroy(dev);
@@ -1276,8 +1302,10 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
int mlx5_init_one(struct mlx5_core_dev *dev)
{
+ struct devlink *devlink = priv_to_devlink(dev);
int err = 0;
+ devl_lock(devlink);
mutex_lock(&dev->intf_state_mutex);
dev->state = MLX5_DEVICE_STATE_UP;
@@ -1306,6 +1334,7 @@ int mlx5_init_one(struct mlx5_core_dev *dev)
goto err_register;
mutex_unlock(&dev->intf_state_mutex);
+ devl_unlock(devlink);
return 0;
err_register:
@@ -1320,11 +1349,15 @@ function_teardown:
err_function:
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
mutex_unlock(&dev->intf_state_mutex);
+ devl_unlock(devlink);
return err;
}
void mlx5_uninit_one(struct mlx5_core_dev *dev)
{
+ struct devlink *devlink = priv_to_devlink(dev);
+
+ devl_lock(devlink);
mutex_lock(&dev->intf_state_mutex);
mlx5_unregister_device(dev);
@@ -1343,13 +1376,15 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
mlx5_function_teardown(dev, true);
out:
mutex_unlock(&dev->intf_state_mutex);
+ devl_unlock(devlink);
}
-int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery)
+int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery)
{
int err = 0;
u64 timeout;
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&dev->intf_state_mutex);
if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
mlx5_core_warn(dev, "interface is up, NOP\n");
@@ -1391,8 +1426,20 @@ out:
return err;
}
-void mlx5_unload_one(struct mlx5_core_dev *dev)
+int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ int ret;
+
+ devl_lock(devlink);
+ ret = mlx5_load_one_devl_locked(dev, recovery);
+ devl_unlock(devlink);
+ return ret;
+}
+
+void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev)
{
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&dev->intf_state_mutex);
mlx5_detach_device(dev);
@@ -1410,6 +1457,15 @@ out:
mutex_unlock(&dev->intf_state_mutex);
}
+void mlx5_unload_one(struct mlx5_core_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+
+ devl_lock(devlink);
+ mlx5_unload_one_devl_locked(dev);
+ devl_unlock(devlink);
+}
+
static const int types[] = {
MLX5_CAP_GENERAL,
MLX5_CAP_GENERAL_2,
@@ -1512,6 +1568,18 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
if (err)
goto err_hca_caps;
+ /* The conjunction of sw_vhca_id with sw_owner_id will be a global
+ * unique id per function which uses mlx5_core.
+ * Those values are supplied to FW as part of the init HCA command to
+ * be used by both driver and FW when it's applicable.
+ */
+ dev->priv.sw_vhca_id = ida_alloc_range(&sw_vhca_ida, 1,
+ MAX_SW_VHCA_ID,
+ GFP_KERNEL);
+ if (dev->priv.sw_vhca_id < 0)
+ mlx5_core_err(dev, "failed to allocate sw_vhca_id, err=%d\n",
+ dev->priv.sw_vhca_id);
+
return 0;
err_hca_caps:
@@ -1536,6 +1604,9 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
+ if (priv->sw_vhca_id > 0)
+ ida_free(&sw_vhca_ida, dev->priv.sw_vhca_id);
+
mlx5_hca_caps_free(dev);
mlx5_adev_cleanup(dev);
mlx5_pagealloc_cleanup(dev);
@@ -1859,7 +1930,7 @@ MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
void mlx5_disable_device(struct mlx5_core_dev *dev)
{
mlx5_error_sw_reset(dev);
- mlx5_unload_one(dev);
+ mlx5_unload_one_devl_locked(dev);
}
int mlx5_recover_device(struct mlx5_core_dev *dev)
@@ -1870,7 +1941,7 @@ int mlx5_recover_device(struct mlx5_core_dev *dev)
return -EIO;
}
- return mlx5_load_one(dev, true);
+ return mlx5_load_one_devl_locked(dev, true);
}
static struct pci_driver mlx5_core_driver = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 9cc7afea2758..ad61b86d5769 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -290,7 +290,9 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
int mlx5_init_one(struct mlx5_core_dev *dev);
void mlx5_uninit_one(struct mlx5_core_dev *dev);
void mlx5_unload_one(struct mlx5_core_dev *dev);
+void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev);
int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery);
+int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery);
int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index 3be659cd91f1..7d955a4d9f14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -501,7 +501,7 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi
case MLX5_ESWITCH_OFFLOADS:
mlx5_sf_table_enable(table);
break;
- case MLX5_ESWITCH_NONE:
+ case MLX5_ESWITCH_LEGACY:
mlx5_sf_table_disable(table);
break;
default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 2935614f6fa9..ee2e1b7c1310 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -145,8 +145,7 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
sriov->vfs_ctx[vf].enabled = 0;
}
- if (MLX5_ESWITCH_MANAGER(dev))
- mlx5_eswitch_disable(dev->priv.eswitch, clear_vf);
+ mlx5_eswitch_disable_sriov(dev->priv.eswitch, clear_vf);
if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
@@ -155,13 +154,16 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct devlink *devlink = priv_to_devlink(dev);
int err;
+ devl_lock(devlink);
err = mlx5_device_enable_sriov(dev, num_vfs);
if (err) {
mlx5_core_warn(dev, "mlx5_device_enable_sriov failed : %d\n", err);
return err;
}
+ devl_unlock(devlink);
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
@@ -174,10 +176,13 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
void mlx5_sriov_disable(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct devlink *devlink = priv_to_devlink(dev);
int num_vfs = pci_num_vf(dev->pdev);
pci_disable_sriov(pdev);
+ devl_lock(devlink);
mlx5_device_disable_sriov(dev, num_vfs, true);
+ devl_unlock(devlink);
}
int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 1383550f44c1..b1dfad274a39 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -22,6 +22,7 @@ enum dr_action_valid_state {
DR_ACTION_STATE_PUSH_VLAN,
DR_ACTION_STATE_NON_TERM,
DR_ACTION_STATE_TERM,
+ DR_ACTION_STATE_ASO,
DR_ACTION_STATE_MAX,
};
@@ -42,6 +43,7 @@ static const char * const action_type_to_str[] = {
[DR_ACTION_TYP_SAMPLER] = "DR_ACTION_TYP_SAMPLER",
[DR_ACTION_TYP_INSERT_HDR] = "DR_ACTION_TYP_INSERT_HDR",
[DR_ACTION_TYP_REMOVE_HDR] = "DR_ACTION_TYP_REMOVE_HDR",
+ [DR_ACTION_TYP_ASO_FLOW_METER] = "DR_ACTION_TYP_ASO_FLOW_METER",
[DR_ACTION_TYP_MAX] = "DR_ACTION_UNKNOWN",
};
@@ -71,6 +73,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -85,6 +88,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -93,6 +97,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -105,6 +110,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -118,6 +124,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
@@ -128,6 +135,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -145,6 +153,13 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
+ },
+ [DR_ACTION_STATE_ASO] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_TERM] = {
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
@@ -163,18 +178,21 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -185,6 +203,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
@@ -196,6 +215,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -206,6 +226,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -219,6 +240,16 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
+ },
+ [DR_ACTION_STATE_ASO] = {
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
},
[DR_ACTION_STATE_TERM] = {
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
@@ -240,6 +271,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -253,6 +285,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -261,6 +294,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -272,6 +306,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -284,6 +319,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -296,6 +332,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -312,6 +349,13 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
+ },
+ [DR_ACTION_STATE_ASO] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_TERM] = {
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
@@ -331,6 +375,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -338,6 +383,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -345,6 +391,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -356,6 +403,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
@@ -368,6 +416,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -379,6 +428,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -393,6 +443,17 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
+ },
+ [DR_ACTION_STATE_ASO] = {
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_TERM] = {
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
@@ -738,6 +799,12 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
attr.reformat.param_0 = action->reformat->param_0;
attr.reformat.param_1 = action->reformat->param_1;
break;
+ case DR_ACTION_TYP_ASO_FLOW_METER:
+ attr.aso_flow_meter.obj_id = action->aso->obj_id;
+ attr.aso_flow_meter.offset = action->aso->offset;
+ attr.aso_flow_meter.dest_reg_id = action->aso->dest_reg_id;
+ attr.aso_flow_meter.init_color = action->aso->init_color;
+ break;
default:
mlx5dr_err(dmn, "Unsupported action type %d\n", action_type);
return -EINVAL;
@@ -798,6 +865,7 @@ static unsigned int action_size[DR_ACTION_TYP_MAX] = {
[DR_ACTION_TYP_INSERT_HDR] = sizeof(struct mlx5dr_action_reformat),
[DR_ACTION_TYP_REMOVE_HDR] = sizeof(struct mlx5dr_action_reformat),
[DR_ACTION_TYP_SAMPLER] = sizeof(struct mlx5dr_action_sampler),
+ [DR_ACTION_TYP_ASO_FLOW_METER] = sizeof(struct mlx5dr_action_aso_flow_meter),
};
static struct mlx5dr_action *
@@ -1830,6 +1898,34 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
return action;
}
+struct mlx5dr_action *
+mlx5dr_action_create_aso(struct mlx5dr_domain *dmn, u32 obj_id,
+ u8 dest_reg_id, u8 aso_type,
+ u8 init_color, u8 meter_id)
+{
+ struct mlx5dr_action *action;
+
+ if (aso_type != MLX5_EXE_ASO_FLOW_METER)
+ return NULL;
+
+ if (init_color > MLX5_FLOW_METER_COLOR_UNDEFINED)
+ return NULL;
+
+ action = dr_action_create_generic(DR_ACTION_TYP_ASO_FLOW_METER);
+ if (!action)
+ return NULL;
+
+ action->aso->obj_id = obj_id;
+ action->aso->offset = meter_id;
+ action->aso->dest_reg_id = dest_reg_id;
+ action->aso->init_color = init_color;
+ action->aso->dmn = dmn;
+
+ refcount_inc(&dmn->refcount);
+
+ return action;
+}
+
int mlx5dr_action_destroy(struct mlx5dr_action *action)
{
if (WARN_ON_ONCE(refcount_read(&action->refcount) > 1))
@@ -1881,6 +1977,9 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
case DR_ACTION_TYP_SAMPLER:
refcount_dec(&action->sampler->dmn->refcount);
break;
+ case DR_ACTION_TYP_ASO_FLOW_METER:
+ refcount_dec(&action->aso->dmn->refcount);
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
index d5998ef59be4..7adcf0eec13b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
@@ -21,10 +21,11 @@ enum dr_dump_rec_type {
DR_DUMP_REC_TYPE_TABLE_TX = 3102,
DR_DUMP_REC_TYPE_MATCHER = 3200,
- DR_DUMP_REC_TYPE_MATCHER_MASK = 3201,
+ DR_DUMP_REC_TYPE_MATCHER_MASK_DEPRECATED = 3201,
DR_DUMP_REC_TYPE_MATCHER_RX = 3202,
DR_DUMP_REC_TYPE_MATCHER_TX = 3203,
DR_DUMP_REC_TYPE_MATCHER_BUILDER = 3204,
+ DR_DUMP_REC_TYPE_MATCHER_MASK = 3205,
DR_DUMP_REC_TYPE_RULE = 3300,
DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V0 = 3301,
@@ -114,13 +115,15 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
break;
case DR_ACTION_TYP_FT:
if (action->dest_tbl->is_fw_tbl)
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x\n",
DR_DUMP_REC_TYPE_ACTION_FT, action_id,
- rule_id, action->dest_tbl->fw_tbl.id);
+ rule_id, action->dest_tbl->fw_tbl.id,
+ -1);
else
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%llx\n",
DR_DUMP_REC_TYPE_ACTION_FT, action_id,
- rule_id, action->dest_tbl->tbl->table_id);
+ rule_id, action->dest_tbl->tbl->table_id,
+ DR_DBG_PTR_TO_ID(action->dest_tbl->tbl));
break;
case DR_ACTION_TYP_CTR:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index fcb962c6db2e..ee677a5c76be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -89,6 +89,7 @@ enum dr_ste_v1_action_id {
DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d,
DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e,
DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f,
+ DR_STE_V1_ACTION_ID_ASO = 0x12,
DR_STE_V1_ACTION_ID_TRAILER = 0x13,
DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14,
DR_STE_V1_ACTION_ID_MAX = 0x21,
@@ -129,6 +130,10 @@ enum {
DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91,
};
+enum dr_ste_v1_aso_ctx_type {
+ DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2,
+};
+
static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
[MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
@@ -494,6 +499,27 @@ static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p,
dr_ste_v1_set_reparse(hw_ste_p);
}
+static void dr_ste_v1_set_aso_flow_meter(u8 *d_action,
+ u32 object_id,
+ u32 offset,
+ u8 dest_reg_id,
+ u8 init_color)
+{
+ MLX5_SET(ste_double_action_aso_v1, d_action, action_id,
+ DR_STE_V1_ACTION_ID_ASO);
+ MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_number,
+ object_id + (offset / MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ));
+ /* Convert reg_c index to HW 64bit index */
+ MLX5_SET(ste_double_action_aso_v1, d_action, dest_reg_id,
+ (dest_reg_id - 1) / 2);
+ MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_type,
+ DR_STE_V1_ASO_CTX_TYPE_POLICERS);
+ MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.line_id,
+ offset % MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ);
+ MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.initial_color,
+ init_color);
+}
+
static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
u32 *added_stes,
u16 gvmi)
@@ -629,6 +655,21 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action += DR_STE_ACTION_SINGLE_SZ;
}
+ if (action_type_set[DR_ACTION_TYP_ASO_FLOW_METER]) {
+ if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ }
+ dr_ste_v1_set_aso_flow_meter(action,
+ attr->aso_flow_meter.obj_id,
+ attr->aso_flow_meter.offset,
+ attr->aso_flow_meter.dest_reg_id,
+ attr->aso_flow_meter.init_color);
+ action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+ action += DR_STE_ACTION_DOUBLE_SZ;
+ }
+
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
@@ -802,6 +843,21 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action += DR_STE_ACTION_SINGLE_SZ;
}
+ if (action_type_set[DR_ACTION_TYP_ASO_FLOW_METER]) {
+ if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ }
+ dr_ste_v1_set_aso_flow_meter(action,
+ attr->aso_flow_meter.obj_id,
+ attr->aso_flow_meter.offset,
+ attr->aso_flow_meter.dest_reg_id,
+ attr->aso_flow_meter.init_color);
+ action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+ action += DR_STE_ACTION_DOUBLE_SZ;
+ }
+
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
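
[Editor note: a worked example of the index math in dr_ste_v1_set_aso_flow_meter() above, with assumed inputs; the constants come from the patch, the input values do not.]

/* Assumed inputs: obj_id = 0x100, offset = 5, dest_reg_id = 5 (reg_c_5),
 * with MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ = 2 (see mlx5_ifc_dr.h below):
 *
 *   aso_context_number = 0x100 + 5 / 2 = 0x102
 *   flow_meter.line_id = 5 % 2         = 1
 *   dest_reg_id (HW)   = (5 - 1) / 2   = 2   (reg_c pair index)
 */
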
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 98320e3945ad..feed227944b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -127,6 +127,7 @@ enum mlx5dr_action_type {
DR_ACTION_TYP_INSERT_HDR,
DR_ACTION_TYP_REMOVE_HDR,
DR_ACTION_TYP_SAMPLER,
+ DR_ACTION_TYP_ASO_FLOW_METER,
DR_ACTION_TYP_MAX,
};
@@ -271,6 +272,13 @@ struct mlx5dr_ste_actions_attr {
int count;
u32 headers[MLX5DR_MAX_VLANS];
} vlans;
+
+ struct {
+ u32 obj_id;
+ u32 offset;
+ u8 dest_reg_id;
+ u8 init_color;
+ } aso_flow_meter;
};
void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
@@ -1035,6 +1043,14 @@ struct mlx5dr_rule_action_member {
struct list_head list;
};
+struct mlx5dr_action_aso_flow_meter {
+ struct mlx5dr_domain *dmn;
+ u32 obj_id;
+ u32 offset;
+ u8 dest_reg_id;
+ u8 init_color;
+};
+
struct mlx5dr_action {
enum mlx5dr_action_type action_type;
refcount_t refcount;
@@ -1049,6 +1065,7 @@ struct mlx5dr_action {
struct mlx5dr_action_vport *vport;
struct mlx5dr_action_push_vlan *push_vlan;
struct mlx5dr_action_flow_tag *flow_tag;
+ struct mlx5dr_action_aso_flow_meter *aso;
};
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 6a9abba92df6..75672663359d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -500,6 +500,27 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
}
}
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
+ if (fte->action.exe_aso.type != MLX5_EXE_ASO_FLOW_METER) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+
+ tmp_action =
+ mlx5dr_action_create_aso(domain,
+ fte->action.exe_aso.object_id,
+ fte->action.exe_aso.return_reg_id,
+ fte->action.exe_aso.type,
+ fte->action.exe_aso.flow_meter.init_color,
+ fte->action.exe_aso.flow_meter.meter_idx);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ actions[num_actions++] = tmp_action;
+ }
+
params.match_sz = match_sz;
params.match_buf = (u64 *)fte->val;
if (num_term_actions == 1) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
index 9604b2091358..fb078fa0f0cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
@@ -574,4 +574,30 @@ struct mlx5_ifc_dr_action_hw_copy_bits {
u8 reserved_at_38[0x8];
};
+enum {
+ MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ = 2,
+};
+
+struct mlx5_ifc_ste_aso_flow_meter_action_bits {
+ u8 reserved_at_0[0xc];
+ u8 action[0x1];
+ u8 initial_color[0x2];
+ u8 line_id[0x1];
+};
+
+struct mlx5_ifc_ste_double_action_aso_v1_bits {
+ u8 action_id[0x8];
+ u8 aso_context_number[0x18];
+
+ u8 dest_reg_id[0x2];
+ u8 change_ordering_tag[0x1];
+ u8 aso_check_ordering[0x1];
+ u8 aso_context_type[0x4];
+ u8 reserved_at_28[0x8];
+ union {
+ u8 aso_fields[0x10];
+ struct mlx5_ifc_ste_aso_flow_meter_action_bits flow_meter;
+ };
+};
+
#endif /* MLX5_IFC_DR_H */
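(A quick arithmetic check of the new layout, illustrative only: the flow_meter overlay is 0xc + 0x1 + 0x2 + 0x1 = 0x10 bits, exactly filling the 16-bit aso_fields union member, and the whole double action sums to 0x8 + 0x18 + 0x2 + 0x1 + 0x1 + 0x4 + 0x8 + 0x10 = 0x40 bits, i.e. the 64 bits expected of a double action. Expressed as compile-time checks:)

_Static_assert(0xc + 0x1 + 0x2 + 0x1 == 0x10,
	       "flow_meter overlay fills the 16-bit union");
_Static_assert(0x8 + 0x18 + 0x2 + 0x1 + 0x1 + 0x4 + 0x8 + 0x10 == 0x40,
	       "double ASO action is 64 bits");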
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index 7626c85643b1..ecffc2cbe6fe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -131,6 +131,14 @@ struct mlx5dr_action *mlx5dr_action_create_pop_vlan(void);
struct mlx5dr_action *
mlx5dr_action_create_push_vlan(struct mlx5dr_domain *domain, __be32 vlan_hdr);
+struct mlx5dr_action *
+mlx5dr_action_create_aso(struct mlx5dr_domain *dmn,
+ u32 obj_id,
+ u8 return_reg_id,
+ u8 aso_type,
+ u8 init_color,
+ u8 meter_id);
+
int mlx5dr_action_destroy(struct mlx5dr_action *action);
static inline bool
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index ac020cb78072..d5c317325030 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -1086,9 +1086,17 @@ int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
goto free;
MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
- MLX5_SET(modify_nic_vport_context_in, in,
- nic_vport_context.affiliated_vhca_id,
- MLX5_CAP_GEN(master_mdev, vhca_id));
+ if (MLX5_CAP_GEN_2(master_mdev, sw_vhca_id_valid)) {
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.vhca_id_type, VHCA_ID_TYPE_SW);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.affiliated_vhca_id,
+ MLX5_CAP_GEN_2(master_mdev, sw_vhca_id));
+ } else {
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.affiliated_vhca_id,
+ MLX5_CAP_GEN(master_mdev, vhca_id));
+ }
MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.affiliation_criteria,
MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria));
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index 84621b4cb15b..b03e1c66bac0 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -19,8 +19,6 @@
#include "mlxbf_gige.h"
#include "mlxbf_gige_regs.h"
-#define DRV_NAME "mlxbf_gige"
-
/* Allocate SKB whose payload pointer aligns with the Bluefield
* hardware DMA limitation, i.e. DMA operation can't cross
* a 4KB boundary. A maximum packet size of 2KB is assumed in the
@@ -427,7 +425,7 @@ static struct platform_driver mlxbf_gige_driver = {
.remove = mlxbf_gige_remove,
.shutdown = mlxbf_gige_shutdown,
.driver = {
- .name = DRV_NAME,
+ .name = KBUILD_MODNAME,
.acpi_match_table = ACPI_PTR(mlxbf_gige_acpi_match),
},
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 4683312861ac..a510bf2cff2f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -7,6 +7,7 @@ config MLXSW_CORE
tristate "Mellanox Technologies Switch ASICs support"
select NET_DEVLINK
select MLXFW
+ select AUXILIARY_BUS
help
This driver supports Mellanox Technologies Switch ASICs family.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 1a465fd5d8b3..3ca9fce759ea 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -2,7 +2,7 @@
obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o
mlxsw_core-objs := core.o core_acl_flex_keys.o \
core_acl_flex_actions.o core_env.o \
- core_linecards.o
+ core_linecards.o core_linecard_dev.o
mlxsw_core-$(CONFIG_MLXSW_CORE_HWMON) += core_hwmon.o
mlxsw_core-$(CONFIG_MLXSW_CORE_THERMAL) += core_thermal.o
obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o
@@ -12,7 +12,6 @@ mlxsw_i2c-objs := i2c.o
obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_switchdev.o spectrum_router.o \
- spectrum_router_xm.o \
spectrum1_kvdl.o spectrum2_kvdl.o \
spectrum_kvdl.o \
spectrum_acl_tcam.o spectrum_acl_ctcam.o \
@@ -29,7 +28,8 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_qdisc.o spectrum_span.o \
spectrum_nve.o spectrum_nve_vxlan.o \
spectrum_dpipe.o spectrum_trap.o \
- spectrum_ethtool.o spectrum_policer.o
+ spectrum_ethtool.o spectrum_policer.o \
+ spectrum_pgt.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
mlxsw_spectrum-$(CONFIG_PTP_1588_CLOCK) += spectrum_ptp.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index 51b260d54237..60232fb8ccd7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -329,6 +329,32 @@ MLXSW_ITEM64(cmd_mbox, query_fw, free_running_clock_offset, 0x50, 0, 64);
*/
MLXSW_ITEM32(cmd_mbox, query_fw, fr_rn_clk_bar, 0x58, 30, 2);
+/* cmd_mbox_query_fw_utc_sec_offset
+ * The offset of the UTC_Sec page
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, utc_sec_offset, 0x70, 0, 64);
+
+/* cmd_mbox_query_fw_utc_sec_bar
+ * PCI base address register (BAR) of the UTC_Sec page
+ * 0: BAR 0
+ * 1: 64 bit BAR
+ * Reserved on SwitchX/-2, Switch-IB/2, Spectrum-1
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, utc_sec_bar, 0x78, 30, 2);
+
+/* cmd_mbox_query_fw_utc_nsec_offset
+ * The offset of the UTC_nSec page
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, utc_nsec_offset, 0x80, 0, 64);
+
+/* cmd_mbox_query_fw_utc_nsec_bar
+ * PCI base address register (BAR) of the UTC_nSec page
+ * 0: BAR 0
+ * 1: 64 bit BAR
+ * Reserved on SwitchX/-2, Switch-IB/2, Spectrum-1
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, utc_nsec_bar, 0x88, 30, 2);
+
/* QUERY_BOARDINFO - Query Board Information
* -----------------------------------------
* OpMod == 0 (N/A), INMmod == 0 (N/A)
@@ -343,23 +369,6 @@ static inline int mlxsw_cmd_boardinfo(struct mlxsw_core *mlxsw_core,
0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
}
-/* cmd_mbox_xm_num_local_ports
- * Number of local_ports connected to the xm.
- * Each local port is a 4x
- * Spectrum-2/3: 25G
- * Spectrum-4: 50G
- */
-MLXSW_ITEM32(cmd_mbox, boardinfo, xm_num_local_ports, 0x00, 4, 3);
-
-/* cmd_mbox_xm_exists
- * An XM (eXtanded Mezanine, e.g. used for the XLT) is connected on the board.
- */
-MLXSW_ITEM32(cmd_mbox, boardinfo, xm_exists, 0x00, 0, 1);
-
-/* cmd_mbox_xm_local_port_entry
- */
-MLXSW_ITEM_BIT_ARRAY(cmd_mbox, boardinfo, xm_local_port_entry, 0x04, 4, 8);
-
/* cmd_mbox_boardinfo_intapin
* When PCIe interrupt messages are being used, this value is used for clearing
* an interrupt. When using MSI-X, this register is not used.
@@ -650,6 +659,12 @@ MLXSW_ITEM32(cmd_mbox, config_profile,
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1);
+/* cmd_mbox_config_set_ubridge
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_ubridge, 0x0C, 22, 1);
+
/* cmd_mbox_config_set_kvd_linear_size
* Capability bit. Setting a bit to 1 configures the profile
* according to the mailbox contents.
@@ -674,11 +689,11 @@ MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_double_size, 0x0C, 26, 1);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_cqe_version, 0x08, 0, 1);
-/* cmd_mbox_config_set_kvh_xlt_cache_mode
+/* cmd_mbox_config_set_cqe_time_stamp_type
* Capability bit. Setting a bit to 1 configures the profile
* according to the mailbox contents.
*/
-MLXSW_ITEM32(cmd_mbox, config_profile, set_kvh_xlt_cache_mode, 0x08, 3, 1);
+MLXSW_ITEM32(cmd_mbox, config_profile, set_cqe_time_stamp_type, 0x08, 2, 1);
/* cmd_mbox_config_profile_max_vepa_channels
* Maximum number of VEPA channels per port (0 through 16)
@@ -736,16 +751,25 @@ MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, max_vid_flood_tables, 0x30, 8, 4);
+enum mlxsw_cmd_mbox_config_profile_flood_mode {
+ /* Mixed mode, where:
+ * max_flood_tables indicates the number of single-entry tables.
+ * max_vid_flood_tables indicates the number of per-VID tables.
+ * max_fid_offset_flood_tables indicates the number of FID-offset
+ * tables. max_fid_flood_tables indicates the number of per-FID tables.
+ * Reserved when unified bridge model is used.
+ */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_MIXED = 3,
+ /* Controlled flood tables. Reserved when legacy bridge model is
+ * used.
+ */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED = 4,
+};
+
/* cmd_mbox_config_profile_flood_mode
* Flooding mode to use.
- * 0-2 - Backward compatible modes for SwitchX devices.
- * 3 - Mixed mode, where:
- * max_flood_tables indicates the number of single-entry tables.
- * max_vid_flood_tables indicates the number of per-VID tables.
- * max_fid_offset_flood_tables indicates the number of FID-offset tables.
- * max_fid_flood_tables indicates the number of per-FID tables.
*/
-MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 2);
+MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 3);
/* cmd_mbox_config_profile_max_fid_offset_flood_tables
* Maximum number of FID-offset flooding tables.
@@ -806,12 +830,12 @@ MLXSW_ITEM32(cmd_mbox, config_profile, adaptive_routing_group_cap, 0x4C, 0, 16);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1);
-/* cmd_mbox_config_profile_kvh_xlt_cache_mode
- * KVH XLT cache mode:
- * 0 - XLT can use all KVH as best-effort
- * 1 - XLT cache uses 1/2 KVH
+/* cmd_mbox_config_profile_ubridge
+ * Unified Bridge
+ * 0 - non unified bridge
+ * 1 - unified bridge
*/
-MLXSW_ITEM32(cmd_mbox, config_profile, kvh_xlt_cache_mode, 0x50, 8, 4);
+MLXSW_ITEM32(cmd_mbox, config_profile, ubridge, 0x50, 4, 1);
/* cmd_mbox_config_kvd_linear_size
* KVD Linear Size
@@ -866,6 +890,26 @@ MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_type,
MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties,
0x60, 0, 8, 0x08, 0x00, false);
+enum mlxsw_cmd_mbox_config_profile_cqe_time_stamp_type {
+ /* uSec - 1.024uSec (default). Only bits 15:0 are valid. */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_USEC,
+ /* FRC - Free Running Clock, units of 1nSec.
+ * Reserved when SwitchX/-2, Switch-IB/2 and Spectrum-1.
+ */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_FRC,
+ /* UTC. time_stamp[37:30] = Sec, time_stamp[29:0] = nSec.
+ * Reserved when SwitchX/-2, Switch-IB/2 and Spectrum-1.
+ */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
+};
+
+/* cmd_mbox_config_profile_cqe_time_stamp_type
+ * CQE time_stamp_type for non-mirror-packets.
+ * Configured if set_cqe_time_stamp_type is set.
+ * Reserved when SwitchX/-2, Switch-IB/2 and Spectrum-1.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, cqe_time_stamp_type, 0xB0, 8, 2);
+
/* cmd_mbox_config_profile_cqe_version
* CQE version:
* 0: CQE version is 0
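(Illustration only, not part of the patch: decoding a UTC-format CQE time stamp as described above, where bits [37:30] carry the low 8 bits of seconds and bits [29:0] carry nanoseconds.)

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ts = ((uint64_t)42 << 30) | 123456789;	/* sample 38-bit value */
	unsigned int sec = (ts >> 30) & 0xff;		/* 42 */
	unsigned int nsec = ts & 0x3fffffff;		/* 123456789 */

	printf("sec=%u nsec=%u\n", sec, nsec);
	return 0;
}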
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index fc52832241b3..75553eb2c7f2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -127,11 +127,11 @@ static int mlxsw_core_resources_ports_register(struct mlxsw_core *mlxsw_core)
max_ports, 1,
DEVLINK_RESOURCE_UNIT_ENTRY);
- return devlink_resource_register(devlink,
- DEVLINK_RESOURCE_GENERIC_NAME_PORTS,
- max_ports, MLXSW_CORE_RESOURCE_PORTS,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &ports_num_params);
+ return devl_resource_register(devlink,
+ DEVLINK_RESOURCE_GENERIC_NAME_PORTS,
+ max_ports, MLXSW_CORE_RESOURCE_PORTS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &ports_num_params);
}
static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core, bool reload)
@@ -157,8 +157,8 @@ static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core, bool reload)
goto err_resources_ports_register;
}
atomic_set(&mlxsw_core->active_ports_count, 0);
- devlink_resource_occ_get_register(devlink, MLXSW_CORE_RESOURCE_PORTS,
- mlxsw_ports_occ_get, mlxsw_core);
+ devl_resource_occ_get_register(devlink, MLXSW_CORE_RESOURCE_PORTS,
+ mlxsw_ports_occ_get, mlxsw_core);
return 0;
@@ -171,9 +171,9 @@ static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core, bool reload)
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
- devlink_resource_occ_get_unregister(devlink, MLXSW_CORE_RESOURCE_PORTS);
+ devl_resource_occ_get_unregister(devlink, MLXSW_CORE_RESOURCE_PORTS);
if (!reload)
- devlink_resources_unregister(priv_to_devlink(mlxsw_core));
+ devl_resources_unregister(priv_to_devlink(mlxsw_core));
kfree(mlxsw_core->ports);
}
@@ -951,6 +951,20 @@ static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
return mlxsw_driver;
}
+int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core,
+ struct mlxfw_dev *mlxfw_dev,
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ mlxsw_core->fw_flash_in_progress = true;
+ err = mlxfw_firmware_flash(mlxfw_dev, firmware, extack);
+ mlxsw_core->fw_flash_in_progress = false;
+
+ return err;
+}
+
struct mlxsw_core_fw_info {
struct mlxfw_dev mlxfw_dev;
struct mlxsw_core *mlxsw_core;
@@ -1105,8 +1119,9 @@ static const struct mlxfw_dev_ops mlxsw_core_fw_mlxsw_dev_ops = {
.fsm_release = mlxsw_core_fw_fsm_release,
};
-static int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core, const struct firmware *firmware,
- struct netlink_ext_ack *extack)
+static int mlxsw_core_dev_fw_flash(struct mlxsw_core *mlxsw_core,
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_core_fw_info mlxsw_core_fw_info = {
.mlxfw_dev = {
@@ -1117,13 +1132,9 @@ static int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core, const struct firmw
},
.mlxsw_core = mlxsw_core
};
- int err;
- mlxsw_core->fw_flash_in_progress = true;
- err = mlxfw_firmware_flash(&mlxsw_core_fw_info.mlxfw_dev, firmware, extack);
- mlxsw_core->fw_flash_in_progress = false;
-
- return err;
+ return mlxsw_core_fw_flash(mlxsw_core, &mlxsw_core_fw_info.mlxfw_dev,
+ firmware, extack);
}
static int mlxsw_core_fw_rev_validate(struct mlxsw_core *mlxsw_core,
@@ -1169,7 +1180,7 @@ static int mlxsw_core_fw_rev_validate(struct mlxsw_core *mlxsw_core,
return err;
}
- err = mlxsw_core_fw_flash(mlxsw_core, firmware, NULL);
+ err = mlxsw_core_dev_fw_flash(mlxsw_core, firmware, NULL);
release_firmware(firmware);
if (err)
dev_err(mlxsw_bus_info->dev, "Could not upgrade firmware\n");
@@ -1187,7 +1198,7 @@ static int mlxsw_core_fw_flash_update(struct mlxsw_core *mlxsw_core,
struct devlink_flash_update_params *params,
struct netlink_ext_ack *extack)
{
- return mlxsw_core_fw_flash(mlxsw_core, params->fw, extack);
+ return mlxsw_core_dev_fw_flash(mlxsw_core, params->fw, extack);
}
static int mlxsw_core_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
@@ -1498,13 +1509,15 @@ mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink, enum devlink_re
struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ int err;
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
- return mlxsw_core_bus_device_register(mlxsw_core->bus_info,
- mlxsw_core->bus,
- mlxsw_core->bus_priv, true,
- devlink, extack);
+ err = mlxsw_core_bus_device_register(mlxsw_core->bus_info,
+ mlxsw_core->bus,
+ mlxsw_core->bus_priv, true,
+ devlink, extack);
+ return err;
}
static int mlxsw_devlink_flash_update(struct devlink *devlink,
@@ -2102,6 +2115,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
err = -ENOMEM;
goto err_devlink_alloc;
}
+ devl_lock(devlink);
}
mlxsw_core = devlink_priv(devlink);
@@ -2187,6 +2201,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (!reload) {
devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devl_unlock(devlink);
devlink_register(devlink);
}
return 0;
@@ -2214,12 +2229,14 @@ err_alloc_lag_mapping:
mlxsw_ports_fini(mlxsw_core, reload);
err_ports_init:
if (!reload)
- devlink_resources_unregister(devlink);
+ devl_resources_unregister(devlink);
err_register_resources:
mlxsw_bus->fini(bus_priv);
err_bus_init:
- if (!reload)
+ if (!reload) {
+ devl_unlock(devlink);
devlink_free(devlink);
+ }
err_devlink_alloc:
return err;
}
@@ -2255,8 +2272,10 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
- if (!reload)
+ if (!reload) {
devlink_unregister(devlink);
+ devl_lock(devlink);
+ }
if (devlink_is_reload_failed(devlink)) {
if (!reload)
@@ -2281,16 +2300,19 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
kfree(mlxsw_core->lag.mapping);
mlxsw_ports_fini(mlxsw_core, reload);
if (!reload)
- devlink_resources_unregister(devlink);
+ devl_resources_unregister(devlink);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
- if (!reload)
+ if (!reload) {
+ devl_unlock(devlink);
devlink_free(devlink);
+ }
return;
reload_fail_deinit:
mlxsw_core_params_unregister(mlxsw_core);
- devlink_resources_unregister(devlink);
+ devl_resources_unregister(devlink);
+ devl_unlock(devlink);
devlink_free(devlink);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
@@ -3151,18 +3173,6 @@ mlxsw_core_port_linecard_get(struct mlxsw_core *mlxsw_core,
return mlxsw_core_port->linecard;
}
-bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port)
-{
- const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;
- int i;
-
- for (i = 0; i < bus_info->xm_local_ports_count; i++)
- if (bus_info->xm_local_ports[i] == local_port)
- return true;
- return false;
-}
-EXPORT_SYMBOL(mlxsw_core_port_is_xm);
-
void mlxsw_core_ports_remove_selected(struct mlxsw_core *mlxsw_core,
bool (*selector)(void *priv, u16 local_port),
void *priv)
@@ -3321,6 +3331,24 @@ u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_read_frc_l);
+u32 mlxsw_core_read_utc_sec(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->bus->read_utc_sec(mlxsw_core->bus_priv);
+}
+EXPORT_SYMBOL(mlxsw_core_read_utc_sec);
+
+u32 mlxsw_core_read_utc_nsec(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->bus->read_utc_nsec(mlxsw_core->bus_priv);
+}
+EXPORT_SYMBOL(mlxsw_core_read_utc_nsec);
+
+bool mlxsw_core_sdq_supports_cqe_v2(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->driver->sdq_supports_cqe_v2;
+}
+EXPORT_SYMBOL(mlxsw_core_sdq_supports_cqe_v2);
+
void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core)
{
mlxsw_core->emad.enable_string_tlv = true;
@@ -3331,9 +3359,15 @@ static int __init mlxsw_core_module_init(void)
{
int err;
+ err = mlxsw_linecard_driver_register();
+ if (err)
+ return err;
+
mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
- if (!mlxsw_wq)
- return -ENOMEM;
+ if (!mlxsw_wq) {
+ err = -ENOMEM;
+ goto err_alloc_workqueue;
+ }
mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
mlxsw_core_driver_name);
if (!mlxsw_owq) {
@@ -3344,6 +3378,8 @@ static int __init mlxsw_core_module_init(void)
err_alloc_ordered_workqueue:
destroy_workqueue(mlxsw_wq);
+err_alloc_workqueue:
+ mlxsw_linecard_driver_unregister();
return err;
}
@@ -3351,6 +3387,7 @@ static void __exit mlxsw_core_module_exit(void)
{
destroy_workqueue(mlxsw_owq);
destroy_workqueue(mlxsw_wq);
+ mlxsw_linecard_driver_unregister();
}
module_init(mlxsw_core_module_init);
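(For context, a sketch rather than part of the patch: the devlink_resource_*() to devl_resource_*() conversions above move to the locked devlink API, which expects the caller to hold the devlink instance lock. Using only calls that appear in the hunks above, with error handling elided, the registration path now has this shape:)

	devl_lock(devlink);
	devl_resource_register(devlink, DEVLINK_RESOURCE_GENERIC_NAME_PORTS,
			       max_ports, MLXSW_CORE_RESOURCE_PORTS,
			       DEVLINK_RESOURCE_ID_PARENT_TOP, &ports_num_params);
	devl_resource_occ_get_register(devlink, MLXSW_CORE_RESOURCE_PORTS,
				       mlxsw_ports_occ_get, mlxsw_core);
	/* ... rest of driver init ... */
	devl_unlock(devlink);
	devlink_register(devlink);	/* instance becomes visible only now */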
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index c2a891287047..02d9cc2ef0c8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -12,12 +12,14 @@
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/net_namespace.h>
+#include <linux/auxiliary_bus.h>
#include <net/devlink.h>
#include "trap.h"
#include "reg.h"
#include "cmd.h"
#include "resources.h"
+#include "../mlxfw/mlxfw.h"
enum mlxsw_core_resource_id {
MLXSW_CORE_RESOURCE_PORTS = 1,
@@ -47,6 +49,11 @@ mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver);
void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
+int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core,
+ struct mlxfw_dev *mlxfw_dev,
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack);
+
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
void *bus_priv, bool reload,
@@ -261,7 +268,6 @@ mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
struct mlxsw_linecard *
mlxsw_core_port_linecard_get(struct mlxsw_core *mlxsw_core,
u16 local_port);
-bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u16 local_port);
void mlxsw_core_ports_remove_selected(struct mlxsw_core *mlxsw_core,
bool (*selector)(void *priv,
u16 local_port),
@@ -296,8 +302,9 @@ struct mlxsw_config_profile {
used_max_pkey:1,
used_ar_sec:1,
used_adaptive_routing_group_cap:1,
+ used_ubridge:1,
used_kvd_sizes:1,
- used_kvh_xlt_cache_mode:1;
+ used_cqe_time_stamp_type:1;
u8 max_vepa_channels;
u16 max_mid;
u16 max_pgt;
@@ -316,10 +323,11 @@ struct mlxsw_config_profile {
u8 ar_sec;
u16 adaptive_routing_group_cap;
u8 arn;
+ u8 ubridge;
u32 kvd_linear_size;
u8 kvd_hash_single_parts;
u8 kvd_hash_double_parts;
- u8 kvh_xlt_cache_mode;
+ u8 cqe_time_stamp_type;
struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
};
@@ -419,6 +427,7 @@ struct mlxsw_driver {
u8 txhdr_len;
const struct mlxsw_config_profile *profile;
+ bool sdq_supports_cqe_v2;
};
int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
@@ -429,6 +438,11 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core);
u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core);
+u32 mlxsw_core_read_utc_sec(struct mlxsw_core *mlxsw_core);
+u32 mlxsw_core_read_utc_nsec(struct mlxsw_core *mlxsw_core);
+
+bool mlxsw_core_sdq_supports_cqe_v2(struct mlxsw_core *mlxsw_core);
+
void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core);
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
@@ -468,6 +482,8 @@ struct mlxsw_bus {
u8 *p_status);
u32 (*read_frc_h)(void *bus_priv);
u32 (*read_frc_l)(void *bus_priv);
+ u32 (*read_utc_sec)(void *bus_priv);
+ u32 (*read_utc_nsec)(void *bus_priv);
u8 features;
};
@@ -478,8 +494,6 @@ struct mlxsw_fw_rev {
u16 can_reset_minor;
};
-#define MLXSW_BUS_INFO_XM_LOCAL_PORTS_MAX 4
-
struct mlxsw_bus_info {
const char *device_kind;
const char *device_name;
@@ -488,10 +502,7 @@ struct mlxsw_bus_info {
u8 vsd[MLXSW_CMD_BOARDINFO_VSD_LEN];
u8 psid[MLXSW_CMD_BOARDINFO_PSID_LEN];
u8 low_frequency:1,
- read_frc_capable:1,
- xm_exists:1;
- u8 xm_local_ports_count;
- u8 xm_local_ports[MLXSW_BUS_INFO_XM_LOCAL_PORTS_MAX];
+ read_clock_capable:1;
};
struct mlxsw_hwmon;
@@ -547,11 +558,17 @@ enum mlxsw_devlink_param_id {
MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
};
+struct mlxsw_cqe_ts {
+ u8 sec;
+ u32 nsec;
+};
+
struct mlxsw_skb_cb {
union {
struct mlxsw_tx_info tx_info;
struct mlxsw_rx_md_info rx_md_info;
};
+ struct mlxsw_cqe_ts cqe_ts;
};
static inline struct mlxsw_skb_cb *mlxsw_skb_cb(struct sk_buff *skb)
@@ -567,6 +584,15 @@ enum mlxsw_linecard_status_event_type {
MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION,
};
+struct mlxsw_linecard_bdev;
+
+struct mlxsw_linecard_device_info {
+ u16 fw_major;
+ u16 fw_minor;
+ u16 fw_sub_minor;
+ char psid[MLXSW_REG_MGIR_FW_INFO_PSID_SIZE];
+};
+
struct mlxsw_linecard {
u8 slot_index;
struct mlxsw_linecards *linecards;
@@ -581,6 +607,11 @@ struct mlxsw_linecard {
active:1;
u16 hw_revision;
u16 ini_version;
+ struct mlxsw_linecard_bdev *bdev;
+ struct {
+ struct mlxsw_linecard_device_info info;
+ u8 index;
+ } device;
};
struct mlxsw_linecard_types_info;
@@ -601,6 +632,14 @@ mlxsw_linecard_get(struct mlxsw_linecards *linecards, u8 slot_index)
return &linecards->linecards[slot_index - 1];
}
+int mlxsw_linecard_devlink_info_get(struct mlxsw_linecard *linecard,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+int mlxsw_linecard_flash_update(struct devlink *linecard_devlink,
+ struct mlxsw_linecard *linecard,
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack);
+
int mlxsw_linecards_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *bus_info);
void mlxsw_linecards_fini(struct mlxsw_core *mlxsw_core);
@@ -620,4 +659,10 @@ void mlxsw_linecards_event_ops_unregister(struct mlxsw_core *mlxsw_core,
struct mlxsw_linecards_event_ops *ops,
void *priv);
+int mlxsw_linecard_bdev_add(struct mlxsw_linecard *linecard);
+void mlxsw_linecard_bdev_del(struct mlxsw_linecard *linecard);
+
+int mlxsw_linecard_driver_register(void);
+void mlxsw_linecard_driver_unregister(void);
+
#endif
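(A sketch, not from the patch: the new mlxsw_skb_cb.cqe_ts carries only the low 8 bits of UTC seconds plus a 30-bit nanosecond value, so a consumer holding a full seconds counter, e.g. from mlxsw_core_read_utc_sec(), could reconstruct the full time stamp roughly as follows; the helper name and rollover handling are hypothetical.)

#include <stdint.h>

static uint64_t example_full_sec(uint32_t utc_sec_now, uint8_t cqe_sec)
{
	/* Re-insert the CQE's low byte into the current 256 s window and
	 * step back one window if that would land in the future.
	 */
	uint64_t sec = ((uint64_t)utc_sec_now & ~0xffULL) | cqe_sec;

	if (sec > utc_sec_now)
		sec -= 0x100;
	return sec;
}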
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index fa33caecc91d..636db9a87457 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -1164,7 +1164,7 @@ EXPORT_SYMBOL(mlxsw_afa_block_append_vlan_modify);
* trap control. In addition, the Trap / Discard action enables activating
* SPAN (port mirroring).
*
- * The Trap with userdef action action has the same functionality as
+ * The Trap with userdef action has the same functionality as
* the Trap action with addition of user defined value that can be set
* and used by higher layer applications.
*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 34bec9cd572c..0107cbc32fc7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -180,7 +180,7 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, u8 slot_index,
} else {
/* When reading upper pages 1, 2 and 3 the offset
* starts at 0 and I2C high address is used. Please refer
- * refer to "Memory Organization" figure in SFF-8472
+ * to "Memory Organization" figure in SFF-8472
* specification for graphical depiction.
*/
i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_HIGH;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c
new file mode 100644
index 000000000000..af37e650a8ad
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2022 NVIDIA Corporation and Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/idr.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <net/devlink.h>
+#include "core.h"
+
+#define MLXSW_LINECARD_DEV_ID_NAME "lc"
+
+struct mlxsw_linecard_dev {
+ struct mlxsw_linecard *linecard;
+};
+
+struct mlxsw_linecard_bdev {
+ struct auxiliary_device adev;
+ struct mlxsw_linecard *linecard;
+ struct mlxsw_linecard_dev *linecard_dev;
+};
+
+static DEFINE_IDA(mlxsw_linecard_bdev_ida);
+
+static int mlxsw_linecard_bdev_id_alloc(void)
+{
+ return ida_alloc(&mlxsw_linecard_bdev_ida, GFP_KERNEL);
+}
+
+static void mlxsw_linecard_bdev_id_free(int id)
+{
+ ida_free(&mlxsw_linecard_bdev_ida, id);
+}
+
+static void mlxsw_linecard_bdev_release(struct device *device)
+{
+ struct auxiliary_device *adev =
+ container_of(device, struct auxiliary_device, dev);
+ struct mlxsw_linecard_bdev *linecard_bdev =
+ container_of(adev, struct mlxsw_linecard_bdev, adev);
+
+ mlxsw_linecard_bdev_id_free(adev->id);
+ kfree(linecard_bdev);
+}
+
+int mlxsw_linecard_bdev_add(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_linecard_bdev *linecard_bdev;
+ int err;
+ int id;
+
+ id = mlxsw_linecard_bdev_id_alloc();
+ if (id < 0)
+ return id;
+
+ linecard_bdev = kzalloc(sizeof(*linecard_bdev), GFP_KERNEL);
+ if (!linecard_bdev) {
+ mlxsw_linecard_bdev_id_free(id);
+ return -ENOMEM;
+ }
+ linecard_bdev->adev.id = id;
+ linecard_bdev->adev.name = MLXSW_LINECARD_DEV_ID_NAME;
+ linecard_bdev->adev.dev.release = mlxsw_linecard_bdev_release;
+ linecard_bdev->adev.dev.parent = linecard->linecards->bus_info->dev;
+ linecard_bdev->linecard = linecard;
+
+ err = auxiliary_device_init(&linecard_bdev->adev);
+ if (err) {
+ mlxsw_linecard_bdev_id_free(id);
+ kfree(linecard_bdev);
+ return err;
+ }
+
+ err = auxiliary_device_add(&linecard_bdev->adev);
+ if (err) {
+ auxiliary_device_uninit(&linecard_bdev->adev);
+ return err;
+ }
+
+ linecard->bdev = linecard_bdev;
+ return 0;
+}
+
+void mlxsw_linecard_bdev_del(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_linecard_bdev *linecard_bdev = linecard->bdev;
+
+ if (!linecard_bdev)
+ /* Unprovisioned line cards do not have an auxiliary device. */
+ return;
+ auxiliary_device_delete(&linecard_bdev->adev);
+ auxiliary_device_uninit(&linecard_bdev->adev);
+ linecard->bdev = NULL;
+}
+
+static int mlxsw_linecard_dev_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_linecard_dev *linecard_dev = devlink_priv(devlink);
+ struct mlxsw_linecard *linecard = linecard_dev->linecard;
+
+ return mlxsw_linecard_devlink_info_get(linecard, req, extack);
+}
+
+static int
+mlxsw_linecard_dev_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_linecard_dev *linecard_dev = devlink_priv(devlink);
+ struct mlxsw_linecard *linecard = linecard_dev->linecard;
+
+ return mlxsw_linecard_flash_update(devlink, linecard,
+ params->fw, extack);
+}
+
+static const struct devlink_ops mlxsw_linecard_dev_devlink_ops = {
+ .info_get = mlxsw_linecard_dev_devlink_info_get,
+ .flash_update = mlxsw_linecard_dev_devlink_flash_update,
+};
+
+static int mlxsw_linecard_bdev_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct mlxsw_linecard_bdev *linecard_bdev =
+ container_of(adev, struct mlxsw_linecard_bdev, adev);
+ struct mlxsw_linecard *linecard = linecard_bdev->linecard;
+ struct mlxsw_linecard_dev *linecard_dev;
+ struct devlink *devlink;
+
+ devlink = devlink_alloc(&mlxsw_linecard_dev_devlink_ops,
+ sizeof(*linecard_dev), &adev->dev);
+ if (!devlink)
+ return -ENOMEM;
+ linecard_dev = devlink_priv(devlink);
+ linecard_dev->linecard = linecard_bdev->linecard;
+ linecard_bdev->linecard_dev = linecard_dev;
+
+ devlink_register(devlink);
+ devlink_linecard_nested_dl_set(linecard->devlink_linecard, devlink);
+ return 0;
+}
+
+static void mlxsw_linecard_bdev_remove(struct auxiliary_device *adev)
+{
+ struct mlxsw_linecard_bdev *linecard_bdev =
+ container_of(adev, struct mlxsw_linecard_bdev, adev);
+ struct devlink *devlink = priv_to_devlink(linecard_bdev->linecard_dev);
+ struct mlxsw_linecard *linecard = linecard_bdev->linecard;
+
+ devlink_linecard_nested_dl_set(linecard->devlink_linecard, NULL);
+ devlink_unregister(devlink);
+ devlink_free(devlink);
+}
+
+static const struct auxiliary_device_id mlxsw_linecard_bdev_id_table[] = {
+ { .name = KBUILD_MODNAME "." MLXSW_LINECARD_DEV_ID_NAME },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, mlxsw_linecard_bdev_id_table);
+
+static struct auxiliary_driver mlxsw_linecard_driver = {
+ .name = MLXSW_LINECARD_DEV_ID_NAME,
+ .probe = mlxsw_linecard_bdev_probe,
+ .remove = mlxsw_linecard_bdev_remove,
+ .id_table = mlxsw_linecard_bdev_id_table,
+};
+
+int mlxsw_linecard_driver_register(void)
+{
+ return auxiliary_driver_register(&mlxsw_linecard_driver);
+}
+
+void mlxsw_linecard_driver_unregister(void)
+{
+ auxiliary_driver_unregister(&mlxsw_linecard_driver);
+}
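(A note on matching, illustrative and not part of the patch: auxiliary bus devices are matched by "<module name>.<device name>", so with this file built into mlxsw_core by the Makefile change above, the id_table entry resolves to "mlxsw_core.lc", and the devices created by mlxsw_linecard_bdev_add() appear as mlxsw_core.lc.<id>, where <id> comes from the IDA allocation.)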
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
index 5c9869dcf674..ca59f0b946da 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
@@ -13,6 +13,7 @@
#include <linux/vmalloc.h>
#include "core.h"
+#include "../mlxfw/mlxfw.h"
struct mlxsw_linecard_ini_file {
__le16 size;
@@ -87,6 +88,351 @@ static const char *mlxsw_linecard_type_name(struct mlxsw_linecard *linecard)
return linecard->name;
}
+struct mlxsw_linecard_device_fw_info {
+ struct mlxfw_dev mlxfw_dev;
+ struct mlxsw_core *mlxsw_core;
+ struct mlxsw_linecard *linecard;
+};
+
+static int mlxsw_linecard_device_fw_component_query(struct mlxfw_dev *mlxfw_dev,
+ u16 component_index,
+ u32 *p_max_size,
+ u8 *p_align_bits,
+ u16 *p_max_write_size)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcqi_pl;
+ int err;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_QUERY,
+ MLXSW_REG(mcqi), &mcqi_pl);
+
+ mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
+ p_max_write_size);
+
+ *p_align_bits = max_t(u8, *p_align_bits, 2);
+ *p_max_write_size = min_t(u16, *p_max_write_size,
+ MLXSW_REG_MCDA_MAX_DATA_LEN);
+ return 0;
+}
+
+static int mlxsw_linecard_device_fw_fsm_lock(struct mlxfw_dev *mlxfw_dev,
+ u32 *fwhandle)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ u8 control_state;
+ char *mcc_pl;
+ int err;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_QUERY,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
+ if (control_state != MLXFW_FSM_STATE_IDLE)
+ return -EBUSY;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
+ 0, *fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static int
+mlxsw_linecard_device_fw_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle,
+ u16 component_index,
+ u32 component_size)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcc_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
+ component_index, fwhandle, component_size);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static int
+mlxsw_linecard_device_fw_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle, u8 *data,
+ u16 size, u32 offset)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcda_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcda), &mcda_pl);
+ mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static int
+mlxsw_linecard_device_fw_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle, u16 component_index)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcc_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
+ component_index, fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static int mlxsw_linecard_device_fw_fsm_activate(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcc_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE,
+ 0, fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static int
+mlxsw_linecard_device_fw_fsm_query_state(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle,
+ enum mlxfw_fsm_state *fsm_state,
+ enum mlxfw_fsm_state_err *fsm_state_err)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ u8 control_state;
+ u8 error_code;
+ char *mcc_pl;
+ int err;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_QUERY,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
+ *fsm_state = control_state;
+ *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
+ MLXFW_FSM_STATE_ERR_MAX);
+ return 0;
+}
+
+static void mlxsw_linecard_device_fw_fsm_cancel(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcc_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL,
+ 0, fwhandle, 0);
+ mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static void mlxsw_linecard_device_fw_fsm_release(struct mlxfw_dev *mlxfw_dev,
+ u32 fwhandle)
+{
+ struct mlxsw_linecard_device_fw_info *info =
+ container_of(mlxfw_dev, struct mlxsw_linecard_device_fw_info,
+ mlxfw_dev);
+ struct mlxsw_linecard *linecard = info->linecard;
+ struct mlxsw_core *mlxsw_core = info->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mcc_pl;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index,
+ linecard->device.index,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+ MLXSW_REG(mcc), &mcc_pl);
+ mlxsw_reg_mcc_pack(mcc_pl,
+ MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE,
+ 0, fwhandle, 0);
+ mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+}
+
+static const struct mlxfw_dev_ops mlxsw_linecard_device_dev_ops = {
+ .component_query = mlxsw_linecard_device_fw_component_query,
+ .fsm_lock = mlxsw_linecard_device_fw_fsm_lock,
+ .fsm_component_update = mlxsw_linecard_device_fw_fsm_component_update,
+ .fsm_block_download = mlxsw_linecard_device_fw_fsm_block_download,
+ .fsm_component_verify = mlxsw_linecard_device_fw_fsm_component_verify,
+ .fsm_activate = mlxsw_linecard_device_fw_fsm_activate,
+ .fsm_query_state = mlxsw_linecard_device_fw_fsm_query_state,
+ .fsm_cancel = mlxsw_linecard_device_fw_fsm_cancel,
+ .fsm_release = mlxsw_linecard_device_fw_fsm_release,
+};
+
+int mlxsw_linecard_flash_update(struct devlink *linecard_devlink,
+ struct mlxsw_linecard *linecard,
+ const struct firmware *firmware,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ struct mlxsw_linecard_device_fw_info info = {
+ .mlxfw_dev = {
+ .ops = &mlxsw_linecard_device_dev_ops,
+ .psid = linecard->device.info.psid,
+ .psid_size = strlen(linecard->device.info.psid),
+ .devlink = linecard_devlink,
+ },
+ .mlxsw_core = mlxsw_core,
+ .linecard = linecard,
+ };
+ int err;
+
+ mutex_lock(&linecard->lock);
+ if (!linecard->active) {
+ NL_SET_ERR_MSG_MOD(extack, "Only active line cards can be flashed");
+ err = -EINVAL;
+ goto unlock;
+ }
+ err = mlxsw_core_fw_flash(mlxsw_core, &info.mlxfw_dev,
+ firmware, extack);
+unlock:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
+
+static int mlxsw_linecard_device_psid_get(struct mlxsw_linecard *linecard,
+ u8 device_index, char *psid)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ char mddt_pl[MLXSW_REG_MDDT_LEN];
+ char *mgir_pl;
+ int err;
+
+ mlxsw_reg_mddt_pack(mddt_pl, linecard->slot_index, device_index,
+ MLXSW_REG_MDDT_METHOD_QUERY,
+ MLXSW_REG(mgir), &mgir_pl);
+
+ mlxsw_reg_mgir_pack(mgir_pl);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddt), mddt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgir_fw_info_psid_memcpy_from(mgir_pl, psid);
+ return 0;
+}
+
+static int mlxsw_linecard_device_info_update(struct mlxsw_linecard *linecard)
+{
+ struct mlxsw_core *mlxsw_core = linecard->linecards->mlxsw_core;
+ bool flashable_found = false;
+ u8 msg_seq = 0;
+
+ do {
+ struct mlxsw_linecard_device_info info;
+ char mddq_pl[MLXSW_REG_MDDQ_LEN];
+ bool flash_owner;
+ bool data_valid;
+ u8 device_index;
+ int err;
+
+ mlxsw_reg_mddq_device_info_pack(mddq_pl, linecard->slot_index,
+ msg_seq);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mddq_device_info_unpack(mddq_pl, &msg_seq,
+ &data_valid, &flash_owner,
+ &device_index,
+ &info.fw_major,
+ &info.fw_minor,
+ &info.fw_sub_minor);
+ if (!data_valid)
+ break;
+ if (!flash_owner) /* We care only about flashable ones. */
+ continue;
+ if (flashable_found) {
+ dev_warn_once(linecard->linecards->bus_info->dev, "linecard %u: More flashable devices present, exposing only the first one\n",
+ linecard->slot_index);
+ return 0;
+ }
+
+ err = mlxsw_linecard_device_psid_get(linecard, device_index,
+ info.psid);
+ if (err)
+ return err;
+
+ linecard->device.info = info;
+ linecard->device.index = device_index;
+ flashable_found = true;
+ } while (msg_seq);
+
+ return 0;
+}
+
static void mlxsw_linecard_provision_fail(struct mlxsw_linecard *linecard)
{
linecard->provisioned = false;
@@ -226,12 +572,57 @@ void mlxsw_linecards_event_ops_unregister(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_linecards_event_ops_unregister);
+int mlxsw_linecard_devlink_info_get(struct mlxsw_linecard *linecard,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ char buf[32];
+ int err;
+
+ mutex_lock(&linecard->lock);
+ if (WARN_ON(!linecard->provisioned)) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ sprintf(buf, "%d", linecard->hw_revision);
+ err = devlink_info_version_fixed_put(req, "hw.revision", buf);
+ if (err)
+ goto unlock;
+
+ sprintf(buf, "%d", linecard->ini_version);
+ err = devlink_info_version_running_put(req, "ini.version", buf);
+ if (err)
+ goto unlock;
+
+ if (linecard->active) {
+ struct mlxsw_linecard_device_info *info = &linecard->device.info;
+
+ err = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
+ info->psid);
+
+ sprintf(buf, "%u.%u.%u", info->fw_major, info->fw_minor,
+ info->fw_sub_minor);
+ err = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+ if (err)
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&linecard->lock);
+ return err;
+}
+
static int
mlxsw_linecard_provision_set(struct mlxsw_linecard *linecard, u8 card_type,
u16 hw_revision, u16 ini_version)
{
struct mlxsw_linecards *linecards = linecard->linecards;
const char *type;
+ int err;
type = mlxsw_linecard_types_lookup(linecards, card_type);
mlxsw_linecard_status_event_done(linecard,
@@ -252,6 +643,14 @@ mlxsw_linecard_provision_set(struct mlxsw_linecard *linecard, u8 card_type,
linecard->provisioned = true;
linecard->hw_revision = hw_revision;
linecard->ini_version = ini_version;
+
+ err = mlxsw_linecard_bdev_add(linecard);
+ if (err) {
+ linecard->provisioned = false;
+ mlxsw_linecard_provision_fail(linecard);
+ return err;
+ }
+
devlink_linecard_provision_set(linecard->devlink_linecard, type);
return 0;
}
@@ -260,6 +659,7 @@ static void mlxsw_linecard_provision_clear(struct mlxsw_linecard *linecard)
{
mlxsw_linecard_status_event_done(linecard,
MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION);
+ mlxsw_linecard_bdev_del(linecard);
linecard->provisioned = false;
devlink_linecard_provision_clear(linecard->devlink_linecard);
}
@@ -270,6 +670,10 @@ static int mlxsw_linecard_ready_set(struct mlxsw_linecard *linecard)
char mddc_pl[MLXSW_REG_MDDC_LEN];
int err;
+ err = mlxsw_linecard_device_info_update(linecard);
+ if (err)
+ return err;
+
mlxsw_reg_mddc_pack(mddc_pl, linecard->slot_index, false, true);
err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mddc), mddc_pl);
if (err)
@@ -885,6 +1289,7 @@ static void mlxsw_linecard_fini(struct mlxsw_core *mlxsw_core,
mlxsw_core_flush_owq();
if (linecard->active)
mlxsw_linecard_active_clear(linecard);
+ mlxsw_linecard_bdev_del(linecard);
devlink_linecard_destroy(linecard->devlink_linecard);
mutex_destroy(&linecard->lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index d9660d4cce96..d9bf584234a6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -359,8 +359,7 @@ static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
/* Create port objects for each valid entry */
devl_lock(devlink);
for (i = 0; i < mlxsw_m->max_ports; i++) {
- if (mlxsw_m->module_to_port[i] > 0 &&
- !mlxsw_core_port_is_xm(mlxsw_m->core, i)) {
+ if (mlxsw_m->module_to_port[i] > 0) {
err = mlxsw_m_port_create(mlxsw_m,
mlxsw_m->module_to_port[i],
i);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index f91dde4df152..50527adc5b5a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -103,6 +103,8 @@ struct mlxsw_pci {
struct pci_dev *pdev;
u8 __iomem *hw_addr;
u64 free_running_clock_offset;
+ u64 utc_sec_offset;
+ u64 utc_nsec_offset;
struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
u32 doorbell_offset;
struct mlxsw_core *core;
@@ -456,9 +458,9 @@ static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
{
q->u.cq.v = mlxsw_pci->max_cqe_ver;
- /* For SDQ it is pointless to use CQEv2, so use CQEv1 instead */
if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
- q->num < mlxsw_pci->num_sdq_cqs)
+ q->num < mlxsw_pci->num_sdq_cqs &&
+ !mlxsw_core_sdq_supports_cqe_v2(mlxsw_pci->core))
q->u.cq.v = MLXSW_PCI_CQE_V1;
}
@@ -505,9 +507,32 @@ static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}
+static unsigned int mlxsw_pci_read32_off(struct mlxsw_pci *mlxsw_pci,
+ ptrdiff_t off)
+{
+ return ioread32be(mlxsw_pci->hw_addr + off);
+}
+
+static void mlxsw_pci_skb_cb_ts_set(struct mlxsw_pci *mlxsw_pci,
+ struct sk_buff *skb,
+ enum mlxsw_pci_cqe_v cqe_v, char *cqe)
+{
+ if (cqe_v != MLXSW_PCI_CQE_V2)
+ return;
+
+ if (mlxsw_pci_cqe2_time_stamp_type_get(cqe) !=
+ MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC)
+ return;
+
+ mlxsw_skb_cb(skb)->cqe_ts.sec = mlxsw_pci_cqe2_time_stamp_sec_get(cqe);
+ mlxsw_skb_cb(skb)->cqe_ts.nsec =
+ mlxsw_pci_cqe2_time_stamp_nsec_get(cqe);
+}
+
static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q,
u16 consumer_counter_limit,
+ enum mlxsw_pci_cqe_v cqe_v,
char *cqe)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
@@ -527,6 +552,7 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
if (unlikely(!tx_info.is_emad &&
skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
mlxsw_core_ptp_transmitted(mlxsw_pci->core, skb,
tx_info.local_port);
skb = NULL;
@@ -647,6 +673,8 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
}
+ mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
+
byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
byte_count -= ETH_FCS_LEN;
@@ -698,7 +726,7 @@ static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t)
sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
- wqe_counter, ncqe);
+ wqe_counter, q->u.cq.v, ncqe);
q->u.cq.comp_sdq_count++;
} else {
struct mlxsw_pci_queue *rdq;
@@ -1235,6 +1263,11 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
mbox, profile->adaptive_routing_group_cap);
}
+ if (profile->used_ubridge) {
+ mlxsw_cmd_mbox_config_profile_set_ubridge_set(mbox, 1);
+ mlxsw_cmd_mbox_config_profile_ubridge_set(mbox,
+ profile->ubridge);
+ }
if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) {
err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
if (err)
@@ -1252,12 +1285,6 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
MLXSW_RES_GET(res, KVD_DOUBLE_SIZE));
}
- if (profile->used_kvh_xlt_cache_mode) {
- mlxsw_cmd_mbox_config_profile_set_kvh_xlt_cache_mode_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_kvh_xlt_cache_mode_set(
- mbox, profile->kvh_xlt_cache_mode);
- }
for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
@@ -1268,31 +1295,14 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1);
}
- return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
-}
-
-static int mlxsw_pci_boardinfo_xm_process(struct mlxsw_pci *mlxsw_pci,
- struct mlxsw_bus_info *bus_info,
- char *mbox)
-{
- int count = mlxsw_cmd_mbox_boardinfo_xm_num_local_ports_get(mbox);
- int i;
-
- if (!mlxsw_cmd_mbox_boardinfo_xm_exists_get(mbox))
- return 0;
-
- bus_info->xm_exists = true;
-
- if (count > MLXSW_BUS_INFO_XM_LOCAL_PORTS_MAX) {
- dev_err(&mlxsw_pci->pdev->dev, "Invalid number of XM local ports\n");
- return -EINVAL;
+ if (profile->used_cqe_time_stamp_type) {
+ mlxsw_cmd_mbox_config_profile_set_cqe_time_stamp_type_set(mbox,
+ 1);
+ mlxsw_cmd_mbox_config_profile_cqe_time_stamp_type_set(mbox,
+ profile->cqe_time_stamp_type);
}
- bus_info->xm_local_ports_count = count;
- for (i = 0; i < count; i++)
- bus_info->xm_local_ports[i] =
- mlxsw_cmd_mbox_boardinfo_xm_local_port_entry_get(mbox,
- i);
- return 0;
+
+ return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
}
static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
@@ -1306,8 +1316,7 @@ static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
return err;
mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
-
- return mlxsw_pci_boardinfo_xm_process(mlxsw_pci, bus_info, mbox);
+ return 0;
}
static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
@@ -1550,6 +1559,24 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
mlxsw_pci->free_running_clock_offset =
mlxsw_cmd_mbox_query_fw_free_running_clock_offset_get(mbox);
+ if (mlxsw_cmd_mbox_query_fw_utc_sec_bar_get(mbox) != 0) {
+ dev_err(&pdev->dev, "Unsupported UTC sec BAR queried from hw\n");
+ err = -EINVAL;
+ goto err_utc_sec_bar;
+ }
+
+ mlxsw_pci->utc_sec_offset =
+ mlxsw_cmd_mbox_query_fw_utc_sec_offset_get(mbox);
+
+ if (mlxsw_cmd_mbox_query_fw_utc_nsec_bar_get(mbox) != 0) {
+ dev_err(&pdev->dev, "Unsupported UTC nsec BAR queried from hw\n");
+ err = -EINVAL;
+ goto err_utc_nsec_bar;
+ }
+
+ mlxsw_pci->utc_nsec_offset =
+ mlxsw_cmd_mbox_query_fw_utc_nsec_offset_get(mbox);
+
num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
if (err)
@@ -1582,6 +1609,14 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_config_profile;
+ /* Some resources depend on unified bridge model, which is configured
+ * as part of config_profile. Query the resources again to get correct
+ * values.
+ */
+ err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
+ if (err)
+ goto err_requery_resources;
+
err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
if (err)
goto err_aqs_init;
@@ -1599,12 +1634,15 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
err_request_eq_irq:
mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
+err_requery_resources:
err_config_profile:
err_cqe_v_check:
err_query_resources:
err_boardinfo:
mlxsw_pci_fw_area_fini(mlxsw_pci);
err_fw_area_init:
+err_utc_nsec_bar:
+err_utc_sec_bar:
err_fr_rn_clk_bar:
err_doorbell_page_bar:
err_iface_rev:
@@ -1819,19 +1857,33 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
static u32 mlxsw_pci_read_frc_h(void *bus_priv)
{
struct mlxsw_pci *mlxsw_pci = bus_priv;
- u64 frc_offset;
+ u64 frc_offset_h;
- frc_offset = mlxsw_pci->free_running_clock_offset;
- return mlxsw_pci_read32(mlxsw_pci, FREE_RUNNING_CLOCK_H(frc_offset));
+ frc_offset_h = mlxsw_pci->free_running_clock_offset;
+ return mlxsw_pci_read32_off(mlxsw_pci, frc_offset_h);
}
static u32 mlxsw_pci_read_frc_l(void *bus_priv)
{
struct mlxsw_pci *mlxsw_pci = bus_priv;
- u64 frc_offset;
+ u64 frc_offset_l;
+
+ frc_offset_l = mlxsw_pci->free_running_clock_offset + 4;
+ return mlxsw_pci_read32_off(mlxsw_pci, frc_offset_l);
+}
+
+static u32 mlxsw_pci_read_utc_sec(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+ return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_sec_offset);
+}
+
+static u32 mlxsw_pci_read_utc_nsec(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
- frc_offset = mlxsw_pci->free_running_clock_offset;
- return mlxsw_pci_read32(mlxsw_pci, FREE_RUNNING_CLOCK_L(frc_offset));
+ return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_nsec_offset);
}
static const struct mlxsw_bus mlxsw_pci_bus = {
@@ -1843,6 +1895,8 @@ static const struct mlxsw_bus mlxsw_pci_bus = {
.cmd_exec = mlxsw_pci_cmd_exec,
.read_frc_h = mlxsw_pci_read_frc_h,
.read_frc_l = mlxsw_pci_read_frc_l,
+ .read_utc_sec = mlxsw_pci_read_utc_sec,
+ .read_utc_nsec = mlxsw_pci_read_utc_nsec,
.features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
};
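
The two new bus callbacks each return only one 32-bit half of the UTC clock, so a consumer has to combine them. Below is a minimal sketch of how that might look (illustrative only, not part of the patch; the helper name is hypothetical). Re-reading the seconds register guards against a nanoseconds rollover between the two reads:

/* Illustrative sketch: read a coherent UTC value via the bus callbacks. */
static u64 example_pci_read_utc_ns(void *bus_priv)
{
	u32 sec, nsec, sec2;

	do {
		sec = mlxsw_pci_read_utc_sec(bus_priv);
		nsec = mlxsw_pci_read_utc_nsec(bus_priv);
		sec2 = mlxsw_pci_read_utc_sec(bus_priv);
	} while (sec != sec2);

	return (u64)sec * NSEC_PER_SEC + nsec;
}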
@@ -1933,7 +1987,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mlxsw_pci->bus_info.device_kind = driver_name;
mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
mlxsw_pci->bus_info.dev = &pdev->dev;
- mlxsw_pci->bus_info.read_frc_capable = true;
+ mlxsw_pci->bus_info.read_clock_capable = true;
mlxsw_pci->id = id;
err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index 7b531228d6c0..48dbfea0a2a1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -41,9 +41,6 @@
#define MLXSW_PCI_DOORBELL(offset, type_offset, num) \
((offset) + (type_offset) + (num) * 4)
-#define MLXSW_PCI_FREE_RUNNING_CLOCK_H(offset) (offset)
-#define MLXSW_PCI_FREE_RUNNING_CLOCK_L(offset) ((offset) + 4)
-
#define MLXSW_PCI_CQS_MAX 96
#define MLXSW_PCI_EQS_COUNT 2
#define MLXSW_PCI_EQ_ASYNC_NUM 0
@@ -217,6 +214,25 @@ MLXSW_ITEM32(pci, cqe0, dqn, 0x0C, 1, 5);
MLXSW_ITEM32(pci, cqe12, dqn, 0x0C, 1, 6);
mlxsw_pci_cqe_item_helpers(dqn, 0, 12, 12);
+/* pci_cqe_time_stamp_low
+ * Time stamp of the CQE
+ * Format according to time_stamp_type:
+ * 0: uSec - 1.024uSec (default for devices which do not support
+ * time_stamp_type). Only bits 15:0 are valid
+ * 1: FRC - Free Running Clock - units of 1nSec
+ * 2: UTC - time_stamp[37:30] = Sec
+ * - time_stamp[29:0] = nSec
+ * 3: Mirror_UTC. UTC time stamp of the original packet that has
+ * MIRROR_SESSION traps
+ * - time_stamp[37:30] = Sec
+ * - time_stamp[29:0] = nSec
+ * Formats 0..2 are configured by
+ * CONFIG_PROFILE.cqe_time_stamp_type for PTP traps
+ * Format 3 is used for MIRROR_SESSION traps
+ * Note that Spectrum does not reveal FRC, UTC and Mirror_UTC
+ */
+MLXSW_ITEM32(pci, cqe2, time_stamp_low, 0x0C, 16, 16);
+
#define MLXSW_PCI_CQE2_MIRROR_TCLASS_INVALID 0x1F
/* pci_cqe_mirror_tclass
@@ -280,8 +296,67 @@ MLXSW_ITEM32(pci, cqe2, user_def_val_orig_pkt_len, 0x14, 0, 20);
*/
MLXSW_ITEM32(pci, cqe2, mirror_reason, 0x18, 24, 8);
+enum mlxsw_pci_cqe_time_stamp_type {
+ MLXSW_PCI_CQE_TIME_STAMP_TYPE_USEC,
+ MLXSW_PCI_CQE_TIME_STAMP_TYPE_FRC,
+ MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC,
+ MLXSW_PCI_CQE_TIME_STAMP_TYPE_MIRROR_UTC,
+};
+
+/* pci_cqe_time_stamp_type
+ * Time stamp type:
+ * 0: uSec - 1.024uSec (default for devices which do not support
+ * time_stamp_type)
+ * 1: FRC - Free Running Clock - units of 1nSec
+ * 2: UTC
+ * 3: Mirror_UTC. UTC time stamp of the original packet that has
+ * MIRROR_SESSION traps
+ */
+MLXSW_ITEM32(pci, cqe2, time_stamp_type, 0x18, 22, 2);
+
#define MLXSW_PCI_CQE2_MIRROR_LATENCY_INVALID 0xFFFFFF
+/* pci_cqe_time_stamp_high
+ * Time stamp of the CQE
+ * Format according to time_stamp_type:
+ * 0: uSec - 1.024uSec (default for devices which do not support
+ * time_stamp_type). Only bits 15:0 are valid
+ * 1: FRC - Free Running Clock - units of 1nSec
+ * 2: UTC - time_stamp[37:30] = Sec
+ * - time_stamp[29:0] = nSec
+ * 3: Mirror_UTC. UTC time stamp of the original packet that has
+ * MIRROR_SESSION traps
+ * - time_stamp[37:30] = Sec
+ * - time_stamp[29:0] = nSec
+ * Formats 0..2 are configured by
+ * CONFIG_PROFILE.cqe_time_stamp_type for PTP traps
+ * Format 3 is used for MIRROR_SESSION traps
+ * Note that Spectrum does not reveal FRC, UTC and Mirror_UTC
+ */
+MLXSW_ITEM32(pci, cqe2, time_stamp_high, 0x18, 0, 22);
+
+static inline u64 mlxsw_pci_cqe2_time_stamp_get(const char *cqe)
+{
+ u64 ts_high = mlxsw_pci_cqe2_time_stamp_high_get(cqe);
+ u64 ts_low = mlxsw_pci_cqe2_time_stamp_low_get(cqe);
+
+ return ts_high << 16 | ts_low;
+}
+
+static inline u8 mlxsw_pci_cqe2_time_stamp_sec_get(const char *cqe)
+{
+ u64 full_ts = mlxsw_pci_cqe2_time_stamp_get(cqe);
+
+ return full_ts >> 30 & 0xFF;
+}
+
+static inline u32 mlxsw_pci_cqe2_time_stamp_nsec_get(const char *cqe)
+{
+ u64 full_ts = mlxsw_pci_cqe2_time_stamp_get(cqe);
+
+ return full_ts & 0x3FFFFFFF;
+}
+
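
The two helpers above decode the UTC format described in the comments: 8 bits of seconds and 30 bits of nanoseconds. As a rough sketch (illustrative only, not part of the patch; the helper name is hypothetical), they could be folded into a nanosecond count, keeping in mind that the 8-bit seconds field wraps every 256 seconds and a real consumer would still extend it against a recent full UTC reading:

/* Illustrative sketch: fold the truncated CQE2 UTC fields into nanoseconds.
 * The result is only meaningful relative to a recent full UTC read because
 * the seconds field is 8 bits wide.
 */
static inline u64 example_cqe2_utc_to_ns(const char *cqe)
{
	u64 sec = mlxsw_pci_cqe2_time_stamp_sec_get(cqe);
	u64 nsec = mlxsw_pci_cqe2_time_stamp_nsec_get(cqe);

	return sec * NSEC_PER_SEC + nsec;
}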
/* pci_cqe_mirror_latency
* End-to-end latency of the original packet that does mirroring to the CPU.
* Value of 0xFFFFFF means that the latency is invalid. Units are according to
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index 741fd2989d12..ac4d4ea51597 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -15,8 +15,6 @@
#define MLXSW_PORT_SWID_TYPE_IB 1
#define MLXSW_PORT_SWID_TYPE_ETH 2
-#define MLXSW_PORT_MID 0xd000
-
#define MLXSW_PORT_MAX_IB_PHY_PORTS 36
#define MLXSW_PORT_MAX_IB_PORTS (MLXSW_PORT_MAX_IB_PHY_PORTS + 1)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 93af6c974ece..f27bdecdf952 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -322,6 +322,18 @@ MLXSW_ITEM32_INDEXED(reg, sfd, rec_action, MLXSW_REG_SFD_BASE_LEN, 28, 4,
MLXSW_ITEM32_INDEXED(reg, sfd, uc_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
MLXSW_REG_SFD_REC_LEN, 0x08, false);
+/* reg_sfd_uc_set_vid
+ * Set VID.
+ * 0 - Do not update VID.
+ * 1 - Set VID.
+ * For Spectrum-2 when set_vid=0 and smpe_valid=1, the smpe will modify the vid.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_set_vid, MLXSW_REG_SFD_BASE_LEN, 31, 1,
+ MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
/* reg_sfd_uc_fid_vid
* Filtering ID or VLAN ID
* For SwitchX and SwitchX-2:
@@ -335,6 +347,15 @@ MLXSW_ITEM32_INDEXED(reg, sfd, uc_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
MLXSW_ITEM32_INDEXED(reg, sfd, uc_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
MLXSW_REG_SFD_REC_LEN, 0x08, false);
+/* reg_sfd_uc_vid
+ * New VID when set_vid=1.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and when set_vid=0.
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_vid, MLXSW_REG_SFD_BASE_LEN, 16, 12,
+ MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
/* reg_sfd_uc_system_port
* Unique port identifier for the final destination of the packet.
* Access: RW
@@ -359,7 +380,7 @@ static inline void mlxsw_reg_sfd_rec_pack(char *payload, int rec_index,
static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
enum mlxsw_reg_sfd_rec_policy policy,
- const char *mac, u16 fid_vid,
+ const char *mac, u16 fid_vid, u16 vid,
enum mlxsw_reg_sfd_rec_action action,
u16 local_port)
{
@@ -368,6 +389,8 @@ static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
mlxsw_reg_sfd_uc_sub_port_set(payload, rec_index, 0);
mlxsw_reg_sfd_uc_fid_vid_set(payload, rec_index, fid_vid);
+ mlxsw_reg_sfd_uc_set_vid_set(payload, rec_index, vid ? true : false);
+ mlxsw_reg_sfd_uc_vid_set(payload, rec_index, vid);
mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port);
}
@@ -379,6 +402,18 @@ static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
MLXSW_REG_SFD_REC_LEN, 0x08, false);
+/* reg_sfd_uc_lag_set_vid
+ * Set VID.
+ * 0 - Do not update VID.
+ * 1 - Set VID.
+ * For Spectrum-2 when set_vid=0 and smpe_valid=1, the smpe will modify the vid.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_set_vid, MLXSW_REG_SFD_BASE_LEN, 31, 1,
+ MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
/* reg_sfd_uc_lag_fid_vid
* Filtering ID or VLAN ID
* For SwitchX and SwitchX-2:
@@ -393,8 +428,10 @@ MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
MLXSW_REG_SFD_REC_LEN, 0x08, false);
/* reg_sfd_uc_lag_lag_vid
- * Indicates VID in case of vFIDs. Reserved for FIDs.
+ * New VLAN ID.
* Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and set_vid=0.
*/
MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_lag_vid, MLXSW_REG_SFD_BASE_LEN, 16, 12,
MLXSW_REG_SFD_REC_LEN, 0x0C, false);
@@ -419,6 +456,7 @@ mlxsw_reg_sfd_uc_lag_pack(char *payload, int rec_index,
mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
mlxsw_reg_sfd_uc_lag_sub_port_set(payload, rec_index, 0);
mlxsw_reg_sfd_uc_lag_fid_vid_set(payload, rec_index, fid_vid);
+ mlxsw_reg_sfd_uc_lag_set_vid_set(payload, rec_index, true);
mlxsw_reg_sfd_uc_lag_lag_vid_set(payload, rec_index, lag_vid);
mlxsw_reg_sfd_uc_lag_lag_id_set(payload, rec_index, lag_id);
}
@@ -997,7 +1035,7 @@ static inline void mlxsw_reg_spaft_pack(char *payload, u16 local_port,
* to packet types used for flooding.
*/
#define MLXSW_REG_SFGC_ID 0x2011
-#define MLXSW_REG_SFGC_LEN 0x10
+#define MLXSW_REG_SFGC_LEN 0x14
MLXSW_REG_DEFINE(sfgc, MLXSW_REG_SFGC_ID, MLXSW_REG_SFGC_LEN);
@@ -1019,9 +1057,10 @@ enum mlxsw_reg_sfgc_type {
*/
MLXSW_ITEM32(reg, sfgc, type, 0x00, 0, 4);
-enum mlxsw_reg_sfgc_bridge_type {
- MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID = 0,
- MLXSW_REG_SFGC_BRIDGE_TYPE_VFID = 1,
+/* bridge_type is used in SFGC and SFMR. */
+enum mlxsw_reg_bridge_type {
+ MLXSW_REG_BRIDGE_TYPE_0 = 0, /* Used for .1q FIDs. */
+ MLXSW_REG_BRIDGE_TYPE_1 = 1, /* Used for .1d FIDs. */
};
/* reg_sfgc_bridge_type
@@ -1054,12 +1093,6 @@ MLXSW_ITEM32(reg, sfgc, table_type, 0x04, 16, 3);
*/
MLXSW_ITEM32(reg, sfgc, flood_table, 0x04, 0, 6);
-/* reg_sfgc_mid
- * The multicast ID for the swid. Not supported for Spectrum
- * Access: RW
- */
-MLXSW_ITEM32(reg, sfgc, mid, 0x08, 0, 16);
-
/* reg_sfgc_counter_set_type
* Counter Set Type for flow counters.
* Access: RW
@@ -1072,18 +1105,26 @@ MLXSW_ITEM32(reg, sfgc, counter_set_type, 0x0C, 24, 8);
*/
MLXSW_ITEM32(reg, sfgc, counter_index, 0x0C, 0, 24);
+/* reg_sfgc_mid_base
+ * MID Base.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32(reg, sfgc, mid_base, 0x10, 0, 16);
+
static inline void
mlxsw_reg_sfgc_pack(char *payload, enum mlxsw_reg_sfgc_type type,
- enum mlxsw_reg_sfgc_bridge_type bridge_type,
+ enum mlxsw_reg_bridge_type bridge_type,
enum mlxsw_flood_table_type table_type,
- unsigned int flood_table)
+ unsigned int flood_table, u16 mid_base)
{
MLXSW_REG_ZERO(sfgc, payload);
mlxsw_reg_sfgc_type_set(payload, type);
mlxsw_reg_sfgc_bridge_type_set(payload, bridge_type);
mlxsw_reg_sfgc_table_type_set(payload, table_type);
mlxsw_reg_sfgc_flood_table_set(payload, flood_table);
- mlxsw_reg_sfgc_mid_set(payload, MLXSW_PORT_MID);
+ mlxsw_reg_sfgc_mid_base_set(payload, mid_base);
}
/* SFDF - Switch Filtering DB Flush
@@ -1516,7 +1557,7 @@ static inline void mlxsw_reg_spmlr_pack(char *payload, u16 local_port,
* virtualized ports.
*/
#define MLXSW_REG_SVFA_ID 0x201C
-#define MLXSW_REG_SVFA_LEN 0x10
+#define MLXSW_REG_SVFA_LEN 0x18
MLXSW_REG_DEFINE(svfa, MLXSW_REG_SVFA_ID, MLXSW_REG_SVFA_LEN);
@@ -1537,6 +1578,7 @@ MLXSW_ITEM32_LP(reg, svfa, 0x00, 16, 0x00, 12);
enum mlxsw_reg_svfa_mt {
MLXSW_REG_SVFA_MT_VID_TO_FID,
MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+ MLXSW_REG_SVFA_MT_VNI_TO_FID,
};
/* reg_svfa_mapping_table
@@ -1586,20 +1628,76 @@ MLXSW_ITEM32(reg, svfa, counter_set_type, 0x08, 24, 8);
*/
MLXSW_ITEM32(reg, svfa, counter_index, 0x08, 0, 24);
-static inline void mlxsw_reg_svfa_pack(char *payload, u16 local_port,
- enum mlxsw_reg_svfa_mt mt, bool valid,
- u16 fid, u16 vid)
+/* reg_svfa_vni
+ * Virtual Network Identifier.
+ * Access: Index
+ *
+ * Note: Reserved when mapping_table is not 2 (VNI mapping table).
+ */
+MLXSW_ITEM32(reg, svfa, vni, 0x10, 0, 24);
+
+/* reg_svfa_irif_v
+ * Ingress RIF valid.
+ * 0 - Ingress RIF is not valid, no ingress RIF assigned.
+ * 1 - Ingress RIF valid.
+ * Must not be set for a non-enabled RIF.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32(reg, svfa, irif_v, 0x14, 24, 1);
+
+/* reg_svfa_irif
+ * Ingress RIF (Router Interface).
+ * Range is 0..cap_max_router_interfaces-1.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and when irif_v=0.
+ */
+MLXSW_ITEM32(reg, svfa, irif, 0x14, 0, 16);
+
+static inline void __mlxsw_reg_svfa_pack(char *payload,
+ enum mlxsw_reg_svfa_mt mt, bool valid,
+ u16 fid, bool irif_v, u16 irif)
{
MLXSW_REG_ZERO(svfa, payload);
- local_port = mt == MLXSW_REG_SVFA_MT_VID_TO_FID ? 0 : local_port;
mlxsw_reg_svfa_swid_set(payload, 0);
- mlxsw_reg_svfa_local_port_set(payload, local_port);
mlxsw_reg_svfa_mapping_table_set(payload, mt);
mlxsw_reg_svfa_v_set(payload, valid);
mlxsw_reg_svfa_fid_set(payload, fid);
+ mlxsw_reg_svfa_irif_v_set(payload, irif_v);
+ mlxsw_reg_svfa_irif_set(payload, irif_v ? irif : 0);
+}
+
+static inline void mlxsw_reg_svfa_port_vid_pack(char *payload, u16 local_port,
+ bool valid, u16 fid, u16 vid,
+ bool irif_v, u16 irif)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+
+ __mlxsw_reg_svfa_pack(payload, mt, valid, fid, irif_v, irif);
+ mlxsw_reg_svfa_local_port_set(payload, local_port);
+ mlxsw_reg_svfa_vid_set(payload, vid);
+}
+
+static inline void mlxsw_reg_svfa_vid_pack(char *payload, bool valid, u16 fid,
+ u16 vid, bool irif_v, u16 irif)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
+
+ __mlxsw_reg_svfa_pack(payload, mt, valid, fid, irif_v, irif);
mlxsw_reg_svfa_vid_set(payload, vid);
}
+static inline void mlxsw_reg_svfa_vni_pack(char *payload, bool valid, u16 fid,
+ u32 vni, bool irif_v, u16 irif)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VNI_TO_FID;
+
+ __mlxsw_reg_svfa_pack(payload, mt, valid, fid, irif_v, irif);
+ mlxsw_reg_svfa_vni_set(payload, vni);
+}
+
/* SPVTR - Switch Port VLAN Stacking Register
* ------------------------------------------
* The Switch Port VLAN Stacking register configures the VLAN mode of the port
@@ -1741,7 +1839,7 @@ static inline void mlxsw_reg_svpe_pack(char *payload, u16 local_port,
* Creates and configures FIDs.
*/
#define MLXSW_REG_SFMR_ID 0x201F
-#define MLXSW_REG_SFMR_LEN 0x18
+#define MLXSW_REG_SFMR_LEN 0x30
MLXSW_REG_DEFINE(sfmr, MLXSW_REG_SFMR_ID, MLXSW_REG_SFMR_LEN);
@@ -1764,6 +1862,28 @@ MLXSW_ITEM32(reg, sfmr, op, 0x00, 24, 4);
*/
MLXSW_ITEM32(reg, sfmr, fid, 0x00, 0, 16);
+/* reg_sfmr_flood_rsp
+ * Router sub-port flooding table.
+ * 0 - Regular flooding table.
+ * 1 - Router sub-port flooding table. For this FID the flooding is per
+ * router-sub-port local_port. Must not be set for a FID which is not a
+ * router-sub-port and must be set prior to enabling the relevant RIF.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32(reg, sfmr, flood_rsp, 0x08, 31, 1);
+
+/* reg_sfmr_flood_bridge_type
+ * Flood bridge type (see SFGC.bridge_type).
+ * 0 - type_0.
+ * 1 - type_1.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and when flood_rsp=1.
+ */
+MLXSW_ITEM32(reg, sfmr, flood_bridge_type, 0x08, 28, 1);
+
/* reg_sfmr_fid_offset
* FID offset.
* Used to point into the flooding table selected by SFGC register if
@@ -1800,15 +1920,57 @@ MLXSW_ITEM32(reg, sfmr, vv, 0x10, 31, 1);
/* reg_sfmr_vni
* Virtual Network Identifier.
+ * When legacy bridge model is used, a given VNI can only be assigned to one
+ * FID. When unified bridge model is used, it configures only the FID->VNI,
+ * the VNI->FID is done by SVFA.
* Access: RW
- *
- * Note: A given VNI can only be assigned to one FID.
*/
MLXSW_ITEM32(reg, sfmr, vni, 0x10, 0, 24);
+/* reg_sfmr_irif_v
+ * Ingress RIF valid.
+ * 0 - Ingress RIF is not valid, no ingress RIF assigned.
+ * 1 - Ingress RIF valid.
+ * Must not be set for a non-valid RIF.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32(reg, sfmr, irif_v, 0x14, 24, 1);
+
+/* reg_sfmr_irif
+ * Ingress RIF (Router Interface).
+ * Range is 0..cap_max_router_interfaces-1.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and when irif_v=0.
+ */
+MLXSW_ITEM32(reg, sfmr, irif, 0x14, 0, 16);
+
+/* reg_sfmr_smpe_valid
+ * SMPE is valid.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used, when flood_rsp=1 and on
+ * Spectrum-1.
+ */
+MLXSW_ITEM32(reg, sfmr, smpe_valid, 0x28, 20, 1);
+
+/* reg_sfmr_smpe
+ * Switch multicast port to egress VID.
+ * Range is 0..cap_max_rmpe-1
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used, when flood_rsp=1 and on
+ * Spectrum-1.
+ */
+MLXSW_ITEM32(reg, sfmr, smpe, 0x28, 0, 16);
+
static inline void mlxsw_reg_sfmr_pack(char *payload,
enum mlxsw_reg_sfmr_op op, u16 fid,
- u16 fid_offset)
+ u16 fid_offset, bool flood_rsp,
+ enum mlxsw_reg_bridge_type bridge_type,
+ bool smpe_valid, u16 smpe)
{
MLXSW_REG_ZERO(sfmr, payload);
mlxsw_reg_sfmr_op_set(payload, op);
@@ -1816,6 +1978,10 @@ static inline void mlxsw_reg_sfmr_pack(char *payload,
mlxsw_reg_sfmr_fid_offset_set(payload, fid_offset);
mlxsw_reg_sfmr_vtfp_set(payload, false);
mlxsw_reg_sfmr_vv_set(payload, false);
+ mlxsw_reg_sfmr_flood_rsp_set(payload, flood_rsp);
+ mlxsw_reg_sfmr_flood_bridge_type_set(payload, bridge_type);
+ mlxsw_reg_sfmr_smpe_valid_set(payload, smpe_valid);
+ mlxsw_reg_sfmr_smpe_set(payload, smpe);
}
/* SPVMLR - Switch Port VLAN MAC Learning Register
@@ -2013,6 +2179,45 @@ static inline void mlxsw_reg_spevet_pack(char *payload, u16 local_port,
mlxsw_reg_spevet_et_vlan_set(payload, et_vlan);
}
+/* SMPE - Switch Multicast Port to Egress VID
+ * ------------------------------------------
+ * The switch multicast port to egress VID maps
+ * {egress_port, SMPE index} -> {VID}.
+ */
+#define MLXSW_REG_SMPE_ID 0x202B
+#define MLXSW_REG_SMPE_LEN 0x0C
+
+MLXSW_REG_DEFINE(smpe, MLXSW_REG_SMPE_ID, MLXSW_REG_SMPE_LEN);
+
+/* reg_smpe_local_port
+ * Local port number.
+ * CPU port is not supported.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, smpe, 0x00, 16, 0x00, 12);
+
+/* reg_smpe_smpe_index
+ * Switch multicast port to egress VID.
+ * Range is 0..cap_max_rmpe-1.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smpe, smpe_index, 0x04, 0, 16);
+
+/* reg_smpe_evid
+ * Egress VID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, smpe, evid, 0x08, 0, 12);
+
+static inline void mlxsw_reg_smpe_pack(char *payload, u16 local_port,
+ u16 smpe_index, u16 evid)
+{
+ MLXSW_REG_ZERO(smpe, payload);
+ mlxsw_reg_smpe_local_port_set(payload, local_port);
+ mlxsw_reg_smpe_smpe_index_set(payload, smpe_index);
+ mlxsw_reg_smpe_evid_set(payload, evid);
+}
+
/* SFTR-V2 - Switch Flooding Table Version 2 Register
* --------------------------------------------------
* The switch flooding table is used for flooding packet replication. The table
@@ -2107,6 +2312,23 @@ MLXSW_ITEM32(reg, smid2, swid, 0x00, 24, 8);
*/
MLXSW_ITEM32(reg, smid2, mid, 0x00, 0, 16);
+/* reg_smid2_smpe_valid
+ * SMPE is valid.
+ * When not valid, the egress VID will not be modified by the SMPE table.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and on Spectrum-2.
+ */
+MLXSW_ITEM32(reg, smid2, smpe_valid, 0x08, 20, 1);
+
+/* reg_smid2_smpe
+ * Switch multicast port to egress VID.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and on Spectrum-2.
+ */
+MLXSW_ITEM32(reg, smid2, smpe, 0x08, 0, 16);
+
/* reg_smid2_port
 * Local port membership (1 bit per port).
* Access: RW
@@ -2120,13 +2342,15 @@ MLXSW_ITEM_BIT_ARRAY(reg, smid2, port, 0x20, 0x80, 1);
MLXSW_ITEM_BIT_ARRAY(reg, smid2, port_mask, 0xA0, 0x80, 1);
static inline void mlxsw_reg_smid2_pack(char *payload, u16 mid, u16 port,
- bool set)
+ bool set, bool smpe_valid, u16 smpe)
{
MLXSW_REG_ZERO(smid2, payload);
mlxsw_reg_smid2_swid_set(payload, 0);
mlxsw_reg_smid2_mid_set(payload, mid);
mlxsw_reg_smid2_port_set(payload, port, set);
mlxsw_reg_smid2_port_mask_set(payload, port, 1);
+ mlxsw_reg_smid2_smpe_valid_set(payload, smpe_valid);
+ mlxsw_reg_smid2_smpe_set(payload, smpe_valid ? smpe : 0);
}
/* CWTP - Congestion WRED ECN TClass Profile
@@ -6701,31 +6925,32 @@ MLXSW_ITEM32(reg, ritr, if_vrrp_id_ipv4, 0x1C, 0, 8);
/* VLAN Interface */
-/* reg_ritr_vlan_if_vid
+/* reg_ritr_vlan_if_vlan_id
* VLAN ID.
* Access: RW
*/
-MLXSW_ITEM32(reg, ritr, vlan_if_vid, 0x08, 0, 12);
+MLXSW_ITEM32(reg, ritr, vlan_if_vlan_id, 0x08, 0, 12);
+
+/* reg_ritr_vlan_if_efid
+ * Egress FID.
+ * Used to connect the RIF to a bridge.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used and on Spectrum-1.
+ */
+MLXSW_ITEM32(reg, ritr, vlan_if_efid, 0x0C, 0, 16);
/* FID Interface */
/* reg_ritr_fid_if_fid
- * Filtering ID. Used to connect a bridge to the router. Only FIDs from
- * the vFID range are supported.
+ * Filtering ID. Used to connect a bridge to the router.
+ * When legacy bridge model is used, only FIDs from the vFID range are
+ * supported. When unified bridge model is used, this is the egress FID for
+ * router to bridge.
* Access: RW
*/
MLXSW_ITEM32(reg, ritr, fid_if_fid, 0x08, 0, 16);
-static inline void mlxsw_reg_ritr_fid_set(char *payload,
- enum mlxsw_reg_ritr_if_type rif_type,
- u16 fid)
-{
- if (rif_type == MLXSW_REG_RITR_FID_IF)
- mlxsw_reg_ritr_fid_if_fid_set(payload, fid);
- else
- mlxsw_reg_ritr_vlan_if_vid_set(payload, fid);
-}
-
/* Sub-port Interface */
/* reg_ritr_sp_if_lag
@@ -6742,6 +6967,16 @@ MLXSW_ITEM32(reg, ritr, sp_if_lag, 0x08, 24, 1);
*/
MLXSW_ITEM32(reg, ritr, sp_if_system_port, 0x08, 0, 16);
+/* reg_ritr_sp_if_efid
+ * Egress filtering ID.
+ * Used to connect the eRIF to a bridge if eRIF-ACL has modified the DMAC or
+ * the VID.
+ * Access: RW
+ *
+ * Note: Reserved when legacy bridge model is used.
+ */
+MLXSW_ITEM32(reg, ritr, sp_if_efid, 0x0C, 0, 16);
+
/* reg_ritr_sp_if_vid
* VLAN ID.
* Access: RW
@@ -6881,10 +7116,11 @@ static inline void mlxsw_reg_ritr_rif_pack(char *payload, u16 rif)
}
static inline void mlxsw_reg_ritr_sp_if_pack(char *payload, bool lag,
- u16 system_port, u16 vid)
+ u16 system_port, u16 efid, u16 vid)
{
mlxsw_reg_ritr_sp_if_lag_set(payload, lag);
mlxsw_reg_ritr_sp_if_system_port_set(payload, system_port);
+ mlxsw_reg_ritr_sp_if_efid_set(payload, efid);
mlxsw_reg_ritr_sp_if_vid_set(payload, vid);
}
@@ -6918,6 +7154,20 @@ static inline void mlxsw_reg_ritr_mac_pack(char *payload, const char *mac)
}
static inline void
+mlxsw_reg_ritr_vlan_if_pack(char *payload, bool enable, u16 rif, u16 vr_id,
+ u16 mtu, const char *mac, u8 mac_profile_id,
+ u16 vlan_id, u16 efid)
+{
+ enum mlxsw_reg_ritr_if_type type = MLXSW_REG_RITR_VLAN_IF;
+
+ mlxsw_reg_ritr_pack(payload, enable, type, rif, vr_id, mtu);
+ mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
+ mlxsw_reg_ritr_if_mac_profile_id_set(payload, mac_profile_id);
+ mlxsw_reg_ritr_vlan_if_vlan_id_set(payload, vlan_id);
+ mlxsw_reg_ritr_vlan_if_efid_set(payload, efid);
+}
+
+static inline void
mlxsw_reg_ritr_loopback_ipip_common_pack(char *payload,
enum mlxsw_reg_ritr_loopback_ipip_type ipip_type,
enum mlxsw_reg_ritr_loopback_ipip_options options,
@@ -7848,11 +8098,10 @@ static inline void mlxsw_reg_ralue_pack4(char *payload,
enum mlxsw_reg_ralxx_protocol protocol,
enum mlxsw_reg_ralue_op op,
u16 virtual_router, u8 prefix_len,
- u32 *dip)
+ u32 dip)
{
mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len);
- if (dip)
- mlxsw_reg_ralue_dip4_set(payload, *dip);
+ mlxsw_reg_ralue_dip4_set(payload, dip);
}
static inline void mlxsw_reg_ralue_pack6(char *payload,
@@ -7862,8 +8111,7 @@ static inline void mlxsw_reg_ralue_pack6(char *payload,
const void *dip)
{
mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len);
- if (dip)
- mlxsw_reg_ralue_dip6_memcpy_to(payload, dip);
+ mlxsw_reg_ralue_dip6_memcpy_to(payload, dip);
}
static inline void
@@ -8926,656 +9174,62 @@ mlxsw_reg_rmft2_ipv6_pack(char *payload, bool v, u16 offset, u16 virtual_router,
mlxsw_reg_rmft2_sip6_mask_memcpy_to(payload, (void *)&sip6_mask);
}
-/* RXLTE - Router XLT Enable Register
- * ----------------------------------
- * The RXLTE enables XLT (eXtended Lookup Table) LPM lookups if a capable
- * XM is present on the system.
- */
-
-#define MLXSW_REG_RXLTE_ID 0x8050
-#define MLXSW_REG_RXLTE_LEN 0x0C
-
-MLXSW_REG_DEFINE(rxlte, MLXSW_REG_RXLTE_ID, MLXSW_REG_RXLTE_LEN);
-
-/* reg_rxlte_virtual_router
- * Virtual router ID associated with the router interface.
- * Range is 0..cap_max_virtual_routers-1
- * Access: Index
- */
-MLXSW_ITEM32(reg, rxlte, virtual_router, 0x00, 0, 16);
-
-enum mlxsw_reg_rxlte_protocol {
- MLXSW_REG_RXLTE_PROTOCOL_IPV4,
- MLXSW_REG_RXLTE_PROTOCOL_IPV6,
-};
-
-/* reg_rxlte_protocol
- * Access: Index
- */
-MLXSW_ITEM32(reg, rxlte, protocol, 0x04, 0, 4);
-
-/* reg_rxlte_lpm_xlt_en
- * Access: RW
- */
-MLXSW_ITEM32(reg, rxlte, lpm_xlt_en, 0x08, 0, 1);
-
-static inline void mlxsw_reg_rxlte_pack(char *payload, u16 virtual_router,
- enum mlxsw_reg_rxlte_protocol protocol,
- bool lpm_xlt_en)
-{
- MLXSW_REG_ZERO(rxlte, payload);
- mlxsw_reg_rxlte_virtual_router_set(payload, virtual_router);
- mlxsw_reg_rxlte_protocol_set(payload, protocol);
- mlxsw_reg_rxlte_lpm_xlt_en_set(payload, lpm_xlt_en);
-}
-
-/* RXLTM - Router XLT M select Register
- * ------------------------------------
- * The RXLTM configures and selects the M for the XM lookups.
- */
-
-#define MLXSW_REG_RXLTM_ID 0x8051
-#define MLXSW_REG_RXLTM_LEN 0x14
-
-MLXSW_REG_DEFINE(rxltm, MLXSW_REG_RXLTM_ID, MLXSW_REG_RXLTM_LEN);
-
-/* reg_rxltm_m0_val_v6
- * Global M0 value For IPv6.
- * Range 0..128
- * Access: RW
- */
-MLXSW_ITEM32(reg, rxltm, m0_val_v6, 0x10, 16, 8);
-
-/* reg_rxltm_m0_val_v4
- * Global M0 value For IPv4.
- * Range 0..32
- * Access: RW
- */
-MLXSW_ITEM32(reg, rxltm, m0_val_v4, 0x10, 0, 6);
-
-static inline void mlxsw_reg_rxltm_pack(char *payload, u8 m0_val_v4, u8 m0_val_v6)
-{
- MLXSW_REG_ZERO(rxltm, payload);
- mlxsw_reg_rxltm_m0_val_v6_set(payload, m0_val_v6);
- mlxsw_reg_rxltm_m0_val_v4_set(payload, m0_val_v4);
-}
-
-/* RLCMLD - Router LPM Cache ML Delete Register
- * --------------------------------------------
- * The RLCMLD register is used to bulk delete the XLT-LPM cache ML entries.
- * This can be used by SW when L is increased or decreased, thus need to
- * remove entries with old ML values.
- */
-
-#define MLXSW_REG_RLCMLD_ID 0x8055
-#define MLXSW_REG_RLCMLD_LEN 0x30
-
-MLXSW_REG_DEFINE(rlcmld, MLXSW_REG_RLCMLD_ID, MLXSW_REG_RLCMLD_LEN);
-
-enum mlxsw_reg_rlcmld_select {
- MLXSW_REG_RLCMLD_SELECT_ML_ENTRIES,
- MLXSW_REG_RLCMLD_SELECT_M_ENTRIES,
- MLXSW_REG_RLCMLD_SELECT_M_AND_ML_ENTRIES,
-};
-
-/* reg_rlcmld_select
- * Which entries to delete.
- * Access: Index
- */
-MLXSW_ITEM32(reg, rlcmld, select, 0x00, 16, 2);
-
-enum mlxsw_reg_rlcmld_filter_fields {
- MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_PROTOCOL = 0x04,
- MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_VIRTUAL_ROUTER = 0x08,
- MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_DIP = 0x10,
-};
-
-/* reg_rlcmld_filter_fields
- * If a bit is '0' then the relevant field is ignored.
- * Access: Index
+/* REIV - Router Egress Interface to VID Register
+ * ----------------------------------------------
+ * The REIV register maps {eRIF, egress_port} -> VID.
+ * This mapping is done at the egress, after the ACLs.
+ * This mapping always takes effect after the router, regardless of cast
+ * (for unicast/multicast/port-based multicast), regardless of eRIF type and
+ * regardless of bridge decisions (e.g. SFD for unicast or SMPE).
+ * Reserved when the RIF is a loopback RIF.
+ *
+ * Note: Reserved when legacy bridge model is used.
*/
-MLXSW_ITEM32(reg, rlcmld, filter_fields, 0x00, 0, 8);
+#define MLXSW_REG_REIV_ID 0x8034
+#define MLXSW_REG_REIV_BASE_LEN 0x20 /* base length, without records */
+#define MLXSW_REG_REIV_REC_LEN 0x04 /* record length */
+#define MLXSW_REG_REIV_REC_MAX_COUNT 256 /* firmware limitation */
+#define MLXSW_REG_REIV_LEN (MLXSW_REG_REIV_BASE_LEN + \
+ MLXSW_REG_REIV_REC_LEN * \
+ MLXSW_REG_REIV_REC_MAX_COUNT)
-enum mlxsw_reg_rlcmld_protocol {
- MLXSW_REG_RLCMLD_PROTOCOL_UC_IPV4,
- MLXSW_REG_RLCMLD_PROTOCOL_UC_IPV6,
-};
+MLXSW_REG_DEFINE(reiv, MLXSW_REG_REIV_ID, MLXSW_REG_REIV_LEN);
-/* reg_rlcmld_protocol
+/* reg_reiv_port_page
+ * Port page - elport_record[0] is 256*port_page.
* Access: Index
*/
-MLXSW_ITEM32(reg, rlcmld, protocol, 0x08, 0, 4);
+MLXSW_ITEM32(reg, reiv, port_page, 0x00, 0, 4);
-/* reg_rlcmld_virtual_router
- * Virtual router ID.
- * Range is 0..cap_max_virtual_routers-1
+/* reg_reiv_erif
+ * Egress RIF.
+ * Range is 0..cap_max_router_interfaces-1.
* Access: Index
*/
-MLXSW_ITEM32(reg, rlcmld, virtual_router, 0x0C, 0, 16);
-
-/* reg_rlcmld_dip
- * The prefix of the route or of the marker that the object of the LPM
- * is compared with. The most significant bits of the dip are the prefix.
- * Access: Index
- */
-MLXSW_ITEM32(reg, rlcmld, dip4, 0x1C, 0, 32);
-MLXSW_ITEM_BUF(reg, rlcmld, dip6, 0x10, 16);
-
-/* reg_rlcmld_dip_mask
- * per bit:
- * 0: no match
- * 1: match
- * Access: Index
- */
-MLXSW_ITEM32(reg, rlcmld, dip_mask4, 0x2C, 0, 32);
-MLXSW_ITEM_BUF(reg, rlcmld, dip_mask6, 0x20, 16);
-
-static inline void __mlxsw_reg_rlcmld_pack(char *payload,
- enum mlxsw_reg_rlcmld_select select,
- enum mlxsw_reg_rlcmld_protocol protocol,
- u16 virtual_router)
-{
- u8 filter_fields = MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_PROTOCOL |
- MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_VIRTUAL_ROUTER |
- MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_DIP;
-
- MLXSW_REG_ZERO(rlcmld, payload);
- mlxsw_reg_rlcmld_select_set(payload, select);
- mlxsw_reg_rlcmld_filter_fields_set(payload, filter_fields);
- mlxsw_reg_rlcmld_protocol_set(payload, protocol);
- mlxsw_reg_rlcmld_virtual_router_set(payload, virtual_router);
-}
-
-static inline void mlxsw_reg_rlcmld_pack4(char *payload,
- enum mlxsw_reg_rlcmld_select select,
- u16 virtual_router,
- u32 dip, u32 dip_mask)
-{
- __mlxsw_reg_rlcmld_pack(payload, select,
- MLXSW_REG_RLCMLD_PROTOCOL_UC_IPV4,
- virtual_router);
- mlxsw_reg_rlcmld_dip4_set(payload, dip);
- mlxsw_reg_rlcmld_dip_mask4_set(payload, dip_mask);
-}
-
-static inline void mlxsw_reg_rlcmld_pack6(char *payload,
- enum mlxsw_reg_rlcmld_select select,
- u16 virtual_router,
- const void *dip, const void *dip_mask)
-{
- __mlxsw_reg_rlcmld_pack(payload, select,
- MLXSW_REG_RLCMLD_PROTOCOL_UC_IPV6,
- virtual_router);
- mlxsw_reg_rlcmld_dip6_memcpy_to(payload, dip);
- mlxsw_reg_rlcmld_dip_mask6_memcpy_to(payload, dip_mask);
-}
-
-/* RLPMCE - Router LPM Cache Enable Register
- * -----------------------------------------
- * Allows disabling the LPM cache. Can be changed on the fly.
- */
-
-#define MLXSW_REG_RLPMCE_ID 0x8056
-#define MLXSW_REG_RLPMCE_LEN 0x4
-
-MLXSW_REG_DEFINE(rlpmce, MLXSW_REG_RLPMCE_ID, MLXSW_REG_RLPMCE_LEN);
-
-/* reg_rlpmce_flush
- * Flush:
- * 0: do not flush the cache (default)
- * 1: flush (clear) the cache
- * Access: WO
- */
-MLXSW_ITEM32(reg, rlpmce, flush, 0x00, 4, 1);
-
-/* reg_rlpmce_disable
- * LPM cache:
- * 0: enabled (default)
- * 1: disabled
- * Access: RW
- */
-MLXSW_ITEM32(reg, rlpmce, disable, 0x00, 0, 1);
-
-static inline void mlxsw_reg_rlpmce_pack(char *payload, bool flush,
- bool disable)
-{
- MLXSW_REG_ZERO(rlpmce, payload);
- mlxsw_reg_rlpmce_flush_set(payload, flush);
- mlxsw_reg_rlpmce_disable_set(payload, disable);
-}
-
-/* Note that XLTQ, XMDR, XRMT and XRALXX register positions violate the rule
- * of ordering register definitions by the ID. However, XRALXX pack helpers are
- * using RALXX pack helpers, RALXX registers have higher IDs.
- * Also XMDR is using RALUE enums. XLRQ and XRMT are just put alongside with the
- * related registers.
- */
-
-/* XLTQ - XM Lookup Table Query Register
- * -------------------------------------
- */
-#define MLXSW_REG_XLTQ_ID 0x7802
-#define MLXSW_REG_XLTQ_LEN 0x2C
-
-MLXSW_REG_DEFINE(xltq, MLXSW_REG_XLTQ_ID, MLXSW_REG_XLTQ_LEN);
-
-enum mlxsw_reg_xltq_xm_device_id {
- MLXSW_REG_XLTQ_XM_DEVICE_ID_UNKNOWN,
- MLXSW_REG_XLTQ_XM_DEVICE_ID_XLT = 0xCF71,
-};
-
-/* reg_xltq_xm_device_id
- * XM device ID.
- * Access: RO
- */
-MLXSW_ITEM32(reg, xltq, xm_device_id, 0x04, 0, 16);
-
-/* reg_xltq_xlt_cap_ipv4_lpm
- * Access: RO
- */
-MLXSW_ITEM32(reg, xltq, xlt_cap_ipv4_lpm, 0x10, 0, 1);
-
-/* reg_xltq_xlt_cap_ipv6_lpm
- * Access: RO
- */
-MLXSW_ITEM32(reg, xltq, xlt_cap_ipv6_lpm, 0x10, 1, 1);
-
-/* reg_xltq_cap_xlt_entries
- * Number of XLT entries
- * Note: SW must not fill more than 80% in order to avoid overflow
- * Access: RO
- */
-MLXSW_ITEM32(reg, xltq, cap_xlt_entries, 0x20, 0, 32);
-
-/* reg_xltq_cap_xlt_mtable
- * XLT M-Table max size
- * Access: RO
- */
-MLXSW_ITEM32(reg, xltq, cap_xlt_mtable, 0x24, 0, 32);
+MLXSW_ITEM32(reg, reiv, erif, 0x04, 0, 16);
-static inline void mlxsw_reg_xltq_pack(char *payload)
-{
- MLXSW_REG_ZERO(xltq, payload);
-}
-
-static inline void mlxsw_reg_xltq_unpack(char *payload, u16 *xm_device_id, bool *xlt_cap_ipv4_lpm,
- bool *xlt_cap_ipv6_lpm, u32 *cap_xlt_entries,
- u32 *cap_xlt_mtable)
-{
- *xm_device_id = mlxsw_reg_xltq_xm_device_id_get(payload);
- *xlt_cap_ipv4_lpm = mlxsw_reg_xltq_xlt_cap_ipv4_lpm_get(payload);
- *xlt_cap_ipv6_lpm = mlxsw_reg_xltq_xlt_cap_ipv6_lpm_get(payload);
- *cap_xlt_entries = mlxsw_reg_xltq_cap_xlt_entries_get(payload);
- *cap_xlt_mtable = mlxsw_reg_xltq_cap_xlt_mtable_get(payload);
-}
-
-/* XMDR - XM Direct Register
- * -------------------------
- * The XMDR allows direct access to the XM device via the switch.
- * Working in synchronous mode. FW waits for response from the XLT
- * for each command. FW acks the XMDR accordingly.
- */
-#define MLXSW_REG_XMDR_ID 0x7803
-#define MLXSW_REG_XMDR_BASE_LEN 0x20
-#define MLXSW_REG_XMDR_TRANS_LEN 0x80
-#define MLXSW_REG_XMDR_LEN (MLXSW_REG_XMDR_BASE_LEN + \
- MLXSW_REG_XMDR_TRANS_LEN)
-
-MLXSW_REG_DEFINE(xmdr, MLXSW_REG_XMDR_ID, MLXSW_REG_XMDR_LEN);
-
-/* reg_xmdr_bulk_entry
- * Bulk_entry
- * 0: Last entry - immediate flush of XRT-cache
- * 1: Bulk entry - do not flush the XRT-cache
+/* reg_reiv_rec_update
+ * Update enable (when write):
+ * 0 - Do not update the entry.
+ * 1 - Update the entry.
* Access: OP
*/
-MLXSW_ITEM32(reg, xmdr, bulk_entry, 0x04, 8, 1);
-
-/* reg_xmdr_num_rec
- * Number of records for Direct access to XM
- * Supported: 0..4 commands (except NOP which is a filler)
- * 0 commands is reserved when bulk_entry = 1.
- * 0 commands is allowed when bulk_entry = 0 for immediate XRT-cache flush.
- * Access: OP
- */
-MLXSW_ITEM32(reg, xmdr, num_rec, 0x04, 0, 4);
-
-/* reg_xmdr_reply_vect
- * Reply Vector
- * Bit i for command index i+1
- * values per bit:
- * 0: failed
- * 1: succeeded
- * e.g. if commands 1, 2, 4 succeeded and command 3 failed then binary
- * value will be 0b1011
- * Access: RO
- */
-MLXSW_ITEM_BIT_ARRAY(reg, xmdr, reply_vect, 0x08, 4, 1);
-
-static inline void mlxsw_reg_xmdr_pack(char *payload, bool bulk_entry)
-{
- MLXSW_REG_ZERO(xmdr, payload);
- mlxsw_reg_xmdr_bulk_entry_set(payload, bulk_entry);
-}
-
-enum mlxsw_reg_xmdr_c_cmd_id {
- MLXSW_REG_XMDR_C_CMD_ID_LT_ROUTE_V4 = 0x30,
- MLXSW_REG_XMDR_C_CMD_ID_LT_ROUTE_V6 = 0x31,
-};
-
-#define MLXSW_REG_XMDR_C_LT_ROUTE_V4_LEN 32
-#define MLXSW_REG_XMDR_C_LT_ROUTE_V6_LEN 48
-
-/* reg_xmdr_c_cmd_id
- */
-MLXSW_ITEM32(reg, xmdr_c, cmd_id, 0x00, 24, 8);
-
-/* reg_xmdr_c_seq_number
- */
-MLXSW_ITEM32(reg, xmdr_c, seq_number, 0x00, 12, 12);
-
-enum mlxsw_reg_xmdr_c_ltr_op {
- /* Activity is set */
- MLXSW_REG_XMDR_C_LTR_OP_WRITE = 0,
- /* There is no update mask. All fields are updated. */
- MLXSW_REG_XMDR_C_LTR_OP_UPDATE = 1,
- MLXSW_REG_XMDR_C_LTR_OP_DELETE = 2,
-};
-
-/* reg_xmdr_c_ltr_op
- * Operation.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_op, 0x04, 24, 8);
-
-/* reg_xmdr_c_ltr_trap_action
- * Trap action.
- * Values are defined in enum mlxsw_reg_ralue_trap_action.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_trap_action, 0x04, 20, 4);
-
-enum mlxsw_reg_xmdr_c_ltr_trap_id_num {
- MLXSW_REG_XMDR_C_LTR_TRAP_ID_NUM_RTR_INGRESS0,
- MLXSW_REG_XMDR_C_LTR_TRAP_ID_NUM_RTR_INGRESS1,
- MLXSW_REG_XMDR_C_LTR_TRAP_ID_NUM_RTR_INGRESS2,
- MLXSW_REG_XMDR_C_LTR_TRAP_ID_NUM_RTR_INGRESS3,
-};
-
-/* reg_xmdr_c_ltr_trap_id_num
- * Trap-ID number.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_trap_id_num, 0x04, 16, 4);
-
-/* reg_xmdr_c_ltr_virtual_router
- * Virtual Router ID.
- * Range is 0..cap_max_virtual_routers-1
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_virtual_router, 0x04, 0, 16);
-
-/* reg_xmdr_c_ltr_prefix_len
- * Number of bits in the prefix of the LPM route.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_prefix_len, 0x08, 24, 8);
-
-/* reg_xmdr_c_ltr_bmp_len
- * The best match prefix length in the case that there is no match for
- * longer prefixes.
- * If (entry_type != MARKER_ENTRY), bmp_len must be equal to prefix_len
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_bmp_len, 0x08, 16, 8);
+MLXSW_ITEM32_INDEXED(reg, reiv, rec_update, MLXSW_REG_REIV_BASE_LEN, 31, 1,
+ MLXSW_REG_REIV_REC_LEN, 0x00, false);
-/* reg_xmdr_c_ltr_entry_type
- * Entry type.
- * Values are defined in enum mlxsw_reg_ralue_entry_type.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_entry_type, 0x08, 4, 4);
-
-enum mlxsw_reg_xmdr_c_ltr_action_type {
- MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_LOCAL,
- MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_REMOTE,
- MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_IP2ME,
-};
-
-/* reg_xmdr_c_ltr_action_type
- * Action Type.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_action_type, 0x08, 0, 4);
-
-/* reg_xmdr_c_ltr_erif
- * Egress Router Interface.
- * Only relevant in case of LOCAL action.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_erif, 0x10, 0, 16);
-
-/* reg_xmdr_c_ltr_adjacency_index
- * Points to the first entry of the group-based ECMP.
- * Only relevant in case of REMOTE action.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_adjacency_index, 0x10, 0, 24);
-
-#define MLXSW_REG_XMDR_C_LTR_POINTER_TO_TUNNEL_DISABLED_MAGIC 0xFFFFFF
-
-/* reg_xmdr_c_ltr_pointer_to_tunnel
- * Only relevant in case of IP2ME action.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_pointer_to_tunnel, 0x10, 0, 24);
-
-/* reg_xmdr_c_ltr_ecmp_size
- * Amount of sequential entries starting
- * from the adjacency_index (the number of ECMPs).
- * The valid range is 1-64, 512, 1024, 2048 and 4096.
- * Only relevant in case of REMOTE action.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_ecmp_size, 0x14, 0, 32);
-
-/* reg_xmdr_c_ltr_dip*
- * The prefix of the route or of the marker that the object of the LPM
- * is compared with. The most significant bits of the dip are the prefix.
- * The least significant bits must be '0' if the prefix_len is smaller
- * than 128 for IPv6 or smaller than 32 for IPv4.
- */
-MLXSW_ITEM32(reg, xmdr_c, ltr_dip4, 0x1C, 0, 32);
-MLXSW_ITEM_BUF(reg, xmdr_c, ltr_dip6, 0x1C, 16);
-
-static inline void
-mlxsw_reg_xmdr_c_ltr_pack(char *xmdr_payload, unsigned int trans_offset,
- enum mlxsw_reg_xmdr_c_cmd_id cmd_id, u16 seq_number,
- enum mlxsw_reg_xmdr_c_ltr_op op, u16 virtual_router,
- u8 prefix_len)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
- u8 num_rec = mlxsw_reg_xmdr_num_rec_get(xmdr_payload);
-
- mlxsw_reg_xmdr_num_rec_set(xmdr_payload, num_rec + 1);
-
- mlxsw_reg_xmdr_c_cmd_id_set(payload, cmd_id);
- mlxsw_reg_xmdr_c_seq_number_set(payload, seq_number);
- mlxsw_reg_xmdr_c_ltr_op_set(payload, op);
- mlxsw_reg_xmdr_c_ltr_virtual_router_set(payload, virtual_router);
- mlxsw_reg_xmdr_c_ltr_prefix_len_set(payload, prefix_len);
- mlxsw_reg_xmdr_c_ltr_entry_type_set(payload,
- MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY);
- mlxsw_reg_xmdr_c_ltr_bmp_len_set(payload, prefix_len);
-}
-
-static inline unsigned int
-mlxsw_reg_xmdr_c_ltr_pack4(char *xmdr_payload, unsigned int trans_offset,
- u16 seq_number, enum mlxsw_reg_xmdr_c_ltr_op op,
- u16 virtual_router, u8 prefix_len, u32 *dip)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
-
- mlxsw_reg_xmdr_c_ltr_pack(xmdr_payload, trans_offset,
- MLXSW_REG_XMDR_C_CMD_ID_LT_ROUTE_V4,
- seq_number, op, virtual_router, prefix_len);
- if (dip)
- mlxsw_reg_xmdr_c_ltr_dip4_set(payload, *dip);
- return MLXSW_REG_XMDR_C_LT_ROUTE_V4_LEN;
-}
-
-static inline unsigned int
-mlxsw_reg_xmdr_c_ltr_pack6(char *xmdr_payload, unsigned int trans_offset,
- u16 seq_number, enum mlxsw_reg_xmdr_c_ltr_op op,
- u16 virtual_router, u8 prefix_len, const void *dip)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
-
- mlxsw_reg_xmdr_c_ltr_pack(xmdr_payload, trans_offset,
- MLXSW_REG_XMDR_C_CMD_ID_LT_ROUTE_V6,
- seq_number, op, virtual_router, prefix_len);
- if (dip)
- mlxsw_reg_xmdr_c_ltr_dip6_memcpy_to(payload, dip);
- return MLXSW_REG_XMDR_C_LT_ROUTE_V6_LEN;
-}
-
-static inline void
-mlxsw_reg_xmdr_c_ltr_act_remote_pack(char *xmdr_payload, unsigned int trans_offset,
- enum mlxsw_reg_ralue_trap_action trap_action,
- enum mlxsw_reg_xmdr_c_ltr_trap_id_num trap_id_num,
- u32 adjacency_index, u16 ecmp_size)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
-
- mlxsw_reg_xmdr_c_ltr_action_type_set(payload, MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_REMOTE);
- mlxsw_reg_xmdr_c_ltr_trap_action_set(payload, trap_action);
- mlxsw_reg_xmdr_c_ltr_trap_id_num_set(payload, trap_id_num);
- mlxsw_reg_xmdr_c_ltr_adjacency_index_set(payload, adjacency_index);
- mlxsw_reg_xmdr_c_ltr_ecmp_size_set(payload, ecmp_size);
-}
-
-static inline void
-mlxsw_reg_xmdr_c_ltr_act_local_pack(char *xmdr_payload, unsigned int trans_offset,
- enum mlxsw_reg_ralue_trap_action trap_action,
- enum mlxsw_reg_xmdr_c_ltr_trap_id_num trap_id_num, u16 erif)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
-
- mlxsw_reg_xmdr_c_ltr_action_type_set(payload, MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_LOCAL);
- mlxsw_reg_xmdr_c_ltr_trap_action_set(payload, trap_action);
- mlxsw_reg_xmdr_c_ltr_trap_id_num_set(payload, trap_id_num);
- mlxsw_reg_xmdr_c_ltr_erif_set(payload, erif);
-}
-
-static inline void mlxsw_reg_xmdr_c_ltr_act_ip2me_pack(char *xmdr_payload,
- unsigned int trans_offset)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
-
- mlxsw_reg_xmdr_c_ltr_action_type_set(payload, MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_IP2ME);
- mlxsw_reg_xmdr_c_ltr_pointer_to_tunnel_set(payload,
- MLXSW_REG_XMDR_C_LTR_POINTER_TO_TUNNEL_DISABLED_MAGIC);
-}
-
-static inline void mlxsw_reg_xmdr_c_ltr_act_ip2me_tun_pack(char *xmdr_payload,
- unsigned int trans_offset,
- u32 pointer_to_tunnel)
-{
- char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset;
-
- mlxsw_reg_xmdr_c_ltr_action_type_set(payload, MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_IP2ME);
- mlxsw_reg_xmdr_c_ltr_pointer_to_tunnel_set(payload, pointer_to_tunnel);
-}
-
-/* XRMT - XM Router M Table Register
- * ---------------------------------
- * The XRMT configures the M-Table for the XLT-LPM.
- */
-#define MLXSW_REG_XRMT_ID 0x7810
-#define MLXSW_REG_XRMT_LEN 0x14
-
-MLXSW_REG_DEFINE(xrmt, MLXSW_REG_XRMT_ID, MLXSW_REG_XRMT_LEN);
-
-/* reg_xrmt_index
- * Index in M-Table.
- * Range 0..cap_xlt_mtable-1
- * Access: Index
- */
-MLXSW_ITEM32(reg, xrmt, index, 0x04, 0, 20);
-
-/* reg_xrmt_l0_val
+/* reg_reiv_rec_evid
+ * Egress VID.
+ * Range is 0..4095.
* Access: RW
*/
-MLXSW_ITEM32(reg, xrmt, l0_val, 0x10, 24, 8);
-
-static inline void mlxsw_reg_xrmt_pack(char *payload, u32 index, u8 l0_val)
-{
- MLXSW_REG_ZERO(xrmt, payload);
- mlxsw_reg_xrmt_index_set(payload, index);
- mlxsw_reg_xrmt_l0_val_set(payload, l0_val);
-}
-
-/* XRALTA - XM Router Algorithmic LPM Tree Allocation Register
- * -----------------------------------------------------------
- * The XRALTA is used to allocate the XLT LPM trees.
- *
- * This register embeds original RALTA register.
- */
-#define MLXSW_REG_XRALTA_ID 0x7811
-#define MLXSW_REG_XRALTA_LEN 0x08
-#define MLXSW_REG_XRALTA_RALTA_OFFSET 0x04
+MLXSW_ITEM32_INDEXED(reg, reiv, rec_evid, MLXSW_REG_REIV_BASE_LEN, 0, 12,
+ MLXSW_REG_REIV_REC_LEN, 0x00, false);
-MLXSW_REG_DEFINE(xralta, MLXSW_REG_XRALTA_ID, MLXSW_REG_XRALTA_LEN);
-
-static inline void mlxsw_reg_xralta_pack(char *payload, bool alloc,
- enum mlxsw_reg_ralxx_protocol protocol,
- u8 tree_id)
+static inline void mlxsw_reg_reiv_pack(char *payload, u8 port_page, u16 erif)
{
- char *ralta_payload = payload + MLXSW_REG_XRALTA_RALTA_OFFSET;
-
- MLXSW_REG_ZERO(xralta, payload);
- mlxsw_reg_ralta_pack(ralta_payload, alloc, protocol, tree_id);
-}
-
-/* XRALST - XM Router Algorithmic LPM Structure Tree Register
- * ----------------------------------------------------------
- * The XRALST is used to set and query the structure of an XLT LPM tree.
- *
- * This register embeds original RALST register.
- */
-#define MLXSW_REG_XRALST_ID 0x7812
-#define MLXSW_REG_XRALST_LEN 0x108
-#define MLXSW_REG_XRALST_RALST_OFFSET 0x04
-
-MLXSW_REG_DEFINE(xralst, MLXSW_REG_XRALST_ID, MLXSW_REG_XRALST_LEN);
-
-static inline void mlxsw_reg_xralst_pack(char *payload, u8 root_bin, u8 tree_id)
-{
- char *ralst_payload = payload + MLXSW_REG_XRALST_RALST_OFFSET;
-
- MLXSW_REG_ZERO(xralst, payload);
- mlxsw_reg_ralst_pack(ralst_payload, root_bin, tree_id);
-}
-
-static inline void mlxsw_reg_xralst_bin_pack(char *payload, u8 bin_number,
- u8 left_child_bin,
- u8 right_child_bin)
-{
- char *ralst_payload = payload + MLXSW_REG_XRALST_RALST_OFFSET;
-
- mlxsw_reg_ralst_bin_pack(ralst_payload, bin_number, left_child_bin,
- right_child_bin);
-}
-
-/* XRALTB - XM Router Algorithmic LPM Tree Binding Register
- * --------------------------------------------------------
- * The XRALTB register is used to bind virtual router and protocol
- * to an allocated LPM tree.
- *
- * This register embeds original RALTB register.
- */
-#define MLXSW_REG_XRALTB_ID 0x7813
-#define MLXSW_REG_XRALTB_LEN 0x08
-#define MLXSW_REG_XRALTB_RALTB_OFFSET 0x04
-
-MLXSW_REG_DEFINE(xraltb, MLXSW_REG_XRALTB_ID, MLXSW_REG_XRALTB_LEN);
-
-static inline void mlxsw_reg_xraltb_pack(char *payload, u16 virtual_router,
- enum mlxsw_reg_ralxx_protocol protocol,
- u8 tree_id)
-{
- char *raltb_payload = payload + MLXSW_REG_XRALTB_RALTB_OFFSET;
-
- MLXSW_REG_ZERO(xraltb, payload);
- mlxsw_reg_raltb_pack(raltb_payload, virtual_router, protocol, tree_id);
+ MLXSW_REG_ZERO(reiv, payload);
+ mlxsw_reg_reiv_port_page_set(payload, port_page);
+ mlxsw_reg_reiv_erif_set(payload, erif);
}
/* MFCR - Management Fan Control Register
@@ -10693,6 +10347,8 @@ MLXSW_REG_DEFINE(mtutc, MLXSW_REG_MTUTC_ID, MLXSW_REG_MTUTC_LEN);
enum mlxsw_reg_mtutc_operation {
MLXSW_REG_MTUTC_OPERATION_SET_TIME_AT_NEXT_SEC = 0,
+ MLXSW_REG_MTUTC_OPERATION_SET_TIME_IMMEDIATE = 1,
+ MLXSW_REG_MTUTC_OPERATION_ADJUST_TIME = 2,
MLXSW_REG_MTUTC_OPERATION_ADJUST_FREQ = 3,
};
@@ -10705,25 +10361,50 @@ MLXSW_ITEM32(reg, mtutc, operation, 0x00, 0, 4);
/* reg_mtutc_freq_adjustment
* Frequency adjustment: Every PPS the HW frequency will be
* adjusted by this value. Units of HW clock, where HW counts
- * 10^9 HW clocks for 1 HW second.
+ * 10^9 HW clocks for 1 HW second. Range is from -50,000,000 to +50,000,000.
+ * In Spectrum-2, the field is reversed; positive values decrease the
+ * frequency.
* Access: RW
*/
MLXSW_ITEM32(reg, mtutc, freq_adjustment, 0x04, 0, 32);
+#define MLXSW_REG_MTUTC_MAX_FREQ_ADJ (50 * 1000 * 1000)
+
/* reg_mtutc_utc_sec
* UTC seconds.
* Access: WO
*/
MLXSW_ITEM32(reg, mtutc, utc_sec, 0x10, 0, 32);
+/* reg_mtutc_utc_nsec
+ * UTC nSecs.
+ * Range 0..(10^9-1)
+ * Updated when operation is SET_TIME_IMMEDIATE.
+ * Reserved on Spectrum-1.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mtutc, utc_nsec, 0x14, 0, 30);
+
+/* reg_mtutc_time_adjustment
+ * Time adjustment.
+ * Units of nSec.
+ * Range is from -32768 to +32767.
+ * Updated when operation is ADJUST_TIME.
+ * Reserved on Spectrum-1.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mtutc, time_adjustment, 0x18, 0, 32);
+
static inline void
mlxsw_reg_mtutc_pack(char *payload, enum mlxsw_reg_mtutc_operation oper,
- u32 freq_adj, u32 utc_sec)
+ u32 freq_adj, u32 utc_sec, u32 utc_nsec, u32 time_adj)
{
MLXSW_REG_ZERO(mtutc, payload);
mlxsw_reg_mtutc_operation_set(payload, oper);
mlxsw_reg_mtutc_freq_adjustment_set(payload, freq_adj);
mlxsw_reg_mtutc_utc_sec_set(payload, utc_sec);
+ mlxsw_reg_mtutc_utc_nsec_set(payload, utc_nsec);
+ mlxsw_reg_mtutc_time_adjustment_set(payload, time_adj);
}
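
With the two new operations, the extended pack helper can express both an immediate time set (seconds plus nanoseconds) and a small phase adjustment. A hedged sketch (illustrative only, not part of the patch; the function name is hypothetical) of issuing an ADJUST_TIME step within the documented -32768..32767 nsec range:

/* Illustrative sketch: apply a small phase correction via ADJUST_TIME. */
static void example_mtutc_adjust_time(char *mtutc_pl, s16 delta_nsec)
{
	mlxsw_reg_mtutc_pack(mtutc_pl, MLXSW_REG_MTUTC_OPERATION_ADJUST_TIME,
			     0, 0, 0, delta_nsec);
}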
/* MCQI - Management Component Query Information
@@ -11391,15 +11072,76 @@ MLXSW_ITEM32(reg, mtptpt, trap_id, 0x00, 0, 4);
*/
MLXSW_ITEM32(reg, mtptpt, message_type, 0x04, 0, 16);
-static inline void mlxsw_reg_mtptptp_pack(char *payload,
- enum mlxsw_reg_mtptpt_trap_id trap_id,
- u16 message_type)
+static inline void mlxsw_reg_mtptpt_pack(char *payload,
+ enum mlxsw_reg_mtptpt_trap_id trap_id,
+ u16 message_type)
{
MLXSW_REG_ZERO(mtptpt, payload);
mlxsw_reg_mtptpt_trap_id_set(payload, trap_id);
mlxsw_reg_mtptpt_message_type_set(payload, message_type);
}
+/* MTPCPC - Monitoring Time Precision Correction Port Configuration Register
+ * -------------------------------------------------------------------------
+ */
+#define MLXSW_REG_MTPCPC_ID 0x9093
+#define MLXSW_REG_MTPCPC_LEN 0x2C
+
+MLXSW_REG_DEFINE(mtpcpc, MLXSW_REG_MTPCPC_ID, MLXSW_REG_MTPCPC_LEN);
+
+/* reg_mtpcpc_pport
+ * Per port:
+ * 0: config is global. When reading, the local_port is 1.
+ * 1: config is per port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mtpcpc, pport, 0x00, 31, 1);
+
+/* reg_mtpcpc_local_port
+ * Local port number.
+ * Supported to/from CPU port.
+ * Reserved when pport = 0.
+ * Access: Index
+ */
+MLXSW_ITEM32_LP(reg, mtpcpc, 0x00, 16, 0x00, 12);
+
+/* reg_mtpcpc_ptp_trap_en
+ * Enable PTP traps.
+ * The trap_id is configured by MTPTPT.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtpcpc, ptp_trap_en, 0x04, 0, 1);
+
+/* reg_mtpcpc_ing_correction_message_type
+ * Bitwise vector of PTP message types to update correction-field at ingress.
+ * MessageType field as defined by IEEE 1588. Each bit corresponds to a value
+ * (e.g. Bit0: Sync, Bit1: Delay_Req). Supported also from CPU port.
+ * Default all 0
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtpcpc, ing_correction_message_type, 0x10, 0, 16);
+
+/* reg_mtpcpc_egr_correction_message_type
+ * Bitwise vector of PTP message types to update correction-field at egress.
+ * MessageType field as defined by IEEE 1588. Each bit corresponds to a value
+ * (e.g. Bit0: Sync, Bit1: Delay_Req). Supported also from CPU port.
+ * Default all 0
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtpcpc, egr_correction_message_type, 0x14, 0, 16);
+
+static inline void mlxsw_reg_mtpcpc_pack(char *payload, bool pport,
+ u16 local_port, bool ptp_trap_en,
+ u16 ing, u16 egr)
+{
+ MLXSW_REG_ZERO(mtpcpc, payload);
+ mlxsw_reg_mtpcpc_pport_set(payload, pport);
+ mlxsw_reg_mtpcpc_local_port_set(payload, pport ? local_port : 0);
+ mlxsw_reg_mtpcpc_ptp_trap_en_set(payload, ptp_trap_en);
+ mlxsw_reg_mtpcpc_ing_correction_message_type_set(payload, ing);
+ mlxsw_reg_mtpcpc_egr_correction_message_type_set(payload, egr);
+}
+
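
A hedged usage sketch for the new register (illustrative only, not part of the patch; the function name and message-type choice are hypothetical): a global, non per-port configuration that enables PTP traps and requests correction-field updates for Sync and Delay_Req in both directions:

/* Illustrative sketch: global MTPCPC config for Sync and Delay_Req. */
static void example_mtpcpc_config_global(char *mtpcpc_pl)
{
	u16 msg_types = BIT(0) | BIT(1);	/* Bit0: Sync, Bit1: Delay_Req */

	mlxsw_reg_mtpcpc_pack(mtpcpc_pl, false, 0, true, msg_types, msg_types);
}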
/* MFGD - Monitoring FW General Debug Register
* -------------------------------------------
*/
@@ -11622,6 +11364,95 @@ mlxsw_reg_mbct_unpack(const char *payload, u8 *p_slot_index,
*p_fsm_state = mlxsw_reg_mbct_fsm_state_get(payload);
}
+/* MDDT - Management DownStream Device Tunneling Register
+ * ------------------------------------------------------
+ * This register allows delivering query and request messages (PRM registers,
+ * commands) to a DownStream device.
+ */
+#define MLXSW_REG_MDDT_ID 0x9160
+#define MLXSW_REG_MDDT_LEN 0x110
+
+MLXSW_REG_DEFINE(mddt, MLXSW_REG_MDDT_ID, MLXSW_REG_MDDT_LEN);
+
+/* reg_mddt_slot_index
+ * Slot index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddt, slot_index, 0x00, 8, 4);
+
+/* reg_mddt_device_index
+ * Device index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddt, device_index, 0x00, 0, 8);
+
+/* reg_mddt_read_size
+ * Read size in D-Words.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mddt, read_size, 0x04, 24, 8);
+
+/* reg_mddt_write_size
+ * Write size in D-Words.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mddt, write_size, 0x04, 16, 8);
+
+enum mlxsw_reg_mddt_status {
+ MLXSW_REG_MDDT_STATUS_OK,
+};
+
+/* reg_mddt_status
+ * Return code of the Downstream Device to the register that was sent.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddt, status, 0x0C, 24, 8);
+
+enum mlxsw_reg_mddt_method {
+ MLXSW_REG_MDDT_METHOD_QUERY,
+ MLXSW_REG_MDDT_METHOD_WRITE,
+};
+
+/* reg_mddt_method
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, mddt, method, 0x0C, 22, 2);
+
+/* reg_mddt_register_id
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddt, register_id, 0x0C, 0, 16);
+
+#define MLXSW_REG_MDDT_PAYLOAD_OFFSET 0x0C
+#define MLXSW_REG_MDDT_PRM_REGISTER_HEADER_LEN 4
+
+static inline char *mlxsw_reg_mddt_inner_payload(char *payload)
+{
+ return payload + MLXSW_REG_MDDT_PAYLOAD_OFFSET +
+ MLXSW_REG_MDDT_PRM_REGISTER_HEADER_LEN;
+}
+
+static inline void mlxsw_reg_mddt_pack(char *payload, u8 slot_index,
+ u8 device_index,
+ enum mlxsw_reg_mddt_method method,
+ const struct mlxsw_reg_info *reg,
+ char **inner_payload)
+{
+ int len = reg->len + MLXSW_REG_MDDT_PRM_REGISTER_HEADER_LEN;
+
+ if (WARN_ON(len + MLXSW_REG_MDDT_PAYLOAD_OFFSET > MLXSW_REG_MDDT_LEN))
+ len = MLXSW_REG_MDDT_LEN - MLXSW_REG_MDDT_PAYLOAD_OFFSET;
+
+ MLXSW_REG_ZERO(mddt, payload);
+ mlxsw_reg_mddt_slot_index_set(payload, slot_index);
+ mlxsw_reg_mddt_device_index_set(payload, device_index);
+ mlxsw_reg_mddt_method_set(payload, method);
+ mlxsw_reg_mddt_register_id_set(payload, reg->id);
+ mlxsw_reg_mddt_read_size_set(payload, len / 4);
+ mlxsw_reg_mddt_write_size_set(payload, len / 4);
+ *inner_payload = mlxsw_reg_mddt_inner_payload(payload);
+}
+
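
The pack helper returns a pointer into the MDDT payload where the tunneled register is placed. A hedged sketch (illustrative only, not part of the patch; the function name is hypothetical) of wrapping a register write destined for a downstream device, assuming the tunneled register fits the MDDT payload area (the pack helper warns otherwise):

/* Illustrative sketch: embed a register payload in an MDDT write. */
static void example_mddt_prepare_write(char *mddt_pl, u8 slot_index,
				       u8 device_index,
				       const struct mlxsw_reg_info *reg,
				       const char *reg_pl)
{
	char *inner_pl;

	mlxsw_reg_mddt_pack(mddt_pl, slot_index, device_index,
			    MLXSW_REG_MDDT_METHOD_WRITE, reg, &inner_pl);
	memcpy(inner_pl, reg_pl, reg->len);	/* assumed to fit, see pack */
}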
/* MDDQ - Management DownStream Device Query Register
* --------------------------------------------------
 * This register allows querying the DownStream device properties. The desired
@@ -11643,7 +11474,11 @@ MLXSW_ITEM32(reg, mddq, sie, 0x00, 31, 1);
enum mlxsw_reg_mddq_query_type {
MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_INFO = 1,
- MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_NAME = 3,
+ MLXSW_REG_MDDQ_QUERY_TYPE_DEVICE_INFO, /* If there are no devices
+ * on the slot, data_valid
+ * will be '0'.
+ */
+ MLXSW_REG_MDDQ_QUERY_TYPE_SLOT_NAME,
};
/* reg_mddq_query_type
@@ -11657,6 +11492,28 @@ MLXSW_ITEM32(reg, mddq, query_type, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, mddq, slot_index, 0x00, 0, 4);
+/* reg_mddq_response_msg_seq
+ * Response message sequential number. For a specific request, the response
+ * message sequential number is the one following the request's. The last
+ * message should be 0.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, response_msg_seq, 0x04, 16, 8);
+
+/* reg_mddq_request_msg_seq
+ * Request message sequential number.
+ * The first message number should be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mddq, request_msg_seq, 0x04, 0, 8);
+
+/* reg_mddq_data_valid
+ * If set, the data in the data field is valid and contains the information
+ * for the queried index.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, data_valid, 0x08, 31, 1);
+
/* reg_mddq_slot_info_provisioned
* If set, the INI file is applied and the card is provisioned.
* Access: RO
@@ -11743,6 +11600,61 @@ mlxsw_reg_mddq_slot_info_unpack(const char *payload, u8 *p_slot_index,
*p_card_type = mlxsw_reg_mddq_slot_info_card_type_get(payload);
}
+/* reg_mddq_device_info_flash_owner
+ * If set, the device is the flash owner. Otherwise, a shared flash
+ * is used by this device (another device is the flash owner).
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_flash_owner, 0x10, 30, 1);
+
+/* reg_mddq_device_info_device_index
+ * Device index. The first device should be numbered 0.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_device_index, 0x10, 0, 8);
+
+/* reg_mddq_device_info_fw_major
+ * Major FW version number.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_fw_major, 0x14, 16, 16);
+
+/* reg_mddq_device_info_fw_minor
+ * Minor FW version number.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_fw_minor, 0x18, 16, 16);
+
+/* reg_mddq_device_info_fw_sub_minor
+ * Sub-minor FW version number.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mddq, device_info_fw_sub_minor, 0x18, 0, 16);
+
+static inline void
+mlxsw_reg_mddq_device_info_pack(char *payload, u8 slot_index,
+ u8 request_msg_seq)
+{
+ __mlxsw_reg_mddq_pack(payload, slot_index,
+ MLXSW_REG_MDDQ_QUERY_TYPE_DEVICE_INFO);
+ mlxsw_reg_mddq_request_msg_seq_set(payload, request_msg_seq);
+}
+
+static inline void
+mlxsw_reg_mddq_device_info_unpack(const char *payload, u8 *p_response_msg_seq,
+ bool *p_data_valid, bool *p_flash_owner,
+ u8 *p_device_index, u16 *p_fw_major,
+ u16 *p_fw_minor, u16 *p_fw_sub_minor)
+{
+ *p_response_msg_seq = mlxsw_reg_mddq_response_msg_seq_get(payload);
+ *p_data_valid = mlxsw_reg_mddq_data_valid_get(payload);
+ *p_flash_owner = mlxsw_reg_mddq_device_info_flash_owner_get(payload);
+ *p_device_index = mlxsw_reg_mddq_device_info_device_index_get(payload);
+ *p_fw_major = mlxsw_reg_mddq_device_info_fw_major_get(payload);
+ *p_fw_minor = mlxsw_reg_mddq_device_info_fw_minor_get(payload);
+ *p_fw_sub_minor = mlxsw_reg_mddq_device_info_fw_sub_minor_get(payload);
+}
+
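A minimal usage sketch of the new MDDQ device_info pack/unpack helpers, assuming MLXSW_REG_MDDQ_LEN and mlxsw_reg_query() as declared elsewhere in the driver; the function name and the enumeration policy below are hypothetical and not part of this patch:

static int example_mddq_enum_devices(struct mlxsw_core *mlxsw_core,
				     u8 slot_index)
{
	char mddq_pl[MLXSW_REG_MDDQ_LEN];
	u16 fw_major, fw_minor, fw_sub_minor;
	bool data_valid, flash_owner;
	u8 request_msg_seq = 0;
	u8 response_msg_seq;
	u8 device_index;
	int err;

	do {
		/* Each iteration queries one device_info message. */
		mlxsw_reg_mddq_device_info_pack(mddq_pl, slot_index,
						request_msg_seq);
		err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mddq), mddq_pl);
		if (err)
			return err;
		mlxsw_reg_mddq_device_info_unpack(mddq_pl, &response_msg_seq,
						  &data_valid, &flash_owner,
						  &device_index, &fw_major,
						  &fw_minor, &fw_sub_minor);
		if (!data_valid)
			break; /* No devices on this slot. */
		/* ... consume device_index and the FW version here ... */
		request_msg_seq = response_msg_seq;
	} while (response_msg_seq); /* Zero marks the last message. */

	return 0;
}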
#define MLXSW_REG_MDDQ_SLOT_ASCII_NAME_LEN 20
/* reg_mddq_slot_ascii_name
@@ -13011,6 +12923,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(spvmlr),
MLXSW_REG(spvc),
MLXSW_REG(spevet),
+ MLXSW_REG(smpe),
MLXSW_REG(sftr2),
MLXSW_REG(smid2),
MLXSW_REG(cwtp),
@@ -13084,16 +12997,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(rigr2),
MLXSW_REG(recr2),
MLXSW_REG(rmft2),
- MLXSW_REG(rxlte),
- MLXSW_REG(rxltm),
- MLXSW_REG(rlcmld),
- MLXSW_REG(rlpmce),
- MLXSW_REG(xltq),
- MLXSW_REG(xmdr),
- MLXSW_REG(xrmt),
- MLXSW_REG(xralta),
- MLXSW_REG(xralst),
- MLXSW_REG(xraltb),
+ MLXSW_REG(reiv),
MLXSW_REG(mfcr),
MLXSW_REG(mfsc),
MLXSW_REG(mfsm),
@@ -13124,9 +13028,11 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mtpppc),
MLXSW_REG(mtpptr),
MLXSW_REG(mtptpt),
+ MLXSW_REG(mtpcpc),
MLXSW_REG(mfgd),
MLXSW_REG(mgpir),
MLXSW_REG(mbct),
+ MLXSW_REG(mddt),
MLXSW_REG(mddq),
MLXSW_REG(mddc),
MLXSW_REG(mfde),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index daacf6291253..19ae0d1c74a8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -11,6 +11,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_KVD_SIZE,
MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE,
MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE,
+ MLXSW_RES_ID_PGT_SIZE,
MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE,
MLXSW_RES_ID_MAX_KVD_ACTION_SETS,
MLXSW_RES_ID_MAX_TRAP_GROUPS,
@@ -23,6 +24,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES,
MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC,
MLXSW_RES_ID_MAX_SYSTEM_PORT,
+ MLXSW_RES_ID_FID,
MLXSW_RES_ID_MAX_LAG,
MLXSW_RES_ID_MAX_LAG_MEMBERS,
MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER,
@@ -69,6 +71,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_KVD_SIZE] = 0x1001,
[MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002,
[MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003,
+ [MLXSW_RES_ID_PGT_SIZE] = 0x1004,
[MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE] = 0x1005,
[MLXSW_RES_ID_MAX_KVD_ACTION_SETS] = 0x1007,
[MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201,
@@ -81,6 +84,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES] = 0x2443,
[MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC] = 0x2449,
[MLXSW_RES_ID_MAX_SYSTEM_PORT] = 0x2502,
+ [MLXSW_RES_ID_FID] = 0x2512,
[MLXSW_RES_ID_MAX_LAG] = 0x2520,
[MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
[MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER] = 0x2805, /* Bytes */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index cafd206e8d7e..1e240cdd9cbd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -29,6 +29,7 @@
#include <net/pkt_cls.h>
#include <net/netevent.h>
#include <net/addrconf.h>
+#include <linux/ptp_classify.h>
#include "spectrum.h"
#include "pci.h"
@@ -166,7 +167,7 @@ MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
* set, otherwise calculated based on the packet's VID using VID to FID mapping.
* Valid for data packets only.
*/
-MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
+MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);
/* tx_hdr_type
* 0 - Data packets
@@ -230,8 +231,8 @@ void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
counter_index);
}
-static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
{
char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
@@ -246,6 +247,82 @@ static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
+int
+mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ char *txhdr;
+ u16 max_fid;
+ int err;
+
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
+ err = -ENOMEM;
+ goto err_skb_cow_head;
+ }
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) {
+ err = -EIO;
+ goto err_res_valid;
+ }
+ max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);
+
+ txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+ memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+ mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
+ mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+ mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
+ mlxsw_tx_hdr_fid_valid_set(txhdr, true);
+ mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1);
+ mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
+ return 0;
+
+err_res_valid:
+err_skb_cow_head:
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ return err;
+}
+
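For illustration only (the numbers are hypothetical): if the FID resource reports 4096 FIDs, a PTP data packet egressing through local port 3 is sent with FID 4096 + 3 - 1 = 4098 in its TX header, i.e. each local port is addressed by a dedicated FID just above the regular FID range.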
+static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
+{
+ unsigned int type;
+
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ return false;
+
+ type = ptp_classify_raw(skb);
+ return !!ptp_parse_header(skb, type);
+}
+
+static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+
+ /* In Spectrum-2 and Spectrum-3, PTP events that require a time stamp
+ * need special handling and cannot be transmitted as regular control
+ * packets.
+ */
+ if (unlikely(mlxsw_sp_skb_requires_ts(skb)))
+ return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core,
+ mlxsw_sp_port, skb,
+ tx_info);
+
+ if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ dev_kfree_skb_any(skb);
+ return -ENOMEM;
+ }
+
+ mlxsw_sp_txhdr_construct(skb, tx_info);
+ return 0;
+}
+
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
switch (state) {
@@ -648,12 +725,6 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
u64 len;
int err;
- if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
- this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));
if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
@@ -664,7 +735,11 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- mlxsw_sp_txhdr_construct(skb, &tx_info);
+ err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb,
+ &tx_info);
+ if (err)
+ return NETDEV_TX_OK;
+
/* TX header is consumed by HW on the way so we shouldn't count its
* bytes as being sent.
*/
@@ -1999,7 +2074,6 @@ __mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp)
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
- struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int i;
for (i = 1; i < max_ports; i++)
@@ -2007,12 +2081,10 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
/* Make sure all scheduled events are processed */
__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
- devl_lock(devlink);
for (i = 1; i < max_ports; i++)
if (mlxsw_sp_port_created(mlxsw_sp, i))
mlxsw_sp_port_remove(mlxsw_sp, i);
mlxsw_sp_cpu_port_remove(mlxsw_sp);
- devl_unlock(devlink);
kfree(mlxsw_sp->ports);
mlxsw_sp->ports = NULL;
}
@@ -2034,7 +2106,6 @@ mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core,
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
- struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp_port_mapping_events *events;
struct mlxsw_sp_port_mapping *port_mapping;
size_t alloc_size;
@@ -2057,7 +2128,6 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
goto err_event_enable;
}
- devl_lock(devlink);
err = mlxsw_sp_cpu_port_create(mlxsw_sp);
if (err)
goto err_cpu_port_create;
@@ -2070,7 +2140,6 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_port_create;
}
- devl_unlock(devlink);
return 0;
err_port_create:
@@ -2080,7 +2149,6 @@ err_port_create:
i = max_ports;
mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
- devl_unlock(devlink);
err_event_enable:
for (i--; i >= 1; i--)
mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
@@ -2105,9 +2173,6 @@ static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
return -ENOMEM;
for (i = 1; i < max_ports; i++) {
- if (mlxsw_core_port_is_xm(mlxsw_sp->core, i))
- continue;
-
port_mapping = &mlxsw_sp->port_mapping[i];
err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping);
if (err)
@@ -2676,6 +2741,7 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
.get_stats_count = mlxsw_sp1_get_stats_count,
.get_stats_strings = mlxsw_sp1_get_stats_strings,
.get_stats = mlxsw_sp1_get_stats,
+ .txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
@@ -2692,6 +2758,24 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
.get_stats_count = mlxsw_sp2_get_stats_count,
.get_stats_strings = mlxsw_sp2_get_stats_strings,
.get_stats = mlxsw_sp2_get_stats,
+ .txhdr_construct = mlxsw_sp2_ptp_txhdr_construct,
+};
+
+static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
+ .clock_init = mlxsw_sp2_ptp_clock_init,
+ .clock_fini = mlxsw_sp2_ptp_clock_fini,
+ .init = mlxsw_sp2_ptp_init,
+ .fini = mlxsw_sp2_ptp_fini,
+ .receive = mlxsw_sp2_ptp_receive,
+ .transmitted = mlxsw_sp2_ptp_transmitted,
+ .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
+ .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
+ .shaper_work = mlxsw_sp2_ptp_shaper_work,
+ .get_ts_info = mlxsw_sp2_ptp_get_ts_info,
+ .get_stats_count = mlxsw_sp2_get_stats_count,
+ .get_stats_strings = mlxsw_sp2_get_stats_strings,
+ .get_stats = mlxsw_sp2_get_stats,
+ .txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
struct mlxsw_sp_sample_trigger_node {
@@ -3013,6 +3097,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
return err;
}
+ err = mlxsw_sp_pgt_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PGT\n");
+ goto err_pgt_init;
+ }
+
err = mlxsw_sp_fids_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
@@ -3100,7 +3190,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_router_init;
}
- if (mlxsw_sp->bus_info->read_frc_capable) {
+ if (mlxsw_sp->bus_info->read_clock_capable) {
/* NULL is a valid return value from clock_init */
mlxsw_sp->clock =
mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
@@ -3204,6 +3294,8 @@ err_traps_init:
err_policers_init:
mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
+ mlxsw_sp_pgt_fini(mlxsw_sp);
+err_pgt_init:
mlxsw_sp_kvdl_fini(mlxsw_sp);
mlxsw_sp_parsing_fini(mlxsw_sp);
return err;
@@ -3235,7 +3327,9 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
mlxsw_sp->listeners = mlxsw_sp1_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
+ mlxsw_sp->fid_family_arr = mlxsw_sp1_fid_family_arr;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
+ mlxsw_sp->pgt_smpe_index_valid = true;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -3267,7 +3361,9 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
mlxsw_sp->listeners = mlxsw_sp2_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
+ mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
+ mlxsw_sp->pgt_smpe_index_valid = false;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -3299,7 +3395,9 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
mlxsw_sp->listeners = mlxsw_sp2_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
+ mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
+ mlxsw_sp->pgt_smpe_index_valid = false;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -3323,7 +3421,7 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
- mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
+ mlxsw_sp->ptp_ops = &mlxsw_sp4_ptp_ops;
mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
@@ -3331,7 +3429,9 @@ static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
mlxsw_sp->listeners = mlxsw_sp2_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
+ mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
+ mlxsw_sp->pgt_smpe_index_valid = false;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
@@ -3364,28 +3464,20 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_policers_fini(mlxsw_sp);
mlxsw_sp_fids_fini(mlxsw_sp);
+ mlxsw_sp_pgt_fini(mlxsw_sp);
mlxsw_sp_kvdl_fini(mlxsw_sp);
mlxsw_sp_parsing_fini(mlxsw_sp);
}
-/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
- * 802.1Q FIDs
- */
-#define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \
- VLAN_VID_MASK - 1)
-
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
- .used_max_mid = 1,
- .max_mid = MLXSW_SP_MID_MAX,
- .used_flood_tables = 1,
- .used_flood_mode = 1,
- .flood_mode = 3,
- .max_fid_flood_tables = 3,
- .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
+ .used_flood_mode = 1,
+ .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
.used_max_ib_mc = 1,
.max_ib_mc = 0,
.used_max_pkey = 1,
.max_pkey = 0,
+ .used_ubridge = 1,
+ .ubridge = 1,
.used_kvd_sizes = 1,
.kvd_hash_single_parts = 59,
.kvd_hash_double_parts = 41,
@@ -3399,25 +3491,22 @@ static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
};
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
- .used_max_mid = 1,
- .max_mid = MLXSW_SP_MID_MAX,
- .used_flood_tables = 1,
- .used_flood_mode = 1,
- .flood_mode = 3,
- .max_fid_flood_tables = 3,
- .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
+ .used_flood_mode = 1,
+ .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
.used_max_ib_mc = 1,
.max_ib_mc = 0,
.used_max_pkey = 1,
.max_pkey = 0,
- .used_kvh_xlt_cache_mode = 1,
- .kvh_xlt_cache_mode = 1,
+ .used_ubridge = 1,
+ .ubridge = 1,
.swid_config = {
{
.used_type = 1,
.type = MLXSW_PORT_SWID_TYPE_ETH,
}
},
+ .used_cqe_time_stamp_type = 1,
+ .cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
};
static void
@@ -3477,19 +3566,19 @@ static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
&hash_single_size_params);
kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
- kvd_size, MLXSW_SP_RESOURCE_KVD,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &kvd_size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
+ kvd_size, MLXSW_SP_RESOURCE_KVD,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &kvd_size_params);
if (err)
return err;
linear_size = profile->kvd_linear_size;
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
- linear_size,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- MLXSW_SP_RESOURCE_KVD,
- &linear_size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
+ linear_size,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ MLXSW_SP_RESOURCE_KVD,
+ &linear_size_params);
if (err)
return err;
@@ -3502,20 +3591,20 @@ static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
double_size /= profile->kvd_hash_double_parts +
profile->kvd_hash_single_parts;
double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
- double_size,
- MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
- MLXSW_SP_RESOURCE_KVD,
- &hash_double_size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
+ double_size,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ MLXSW_SP_RESOURCE_KVD,
+ &hash_double_size_params);
if (err)
return err;
single_size = kvd_size - double_size - linear_size;
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
- single_size,
- MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
- MLXSW_SP_RESOURCE_KVD,
- &hash_single_size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
+ single_size,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ MLXSW_SP_RESOURCE_KVD,
+ &hash_single_size_params);
if (err)
return err;
@@ -3536,10 +3625,10 @@ static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
MLXSW_SP_KVD_GRANULARITY,
DEVLINK_RESOURCE_UNIT_ENTRY);
- return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
- kvd_size, MLXSW_SP_RESOURCE_KVD,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &kvd_size_params);
+ return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
+ kvd_size, MLXSW_SP_RESOURCE_KVD,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &kvd_size_params);
}
static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
@@ -3555,10 +3644,10 @@ static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
devlink_resource_size_params_init(&span_size_params, max_span, max_span,
1, DEVLINK_RESOURCE_UNIT_ENTRY);
- return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
- max_span, MLXSW_SP_RESOURCE_SPAN,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &span_size_params);
+ return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
+ max_span, MLXSW_SP_RESOURCE_SPAN,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &span_size_params);
}
static int
@@ -3577,12 +3666,31 @@ mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core)
max_rif_mac_profiles, 1,
DEVLINK_RESOURCE_UNIT_ENTRY);
- return devlink_resource_register(devlink,
- "rif_mac_profiles",
- max_rif_mac_profiles,
- MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &size_params);
+ return devl_resource_register(devlink,
+ "rif_mac_profiles",
+ max_rif_mac_profiles,
+ MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+}
+
+static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_resource_size_params size_params;
+ u64 max_rifs;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIFS))
+ return -EIO;
+
+ max_rifs = MLXSW_CORE_RES_GET(mlxsw_core, MAX_RIFS);
+ devlink_resource_size_params_init(&size_params, max_rifs, max_rifs,
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ return devl_resource_register(devlink, "rifs", max_rifs,
+ MLXSW_SP_RESOURCE_RIFS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
}
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
@@ -3609,13 +3717,18 @@ static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
if (err)
goto err_resources_rif_mac_profile_register;
+ err = mlxsw_sp_resources_rifs_register(mlxsw_core);
+ if (err)
+ goto err_resources_rifs_register;
+
return 0;
+err_resources_rifs_register:
err_resources_rif_mac_profile_register:
err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
- devlink_resources_unregister(priv_to_devlink(mlxsw_core));
+ devl_resources_unregister(priv_to_devlink(mlxsw_core));
return err;
}
@@ -3643,13 +3756,18 @@ static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
if (err)
goto err_resources_rif_mac_profile_register;
+ err = mlxsw_sp_resources_rifs_register(mlxsw_core);
+ if (err)
+ goto err_resources_rifs_register;
+
return 0;
+err_resources_rifs_register:
err_resources_rif_mac_profile_register:
err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
- devlink_resources_unregister(priv_to_devlink(mlxsw_core));
+ devl_resources_unregister(priv_to_devlink(mlxsw_core));
return err;
}
@@ -3673,15 +3791,15 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
* granularity from the profile. In case the user
* provided the sizes they are obtained via devlink.
*/
- err = devlink_resource_size_get(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- p_linear_size);
+ err = devl_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ p_linear_size);
if (err)
*p_linear_size = profile->kvd_linear_size;
- err = devlink_resource_size_get(devlink,
- MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
- p_double_size);
+ err = devl_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ p_double_size);
if (err) {
double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
*p_linear_size;
@@ -3692,9 +3810,9 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
MLXSW_SP_KVD_GRANULARITY);
}
- err = devlink_resource_size_get(devlink,
- MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
- p_single_size);
+ err = devl_resource_size_get(devlink,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ p_single_size);
if (err)
*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
*p_double_size - *p_linear_size;
@@ -3807,6 +3925,7 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp1_config_profile,
+ .sdq_supports_cqe_v2 = false,
};
static struct mlxsw_driver mlxsw_sp2_driver = {
@@ -3845,6 +3964,7 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
+ .sdq_supports_cqe_v2 = true,
};
static struct mlxsw_driver mlxsw_sp3_driver = {
@@ -3883,6 +4003,7 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
+ .sdq_supports_cqe_v2 = true,
};
static struct mlxsw_driver mlxsw_sp4_driver = {
@@ -3919,6 +4040,7 @@ static struct mlxsw_driver mlxsw_sp4_driver = {
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
+ .sdq_supports_cqe_v2 = true,
};
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index a60d2bbd3aa6..c8ff2a6d7e90 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -68,6 +68,7 @@ enum mlxsw_sp_resource_id {
MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
+ MLXSW_SP_RESOURCE_RIFS,
};
struct mlxsw_sp_port;
@@ -111,15 +112,6 @@ enum mlxsw_sp_nve_type {
MLXSW_SP_NVE_TYPE_VXLAN,
};
-struct mlxsw_sp_mid {
- struct list_head list;
- unsigned char addr[ETH_ALEN];
- u16 fid;
- u16 mid;
- bool in_hw;
- unsigned long *ports_in_mid; /* bits array */
-};
-
struct mlxsw_sp_sb;
struct mlxsw_sp_bridge;
struct mlxsw_sp_router;
@@ -142,6 +134,7 @@ struct mlxsw_sp_ptp_ops;
struct mlxsw_sp_span_ops;
struct mlxsw_sp_qdisc_state;
struct mlxsw_sp_mall_entry;
+struct mlxsw_sp_pgt;
struct mlxsw_sp_port_mapping {
u8 module;
@@ -210,10 +203,13 @@ struct mlxsw_sp {
const struct mlxsw_sp_mall_ops *mall_ops;
const struct mlxsw_sp_router_ops *router_ops;
const struct mlxsw_listener *listeners;
+ const struct mlxsw_sp_fid_family **fid_family_arr;
size_t listeners_count;
u32 lowest_shaper_bs;
struct rhashtable ipv6_addr_ht;
struct mutex ipv6_addr_ht_lock; /* Protects ipv6_addr_ht */
+ struct mlxsw_sp_pgt *pgt;
+ bool pgt_smpe_index_valid;
};
struct mlxsw_sp_ptp_ops {
@@ -247,6 +243,10 @@ struct mlxsw_sp_ptp_ops {
void (*get_stats_strings)(u8 **p);
void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *data, int data_index);
+ int (*txhdr_construct)(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
};
static inline struct mlxsw_sp_upper *
@@ -389,6 +389,31 @@ struct mlxsw_sp_port_type_speed_ops {
u32 (*ptys_proto_cap_masked_get)(u32 eth_proto_cap);
};
+struct mlxsw_sp_ports_bitmap {
+ unsigned long *bitmap;
+ unsigned int nbits;
+};
+
+static inline int
+mlxsw_sp_port_bitmap_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ports_bitmap *ports_bm)
+{
+ unsigned int nbits = mlxsw_core_max_ports(mlxsw_sp->core);
+
+ ports_bm->nbits = nbits;
+ ports_bm->bitmap = bitmap_zalloc(nbits, GFP_KERNEL);
+ if (!ports_bm->bitmap)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void
+mlxsw_sp_port_bitmap_fini(struct mlxsw_sp_ports_bitmap *ports_bm)
+{
+ bitmap_free(ports_bm->bitmap);
+}
+
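A minimal usage sketch of the ports bitmap helpers above; the caller and the port it marks are hypothetical:

static int example_collect_ports(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_ports_bitmap ports_bm;
	unsigned int port;
	int err;

	err = mlxsw_sp_port_bitmap_init(mlxsw_sp, &ports_bm);
	if (err)
		return err;

	/* Mark the ports of interest; local port 1 is just an example. */
	set_bit(1, ports_bm.bitmap);

	/* Walk the marked ports in one pass. */
	for_each_set_bit(port, ports_bm.bitmap, ports_bm.nbits) {
		/* Program 'port' here. */
	}

	mlxsw_sp_port_bitmap_fini(&ports_bm);
	return 0;
}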
static inline u8 mlxsw_sp_tunnel_ecn_decap(u8 outer_ecn, u8 inner_ecn,
bool *trap_en)
{
@@ -679,6 +704,12 @@ int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
unsigned int *p_counter_index);
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index);
+void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+int mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
bool mlxsw_sp_port_dev_check(const struct net_device *dev);
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
@@ -715,6 +746,7 @@ union mlxsw_sp_l3addr {
struct in6_addr addr6;
};
+u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
struct netlink_ext_ack *extack);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
@@ -1236,7 +1268,6 @@ int mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port *mlxsw_sp_port,
/* spectrum_fid.c */
bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index);
-bool mlxsw_sp_fid_lag_vid_valid(const struct mlxsw_sp_fid *fid);
struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
u16 fid_index);
int mlxsw_sp_fid_nve_ifindex(const struct mlxsw_sp_fid *fid, int *nve_ifindex);
@@ -1264,7 +1295,8 @@ void mlxsw_sp_fid_port_vid_unmap(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
u16 mlxsw_sp_fid_index(const struct mlxsw_sp_fid *fid);
enum mlxsw_sp_fid_type mlxsw_sp_fid_type(const struct mlxsw_sp_fid *fid);
-void mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif);
+int mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif);
+void mlxsw_sp_fid_rif_unset(struct mlxsw_sp_fid *fid);
struct mlxsw_sp_rif *mlxsw_sp_fid_rif(const struct mlxsw_sp_fid *fid);
enum mlxsw_sp_rif_type
mlxsw_sp_fid_type_rif_type(const struct mlxsw_sp *mlxsw_sp,
@@ -1286,6 +1318,9 @@ void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp);
+extern const struct mlxsw_sp_fid_family *mlxsw_sp1_fid_family_arr[];
+extern const struct mlxsw_sp_fid_family *mlxsw_sp2_fid_family_arr[];
+
/* spectrum_mr.c */
enum mlxsw_sp_mr_route_prio {
MLXSW_SP_MR_ROUTE_PRIO_SG,
@@ -1443,4 +1478,16 @@ int mlxsw_sp_policers_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_policers_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_policer_resources_register(struct mlxsw_core *mlxsw_core);
+/* spectrum_pgt.c */
+int mlxsw_sp_pgt_mid_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_mid);
+void mlxsw_sp_pgt_mid_free(struct mlxsw_sp *mlxsw_sp, u16 mid_base);
+int mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base,
+ u16 count);
+void mlxsw_sp_pgt_mid_free_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base,
+ u16 count);
+int mlxsw_sp_pgt_entry_port_set(struct mlxsw_sp *mlxsw_sp, u16 mid,
+ u16 smpe, u16 local_port, bool member);
+int mlxsw_sp_pgt_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_pgt_fini(struct mlxsw_sp *mlxsw_sp);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
index d20e794e01ca..1e3fc989393c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
@@ -216,8 +216,8 @@ mlxsw_sp1_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
u64 resource_size;
int err;
- err = devlink_resource_size_get(devlink, info->resource_id,
- &resource_size);
+ err = devl_resource_size_get(devlink, info->resource_id,
+ &resource_size);
if (err) {
need_update = false;
resource_size = info->end_index - info->start_index + 1;
@@ -338,22 +338,22 @@ static int mlxsw_sp1_kvdl_init(struct mlxsw_sp *mlxsw_sp, void *priv)
err = mlxsw_sp1_kvdl_parts_init(mlxsw_sp, kvdl);
if (err)
return err;
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- mlxsw_sp1_kvdl_occ_get,
- kvdl);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
- mlxsw_sp1_kvdl_single_occ_get,
- kvdl);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
- mlxsw_sp1_kvdl_chunks_occ_get,
- kvdl);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
- mlxsw_sp1_kvdl_large_chunks_occ_get,
- kvdl);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ mlxsw_sp1_kvdl_occ_get,
+ kvdl);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ mlxsw_sp1_kvdl_single_occ_get,
+ kvdl);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ mlxsw_sp1_kvdl_chunks_occ_get,
+ kvdl);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ mlxsw_sp1_kvdl_large_chunks_occ_get,
+ kvdl);
return 0;
}
@@ -362,14 +362,14 @@ static void mlxsw_sp1_kvdl_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp1_kvdl *kvdl = priv;
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_KVD_LINEAR);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_KVD_LINEAR);
mlxsw_sp1_kvdl_parts_fini(kvdl);
}
@@ -396,32 +396,32 @@ int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
MLXSW_SP1_KVDL_SINGLE_ALLOC_SIZE,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
- MLXSW_SP1_KVDL_SINGLE_SIZE,
- MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
+ MLXSW_SP1_KVDL_SINGLE_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params);
if (err)
return err;
devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
MLXSW_SP1_KVDL_CHUNKS_ALLOC_SIZE,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS,
- MLXSW_SP1_KVDL_CHUNKS_SIZE,
- MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS,
+ MLXSW_SP1_KVDL_CHUNKS_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params);
if (err)
return err;
devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
MLXSW_SP1_KVDL_LARGE_CHUNKS_ALLOC_SIZE,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS,
- MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE,
- MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- &size_params);
+ err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS,
+ MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
index 10ae1115de6c..24ff305a2995 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
@@ -15,7 +15,7 @@ struct mlxsw_sp2_kvdl_part_info {
* usage bits we need and how many indexes there are
* represented by a single bit. This could be got from FW
* querying appropriate resources. So have the resource
- * ids for for this purpose in partition definition.
+ * ids for this purpose in partition definition.
*/
enum mlxsw_res_id usage_bit_count_res_id;
enum mlxsw_res_id index_range_res_id;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index c68fc8f7ca99..c9f1c79f3f9d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -1290,12 +1290,12 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_sb_mms_init;
mlxsw_sp_pool_count(mlxsw_sp, &ing_pool_count, &eg_pool_count);
- err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
- mlxsw_sp->sb->sb_size,
- ing_pool_count,
- eg_pool_count,
- MLXSW_SP_SB_ING_TC_COUNT,
- MLXSW_SP_SB_EG_TC_COUNT);
+ err = devl_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
+ mlxsw_sp->sb->sb_size,
+ ing_pool_count,
+ eg_pool_count,
+ MLXSW_SP_SB_ING_TC_COUNT,
+ MLXSW_SP_SB_EG_TC_COUNT);
if (err)
goto err_devlink_sb_register;
@@ -1314,7 +1314,7 @@ err_sb_ports_init:
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
- devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
+ devl_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
mlxsw_sp_sb_ports_fini(mlxsw_sp);
kfree(mlxsw_sp->sb);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
index fc2257753b9b..ee59c79156e4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -67,16 +67,16 @@ static int mlxsw_sp_counter_sub_pools_init(struct mlxsw_sp *mlxsw_sp)
return -EIO;
sub_pool->entry_size = mlxsw_core_res_get(mlxsw_sp->core,
res_id);
- err = devlink_resource_size_get(devlink,
- sub_pool->resource_id,
- &sub_pool->size);
+ err = devl_resource_size_get(devlink,
+ sub_pool->resource_id,
+ &sub_pool->size);
if (err)
goto err_resource_size_get;
- devlink_resource_occ_get_register(devlink,
- sub_pool->resource_id,
- mlxsw_sp_counter_sub_pool_occ_get,
- sub_pool);
+ devl_resource_occ_get_register(devlink,
+ sub_pool->resource_id,
+ mlxsw_sp_counter_sub_pool_occ_get,
+ sub_pool);
sub_pool->base_index = base_index;
base_index += sub_pool->size;
@@ -88,8 +88,8 @@ err_resource_size_get:
for (i--; i >= 0; i--) {
sub_pool = &pool->sub_pools[i];
- devlink_resource_occ_get_unregister(devlink,
- sub_pool->resource_id);
+ devl_resource_occ_get_unregister(devlink,
+ sub_pool->resource_id);
}
return err;
}
@@ -105,8 +105,8 @@ static void mlxsw_sp_counter_sub_pools_fini(struct mlxsw_sp *mlxsw_sp)
sub_pool = &pool->sub_pools[i];
WARN_ON(atomic_read(&sub_pool->active_entries_count));
- devlink_resource_occ_get_unregister(devlink,
- sub_pool->resource_id);
+ devl_resource_occ_get_unregister(devlink,
+ sub_pool->resource_id);
}
}
@@ -135,12 +135,12 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
spin_lock_init(&pool->counter_pool_lock);
atomic_set(&pool->active_entries_count, 0);
- err = devlink_resource_size_get(devlink, MLXSW_SP_RESOURCE_COUNTERS,
- &pool->pool_size);
+ err = devl_resource_size_get(devlink, MLXSW_SP_RESOURCE_COUNTERS,
+ &pool->pool_size);
if (err)
goto err_pool_resource_size_get;
- devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_COUNTERS,
- mlxsw_sp_counter_pool_occ_get, pool);
+ devl_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_COUNTERS,
+ mlxsw_sp_counter_pool_occ_get, pool);
pool->usage = bitmap_zalloc(pool->pool_size, GFP_KERNEL);
if (!pool->usage) {
@@ -157,8 +157,8 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
err_sub_pools_init:
bitmap_free(pool->usage);
err_usage_alloc:
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_COUNTERS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_COUNTERS);
err_pool_resource_size_get:
kfree(pool);
return err;
@@ -174,8 +174,8 @@ void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
pool->pool_size);
WARN_ON(atomic_read(&pool->active_entries_count));
bitmap_free(pool->usage);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_COUNTERS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_COUNTERS);
kfree(pool);
}
@@ -262,12 +262,12 @@ int mlxsw_sp_counter_resources_register(struct mlxsw_core *mlxsw_core)
devlink_resource_size_params_init(&size_params, pool_size,
pool_size, bank_size,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink,
- MLXSW_SP_RESOURCE_NAME_COUNTERS,
- pool_size,
- MLXSW_SP_RESOURCE_COUNTERS,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &size_params);
+ err = devl_resource_register(devlink,
+ MLXSW_SP_RESOURCE_NAME_COUNTERS,
+ pool_size,
+ MLXSW_SP_RESOURCE_COUNTERS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
if (err)
return err;
@@ -287,12 +287,12 @@ int mlxsw_sp_counter_resources_register(struct mlxsw_core *mlxsw_core)
devlink_resource_size_params_init(&size_params, sub_pool_size,
sub_pool_size, bank_size,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink,
- sub_pool->resource_name,
- sub_pool_size,
- sub_pool->resource_id,
- MLXSW_SP_RESOURCE_COUNTERS,
- &size_params);
+ err = devl_resource_register(devlink,
+ sub_pool->resource_name,
+ sub_pool_size,
+ sub_pool->resource_id,
+ MLXSW_SP_RESOURCE_COUNTERS,
+ &size_params);
if (err)
return err;
total_bank_config += sub_pool->bank_count;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index 5d494fabf93d..5416093c0e35 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -295,17 +295,17 @@ static int mlxsw_sp_dpipe_erif_table_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- return devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ERIF,
- &mlxsw_sp_erif_ops,
- mlxsw_sp, false);
+ return devl_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ERIF,
+ &mlxsw_sp_erif_ops,
+ mlxsw_sp, false);
}
static void mlxsw_sp_dpipe_erif_table_fini(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- devlink_dpipe_table_unregister(devlink, MLXSW_SP_DPIPE_TABLE_NAME_ERIF);
+ devl_dpipe_table_unregister(devlink, MLXSW_SP_DPIPE_TABLE_NAME_ERIF);
}
static int mlxsw_sp_dpipe_table_host_matches_dump(struct sk_buff *skb, int type)
@@ -749,25 +749,25 @@ static int mlxsw_sp_dpipe_host4_table_init(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int err;
- err = devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
- &mlxsw_sp_host4_ops,
- mlxsw_sp, false);
+ err = devl_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
+ &mlxsw_sp_host4_ops,
+ mlxsw_sp, false);
if (err)
return err;
- err = devlink_dpipe_table_resource_set(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
- MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
- MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4);
+ err = devl_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4,
+ MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST4);
if (err)
goto err_resource_set;
return 0;
err_resource_set:
- devlink_dpipe_table_unregister(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST4);
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4);
return err;
}
@@ -775,8 +775,8 @@ static void mlxsw_sp_dpipe_host4_table_fini(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- devlink_dpipe_table_unregister(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST4);
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST4);
}
static int
@@ -826,25 +826,25 @@ static int mlxsw_sp_dpipe_host6_table_init(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int err;
- err = devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
- &mlxsw_sp_host6_ops,
- mlxsw_sp, false);
+ err = devl_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
+ &mlxsw_sp_host6_ops,
+ mlxsw_sp, false);
if (err)
return err;
- err = devlink_dpipe_table_resource_set(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
- MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
- MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6);
+ err = devl_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6,
+ MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_HOST6);
if (err)
goto err_resource_set;
return 0;
err_resource_set:
- devlink_dpipe_table_unregister(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
return err;
}
@@ -852,8 +852,8 @@ static void mlxsw_sp_dpipe_host6_table_fini(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- devlink_dpipe_table_unregister(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
}
static int mlxsw_sp_dpipe_table_adj_matches_dump(void *priv,
@@ -1231,25 +1231,25 @@ static int mlxsw_sp_dpipe_adj_table_init(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int err;
- err = devlink_dpipe_table_register(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
- &mlxsw_sp_dpipe_table_adj_ops,
- mlxsw_sp, false);
+ err = devl_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
+ &mlxsw_sp_dpipe_table_adj_ops,
+ mlxsw_sp, false);
if (err)
return err;
- err = devlink_dpipe_table_resource_set(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
- MLXSW_SP_RESOURCE_KVD_LINEAR,
- MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ);
+ err = devl_dpipe_table_resource_set(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ MLXSW_SP_DPIPE_TABLE_RESOURCE_UNIT_ADJ);
if (err)
goto err_resource_set;
return 0;
err_resource_set:
- devlink_dpipe_table_unregister(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
return err;
}
@@ -1257,8 +1257,8 @@ static void mlxsw_sp_dpipe_adj_table_fini(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
- devlink_dpipe_table_unregister(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
+ devl_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
}
int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp)
@@ -1266,10 +1266,8 @@ int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int err;
- err = devlink_dpipe_headers_register(devlink,
- &mlxsw_sp_dpipe_headers);
- if (err)
- return err;
+ devl_dpipe_headers_register(devlink, &mlxsw_sp_dpipe_headers);
+
err = mlxsw_sp_dpipe_erif_table_init(mlxsw_sp);
if (err)
goto err_erif_table_init;
@@ -1294,7 +1292,7 @@ err_host6_table_init:
err_host4_table_init:
mlxsw_sp_dpipe_erif_table_fini(mlxsw_sp);
err_erif_table_init:
- devlink_dpipe_headers_unregister(priv_to_devlink(mlxsw_sp->core));
+ devl_dpipe_headers_unregister(priv_to_devlink(mlxsw_sp->core));
return err;
}
@@ -1306,5 +1304,5 @@ void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_dpipe_host6_table_fini(mlxsw_sp);
mlxsw_sp_dpipe_host4_table_fini(mlxsw_sp);
mlxsw_sp_dpipe_erif_table_fini(mlxsw_sp);
- devlink_dpipe_headers_unregister(devlink);
+ devl_dpipe_headers_unregister(devlink);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index ce80931f0402..045a24cacfa5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -22,11 +22,18 @@ struct mlxsw_sp_fid_core {
unsigned int *port_fid_mappings;
};
+struct mlxsw_sp_fid_port_vid {
+ struct list_head list;
+ u16 local_port;
+ u16 vid;
+};
+
struct mlxsw_sp_fid {
struct list_head list;
struct mlxsw_sp_rif *rif;
refcount_t ref_count;
u16 fid_index;
+ u16 fid_offset;
struct mlxsw_sp_fid_family *fid_family;
struct rhash_head ht_node;
@@ -37,6 +44,7 @@ struct mlxsw_sp_fid {
int nve_ifindex;
u8 vni_valid:1,
nve_flood_index_valid:1;
+ struct list_head port_vid_list; /* Ordered by local port. */
};
struct mlxsw_sp_fid_8021q {
@@ -63,7 +71,6 @@ static const struct rhashtable_params mlxsw_sp_fid_vni_ht_params = {
struct mlxsw_sp_flood_table {
enum mlxsw_sp_flood_type packet_type;
- enum mlxsw_reg_sfgc_bridge_type bridge_type;
enum mlxsw_flood_table_type table_type;
int table_index;
};
@@ -76,18 +83,18 @@ struct mlxsw_sp_fid_ops {
u16 *p_fid_index);
bool (*compare)(const struct mlxsw_sp_fid *fid,
const void *arg);
- u16 (*flood_index)(const struct mlxsw_sp_fid *fid);
int (*port_vid_map)(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *port, u16 vid);
void (*port_vid_unmap)(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *port, u16 vid);
- int (*vni_set)(struct mlxsw_sp_fid *fid, __be32 vni);
+ int (*vni_set)(struct mlxsw_sp_fid *fid);
void (*vni_clear)(struct mlxsw_sp_fid *fid);
- int (*nve_flood_index_set)(struct mlxsw_sp_fid *fid,
- u32 nve_flood_index);
+ int (*nve_flood_index_set)(struct mlxsw_sp_fid *fid);
void (*nve_flood_index_clear)(struct mlxsw_sp_fid *fid);
void (*fdb_clear_offload)(const struct mlxsw_sp_fid *fid,
const struct net_device *nve_dev);
+ int (*vid_to_fid_rif_update)(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif);
};
struct mlxsw_sp_fid_family {
@@ -102,7 +109,10 @@ struct mlxsw_sp_fid_family {
enum mlxsw_sp_rif_type rif_type;
const struct mlxsw_sp_fid_ops *ops;
struct mlxsw_sp *mlxsw_sp;
- u8 lag_vid_valid:1;
+ bool flood_rsp;
+ enum mlxsw_reg_bridge_type bridge_type;
+ u16 pgt_base;
+ bool smpe_index_valid;
};
static const int mlxsw_sp_sfgc_uc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
@@ -137,11 +147,6 @@ bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index)
return fid_family->start_index == fid_index;
}
-bool mlxsw_sp_fid_lag_vid_valid(const struct mlxsw_sp_fid *fid)
-{
- return fid->fid_family->lag_vid_valid;
-}
-
struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
u16 fid_index)
{
@@ -206,17 +211,20 @@ int mlxsw_sp_fid_nve_flood_index_set(struct mlxsw_sp_fid *fid,
const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
int err;
- if (WARN_ON(!ops->nve_flood_index_set || fid->nve_flood_index_valid))
+ if (WARN_ON(fid->nve_flood_index_valid))
return -EINVAL;
- err = ops->nve_flood_index_set(fid, nve_flood_index);
- if (err)
- return err;
-
fid->nve_flood_index = nve_flood_index;
fid->nve_flood_index_valid = true;
+ err = ops->nve_flood_index_set(fid);
+ if (err)
+ goto err_nve_flood_index_set;
return 0;
+
+err_nve_flood_index_set:
+ fid->nve_flood_index_valid = false;
+ return err;
}
void mlxsw_sp_fid_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
@@ -224,7 +232,7 @@ void mlxsw_sp_fid_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
- if (WARN_ON(!ops->nve_flood_index_clear || !fid->nve_flood_index_valid))
+ if (WARN_ON(!fid->nve_flood_index_valid))
return;
fid->nve_flood_index_valid = false;
@@ -244,7 +252,7 @@ int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, enum mlxsw_sp_nve_type type,
struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
int err;
- if (WARN_ON(!ops->vni_set || fid->vni_valid))
+ if (WARN_ON(fid->vni_valid))
return -EINVAL;
fid->nve_type = type;
@@ -256,15 +264,15 @@ int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, enum mlxsw_sp_nve_type type,
if (err)
return err;
- err = ops->vni_set(fid, vni);
+ fid->vni_valid = true;
+ err = ops->vni_set(fid);
if (err)
goto err_vni_set;
- fid->vni_valid = true;
-
return 0;
err_vni_set:
+ fid->vni_valid = false;
rhashtable_remove_fast(&mlxsw_sp->fid_core->vni_ht, &fid->vni_ht_node,
mlxsw_sp_fid_vni_ht_params);
return err;
@@ -276,7 +284,7 @@ void mlxsw_sp_fid_vni_clear(struct mlxsw_sp_fid *fid)
const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
- if (WARN_ON(!ops->vni_clear || !fid->vni_valid))
+ if (WARN_ON(!fid->vni_valid))
return;
fid->vni_valid = false;
@@ -316,34 +324,43 @@ mlxsw_sp_fid_flood_table_lookup(const struct mlxsw_sp_fid *fid,
return NULL;
}
+static u16
+mlxsw_sp_fid_family_num_fids(const struct mlxsw_sp_fid_family *fid_family)
+{
+ return fid_family->end_index - fid_family->start_index + 1;
+}
+
+static u16
+mlxsw_sp_fid_flood_table_mid(const struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_flood_table *flood_table,
+ u16 fid_offset)
+{
+ u16 num_fids;
+
+ num_fids = mlxsw_sp_fid_family_num_fids(fid_family);
+ return fid_family->pgt_base + num_fids * flood_table->table_index +
+ fid_offset;
+}
+
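For illustration (hypothetical numbers): a FID family with pgt_base 0 spanning 4096 FIDs reserves a contiguous block of 4096 PGT entries per flood table, so flood table index 1 and fid_offset 10 resolve to PGT entry 0 + 4096 * 1 + 10 = 4106.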
int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
enum mlxsw_sp_flood_type packet_type, u16 local_port,
bool member)
{
struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
- const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
const struct mlxsw_sp_flood_table *flood_table;
- char *sftr2_pl;
- int err;
+ u16 mid_index;
- if (WARN_ON(!fid_family->flood_tables || !ops->flood_index))
+ if (WARN_ON(!fid_family->flood_tables))
return -EINVAL;
flood_table = mlxsw_sp_fid_flood_table_lookup(fid, packet_type);
if (!flood_table)
return -ESRCH;
- sftr2_pl = kmalloc(MLXSW_REG_SFTR2_LEN, GFP_KERNEL);
- if (!sftr2_pl)
- return -ENOMEM;
-
- mlxsw_reg_sftr2_pack(sftr2_pl, flood_table->table_index,
- ops->flood_index(fid), flood_table->table_type, 1,
- local_port, member);
- err = mlxsw_reg_write(fid_family->mlxsw_sp->core, MLXSW_REG(sftr2),
- sftr2_pl);
- kfree(sftr2_pl);
- return err;
+ mid_index = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table,
+ fid->fid_offset);
+ return mlxsw_sp_pgt_entry_port_set(fid_family->mlxsw_sp, mid_index,
+ fid->fid_index, local_port, member);
}
int mlxsw_sp_fid_port_vid_map(struct mlxsw_sp_fid *fid,
@@ -370,11 +387,6 @@ enum mlxsw_sp_fid_type mlxsw_sp_fid_type(const struct mlxsw_sp_fid *fid)
return fid->fid_family->type;
}
-void mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif)
-{
- fid->rif = rif;
-}
-
struct mlxsw_sp_rif *mlxsw_sp_fid_rif(const struct mlxsw_sp_fid *fid)
{
return fid->rif;
@@ -405,6 +417,7 @@ static void mlxsw_sp_fid_8021q_setup(struct mlxsw_sp_fid *fid, const void *arg)
u16 vid = *(u16 *) arg;
mlxsw_sp_fid_8021q_fid(fid)->vid = vid;
+ fid->fid_offset = fid->fid_index - fid->fid_family->start_index;
}
static enum mlxsw_reg_sfmr_op mlxsw_sp_sfmr_op(bool valid)
@@ -413,38 +426,341 @@ static enum mlxsw_reg_sfmr_op mlxsw_sp_sfmr_op(bool valid)
MLXSW_REG_SFMR_OP_DESTROY_FID;
}
-static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
- u16 fid_offset, bool valid)
+static int mlxsw_sp_fid_op(const struct mlxsw_sp_fid *fid, bool valid)
{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
char sfmr_pl[MLXSW_REG_SFMR_LEN];
+ u16 smpe;
+
+ smpe = fid->fid_family->smpe_index_valid ? fid->fid_index : 0;
- mlxsw_reg_sfmr_pack(sfmr_pl, mlxsw_sp_sfmr_op(valid), fid_index,
- fid_offset);
+ mlxsw_reg_sfmr_pack(sfmr_pl, mlxsw_sp_sfmr_op(valid), fid->fid_index,
+ fid->fid_offset, fid->fid_family->flood_rsp,
+ fid->fid_family->bridge_type,
+ fid->fid_family->smpe_index_valid, smpe);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
-static int mlxsw_sp_fid_vni_op(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
- __be32 vni, bool vni_valid, u32 nve_flood_index,
- bool nve_flood_index_valid)
+static int mlxsw_sp_fid_edit_op(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
char sfmr_pl[MLXSW_REG_SFMR_LEN];
+ u16 smpe;
+
+ smpe = fid->fid_family->smpe_index_valid ? fid->fid_index : 0;
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID,
+ fid->fid_index, fid->fid_offset,
+ fid->fid_family->flood_rsp,
+ fid->fid_family->bridge_type,
+ fid->fid_family->smpe_index_valid, smpe);
+ mlxsw_reg_sfmr_vv_set(sfmr_pl, fid->vni_valid);
+ mlxsw_reg_sfmr_vni_set(sfmr_pl, be32_to_cpu(fid->vni));
+ mlxsw_reg_sfmr_vtfp_set(sfmr_pl, fid->nve_flood_index_valid);
+ mlxsw_reg_sfmr_nve_tunnel_flood_ptr_set(sfmr_pl, fid->nve_flood_index);
+
+ if (rif) {
+ mlxsw_reg_sfmr_irif_v_set(sfmr_pl, true);
+ mlxsw_reg_sfmr_irif_set(sfmr_pl, mlxsw_sp_rif_index(rif));
+ }
- mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid_index,
- 0);
- mlxsw_reg_sfmr_vv_set(sfmr_pl, vni_valid);
- mlxsw_reg_sfmr_vni_set(sfmr_pl, be32_to_cpu(vni));
- mlxsw_reg_sfmr_vtfp_set(sfmr_pl, nve_flood_index_valid);
- mlxsw_reg_sfmr_nve_tunnel_flood_ptr_set(sfmr_pl, nve_flood_index);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
-static int __mlxsw_sp_fid_port_vid_map(struct mlxsw_sp *mlxsw_sp, u16 fid_index,
+static int mlxsw_sp_fid_vni_to_fid_map(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif,
+ bool valid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+ bool irif_valid;
+ u16 irif_index;
+
+ irif_valid = !!rif;
+ irif_index = rif ? mlxsw_sp_rif_index(rif) : 0;
+
+ mlxsw_reg_svfa_vni_pack(svfa_pl, valid, fid->fid_index,
+ be32_to_cpu(fid->vni), irif_valid, irif_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int mlxsw_sp_fid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ return mlxsw_sp_fid_edit_op(fid, rif);
+}
+
+static int mlxsw_sp_fid_vni_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ if (!fid->vni_valid)
+ return 0;
+
+ return mlxsw_sp_fid_vni_to_fid_map(fid, rif, fid->vni_valid);
+}
+
+static int
+mlxsw_sp_fid_vid_to_fid_map(const struct mlxsw_sp_fid *fid, u16 vid, bool valid,
+ const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+ bool irif_valid;
+ u16 irif_index;
+
+ irif_valid = !!rif;
+ irif_index = rif ? mlxsw_sp_rif_index(rif) : 0;
+
+ mlxsw_reg_svfa_vid_pack(svfa_pl, valid, fid->fid_index, vid, irif_valid,
+ irif_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int
+mlxsw_sp_fid_8021q_vid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_fid_8021q *fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
+
+ /* Update the global VID => FID mapping we created when the FID was
+ * configured.
+ */
+ return mlxsw_sp_fid_vid_to_fid_map(fid, fid_8021q->vid, true, rif);
+}
+
+static int
+mlxsw_sp_fid_port_vid_to_fid_rif_update_one(const struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_fid_port_vid *pv,
+ bool irif_valid, u16 irif_index)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+
+ mlxsw_reg_svfa_port_vid_pack(svfa_pl, pv->local_port, true,
+ fid->fid_index, pv->vid, irif_valid,
+ irif_index);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int mlxsw_sp_fid_vid_to_fid_rif_set(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ struct mlxsw_sp_fid_port_vid *pv;
+ u16 irif_index;
+ int err;
+
+ err = fid->fid_family->ops->vid_to_fid_rif_update(fid, rif);
+ if (err)
+ return err;
+
+ irif_index = mlxsw_sp_rif_index(rif);
+
+ list_for_each_entry(pv, &fid->port_vid_list, list) {
+ /* If port is not in virtual mode, then it does not have any
+ * {Port, VID}->FID mappings that need to be updated with the
+ * ingress RIF.
+ */
+ if (!mlxsw_sp->fid_core->port_fid_mappings[pv->local_port])
+ continue;
+
+ err = mlxsw_sp_fid_port_vid_to_fid_rif_update_one(fid, pv,
+ true,
+ irif_index);
+ if (err)
+ goto err_port_vid_to_fid_rif_update_one;
+ }
+
+ return 0;
+
+err_port_vid_to_fid_rif_update_one:
+ list_for_each_entry_continue_reverse(pv, &fid->port_vid_list, list) {
+ if (!mlxsw_sp->fid_core->port_fid_mappings[pv->local_port])
+ continue;
+
+ mlxsw_sp_fid_port_vid_to_fid_rif_update_one(fid, pv, false, 0);
+ }
+
+ fid->fid_family->ops->vid_to_fid_rif_update(fid, NULL);
+ return err;
+}
+
+static void mlxsw_sp_fid_vid_to_fid_rif_unset(const struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ struct mlxsw_sp_fid_port_vid *pv;
+
+ list_for_each_entry(pv, &fid->port_vid_list, list) {
+ /* If port is not in virtual mode, then it does not have any
+ * {Port, VID}->FID mappings that need to be updated.
+ */
+ if (!mlxsw_sp->fid_core->port_fid_mappings[pv->local_port])
+ continue;
+
+ mlxsw_sp_fid_port_vid_to_fid_rif_update_one(fid, pv, false, 0);
+ }
+
+ fid->fid_family->ops->vid_to_fid_rif_update(fid, NULL);
+}
+
+static int mlxsw_sp_fid_reiv_handle(struct mlxsw_sp_fid *fid, u16 rif_index,
+ bool valid, u8 port_page)
+{
+ u16 local_port_end = (port_page + 1) * MLXSW_REG_REIV_REC_MAX_COUNT - 1;
+ u16 local_port_start = port_page * MLXSW_REG_REIV_REC_MAX_COUNT;
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ struct mlxsw_sp_fid_port_vid *port_vid;
+ u8 rec_num, entries_num = 0;
+ char *reiv_pl;
+ int err;
+
+ reiv_pl = kmalloc(MLXSW_REG_REIV_LEN, GFP_KERNEL);
+ if (!reiv_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_reiv_pack(reiv_pl, port_page, rif_index);
+
+ list_for_each_entry(port_vid, &fid->port_vid_list, list) {
+ /* port_vid_list is sorted by local_port. */
+ if (port_vid->local_port < local_port_start)
+ continue;
+
+ if (port_vid->local_port > local_port_end)
+ break;
+
+ rec_num = port_vid->local_port % MLXSW_REG_REIV_REC_MAX_COUNT;
+ mlxsw_reg_reiv_rec_update_set(reiv_pl, rec_num, true);
+ mlxsw_reg_reiv_rec_evid_set(reiv_pl, rec_num,
+ valid ? port_vid->vid : 0);
+ entries_num++;
+ }
+
+ if (!entries_num) {
+ kfree(reiv_pl);
+ return 0;
+ }
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(reiv), reiv_pl);
+ if (err)
+ goto err_reg_write;
+
+ kfree(reiv_pl);
+ return 0;
+
+err_reg_write:
+ kfree(reiv_pl);
+ return err;
+}
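The REIV register is written one "port page" at a time: each page covers MLXSW_REG_REIV_REC_MAX_COUNT consecutive local ports, and a port's record slot is its offset within that page. A minimal sketch of that arithmetic (not part of the patch; the concrete record count per page comes from reg.h and is not visible in this hunk), mirroring the per-port helper added later in this file:

	static void example_reiv_position(u16 local_port, u8 *port_page,
					  u8 *rec_num)
	{
		/* Page index and record slot used when packing REIV. */
		*port_page = local_port / MLXSW_REG_REIV_REC_MAX_COUNT;
		*rec_num = local_port % MLXSW_REG_REIV_REC_MAX_COUNT;
	}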
+
+static int mlxsw_sp_fid_erif_eport_to_vid_map(struct mlxsw_sp_fid *fid,
+ u16 rif_index, bool valid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ u8 num_port_pages;
+ int err, i;
+
+ num_port_pages = mlxsw_core_max_ports(mlxsw_sp->core) /
+ MLXSW_REG_REIV_REC_MAX_COUNT + 1;
+
+ for (i = 0; i < num_port_pages; i++) {
+ err = mlxsw_sp_fid_reiv_handle(fid, rif_index, valid, i);
+ if (err)
+ goto err_reiv_handle;
+ }
+
+ return 0;
+
+err_reiv_handle:
+ for (; i >= 0; i--)
+ mlxsw_sp_fid_reiv_handle(fid, rif_index, !valid, i);
+ return err;
+}
+
+int mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif)
+{
+ u16 rif_index = mlxsw_sp_rif_index(rif);
+ int err;
+
+ err = mlxsw_sp_fid_to_fid_rif_update(fid, rif);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_vni_to_fid_rif_update(fid, rif);
+ if (err)
+ goto err_vni_to_fid_rif_update;
+
+ err = mlxsw_sp_fid_vid_to_fid_rif_set(fid, rif);
+ if (err)
+ goto err_vid_to_fid_rif_set;
+
+ err = mlxsw_sp_fid_erif_eport_to_vid_map(fid, rif_index, true);
+ if (err)
+ goto err_erif_eport_to_vid_map;
+
+ fid->rif = rif;
+ return 0;
+
+err_erif_eport_to_vid_map:
+ mlxsw_sp_fid_vid_to_fid_rif_unset(fid);
+err_vid_to_fid_rif_set:
+ mlxsw_sp_fid_vni_to_fid_rif_update(fid, NULL);
+err_vni_to_fid_rif_update:
+ mlxsw_sp_fid_to_fid_rif_update(fid, NULL);
+ return err;
+}
+
+void mlxsw_sp_fid_rif_unset(struct mlxsw_sp_fid *fid)
+{
+ u16 rif_index;
+
+ if (!fid->rif)
+ return;
+
+ rif_index = mlxsw_sp_rif_index(fid->rif);
+ fid->rif = NULL;
+
+ mlxsw_sp_fid_erif_eport_to_vid_map(fid, rif_index, false);
+ mlxsw_sp_fid_vid_to_fid_rif_unset(fid);
+ mlxsw_sp_fid_vni_to_fid_rif_update(fid, NULL);
+ mlxsw_sp_fid_to_fid_rif_update(fid, NULL);
+}
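Taken together, mlxsw_sp_fid_rif_set() and mlxsw_sp_fid_rif_unset() attach and detach a router interface across the FID attributes (SFMR, ingress RIF), the VNI/VID-to-FID mappings (SVFA, ingress RIF) and the egress {RIF, port}-to-VID table (REIV), with teardown running in the mirror order of setup. A hypothetical caller, shown only to illustrate the pairing; set() already rolls back partial state on failure, so the caller just propagates the error:

	static int example_rif_fid_attach(struct mlxsw_sp_fid *fid,
					  struct mlxsw_sp_rif *rif)
	{
		return mlxsw_sp_fid_rif_set(fid, rif);
	}

	static void example_rif_fid_detach(struct mlxsw_sp_fid *fid)
	{
		mlxsw_sp_fid_rif_unset(fid);
	}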
+
+static int mlxsw_sp_fid_vni_op(const struct mlxsw_sp_fid *fid)
+{
+ int err;
+
+ err = mlxsw_sp_fid_vni_to_fid_map(fid, fid->rif, fid->vni_valid);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_edit_op(fid, fid->rif);
+ if (err)
+ goto err_fid_edit_op;
+
+ return 0;
+
+err_fid_edit_op:
+ mlxsw_sp_fid_vni_to_fid_map(fid, fid->rif, !fid->vni_valid);
+ return err;
+}
+
+static int __mlxsw_sp_fid_port_vid_map(const struct mlxsw_sp_fid *fid,
u16 local_port, u16 vid, bool valid)
{
- enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
char svfa_pl[MLXSW_REG_SVFA_LEN];
+ bool irif_valid = false;
+ u16 irif_index = 0;
+
+ if (fid->rif) {
+ irif_valid = true;
+ irif_index = mlxsw_sp_rif_index(fid->rif);
+ }
- mlxsw_reg_svfa_pack(svfa_pl, local_port, mt, valid, fid_index, vid);
+ mlxsw_reg_svfa_port_vid_pack(svfa_pl, local_port, valid, fid->fid_index,
+ vid, irif_valid, irif_index);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
@@ -459,20 +775,19 @@ static void mlxsw_sp_fid_8021d_setup(struct mlxsw_sp_fid *fid, const void *arg)
int br_ifindex = *(int *) arg;
mlxsw_sp_fid_8021d_fid(fid)->br_ifindex = br_ifindex;
+ fid->fid_offset = fid->fid_index - fid->fid_family->start_index;
}
static int mlxsw_sp_fid_8021d_configure(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
-
- return mlxsw_sp_fid_op(fid_family->mlxsw_sp, fid->fid_index, 0, true);
+ return mlxsw_sp_fid_op(fid, true);
}
static void mlxsw_sp_fid_8021d_deconfigure(struct mlxsw_sp_fid *fid)
{
if (fid->vni_valid)
mlxsw_sp_nve_fid_disable(fid->fid_family->mlxsw_sp, fid);
- mlxsw_sp_fid_op(fid->fid_family->mlxsw_sp, fid->fid_index, 0, false);
+ mlxsw_sp_fid_op(fid, false);
}
static int mlxsw_sp_fid_8021d_index_alloc(struct mlxsw_sp_fid *fid,
@@ -498,14 +813,8 @@ mlxsw_sp_fid_8021d_compare(const struct mlxsw_sp_fid *fid, const void *arg)
return mlxsw_sp_fid_8021d_fid(fid)->br_ifindex == br_ifindex;
}
-static u16 mlxsw_sp_fid_8021d_flood_index(const struct mlxsw_sp_fid *fid)
-{
- return fid->fid_index - VLAN_N_VID;
-}
-
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
int err;
@@ -517,7 +826,7 @@ static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
if (!fid)
continue;
- err = __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
+ err = __mlxsw_sp_fid_port_vid_map(fid,
mlxsw_sp_port->local_port,
vid, true);
if (err)
@@ -540,8 +849,7 @@ err_fid_port_vid_map:
if (!fid)
continue;
- __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
- mlxsw_sp_port->local_port, vid,
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid,
false);
}
return err;
@@ -549,7 +857,6 @@ err_fid_port_vid_map:
static void mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
@@ -562,12 +869,108 @@ static void mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
if (!fid)
continue;
- __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
- mlxsw_sp_port->local_port, vid,
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid,
false);
}
}
+static int
+mlxsw_sp_fid_port_vid_list_add(struct mlxsw_sp_fid *fid, u16 local_port,
+ u16 vid)
+{
+ struct mlxsw_sp_fid_port_vid *port_vid, *tmp_port_vid;
+
+ port_vid = kzalloc(sizeof(*port_vid), GFP_KERNEL);
+ if (!port_vid)
+ return -ENOMEM;
+
+ port_vid->local_port = local_port;
+ port_vid->vid = vid;
+
+ list_for_each_entry(tmp_port_vid, &fid->port_vid_list, list) {
+ if (tmp_port_vid->local_port > local_port)
+ break;
+ }
+
+ list_add_tail(&port_vid->list, &tmp_port_vid->list);
+ return 0;
+}
+
+static void
+mlxsw_sp_fid_port_vid_list_del(struct mlxsw_sp_fid *fid, u16 local_port,
+ u16 vid)
+{
+ struct mlxsw_sp_fid_port_vid *port_vid, *tmp;
+
+ list_for_each_entry_safe(port_vid, tmp, &fid->port_vid_list, list) {
+ if (port_vid->local_port != local_port || port_vid->vid != vid)
+ continue;
+
+ list_del(&port_vid->list);
+ kfree(port_vid);
+ return;
+ }
+}
+
+static int
+mlxsw_sp_fid_mpe_table_map(const struct mlxsw_sp_fid *fid, u16 local_port,
+ u16 vid, bool valid)
+{
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ char smpe_pl[MLXSW_REG_SMPE_LEN];
+
+ mlxsw_reg_smpe_pack(smpe_pl, local_port, fid->fid_index,
+ valid ? vid : 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smpe), smpe_pl);
+}
+
+static int
+mlxsw_sp_fid_erif_eport_to_vid_map_one(const struct mlxsw_sp_fid *fid,
+ u16 local_port, u16 vid, bool valid)
+{
+ u8 port_page = local_port / MLXSW_REG_REIV_REC_MAX_COUNT;
+ u8 rec_num = local_port % MLXSW_REG_REIV_REC_MAX_COUNT;
+ struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
+ u16 rif_index = mlxsw_sp_rif_index(fid->rif);
+ char *reiv_pl;
+ int err;
+
+ reiv_pl = kmalloc(MLXSW_REG_REIV_LEN, GFP_KERNEL);
+ if (!reiv_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_reiv_pack(reiv_pl, port_page, rif_index);
+ mlxsw_reg_reiv_rec_update_set(reiv_pl, rec_num, true);
+ mlxsw_reg_reiv_rec_evid_set(reiv_pl, rec_num, valid ? vid : 0);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(reiv), reiv_pl);
+ kfree(reiv_pl);
+ return err;
+}
+
+static int mlxsw_sp_fid_evid_map(const struct mlxsw_sp_fid *fid, u16 local_port,
+ u16 vid, bool valid)
+{
+ int err;
+
+ err = mlxsw_sp_fid_mpe_table_map(fid, local_port, vid, valid);
+ if (err)
+ return err;
+
+ if (!fid->rif)
+ return 0;
+
+ err = mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port, vid,
+ valid);
+ if (err)
+ goto err_erif_eport_to_vid_map_one;
+
+ return 0;
+
+err_erif_eport_to_vid_map_one:
+ mlxsw_sp_fid_mpe_table_map(fid, local_port, vid, !valid);
+ return err;
+}
+
static int mlxsw_sp_fid_8021d_port_vid_map(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid)
@@ -576,11 +979,20 @@ static int mlxsw_sp_fid_8021d_port_vid_map(struct mlxsw_sp_fid *fid,
u16 local_port = mlxsw_sp_port->local_port;
int err;
- err = __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
- mlxsw_sp_port->local_port, vid, true);
+ err = __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid,
+ true);
if (err)
return err;
+ err = mlxsw_sp_fid_evid_map(fid, local_port, vid, true);
+ if (err)
+ goto err_fid_evid_map;
+
+ err = mlxsw_sp_fid_port_vid_list_add(fid, mlxsw_sp_port->local_port,
+ vid);
+ if (err)
+ goto err_port_vid_list_add;
+
if (mlxsw_sp->fid_core->port_fid_mappings[local_port]++ == 0) {
err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
if (err)
@@ -591,8 +1003,11 @@ static int mlxsw_sp_fid_8021d_port_vid_map(struct mlxsw_sp_fid *fid,
err_port_vp_mode_trans:
mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
- __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
- mlxsw_sp_port->local_port, vid, false);
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
+err_port_vid_list_add:
+ mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
+err_fid_evid_map:
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
return err;
}
@@ -606,43 +1021,29 @@ mlxsw_sp_fid_8021d_port_vid_unmap(struct mlxsw_sp_fid *fid,
if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1)
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
- __mlxsw_sp_fid_port_vid_map(mlxsw_sp, fid->fid_index,
- mlxsw_sp_port->local_port, vid, false);
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
+ mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
}
-static int mlxsw_sp_fid_8021d_vni_set(struct mlxsw_sp_fid *fid, __be32 vni)
+static int mlxsw_sp_fid_8021d_vni_set(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
-
- return mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index, vni,
- true, fid->nve_flood_index,
- fid->nve_flood_index_valid);
+ return mlxsw_sp_fid_vni_op(fid);
}
static void mlxsw_sp_fid_8021d_vni_clear(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
-
- mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index, 0, false,
- fid->nve_flood_index, fid->nve_flood_index_valid);
+ mlxsw_sp_fid_vni_op(fid);
}
-static int mlxsw_sp_fid_8021d_nve_flood_index_set(struct mlxsw_sp_fid *fid,
- u32 nve_flood_index)
+static int mlxsw_sp_fid_8021d_nve_flood_index_set(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
-
- return mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index,
- fid->vni, fid->vni_valid, nve_flood_index,
- true);
+ return mlxsw_sp_fid_edit_op(fid, fid->rif);
}
static void mlxsw_sp_fid_8021d_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
-
- mlxsw_sp_fid_vni_op(fid_family->mlxsw_sp, fid->fid_index, fid->vni,
- fid->vni_valid, 0, false);
+ mlxsw_sp_fid_edit_op(fid, fid->rif);
}
static void
@@ -652,13 +1053,19 @@ mlxsw_sp_fid_8021d_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
br_fdb_clear_offload(nve_dev, 0);
}
+static int
+mlxsw_sp_fid_8021d_vid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ return 0;
+}
+
static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
.setup = mlxsw_sp_fid_8021d_setup,
.configure = mlxsw_sp_fid_8021d_configure,
.deconfigure = mlxsw_sp_fid_8021d_deconfigure,
.index_alloc = mlxsw_sp_fid_8021d_index_alloc,
.compare = mlxsw_sp_fid_8021d_compare,
- .flood_index = mlxsw_sp_fid_8021d_flood_index,
.port_vid_map = mlxsw_sp_fid_8021d_port_vid_map,
.port_vid_unmap = mlxsw_sp_fid_8021d_port_vid_unmap,
.vni_set = mlxsw_sp_fid_8021d_vni_set,
@@ -666,42 +1073,32 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
.nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
.nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
.fdb_clear_offload = mlxsw_sp_fid_8021d_fdb_clear_offload,
+ .vid_to_fid_rif_update = mlxsw_sp_fid_8021d_vid_to_fid_rif_update,
};
+#define MLXSW_SP_FID_8021Q_MAX (VLAN_N_VID - 2)
+#define MLXSW_SP_FID_RFID_MAX (11 * 1024)
+#define MLXSW_SP_FID_8021Q_PGT_BASE 0
+#define MLXSW_SP_FID_8021D_PGT_BASE (3 * MLXSW_SP_FID_8021Q_MAX)
+
static const struct mlxsw_sp_flood_table mlxsw_sp_fid_8021d_flood_tables[] = {
{
.packet_type = MLXSW_SP_FLOOD_TYPE_UC,
- .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_VFID,
- .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
.table_index = 0,
},
{
.packet_type = MLXSW_SP_FLOOD_TYPE_MC,
- .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_VFID,
- .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
.table_index = 1,
},
{
.packet_type = MLXSW_SP_FLOOD_TYPE_BC,
- .bridge_type = MLXSW_REG_SFGC_BRIDGE_TYPE_VFID,
- .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID,
+ .table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFSET,
.table_index = 2,
},
};
-/* Range and flood configuration must match mlxsw_config_profile */
-static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021d_family = {
- .type = MLXSW_SP_FID_TYPE_8021D,
- .fid_size = sizeof(struct mlxsw_sp_fid_8021d),
- .start_index = VLAN_N_VID,
- .end_index = VLAN_N_VID + MLXSW_SP_FID_8021D_MAX - 1,
- .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
- .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
- .rif_type = MLXSW_SP_RIF_TYPE_FID,
- .ops = &mlxsw_sp_fid_8021d_ops,
- .lag_vid_valid = 1,
-};
-
static bool
mlxsw_sp_fid_8021q_compare(const struct mlxsw_sp_fid *fid, const void *arg)
{
@@ -717,48 +1114,19 @@ mlxsw_sp_fid_8021q_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
br_fdb_clear_offload(nve_dev, mlxsw_sp_fid_8021q_vid(fid));
}
-static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_emu_ops = {
- .setup = mlxsw_sp_fid_8021q_setup,
- .configure = mlxsw_sp_fid_8021d_configure,
- .deconfigure = mlxsw_sp_fid_8021d_deconfigure,
- .index_alloc = mlxsw_sp_fid_8021d_index_alloc,
- .compare = mlxsw_sp_fid_8021q_compare,
- .flood_index = mlxsw_sp_fid_8021d_flood_index,
- .port_vid_map = mlxsw_sp_fid_8021d_port_vid_map,
- .port_vid_unmap = mlxsw_sp_fid_8021d_port_vid_unmap,
- .vni_set = mlxsw_sp_fid_8021d_vni_set,
- .vni_clear = mlxsw_sp_fid_8021d_vni_clear,
- .nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
- .nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
- .fdb_clear_offload = mlxsw_sp_fid_8021q_fdb_clear_offload,
-};
-
-/* There are 4K-2 emulated 802.1Q FIDs, starting right after the 802.1D FIDs */
-#define MLXSW_SP_FID_8021Q_EMU_START (VLAN_N_VID + MLXSW_SP_FID_8021D_MAX)
-#define MLXSW_SP_FID_8021Q_EMU_END (MLXSW_SP_FID_8021Q_EMU_START + \
- VLAN_VID_MASK - 2)
-
-/* Range and flood configuration must match mlxsw_config_profile */
-static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021q_emu_family = {
- .type = MLXSW_SP_FID_TYPE_8021Q,
- .fid_size = sizeof(struct mlxsw_sp_fid_8021q),
- .start_index = MLXSW_SP_FID_8021Q_EMU_START,
- .end_index = MLXSW_SP_FID_8021Q_EMU_END,
- .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
- .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
- .rif_type = MLXSW_SP_RIF_TYPE_VLAN,
- .ops = &mlxsw_sp_fid_8021q_emu_ops,
- .lag_vid_valid = 1,
-};
+static void mlxsw_sp_fid_rfid_setup(struct mlxsw_sp_fid *fid, const void *arg)
+{
+ fid->fid_offset = 0;
+}
static int mlxsw_sp_fid_rfid_configure(struct mlxsw_sp_fid *fid)
{
- /* rFIDs are allocated by the device during init */
- return 0;
+ return mlxsw_sp_fid_op(fid, true);
}
static void mlxsw_sp_fid_rfid_deconfigure(struct mlxsw_sp_fid *fid)
{
+ mlxsw_sp_fid_op(fid, false);
}
static int mlxsw_sp_fid_rfid_index_alloc(struct mlxsw_sp_fid *fid,
@@ -787,9 +1155,28 @@ static int mlxsw_sp_fid_rfid_port_vid_map(struct mlxsw_sp_fid *fid,
u16 local_port = mlxsw_sp_port->local_port;
int err;
- /* We only need to transition the port to virtual mode since
- * {Port, VID} => FID is done by the firmware upon RIF creation.
+ err = mlxsw_sp_fid_port_vid_list_add(fid, mlxsw_sp_port->local_port,
+ vid);
+ if (err)
+ return err;
+
+ /* Using legacy bridge model, we only need to transition the port to
+ * virtual mode since {Port, VID} => FID is done by the firmware upon
+ * RIF creation. Using unified bridge model, we need to map
+ * {Port, VID} => FID and map egress VID.
*/
+ err = __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid,
+ true);
+ if (err)
+ goto err_port_vid_map;
+
+ if (fid->rif) {
+ err = mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port,
+ vid, true);
+ if (err)
+ goto err_erif_eport_to_vid_map_one;
+ }
+
if (mlxsw_sp->fid_core->port_fid_mappings[local_port]++ == 0) {
err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
if (err)
@@ -800,6 +1187,13 @@ static int mlxsw_sp_fid_rfid_port_vid_map(struct mlxsw_sp_fid *fid,
err_port_vp_mode_trans:
mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
+ if (fid->rif)
+ mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port, vid,
+ false);
+err_erif_eport_to_vid_map_one:
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
+err_port_vid_map:
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
return err;
}
@@ -813,39 +1207,69 @@ mlxsw_sp_fid_rfid_port_vid_unmap(struct mlxsw_sp_fid *fid,
if (mlxsw_sp->fid_core->port_fid_mappings[local_port] == 1)
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
mlxsw_sp->fid_core->port_fid_mappings[local_port]--;
+
+ if (fid->rif)
+ mlxsw_sp_fid_erif_eport_to_vid_map_one(fid, local_port, vid,
+ false);
+ __mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port->local_port, vid, false);
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
+}
+
+static int mlxsw_sp_fid_rfid_vni_set(struct mlxsw_sp_fid *fid)
+{
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp_fid_rfid_vni_clear(struct mlxsw_sp_fid *fid)
+{
+ WARN_ON_ONCE(1);
+}
+
+static int mlxsw_sp_fid_rfid_nve_flood_index_set(struct mlxsw_sp_fid *fid)
+{
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp_fid_rfid_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
+{
+ WARN_ON_ONCE(1);
+}
+
+static int
+mlxsw_sp_fid_rfid_vid_to_fid_rif_update(const struct mlxsw_sp_fid *fid,
+ const struct mlxsw_sp_rif *rif)
+{
+ return 0;
}
static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_rfid_ops = {
+ .setup = mlxsw_sp_fid_rfid_setup,
.configure = mlxsw_sp_fid_rfid_configure,
.deconfigure = mlxsw_sp_fid_rfid_deconfigure,
.index_alloc = mlxsw_sp_fid_rfid_index_alloc,
.compare = mlxsw_sp_fid_rfid_compare,
.port_vid_map = mlxsw_sp_fid_rfid_port_vid_map,
.port_vid_unmap = mlxsw_sp_fid_rfid_port_vid_unmap,
+ .vni_set = mlxsw_sp_fid_rfid_vni_set,
+ .vni_clear = mlxsw_sp_fid_rfid_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_rfid_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_rfid_nve_flood_index_clear,
+ .vid_to_fid_rif_update = mlxsw_sp_fid_rfid_vid_to_fid_rif_update,
};
-#define MLXSW_SP_RFID_BASE (15 * 1024)
-#define MLXSW_SP_RFID_MAX 1024
-
-static const struct mlxsw_sp_fid_family mlxsw_sp_fid_rfid_family = {
- .type = MLXSW_SP_FID_TYPE_RFID,
- .fid_size = sizeof(struct mlxsw_sp_fid),
- .start_index = MLXSW_SP_RFID_BASE,
- .end_index = MLXSW_SP_RFID_BASE + MLXSW_SP_RFID_MAX - 1,
- .rif_type = MLXSW_SP_RIF_TYPE_SUBPORT,
- .ops = &mlxsw_sp_fid_rfid_ops,
-};
+static void mlxsw_sp_fid_dummy_setup(struct mlxsw_sp_fid *fid, const void *arg)
+{
+ fid->fid_offset = 0;
+}
static int mlxsw_sp_fid_dummy_configure(struct mlxsw_sp_fid *fid)
{
- struct mlxsw_sp *mlxsw_sp = fid->fid_family->mlxsw_sp;
-
- return mlxsw_sp_fid_op(mlxsw_sp, fid->fid_index, 0, true);
+ return mlxsw_sp_fid_op(fid, true);
}
static void mlxsw_sp_fid_dummy_deconfigure(struct mlxsw_sp_fid *fid)
{
- mlxsw_sp_fid_op(fid->fid_family->mlxsw_sp, fid->fid_index, 0, false);
+ mlxsw_sp_fid_op(fid, false);
}
static int mlxsw_sp_fid_dummy_index_alloc(struct mlxsw_sp_fid *fid,
@@ -862,26 +1286,252 @@ static bool mlxsw_sp_fid_dummy_compare(const struct mlxsw_sp_fid *fid,
return true;
}
+static int mlxsw_sp_fid_dummy_vni_set(struct mlxsw_sp_fid *fid)
+{
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp_fid_dummy_vni_clear(struct mlxsw_sp_fid *fid)
+{
+ WARN_ON_ONCE(1);
+}
+
+static int mlxsw_sp_fid_dummy_nve_flood_index_set(struct mlxsw_sp_fid *fid)
+{
+ return -EOPNOTSUPP;
+}
+
+static void mlxsw_sp_fid_dummy_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
+{
+ WARN_ON_ONCE(1);
+}
+
static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = {
+ .setup = mlxsw_sp_fid_dummy_setup,
.configure = mlxsw_sp_fid_dummy_configure,
.deconfigure = mlxsw_sp_fid_dummy_deconfigure,
.index_alloc = mlxsw_sp_fid_dummy_index_alloc,
.compare = mlxsw_sp_fid_dummy_compare,
+ .vni_set = mlxsw_sp_fid_dummy_vni_set,
+ .vni_clear = mlxsw_sp_fid_dummy_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_dummy_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_dummy_nve_flood_index_clear,
+};
+
+static int mlxsw_sp_fid_8021q_configure(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_8021q *fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
+ int err;
+
+ err = mlxsw_sp_fid_op(fid, true);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_vid_to_fid_map(fid, fid_8021q->vid, true, fid->rif);
+ if (err)
+ goto err_vid_to_fid_map;
+
+ return 0;
+
+err_vid_to_fid_map:
+ mlxsw_sp_fid_op(fid, false);
+ return err;
+}
+
+static void mlxsw_sp_fid_8021q_deconfigure(struct mlxsw_sp_fid *fid)
+{
+ struct mlxsw_sp_fid_8021q *fid_8021q = mlxsw_sp_fid_8021q_fid(fid);
+
+ if (fid->vni_valid)
+ mlxsw_sp_nve_fid_disable(fid->fid_family->mlxsw_sp, fid);
+
+ mlxsw_sp_fid_vid_to_fid_map(fid, fid_8021q->vid, false, NULL);
+ mlxsw_sp_fid_op(fid, false);
+}
+
+static int mlxsw_sp_fid_8021q_port_vid_map(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 local_port = mlxsw_sp_port->local_port;
+ int err;
+
+ /* In case there are no {Port, VID} => FID mappings on the port,
+ * we can use the global VID => FID mapping we created when the
+ * FID was configured. Otherwise, configure a new mapping.
+ */
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port]) {
+ err = __mlxsw_sp_fid_port_vid_map(fid, local_port, vid, true);
+ if (err)
+ return err;
+ }
+
+ err = mlxsw_sp_fid_evid_map(fid, local_port, vid, true);
+ if (err)
+ goto err_fid_evid_map;
+
+ err = mlxsw_sp_fid_port_vid_list_add(fid, mlxsw_sp_port->local_port,
+ vid);
+ if (err)
+ goto err_port_vid_list_add;
+
+ return 0;
+
+err_port_vid_list_add:
+ mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
+err_fid_evid_map:
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port])
+ __mlxsw_sp_fid_port_vid_map(fid, local_port, vid, false);
+ return err;
+}
+
+static void
+mlxsw_sp_fid_8021q_port_vid_unmap(struct mlxsw_sp_fid *fid,
+ struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 local_port = mlxsw_sp_port->local_port;
+
+ mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
+ mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
+ if (mlxsw_sp->fid_core->port_fid_mappings[local_port])
+ __mlxsw_sp_fid_port_vid_map(fid, local_port, vid, false);
+}
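Put differently, 802.1Q FIDs piggy-back on the global VID => FID entry written at configure time; a per-{Port, VID} SVFA entry is only added once the port has been moved to virtual mode (non-zero port_fid_mappings[] count), while the egress-VID mapping (SMPE, plus REIV when a RIF is attached) is always written. A condensed view of the two cases handled by the map/unmap pair above (summary only, not patch content):

	/*
	 *   port in VLAN mode    : rely on the global VID->FID entry;
	 *                          write SMPE (+ REIV if fid->rif is set)
	 *   port in virtual mode : additionally write a {Port, VID}->FID
	 *                          SVFA entry for this port
	 */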
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_ops = {
+ .setup = mlxsw_sp_fid_8021q_setup,
+ .configure = mlxsw_sp_fid_8021q_configure,
+ .deconfigure = mlxsw_sp_fid_8021q_deconfigure,
+ .index_alloc = mlxsw_sp_fid_8021d_index_alloc,
+ .compare = mlxsw_sp_fid_8021q_compare,
+ .port_vid_map = mlxsw_sp_fid_8021q_port_vid_map,
+ .port_vid_unmap = mlxsw_sp_fid_8021q_port_vid_unmap,
+ .vni_set = mlxsw_sp_fid_8021d_vni_set,
+ .vni_clear = mlxsw_sp_fid_8021d_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
+ .fdb_clear_offload = mlxsw_sp_fid_8021q_fdb_clear_offload,
+ .vid_to_fid_rif_update = mlxsw_sp_fid_8021q_vid_to_fid_rif_update,
+};
+
+/* There are 4K-2 802.1Q FIDs */
+#define MLXSW_SP_FID_8021Q_START 1 /* FID 0 is reserved. */
+#define MLXSW_SP_FID_8021Q_END (MLXSW_SP_FID_8021Q_START + \
+ MLXSW_SP_FID_8021Q_MAX - 1)
+
+/* There are 1K 802.1D FIDs */
+#define MLXSW_SP_FID_8021D_START (MLXSW_SP_FID_8021Q_END + 1)
+#define MLXSW_SP_FID_8021D_END (MLXSW_SP_FID_8021D_START + \
+ MLXSW_SP_FID_8021D_MAX - 1)
+
+/* There is one dummy FID */
+#define MLXSW_SP_FID_DUMMY (MLXSW_SP_FID_8021D_END + 1)
+
+/* There are 11K rFIDs */
+#define MLXSW_SP_RFID_START (MLXSW_SP_FID_DUMMY + 1)
+#define MLXSW_SP_RFID_END (MLXSW_SP_RFID_START + \
+ MLXSW_SP_FID_RFID_MAX - 1)
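For reference, a worked layout of the flat FID space defined above, assuming the usual kernel value VLAN_N_VID == 4096 and the driver's MLXSW_SP_FID_8021D_MAX == 1024 (the "1K 802.1D FIDs" mentioned above; both macros are defined outside this hunk). The PGT bases follow from three flood tables per bridged FID:

	/*
	 *   802.1Q FIDs :     1 ..  4094   (MLXSW_SP_FID_8021Q_MAX = 4094)
	 *   802.1D FIDs :  4095 ..  5118
	 *   dummy FID   :  5119
	 *   rFIDs       :  5120 .. 16383   (11 * 1024 entries)
	 *
	 * With three flood tables per family,
	 * MLXSW_SP_FID_8021D_PGT_BASE == 3 * 4094 == 12282.
	 */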
+
+static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_8021q_family = {
+ .type = MLXSW_SP_FID_TYPE_8021Q,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021q),
+ .start_index = MLXSW_SP_FID_8021Q_START,
+ .end_index = MLXSW_SP_FID_8021Q_END,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_VLAN,
+ .ops = &mlxsw_sp_fid_8021q_ops,
+ .flood_rsp = false,
+ .bridge_type = MLXSW_REG_BRIDGE_TYPE_0,
+ .pgt_base = MLXSW_SP_FID_8021Q_PGT_BASE,
+ .smpe_index_valid = false,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_8021d_family = {
+ .type = MLXSW_SP_FID_TYPE_8021D,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021d),
+ .start_index = MLXSW_SP_FID_8021D_START,
+ .end_index = MLXSW_SP_FID_8021D_END,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_FID,
+ .ops = &mlxsw_sp_fid_8021d_ops,
+ .bridge_type = MLXSW_REG_BRIDGE_TYPE_1,
+ .pgt_base = MLXSW_SP_FID_8021D_PGT_BASE,
+ .smpe_index_valid = false,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_dummy_family = {
+ .type = MLXSW_SP_FID_TYPE_DUMMY,
+ .fid_size = sizeof(struct mlxsw_sp_fid),
+ .start_index = MLXSW_SP_FID_DUMMY,
+ .end_index = MLXSW_SP_FID_DUMMY,
+ .ops = &mlxsw_sp_fid_dummy_ops,
+ .smpe_index_valid = false,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp_fid_rfid_family = {
+ .type = MLXSW_SP_FID_TYPE_RFID,
+ .fid_size = sizeof(struct mlxsw_sp_fid),
+ .start_index = MLXSW_SP_RFID_START,
+ .end_index = MLXSW_SP_RFID_END,
+ .rif_type = MLXSW_SP_RIF_TYPE_SUBPORT,
+ .ops = &mlxsw_sp_fid_rfid_ops,
+ .flood_rsp = true,
+ .smpe_index_valid = false,
+};
+
+const struct mlxsw_sp_fid_family *mlxsw_sp1_fid_family_arr[] = {
+ [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp1_fid_8021q_family,
+ [MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp1_fid_8021d_family,
+ [MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp1_fid_dummy_family,
+ [MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021q_family = {
+ .type = MLXSW_SP_FID_TYPE_8021Q,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021q),
+ .start_index = MLXSW_SP_FID_8021Q_START,
+ .end_index = MLXSW_SP_FID_8021Q_END,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_VLAN,
+ .ops = &mlxsw_sp_fid_8021q_ops,
+ .flood_rsp = false,
+ .bridge_type = MLXSW_REG_BRIDGE_TYPE_0,
+ .pgt_base = MLXSW_SP_FID_8021Q_PGT_BASE,
+ .smpe_index_valid = true,
};
-static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = {
+static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021d_family = {
+ .type = MLXSW_SP_FID_TYPE_8021D,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021d),
+ .start_index = MLXSW_SP_FID_8021D_START,
+ .end_index = MLXSW_SP_FID_8021D_END,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_FID,
+ .ops = &mlxsw_sp_fid_8021d_ops,
+ .bridge_type = MLXSW_REG_BRIDGE_TYPE_1,
+ .pgt_base = MLXSW_SP_FID_8021D_PGT_BASE,
+ .smpe_index_valid = true,
+};
+
+static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_dummy_family = {
.type = MLXSW_SP_FID_TYPE_DUMMY,
.fid_size = sizeof(struct mlxsw_sp_fid),
- .start_index = VLAN_N_VID - 1,
- .end_index = VLAN_N_VID - 1,
+ .start_index = MLXSW_SP_FID_DUMMY,
+ .end_index = MLXSW_SP_FID_DUMMY,
.ops = &mlxsw_sp_fid_dummy_ops,
+ .smpe_index_valid = false,
};
-static const struct mlxsw_sp_fid_family *mlxsw_sp_fid_family_arr[] = {
- [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp_fid_8021q_emu_family,
- [MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp_fid_8021d_family,
+const struct mlxsw_sp_fid_family *mlxsw_sp2_fid_family_arr[] = {
+ [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp2_fid_8021q_family,
+ [MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp2_fid_8021d_family,
+ [MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp2_fid_dummy_family,
[MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family,
- [MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp_fid_dummy_family,
};
static struct mlxsw_sp_fid *mlxsw_sp_fid_lookup(struct mlxsw_sp *mlxsw_sp,
@@ -919,6 +1569,8 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
fid = kzalloc(fid_family->fid_size, GFP_KERNEL);
if (!fid)
return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&fid->port_vid_list);
fid->fid_family = fid_family;
err = fid->fid_family->ops->index_alloc(fid, arg, &fid_index);
@@ -927,8 +1579,7 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
fid->fid_index = fid_index;
__set_bit(fid_index - fid_family->start_index, fid_family->fids_bitmap);
- if (fid->fid_family->ops->setup)
- fid->fid_family->ops->setup(fid, arg);
+ fid->fid_family->ops->setup(fid, arg);
err = fid->fid_family->ops->configure(fid);
if (err)
@@ -967,6 +1618,7 @@ void mlxsw_sp_fid_put(struct mlxsw_sp_fid *fid)
fid->fid_family->ops->deconfigure(fid);
__clear_bit(fid->fid_index - fid_family->start_index,
fid_family->fids_bitmap);
+ WARN_ON_ONCE(!list_empty(&fid->port_vid_list));
kfree(fid);
}
@@ -1010,26 +1662,49 @@ mlxsw_sp_fid_flood_table_init(struct mlxsw_sp_fid_family *fid_family,
const struct mlxsw_sp_flood_table *flood_table)
{
enum mlxsw_sp_flood_type packet_type = flood_table->packet_type;
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
const int *sfgc_packet_types;
- int i;
+ u16 num_fids, mid_base;
+ int err, i;
+
+ mid_base = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table, 0);
+ num_fids = mlxsw_sp_fid_family_num_fids(fid_family);
+ err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, mid_base, num_fids);
+ if (err)
+ return err;
sfgc_packet_types = mlxsw_sp_packet_type_sfgc_types[packet_type];
for (i = 0; i < MLXSW_REG_SFGC_TYPE_MAX; i++) {
- struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
char sfgc_pl[MLXSW_REG_SFGC_LEN];
- int err;
if (!sfgc_packet_types[i])
continue;
- mlxsw_reg_sfgc_pack(sfgc_pl, i, flood_table->bridge_type,
- flood_table->table_type,
- flood_table->table_index);
+
+ mlxsw_reg_sfgc_pack(sfgc_pl, i, fid_family->bridge_type,
+ flood_table->table_type, 0, mid_base);
+
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfgc), sfgc_pl);
if (err)
- return err;
+ goto err_reg_write;
}
return 0;
+
+err_reg_write:
+ mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mid_base, num_fids);
+ return err;
+}
+
+static void
+mlxsw_sp_fid_flood_table_fini(struct mlxsw_sp_fid_family *fid_family,
+ const struct mlxsw_sp_flood_table *flood_table)
+{
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
+ u16 num_fids, mid_base;
+
+ mid_base = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table, 0);
+ num_fids = mlxsw_sp_fid_family_num_fids(fid_family);
+ mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mid_base, num_fids);
}
static int
@@ -1050,6 +1725,19 @@ mlxsw_sp_fid_flood_tables_init(struct mlxsw_sp_fid_family *fid_family)
return 0;
}
+static void
+mlxsw_sp_fid_flood_tables_fini(struct mlxsw_sp_fid_family *fid_family)
+{
+ int i;
+
+ for (i = 0; i < fid_family->nr_flood_tables; i++) {
+ const struct mlxsw_sp_flood_table *flood_table;
+
+ flood_table = &fid_family->flood_tables[i];
+ mlxsw_sp_fid_flood_table_fini(fid_family, flood_table);
+ }
+}
+
static int mlxsw_sp_fid_family_register(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_fid_family *tmpl)
{
@@ -1091,6 +1779,10 @@ mlxsw_sp_fid_family_unregister(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid_family *fid_family)
{
mlxsw_sp->fid_core->fid_family_arr[fid_family->type] = NULL;
+
+ if (fid_family->flood_tables)
+ mlxsw_sp_fid_flood_tables_fini(fid_family);
+
bitmap_free(fid_family->fids_bitmap);
WARN_ON_ONCE(!list_empty(&fid_family->fids_list));
kfree(fid_family);
@@ -1144,7 +1836,7 @@ int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < MLXSW_SP_FID_TYPE_MAX; i++) {
err = mlxsw_sp_fid_family_register(mlxsw_sp,
- mlxsw_sp_fid_family_arr[i]);
+ mlxsw_sp->fid_family_arr[i]);
if (err)
goto err_fid_ops_register;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c
new file mode 100644
index 000000000000..7dd3dba0fa83
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/refcount.h>
+#include <linux/idr.h>
+
+#include "spectrum.h"
+#include "reg.h"
+
+struct mlxsw_sp_pgt {
+ struct idr pgt_idr;
+ u16 end_index; /* Exclusive. */
+ struct mutex lock; /* Protects PGT. */
+ bool smpe_index_valid;
+};
+
+struct mlxsw_sp_pgt_entry {
+ struct list_head ports_list;
+ u16 index;
+ u16 smpe_index;
+};
+
+struct mlxsw_sp_pgt_entry_port {
+ struct list_head list; /* Member of 'ports_list'. */
+ u16 local_port;
+};
+
+int mlxsw_sp_pgt_mid_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_mid)
+{
+ int index, err = 0;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+ index = idr_alloc(&mlxsw_sp->pgt->pgt_idr, NULL, 0,
+ mlxsw_sp->pgt->end_index, GFP_KERNEL);
+
+ if (index < 0) {
+ err = index;
+ goto err_idr_alloc;
+ }
+
+ *p_mid = index;
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return 0;
+
+err_idr_alloc:
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return err;
+}
+
+void mlxsw_sp_pgt_mid_free(struct mlxsw_sp *mlxsw_sp, u16 mid_base)
+{
+ mutex_lock(&mlxsw_sp->pgt->lock);
+ WARN_ON(idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base));
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+}
+
+int
+mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, u16 count)
+{
+ unsigned int idr_cursor;
+ int i, err;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+
+ /* This function is supposed to be called several times as part of
+ * driver init, in a specific order. Verify that mid_base is the
+ * first free index in the IDR, so that the indexes can be freed in
+ * case of an error.
+ */
+ idr_cursor = idr_get_cursor(&mlxsw_sp->pgt->pgt_idr);
+ if (WARN_ON(idr_cursor != mid_base)) {
+ err = -EINVAL;
+ goto err_idr_cursor;
+ }
+
+ for (i = 0; i < count; i++) {
+ err = idr_alloc_cyclic(&mlxsw_sp->pgt->pgt_idr, NULL,
+ mid_base, mid_base + count, GFP_KERNEL);
+ if (err < 0)
+ goto err_idr_alloc_cyclic;
+ }
+
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return 0;
+
+err_idr_alloc_cyclic:
+ for (i--; i >= 0; i--)
+ idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base + i);
+err_idr_cursor:
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return err;
+}
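The cursor check above means range allocations must be requested during init in ascending, back-to-back order, which is exactly how the flood-table init earlier in this patch drives it: one range per flood table, family after family. A minimal sketch of the expected call pattern, with illustrative sizes (4094 is the assumed 802.1Q FID count; real callers derive base and count from the FID family):

	static int example_pgt_ranges(struct mlxsw_sp *mlxsw_sp)
	{
		u16 base = 0;	/* 802.1Q family's PGT base */
		int i, err;

		/* One range per flood table (UC/MC/BC), requested back to
		 * back in ascending order; any gap or reordering would trip
		 * the idr_get_cursor() check above.
		 */
		for (i = 0; i < 3; i++) {
			err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp,
							   base + i * 4094,
							   4094);
			if (err)
				return err;
		}

		return 0;
	}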
+
+void
+mlxsw_sp_pgt_mid_free_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, u16 count)
+{
+ struct idr *pgt_idr = &mlxsw_sp->pgt->pgt_idr;
+ int i;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+
+ for (i = 0; i < count; i++)
+ WARN_ON_ONCE(idr_remove(pgt_idr, mid_base + i));
+
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+}
+
+static struct mlxsw_sp_pgt_entry_port *
+mlxsw_sp_pgt_entry_port_lookup(struct mlxsw_sp_pgt_entry *pgt_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
+
+ list_for_each_entry(pgt_entry_port, &pgt_entry->ports_list, list) {
+ if (pgt_entry_port->local_port == local_port)
+ return pgt_entry_port;
+ }
+
+ return NULL;
+}
+
+static struct mlxsw_sp_pgt_entry *
+mlxsw_sp_pgt_entry_create(struct mlxsw_sp_pgt *pgt, u16 mid, u16 smpe)
+{
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+ void *ret;
+ int err;
+
+ pgt_entry = kzalloc(sizeof(*pgt_entry), GFP_KERNEL);
+ if (!pgt_entry)
+ return ERR_PTR(-ENOMEM);
+
+ ret = idr_replace(&pgt->pgt_idr, pgt_entry, mid);
+ if (IS_ERR(ret)) {
+ err = PTR_ERR(ret);
+ goto err_idr_replace;
+ }
+
+ INIT_LIST_HEAD(&pgt_entry->ports_list);
+ pgt_entry->index = mid;
+ pgt_entry->smpe_index = smpe;
+ return pgt_entry;
+
+err_idr_replace:
+ kfree(pgt_entry);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_pgt_entry_destroy(struct mlxsw_sp_pgt *pgt,
+ struct mlxsw_sp_pgt_entry *pgt_entry)
+{
+ WARN_ON(!list_empty(&pgt_entry->ports_list));
+
+ pgt_entry = idr_replace(&pgt->pgt_idr, NULL, pgt_entry->index);
+ if (WARN_ON(IS_ERR(pgt_entry)))
+ return;
+
+ kfree(pgt_entry);
+}
+
+static struct mlxsw_sp_pgt_entry *
+mlxsw_sp_pgt_entry_get(struct mlxsw_sp_pgt *pgt, u16 mid, u16 smpe)
+{
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+
+ pgt_entry = idr_find(&pgt->pgt_idr, mid);
+ if (pgt_entry)
+ return pgt_entry;
+
+ return mlxsw_sp_pgt_entry_create(pgt, mid, smpe);
+}
+
+static void mlxsw_sp_pgt_entry_put(struct mlxsw_sp_pgt *pgt, u16 mid)
+{
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+
+ pgt_entry = idr_find(&pgt->pgt_idr, mid);
+ if (WARN_ON(!pgt_entry))
+ return;
+
+ if (list_empty(&pgt_entry->ports_list))
+ mlxsw_sp_pgt_entry_destroy(pgt, pgt_entry);
+}
+
+static void mlxsw_sp_pgt_smid2_port_set(char *smid2_pl, u16 local_port,
+ bool member)
+{
+ mlxsw_reg_smid2_port_set(smid2_pl, local_port, member);
+ mlxsw_reg_smid2_port_mask_set(smid2_pl, local_port, 1);
+}
+
+static int
+mlxsw_sp_pgt_entry_port_write(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_pgt_entry *pgt_entry,
+ u16 local_port, bool member)
+{
+ char *smid2_pl;
+ int err;
+
+ smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
+ if (!smid2_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_smid2_pack(smid2_pl, pgt_entry->index, 0, 0,
+ mlxsw_sp->pgt->smpe_index_valid,
+ pgt_entry->smpe_index);
+
+ mlxsw_sp_pgt_smid2_port_set(smid2_pl, local_port, member);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
+
+ kfree(smid2_pl);
+
+ return err;
+}
+
+static struct mlxsw_sp_pgt_entry_port *
+mlxsw_sp_pgt_entry_port_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_pgt_entry *pgt_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
+ int err;
+
+ pgt_entry_port = kzalloc(sizeof(*pgt_entry_port), GFP_KERNEL);
+ if (!pgt_entry_port)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlxsw_sp_pgt_entry_port_write(mlxsw_sp, pgt_entry, local_port,
+ true);
+ if (err)
+ goto err_pgt_entry_port_write;
+
+ pgt_entry_port->local_port = local_port;
+ list_add(&pgt_entry_port->list, &pgt_entry->ports_list);
+
+ return pgt_entry_port;
+
+err_pgt_entry_port_write:
+ kfree(pgt_entry_port);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_pgt_entry_port_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_pgt_entry *pgt_entry,
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port)
+
+{
+ list_del(&pgt_entry_port->list);
+ mlxsw_sp_pgt_entry_port_write(mlxsw_sp, pgt_entry,
+ pgt_entry_port->local_port, false);
+ kfree(pgt_entry_port);
+}
+
+static int mlxsw_sp_pgt_entry_port_add(struct mlxsw_sp *mlxsw_sp, u16 mid,
+ u16 smpe, u16 local_port)
+{
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+ int err;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+
+ pgt_entry = mlxsw_sp_pgt_entry_get(mlxsw_sp->pgt, mid, smpe);
+ if (IS_ERR(pgt_entry)) {
+ err = PTR_ERR(pgt_entry);
+ goto err_pgt_entry_get;
+ }
+
+ pgt_entry_port = mlxsw_sp_pgt_entry_port_create(mlxsw_sp, pgt_entry,
+ local_port);
+ if (IS_ERR(pgt_entry_port)) {
+ err = PTR_ERR(pgt_entry_port);
+ goto err_pgt_entry_port_get;
+ }
+
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return 0;
+
+err_pgt_entry_port_get:
+ mlxsw_sp_pgt_entry_put(mlxsw_sp->pgt, mid);
+err_pgt_entry_get:
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+ return err;
+}
+
+static void mlxsw_sp_pgt_entry_port_del(struct mlxsw_sp *mlxsw_sp,
+ u16 mid, u16 smpe, u16 local_port)
+{
+ struct mlxsw_sp_pgt_entry_port *pgt_entry_port;
+ struct mlxsw_sp_pgt_entry *pgt_entry;
+
+ mutex_lock(&mlxsw_sp->pgt->lock);
+
+ pgt_entry = idr_find(&mlxsw_sp->pgt->pgt_idr, mid);
+ if (!pgt_entry)
+ goto out;
+
+ pgt_entry_port = mlxsw_sp_pgt_entry_port_lookup(pgt_entry, local_port);
+ if (!pgt_entry_port)
+ goto out;
+
+ mlxsw_sp_pgt_entry_port_destroy(mlxsw_sp, pgt_entry, pgt_entry_port);
+ mlxsw_sp_pgt_entry_put(mlxsw_sp->pgt, mid);
+
+out:
+ mutex_unlock(&mlxsw_sp->pgt->lock);
+}
+
+int mlxsw_sp_pgt_entry_port_set(struct mlxsw_sp *mlxsw_sp, u16 mid,
+ u16 smpe, u16 local_port, bool member)
+{
+ if (member)
+ return mlxsw_sp_pgt_entry_port_add(mlxsw_sp, mid, smpe,
+ local_port);
+
+ mlxsw_sp_pgt_entry_port_del(mlxsw_sp, mid, smpe, local_port);
+ return 0;
+}
+
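A hedged end-to-end sketch of the PGT API added by this file: allocate a MID, make a port a member, then undo both. In practice the MID comes from a FID family's flood-table range and the SMPE index from the FID, so the zero SMPE below is purely illustrative (it is only consulted when smpe_index_valid is set):

	static int example_pgt_usage(struct mlxsw_sp *mlxsw_sp, u16 local_port)
	{
		u16 mid, smpe = 0;	/* illustrative; ignored unless valid */
		int err;

		err = mlxsw_sp_pgt_mid_alloc(mlxsw_sp, &mid);
		if (err)
			return err;

		err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mid, smpe,
						  local_port, true);
		if (err)
			goto out;

		/* Traffic flooded to 'mid' now egresses 'local_port'. */

		mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mid, smpe, local_port,
					    false);
	out:
		mlxsw_sp_pgt_mid_free(mlxsw_sp, mid);
		return err;
	}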
+int mlxsw_sp_pgt_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_pgt *pgt;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, PGT_SIZE))
+ return -EIO;
+
+ pgt = kzalloc(sizeof(*mlxsw_sp->pgt), GFP_KERNEL);
+ if (!pgt)
+ return -ENOMEM;
+
+ idr_init(&pgt->pgt_idr);
+ pgt->end_index = MLXSW_CORE_RES_GET(mlxsw_sp->core, PGT_SIZE);
+ mutex_init(&pgt->lock);
+ pgt->smpe_index_valid = mlxsw_sp->pgt_smpe_index_valid;
+ mlxsw_sp->pgt = pgt;
+ return 0;
+}
+
+void mlxsw_sp_pgt_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mutex_destroy(&mlxsw_sp->pgt->lock);
+ WARN_ON(!idr_is_empty(&mlxsw_sp->pgt->pgt_idr));
+ idr_destroy(&mlxsw_sp->pgt->pgt_idr);
+ kfree(mlxsw_sp->pgt);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c
index 39052e5c12fd..22ebb207ce4d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c
@@ -94,10 +94,10 @@ mlxsw_sp_policer_single_rate_family_init(struct mlxsw_sp_policer_family *family)
atomic_set(&family->policers_count, 0);
devlink = priv_to_devlink(core);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
- mlxsw_sp_policer_single_rate_occ_get,
- family);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
+ mlxsw_sp_policer_single_rate_occ_get,
+ family);
return 0;
}
@@ -107,8 +107,8 @@ mlxsw_sp_policer_single_rate_family_fini(struct mlxsw_sp_policer_family *family)
{
struct devlink *devlink = priv_to_devlink(family->mlxsw_sp->core);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS);
WARN_ON(atomic_read(&family->policers_count) != 0);
}
@@ -419,22 +419,22 @@ int mlxsw_sp_policer_resources_register(struct mlxsw_core *mlxsw_core)
devlink_resource_size_params_init(&size_params, global_policers,
global_policers, 1,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, "global_policers",
- global_policers,
- MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &size_params);
+ err = devl_resource_register(devlink, "global_policers",
+ global_policers,
+ MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
if (err)
return err;
devlink_resource_size_params_init(&size_params, single_rate_policers,
single_rate_policers, 1,
DEVLINK_RESOURCE_UNIT_ENTRY);
- err = devlink_resource_register(devlink, "single_rate_policers",
- single_rate_policers,
- MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
- MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
- &size_params);
+ err = devl_resource_register(devlink, "single_rate_policers",
+ single_rate_policers,
+ MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
+ MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
+ &size_params);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 35422e64d89f..2e0b704b8a31 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -11,6 +11,7 @@
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/net_tstamp.h>
+#include <linux/refcount.h>
#include "spectrum.h"
#include "spectrum_ptp.h"
@@ -29,12 +30,24 @@
struct mlxsw_sp_ptp_state {
struct mlxsw_sp *mlxsw_sp;
+};
+
+struct mlxsw_sp1_ptp_state {
+ struct mlxsw_sp_ptp_state common;
struct rhltable unmatched_ht;
spinlock_t unmatched_lock; /* protects the HT */
struct delayed_work ht_gc_dw;
u32 gc_cycle;
};
+struct mlxsw_sp2_ptp_state {
+ struct mlxsw_sp_ptp_state common;
+ refcount_t ptp_port_enabled_ref; /* Number of ports with time stamping
+ * enabled.
+ */
+ struct hwtstamp_config config;
+};
+
struct mlxsw_sp1_ptp_key {
u16 local_port;
u8 message_type;
@@ -60,20 +73,44 @@ static const struct rhashtable_params mlxsw_sp1_ptp_unmatched_ht_params = {
struct mlxsw_sp_ptp_clock {
struct mlxsw_core *core;
+ struct ptp_clock *ptp;
+ struct ptp_clock_info ptp_info;
+};
+
+struct mlxsw_sp1_ptp_clock {
+ struct mlxsw_sp_ptp_clock common;
spinlock_t lock; /* protect this structure */
struct cyclecounter cycles;
struct timecounter tc;
u32 nominal_c_mult;
- struct ptp_clock *ptp;
- struct ptp_clock_info ptp_info;
unsigned long overflow_period;
struct delayed_work overflow_work;
};
-static u64 __mlxsw_sp1_ptp_read_frc(struct mlxsw_sp_ptp_clock *clock,
+static struct mlxsw_sp1_ptp_state *
+mlxsw_sp1_ptp_state(struct mlxsw_sp *mlxsw_sp)
+{
+ return container_of(mlxsw_sp->ptp_state, struct mlxsw_sp1_ptp_state,
+ common);
+}
+
+static struct mlxsw_sp2_ptp_state *
+mlxsw_sp2_ptp_state(struct mlxsw_sp *mlxsw_sp)
+{
+ return container_of(mlxsw_sp->ptp_state, struct mlxsw_sp2_ptp_state,
+ common);
+}
+
+static struct mlxsw_sp1_ptp_clock *
+mlxsw_sp1_ptp_clock(struct ptp_clock_info *ptp)
+{
+ return container_of(ptp, struct mlxsw_sp1_ptp_clock, common.ptp_info);
+}
+
+static u64 __mlxsw_sp1_ptp_read_frc(struct mlxsw_sp1_ptp_clock *clock,
struct ptp_system_timestamp *sts)
{
- struct mlxsw_core *mlxsw_core = clock->core;
+ struct mlxsw_core *mlxsw_core = clock->common.core;
u32 frc_h1, frc_h2, frc_l;
frc_h1 = mlxsw_core_read_frc_h(mlxsw_core);
@@ -94,20 +131,20 @@ static u64 __mlxsw_sp1_ptp_read_frc(struct mlxsw_sp_ptp_clock *clock,
static u64 mlxsw_sp1_ptp_read_frc(const struct cyclecounter *cc)
{
- struct mlxsw_sp_ptp_clock *clock =
- container_of(cc, struct mlxsw_sp_ptp_clock, cycles);
+ struct mlxsw_sp1_ptp_clock *clock =
+ container_of(cc, struct mlxsw_sp1_ptp_clock, cycles);
return __mlxsw_sp1_ptp_read_frc(clock, NULL) & cc->mask;
}
static int
-mlxsw_sp1_ptp_phc_adjfreq(struct mlxsw_sp_ptp_clock *clock, int freq_adj)
+mlxsw_sp_ptp_phc_adjfreq(struct mlxsw_sp_ptp_clock *clock, int freq_adj)
{
struct mlxsw_core *mlxsw_core = clock->core;
char mtutc_pl[MLXSW_REG_MTUTC_LEN];
mlxsw_reg_mtutc_pack(mtutc_pl, MLXSW_REG_MTUTC_OPERATION_ADJUST_FREQ,
- freq_adj, 0);
+ freq_adj, 0, 0, 0);
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
}
@@ -122,9 +159,9 @@ static u64 mlxsw_sp1_ptp_ns2cycles(const struct timecounter *tc, u64 nsec)
}
static int
-mlxsw_sp1_ptp_phc_settime(struct mlxsw_sp_ptp_clock *clock, u64 nsec)
+mlxsw_sp1_ptp_phc_settime(struct mlxsw_sp1_ptp_clock *clock, u64 nsec)
{
- struct mlxsw_core *mlxsw_core = clock->core;
+ struct mlxsw_core *mlxsw_core = clock->common.core;
u64 next_sec, next_sec_in_nsec, cycles;
char mtutc_pl[MLXSW_REG_MTUTC_LEN];
char mtpps_pl[MLXSW_REG_MTPPS_LEN];
@@ -144,14 +181,13 @@ mlxsw_sp1_ptp_phc_settime(struct mlxsw_sp_ptp_clock *clock, u64 nsec)
mlxsw_reg_mtutc_pack(mtutc_pl,
MLXSW_REG_MTUTC_OPERATION_SET_TIME_AT_NEXT_SEC,
- 0, next_sec);
+ 0, next_sec, 0, 0);
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
}
static int mlxsw_sp1_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
- struct mlxsw_sp_ptp_clock *clock =
- container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
int neg_adj = 0;
u32 diff;
u64 adj;
@@ -174,13 +210,12 @@ static int mlxsw_sp1_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
clock->nominal_c_mult + diff;
spin_unlock_bh(&clock->lock);
- return mlxsw_sp1_ptp_phc_adjfreq(clock, neg_adj ? -ppb : ppb);
+ return mlxsw_sp_ptp_phc_adjfreq(&clock->common, neg_adj ? -ppb : ppb);
}
static int mlxsw_sp1_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
- struct mlxsw_sp_ptp_clock *clock =
- container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
u64 nsec;
spin_lock_bh(&clock->lock);
@@ -195,8 +230,7 @@ static int mlxsw_sp1_ptp_gettimex(struct ptp_clock_info *ptp,
struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
- struct mlxsw_sp_ptp_clock *clock =
- container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
u64 cycles, nsec;
spin_lock_bh(&clock->lock);
@@ -212,8 +246,7 @@ static int mlxsw_sp1_ptp_gettimex(struct ptp_clock_info *ptp,
static int mlxsw_sp1_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
- struct mlxsw_sp_ptp_clock *clock =
- container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ struct mlxsw_sp1_ptp_clock *clock = mlxsw_sp1_ptp_clock(ptp);
u64 nsec = timespec64_to_ns(ts);
spin_lock_bh(&clock->lock);
@@ -237,9 +270,9 @@ static const struct ptp_clock_info mlxsw_sp1_ptp_clock_info = {
static void mlxsw_sp1_ptp_clock_overflow(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct mlxsw_sp_ptp_clock *clock;
+ struct mlxsw_sp1_ptp_clock *clock;
- clock = container_of(dwork, struct mlxsw_sp_ptp_clock, overflow_work);
+ clock = container_of(dwork, struct mlxsw_sp1_ptp_clock, overflow_work);
spin_lock_bh(&clock->lock);
timecounter_read(&clock->tc);
@@ -251,7 +284,7 @@ struct mlxsw_sp_ptp_clock *
mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
{
u64 overflow_cycles, nsec, frac = 0;
- struct mlxsw_sp_ptp_clock *clock;
+ struct mlxsw_sp1_ptp_clock *clock;
int err;
clock = kzalloc(sizeof(*clock), GFP_KERNEL);
@@ -265,10 +298,9 @@ mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
clock->cycles.shift);
clock->nominal_c_mult = clock->cycles.mult;
clock->cycles.mask = CLOCKSOURCE_MASK(MLXSW_SP1_PTP_CLOCK_MASK);
- clock->core = mlxsw_sp->core;
+ clock->common.core = mlxsw_sp->core;
- timecounter_init(&clock->tc, &clock->cycles,
- ktime_to_ns(ktime_get_real()));
+ timecounter_init(&clock->tc, &clock->cycles, 0);
/* Calculate period in seconds to call the overflow watchdog - to make
* sure counter is checked at least twice every wrap around.
@@ -286,7 +318,158 @@ mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
INIT_DELAYED_WORK(&clock->overflow_work, mlxsw_sp1_ptp_clock_overflow);
mlxsw_core_schedule_dw(&clock->overflow_work, 0);
- clock->ptp_info = mlxsw_sp1_ptp_clock_info;
+ clock->common.ptp_info = mlxsw_sp1_ptp_clock_info;
+ clock->common.ptp = ptp_clock_register(&clock->common.ptp_info, dev);
+ if (IS_ERR(clock->common.ptp)) {
+ err = PTR_ERR(clock->common.ptp);
+ dev_err(dev, "ptp_clock_register failed %d\n", err);
+ goto err_ptp_clock_register;
+ }
+
+ return &clock->common;
+
+err_ptp_clock_register:
+ cancel_delayed_work_sync(&clock->overflow_work);
+ kfree(clock);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp1_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock_common)
+{
+ struct mlxsw_sp1_ptp_clock *clock =
+ container_of(clock_common, struct mlxsw_sp1_ptp_clock, common);
+
+ ptp_clock_unregister(clock_common->ptp);
+ cancel_delayed_work_sync(&clock->overflow_work);
+ kfree(clock);
+}
+
+static u64 mlxsw_sp2_ptp_read_utc(struct mlxsw_sp_ptp_clock *clock,
+ struct ptp_system_timestamp *sts)
+{
+ struct mlxsw_core *mlxsw_core = clock->core;
+ u32 utc_sec1, utc_sec2, utc_nsec;
+
+ utc_sec1 = mlxsw_core_read_utc_sec(mlxsw_core);
+ ptp_read_system_prets(sts);
+ utc_nsec = mlxsw_core_read_utc_nsec(mlxsw_core);
+ ptp_read_system_postts(sts);
+ utc_sec2 = mlxsw_core_read_utc_sec(mlxsw_core);
+
+ if (utc_sec1 != utc_sec2) {
+ /* Wrap around. */
+ ptp_read_system_prets(sts);
+ utc_nsec = mlxsw_core_read_utc_nsec(mlxsw_core);
+ ptp_read_system_postts(sts);
+ }
+
+ return (u64)utc_sec2 * NSEC_PER_SEC + utc_nsec;
+}
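The seconds register is read on both sides of the nanoseconds read so a roll-over can be detected; when one happens, the nanoseconds are re-read and paired with the newer seconds value. A worked example of the race this avoids (all values invented):

	/*
	 *   read sec  -> 99
	 *   read nsec -> 40      (counter already rolled into second 100)
	 *   read sec  -> 100     (sec1 != sec2, so nsec is read again)
	 *
	 * Pairing 99 s with the post-roll-over 40 ns would step the clock
	 * back by almost a second; re-reading keeps nsec consistent with
	 * utc_sec2.
	 */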
+
+static int
+mlxsw_sp2_ptp_phc_settime(struct mlxsw_sp_ptp_clock *clock, u64 nsec)
+{
+ struct mlxsw_core *mlxsw_core = clock->core;
+ char mtutc_pl[MLXSW_REG_MTUTC_LEN];
+ u32 sec, nsec_rem;
+
+ sec = div_u64_rem(nsec, NSEC_PER_SEC, &nsec_rem);
+ mlxsw_reg_mtutc_pack(mtutc_pl,
+ MLXSW_REG_MTUTC_OPERATION_SET_TIME_IMMEDIATE,
+ 0, sec, nsec_rem, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
+}
+
+static int mlxsw_sp2_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
+
+ /* In Spectrum-2 and newer ASICs, the frequency adjustment in MTUTC is
+ * reversed; positive values mean to decrease the frequency. Adjust the
+ * sign of the PPB value accordingly.
+ */
+ return mlxsw_sp_ptp_phc_adjfreq(clock, -ppb);
+}
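A quick numeric check of the sign flip, using only the standard scaled_ppm_to_ppb() helper: a request to run 1 ppm faster arrives as scaled_ppm = 65536 (ppm with a 16-bit fractional part), converts to 1000 ppb, and is written to MTUTC as -1000, which Spectrum-2 and newer interpret as speeding the clock up.

	/*
	 *   scaled_ppm             = 1 ppm << 16 = 65536
	 *   scaled_ppm_to_ppb(...) = 1000 ppb
	 *   value written to MTUTC = -1000   (ASIC reads this as "faster")
	 */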
+
+static int mlxsw_sp2_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ struct mlxsw_core *mlxsw_core = clock->core;
+ char mtutc_pl[MLXSW_REG_MTUTC_LEN];
+
+ /* HW time adjustment range is s16. If out of range, set time instead. */
+ if (delta < S16_MIN || delta > S16_MAX) {
+ u64 nsec;
+
+ nsec = mlxsw_sp2_ptp_read_utc(clock, NULL);
+ nsec += delta;
+
+ return mlxsw_sp2_ptp_phc_settime(clock, nsec);
+ }
+
+ mlxsw_reg_mtutc_pack(mtutc_pl,
+ MLXSW_REG_MTUTC_OPERATION_ADJUST_TIME,
+ 0, 0, 0, delta);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtutc), mtutc_pl);
+}
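So an adjustment that fits in 16 signed bits is applied by the ASIC in a single register write, while anything larger degrades to a read-modify-write of the absolute UTC time. Illustrative values only:

	/*
	 *   adjtime(+25000)   : |delta| <= S16_MAX -> one ADJUST_TIME write
	 *   adjtime(+1000000) : out of range -> read UTC, add delta,
	 *                       SET_TIME_IMMEDIATE with the new absolute time
	 */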
+
+static int mlxsw_sp2_ptp_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ u64 nsec;
+
+ nsec = mlxsw_sp2_ptp_read_utc(clock, sts);
+ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct mlxsw_sp_ptp_clock *clock =
+ container_of(ptp, struct mlxsw_sp_ptp_clock, ptp_info);
+ u64 nsec = timespec64_to_ns(ts);
+
+ return mlxsw_sp2_ptp_phc_settime(clock, nsec);
+}
+
+static const struct ptp_clock_info mlxsw_sp2_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "mlxsw_sp_clock",
+ .max_adj = MLXSW_REG_MTUTC_MAX_FREQ_ADJ,
+ .adjfine = mlxsw_sp2_ptp_adjfine,
+ .adjtime = mlxsw_sp2_ptp_adjtime,
+ .gettimex64 = mlxsw_sp2_ptp_gettimex,
+ .settime64 = mlxsw_sp2_ptp_settime,
+};
+
+struct mlxsw_sp_ptp_clock *
+mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
+{
+ struct mlxsw_sp_ptp_clock *clock;
+ int err;
+
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock)
+ return ERR_PTR(-ENOMEM);
+
+ clock->core = mlxsw_sp->core;
+
+ clock->ptp_info = mlxsw_sp2_ptp_clock_info;
+
+ err = mlxsw_sp2_ptp_phc_settime(clock, 0);
+ if (err) {
+ dev_err(dev, "setting UTC time failed %d\n", err);
+ goto err_ptp_phc_settime;
+ }
+
clock->ptp = ptp_clock_register(&clock->ptp_info, dev);
if (IS_ERR(clock->ptp)) {
err = PTR_ERR(clock->ptp);
@@ -297,15 +480,14 @@ mlxsw_sp1_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
return clock;
err_ptp_clock_register:
- cancel_delayed_work_sync(&clock->overflow_work);
+err_ptp_phc_settime:
kfree(clock);
return ERR_PTR(err);
}
-void mlxsw_sp1_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock)
+void mlxsw_sp2_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock)
{
ptp_clock_unregister(clock->ptp);
- cancel_delayed_work_sync(&clock->overflow_work);
kfree(clock);
}
@@ -348,7 +530,7 @@ mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp,
u64 timestamp)
{
int cycles = MLXSW_SP1_PTP_HT_GC_TIMEOUT / MLXSW_SP1_PTP_HT_GC_INTERVAL;
- struct mlxsw_sp_ptp_state *ptp_state = mlxsw_sp->ptp_state;
+ struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
struct mlxsw_sp1_ptp_unmatched *unmatched;
int err;
@@ -359,7 +541,7 @@ mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp,
unmatched->key = key;
unmatched->skb = skb;
unmatched->timestamp = timestamp;
- unmatched->gc_cycle = mlxsw_sp->ptp_state->gc_cycle + cycles;
+ unmatched->gc_cycle = ptp_state->gc_cycle + cycles;
err = rhltable_insert(&ptp_state->unmatched_ht, &unmatched->ht_node,
mlxsw_sp1_ptp_unmatched_ht_params);
@@ -373,11 +555,12 @@ static struct mlxsw_sp1_ptp_unmatched *
mlxsw_sp1_ptp_unmatched_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp1_ptp_key key, int *p_length)
{
+ struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
struct mlxsw_sp1_ptp_unmatched *unmatched, *last = NULL;
struct rhlist_head *tmp, *list;
int length = 0;
- list = rhltable_lookup(&mlxsw_sp->ptp_state->unmatched_ht, &key,
+ list = rhltable_lookup(&ptp_state->unmatched_ht, &key,
mlxsw_sp1_ptp_unmatched_ht_params);
rhl_for_each_entry_rcu(unmatched, tmp, list, ht_node) {
last = unmatched;
@@ -392,7 +575,9 @@ static int
mlxsw_sp1_ptp_unmatched_remove(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp1_ptp_unmatched *unmatched)
{
- return rhltable_remove(&mlxsw_sp->ptp_state->unmatched_ht,
+ struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
+
+ return rhltable_remove(&ptp_state->unmatched_ht,
&unmatched->ht_node,
mlxsw_sp1_ptp_unmatched_ht_params);
}
@@ -438,12 +623,16 @@ static void mlxsw_sp1_packet_timestamp(struct mlxsw_sp *mlxsw_sp,
struct sk_buff *skb,
u64 timestamp)
{
+ struct mlxsw_sp_ptp_clock *clock_common = mlxsw_sp->clock;
+ struct mlxsw_sp1_ptp_clock *clock =
+ container_of(clock_common, struct mlxsw_sp1_ptp_clock, common);
+
struct skb_shared_hwtstamps hwtstamps;
u64 nsec;
- spin_lock_bh(&mlxsw_sp->clock->lock);
- nsec = timecounter_cyc2time(&mlxsw_sp->clock->tc, timestamp);
- spin_unlock_bh(&mlxsw_sp->clock->lock);
+ spin_lock_bh(&clock->lock);
+ nsec = timecounter_cyc2time(&clock->tc, timestamp);
+ spin_unlock_bh(&clock->lock);
hwtstamps.hwtstamp = ns_to_ktime(nsec);
mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
@@ -481,13 +670,14 @@ static void mlxsw_sp1_ptp_got_piece(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp1_ptp_key key,
struct sk_buff *skb, u64 timestamp)
{
+ struct mlxsw_sp1_ptp_state *ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
struct mlxsw_sp1_ptp_unmatched *unmatched;
int length;
int err;
rcu_read_lock();
- spin_lock(&mlxsw_sp->ptp_state->unmatched_lock);
+ spin_lock(&ptp_state->unmatched_lock);
unmatched = mlxsw_sp1_ptp_unmatched_lookup(mlxsw_sp, key, &length);
if (skb && unmatched && unmatched->timestamp) {
@@ -515,7 +705,7 @@ static void mlxsw_sp1_ptp_got_piece(struct mlxsw_sp *mlxsw_sp,
WARN_ON_ONCE(err);
}
- spin_unlock(&mlxsw_sp->ptp_state->unmatched_lock);
+ spin_unlock(&ptp_state->unmatched_lock);
if (unmatched)
mlxsw_sp1_ptp_unmatched_finish(mlxsw_sp, unmatched);
@@ -611,9 +801,10 @@ void mlxsw_sp1_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
}
static void
-mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp_ptp_state *ptp_state,
+mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp1_ptp_state *ptp_state,
struct mlxsw_sp1_ptp_unmatched *unmatched)
{
+ struct mlxsw_sp *mlxsw_sp = ptp_state->common.mlxsw_sp;
struct mlxsw_sp_ptp_port_dir_stats *stats;
struct mlxsw_sp_port *mlxsw_sp_port;
int err;
@@ -636,7 +827,7 @@ mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp_ptp_state *ptp_state,
/* The packet was matched with timestamp during the walk. */
goto out;
- mlxsw_sp_port = ptp_state->mlxsw_sp->ports[unmatched->key.local_port];
+ mlxsw_sp_port = mlxsw_sp->ports[unmatched->key.local_port];
if (mlxsw_sp_port) {
stats = unmatched->key.ingress ?
&mlxsw_sp_port->ptp.stats.rx_gcd :
@@ -653,7 +844,7 @@ mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp_ptp_state *ptp_state,
* netif_receive_skb(), in process context, is seen elsewhere in the
* kernel, notably in pktgen.
*/
- mlxsw_sp1_ptp_unmatched_finish(ptp_state->mlxsw_sp, unmatched);
+ mlxsw_sp1_ptp_unmatched_finish(mlxsw_sp, unmatched);
out:
local_bh_enable();
@@ -663,12 +854,12 @@ static void mlxsw_sp1_ptp_ht_gc(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct mlxsw_sp1_ptp_unmatched *unmatched;
- struct mlxsw_sp_ptp_state *ptp_state;
+ struct mlxsw_sp1_ptp_state *ptp_state;
struct rhashtable_iter iter;
u32 gc_cycle;
void *obj;
- ptp_state = container_of(dwork, struct mlxsw_sp_ptp_state, ht_gc_dw);
+ ptp_state = container_of(dwork, struct mlxsw_sp1_ptp_state, ht_gc_dw);
gc_cycle = ptp_state->gc_cycle++;
rhltable_walk_enter(&ptp_state->unmatched_ht, &iter);
@@ -694,7 +885,7 @@ static int mlxsw_sp_ptp_mtptpt_set(struct mlxsw_sp *mlxsw_sp,
{
char mtptpt_pl[MLXSW_REG_MTPTPT_LEN];
- mlxsw_reg_mtptptp_pack(mtptpt_pl, trap_id, message_type);
+ mlxsw_reg_mtptpt_pack(mtptpt_pl, trap_id, message_type);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mtptpt), mtptpt_pl);
}
@@ -807,10 +998,44 @@ static int mlxsw_sp1_ptp_shaper_params_set(struct mlxsw_sp *mlxsw_sp)
return 0;
}
+static int mlxsw_sp_ptp_traps_set(struct mlxsw_sp *mlxsw_sp)
+{
+ u16 event_message_type;
+ int err;
+
+ /* Deliver these message types as PTP0. */
+ event_message_type = BIT(PTP_MSGTYPE_SYNC) |
+ BIT(PTP_MSGTYPE_DELAY_REQ) |
+ BIT(PTP_MSGTYPE_PDELAY_REQ) |
+ BIT(PTP_MSGTYPE_PDELAY_RESP);
+
+ err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0,
+ event_message_type);
+ if (err)
+ return err;
+
+ /* Everything else is PTP1. */
+ err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1,
+ ~event_message_type);
+ if (err)
+ goto err_mtptpt1_set;
+
+ return 0;
+
+err_mtptpt1_set:
+ mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
+ return err;
+}
+
+static void mlxsw_sp_ptp_traps_unset(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
+ mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
+}
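
The split performed by mlxsw_sp_ptp_traps_set() sends the four PTP event message types to trap PTP0 and everything else to PTP1. A small user-space sketch of the resulting bitmasks, assuming the standard IEEE 1588 message-type values (Sync = 0, Delay_Req = 1, Pdelay_Req = 2, Pdelay_Resp = 3), is shown below.

/* Illustration of how the event message types are split between the two PTP
 * trap IDs. The message-type values follow the IEEE 1588 encoding; the
 * print-out is for demonstration only.
 */
#include <stdint.h>
#include <stdio.h>

enum {
	PTP_MSGTYPE_SYNC	= 0x0,
	PTP_MSGTYPE_DELAY_REQ	= 0x1,
	PTP_MSGTYPE_PDELAY_REQ	= 0x2,
	PTP_MSGTYPE_PDELAY_RESP	= 0x3,
};

#define BIT(n) (1U << (n))

int main(void)
{
	uint16_t event_message_type = BIT(PTP_MSGTYPE_SYNC) |
				      BIT(PTP_MSGTYPE_DELAY_REQ) |
				      BIT(PTP_MSGTYPE_PDELAY_REQ) |
				      BIT(PTP_MSGTYPE_PDELAY_RESP);

	/* PTP0 traps the four event message types (bitmask 0x000f), PTP1
	 * traps the remaining general message types (0xfff0).
	 */
	printf("PTP0 mask: 0x%04x\n", event_message_type);
	printf("PTP1 mask: 0x%04x\n", (uint16_t)~event_message_type);
	return 0;
}
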
+
struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
{
- struct mlxsw_sp_ptp_state *ptp_state;
- u16 message_type;
+ struct mlxsw_sp1_ptp_state *ptp_state;
int err;
err = mlxsw_sp1_ptp_shaper_params_set(mlxsw_sp);
@@ -820,7 +1045,7 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
ptp_state = kzalloc(sizeof(*ptp_state), GFP_KERNEL);
if (!ptp_state)
return ERR_PTR(-ENOMEM);
- ptp_state->mlxsw_sp = mlxsw_sp;
+ ptp_state->common.mlxsw_sp = mlxsw_sp;
spin_lock_init(&ptp_state->unmatched_lock);
@@ -829,22 +1054,9 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_hashtable_init;
- /* Delive these message types as PTP0. */
- message_type = BIT(PTP_MSGTYPE_SYNC) |
- BIT(PTP_MSGTYPE_DELAY_REQ) |
- BIT(PTP_MSGTYPE_PDELAY_REQ) |
- BIT(PTP_MSGTYPE_PDELAY_RESP);
- err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0,
- message_type);
- if (err)
- goto err_mtptpt_set;
-
- /* Everything else is PTP1. */
- message_type = ~message_type;
- err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1,
- message_type);
+ err = mlxsw_sp_ptp_traps_set(mlxsw_sp);
if (err)
- goto err_mtptpt1_set;
+ goto err_ptp_traps_set;
err = mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, true);
if (err)
@@ -853,28 +1065,28 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
INIT_DELAYED_WORK(&ptp_state->ht_gc_dw, mlxsw_sp1_ptp_ht_gc);
mlxsw_core_schedule_dw(&ptp_state->ht_gc_dw,
MLXSW_SP1_PTP_HT_GC_INTERVAL);
- return ptp_state;
+ return &ptp_state->common;
err_fifo_clr:
- mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
-err_mtptpt1_set:
- mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
-err_mtptpt_set:
+ mlxsw_sp_ptp_traps_unset(mlxsw_sp);
+err_ptp_traps_set:
rhltable_destroy(&ptp_state->unmatched_ht);
err_hashtable_init:
kfree(ptp_state);
return ERR_PTR(err);
}
-void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state)
+void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
{
- struct mlxsw_sp *mlxsw_sp = ptp_state->mlxsw_sp;
+ struct mlxsw_sp *mlxsw_sp = ptp_state_common->mlxsw_sp;
+ struct mlxsw_sp1_ptp_state *ptp_state;
+
+ ptp_state = mlxsw_sp1_ptp_state(mlxsw_sp);
cancel_delayed_work_sync(&ptp_state->ht_gc_dw);
mlxsw_sp1_ptp_mtpppc_set(mlxsw_sp, 0, 0);
mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, false);
- mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
- mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
+ mlxsw_sp_ptp_traps_unset(mlxsw_sp);
rhltable_free_and_destroy(&ptp_state->unmatched_ht,
&mlxsw_sp1_ptp_unmatched_free_fn, NULL);
kfree(ptp_state);
@@ -887,9 +1099,10 @@ int mlxsw_sp1_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
-static int mlxsw_sp_ptp_get_message_types(const struct hwtstamp_config *config,
- u16 *p_ing_types, u16 *p_egr_types,
- enum hwtstamp_rx_filters *p_rx_filter)
+static int
+mlxsw_sp1_ptp_get_message_types(const struct hwtstamp_config *config,
+ u16 *p_ing_types, u16 *p_egr_types,
+ enum hwtstamp_rx_filters *p_rx_filter)
{
enum hwtstamp_rx_filters rx_filter = config->rx_filter;
enum hwtstamp_tx_types tx_type = config->tx_type;
@@ -1050,8 +1263,8 @@ int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 egr_types;
int err;
- err = mlxsw_sp_ptp_get_message_types(config, &ing_types, &egr_types,
- &rx_filter);
+ err = mlxsw_sp1_ptp_get_message_types(config, &ing_types, &egr_types,
+ &rx_filter);
if (err)
return err;
@@ -1144,3 +1357,354 @@ void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
*data++ = *(u64 *)(stats + offset);
}
}
+
+struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state;
+ int err;
+
+ ptp_state = kzalloc(sizeof(*ptp_state), GFP_KERNEL);
+ if (!ptp_state)
+ return ERR_PTR(-ENOMEM);
+
+ ptp_state->common.mlxsw_sp = mlxsw_sp;
+
+ err = mlxsw_sp_ptp_traps_set(mlxsw_sp);
+ if (err)
+ goto err_ptp_traps_set;
+
+ refcount_set(&ptp_state->ptp_port_enabled_ref, 0);
+ return &ptp_state->common;
+
+err_ptp_traps_set:
+ kfree(ptp_state);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state_common)
+{
+ struct mlxsw_sp *mlxsw_sp = ptp_state_common->mlxsw_sp;
+ struct mlxsw_sp2_ptp_state *ptp_state;
+
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
+
+ mlxsw_sp_ptp_traps_unset(mlxsw_sp);
+ kfree(ptp_state);
+}
+
+static u32 mlxsw_ptp_utc_time_stamp_sec_get(struct mlxsw_core *mlxsw_core,
+ u8 cqe_ts_sec)
+{
+ u32 utc_sec = mlxsw_core_read_utc_sec(mlxsw_core);
+
+ if (cqe_ts_sec > (utc_sec & 0xff))
+ /* A time stamp larger than the low byte of UTC (UTC & 0xff) means the
+ * latter wrapped after the time stamp was collected.
+ */
+ utc_sec -= 256;
+
+ utc_sec &= ~0xff;
+ utc_sec |= cqe_ts_sec;
+
+ return utc_sec;
+}
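
The helper above rebuilds the full UTC seconds from the 8-bit seconds field carried in the CQE, compensating for the case where the low byte of the UTC clock wrapped after the time stamp was taken. A standalone sketch of the same arithmetic, with the two interesting cases spelled out, might look like this.

/* Standalone illustration of reconstructing the full UTC seconds from the
 * 8-bit seconds field carried in the CQE time stamp. The helper mirrors the
 * logic above but runs against plain integers.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t reconstruct_sec(uint32_t current_utc_sec, uint8_t cqe_ts_sec)
{
	uint32_t utc_sec = current_utc_sec;

	/* A CQE value larger than the current low byte means the low byte
	 * wrapped (256 seconds elapsed) since the stamp was taken.
	 */
	if (cqe_ts_sec > (utc_sec & 0xff))
		utc_sec -= 256;

	return (utc_sec & ~0xffU) | cqe_ts_sec;
}

int main(void)
{
	/* No wrap: CQE low byte still matches the current epoch of 256 s. */
	printf("%u\n", reconstruct_sec(1000, 1000 & 0xff));	/* prints 1000 */

	/* Wrap: stamp taken at second 1023 (low byte 0xff), read at 1025. */
	printf("%u\n", reconstruct_sec(1025, 1023 & 0xff));	/* prints 1023 */
	return 0;
}
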
+
+static void mlxsw_sp2_ptp_hwtstamp_fill(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_skb_cb *cb,
+ struct skb_shared_hwtstamps *hwtstamps)
+{
+ u64 ts_sec, ts_nsec, nsec;
+
+ WARN_ON_ONCE(!cb->cqe_ts.sec && !cb->cqe_ts.nsec);
+
+ /* The time stamp in the CQE is represented by 38 bits, a short
+ * representation of the UTC time. Software should create the full time
+ * stamp using the global UTC clock. The seconds have only 8 bits in the
+ * CQE; to create the full time stamp, use the current UTC time and fix
+ * the seconds according to the relation between the UTC seconds and the
+ * CQE seconds.
+ */
+ ts_sec = mlxsw_ptp_utc_time_stamp_sec_get(mlxsw_core, cb->cqe_ts.sec);
+ ts_nsec = cb->cqe_ts.nsec;
+
+ nsec = ts_sec * NSEC_PER_SEC + ts_nsec;
+
+ hwtstamps->hwtstamp = ns_to_ktime(nsec);
+}
+
+void mlxsw_sp2_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u16 local_port)
+{
+ struct skb_shared_hwtstamps hwtstamps;
+
+ mlxsw_sp2_ptp_hwtstamp_fill(mlxsw_sp->core, mlxsw_skb_cb(skb),
+ &hwtstamps);
+ *skb_hwtstamps(skb) = hwtstamps;
+ mlxsw_sp_rx_listener_no_mark_func(skb, local_port, mlxsw_sp);
+}
+
+void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u16 local_port)
+{
+ struct skb_shared_hwtstamps hwtstamps;
+
+ mlxsw_sp2_ptp_hwtstamp_fill(mlxsw_sp->core, mlxsw_skb_cb(skb),
+ &hwtstamps);
+ skb_tstamp_tx(skb, &hwtstamps);
+ dev_kfree_skb_any(skb);
+}
+
+int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state;
+
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+
+ *config = ptp_state->config;
+ return 0;
+}
+
+static int
+mlxsw_sp2_ptp_get_message_types(const struct hwtstamp_config *config,
+ u16 *p_ing_types, u16 *p_egr_types,
+ enum hwtstamp_rx_filters *p_rx_filter)
+{
+ enum hwtstamp_rx_filters rx_filter = config->rx_filter;
+ enum hwtstamp_tx_types tx_type = config->tx_type;
+ u16 ing_types = 0x00;
+ u16 egr_types = 0x00;
+
+ *p_rx_filter = rx_filter;
+
+ switch (rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ ing_types = 0x00;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ /* In Spectrum-2 and above, all packets are time stamped by
+ * default and the driver fills in the time stamp only for event
+ * packets. Return all event types even if only specific types
+ * were requested.
+ */
+ ing_types = 0x0f;
+ *p_rx_filter = HWTSTAMP_FILTER_SOME;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ return -ERANGE;
+ default:
+ return -EINVAL;
+ }
+
+ switch (tx_type) {
+ case HWTSTAMP_TX_OFF:
+ egr_types = 0x00;
+ break;
+ case HWTSTAMP_TX_ON:
+ egr_types = 0x0f;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ case HWTSTAMP_TX_ONESTEP_P2P:
+ return -ERANGE;
+ default:
+ return -EINVAL;
+ }
+
+ *p_ing_types = ing_types;
+ *p_egr_types = egr_types;
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_mtpcpc_set(struct mlxsw_sp *mlxsw_sp, bool ptp_trap_en,
+ u16 ing_types, u16 egr_types)
+{
+ char mtpcpc_pl[MLXSW_REG_MTPCPC_LEN];
+
+ mlxsw_reg_mtpcpc_pack(mtpcpc_pl, false, 0, ptp_trap_en, ing_types,
+ egr_types);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mtpcpc), mtpcpc_pl);
+}
+
+static int mlxsw_sp2_ptp_enable(struct mlxsw_sp *mlxsw_sp, u16 ing_types,
+ u16 egr_types,
+ struct hwtstamp_config new_config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
+ int err;
+
+ err = mlxsw_sp2_ptp_mtpcpc_set(mlxsw_sp, true, ing_types, egr_types);
+ if (err)
+ return err;
+
+ ptp_state->config = new_config;
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_disable(struct mlxsw_sp *mlxsw_sp,
+ struct hwtstamp_config new_config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp);
+ int err;
+
+ err = mlxsw_sp2_ptp_mtpcpc_set(mlxsw_sp, false, 0, 0);
+ if (err)
+ return err;
+
+ ptp_state->config = new_config;
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_configure_port(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 ing_types, u16 egr_types,
+ struct hwtstamp_config new_config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state;
+ int err;
+
+ ASSERT_RTNL();
+
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+
+ if (refcount_inc_not_zero(&ptp_state->ptp_port_enabled_ref))
+ return 0;
+
+ err = mlxsw_sp2_ptp_enable(mlxsw_sp_port->mlxsw_sp, ing_types,
+ egr_types, new_config);
+ if (err)
+ return err;
+
+ refcount_set(&ptp_state->ptp_port_enabled_ref, 1);
+
+ return 0;
+}
+
+static int mlxsw_sp2_ptp_deconfigure_port(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config new_config)
+{
+ struct mlxsw_sp2_ptp_state *ptp_state;
+ int err;
+
+ ASSERT_RTNL();
+
+ ptp_state = mlxsw_sp2_ptp_state(mlxsw_sp_port->mlxsw_sp);
+
+ if (!refcount_dec_and_test(&ptp_state->ptp_port_enabled_ref))
+ return 0;
+
+ err = mlxsw_sp2_ptp_disable(mlxsw_sp_port->mlxsw_sp, new_config);
+ if (err)
+ goto err_ptp_disable;
+
+ return 0;
+
+err_ptp_disable:
+ refcount_set(&ptp_state->ptp_port_enabled_ref, 1);
+ return err;
+}
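
Since the MTPCPC configuration written by mlxsw_sp2_ptp_enable() is global rather than per port, the first port that enables time stamping programs the hardware and the last port that disables it clears the configuration; ptp_port_enabled_ref tracks this. The sketch below shows the pattern with a plain counter and hypothetical configure/unconfigure stubs instead of refcount_t and register writes.

/* Sketch of the first-enable / last-disable reference counting used for the
 * global PTP configuration. Uses a plain counter and stub configure functions
 * instead of refcount_t and MTPCPC writes; names are illustrative.
 */
#include <stdio.h>

static unsigned int ptp_port_enabled_ref;

static int hw_ptp_configure(void)
{
	printf("programming global PTP configuration\n");
	return 0;
}

static int hw_ptp_unconfigure(void)
{
	printf("clearing global PTP configuration\n");
	return 0;
}

static int port_ptp_enable(void)
{
	/* Only the first enabler touches the hardware. */
	if (ptp_port_enabled_ref++ == 0)
		return hw_ptp_configure();
	return 0;
}

static int port_ptp_disable(void)
{
	/* Only the last disabler touches the hardware. */
	if (--ptp_port_enabled_ref == 0)
		return hw_ptp_unconfigure();
	return 0;
}

int main(void)
{
	port_ptp_enable();	/* port 1: programs the hardware */
	port_ptp_enable();	/* port 2: only bumps the counter */
	port_ptp_disable();	/* port 2: only drops the counter */
	port_ptp_disable();	/* port 1: clears the hardware */
	return 0;
}
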
+
+int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config)
+{
+ enum hwtstamp_rx_filters rx_filter;
+ struct hwtstamp_config new_config;
+ u16 new_ing_types, new_egr_types;
+ bool ptp_enabled;
+ int err;
+
+ err = mlxsw_sp2_ptp_get_message_types(config, &new_ing_types,
+ &new_egr_types, &rx_filter);
+ if (err)
+ return err;
+
+ new_config.flags = config->flags;
+ new_config.tx_type = config->tx_type;
+ new_config.rx_filter = rx_filter;
+
+ ptp_enabled = mlxsw_sp_port->ptp.ing_types ||
+ mlxsw_sp_port->ptp.egr_types;
+
+ if ((new_ing_types || new_egr_types) && !ptp_enabled) {
+ err = mlxsw_sp2_ptp_configure_port(mlxsw_sp_port, new_ing_types,
+ new_egr_types, new_config);
+ if (err)
+ return err;
+ } else if (!new_ing_types && !new_egr_types && ptp_enabled) {
+ err = mlxsw_sp2_ptp_deconfigure_port(mlxsw_sp_port, new_config);
+ if (err)
+ return err;
+ }
+
+ mlxsw_sp_port->ptp.ing_types = new_ing_types;
+ mlxsw_sp_port->ptp.egr_types = new_egr_types;
+
+ /* Notify the ioctl caller what we are actually timestamping. */
+ config->rx_filter = rx_filter;
+
+ return 0;
+}
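
For context, the hwtstamp_config handled above normally arrives from user space through the SIOCSHWTSTAMP ioctl, and the rx_filter written back (HWTSTAMP_FILTER_SOME on Spectrum-2) is reported to the caller. A minimal user-space requester might look as follows; the interface name is a placeholder and the program needs CAP_NET_ADMIN.

/* Minimal user-space sketch of requesting hardware time stamping, which is
 * what ultimately reaches the hwtstamp_set handlers above. The interface name
 * is a placeholder; error handling is reduced to perror().
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "swp1", IFNAMSIZ - 1);	/* placeholder port */
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		/* The kernel writes back the filter actually in effect. */
		printf("rx_filter in effect: %d\n", cfg.rx_filter);

	close(fd);
	return 0;
}
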
+
+int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
+ struct ethtool_ts_info *info)
+{
+ info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ mlxsw_sp_txhdr_construct(skb, tx_info);
+ return 0;
+}
+
+int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ /* In Spectrum-2 and Spectrum-3, in order for PTP event packets to have
+ * their correction field correctly set on the egress port, they must be
+ * transmitted as data packets. Such packets ingress the ASIC via the
+ * CPU port and must have a VLAN tag, as the CPU port is not configured
+ * with a PVID. Push the default VLAN (4095), which is configured as
+ * egress untagged on all the ports.
+ */
+ if (!skb_vlan_tagged(skb)) {
+ skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
+ MLXSW_SP_DEFAULT_VID);
+ if (!skb) {
+ this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+ return -ENOMEM;
+ }
+ }
+
+ return mlxsw_sp_txhdr_ptp_data_construct(mlxsw_core, mlxsw_sp_port, skb,
+ tx_info);
+}
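
The comment above explains why an untagged PTP event packet gets the default VLAN pushed before transmission. The sketch below shows what inserting an 802.1Q header into a raw Ethernet frame amounts to on a plain byte buffer; it only illustrates the frame layout and is not the sk_buff-based vlan_insert_tag_set_proto() path.

/* Sketch of inserting an 802.1Q VLAN header after the Ethernet addresses of a
 * raw frame. Illustration only, not the driver's sk_buff handling.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define ETH_ALEN	6
#define VLAN_HLEN	4
#define ETH_P_8021Q	0x8100
#define DEFAULT_VID	4095	/* VID pushed on PTP event packets above */

/* Inserts the tag in place; buf must have room for VLAN_HLEN extra bytes.
 * Returns the new frame length.
 */
static size_t push_vlan_tag(uint8_t *buf, size_t len, uint16_t vid)
{
	uint16_t tpid = ETH_P_8021Q;
	uint16_t tci = vid & 0x0fff;	/* PCP/DEI zero */

	/* Make room after the destination and source MAC addresses. */
	memmove(buf + 2 * ETH_ALEN + VLAN_HLEN, buf + 2 * ETH_ALEN,
		len - 2 * ETH_ALEN);

	buf[12] = tpid >> 8;
	buf[13] = tpid & 0xff;
	buf[14] = tci >> 8;
	buf[15] = tci & 0xff;

	return len + VLAN_HLEN;
}

int main(void)
{
	uint8_t frame[128] = { 0 };	/* dmac, smac, ethertype, payload... */
	size_t len = 60;

	len = push_vlan_tag(frame, len, DEFAULT_VID);
	printf("frame is now %zu bytes, TPID %02x%02x VID %u\n",
	       len, frame[12], frame[13],
	       ((frame[14] << 8) | frame[15]) & 0x0fff);
	return 0;
}
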
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index c06cd1384bca..2d1628fdefc1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -57,6 +57,40 @@ void mlxsw_sp1_get_stats_strings(u8 **p);
void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *data, int data_index);
+int mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+
+struct mlxsw_sp_ptp_clock *
+mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev);
+
+void mlxsw_sp2_ptp_clock_fini(struct mlxsw_sp_ptp_clock *clock);
+
+struct mlxsw_sp_ptp_state *mlxsw_sp2_ptp_init(struct mlxsw_sp *mlxsw_sp);
+
+void mlxsw_sp2_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state);
+
+void mlxsw_sp2_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u16 local_port);
+
+void mlxsw_sp2_ptp_transmitted(struct mlxsw_sp *mlxsw_sp,
+ struct sk_buff *skb, u16 local_port);
+
+int mlxsw_sp2_ptp_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config);
+
+int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct hwtstamp_config *config);
+
+int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
+ struct ethtool_ts_info *info);
+
+int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info);
+
#else
static inline struct mlxsw_sp_ptp_clock *
@@ -136,7 +170,14 @@ static inline void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *data, int data_index)
{
}
-#endif
+
+static inline int
+mlxsw_sp_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ return -EOPNOTSUPP;
+}
static inline struct mlxsw_sp_ptp_clock *
mlxsw_sp2_ptp_clock_init(struct mlxsw_sp *mlxsw_sp, struct device *dev)
@@ -184,16 +225,25 @@ mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
return -EOPNOTSUPP;
}
-static inline void mlxsw_sp2_ptp_shaper_work(struct work_struct *work)
-{
-}
-
static inline int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
struct ethtool_ts_info *info)
{
return mlxsw_sp_ptp_get_ts_info_noptp(info);
}
+static inline int
+mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct sk_buff *skb,
+ const struct mlxsw_tx_info *tx_info)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+static inline void mlxsw_sp2_ptp_shaper_work(struct work_struct *work)
+{
+}
+
static inline int mlxsw_sp2_get_stats_count(void)
{
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index ce33dbde124d..2c4443c6b964 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -443,65 +443,12 @@ struct mlxsw_sp_fib_entry_decap {
u32 tunnel_index;
};
-static struct mlxsw_sp_fib_entry_priv *
-mlxsw_sp_fib_entry_priv_create(const struct mlxsw_sp_router_ll_ops *ll_ops)
-{
- struct mlxsw_sp_fib_entry_priv *priv;
-
- if (!ll_ops->fib_entry_priv_size)
- /* No need to have priv */
- return NULL;
-
- priv = kzalloc(sizeof(*priv) + ll_ops->fib_entry_priv_size, GFP_KERNEL);
- if (!priv)
- return ERR_PTR(-ENOMEM);
- refcount_set(&priv->refcnt, 1);
- return priv;
-}
-
-static void
-mlxsw_sp_fib_entry_priv_destroy(struct mlxsw_sp_fib_entry_priv *priv)
-{
- kfree(priv);
-}
-
-static void mlxsw_sp_fib_entry_priv_hold(struct mlxsw_sp_fib_entry_priv *priv)
-{
- refcount_inc(&priv->refcnt);
-}
-
-static void mlxsw_sp_fib_entry_priv_put(struct mlxsw_sp_fib_entry_priv *priv)
-{
- if (!priv || !refcount_dec_and_test(&priv->refcnt))
- return;
- mlxsw_sp_fib_entry_priv_destroy(priv);
-}
-
-static void mlxsw_sp_fib_entry_op_ctx_priv_hold(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_entry_priv *priv)
-{
- if (!priv)
- return;
- mlxsw_sp_fib_entry_priv_hold(priv);
- list_add(&priv->list, &op_ctx->fib_entry_priv_list);
-}
-
-static void mlxsw_sp_fib_entry_op_ctx_priv_put_all(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
-{
- struct mlxsw_sp_fib_entry_priv *priv, *tmp;
-
- list_for_each_entry_safe(priv, tmp, &op_ctx->fib_entry_priv_list, list)
- mlxsw_sp_fib_entry_priv_put(priv);
- INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
-}
-
struct mlxsw_sp_fib_entry {
struct mlxsw_sp_fib_node *fib_node;
enum mlxsw_sp_fib_entry_type type;
struct list_head nexthop_group_node;
struct mlxsw_sp_nexthop_group *nh_group;
struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
- struct mlxsw_sp_fib_entry_priv *priv;
};
struct mlxsw_sp_fib4_entry {
@@ -537,7 +484,6 @@ struct mlxsw_sp_fib {
struct mlxsw_sp_vr *vr;
struct mlxsw_sp_lpm_tree *lpm_tree;
enum mlxsw_sp_l3proto proto;
- const struct mlxsw_sp_router_ll_ops *ll_ops;
};
struct mlxsw_sp_vr {
@@ -551,45 +497,16 @@ struct mlxsw_sp_vr {
refcount_t ul_rif_refcnt;
};
-static int mlxsw_sp_router_ll_basic_init(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
- enum mlxsw_sp_l3proto proto)
-{
- return 0;
-}
-
-static int mlxsw_sp_router_ll_basic_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl)
-{
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta),
- xralta_pl + MLXSW_REG_XRALTA_RALTA_OFFSET);
-}
-
-static int mlxsw_sp_router_ll_basic_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl)
-{
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst),
- xralst_pl + MLXSW_REG_XRALST_RALST_OFFSET);
-}
-
-static int mlxsw_sp_router_ll_basic_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl)
-{
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
- xraltb_pl + MLXSW_REG_XRALTB_RALTB_OFFSET);
-}
-
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_vr *vr,
enum mlxsw_sp_l3proto proto)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
struct mlxsw_sp_lpm_tree *lpm_tree;
struct mlxsw_sp_fib *fib;
int err;
- err = ll_ops->init(mlxsw_sp, vr->id, proto);
- if (err)
- return ERR_PTR(err);
-
lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
fib = kzalloc(sizeof(*fib), GFP_KERNEL);
if (!fib)
@@ -601,7 +518,6 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
fib->proto = proto;
fib->vr = vr;
fib->lpm_tree = lpm_tree;
- fib->ll_ops = ll_ops;
mlxsw_sp_lpm_tree_hold(lpm_tree);
err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
if (err)
@@ -640,36 +556,33 @@ mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
}
static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- char xralta_pl[MLXSW_REG_XRALTA_LEN];
+ char ralta_pl[MLXSW_REG_RALTA_LEN];
- mlxsw_reg_xralta_pack(xralta_pl, true,
- (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
- lpm_tree->id);
- return ll_ops->ralta_write(mlxsw_sp, xralta_pl);
+ mlxsw_reg_ralta_pack(ralta_pl, true,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- char xralta_pl[MLXSW_REG_XRALTA_LEN];
+ char ralta_pl[MLXSW_REG_RALTA_LEN];
- mlxsw_reg_xralta_pack(xralta_pl, false,
- (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
- lpm_tree->id);
- ll_ops->ralta_write(mlxsw_sp, xralta_pl);
+ mlxsw_reg_ralta_pack(ralta_pl, false,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_prefix_usage *prefix_usage,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- char xralst_pl[MLXSW_REG_XRALST_LEN];
+ char ralst_pl[MLXSW_REG_RALST_LEN];
u8 root_bin = 0;
u8 prefix;
u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
@@ -677,20 +590,19 @@ mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
root_bin = prefix;
- mlxsw_reg_xralst_pack(xralst_pl, root_bin, lpm_tree->id);
+ mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
if (prefix == 0)
continue;
- mlxsw_reg_xralst_bin_pack(xralst_pl, prefix, last_prefix,
- MLXSW_REG_RALST_BIN_NO_CHILD);
+ mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
+ MLXSW_REG_RALST_BIN_NO_CHILD);
last_prefix = prefix;
}
- return ll_ops->ralst_write(mlxsw_sp, xralst_pl);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_prefix_usage *prefix_usage,
enum mlxsw_sp_l3proto proto)
{
@@ -701,11 +613,12 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
if (!lpm_tree)
return ERR_PTR(-EBUSY);
lpm_tree->proto = proto;
- err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, ll_ops, lpm_tree);
+ err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
if (err)
return ERR_PTR(err);
- err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, ll_ops, prefix_usage, lpm_tree);
+ err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
+ lpm_tree);
if (err)
goto err_left_struct_set;
memcpy(&lpm_tree->prefix_usage, prefix_usage,
@@ -716,15 +629,14 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
return lpm_tree;
err_left_struct_set:
- mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
+ mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
return ERR_PTR(err);
}
static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
+ mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}
static struct mlxsw_sp_lpm_tree *
@@ -732,7 +644,6 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_prefix_usage *prefix_usage,
enum mlxsw_sp_l3proto proto)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
struct mlxsw_sp_lpm_tree *lpm_tree;
int i;
@@ -746,7 +657,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
return lpm_tree;
}
}
- return mlxsw_sp_lpm_tree_create(mlxsw_sp, ll_ops, prefix_usage, proto);
+ return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
@@ -757,11 +668,8 @@ static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops =
- mlxsw_sp->router->proto_ll_ops[lpm_tree->proto];
-
if (--lpm_tree->ref_count == 0)
- mlxsw_sp_lpm_tree_destroy(mlxsw_sp, ll_ops, lpm_tree);
+ mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}
#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
@@ -851,23 +759,23 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_fib *fib, u8 tree_id)
{
- char xraltb_pl[MLXSW_REG_XRALTB_LEN];
+ char raltb_pl[MLXSW_REG_RALTB_LEN];
- mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
- (enum mlxsw_reg_ralxx_protocol) fib->proto,
- tree_id);
- return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
+ mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto,
+ tree_id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_fib *fib)
{
- char xraltb_pl[MLXSW_REG_XRALTB_LEN];
+ char raltb_pl[MLXSW_REG_RALTB_LEN];
/* Bind to tree 0 which is default */
- mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
- (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
- return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
+ mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
@@ -5780,14 +5688,13 @@ mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
static void
mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
switch (op) {
- case MLXSW_SP_FIB_ENTRY_OP_WRITE:
- case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
+ case MLXSW_REG_RALUE_OP_WRITE_WRITE:
mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
break;
- case MLXSW_SP_FIB_ENTRY_OP_DELETE:
+ case MLXSW_REG_RALUE_OP_WRITE_DELETE:
mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
break;
default:
@@ -5795,140 +5702,39 @@ mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
}
}
-struct mlxsw_sp_fib_entry_op_ctx_basic {
- char ralue_pl[MLXSW_REG_RALUE_LEN];
-};
-
static void
-mlxsw_sp_router_ll_basic_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_sp_l3proto proto,
- enum mlxsw_sp_fib_entry_op op,
- u16 virtual_router, u8 prefix_len,
- unsigned char *addr,
- struct mlxsw_sp_fib_entry_priv *priv)
+mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
+ const struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
- enum mlxsw_reg_ralxx_protocol ralxx_proto;
- char *ralue_pl = op_ctx_basic->ralue_pl;
- enum mlxsw_reg_ralue_op ralue_op;
-
- ralxx_proto = (enum mlxsw_reg_ralxx_protocol) proto;
+ struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
+ enum mlxsw_reg_ralxx_protocol proto;
+ u32 *p_dip;
- switch (op) {
- case MLXSW_SP_FIB_ENTRY_OP_WRITE:
- case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
- ralue_op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
- break;
- case MLXSW_SP_FIB_ENTRY_OP_DELETE:
- ralue_op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
- break;
- default:
- WARN_ON_ONCE(1);
- return;
- }
+ proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
- switch (proto) {
+ switch (fib->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
- mlxsw_reg_ralue_pack4(ralue_pl, ralxx_proto, ralue_op,
- virtual_router, prefix_len, (u32 *) addr);
+ p_dip = (u32 *) fib_entry->fib_node->key.addr;
+ mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
+ fib_entry->fib_node->key.prefix_len,
+ *p_dip);
break;
case MLXSW_SP_L3_PROTO_IPV6:
- mlxsw_reg_ralue_pack6(ralue_pl, ralxx_proto, ralue_op,
- virtual_router, prefix_len, addr);
+ mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
+ fib_entry->fib_node->key.prefix_len,
+ fib_entry->fib_node->key.addr);
break;
}
}
-static void
-mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_reg_ralue_trap_action trap_action,
- u16 trap_id, u32 adjacency_index, u16 ecmp_size)
-{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_ralue_act_remote_pack(op_ctx_basic->ralue_pl, trap_action,
- trap_id, adjacency_index, ecmp_size);
-}
-
-static void
-mlxsw_sp_router_ll_basic_fib_entry_act_local_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_reg_ralue_trap_action trap_action,
- u16 trap_id, u16 local_erif)
-{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_ralue_act_local_pack(op_ctx_basic->ralue_pl, trap_action,
- trap_id, local_erif);
-}
-
-static void
-mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
-{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_ralue_act_ip2me_pack(op_ctx_basic->ralue_pl);
-}
-
-static void
-mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- u32 tunnel_ptr)
-{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_ralue_act_ip2me_tun_pack(op_ctx_basic->ralue_pl, tunnel_ptr);
-}
-
-static int
-mlxsw_sp_router_ll_basic_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- bool *postponed_for_bulk)
-{
- struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
- op_ctx_basic->ralue_pl);
-}
-
-static bool
-mlxsw_sp_router_ll_basic_fib_entry_is_committed(struct mlxsw_sp_fib_entry_priv *priv)
-{
- return true;
-}
-
-static void mlxsw_sp_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
-{
- struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
-
- mlxsw_sp_fib_entry_op_ctx_priv_hold(op_ctx, fib_entry->priv);
- fib->ll_ops->fib_entry_pack(op_ctx, fib->proto, op, fib->vr->id,
- fib_entry->fib_node->key.prefix_len,
- fib_entry->fib_node->key.addr,
- fib_entry->priv);
-}
-
-static int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- const struct mlxsw_sp_router_ll_ops *ll_ops)
-{
- bool postponed_for_bulk = false;
- int err;
-
- err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, &postponed_for_bulk);
- if (!postponed_for_bulk)
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
- return err;
-}
-
static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
enum mlxsw_reg_ralue_trap_action trap_action;
u16 trap_id = 0;
u32 adjacency_index = 0;
@@ -5951,20 +5757,19 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
}
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_remote_pack(op_ctx, trap_action, trap_id,
- adjacency_index, ecmp_size);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
+ adjacency_index, ecmp_size);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
struct mlxsw_sp_rif *rif = fib_entry->nh_group->nhgi->nh_rif;
enum mlxsw_reg_ralue_trap_action trap_action;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
u16 trap_id = 0;
u16 rif_index = 0;
@@ -5976,64 +5781,61 @@ static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
}
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, rif_index);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
+ rif_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_ip2me_pack(op_ctx);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
enum mlxsw_reg_ralue_trap_action trap_action;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, 0, 0);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int
mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
enum mlxsw_reg_ralue_trap_action trap_action;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
u16 trap_id;
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, 0);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int
mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
const struct mlxsw_sp_ipip_ops *ipip_ops;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
int err;
if (WARN_ON(!ipip_entry))
@@ -6045,55 +5847,54 @@ mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
- fib_entry->decap.tunnel_index);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
+ fib_entry->decap.tunnel_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
- mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
- ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
- fib_entry->decap.tunnel_index);
- return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
+ mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
+ mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
+ fib_entry->decap.tunnel_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
switch (fib_entry->type) {
case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
- return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
- return mlxsw_sp_fib_entry_op_local(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
- return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
- return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
- return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
+ op);
case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
- return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
+ fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
- return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, op_ctx, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
}
return -EINVAL;
}
static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_sp_fib_entry_op op)
+ enum mlxsw_reg_ralue_op op)
{
- int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry, op);
+ int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
if (err)
return err;
@@ -6103,35 +5904,18 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
return err;
}
-static int __mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_entry *fib_entry,
- bool is_new)
-{
- return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
- is_new ? MLXSW_SP_FIB_ENTRY_OP_WRITE :
- MLXSW_SP_FIB_ENTRY_OP_UPDATE);
-}
-
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
-
- mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
- return __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, false);
+ return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
+ MLXSW_REG_RALUE_OP_WRITE_WRITE);
}
static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry)
{
- const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
-
- if (!ll_ops->fib_entry_is_committed(fib_entry->priv))
- return 0;
- return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
- MLXSW_SP_FIB_ENTRY_OP_DELETE);
+ return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
+ MLXSW_REG_RALUE_OP_WRITE_DELETE);
}
static int
@@ -6226,12 +6010,6 @@ mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
return ERR_PTR(-ENOMEM);
fib_entry = &fib4_entry->common;
- fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
- if (IS_ERR(fib_entry->priv)) {
- err = PTR_ERR(fib_entry->priv);
- goto err_fib_entry_priv_create;
- }
-
err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
if (err)
goto err_nexthop4_group_get;
@@ -6260,8 +6038,6 @@ err_fib4_entry_type_set:
err_nexthop_group_vr_link:
mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
err_nexthop4_group_get:
- mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
-err_fib_entry_priv_create:
kfree(fib4_entry);
return ERR_PTR(err);
}
@@ -6276,7 +6052,6 @@ static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
fib_node->fib);
mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
- mlxsw_sp_fib_entry_priv_put(fib4_entry->common.priv);
kfree(fib4_entry);
}
@@ -6514,16 +6289,14 @@ static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
- bool is_new = !fib_node->fib_entry;
int err;
fib_node->fib_entry = fib_entry;
- err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, is_new);
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
if (err)
goto err_fib_entry_update;
@@ -6534,25 +6307,14 @@ err_fib_entry_update:
return err;
}
-static int __mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_entry *fib_entry)
+static void
+mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
- int err;
- err = mlxsw_sp_fib_entry_del(mlxsw_sp, op_ctx, fib_entry);
+ mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
fib_node->fib_entry = NULL;
- return err;
-}
-
-static void mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry *fib_entry)
-{
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
-
- mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
- __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, fib_entry);
}
static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
@@ -6574,7 +6336,6 @@ static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
static int
mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
const struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
@@ -6609,7 +6370,7 @@ mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
}
replaced = fib_node->fib_entry;
- err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib4_entry->common);
+ err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
goto err_fib_node_entry_link;
@@ -6634,23 +6395,20 @@ err_fib4_entry_create:
return err;
}
-static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct fib_entry_notifier_info *fen_info)
+static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
+ struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib4_entry *fib4_entry;
struct mlxsw_sp_fib_node *fib_node;
- int err;
fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
if (!fib4_entry)
- return 0;
+ return;
fib_node = fib4_entry->common.fib_node;
- err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib4_entry->common);
+ mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib4_entry->common);
mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
- return err;
}
static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
@@ -6958,9 +6716,9 @@ static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
}
-static int mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib6_entry *fib6_entry)
+static int
+mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
{
struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
@@ -6983,8 +6741,7 @@ static int mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
* currently associated with it in the device's table is that
* of the old group. Start using the new one instead.
*/
- err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx,
- &fib6_entry->common, false);
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, &fib6_entry->common);
if (err)
goto err_fib_entry_update;
@@ -7008,7 +6765,6 @@ err_nexthop6_group_get:
static int
mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib6_entry *fib6_entry,
struct fib6_info **rt_arr, unsigned int nrt6)
{
@@ -7026,7 +6782,7 @@ mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
fib6_entry->nrt6++;
}
- err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
+ err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
if (err)
goto err_rt6_unwind;
@@ -7045,7 +6801,6 @@ err_rt6_unwind:
static void
mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib6_entry *fib6_entry,
struct fib6_info **rt_arr, unsigned int nrt6)
{
@@ -7063,7 +6818,7 @@ mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
}
- mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
+ mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
}
static int
@@ -7149,12 +6904,6 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
return ERR_PTR(-ENOMEM);
fib_entry = &fib6_entry->common;
- fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
- if (IS_ERR(fib_entry->priv)) {
- err = PTR_ERR(fib_entry->priv);
- goto err_fib_entry_priv_create;
- }
-
INIT_LIST_HEAD(&fib6_entry->rt6_list);
for (i = 0; i < nrt6; i++) {
@@ -7196,8 +6945,6 @@ err_rt6_unwind:
list_del(&mlxsw_sp_rt6->list);
mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
}
- mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
-err_fib_entry_priv_create:
kfree(fib6_entry);
return ERR_PTR(err);
}
@@ -7220,7 +6967,6 @@ static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
WARN_ON(fib6_entry->nrt6);
- mlxsw_sp_fib_entry_priv_put(fib6_entry->common.priv);
kfree(fib6_entry);
}
@@ -7278,8 +7024,8 @@ static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
}
static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct fib6_info **rt_arr, unsigned int nrt6)
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
{
struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
struct mlxsw_sp_fib_entry *replaced;
@@ -7318,7 +7064,7 @@ static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
}
replaced = fib_node->fib_entry;
- err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib6_entry->common);
+ err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
if (err)
goto err_fib_node_entry_link;
@@ -7342,8 +7088,8 @@ err_fib6_entry_create:
}
static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct fib6_info **rt_arr, unsigned int nrt6)
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_node *fib_node;
@@ -7371,7 +7117,8 @@ static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
fib6_entry = container_of(fib_node->fib_entry,
struct mlxsw_sp_fib6_entry, common);
- err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
+ err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt_arr,
+ nrt6);
if (err)
goto err_fib6_entry_nexthop_add;
@@ -7382,17 +7129,16 @@ err_fib6_entry_nexthop_add:
return err;
}
-static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct fib6_info **rt_arr, unsigned int nrt6)
+static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_node *fib_node;
struct fib6_info *rt = rt_arr[0];
- int err;
if (mlxsw_sp_fib6_rt_should_ignore(rt))
- return 0;
+ return;
/* Multipath routes are first added to the FIB trie and only then
* notified. If we vetoed the addition, we will get a delete
@@ -7401,22 +7147,22 @@ static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
*/
fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
if (!fib6_entry)
- return 0;
+ return;
/* If not all the nexthops are deleted, then only reduce the nexthop
* group.
*/
if (nrt6 != fib6_entry->nrt6) {
- mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
- return 0;
+ mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr,
+ nrt6);
+ return;
}
fib_node = fib6_entry->common.fib_node;
- err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib6_entry->common);
+ mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib6_entry->common);
mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
- return err;
}
static struct mlxsw_sp_mr_table *
@@ -7569,15 +7315,15 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
}
}
-struct mlxsw_sp_fib6_event {
+struct mlxsw_sp_fib6_event_work {
struct fib6_info **rt_arr;
unsigned int nrt6;
};
-struct mlxsw_sp_fib_event {
- struct list_head list; /* node in fib queue */
+struct mlxsw_sp_fib_event_work {
+ struct work_struct work;
union {
- struct mlxsw_sp_fib6_event fib6_event;
+ struct mlxsw_sp_fib6_event_work fib6_work;
struct fib_entry_notifier_info fen_info;
struct fib_rule_notifier_info fr_info;
struct fib_nh_notifier_info fnh_info;
@@ -7586,12 +7332,11 @@ struct mlxsw_sp_fib_event {
};
struct mlxsw_sp *mlxsw_sp;
unsigned long event;
- int family;
};
static int
-mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
- struct fib6_entry_notifier_info *fen6_info)
+mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
+ struct fib6_entry_notifier_info *fen6_info)
{
struct fib6_info *rt = fen6_info->rt;
struct fib6_info **rt_arr;
@@ -7605,8 +7350,8 @@ mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
if (!rt_arr)
return -ENOMEM;
- fib6_event->rt_arr = rt_arr;
- fib6_event->nrt6 = nrt6;
+ fib6_work->rt_arr = rt_arr;
+ fib6_work->nrt6 = nrt6;
rt_arr[0] = rt;
fib6_info_hold(rt);
@@ -7628,242 +7373,182 @@ mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
}
static void
-mlxsw_sp_router_fib6_event_fini(struct mlxsw_sp_fib6_event *fib6_event)
+mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work)
{
int i;
- for (i = 0; i < fib6_event->nrt6; i++)
- mlxsw_sp_rt6_release(fib6_event->rt_arr[i]);
- kfree(fib6_event->rt_arr);
+ for (i = 0; i < fib6_work->nrt6; i++)
+ mlxsw_sp_rt6_release(fib6_work->rt_arr[i]);
+ kfree(fib6_work->rt_arr);
}
-static void mlxsw_sp_router_fib4_event_process(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_event *fib_event)
+static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
{
+ struct mlxsw_sp_fib_event_work *fib_work =
+ container_of(work, struct mlxsw_sp_fib_event_work, work);
+ struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
int err;
+ mutex_lock(&mlxsw_sp->router->lock);
mlxsw_sp_span_respin(mlxsw_sp);
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
- err = mlxsw_sp_router_fib4_replace(mlxsw_sp, op_ctx, &fib_event->fen_info);
+ err = mlxsw_sp_router_fib4_replace(mlxsw_sp,
+ &fib_work->fen_info);
if (err) {
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
- &fib_event->fen_info);
+ &fib_work->fen_info);
}
- fib_info_put(fib_event->fen_info.fi);
+ fib_info_put(fib_work->fen_info.fi);
break;
case FIB_EVENT_ENTRY_DEL:
- err = mlxsw_sp_router_fib4_del(mlxsw_sp, op_ctx, &fib_event->fen_info);
- if (err)
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
- fib_info_put(fib_event->fen_info.fi);
+ mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
+ fib_info_put(fib_work->fen_info.fi);
break;
case FIB_EVENT_NH_ADD:
case FIB_EVENT_NH_DEL:
- mlxsw_sp_nexthop4_event(mlxsw_sp, fib_event->event, fib_event->fnh_info.fib_nh);
- fib_info_put(fib_event->fnh_info.fib_nh->nh_parent);
+ mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
+ fib_work->fnh_info.fib_nh);
+ fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
break;
}
+ mutex_unlock(&mlxsw_sp->router->lock);
+ kfree(fib_work);
}
-static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_event *fib_event)
+static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
{
- struct mlxsw_sp_fib6_event *fib6_event = &fib_event->fib6_event;
+ struct mlxsw_sp_fib_event_work *fib_work =
+ container_of(work, struct mlxsw_sp_fib_event_work, work);
+ struct mlxsw_sp_fib6_event_work *fib6_work = &fib_work->fib6_work;
+ struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
int err;
+ mutex_lock(&mlxsw_sp->router->lock);
mlxsw_sp_span_respin(mlxsw_sp);
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
- err = mlxsw_sp_router_fib6_replace(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
- fib_event->fib6_event.nrt6);
+ err = mlxsw_sp_router_fib6_replace(mlxsw_sp,
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
if (err) {
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
- fib6_event->rt_arr,
- fib6_event->nrt6);
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
}
- mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
+ mlxsw_sp_router_fib6_work_fini(fib6_work);
break;
case FIB_EVENT_ENTRY_APPEND:
- err = mlxsw_sp_router_fib6_append(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
- fib_event->fib6_event.nrt6);
+ err = mlxsw_sp_router_fib6_append(mlxsw_sp,
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
if (err) {
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
dev_warn(mlxsw_sp->bus_info->dev, "FIB append failed.\n");
mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
- fib6_event->rt_arr,
- fib6_event->nrt6);
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
}
- mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
+ mlxsw_sp_router_fib6_work_fini(fib6_work);
break;
case FIB_EVENT_ENTRY_DEL:
- err = mlxsw_sp_router_fib6_del(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
- fib_event->fib6_event.nrt6);
- if (err)
- mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
- mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
+ mlxsw_sp_router_fib6_del(mlxsw_sp,
+ fib6_work->rt_arr,
+ fib6_work->nrt6);
+ mlxsw_sp_router_fib6_work_fini(fib6_work);
break;
}
+ mutex_unlock(&mlxsw_sp->router->lock);
+ kfree(fib_work);
}
-static void mlxsw_sp_router_fibmr_event_process(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_event *fib_event)
+static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
{
+ struct mlxsw_sp_fib_event_work *fib_work =
+ container_of(work, struct mlxsw_sp_fib_event_work, work);
+ struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
bool replace;
int err;
rtnl_lock();
mutex_lock(&mlxsw_sp->router->lock);
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_ADD:
- replace = fib_event->event == FIB_EVENT_ENTRY_REPLACE;
+ replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
- err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_event->men_info, replace);
+ err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
+ replace);
if (err)
dev_warn(mlxsw_sp->bus_info->dev, "MR entry add failed.\n");
- mr_cache_put(fib_event->men_info.mfc);
+ mr_cache_put(fib_work->men_info.mfc);
break;
case FIB_EVENT_ENTRY_DEL:
- mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_event->men_info);
- mr_cache_put(fib_event->men_info.mfc);
+ mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
+ mr_cache_put(fib_work->men_info.mfc);
break;
case FIB_EVENT_VIF_ADD:
err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
- &fib_event->ven_info);
+ &fib_work->ven_info);
if (err)
dev_warn(mlxsw_sp->bus_info->dev, "MR VIF add failed.\n");
- dev_put(fib_event->ven_info.dev);
+ dev_put(fib_work->ven_info.dev);
break;
case FIB_EVENT_VIF_DEL:
- mlxsw_sp_router_fibmr_vif_del(mlxsw_sp, &fib_event->ven_info);
- dev_put(fib_event->ven_info.dev);
+ mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
+ &fib_work->ven_info);
+ dev_put(fib_work->ven_info.dev);
break;
}
mutex_unlock(&mlxsw_sp->router->lock);
rtnl_unlock();
+ kfree(fib_work);
}
-static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
-{
- struct mlxsw_sp_router *router = container_of(work, struct mlxsw_sp_router, fib_event_work);
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx = router->ll_op_ctx;
- struct mlxsw_sp *mlxsw_sp = router->mlxsw_sp;
- struct mlxsw_sp_fib_event *next_fib_event;
- struct mlxsw_sp_fib_event *fib_event;
- int last_family = AF_UNSPEC;
- LIST_HEAD(fib_event_queue);
-
- spin_lock_bh(&router->fib_event_queue_lock);
- list_splice_init(&router->fib_event_queue, &fib_event_queue);
- spin_unlock_bh(&router->fib_event_queue_lock);
-
- /* Router lock is held here to make sure per-instance
- * operation context is not used in between FIB4/6 events
- * processing.
- */
- mutex_lock(&router->lock);
- mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
- list_for_each_entry_safe(fib_event, next_fib_event,
- &fib_event_queue, list) {
- /* Check if the next entry in the queue exists and it is
- * of the same type (family and event) as the current one.
- * In that case it is permitted to do the bulking
- * of multiple FIB entries to a single register write.
- */
- op_ctx->bulk_ok = !list_is_last(&fib_event->list, &fib_event_queue) &&
- fib_event->family == next_fib_event->family &&
- fib_event->event == next_fib_event->event;
- op_ctx->event = fib_event->event;
-
- /* In case the family of this and the previous entry differ, context
- * reinitialization is going to be needed now, indicate that.
- * Note that since last_family is initialized to AF_UNSPEC, this is always
- * going to happen for the first entry processed in the work.
- */
- if (fib_event->family != last_family)
- op_ctx->initialized = false;
-
- switch (fib_event->family) {
- case AF_INET:
- mlxsw_sp_router_fib4_event_process(mlxsw_sp, op_ctx,
- fib_event);
- break;
- case AF_INET6:
- mlxsw_sp_router_fib6_event_process(mlxsw_sp, op_ctx,
- fib_event);
- break;
- case RTNL_FAMILY_IP6MR:
- case RTNL_FAMILY_IPMR:
- /* Unlock here as inside FIBMR the lock is taken again
- * under RTNL. The per-instance operation context
- * is not used by FIBMR.
- */
- mutex_unlock(&router->lock);
- mlxsw_sp_router_fibmr_event_process(mlxsw_sp,
- fib_event);
- mutex_lock(&router->lock);
- break;
- default:
- WARN_ON_ONCE(1);
- }
- last_family = fib_event->family;
- kfree(fib_event);
- cond_resched();
- }
- WARN_ON_ONCE(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
- mutex_unlock(&router->lock);
-}
-
-static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event *fib_event,
+static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
struct fib_notifier_info *info)
{
struct fib_entry_notifier_info *fen_info;
struct fib_nh_notifier_info *fnh_info;
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_DEL:
fen_info = container_of(info, struct fib_entry_notifier_info,
info);
- fib_event->fen_info = *fen_info;
+ fib_work->fen_info = *fen_info;
/* Take reference on fib_info to prevent it from being
- * freed while event is queued. Release it afterwards.
+ * freed while work is queued. Release it afterwards.
*/
- fib_info_hold(fib_event->fen_info.fi);
+ fib_info_hold(fib_work->fen_info.fi);
break;
case FIB_EVENT_NH_ADD:
case FIB_EVENT_NH_DEL:
fnh_info = container_of(info, struct fib_nh_notifier_info,
info);
- fib_event->fnh_info = *fnh_info;
- fib_info_hold(fib_event->fnh_info.fib_nh->nh_parent);
+ fib_work->fnh_info = *fnh_info;
+ fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
break;
}
}
-static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event *fib_event,
+static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
struct fib_notifier_info *info)
{
struct fib6_entry_notifier_info *fen6_info;
int err;
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_APPEND:
case FIB_EVENT_ENTRY_DEL:
fen6_info = container_of(info, struct fib6_entry_notifier_info,
info);
- err = mlxsw_sp_router_fib6_event_init(&fib_event->fib6_event,
- fen6_info);
+ err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work,
+ fen6_info);
if (err)
return err;
break;
@@ -7873,20 +7558,20 @@ static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event *fib_event,
}
static void
-mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event *fib_event,
+mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
struct fib_notifier_info *info)
{
- switch (fib_event->event) {
+ switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_ADD:
case FIB_EVENT_ENTRY_DEL:
- memcpy(&fib_event->men_info, info, sizeof(fib_event->men_info));
- mr_cache_hold(fib_event->men_info.mfc);
+ memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
+ mr_cache_hold(fib_work->men_info.mfc);
break;
case FIB_EVENT_VIF_ADD:
case FIB_EVENT_VIF_DEL:
- memcpy(&fib_event->ven_info, info, sizeof(fib_event->ven_info));
- dev_hold(fib_event->ven_info.dev);
+ memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
+ dev_hold(fib_work->ven_info.dev);
break;
}
}
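The *_hold() calls in the event handlers above pair with the *_put() calls in the corresponding work functions earlier in this file: a reference taken in (possibly atomic) notifier context keeps the notifier payload alive until the deferred work has consumed it. A minimal sketch of that pattern, using hypothetical foo_* names and types rather than driver code:

/* Hold-in-notifier / put-in-work sketch; foo_work, foo_info, foo_process(),
 * obj_hold() and obj_put() are hypothetical placeholders.
 */
static void foo_event_work(struct work_struct *work)
{
	struct foo_work *w = container_of(work, struct foo_work, work);

	foo_process(&w->info);
	obj_put(w->info.obj);	/* drop the reference taken in the notifier */
	kfree(w);
}

static int foo_fib_event(struct notifier_block *nb, unsigned long event,
			 void *ptr)
{
	struct foo_work *w;

	w = kzalloc(sizeof(*w), GFP_ATOMIC);	/* notifier may run in atomic context */
	if (!w)
		return NOTIFY_BAD;
	w->info = *(struct foo_info *)ptr;
	obj_hold(w->info.obj);	/* keep the payload alive until the work runs */
	INIT_WORK(&w->work, foo_event_work);
	mlxsw_core_schedule_work(&w->work);
	return NOTIFY_DONE;
}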
@@ -7940,7 +7625,7 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event,
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
- struct mlxsw_sp_fib_event *fib_event;
+ struct mlxsw_sp_fib_event_work *fib_work;
struct fib_notifier_info *info = ptr;
struct mlxsw_sp_router *router;
int err;
@@ -7972,39 +7657,37 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
break;
}
- fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC);
- if (!fib_event)
+ fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
+ if (!fib_work)
return NOTIFY_BAD;
- fib_event->mlxsw_sp = router->mlxsw_sp;
- fib_event->event = event;
- fib_event->family = info->family;
+ fib_work->mlxsw_sp = router->mlxsw_sp;
+ fib_work->event = event;
switch (info->family) {
case AF_INET:
- mlxsw_sp_router_fib4_event(fib_event, info);
+ INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
+ mlxsw_sp_router_fib4_event(fib_work, info);
break;
case AF_INET6:
- err = mlxsw_sp_router_fib6_event(fib_event, info);
+ INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
+ err = mlxsw_sp_router_fib6_event(fib_work, info);
if (err)
goto err_fib_event;
break;
case RTNL_FAMILY_IP6MR:
case RTNL_FAMILY_IPMR:
- mlxsw_sp_router_fibmr_event(fib_event, info);
+ INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
+ mlxsw_sp_router_fibmr_event(fib_work, info);
break;
}
- /* Enqueue the event and trigger the work */
- spin_lock_bh(&router->fib_event_queue_lock);
- list_add_tail(&fib_event->list, &router->fib_event_queue);
- spin_unlock_bh(&router->fib_event_queue_lock);
- mlxsw_core_schedule_work(&router->fib_event_work);
+ mlxsw_core_schedule_work(&fib_work->work);
return NOTIFY_DONE;
err_fib_event:
- kfree(fib_event);
+ kfree(fib_work);
return NOTIFY_BAD;
}
@@ -8463,6 +8146,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rif_counters_alloc(rif);
}
+ atomic_inc(&mlxsw_sp->router->rifs_count);
return rif;
err_stats_enable:
@@ -8492,6 +8176,7 @@ static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
struct mlxsw_sp_vr *vr;
int i;
+ atomic_dec(&mlxsw_sp->router->rifs_count);
mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
vr = &mlxsw_sp->router->vrs[rif->vr_id];
@@ -8650,6 +8335,13 @@ static u64 mlxsw_sp_rif_mac_profiles_occ_get(void *priv)
return atomic_read(&mlxsw_sp->router->rif_mac_profiles_count);
}
+static u64 mlxsw_sp_rifs_occ_get(void *priv)
+{
+ const struct mlxsw_sp *mlxsw_sp = priv;
+
+ return atomic_read(&mlxsw_sp->router->rifs_count);
+}
+
static struct mlxsw_sp_rif_mac_profile *
mlxsw_sp_rif_mac_profile_create(struct mlxsw_sp *mlxsw_sp, const char *mac,
struct netlink_ext_ack *extack)
@@ -8898,9 +8590,7 @@ static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
unsigned long event,
struct netlink_ext_ack *extack)
{
- if (netif_is_bridge_port(port_dev) ||
- netif_is_lag_port(port_dev) ||
- netif_is_ovs_port(port_dev))
+ if (netif_is_any_bridge_port(port_dev) || netif_is_lag_port(port_dev))
return 0;
return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
@@ -9624,17 +9314,18 @@ static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
struct mlxsw_sp_rif_subport *rif_subport;
char ritr_pl[MLXSW_REG_RITR_LEN];
+ u16 efid;
rif_subport = mlxsw_sp_rif_subport_rif(rif);
mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
rif->rif_index, rif->vr_id, rif->dev->mtu);
mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
+ efid = mlxsw_sp_fid_index(rif->fid);
mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
rif_subport->lag ? rif_subport->lag_id :
rif_subport->system_port,
- rif_subport->vid);
-
+ efid, 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
@@ -9659,9 +9350,15 @@ static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
if (err)
goto err_rif_fdb_op;
- mlxsw_sp_fid_rif_set(rif->fid, rif);
+ err = mlxsw_sp_fid_rif_set(rif->fid, rif);
+ if (err)
+ goto err_fid_rif_set;
+
return 0;
+err_fid_rif_set:
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
mlxsw_sp_rif_subport_op(rif, false);
err_rif_subport_op:
@@ -9673,7 +9370,7 @@ static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp_fid *fid = rif->fid;
- mlxsw_sp_fid_rif_set(fid, NULL);
+ mlxsw_sp_fid_rif_unset(fid);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
mlxsw_sp_rif_macvlan_flush(rif);
@@ -9697,10 +9394,9 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
.fid_get = mlxsw_sp_rif_subport_fid_get,
};
-static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
- enum mlxsw_reg_ritr_if_type type,
- u16 vid_fid, bool enable)
+static int mlxsw_sp_rif_fid_op(struct mlxsw_sp_rif *rif, u16 fid, bool enable)
{
+ enum mlxsw_reg_ritr_if_type type = MLXSW_REG_RITR_FID_IF;
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
char ritr_pl[MLXSW_REG_RITR_LEN];
@@ -9708,7 +9404,7 @@ static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
rif->dev->mtu);
mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
- mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
+ mlxsw_reg_ritr_fid_if_fid_set(ritr_pl, fid);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
@@ -9732,10 +9428,9 @@ static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
return err;
rif->mac_profile_id = mac_profile;
- err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
- true);
+ err = mlxsw_sp_rif_fid_op(rif, fid_index, true);
if (err)
- goto err_rif_vlan_fid_op;
+ goto err_rif_fid_op;
err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), true);
@@ -9752,9 +9447,15 @@ static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
if (err)
goto err_rif_fdb_op;
- mlxsw_sp_fid_rif_set(rif->fid, rif);
+ err = mlxsw_sp_fid_rif_set(rif->fid, rif);
+ if (err)
+ goto err_fid_rif_set;
+
return 0;
+err_fid_rif_set:
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
mlxsw_sp_router_port(mlxsw_sp), false);
@@ -9762,8 +9463,8 @@ err_fid_bc_flood_set:
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
- mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
-err_rif_vlan_fid_op:
+ mlxsw_sp_rif_fid_op(rif, fid_index, false);
+err_rif_fid_op:
mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
return err;
}
@@ -9774,7 +9475,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
struct mlxsw_sp_fid *fid = rif->fid;
- mlxsw_sp_fid_rif_set(fid, NULL);
+ mlxsw_sp_fid_rif_unset(fid);
mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
mlxsw_sp_rif_macvlan_flush(rif);
@@ -9782,7 +9483,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_router_port(mlxsw_sp), false);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), false);
- mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
+ mlxsw_sp_rif_fid_op(rif, fid_index, false);
mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
}
@@ -9859,11 +9560,119 @@ static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
NULL);
}
-static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
+static int mlxsw_sp_rif_vlan_op(struct mlxsw_sp_rif *rif, u16 vid, u16 efid,
+ bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+
+ mlxsw_reg_ritr_vlan_if_pack(ritr_pl, enable, rif->rif_index, rif->vr_id,
+ rif->dev->mtu, rif->dev->dev_addr,
+ rif->mac_profile_id, vid, efid);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif, u16 efid,
+ struct netlink_ext_ack *extack)
+{
+ u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ u8 mac_profile;
+ int err;
+
+ err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
+ &mac_profile, extack);
+ if (err)
+ return err;
+ rif->mac_profile_id = mac_profile;
+
+ err = mlxsw_sp_rif_vlan_op(rif, vid, efid, true);
+ if (err)
+ goto err_rif_vlan_fid_op;
+
+ err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
+ mlxsw_sp_router_port(mlxsw_sp), true);
+ if (err)
+ goto err_fid_mc_flood_set;
+
+ err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), true);
+ if (err)
+ goto err_fid_bc_flood_set;
+
+ err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ err = mlxsw_sp_fid_rif_set(rif->fid, rif);
+ if (err)
+ goto err_fid_rif_set;
+
+ return 0;
+
+err_fid_rif_set:
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+err_rif_fdb_op:
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+err_fid_bc_flood_set:
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+err_fid_mc_flood_set:
+ mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
+err_rif_vlan_fid_op:
+ mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
+ return err;
+}
+
+static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
+{
+ u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+
+ mlxsw_sp_fid_rif_unset(rif->fid);
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_fid_index(rif->fid), false);
+ mlxsw_sp_rif_macvlan_flush(rif);
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+ mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
+ mlxsw_sp_router_port(mlxsw_sp), false);
+ mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
+ mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
+}
+
+static int mlxsw_sp1_rif_vlan_configure(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
+{
+ return mlxsw_sp_rif_vlan_configure(rif, 0, extack);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_vlan_ops = {
.type = MLXSW_SP_RIF_TYPE_VLAN,
.rif_size = sizeof(struct mlxsw_sp_rif),
- .configure = mlxsw_sp_rif_fid_configure,
- .deconfigure = mlxsw_sp_rif_fid_deconfigure,
+ .configure = mlxsw_sp1_rif_vlan_configure,
+ .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
+ .fid_get = mlxsw_sp_rif_vlan_fid_get,
+ .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
+};
+
+static int mlxsw_sp2_rif_vlan_configure(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
+{
+ u16 efid = mlxsw_sp_fid_index(rif->fid);
+
+ return mlxsw_sp_rif_vlan_configure(rif, efid, extack);
+}
+
+static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_vlan_ops = {
+ .type = MLXSW_SP_RIF_TYPE_VLAN,
+ .rif_size = sizeof(struct mlxsw_sp_rif),
+ .configure = mlxsw_sp2_rif_vlan_configure,
+ .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
.fid_get = mlxsw_sp_rif_vlan_fid_get,
.fdb_del = mlxsw_sp_rif_vlan_fdb_del,
};
@@ -9938,7 +9747,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
- [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
+ [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp1_rif_vlan_ops,
[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp1_rif_ipip_lb_ops,
};
@@ -9981,6 +9790,7 @@ mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
if (err)
goto ul_rif_op_err;
+ atomic_inc(&mlxsw_sp->router->rifs_count);
return ul_rif;
ul_rif_op_err:
@@ -9993,6 +9803,7 @@ static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
{
struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
+ atomic_dec(&mlxsw_sp->router->rifs_count);
mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
kfree(ul_rif);
@@ -10124,7 +9935,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
- [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
+ [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp2_rif_vlan_ops,
[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp2_rif_ipip_lb_ops,
};
@@ -10148,10 +9959,15 @@ static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
idr_init(&mlxsw_sp->router->rif_mac_profiles_idr);
atomic_set(&mlxsw_sp->router->rif_mac_profiles_count, 0);
- devlink_resource_occ_get_register(devlink,
- MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
- mlxsw_sp_rif_mac_profiles_occ_get,
- mlxsw_sp);
+ atomic_set(&mlxsw_sp->router->rifs_count, 0);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
+ mlxsw_sp_rif_mac_profiles_occ_get,
+ mlxsw_sp);
+ devl_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_RIFS,
+ mlxsw_sp_rifs_occ_get,
+ mlxsw_sp);
return 0;
}
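The occupancy getters are now registered through the devl_* variants of the devlink resource helpers, which assume the caller already holds the devlink instance lock (as this init path does), whereas the older devlink_* calls take the lock themselves. As an illustrative sketch only, with hypothetical foo_* names and resource id, a caller outside a devlink callback would wrap the registration itself:

/* Illustrative only: devl_resource_occ_get_register() expects the devlink
 * instance lock to be held by the caller.
 */
static u64 foo_occ_get(void *priv)
{
	const struct foo *foo = priv;

	return atomic_read(&foo->obj_count);
}

static void foo_occ_register(struct devlink *devlink, struct foo *foo)
{
	devl_lock(devlink);
	devl_resource_occ_get_register(devlink, FOO_RESOURCE_ID,
				       foo_occ_get, foo);
	devl_unlock(devlink);
}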
@@ -10161,11 +9977,13 @@ static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int i;
+ WARN_ON_ONCE(atomic_read(&mlxsw_sp->router->rifs_count));
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
- devlink_resource_occ_get_unregister(devlink,
- MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
+ devl_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_RIFS);
+ devl_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
WARN_ON(!idr_is_empty(&mlxsw_sp->router->rif_mac_profiles_idr));
idr_destroy(&mlxsw_sp->router->rif_mac_profiles_idr);
kfree(mlxsw_sp->router->rifs);
@@ -10546,46 +10364,6 @@ static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}
-static const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_basic_ops = {
- .init = mlxsw_sp_router_ll_basic_init,
- .ralta_write = mlxsw_sp_router_ll_basic_ralta_write,
- .ralst_write = mlxsw_sp_router_ll_basic_ralst_write,
- .raltb_write = mlxsw_sp_router_ll_basic_raltb_write,
- .fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_basic),
- .fib_entry_pack = mlxsw_sp_router_ll_basic_fib_entry_pack,
- .fib_entry_act_remote_pack = mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack,
- .fib_entry_act_local_pack = mlxsw_sp_router_ll_basic_fib_entry_act_local_pack,
- .fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack,
- .fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack,
- .fib_entry_commit = mlxsw_sp_router_ll_basic_fib_entry_commit,
- .fib_entry_is_committed = mlxsw_sp_router_ll_basic_fib_entry_is_committed,
-};
-
-static int mlxsw_sp_router_ll_op_ctx_init(struct mlxsw_sp_router *router)
-{
- size_t max_size = 0;
- int i;
-
- for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
- size_t size = router->proto_ll_ops[i]->fib_entry_op_ctx_size;
-
- if (size > max_size)
- max_size = size;
- }
- router->ll_op_ctx = kzalloc(sizeof(*router->ll_op_ctx) + max_size,
- GFP_KERNEL);
- if (!router->ll_op_ctx)
- return -ENOMEM;
- INIT_LIST_HEAD(&router->ll_op_ctx->fib_entry_priv_list);
- return 0;
-}
-
-static void mlxsw_sp_router_ll_op_ctx_fini(struct mlxsw_sp_router *router)
-{
- WARN_ON(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
- kfree(router->ll_op_ctx);
-}
-
static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp)
{
u16 lb_rif_index;
@@ -10659,23 +10437,9 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_router_ops_init;
- err = mlxsw_sp_router_xm_init(mlxsw_sp);
- if (err)
- goto err_xm_init;
-
- router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV4] = mlxsw_sp_router_xm_ipv4_is_supported(mlxsw_sp) ?
- &mlxsw_sp_router_ll_xm_ops :
- &mlxsw_sp_router_ll_basic_ops;
- router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_router_ll_basic_ops;
-
- err = mlxsw_sp_router_ll_op_ctx_init(router);
- if (err)
- goto err_ll_op_ctx_init;
-
INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list);
INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw,
mlxsw_sp_nh_grp_activity_work);
-
INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
err = __mlxsw_sp_router_init(mlxsw_sp);
if (err)
@@ -10728,10 +10492,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_dscp_init;
- INIT_WORK(&router->fib_event_work, mlxsw_sp_router_fib_event_work);
- INIT_LIST_HEAD(&router->fib_event_queue);
- spin_lock_init(&router->fib_event_queue_lock);
-
router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
err = register_inetaddr_notifier(&router->inetaddr_nb);
if (err)
@@ -10786,7 +10546,6 @@ err_register_inet6addr_notifier:
unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
mlxsw_core_flush_owq();
- WARN_ON(!list_empty(&router->fib_event_queue));
err_dscp_init:
err_mp_hash_init:
mlxsw_sp_neigh_fini(mlxsw_sp);
@@ -10810,10 +10569,6 @@ err_rifs_init:
__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
- mlxsw_sp_router_ll_op_ctx_fini(router);
-err_ll_op_ctx_init:
- mlxsw_sp_router_xm_fini(mlxsw_sp);
-err_xm_init:
err_router_ops_init:
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
@@ -10832,7 +10587,6 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
mlxsw_core_flush_owq();
- WARN_ON(!list_empty(&mlxsw_sp->router->fib_event_queue));
mlxsw_sp_neigh_fini(mlxsw_sp);
mlxsw_sp_lb_rif_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
@@ -10844,8 +10598,6 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_rifs_fini(mlxsw_sp);
__mlxsw_sp_router_fini(mlxsw_sp);
cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
- mlxsw_sp_router_ll_op_ctx_fini(mlxsw_sp->router);
- mlxsw_sp_router_xm_fini(mlxsw_sp);
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 37411b74c3e6..c5dfb972b433 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -15,32 +15,12 @@ struct mlxsw_sp_router_nve_decap {
u8 valid:1;
};
-struct mlxsw_sp_fib_entry_op_ctx {
- u8 bulk_ok:1, /* Indicate to the low-level op it is ok to bulk
- * the actual entry with the one that is the next
- * in queue.
- */
- initialized:1; /* Bit that the low-level op sets in case
- * the context priv is initialized.
- */
- struct list_head fib_entry_priv_list;
- unsigned long event;
- unsigned long ll_priv[];
-};
-
-static inline void
-mlxsw_sp_fib_entry_op_ctx_clear(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
-{
- WARN_ON_ONCE(!list_empty(&op_ctx->fib_entry_priv_list));
- memset(op_ctx, 0, sizeof(*op_ctx));
- INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
-}
-
struct mlxsw_sp_router {
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_rif **rifs;
struct idr rif_mac_profiles_idr;
atomic_t rif_mac_profiles_count;
+ atomic_t rifs_count;
u8 max_rif_mac_profile;
struct mlxsw_sp_vr *vrs;
struct rhashtable neigh_ht;
@@ -72,14 +52,8 @@ struct mlxsw_sp_router {
const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
struct mlxsw_sp_router_nve_decap nve_decap_config;
struct mutex lock; /* Protects shared router resources */
- struct work_struct fib_event_work;
- struct list_head fib_event_queue;
- spinlock_t fib_event_queue_lock; /* Protects fib event queue list */
- /* One set of ops for each protocol: IPv4 and IPv6 */
- const struct mlxsw_sp_router_ll_ops *proto_ll_ops[MLXSW_SP_L3_PROTO_MAX];
struct mlxsw_sp_fib_entry_op_ctx *ll_op_ctx;
u16 lb_rif_index;
- struct mlxsw_sp_router_xm *xm;
const struct mlxsw_sp_adj_grp_size_range *adj_grp_size_ranges;
size_t adj_grp_size_ranges_count;
struct delayed_work nh_grp_activity_dw;
@@ -89,48 +63,6 @@ struct mlxsw_sp_router {
u32 adj_trap_index;
};
-struct mlxsw_sp_fib_entry_priv {
- refcount_t refcnt;
- struct list_head list; /* Member in op_ctx->fib_entry_priv_list */
- unsigned long priv[];
-};
-
-enum mlxsw_sp_fib_entry_op {
- MLXSW_SP_FIB_ENTRY_OP_WRITE,
- MLXSW_SP_FIB_ENTRY_OP_UPDATE,
- MLXSW_SP_FIB_ENTRY_OP_DELETE,
-};
-
-/* Low-level router ops. Basically this is to handle the different
- * register sets to work with ordinary and XM trees and FIB entries.
- */
-struct mlxsw_sp_router_ll_ops {
- int (*init)(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
- enum mlxsw_sp_l3proto proto);
- int (*ralta_write)(struct mlxsw_sp *mlxsw_sp, char *xralta_pl);
- int (*ralst_write)(struct mlxsw_sp *mlxsw_sp, char *xralst_pl);
- int (*raltb_write)(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl);
- size_t fib_entry_op_ctx_size;
- size_t fib_entry_priv_size;
- void (*fib_entry_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_sp_l3proto proto, enum mlxsw_sp_fib_entry_op op,
- u16 virtual_router, u8 prefix_len, unsigned char *addr,
- struct mlxsw_sp_fib_entry_priv *priv);
- void (*fib_entry_act_remote_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_reg_ralue_trap_action trap_action,
- u16 trap_id, u32 adjacency_index, u16 ecmp_size);
- void (*fib_entry_act_local_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_reg_ralue_trap_action trap_action,
- u16 trap_id, u16 local_erif);
- void (*fib_entry_act_ip2me_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx);
- void (*fib_entry_act_ip2me_tun_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- u32 tunnel_ptr);
- int (*fib_entry_commit)(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- bool *postponed_for_bulk);
- bool (*fib_entry_is_committed)(struct mlxsw_sp_fib_entry_priv *priv);
-};
-
struct mlxsw_sp_rif_ipip_lb;
struct mlxsw_sp_rif_ipip_lb_config {
enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
@@ -150,7 +82,6 @@ struct mlxsw_sp_ipip_entry;
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
u16 rif_index);
-u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif);
@@ -232,10 +163,4 @@ int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp);
struct net_device *
mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev);
-extern const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_xm_ops;
-
-int mlxsw_sp_router_xm_init(struct mlxsw_sp *mlxsw_sp);
-void mlxsw_sp_router_xm_fini(struct mlxsw_sp *mlxsw_sp);
-bool mlxsw_sp_router_xm_ipv4_is_supported(const struct mlxsw_sp *mlxsw_sp);
-
#endif /* _MLXSW_ROUTER_H_*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router_xm.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router_xm.c
deleted file mode 100644
index d213af723a2a..000000000000
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router_xm.c
+++ /dev/null
@@ -1,812 +0,0 @@
-// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
-/* Copyright (c) 2020 Mellanox Technologies. All rights reserved */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/rhashtable.h>
-
-#include "spectrum.h"
-#include "core.h"
-#include "reg.h"
-#include "spectrum_router.h"
-
-#define MLXSW_SP_ROUTER_XM_M_VAL 16
-
-static const u8 mlxsw_sp_router_xm_m_val[] = {
- [MLXSW_SP_L3_PROTO_IPV4] = MLXSW_SP_ROUTER_XM_M_VAL,
- [MLXSW_SP_L3_PROTO_IPV6] = 0, /* Currently unused. */
-};
-
-#define MLXSW_SP_ROUTER_XM_L_VAL_MAX 16
-
-struct mlxsw_sp_router_xm {
- bool ipv4_supported;
- bool ipv6_supported;
- unsigned int entries_size;
- struct rhashtable ltable_ht;
- struct rhashtable flush_ht; /* Stores items about to be flushed from cache */
- unsigned int flush_count;
- bool flush_all_mode;
-};
-
-struct mlxsw_sp_router_xm_ltable_node {
- struct rhash_head ht_node; /* Member of router_xm->ltable_ht */
- u16 mindex;
- u8 current_lvalue;
- refcount_t refcnt;
- unsigned int lvalue_ref[MLXSW_SP_ROUTER_XM_L_VAL_MAX + 1];
-};
-
-static const struct rhashtable_params mlxsw_sp_router_xm_ltable_ht_params = {
- .key_offset = offsetof(struct mlxsw_sp_router_xm_ltable_node, mindex),
- .head_offset = offsetof(struct mlxsw_sp_router_xm_ltable_node, ht_node),
- .key_len = sizeof(u16),
- .automatic_shrinking = true,
-};
-
-struct mlxsw_sp_router_xm_flush_info {
- bool all;
- enum mlxsw_sp_l3proto proto;
- u16 virtual_router;
- u8 prefix_len;
- unsigned char addr[sizeof(struct in6_addr)];
-};
-
-struct mlxsw_sp_router_xm_fib_entry {
- bool committed;
- struct mlxsw_sp_router_xm_ltable_node *ltable_node; /* Parent node */
- u16 mindex; /* Store for processing from commit op */
- u8 lvalue;
- struct mlxsw_sp_router_xm_flush_info flush_info;
-};
-
-#define MLXSW_SP_ROUTE_LL_XM_ENTRIES_MAX \
- (MLXSW_REG_XMDR_TRANS_LEN / MLXSW_REG_XMDR_C_LT_ROUTE_V4_LEN)
-
-struct mlxsw_sp_fib_entry_op_ctx_xm {
- bool initialized;
- char xmdr_pl[MLXSW_REG_XMDR_LEN];
- unsigned int trans_offset; /* Offset of the current command within one
- * transaction of XMDR register.
- */
- unsigned int trans_item_len; /* The current command length. This is used
- * to advance 'trans_offset' when the next
- * command is appended.
- */
- unsigned int entries_count;
- struct mlxsw_sp_router_xm_fib_entry *entries[MLXSW_SP_ROUTE_LL_XM_ENTRIES_MAX];
-};
-
-static int mlxsw_sp_router_ll_xm_init(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
- enum mlxsw_sp_l3proto proto)
-{
- char rxlte_pl[MLXSW_REG_RXLTE_LEN];
-
- mlxsw_reg_rxlte_pack(rxlte_pl, vr_id,
- (enum mlxsw_reg_rxlte_protocol) proto, true);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rxlte), rxlte_pl);
-}
-
-static int mlxsw_sp_router_ll_xm_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl)
-{
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xralta), xralta_pl);
-}
-
-static int mlxsw_sp_router_ll_xm_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl)
-{
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xralst), xralst_pl);
-}
-
-static int mlxsw_sp_router_ll_xm_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl)
-{
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xraltb), xraltb_pl);
-}
-
-static u16 mlxsw_sp_router_ll_xm_mindex_get4(const u32 addr)
-{
- /* Currently the M-index is set to linear mode. That means it is defined
- * as 16 MSB of IP address.
- */
- return addr >> MLXSW_SP_ROUTER_XM_L_VAL_MAX;
-}
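A small worked example of the linear-mode M-index above: the index is simply the 16 most significant bits of the IPv4 address, so 192.168.1.0 (0xc0a80100 as a host-order u32) maps to M-index 0xc0a8. A standalone sketch, independent of the driver:

/* Standalone illustration of the 16-MSB M-index computation. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t addr = 0xc0a80100;		/* 192.168.1.0 */
	uint16_t mindex = addr >> 16;		/* linear mode: top 16 bits */

	printf("mindex = 0x%04x\n", mindex);	/* prints mindex = 0xc0a8 */
	return 0;
}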
-
-static u16 mlxsw_sp_router_ll_xm_mindex_get6(const unsigned char *addr)
-{
- WARN_ON_ONCE(1);
- return 0; /* currently unused */
-}
-
-static void mlxsw_sp_router_ll_xm_op_ctx_check_init(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm)
-{
- if (op_ctx->initialized)
- return;
- op_ctx->initialized = true;
-
- mlxsw_reg_xmdr_pack(op_ctx_xm->xmdr_pl, true);
- op_ctx_xm->trans_offset = 0;
- op_ctx_xm->entries_count = 0;
-}
-
-static void mlxsw_sp_router_ll_xm_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_sp_l3proto proto,
- enum mlxsw_sp_fib_entry_op op,
- u16 virtual_router, u8 prefix_len,
- unsigned char *addr,
- struct mlxsw_sp_fib_entry_priv *priv)
-{
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;
- struct mlxsw_sp_router_xm_fib_entry *fib_entry = (void *) priv->priv;
- struct mlxsw_sp_router_xm_flush_info *flush_info;
- enum mlxsw_reg_xmdr_c_ltr_op xmdr_c_ltr_op;
- unsigned int len;
-
- mlxsw_sp_router_ll_xm_op_ctx_check_init(op_ctx, op_ctx_xm);
-
- switch (op) {
- case MLXSW_SP_FIB_ENTRY_OP_WRITE:
- xmdr_c_ltr_op = MLXSW_REG_XMDR_C_LTR_OP_WRITE;
- break;
- case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
- xmdr_c_ltr_op = MLXSW_REG_XMDR_C_LTR_OP_UPDATE;
- break;
- case MLXSW_SP_FIB_ENTRY_OP_DELETE:
- xmdr_c_ltr_op = MLXSW_REG_XMDR_C_LTR_OP_DELETE;
- break;
- default:
- WARN_ON_ONCE(1);
- return;
- }
-
- switch (proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- len = mlxsw_reg_xmdr_c_ltr_pack4(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset,
- op_ctx_xm->entries_count, xmdr_c_ltr_op,
- virtual_router, prefix_len, (u32 *) addr);
- fib_entry->mindex = mlxsw_sp_router_ll_xm_mindex_get4(*((u32 *) addr));
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- len = mlxsw_reg_xmdr_c_ltr_pack6(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset,
- op_ctx_xm->entries_count, xmdr_c_ltr_op,
- virtual_router, prefix_len, addr);
- fib_entry->mindex = mlxsw_sp_router_ll_xm_mindex_get6(addr);
- break;
- default:
- WARN_ON_ONCE(1);
- return;
- }
- if (!op_ctx_xm->trans_offset)
- op_ctx_xm->trans_item_len = len;
- else
- WARN_ON_ONCE(op_ctx_xm->trans_item_len != len);
-
- op_ctx_xm->entries[op_ctx_xm->entries_count] = fib_entry;
-
- fib_entry->lvalue = prefix_len > mlxsw_sp_router_xm_m_val[proto] ?
- prefix_len - mlxsw_sp_router_xm_m_val[proto] : 0;
-
- flush_info = &fib_entry->flush_info;
- flush_info->proto = proto;
- flush_info->virtual_router = virtual_router;
- flush_info->prefix_len = prefix_len;
- if (addr)
- memcpy(flush_info->addr, addr, sizeof(flush_info->addr));
- else
- memset(flush_info->addr, 0, sizeof(flush_info->addr));
-}
-
-static void
-mlxsw_sp_router_ll_xm_fib_entry_act_remote_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_reg_ralue_trap_action trap_action,
- u16 trap_id, u32 adjacency_index, u16 ecmp_size)
-{
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_xmdr_c_ltr_act_remote_pack(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset,
- trap_action, trap_id, adjacency_index, ecmp_size);
-}
-
-static void
-mlxsw_sp_router_ll_xm_fib_entry_act_local_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- enum mlxsw_reg_ralue_trap_action trap_action,
- u16 trap_id, u16 local_erif)
-{
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_xmdr_c_ltr_act_local_pack(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset,
- trap_action, trap_id, local_erif);
-}
-
-static void
-mlxsw_sp_router_ll_xm_fib_entry_act_ip2me_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
-{
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_xmdr_c_ltr_act_ip2me_pack(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset);
-}
-
-static void
-mlxsw_sp_router_ll_xm_fib_entry_act_ip2me_tun_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- u32 tunnel_ptr)
-{
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;
-
- mlxsw_reg_xmdr_c_ltr_act_ip2me_tun_pack(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset,
- tunnel_ptr);
-}
-
-static struct mlxsw_sp_router_xm_ltable_node *
-mlxsw_sp_router_xm_ltable_node_get(struct mlxsw_sp_router_xm *router_xm, u16 mindex)
-{
- struct mlxsw_sp_router_xm_ltable_node *ltable_node;
- int err;
-
- ltable_node = rhashtable_lookup_fast(&router_xm->ltable_ht, &mindex,
- mlxsw_sp_router_xm_ltable_ht_params);
- if (ltable_node) {
- refcount_inc(&ltable_node->refcnt);
- return ltable_node;
- }
- ltable_node = kzalloc(sizeof(*ltable_node), GFP_KERNEL);
- if (!ltable_node)
- return ERR_PTR(-ENOMEM);
- ltable_node->mindex = mindex;
- refcount_set(&ltable_node->refcnt, 1);
-
- err = rhashtable_insert_fast(&router_xm->ltable_ht, &ltable_node->ht_node,
- mlxsw_sp_router_xm_ltable_ht_params);
- if (err)
- goto err_insert;
-
- return ltable_node;
-
-err_insert:
- kfree(ltable_node);
- return ERR_PTR(err);
-}
-
-static void mlxsw_sp_router_xm_ltable_node_put(struct mlxsw_sp_router_xm *router_xm,
- struct mlxsw_sp_router_xm_ltable_node *ltable_node)
-{
- if (!refcount_dec_and_test(&ltable_node->refcnt))
- return;
- rhashtable_remove_fast(&router_xm->ltable_ht, &ltable_node->ht_node,
- mlxsw_sp_router_xm_ltable_ht_params);
- kfree(ltable_node);
-}
-
-static int mlxsw_sp_router_xm_ltable_lvalue_set(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_router_xm_ltable_node *ltable_node)
-{
- char xrmt_pl[MLXSW_REG_XRMT_LEN];
-
- mlxsw_reg_xrmt_pack(xrmt_pl, ltable_node->mindex, ltable_node->current_lvalue);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xrmt), xrmt_pl);
-}
-
-struct mlxsw_sp_router_xm_flush_node {
- struct rhash_head ht_node; /* Member of router_xm->flush_ht */
- struct list_head list;
- struct mlxsw_sp_router_xm_flush_info flush_info;
- struct delayed_work dw;
- struct mlxsw_sp *mlxsw_sp;
- unsigned long start_jiffies;
- unsigned int reuses; /* By how many flush calls this was reused. */
- refcount_t refcnt;
-};
-
-static const struct rhashtable_params mlxsw_sp_router_xm_flush_ht_params = {
- .key_offset = offsetof(struct mlxsw_sp_router_xm_flush_node, flush_info),
- .head_offset = offsetof(struct mlxsw_sp_router_xm_flush_node, ht_node),
- .key_len = sizeof(struct mlxsw_sp_router_xm_flush_info),
- .automatic_shrinking = true,
-};
-
-static struct mlxsw_sp_router_xm_flush_node *
-mlxsw_sp_router_xm_cache_flush_node_create(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_router_xm_flush_info *flush_info)
-{
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
- struct mlxsw_sp_router_xm_flush_node *flush_node;
- int err;
-
- flush_node = kzalloc(sizeof(*flush_node), GFP_KERNEL);
- if (!flush_node)
- return ERR_PTR(-ENOMEM);
-
- flush_node->flush_info = *flush_info;
- err = rhashtable_insert_fast(&router_xm->flush_ht, &flush_node->ht_node,
- mlxsw_sp_router_xm_flush_ht_params);
- if (err) {
- kfree(flush_node);
- return ERR_PTR(err);
- }
- router_xm->flush_count++;
- flush_node->mlxsw_sp = mlxsw_sp;
- flush_node->start_jiffies = jiffies;
- refcount_set(&flush_node->refcnt, 1);
- return flush_node;
-}
-
-static void
-mlxsw_sp_router_xm_cache_flush_node_hold(struct mlxsw_sp_router_xm_flush_node *flush_node)
-{
- if (!flush_node)
- return;
- refcount_inc(&flush_node->refcnt);
-}
-
-static void
-mlxsw_sp_router_xm_cache_flush_node_put(struct mlxsw_sp_router_xm_flush_node *flush_node)
-{
- if (!flush_node || !refcount_dec_and_test(&flush_node->refcnt))
- return;
- kfree(flush_node);
-}
-
-static void
-mlxsw_sp_router_xm_cache_flush_node_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_router_xm_flush_node *flush_node)
-{
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
-
- router_xm->flush_count--;
- rhashtable_remove_fast(&router_xm->flush_ht, &flush_node->ht_node,
- mlxsw_sp_router_xm_flush_ht_params);
- mlxsw_sp_router_xm_cache_flush_node_put(flush_node);
-}
-
-static u32 mlxsw_sp_router_xm_flush_mask4(u8 prefix_len)
-{
- return GENMASK(31, 32 - prefix_len);
-}
-
-static unsigned char *mlxsw_sp_router_xm_flush_mask6(u8 prefix_len)
-{
- static unsigned char mask[sizeof(struct in6_addr)];
-
- memset(mask, 0, sizeof(mask));
- memset(mask, 0xff, prefix_len / 8);
- mask[prefix_len / 8] = GENMASK(8, 8 - prefix_len % 8);
- return mask;
-}
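For reference, the IPv4 flush mask above is just a network mask built from the prefix length: GENMASK(31, 32 - prefix_len) with prefix_len == 24 sets bits 31..8, i.e. 0xffffff00. A minimal equivalent for illustration, without the kernel GENMASK() macro and assuming 1 <= prefix_len <= 32 as in the driver:

#include <stdint.h>

/* Illustrative equivalent of mlxsw_sp_router_xm_flush_mask4(). */
static inline uint32_t flush_mask4(uint8_t prefix_len)
{
	return ~(uint32_t)0 << (32 - prefix_len);
}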
-
-#define MLXSW_SP_ROUTER_XM_CACHE_PARALLEL_FLUSHES_LIMIT 15
-#define MLXSW_SP_ROUTER_XM_CACHE_FLUSH_ALL_MIN_REUSES 15
-#define MLXSW_SP_ROUTER_XM_CACHE_DELAY 50 /* usecs */
-#define MLXSW_SP_ROUTER_XM_CACHE_MAX_WAIT (MLXSW_SP_ROUTER_XM_CACHE_DELAY * 10)
-
-static void mlxsw_sp_router_xm_cache_flush_work(struct work_struct *work)
-{
- struct mlxsw_sp_router_xm_flush_info *flush_info;
- struct mlxsw_sp_router_xm_flush_node *flush_node;
- char rlcmld_pl[MLXSW_REG_RLCMLD_LEN];
- enum mlxsw_reg_rlcmld_select select;
- struct mlxsw_sp *mlxsw_sp;
- u32 addr4;
- int err;
-
- flush_node = container_of(work, struct mlxsw_sp_router_xm_flush_node,
- dw.work);
- mlxsw_sp = flush_node->mlxsw_sp;
- flush_info = &flush_node->flush_info;
-
- if (flush_info->all) {
- char rlpmce_pl[MLXSW_REG_RLPMCE_LEN];
-
- mlxsw_reg_rlpmce_pack(rlpmce_pl, true, false);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rlpmce),
- rlpmce_pl);
- if (err)
- dev_err(mlxsw_sp->bus_info->dev, "Failed to flush XM cache\n");
-
- if (flush_node->reuses <
- MLXSW_SP_ROUTER_XM_CACHE_FLUSH_ALL_MIN_REUSES)
- /* Leaving flush-all mode. */
- mlxsw_sp->router->xm->flush_all_mode = false;
- goto out;
- }
-
- select = MLXSW_REG_RLCMLD_SELECT_M_AND_ML_ENTRIES;
-
- switch (flush_info->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- addr4 = *((u32 *) flush_info->addr);
- addr4 &= mlxsw_sp_router_xm_flush_mask4(flush_info->prefix_len);
-
- /* In case the flush prefix length is bigger than M-value,
- * it makes no sense to flush M entries. So just flush
- * the ML entries.
- */
- if (flush_info->prefix_len > MLXSW_SP_ROUTER_XM_M_VAL)
- select = MLXSW_REG_RLCMLD_SELECT_ML_ENTRIES;
-
- mlxsw_reg_rlcmld_pack4(rlcmld_pl, select,
- flush_info->virtual_router, addr4,
- mlxsw_sp_router_xm_flush_mask4(flush_info->prefix_len));
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- mlxsw_reg_rlcmld_pack6(rlcmld_pl, select,
- flush_info->virtual_router, flush_info->addr,
- mlxsw_sp_router_xm_flush_mask6(flush_info->prefix_len));
- break;
- default:
- WARN_ON(true);
- goto out;
- }
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rlcmld), rlcmld_pl);
- if (err)
- dev_err(mlxsw_sp->bus_info->dev, "Failed to flush XM cache\n");
-
-out:
- mlxsw_sp_router_xm_cache_flush_node_destroy(mlxsw_sp, flush_node);
-}
-
-static bool
-mlxsw_sp_router_xm_cache_flush_may_cancel(struct mlxsw_sp_router_xm_flush_node *flush_node)
-{
- unsigned long max_wait = usecs_to_jiffies(MLXSW_SP_ROUTER_XM_CACHE_MAX_WAIT);
- unsigned long delay = usecs_to_jiffies(MLXSW_SP_ROUTER_XM_CACHE_DELAY);
-
- /* In case there is the same flushing work pending, check
- * if we can consolidate with it. We can do it up to MAX_WAIT.
- * Cancel the delayed work; if it was still pending, consolidation is possible.
- */
- if (time_is_before_jiffies(flush_node->start_jiffies + max_wait - delay) &&
- cancel_delayed_work_sync(&flush_node->dw))
- return true;
- return false;
-}
-
-static int
-mlxsw_sp_router_xm_cache_flush_schedule(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_router_xm_flush_info *flush_info)
-{
- unsigned long delay = usecs_to_jiffies(MLXSW_SP_ROUTER_XM_CACHE_DELAY);
- struct mlxsw_sp_router_xm_flush_info flush_all_info = {.all = true};
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
- struct mlxsw_sp_router_xm_flush_node *flush_node;
-
- /* Check if the queued number of flushes reached critical amount after
- * which it is better to just flush the whole cache.
- */
- if (router_xm->flush_count == MLXSW_SP_ROUTER_XM_CACHE_PARALLEL_FLUSHES_LIMIT)
- /* Entering flush-all mode. */
- router_xm->flush_all_mode = true;
-
- if (router_xm->flush_all_mode)
- flush_info = &flush_all_info;
-
- rcu_read_lock();
- flush_node = rhashtable_lookup_fast(&router_xm->flush_ht, flush_info,
- mlxsw_sp_router_xm_flush_ht_params);
- /* Take a reference so the object is not freed before possible
- * delayed work cancel could be done.
- */
- mlxsw_sp_router_xm_cache_flush_node_hold(flush_node);
- rcu_read_unlock();
-
- if (flush_node && mlxsw_sp_router_xm_cache_flush_may_cancel(flush_node)) {
- flush_node->reuses++;
- mlxsw_sp_router_xm_cache_flush_node_put(flush_node);
- /* Original work was within wait period and was canceled.
- * That means that the reference is still held and the
- * flush_node_put() call above did not free the flush_node.
- * Reschedule it with fresh delay.
- */
- goto schedule_work;
- } else {
- mlxsw_sp_router_xm_cache_flush_node_put(flush_node);
- }
-
- flush_node = mlxsw_sp_router_xm_cache_flush_node_create(mlxsw_sp, flush_info);
- if (IS_ERR(flush_node))
- return PTR_ERR(flush_node);
- INIT_DELAYED_WORK(&flush_node->dw, mlxsw_sp_router_xm_cache_flush_work);
-
-schedule_work:
- mlxsw_core_schedule_dw(&flush_node->dw, delay);
- return 0;
-}
-
-static int
-mlxsw_sp_router_xm_ml_entry_add(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_router_xm_fib_entry *fib_entry)
-{
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
- struct mlxsw_sp_router_xm_ltable_node *ltable_node;
- u8 lvalue = fib_entry->lvalue;
- int err;
-
- ltable_node = mlxsw_sp_router_xm_ltable_node_get(router_xm,
- fib_entry->mindex);
- if (IS_ERR(ltable_node))
- return PTR_ERR(ltable_node);
- if (lvalue > ltable_node->current_lvalue) {
- /* The L-value is bigger than the one currently set, update. */
- ltable_node->current_lvalue = lvalue;
- err = mlxsw_sp_router_xm_ltable_lvalue_set(mlxsw_sp,
- ltable_node);
- if (err)
- goto err_lvalue_set;
-
- /* The L value for prefix/M is increased.
- * Therefore, all entries in M and ML caches matching
- * {prefix/M, proto, VR} need to be flushed. Set the flush
- * prefix length to M to achieve that.
- */
- fib_entry->flush_info.prefix_len = MLXSW_SP_ROUTER_XM_M_VAL;
- }
-
- ltable_node->lvalue_ref[lvalue]++;
- fib_entry->ltable_node = ltable_node;
-
- return 0;
-
-err_lvalue_set:
- mlxsw_sp_router_xm_ltable_node_put(router_xm, ltable_node);
- return err;
-}
-
-static void
-mlxsw_sp_router_xm_ml_entry_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_router_xm_fib_entry *fib_entry)
-{
- struct mlxsw_sp_router_xm_ltable_node *ltable_node =
- fib_entry->ltable_node;
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
- u8 lvalue = fib_entry->lvalue;
-
- ltable_node->lvalue_ref[lvalue]--;
- if (lvalue == ltable_node->current_lvalue && lvalue &&
- !ltable_node->lvalue_ref[lvalue]) {
- u8 new_lvalue = lvalue - 1;
-
- /* Find the biggest L-value left out there. */
- while (new_lvalue > 0 && !ltable_node->lvalue_ref[lvalue])
- new_lvalue--;
-
- ltable_node->current_lvalue = new_lvalue;
- mlxsw_sp_router_xm_ltable_lvalue_set(mlxsw_sp, ltable_node);
-
- /* The L value for prefix/M is decreased.
- * Therefore, all entries in M and ML caches matching
- * {prefix/M, proto, VR} need to be flushed. Set the flush
- * prefix length to M to achieve that.
- */
- fib_entry->flush_info.prefix_len = MLXSW_SP_ROUTER_XM_M_VAL;
- }
- mlxsw_sp_router_xm_ltable_node_put(router_xm, ltable_node);
-}
-
-static int
-mlxsw_sp_router_xm_ml_entries_add(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm)
-{
- struct mlxsw_sp_router_xm_fib_entry *fib_entry;
- int err;
- int i;
-
- for (i = 0; i < op_ctx_xm->entries_count; i++) {
- fib_entry = op_ctx_xm->entries[i];
- err = mlxsw_sp_router_xm_ml_entry_add(mlxsw_sp, fib_entry);
- if (err)
- goto rollback;
- }
- return 0;
-
-rollback:
- for (i--; i >= 0; i--) {
- fib_entry = op_ctx_xm->entries[i];
- mlxsw_sp_router_xm_ml_entry_del(mlxsw_sp, fib_entry);
- }
- return err;
-}
-
-static void
-mlxsw_sp_router_xm_ml_entries_del(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm)
-{
- struct mlxsw_sp_router_xm_fib_entry *fib_entry;
- int i;
-
- for (i = 0; i < op_ctx_xm->entries_count; i++) {
- fib_entry = op_ctx_xm->entries[i];
- mlxsw_sp_router_xm_ml_entry_del(mlxsw_sp, fib_entry);
- }
-}
-
-static void
-mlxsw_sp_router_xm_ml_entries_cache_flush(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm)
-{
- struct mlxsw_sp_router_xm_fib_entry *fib_entry;
- int err;
- int i;
-
- for (i = 0; i < op_ctx_xm->entries_count; i++) {
- fib_entry = op_ctx_xm->entries[i];
- err = mlxsw_sp_router_xm_cache_flush_schedule(mlxsw_sp,
- &fib_entry->flush_info);
- if (err)
- dev_err(mlxsw_sp->bus_info->dev, "Failed to flush XM cache\n");
- }
-}
-
-static int mlxsw_sp_router_ll_xm_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
- bool *postponed_for_bulk)
-{
- struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv;
- struct mlxsw_sp_router_xm_fib_entry *fib_entry;
- u8 num_rec;
- int err;
- int i;
-
- op_ctx_xm->trans_offset += op_ctx_xm->trans_item_len;
- op_ctx_xm->entries_count++;
-
- /* Check if bulking is possible and there is still room for another
- * FIB entry record. The size of 'trans_item_len' is either size of IPv4
- * command or size of IPv6 command. Not possible to mix those in a
- * single XMDR write.
- */
- if (op_ctx->bulk_ok &&
- op_ctx_xm->trans_offset + op_ctx_xm->trans_item_len <= MLXSW_REG_XMDR_TRANS_LEN) {
- if (postponed_for_bulk)
- *postponed_for_bulk = true;
- return 0;
- }
-
- if (op_ctx->event == FIB_EVENT_ENTRY_REPLACE) {
- /* The L-table is updated inside. It has to be done before
- * the prefix is inserted.
- */
- err = mlxsw_sp_router_xm_ml_entries_add(mlxsw_sp, op_ctx_xm);
- if (err)
- goto out;
- }
-
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xmdr), op_ctx_xm->xmdr_pl);
- if (err)
- goto out;
- num_rec = mlxsw_reg_xmdr_num_rec_get(op_ctx_xm->xmdr_pl);
- if (num_rec > op_ctx_xm->entries_count) {
- dev_err(mlxsw_sp->bus_info->dev, "Invalid XMDR number of records\n");
- err = -EIO;
- goto out;
- }
- for (i = 0; i < num_rec; i++) {
- if (!mlxsw_reg_xmdr_reply_vect_get(op_ctx_xm->xmdr_pl, i)) {
- dev_err(mlxsw_sp->bus_info->dev, "Command send over XMDR failed\n");
- err = -EIO;
- goto out;
- } else {
- fib_entry = op_ctx_xm->entries[i];
- fib_entry->committed = true;
- }
- }
-
- if (op_ctx->event == FIB_EVENT_ENTRY_DEL)
- /* The L-table is updated inside. It has to be done after
- * the prefix was removed.
- */
- mlxsw_sp_router_xm_ml_entries_del(mlxsw_sp, op_ctx_xm);
-
- /* At the very end, do the XLT cache flushing to evict stale
- * M and ML cache entries after prefixes were inserted/removed.
- */
- mlxsw_sp_router_xm_ml_entries_cache_flush(mlxsw_sp, op_ctx_xm);
-
-out:
- /* Next pack call is going to do reinitialization */
- op_ctx->initialized = false;
- return err;
-}
-
-static bool mlxsw_sp_router_ll_xm_fib_entry_is_committed(struct mlxsw_sp_fib_entry_priv *priv)
-{
- struct mlxsw_sp_router_xm_fib_entry *fib_entry = (void *) priv->priv;
-
- return fib_entry->committed;
-}
-
-const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_xm_ops = {
- .init = mlxsw_sp_router_ll_xm_init,
- .ralta_write = mlxsw_sp_router_ll_xm_ralta_write,
- .ralst_write = mlxsw_sp_router_ll_xm_ralst_write,
- .raltb_write = mlxsw_sp_router_ll_xm_raltb_write,
- .fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_xm),
- .fib_entry_priv_size = sizeof(struct mlxsw_sp_router_xm_fib_entry),
- .fib_entry_pack = mlxsw_sp_router_ll_xm_fib_entry_pack,
- .fib_entry_act_remote_pack = mlxsw_sp_router_ll_xm_fib_entry_act_remote_pack,
- .fib_entry_act_local_pack = mlxsw_sp_router_ll_xm_fib_entry_act_local_pack,
- .fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_xm_fib_entry_act_ip2me_pack,
- .fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_xm_fib_entry_act_ip2me_tun_pack,
- .fib_entry_commit = mlxsw_sp_router_ll_xm_fib_entry_commit,
- .fib_entry_is_committed = mlxsw_sp_router_ll_xm_fib_entry_is_committed,
-};
-
-#define MLXSW_SP_ROUTER_XM_MINDEX_SIZE (64 * 1024)
-
-int mlxsw_sp_router_xm_init(struct mlxsw_sp *mlxsw_sp)
-{
- struct mlxsw_sp_router_xm *router_xm;
- char rxltm_pl[MLXSW_REG_RXLTM_LEN];
- char xltq_pl[MLXSW_REG_XLTQ_LEN];
- u32 mindex_size;
- u16 device_id;
- int err;
-
- if (!mlxsw_sp->bus_info->xm_exists)
- return 0;
-
- router_xm = kzalloc(sizeof(*router_xm), GFP_KERNEL);
- if (!router_xm)
- return -ENOMEM;
-
- mlxsw_reg_xltq_pack(xltq_pl);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(xltq), xltq_pl);
- if (err)
- goto err_xltq_query;
- mlxsw_reg_xltq_unpack(xltq_pl, &device_id, &router_xm->ipv4_supported,
- &router_xm->ipv6_supported, &router_xm->entries_size, &mindex_size);
-
- if (device_id != MLXSW_REG_XLTQ_XM_DEVICE_ID_XLT) {
- dev_err(mlxsw_sp->bus_info->dev, "Invalid XM device id\n");
- err = -EINVAL;
- goto err_device_id_check;
- }
-
- if (mindex_size != MLXSW_SP_ROUTER_XM_MINDEX_SIZE) {
- dev_err(mlxsw_sp->bus_info->dev, "Unexpected M-index size\n");
- err = -EINVAL;
- goto err_mindex_size_check;
- }
-
- mlxsw_reg_rxltm_pack(rxltm_pl, mlxsw_sp_router_xm_m_val[MLXSW_SP_L3_PROTO_IPV4],
- mlxsw_sp_router_xm_m_val[MLXSW_SP_L3_PROTO_IPV6]);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rxltm), rxltm_pl);
- if (err)
- goto err_rxltm_write;
-
- err = rhashtable_init(&router_xm->ltable_ht, &mlxsw_sp_router_xm_ltable_ht_params);
- if (err)
- goto err_ltable_ht_init;
-
- err = rhashtable_init(&router_xm->flush_ht, &mlxsw_sp_router_xm_flush_ht_params);
- if (err)
- goto err_flush_ht_init;
-
- mlxsw_sp->router->xm = router_xm;
- return 0;
-
-err_flush_ht_init:
- rhashtable_destroy(&router_xm->ltable_ht);
-err_ltable_ht_init:
-err_rxltm_write:
-err_mindex_size_check:
-err_device_id_check:
-err_xltq_query:
- kfree(router_xm);
- return err;
-}
-
-void mlxsw_sp_router_xm_fini(struct mlxsw_sp *mlxsw_sp)
-{
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
-
- if (!mlxsw_sp->bus_info->xm_exists)
- return;
-
- rhashtable_destroy(&router_xm->flush_ht);
- rhashtable_destroy(&router_xm->ltable_ht);
- kfree(router_xm);
-}
-
-bool mlxsw_sp_router_xm_ipv4_is_supported(const struct mlxsw_sp *mlxsw_sp)
-{
- struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm;
-
- return router_xm && router_xm->ipv4_supported;
-}
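
The removed mlxsw_sp_router_xm_init() above (like the ml_entries_add() rollback loop before it) follows the kernel's usual unwind idiom: each failure label undoes, in reverse order, only the steps that already succeeded. A minimal standalone sketch of that idiom, using hypothetical step_a()/step_b()/undo_step_a() helpers instead of the mlxsw calls, would look like:

static int example_init(struct example *ex)
{
	int err;

	err = step_a(ex);		/* e.g. first rhashtable_init() */
	if (err)
		goto err_step_a;

	err = step_b(ex);		/* e.g. second rhashtable_init() */
	if (err)
		goto err_step_b;

	return 0;

err_step_b:
	undo_step_a(ex);		/* e.g. rhashtable_destroy() */
err_step_a:
	return err;
}
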
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index fe663b0ab708..39904dacf4f0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -106,8 +106,8 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_init;
- devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
- mlxsw_sp_span_occ_get, mlxsw_sp);
+ devl_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
+ mlxsw_sp_span_occ_get, mlxsw_sp);
INIT_WORK(&span->work, mlxsw_sp_span_respin_work);
return 0;
@@ -123,7 +123,7 @@ void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
cancel_work_sync(&mlxsw_sp->span->work);
- devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);
+ devl_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);
WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->trigger_entries_list));
WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->analyzed_ports_list));
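
Here (and in the trap, trap-group and policer registrations further below) the devlink_* helpers are replaced by the devl_* variants, which do not take the devlink instance lock themselves but expect the caller to already hold it. A rough, illustrative sketch of the calling convention, assuming the lock were taken explicitly around the call rather than higher up in the probe/remove path as mlxsw actually does:

	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);

	devl_lock(devlink);
	devl_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
				       mlxsw_sp_span_occ_get, mlxsw_sp);
	/* ... later, still under the same lock ... */
	devl_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);
	devl_unlock(devlink);
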
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index a6d2e806cba9..4efccd942fb8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -48,7 +48,8 @@ struct mlxsw_sp_bridge_device {
struct net_device *dev;
struct list_head list;
struct list_head ports_list;
- struct list_head mids_list;
+ struct list_head mdb_list;
+ struct rhashtable mdb_ht;
u8 vlan_enabled:1,
multicast_enabled:1,
mrouter:1;
@@ -102,6 +103,33 @@ struct mlxsw_sp_switchdev_ops {
void (*init)(struct mlxsw_sp *mlxsw_sp);
};
+struct mlxsw_sp_mdb_entry_key {
+ unsigned char addr[ETH_ALEN];
+ u16 fid;
+};
+
+struct mlxsw_sp_mdb_entry {
+ struct list_head list;
+ struct rhash_head ht_node;
+ struct mlxsw_sp_mdb_entry_key key;
+ u16 mid;
+ struct list_head ports_list;
+ u16 ports_count;
+};
+
+struct mlxsw_sp_mdb_entry_port {
+ struct list_head list; /* Member of 'ports_list'. */
+ u16 local_port;
+ refcount_t refcount;
+ bool mrouter;
+};
+
+static const struct rhashtable_params mlxsw_sp_mdb_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_mdb_entry, key),
+ .head_offset = offsetof(struct mlxsw_sp_mdb_entry, ht_node),
+ .key_len = sizeof(struct mlxsw_sp_mdb_entry_key),
+};
+
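
With these definitions, each bridge device tracks its MDB entries both on mdb_list (for ordered walks) and in mdb_ht, an rhashtable keyed by the {multicast MAC, FID} pair. Callers build the key on the stack, zero-initializing it before filling in the address and FID, exactly as the later hunks do; a condensed sketch of the lookup-or-insert pattern built on these declarations:

	struct mlxsw_sp_mdb_entry_key key = {};
	struct mlxsw_sp_mdb_entry *mdb_entry;

	ether_addr_copy(key.addr, addr);
	key.fid = fid;
	mdb_entry = rhashtable_lookup_fast(&bridge_device->mdb_ht, &key,
					   mlxsw_sp_mdb_ht_params);
	if (!mdb_entry) {
		/* allocate and initialize a new entry, then publish it */
		err = rhashtable_insert_fast(&bridge_device->mdb_ht,
					     &mdb_entry->ht_node,
					     mlxsw_sp_mdb_ht_params);
	}
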
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_port *bridge_port,
@@ -109,12 +137,13 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_bridge_port *bridge_port);
+ struct mlxsw_sp_bridge_port *bridge_port,
+ u16 fid_index);
-static void
-mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
+static int
+mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_device
- *bridge_device);
+ *bridge_device, bool mc_enabled);
static void
mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -237,6 +266,10 @@ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
if (!bridge_device)
return ERR_PTR(-ENOMEM);
+ err = rhashtable_init(&bridge_device->mdb_ht, &mlxsw_sp_mdb_ht_params);
+ if (err)
+ goto err_mdb_rhashtable_init;
+
bridge_device->dev = br_dev;
bridge_device->vlan_enabled = vlan_enabled;
bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
@@ -254,7 +287,8 @@ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
} else {
bridge_device->ops = bridge->bridge_8021d_ops;
}
- INIT_LIST_HEAD(&bridge_device->mids_list);
+ INIT_LIST_HEAD(&bridge_device->mdb_list);
+
if (list_empty(&bridge->bridges_list))
mlxsw_sp_fdb_notify_work_schedule(bridge->mlxsw_sp, false);
list_add(&bridge_device->list, &bridge->bridges_list);
@@ -273,6 +307,8 @@ err_vxlan_init:
list_del(&bridge_device->list);
if (bridge_device->vlan_enabled)
bridge->vlan_enabled_exists = false;
+ rhashtable_destroy(&bridge_device->mdb_ht);
+err_mdb_rhashtable_init:
kfree(bridge_device);
return ERR_PTR(err);
}
@@ -290,7 +326,8 @@ mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
if (bridge_device->vlan_enabled)
bridge->vlan_enabled_exists = false;
WARN_ON(!list_empty(&bridge_device->ports_list));
- WARN_ON(!list_empty(&bridge_device->mids_list));
+ WARN_ON(!list_empty(&bridge_device->mdb_list));
+ rhashtable_destroy(&bridge_device->mdb_ht);
kfree(bridge_device);
}
@@ -643,6 +680,64 @@ err_port_bridge_vlan_flood_set:
}
static int
+mlxsw_sp_bridge_vlans_flood_set(struct mlxsw_sp_bridge_vlan *bridge_vlan,
+ enum mlxsw_sp_flood_type packet_type,
+ bool member)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ int err;
+
+ list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
+ bridge_vlan_node) {
+ u16 local_port = mlxsw_sp_port_vlan->mlxsw_sp_port->local_port;
+
+ err = mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
+ packet_type, local_port, member);
+ if (err)
+ goto err_fid_flood_set;
+ }
+
+ return 0;
+
+err_fid_flood_set:
+ list_for_each_entry_continue_reverse(mlxsw_sp_port_vlan,
+ &bridge_vlan->port_vlan_list,
+ list) {
+ u16 local_port = mlxsw_sp_port_vlan->mlxsw_sp_port->local_port;
+
+ mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid, packet_type,
+ local_port, !member);
+ }
+
+ return err;
+}
+
+static int
+mlxsw_sp_bridge_ports_flood_table_set(struct mlxsw_sp_bridge_port *bridge_port,
+ enum mlxsw_sp_flood_type packet_type,
+ bool member)
+{
+ struct mlxsw_sp_bridge_vlan *bridge_vlan;
+ int err;
+
+ list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
+ err = mlxsw_sp_bridge_vlans_flood_set(bridge_vlan, packet_type,
+ member);
+ if (err)
+ goto err_bridge_vlans_flood_set;
+ }
+
+ return 0;
+
+err_bridge_vlans_flood_set:
+ list_for_each_entry_continue_reverse(bridge_vlan,
+ &bridge_port->vlans_list, list)
+ mlxsw_sp_bridge_vlans_flood_set(bridge_vlan, packet_type,
+ !member);
+ return err;
+}
+
+static int
mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_vlan *bridge_vlan,
bool set)
@@ -813,6 +908,9 @@ static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_port)
return 0;
+ mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
+ is_port_mrouter);
+
if (!bridge_port->bridge_device->multicast_enabled)
goto out;
@@ -822,8 +920,6 @@ static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (err)
return err;
- mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
- is_port_mrouter);
out:
bridge_port->mrouter = is_port_mrouter;
return 0;
@@ -842,6 +938,7 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *orig_dev,
bool mc_disabled)
{
+ enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
@@ -854,43 +951,184 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_device)
return 0;
- if (bridge_device->multicast_enabled != !mc_disabled) {
- bridge_device->multicast_enabled = !mc_disabled;
- mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
- bridge_device);
- }
+ if (bridge_device->multicast_enabled == !mc_disabled)
+ return 0;
+
+ bridge_device->multicast_enabled = !mc_disabled;
+ err = mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp, bridge_device,
+ !mc_disabled);
+ if (err)
+ goto err_mc_enable_sync;
list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
- enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
bool member = mlxsw_sp_mc_flood(bridge_port);
- err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
- bridge_port,
- packet_type, member);
+ err = mlxsw_sp_bridge_ports_flood_table_set(bridge_port,
+ packet_type,
+ member);
if (err)
- return err;
+ goto err_flood_table_set;
}
- bridge_device->multicast_enabled = !mc_disabled;
-
return 0;
+
+err_flood_table_set:
+ list_for_each_entry_continue_reverse(bridge_port,
+ &bridge_device->ports_list, list) {
+ bool member = mlxsw_sp_mc_flood(bridge_port);
+
+ mlxsw_sp_bridge_ports_flood_table_set(bridge_port, packet_type,
+ !member);
+ }
+ mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp, bridge_device,
+ mc_disabled);
+err_mc_enable_sync:
+ bridge_device->multicast_enabled = mc_disabled;
+ return err;
+}
+
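
The reworked mlxsw_sp_port_mc_disabled_set() above is transactional: if enabling or disabling multicast fails partway through, the bridge ports already updated are walked back with list_for_each_entry_continue_reverse(), which resumes from the failing entry and revisits the preceding entries in reverse order. The same rollback shape recurs in the flood-table and MDB-sync helpers in this patch; a generic sketch with hypothetical apply()/revert() helpers:

	list_for_each_entry(item, &head, list) {
		err = apply(item);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	list_for_each_entry_continue_reverse(item, &head, list)
		revert(item);
	return err;
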
+static struct mlxsw_sp_mdb_entry_port *
+mlxsw_sp_mdb_entry_port_lookup(struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+
+ list_for_each_entry(mdb_entry_port, &mdb_entry->ports_list, list) {
+ if (mdb_entry_port->local_port == local_port)
+ return mdb_entry_port;
+ }
+
+ return NULL;
}
-static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
- u16 mid_idx, bool add)
+static struct mlxsw_sp_mdb_entry_port *
+mlxsw_sp_mdb_entry_port_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port)
{
- char *smid2_pl;
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
int err;
- smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
- if (!smid2_pl)
- return -ENOMEM;
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (mdb_entry_port) {
+ if (mdb_entry_port->mrouter &&
+ refcount_read(&mdb_entry_port->refcount) == 1)
+ mdb_entry->ports_count++;
- mlxsw_reg_smid2_pack(smid2_pl, mid_idx,
- mlxsw_sp_router_port(mlxsw_sp), add);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
- kfree(smid2_pl);
- return err;
+ refcount_inc(&mdb_entry_port->refcount);
+ return mdb_entry_port;
+ }
+
+ err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, true);
+ if (err)
+ return ERR_PTR(err);
+
+ mdb_entry_port = kzalloc(sizeof(*mdb_entry_port), GFP_KERNEL);
+ if (!mdb_entry_port) {
+ err = -ENOMEM;
+ goto err_mdb_entry_port_alloc;
+ }
+
+ mdb_entry_port->local_port = local_port;
+ refcount_set(&mdb_entry_port->refcount, 1);
+ list_add(&mdb_entry_port->list, &mdb_entry->ports_list);
+ mdb_entry->ports_count++;
+
+ return mdb_entry_port;
+
+err_mdb_entry_port_alloc:
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, false);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_mdb_entry_port_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port, bool force)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (!mdb_entry_port)
+ return;
+
+ if (!force && !refcount_dec_and_test(&mdb_entry_port->refcount)) {
+ if (mdb_entry_port->mrouter &&
+ refcount_read(&mdb_entry_port->refcount) == 1)
+ mdb_entry->ports_count--;
+ return;
+ }
+
+ mdb_entry->ports_count--;
+ list_del(&mdb_entry_port->list);
+ kfree(mdb_entry_port);
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, false);
+}
+
+static __always_unused struct mlxsw_sp_mdb_entry_port *
+mlxsw_sp_mdb_entry_mrouter_port_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+ int err;
+
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (mdb_entry_port) {
+ if (!mdb_entry_port->mrouter)
+ refcount_inc(&mdb_entry_port->refcount);
+ return mdb_entry_port;
+ }
+
+ err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, true);
+ if (err)
+ return ERR_PTR(err);
+
+ mdb_entry_port = kzalloc(sizeof(*mdb_entry_port), GFP_KERNEL);
+ if (!mdb_entry_port) {
+ err = -ENOMEM;
+ goto err_mdb_entry_port_alloc;
+ }
+
+ mdb_entry_port->local_port = local_port;
+ refcount_set(&mdb_entry_port->refcount, 1);
+ mdb_entry_port->mrouter = true;
+ list_add(&mdb_entry_port->list, &mdb_entry->ports_list);
+
+ return mdb_entry_port;
+
+err_mdb_entry_port_alloc:
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, false);
+ return ERR_PTR(err);
+}
+
+static __always_unused void
+mlxsw_sp_mdb_entry_mrouter_port_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ u16 local_port)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (!mdb_entry_port)
+ return;
+
+ if (!mdb_entry_port->mrouter)
+ return;
+
+ mdb_entry_port->mrouter = false;
+ if (!refcount_dec_and_test(&mdb_entry_port->refcount))
+ return;
+
+ list_del(&mdb_entry_port->list);
+ kfree(mdb_entry_port);
+ mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
+ mdb_entry->key.fid, local_port, false);
}
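
An mdb_entry_port can be referenced both by ordinary MDB memberships and by the multicast-router handling above, so its lifetime is managed with a refcount_t rather than ad hoc flags. A minimal sketch of the <linux/refcount.h> helpers used here (refcount_set() seeds the first reference, refcount_inc() adds one, refcount_dec_and_test() drops one and reports when the count reaches zero, refcount_read() only inspects the value):

	refcount_set(&mdb_entry_port->refcount, 1);	/* creation */
	refcount_inc(&mdb_entry_port->refcount);	/* another user appears */
	if (refcount_dec_and_test(&mdb_entry_port->refcount)) {
		list_del(&mdb_entry_port->list);	/* last user gone */
		kfree(mdb_entry_port);
	}
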
static void
@@ -898,10 +1136,17 @@ mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_device *bridge_device,
bool add)
{
- struct mlxsw_sp_mid *mid;
+ u16 local_port = mlxsw_sp_router_port(mlxsw_sp);
+ struct mlxsw_sp_mdb_entry *mdb_entry;
- list_for_each_entry(mid, &bridge_device->mids_list, list)
- mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
+ list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
+ if (add)
+ mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp, mdb_entry,
+ local_port);
+ else
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry,
+ local_port);
+ }
}
static int
@@ -1127,14 +1372,13 @@ mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
struct mlxsw_sp_bridge_vlan *bridge_vlan;
struct mlxsw_sp_bridge_port *bridge_port;
u16 vid = mlxsw_sp_port_vlan->vid;
- bool last_port, last_vlan;
+ bool last_port;
if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
return;
bridge_port = mlxsw_sp_port_vlan->bridge_port;
- last_vlan = list_is_singular(&bridge_port->vlans_list);
bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
last_port = list_is_singular(&bridge_vlan->port_vlan_list);
@@ -1146,8 +1390,9 @@ mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
bridge_port,
mlxsw_sp_fid_index(fid));
- if (last_vlan)
- mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);
+
+ mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port,
+ mlxsw_sp_fid_index(fid));
mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
@@ -1436,7 +1681,8 @@ static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
}
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
- const char *mac, u16 fid, bool adding,
+ const char *mac, u16 fid, u16 vid,
+ bool adding,
enum mlxsw_reg_sfd_rec_action action,
enum mlxsw_reg_sfd_rec_policy policy)
{
@@ -1449,7 +1695,8 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
return -ENOMEM;
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
- mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
+ mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, vid, action,
+ local_port);
num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
if (err)
@@ -1464,18 +1711,18 @@ out:
}
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
- const char *mac, u16 fid, bool adding,
- bool dynamic)
+ const char *mac, u16 fid, u16 vid,
+ bool adding, bool dynamic)
{
- return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
- MLXSW_REG_SFD_REC_ACTION_NOP,
+ return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, vid,
+ adding, MLXSW_REG_SFD_REC_ACTION_NOP,
mlxsw_sp_sfd_rec_policy(dynamic));
}
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
bool adding)
{
- return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
+ return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, 0, adding,
MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
}
@@ -1537,7 +1784,7 @@ mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_port->lagged)
return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
bridge_port->system_port,
- fdb_info->addr, fid_index,
+ fdb_info->addr, fid_index, vid,
adding, false);
else
return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
@@ -1546,8 +1793,9 @@ mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
vid, adding, false);
}
-static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
- u16 fid, u16 mid_idx, bool adding)
+static int mlxsw_sp_mdb_entry_write(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_mdb_entry *mdb_entry,
+ bool adding)
{
char *sfd_pl;
u8 num_rec;
@@ -1558,8 +1806,9 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
return -ENOMEM;
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
- mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
- MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
+ mlxsw_reg_sfd_mc_pack(sfd_pl, 0, mdb_entry->key.addr,
+ mdb_entry->key.fid, MLXSW_REG_SFD_REC_ACTION_NOP,
+ mdb_entry->mid);
num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
if (err)
@@ -1573,79 +1822,17 @@ out:
return err;
}
-static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
- long *ports_bitmap,
- bool set_router_port)
-{
- char *smid2_pl;
- int err, i;
-
- smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
- if (!smid2_pl)
- return -ENOMEM;
-
- mlxsw_reg_smid2_pack(smid2_pl, mid_idx, 0, false);
- for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
- if (mlxsw_sp->ports[i])
- mlxsw_reg_smid2_port_mask_set(smid2_pl, i, 1);
- }
-
- mlxsw_reg_smid2_port_mask_set(smid2_pl,
- mlxsw_sp_router_port(mlxsw_sp), 1);
-
- for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
- mlxsw_reg_smid2_port_set(smid2_pl, i, 1);
-
- mlxsw_reg_smid2_port_set(smid2_pl, mlxsw_sp_router_port(mlxsw_sp),
- set_router_port);
-
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
- kfree(smid2_pl);
- return err;
-}
-
-static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 mid_idx, bool add)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char *smid2_pl;
- int err;
-
- smid2_pl = kmalloc(MLXSW_REG_SMID2_LEN, GFP_KERNEL);
- if (!smid2_pl)
- return -ENOMEM;
-
- mlxsw_reg_smid2_pack(smid2_pl, mid_idx, mlxsw_sp_port->local_port, add);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid2), smid2_pl);
- kfree(smid2_pl);
- return err;
-}
-
-static struct
-mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
- const unsigned char *addr,
- u16 fid)
-{
- struct mlxsw_sp_mid *mid;
-
- list_for_each_entry(mid, &bridge_device->mids_list, list) {
- if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
- return mid;
- }
- return NULL;
-}
-
static void
mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_port *bridge_port,
- unsigned long *ports_bitmap)
+ struct mlxsw_sp_ports_bitmap *ports_bm)
{
struct mlxsw_sp_port *mlxsw_sp_port;
u64 max_lag_members, i;
int lag_id;
if (!bridge_port->lagged) {
- set_bit(bridge_port->system_port, ports_bitmap);
+ set_bit(bridge_port->system_port, ports_bm->bitmap);
} else {
max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
MAX_LAG_MEMBERS);
@@ -1655,13 +1842,13 @@ mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
lag_id, i);
if (mlxsw_sp_port)
set_bit(mlxsw_sp_port->local_port,
- ports_bitmap);
+ ports_bm->bitmap);
}
}
}
static void
-mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
+mlxsw_sp_mc_get_mrouters_bitmap(struct mlxsw_sp_ports_bitmap *flood_bm,
struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp *mlxsw_sp)
{
@@ -1671,116 +1858,226 @@ mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
if (bridge_port->mrouter) {
mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
bridge_port,
- flood_bitmap);
+ flood_bm);
}
}
}
-static bool
-mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_mid *mid,
- struct mlxsw_sp_bridge_device *bridge_device)
+static int mlxsw_sp_mc_mdb_mrouters_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ports_bitmap *ports_bm,
+ struct mlxsw_sp_mdb_entry *mdb_entry)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+ unsigned int nbits = ports_bm->nbits;
+ int i;
+
+ for_each_set_bit(i, ports_bm->bitmap, nbits) {
+ mdb_entry_port = mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp,
+ mdb_entry,
+ i);
+ if (IS_ERR(mdb_entry_port)) {
+ nbits = i;
+ goto err_mrouter_port_get;
+ }
+ }
+
+ return 0;
+
+err_mrouter_port_get:
+ for_each_set_bit(i, ports_bm->bitmap, nbits)
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry, i);
+ return PTR_ERR(mdb_entry_port);
+}
+
+static void mlxsw_sp_mc_mdb_mrouters_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ports_bitmap *ports_bm,
+ struct mlxsw_sp_mdb_entry *mdb_entry)
+{
+ int i;
+
+ for_each_set_bit(i, ports_bm->bitmap, ports_bm->nbits)
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry, i);
+}
+
+static int
+mlxsw_sp_mc_mdb_mrouters_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_mdb_entry *mdb_entry, bool add)
{
- long *flood_bitmap;
- int num_of_ports;
- u16 mid_idx;
+ struct mlxsw_sp_ports_bitmap ports_bm;
int err;
- mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
- MLXSW_SP_MID_MAX);
- if (mid_idx == MLXSW_SP_MID_MAX)
- return false;
+ err = mlxsw_sp_port_bitmap_init(mlxsw_sp, &ports_bm);
+ if (err)
+ return err;
- num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
- flood_bitmap = bitmap_alloc(num_of_ports, GFP_KERNEL);
- if (!flood_bitmap)
- return false;
+ mlxsw_sp_mc_get_mrouters_bitmap(&ports_bm, bridge_device, mlxsw_sp);
+
+ if (add)
+ err = mlxsw_sp_mc_mdb_mrouters_add(mlxsw_sp, &ports_bm,
+ mdb_entry);
+ else
+ mlxsw_sp_mc_mdb_mrouters_del(mlxsw_sp, &ports_bm, mdb_entry);
+
+ mlxsw_sp_port_bitmap_fini(&ports_bm);
+ return err;
+}
- bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
- mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);
+static struct mlxsw_sp_mdb_entry *
+mlxsw_sp_mc_mdb_entry_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ const unsigned char *addr, u16 fid, u16 local_port)
+{
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
+ struct mlxsw_sp_mdb_entry *mdb_entry;
+ int err;
+
+ mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL);
+ if (!mdb_entry)
+ return ERR_PTR(-ENOMEM);
- mid->mid = mid_idx;
- err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
- bridge_device->mrouter);
- bitmap_free(flood_bitmap);
+ ether_addr_copy(mdb_entry->key.addr, addr);
+ mdb_entry->key.fid = fid;
+ err = mlxsw_sp_pgt_mid_alloc(mlxsw_sp, &mdb_entry->mid);
if (err)
- return false;
+ goto err_pgt_mid_alloc;
+
+ INIT_LIST_HEAD(&mdb_entry->ports_list);
- err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
- true);
+ err = mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry,
+ true);
if (err)
- return false;
+ goto err_mdb_mrouters_set;
- set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
- mid->in_hw = true;
- return true;
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_get(mlxsw_sp, mdb_entry,
+ local_port);
+ if (IS_ERR(mdb_entry_port)) {
+ err = PTR_ERR(mdb_entry_port);
+ goto err_mdb_entry_port_get;
+ }
+
+ if (bridge_device->multicast_enabled) {
+ err = mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, true);
+ if (err)
+ goto err_mdb_entry_write;
+ }
+
+ err = rhashtable_insert_fast(&bridge_device->mdb_ht,
+ &mdb_entry->ht_node,
+ mlxsw_sp_mdb_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ list_add_tail(&mdb_entry->list, &bridge_device->mdb_list);
+
+ return mdb_entry;
+
+err_rhashtable_insert:
+ if (bridge_device->multicast_enabled)
+ mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, false);
+err_mdb_entry_write:
+ mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port, false);
+err_mdb_entry_port_get:
+ mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry, false);
+err_mdb_mrouters_set:
+ mlxsw_sp_pgt_mid_free(mlxsw_sp, mdb_entry->mid);
+err_pgt_mid_alloc:
+ kfree(mdb_entry);
+ return ERR_PTR(err);
}
-static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_mid *mid)
+static void
+mlxsw_sp_mc_mdb_entry_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mdb_entry *mdb_entry,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ u16 local_port, bool force)
{
- if (!mid->in_hw)
- return 0;
-
- clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
- mid->in_hw = false;
- return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
- false);
+ list_del(&mdb_entry->list);
+ rhashtable_remove_fast(&bridge_device->mdb_ht, &mdb_entry->ht_node,
+ mlxsw_sp_mdb_ht_params);
+ if (bridge_device->multicast_enabled)
+ mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, false);
+ mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port, force);
+ mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry, false);
+ WARN_ON(!list_empty(&mdb_entry->ports_list));
+ mlxsw_sp_pgt_mid_free(mlxsw_sp, mdb_entry->mid);
+ kfree(mdb_entry);
}
-static struct
-mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_bridge_device *bridge_device,
- const unsigned char *addr,
- u16 fid)
+static struct mlxsw_sp_mdb_entry *
+mlxsw_sp_mc_mdb_entry_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ const unsigned char *addr, u16 fid, u16 local_port)
{
- struct mlxsw_sp_mid *mid;
+ struct mlxsw_sp_mdb_entry_key key = {};
+ struct mlxsw_sp_mdb_entry *mdb_entry;
- mid = kzalloc(sizeof(*mid), GFP_KERNEL);
- if (!mid)
- return NULL;
+ ether_addr_copy(key.addr, addr);
+ key.fid = fid;
+ mdb_entry = rhashtable_lookup_fast(&bridge_device->mdb_ht, &key,
+ mlxsw_sp_mdb_ht_params);
+ if (mdb_entry) {
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
- mid->ports_in_mid = bitmap_zalloc(mlxsw_core_max_ports(mlxsw_sp->core),
- GFP_KERNEL);
- if (!mid->ports_in_mid)
- goto err_ports_in_mid_alloc;
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_get(mlxsw_sp,
+ mdb_entry,
+ local_port);
+ if (IS_ERR(mdb_entry_port))
+ return ERR_CAST(mdb_entry_port);
- ether_addr_copy(mid->addr, addr);
- mid->fid = fid;
- mid->in_hw = false;
+ return mdb_entry;
+ }
- if (!bridge_device->multicast_enabled)
- goto out;
+ return mlxsw_sp_mc_mdb_entry_init(mlxsw_sp, bridge_device, addr, fid,
+ local_port);
+}
- if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
- goto err_write_mdb_entry;
+static bool
+mlxsw_sp_mc_mdb_entry_remove(struct mlxsw_sp_mdb_entry *mdb_entry,
+ struct mlxsw_sp_mdb_entry_port *removed_entry_port,
+ bool force)
+{
+ if (mdb_entry->ports_count > 1)
+ return false;
-out:
- list_add_tail(&mid->list, &bridge_device->mids_list);
- return mid;
+ if (force)
+ return true;
-err_write_mdb_entry:
- bitmap_free(mid->ports_in_mid);
-err_ports_in_mid_alloc:
- kfree(mid);
- return NULL;
+ if (!removed_entry_port->mrouter &&
+ refcount_read(&removed_entry_port->refcount) > 1)
+ return false;
+
+ if (removed_entry_port->mrouter &&
+ refcount_read(&removed_entry_port->refcount) > 2)
+ return false;
+
+ return true;
}
-static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_mid *mid)
+static void
+mlxsw_sp_mc_mdb_entry_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_mdb_entry *mdb_entry, u16 local_port,
+ bool force)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- int err = 0;
+ struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
- clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
- if (bitmap_empty(mid->ports_in_mid,
- mlxsw_core_max_ports(mlxsw_sp->core))) {
- err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
- list_del(&mid->list);
- bitmap_free(mid->ports_in_mid);
- kfree(mid);
- }
- return err;
+ mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
+ if (!mdb_entry_port)
+ return;
+
+ /* Avoid a temporary situation in which the MDB entry points to an empty
+ * PGT entry, as otherwise packets will be temporarily dropped instead
+ * of being flooded. Instead, in this situation, call
+ * mlxsw_sp_mc_mdb_entry_fini(), which first deletes the MDB entry and
+ * then releases the PGT entry.
+ */
+ if (mlxsw_sp_mc_mdb_entry_remove(mdb_entry, mdb_entry_port, force))
+ mlxsw_sp_mc_mdb_entry_fini(mlxsw_sp, mdb_entry, bridge_device,
+ local_port, force);
+ else
+ mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port,
+ force);
}
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -1789,12 +2086,10 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct net_device *orig_dev = mdb->obj.orig_dev;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- struct net_device *dev = mlxsw_sp_port->dev;
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
- struct mlxsw_sp_mid *mid;
+ struct mlxsw_sp_mdb_entry *mdb_entry;
u16 fid_index;
- int err = 0;
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (!bridge_port)
@@ -1809,54 +2104,35 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
- mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
- if (!mid) {
- mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
- fid_index);
- if (!mid) {
- netdev_err(dev, "Unable to allocate MC group\n");
- return -ENOMEM;
- }
- }
- set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
-
- if (!bridge_device->multicast_enabled)
- return 0;
-
- if (bridge_port->mrouter)
- return 0;
-
- err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
- if (err) {
- netdev_err(dev, "Unable to set SMID\n");
- goto err_out;
- }
+ mdb_entry = mlxsw_sp_mc_mdb_entry_get(mlxsw_sp, bridge_device,
+ mdb->addr, fid_index,
+ mlxsw_sp_port->local_port);
+ if (IS_ERR(mdb_entry))
+ return PTR_ERR(mdb_entry);
return 0;
-
-err_out:
- mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
- return err;
}
-static void
-mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_bridge_device
- *bridge_device)
+static int
+mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ bool mc_enabled)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_mid *mid;
- bool mc_enabled;
-
- mc_enabled = bridge_device->multicast_enabled;
+ struct mlxsw_sp_mdb_entry *mdb_entry;
+ int err;
- list_for_each_entry(mid, &bridge_device->mids_list, list) {
- if (mc_enabled)
- mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
- bridge_device);
- else
- mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
+ list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
+ err = mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, mc_enabled);
+ if (err)
+ goto err_mdb_entry_write;
}
+ return 0;
+
+err_mdb_entry_write:
+ list_for_each_entry_continue_reverse(mdb_entry,
+ &bridge_device->mdb_list, list)
+ mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, !mc_enabled);
+ return err;
}
static void
@@ -1864,14 +2140,20 @@ mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port,
bool add)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
- struct mlxsw_sp_mid *mid;
+ u16 local_port = mlxsw_sp_port->local_port;
+ struct mlxsw_sp_mdb_entry *mdb_entry;
bridge_device = bridge_port->bridge_device;
- list_for_each_entry(mid, &bridge_device->mids_list, list) {
- if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
- mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
+ list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
+ if (add)
+ mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp, mdb_entry,
+ local_port);
+ else
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry,
+ local_port);
}
}
@@ -1949,28 +2231,6 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
-static int
-__mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_bridge_port *bridge_port,
- struct mlxsw_sp_mid *mid)
-{
- struct net_device *dev = mlxsw_sp_port->dev;
- int err;
-
- if (bridge_port->bridge_device->multicast_enabled &&
- !bridge_port->mrouter) {
- err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
- if (err)
- netdev_err(dev, "Unable to remove port from SMID\n");
- }
-
- err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
- if (err)
- netdev_err(dev, "Unable to remove MC SFD\n");
-
- return err;
-}
-
static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_mdb *mdb)
{
@@ -1980,7 +2240,8 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_device *bridge_device;
struct net_device *dev = mlxsw_sp_port->dev;
struct mlxsw_sp_bridge_port *bridge_port;
- struct mlxsw_sp_mid *mid;
+ struct mlxsw_sp_mdb_entry_key key = {};
+ struct mlxsw_sp_mdb_entry *mdb_entry;
u16 fid_index;
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
@@ -1996,32 +2257,44 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
- mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
- if (!mid) {
+ ether_addr_copy(key.addr, mdb->addr);
+ key.fid = fid_index;
+ mdb_entry = rhashtable_lookup_fast(&bridge_device->mdb_ht, &key,
+ mlxsw_sp_mdb_ht_params);
+ if (!mdb_entry) {
netdev_err(dev, "Unable to remove port from MC DB\n");
return -EINVAL;
}
- return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
+ mlxsw_sp_mc_mdb_entry_put(mlxsw_sp, bridge_device, mdb_entry,
+ mlxsw_sp_port->local_port, false);
+ return 0;
}
static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_bridge_port *bridge_port)
+ struct mlxsw_sp_bridge_port *bridge_port,
+ u16 fid_index)
{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
- struct mlxsw_sp_mid *mid, *tmp;
+ struct mlxsw_sp_mdb_entry *mdb_entry, *tmp;
+ u16 local_port = mlxsw_sp_port->local_port;
bridge_device = bridge_port->bridge_device;
- list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
- if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
- __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
- mid);
- } else if (bridge_device->multicast_enabled &&
- bridge_port->mrouter) {
- mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
- }
+ list_for_each_entry_safe(mdb_entry, tmp, &bridge_device->mdb_list,
+ list) {
+ if (mdb_entry->key.fid != fid_index)
+ continue;
+
+ if (bridge_port->mrouter)
+ mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp,
+ mdb_entry,
+ local_port);
+
+ mlxsw_sp_mc_mdb_entry_put(mlxsw_sp, bridge_device, mdb_entry,
+ local_port, true);
}
}
@@ -2633,10 +2906,9 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_port *mlxsw_sp_port;
+ u16 local_port, vid, fid, evid = 0;
enum switchdev_notifier_type type;
char mac[ETH_ALEN];
- u16 local_port;
- u16 vid, fid;
bool do_notification = true;
int err;
@@ -2667,9 +2939,10 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
bridge_device = bridge_port->bridge_device;
vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
+ evid = mlxsw_sp_port_vlan->vid;
do_fdb_op:
- err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
+ err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, evid,
adding, true);
if (err) {
dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
@@ -2729,8 +3002,7 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
bridge_device = bridge_port->bridge_device;
vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
- lag_vid = mlxsw_sp_fid_lag_vid_valid(mlxsw_sp_port_vlan->fid) ?
- mlxsw_sp_port_vlan->vid : 0;
+ lag_vid = mlxsw_sp_port_vlan->vid;
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index ed4d0d3448f3..f4bfdb6dab9c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -953,16 +953,16 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
.trap = MLXSW_SP_TRAP_CONTROL(ARP_REQUEST, NEIGH_DISCOVERY,
MIRROR),
.listeners_arr = {
- MLXSW_SP_RXL_MARK(ARPBC, NEIGH_DISCOVERY, MIRROR_TO_CPU,
- false),
+ MLXSW_SP_RXL_MARK(ROUTER_ARPBC, NEIGH_DISCOVERY,
+ TRAP_TO_CPU, false),
},
},
{
.trap = MLXSW_SP_TRAP_CONTROL(ARP_RESPONSE, NEIGH_DISCOVERY,
MIRROR),
.listeners_arr = {
- MLXSW_SP_RXL_MARK(ARPUC, NEIGH_DISCOVERY, MIRROR_TO_CPU,
- false),
+ MLXSW_SP_RXL_MARK(ROUTER_ARPUC, NEIGH_DISCOVERY,
+ TRAP_TO_CPU, false),
},
},
{
@@ -1298,8 +1298,8 @@ static int mlxsw_sp_trap_policers_init(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < trap->policers_count; i++) {
policer_item = &trap->policer_items_arr[i];
- err = devlink_trap_policers_register(devlink,
- &policer_item->policer, 1);
+ err = devl_trap_policers_register(devlink,
+ &policer_item->policer, 1);
if (err)
goto err_trap_policer_register;
}
@@ -1309,8 +1309,8 @@ static int mlxsw_sp_trap_policers_init(struct mlxsw_sp *mlxsw_sp)
err_trap_policer_register:
for (i--; i >= 0; i--) {
policer_item = &trap->policer_items_arr[i];
- devlink_trap_policers_unregister(devlink,
- &policer_item->policer, 1);
+ devl_trap_policers_unregister(devlink,
+ &policer_item->policer, 1);
}
mlxsw_sp_trap_policer_items_arr_fini(mlxsw_sp);
return err;
@@ -1325,8 +1325,8 @@ static void mlxsw_sp_trap_policers_fini(struct mlxsw_sp *mlxsw_sp)
for (i = trap->policers_count - 1; i >= 0; i--) {
policer_item = &trap->policer_items_arr[i];
- devlink_trap_policers_unregister(devlink,
- &policer_item->policer, 1);
+ devl_trap_policers_unregister(devlink,
+ &policer_item->policer, 1);
}
mlxsw_sp_trap_policer_items_arr_fini(mlxsw_sp);
}
@@ -1381,8 +1381,7 @@ static int mlxsw_sp_trap_groups_init(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < trap->groups_count; i++) {
group_item = &trap->group_items_arr[i];
- err = devlink_trap_groups_register(devlink, &group_item->group,
- 1);
+ err = devl_trap_groups_register(devlink, &group_item->group, 1);
if (err)
goto err_trap_group_register;
}
@@ -1392,7 +1391,7 @@ static int mlxsw_sp_trap_groups_init(struct mlxsw_sp *mlxsw_sp)
err_trap_group_register:
for (i--; i >= 0; i--) {
group_item = &trap->group_items_arr[i];
- devlink_trap_groups_unregister(devlink, &group_item->group, 1);
+ devl_trap_groups_unregister(devlink, &group_item->group, 1);
}
mlxsw_sp_trap_group_items_arr_fini(mlxsw_sp);
return err;
@@ -1408,7 +1407,7 @@ static void mlxsw_sp_trap_groups_fini(struct mlxsw_sp *mlxsw_sp)
const struct mlxsw_sp_trap_group_item *group_item;
group_item = &trap->group_items_arr[i];
- devlink_trap_groups_unregister(devlink, &group_item->group, 1);
+ devl_trap_groups_unregister(devlink, &group_item->group, 1);
}
mlxsw_sp_trap_group_items_arr_fini(mlxsw_sp);
}
@@ -1469,8 +1468,8 @@ static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < trap->traps_count; i++) {
trap_item = &trap->trap_items_arr[i];
- err = devlink_traps_register(devlink, &trap_item->trap, 1,
- mlxsw_sp);
+ err = devl_traps_register(devlink, &trap_item->trap, 1,
+ mlxsw_sp);
if (err)
goto err_trap_register;
}
@@ -1480,7 +1479,7 @@ static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
err_trap_register:
for (i--; i >= 0; i--) {
trap_item = &trap->trap_items_arr[i];
- devlink_traps_unregister(devlink, &trap_item->trap, 1);
+ devl_traps_unregister(devlink, &trap_item->trap, 1);
}
mlxsw_sp_trap_items_arr_fini(mlxsw_sp);
return err;
@@ -1496,7 +1495,7 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
const struct mlxsw_sp_trap_item *trap_item;
trap_item = &trap->trap_items_arr[i];
- devlink_traps_unregister(devlink, &trap_item->trap, 1);
+ devl_traps_unregister(devlink, &trap_item->trap, 1);
}
mlxsw_sp_trap_items_arr_fini(mlxsw_sp);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index d888498aed33..8da169663bda 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -27,8 +27,6 @@ enum {
MLXSW_TRAP_ID_PKT_SAMPLE = 0x38,
MLXSW_TRAP_ID_FID_MISS = 0x3D,
MLXSW_TRAP_ID_DECAP_ECN0 = 0x40,
- MLXSW_TRAP_ID_ARPBC = 0x50,
- MLXSW_TRAP_ID_ARPUC = 0x51,
MLXSW_TRAP_ID_MTUERROR = 0x52,
MLXSW_TRAP_ID_TTLERROR = 0x53,
MLXSW_TRAP_ID_LBERROR = 0x54,
@@ -71,6 +69,8 @@ enum {
MLXSW_TRAP_ID_IPV6_BFD = 0xD1,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
+ MLXSW_TRAP_ID_ROUTER_ARPBC = 0xE0,
+ MLXSW_TRAP_ID_ROUTER_ARPUC = 0xE1,
MLXSW_TRAP_ID_DISCARD_NON_ROUTABLE = 0x11A,
MLXSW_TRAP_ID_DISCARD_ROUTER2 = 0x130,
MLXSW_TRAP_ID_DISCARD_ROUTER3 = 0x131,