Diffstat (limited to 'drivers/net/ethernet/mellanox')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 72
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 47
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_main.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_port.c | 38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 138
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 227
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 698
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 344
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 132
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 32
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 39
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c | 64
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 46
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Makefile | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 30
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 230
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 620
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 46
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 57
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c | 34
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c | 174
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c | 265
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c | 292
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c | 105
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c | 206
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 253
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 824
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h | 107
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 45
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/switchib.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/trap.h | 1
73 files changed, 3723 insertions, 1907 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 1a0c3bf86ead..752a72499b4f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -156,57 +156,63 @@ static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
+ struct mlx4_en_port_profile *prof = priv->prof;
struct mlx4_en_dev *mdev = priv->mdev;
+ u8 tx_pause, tx_ppp, rx_pause, rx_ppp;
if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return 1;
if (priv->cee_config.pfc_state) {
int tc;
+ rx_ppp = prof->rx_ppp;
+ tx_ppp = prof->tx_ppp;
- priv->prof->rx_pause = 0;
- priv->prof->tx_pause = 0;
for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
u8 tc_mask = 1 << tc;
switch (priv->cee_config.dcb_pfc[tc]) {
case pfc_disabled:
- priv->prof->tx_ppp &= ~tc_mask;
- priv->prof->rx_ppp &= ~tc_mask;
+ tx_ppp &= ~tc_mask;
+ rx_ppp &= ~tc_mask;
break;
case pfc_enabled_full:
- priv->prof->tx_ppp |= tc_mask;
- priv->prof->rx_ppp |= tc_mask;
+ tx_ppp |= tc_mask;
+ rx_ppp |= tc_mask;
break;
case pfc_enabled_tx:
- priv->prof->tx_ppp |= tc_mask;
- priv->prof->rx_ppp &= ~tc_mask;
+ tx_ppp |= tc_mask;
+ rx_ppp &= ~tc_mask;
break;
case pfc_enabled_rx:
- priv->prof->tx_ppp &= ~tc_mask;
- priv->prof->rx_ppp |= tc_mask;
+ tx_ppp &= ~tc_mask;
+ rx_ppp |= tc_mask;
break;
default:
break;
}
}
- en_dbg(DRV, priv, "Set pfc on\n");
+ rx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->rx_pause;
+ tx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->tx_pause;
} else {
- priv->prof->rx_pause = 1;
- priv->prof->tx_pause = 1;
- en_dbg(DRV, priv, "Set pfc off\n");
+ rx_ppp = 0;
+ tx_ppp = 0;
+ rx_pause = prof->rx_pause;
+ tx_pause = prof->tx_pause;
}
if (mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_skb_size + ETH_FCS_LEN,
- priv->prof->tx_pause,
- priv->prof->tx_ppp,
- priv->prof->rx_pause,
- priv->prof->rx_ppp)) {
+ tx_pause, tx_ppp, rx_pause, rx_ppp)) {
en_err(priv, "Failed setting pause params\n");
return 1;
}
+ prof->tx_ppp = tx_ppp;
+ prof->rx_ppp = rx_ppp;
+ prof->tx_pause = tx_pause;
+ prof->rx_pause = rx_pause;
+
return 0;
}
@@ -408,6 +414,7 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_port_profile *prof = priv->prof;
struct mlx4_en_dev *mdev = priv->mdev;
+ u32 tx_pause, tx_ppp, rx_pause, rx_ppp;
int err;
en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
@@ -416,23 +423,26 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
pfc->mbc,
pfc->delay);
- prof->rx_pause = !pfc->pfc_en;
- prof->tx_pause = !pfc->pfc_en;
- prof->rx_ppp = pfc->pfc_en;
- prof->tx_ppp = pfc->pfc_en;
+ rx_pause = prof->rx_pause && !pfc->pfc_en;
+ tx_pause = prof->tx_pause && !pfc->pfc_en;
+ rx_ppp = pfc->pfc_en;
+ tx_ppp = pfc->pfc_en;
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_skb_size + ETH_FCS_LEN,
- prof->tx_pause,
- prof->tx_ppp,
- prof->rx_pause,
- prof->rx_ppp);
- if (err)
+ tx_pause, tx_ppp, rx_pause, rx_ppp);
+ if (err) {
en_err(priv, "Failed setting pause params\n");
- else
- mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
- prof->rx_ppp, prof->rx_pause,
- prof->tx_ppp, prof->tx_pause);
+ return err;
+ }
+
+ mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
+ rx_ppp, rx_pause, tx_ppp, tx_pause);
+
+ prof->tx_ppp = tx_ppp;
+ prof->rx_ppp = rx_ppp;
+ prof->rx_pause = rx_pause;
+ prof->tx_pause = tx_pause;
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index ebc1f566a4d9..a30a2e95d13f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -199,6 +199,10 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
"rx_xdp_drop",
"rx_xdp_tx",
"rx_xdp_tx_full",
+
+ /* phy statistics */
+ "rx_packets_phy", "rx_bytes_phy",
+ "tx_packets_phy", "tx_bytes_phy",
};
static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= {
@@ -411,6 +415,10 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
if (bitmap_iterator_test(&it))
data[index++] = ((unsigned long *)&priv->xdp_stats)[i];
+ for (i = 0; i < NUM_PHY_STATS; i++, bitmap_iterator_inc(&it))
+ if (bitmap_iterator_test(&it))
+ data[index++] = ((unsigned long *)&priv->phy_stats)[i];
+
for (i = 0; i < priv->tx_ring_num[TX]; i++) {
data[index++] = priv->tx_ring[TX][i]->packets;
data[index++] = priv->tx_ring[TX][i]->bytes;
@@ -490,6 +498,12 @@ static void mlx4_en_get_strings(struct net_device *dev,
strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[strings]);
+ for (i = 0; i < NUM_PHY_STATS; i++, strings++,
+ bitmap_iterator_inc(&it))
+ if (bitmap_iterator_test(&it))
+ strcpy(data + (index++) * ETH_GSTRING_LEN,
+ main_strings[strings]);
+
for (i = 0; i < priv->tx_ring_num[TX]; i++) {
sprintf(data + (index++) * ETH_GSTRING_LEN,
"tx%d_packets", i);
@@ -1046,27 +1060,32 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ u8 tx_pause, tx_ppp, rx_pause, rx_ppp;
int err;
if (pause->autoneg)
return -EINVAL;
- priv->prof->tx_pause = pause->tx_pause != 0;
- priv->prof->rx_pause = pause->rx_pause != 0;
+ tx_pause = !!(pause->tx_pause);
+ rx_pause = !!(pause->rx_pause);
+ rx_ppp = priv->prof->rx_ppp && !(tx_pause || rx_pause);
+ tx_ppp = priv->prof->tx_ppp && !(tx_pause || rx_pause);
+
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_skb_size + ETH_FCS_LEN,
- priv->prof->tx_pause,
- priv->prof->tx_ppp,
- priv->prof->rx_pause,
- priv->prof->rx_ppp);
- if (err)
- en_err(priv, "Failed setting pause params\n");
- else
- mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
- priv->prof->rx_ppp,
- priv->prof->rx_pause,
- priv->prof->tx_ppp,
- priv->prof->tx_pause);
+ tx_pause, tx_ppp, rx_pause, rx_ppp);
+ if (err) {
+ en_err(priv, "Failed setting pause params, err = %d\n", err);
+ return err;
+ }
+
+ mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
+ rx_ppp, rx_pause, tx_ppp, tx_pause);
+
+ priv->prof->tx_pause = tx_pause;
+ priv->prof->rx_pause = rx_pause;
+ priv->prof->tx_ppp = tx_ppp;
+ priv->prof->rx_ppp = rx_ppp;
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 2c2965497ed3..d25e16d2c319 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -163,9 +163,9 @@ static void mlx4_en_get_profile(struct mlx4_en_dev *mdev)
params->udp_rss = 0;
}
for (i = 1; i <= MLX4_MAX_PORTS; i++) {
- params->prof[i].rx_pause = 1;
+ params->prof[i].rx_pause = !(pfcrx || pfctx);
params->prof[i].rx_ppp = pfcrx;
- params->prof[i].tx_pause = 1;
+ params->prof[i].tx_pause = !(pfcrx || pfctx);
params->prof[i].tx_ppp = pfctx;
params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 8fc51bc29003..e0adac4a9a19 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -3256,6 +3256,10 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
bitmap_set(stats_bitmap->bitmap, last_i, NUM_XDP_STATS);
last_i += NUM_XDP_STATS;
+
+ if (!mlx4_is_slave(dev))
+ bitmap_set(stats_bitmap->bitmap, last_i, NUM_PHY_STATS);
+ last_i += NUM_PHY_STATS;
}
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -3630,10 +3634,6 @@ int mlx4_en_reset_config(struct net_device *dev,
mlx4_en_stop_port(dev, 1);
}
- en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
- ts_config.rx_filter,
- !!(features & NETIF_F_HW_VLAN_CTAG_RX));
-
mlx4_en_safe_replace_resources(priv, tmp);
if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 1fa4849a6f56..0158b88bea5b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -275,19 +275,31 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.xmit_more += READ_ONCE(ring->xmit_more);
}
- if (mlx4_is_master(mdev->dev)) {
- stats->rx_packets = en_stats_adder(&mlx4_en_stats->RTOT_prio_0,
- &mlx4_en_stats->RTOT_prio_1,
- NUM_PRIORITIES);
- stats->tx_packets = en_stats_adder(&mlx4_en_stats->TTOT_prio_0,
- &mlx4_en_stats->TTOT_prio_1,
- NUM_PRIORITIES);
- stats->rx_bytes = en_stats_adder(&mlx4_en_stats->ROCT_prio_0,
- &mlx4_en_stats->ROCT_prio_1,
- NUM_PRIORITIES);
- stats->tx_bytes = en_stats_adder(&mlx4_en_stats->TOCT_prio_0,
- &mlx4_en_stats->TOCT_prio_1,
- NUM_PRIORITIES);
+ if (!mlx4_is_slave(mdev->dev)) {
+ struct mlx4_en_phy_stats *p_stats = &priv->phy_stats;
+
+ p_stats->rx_packets_phy =
+ en_stats_adder(&mlx4_en_stats->RTOT_prio_0,
+ &mlx4_en_stats->RTOT_prio_1,
+ NUM_PRIORITIES);
+ p_stats->tx_packets_phy =
+ en_stats_adder(&mlx4_en_stats->TTOT_prio_0,
+ &mlx4_en_stats->TTOT_prio_1,
+ NUM_PRIORITIES);
+ p_stats->rx_bytes_phy =
+ en_stats_adder(&mlx4_en_stats->ROCT_prio_0,
+ &mlx4_en_stats->ROCT_prio_1,
+ NUM_PRIORITIES);
+ p_stats->tx_bytes_phy =
+ en_stats_adder(&mlx4_en_stats->TOCT_prio_0,
+ &mlx4_en_stats->TOCT_prio_1,
+ NUM_PRIORITIES);
+ if (mlx4_is_master(mdev->dev)) {
+ stats->rx_packets = p_stats->rx_packets_phy;
+ stats->tx_packets = p_stats->tx_packets_phy;
+ stats->rx_bytes = p_stats->rx_bytes_phy;
+ stats->tx_bytes = p_stats->tx_bytes_phy;
+ }
}
/* net device stats */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index b4d144e67514..5c613c6663da 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -291,13 +291,10 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
sizeof(struct mlx4_en_rx_alloc));
- ring->rx_info = vzalloc_node(tmp, node);
+ ring->rx_info = kvzalloc_node(tmp, GFP_KERNEL, node);
if (!ring->rx_info) {
- ring->rx_info = vzalloc(tmp);
- if (!ring->rx_info) {
- err = -ENOMEM;
- goto err_xdp_info;
- }
+ err = -ENOMEM;
+ goto err_xdp_info;
}
en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
@@ -318,7 +315,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
return 0;
err_info:
- vfree(ring->rx_info);
+ kvfree(ring->rx_info);
ring->rx_info = NULL;
err_xdp_info:
xdp_rxq_info_unreg(&ring->xdp_rxq);
@@ -447,7 +444,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
bpf_prog_put(old_prog);
xdp_rxq_info_unreg(&ring->xdp_rxq);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
- vfree(ring->rx_info);
+ kvfree(ring->rx_info);
ring->rx_info = NULL;
kfree(ring);
*pring = NULL;
@@ -649,6 +646,12 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
}
+#if IS_ENABLED(CONFIG_IPV6)
+#define MLX4_CQE_STATUS_IP_ANY (MLX4_CQE_STATUS_IPV4 | MLX4_CQE_STATUS_IPV6)
+#else
+#define MLX4_CQE_STATUS_IP_ANY (MLX4_CQE_STATUS_IPV4)
+#endif
+
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -662,12 +665,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
int polled = 0;
int index;
- if (unlikely(!priv->port_up))
+ if (unlikely(!priv->port_up || budget <= 0))
return 0;
- if (unlikely(budget <= 0))
- return polled;
-
ring = priv->rx_ring[cq_ring];
/* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
@@ -821,14 +821,12 @@ xdp_drop_no_cnt:
skb_record_rx_queue(skb, cq_ring);
if (likely(dev->features & NETIF_F_RXCSUM)) {
- if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
- MLX4_CQE_STATUS_UDP)) {
+ if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
+ MLX4_CQE_STATUS_UDP)) &&
+ (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
+ cqe->checksum == cpu_to_be16(0xffff)) {
bool l2_tunnel;
- if (!((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
- cqe->checksum == cpu_to_be16(0xffff)))
- goto csum_none;
-
l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
ip_summed = CHECKSUM_UNNECESSARY;
@@ -838,12 +836,7 @@ xdp_drop_no_cnt:
ring->csum_ok++;
} else {
if (!(priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
- (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
-#if IS_ENABLED(CONFIG_IPV6)
- MLX4_CQE_STATUS_IPV6))))
-#else
- 0))))
-#endif
+ (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IP_ANY))))
goto csum_none;
if (check_csum(cqe, skb, va, dev->features))
goto csum_none;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 958619ff24ae..5af50191c181 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2994,10 +2994,10 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
sprintf(info->dev_name, "mlx4_port%d", port);
info->port_attr.attr.name = info->dev_name;
- if (mlx4_is_mfunc(dev))
- info->port_attr.attr.mode = S_IRUGO;
- else {
- info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
+ if (mlx4_is_mfunc(dev)) {
+ info->port_attr.attr.mode = 0444;
+ } else {
+ info->port_attr.attr.mode = 0644;
info->port_attr.store = set_port_type;
}
info->port_attr.show = show_port_type;
@@ -3012,10 +3012,10 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
info->port_mtu_attr.attr.name = info->dev_mtu_name;
- if (mlx4_is_mfunc(dev))
- info->port_mtu_attr.attr.mode = S_IRUGO;
- else {
- info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR;
+ if (mlx4_is_mfunc(dev)) {
+ info->port_mtu_attr.attr.mode = 0444;
+ } else {
+ info->port_mtu_attr.attr.mode = 0644;
info->port_mtu_attr.store = set_port_ib_mtu;
}
info->port_mtu_attr.show = show_port_ib_mtu;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index f470ae37d937..f7c81133594f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -608,6 +608,7 @@ struct mlx4_en_priv {
struct mlx4_en_flow_stats_tx tx_flowstats;
struct mlx4_en_port_stats port_stats;
struct mlx4_en_xdp_stats xdp_stats;
+ struct mlx4_en_phy_stats phy_stats;
struct mlx4_en_stats_bitmap stats_bitmap;
struct list_head mc_list;
struct list_head curr_list;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
index aab28eb27a30..86b6051da8ec 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -63,6 +63,14 @@ struct mlx4_en_xdp_stats {
#define NUM_XDP_STATS 3
};
+struct mlx4_en_phy_stats {
+ unsigned long rx_packets_phy;
+ unsigned long rx_bytes_phy;
+ unsigned long tx_packets_phy;
+ unsigned long tx_bytes_phy;
+#define NUM_PHY_STATS 4
+};
+
#define NUM_MAIN_STATS 21
#define MLX4_NUM_PRIORITIES 8
@@ -116,7 +124,7 @@ enum {
#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + \
NUM_FLOW_STATS + NUM_PERF_STATS + NUM_PF_STATS + \
- NUM_XDP_STATS)
+ NUM_XDP_STATS + NUM_PHY_STATS)
#define MLX4_FIND_NETDEV_STAT(n) (offsetof(struct net_device_stats, n) / \
sizeof(((struct net_device_stats *)0)->n))
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 606a0e0beeae..29e50f787349 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -5088,6 +5088,7 @@ static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_FS_RULE]);
list_del(&fs_rule->com.list);
spin_unlock_irq(mlx4_tlock(dev));
+ kfree(fs_rule->mirr_mbox);
kfree(fs_rule);
state = 0;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 25deaa5a534c..c032319f1cb9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -46,7 +46,7 @@ config MLX5_MPFS
config MLX5_ESWITCH
bool "Mellanox Technologies MLX5 SRIOV E-Switch support"
- depends on MLX5_CORE_EN
+ depends on MLX5_CORE_EN && NET_SWITCHDEV
default y
---help---
Mellanox Technologies Ethernet SRIOV E-Switch support in ConnectX NIC.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index e9a1fbcc4adf..21cd1703a862 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -359,6 +359,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
+ case MLX5_CMD_OP_QUERY_VNIC_ENV:
case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
case MLX5_CMD_OP_ALLOC_Q_COUNTER:
case MLX5_CMD_OP_QUERY_Q_COUNTER:
@@ -501,6 +502,7 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
+ MLX5_COMMAND_STR_CASE(QUERY_VNIC_ENV);
MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
@@ -1802,7 +1804,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
cmd->checksum_disabled = 1;
cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
- cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
+ cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;
cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
if (cmd->cmdif_rev > CMD_IF_REV) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
index 3816b4506561..d93ff567b40d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
@@ -96,10 +96,10 @@ static void print_lyr_2_4_hdrs(struct trace_seq *p,
"%pI4");
} else if (ethertype.v == ETH_P_IPV6) {
static const struct in6_addr full_ones = {
- .in6_u.u6_addr32 = {htonl(0xffffffff),
- htonl(0xffffffff),
- htonl(0xffffffff),
- htonl(0xffffffff)},
+ .in6_u.u6_addr32 = {__constant_htonl(0xffffffff),
+ __constant_htonl(0xffffffff),
+ __constant_htonl(0xffffffff),
+ __constant_htonl(0xffffffff)},
};
DECLARE_MASK_VAL(struct in6_addr, src_ipv6);
DECLARE_MASK_VAL(struct in6_addr, dst_ipv6);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
index a6ba57fbb414..09f178a3fcab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
@@ -136,6 +136,8 @@ TRACE_EVENT(mlx5_fs_del_fg,
{MLX5_FLOW_CONTEXT_ACTION_ENCAP, "ENCAP"},\
{MLX5_FLOW_CONTEXT_ACTION_DECAP, "DECAP"},\
{MLX5_FLOW_CONTEXT_ACTION_MOD_HDR, "MOD_HDR"},\
+ {MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH, "VLAN_PUSH"},\
+ {MLX5_FLOW_CONTEXT_ACTION_VLAN_POP, "VLAN_POP"},\
{MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO, "NEXT_PRIO"}
TRACE_EVENT(mlx5_fs_set_fte,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 4c9360b25532..30cad07be2b5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -57,24 +57,12 @@
#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
-#define MLX5E_HW2SW_MTU(priv, hwmtu) ((hwmtu) - ((priv)->hard_mtu))
-#define MLX5E_SW2HW_MTU(priv, swmtu) ((swmtu) + ((priv)->hard_mtu))
+#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
+#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
#define MLX5E_MAX_DSCP 64
#define MLX5E_MAX_NUM_TC 8
-#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
-#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
-#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd
-
-#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x1
-#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
-#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
-
-#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
-#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x3
-#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6
-
#define MLX5_RX_HEADROOM NET_SKB_PAD
#define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
@@ -93,15 +81,31 @@
#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
-#define MLX5_MPWRQ_STRIDES_PER_PAGE (MLX5_MPWRQ_NUM_STRIDES >> \
- MLX5_MPWRQ_WQE_PAGE_ORDER)
#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
-#define MLX5E_REQUIRED_MTTS(wqes) \
- (wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
-#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX)
+#define MLX5E_REQUIRED_WQE_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
+#define MLX5E_LOG_ALIGNED_MPWQE_PPW (ilog2(MLX5E_REQUIRED_WQE_MTTS))
+#define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS)
+#define MLX5E_MAX_RQ_NUM_MTTS \
+ ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
+#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \
+ (ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
+#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
+ (MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
+ (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))
+
+#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
+#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
+#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd
+
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x1
+#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
+ MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)
+
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
-#define MLX5_UMR_ALIGN (2048)
#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (256)
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
@@ -124,9 +128,15 @@
#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
+#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
-#define MLX5E_ICOSQ_MAX_WQEBBS \
- (DIV_ROUND_UP(sizeof(struct mlx5e_umr_wqe), MLX5_SEND_WQE_BB))
+#define MLX5E_UMR_WQE_INLINE_SZ \
+ (sizeof(struct mlx5e_umr_wqe) + \
+ ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \
+ MLX5_UMR_MTT_ALIGNMENT))
+#define MLX5E_UMR_WQEBBS \
+ (DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
+#define MLX5E_ICOSQ_MAX_WQEBBS MLX5E_UMR_WQEBBS
#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
#define MLX5E_XDP_TX_DS_COUNT \
@@ -156,26 +166,6 @@ static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
}
}
-static inline int mlx5_min_log_rq_size(int wq_type)
-{
- switch (wq_type) {
- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
- default:
- return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
- }
-}
-
-static inline int mlx5_max_log_rq_size(int wq_type)
-{
- switch (wq_type) {
- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW;
- default:
- return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
- }
-}
-
static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
return is_kdump_kernel() ?
@@ -198,7 +188,7 @@ struct mlx5e_umr_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_umr_ctrl_seg uctrl;
struct mlx5_mkey_seg mkc;
- struct mlx5_wqe_data_seg data;
+ struct mlx5_mtt inline_mtts[0];
};
extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
@@ -207,12 +197,14 @@ static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
"rx_cqe_moder",
"tx_cqe_moder",
"rx_cqe_compress",
+ "rx_striding_rq",
};
enum mlx5e_priv_flag {
MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1),
MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2),
+ MLX5E_PFLAG_RX_STRIDING_RQ = (1 << 3),
};
#define MLX5E_SET_PFLAG(params, pflag, enable) \
@@ -232,10 +224,7 @@ enum mlx5e_priv_flag {
struct mlx5e_params {
u8 log_sq_size;
u8 rq_wq_type;
- u16 rq_headroom;
- u8 mpwqe_log_stride_sz;
- u8 mpwqe_log_num_strides;
- u8 log_rq_size;
+ u8 log_rq_mtu_frames;
u16 num_channels;
u8 num_tc;
bool rx_cqe_compress_def;
@@ -243,7 +232,6 @@ struct mlx5e_params {
struct net_dim_cq_moder tx_cq_moderation;
bool lro_en;
u32 lro_wqe_sz;
- u16 tx_max_inline;
u8 tx_min_inline_mode;
u8 rss_hfunc;
u8 toeplitz_hash_key[40];
@@ -254,6 +242,8 @@ struct mlx5e_params {
u32 lro_timeout;
u32 pflags;
struct bpf_prog *xdp_prog;
+ unsigned int sw_mtu;
+ int hard_mtu;
};
#ifdef CONFIG_MLX5_CORE_EN_DCB
@@ -336,6 +326,7 @@ struct mlx5e_sq_dma {
enum {
MLX5E_SQ_STATE_ENABLED,
+ MLX5E_SQ_STATE_RECOVERING,
MLX5E_SQ_STATE_IPSEC,
};
@@ -369,7 +360,6 @@ struct mlx5e_txqsq {
void __iomem *uar_map;
struct netdev_queue *txq;
u32 sqn;
- u16 max_inline;
u8 min_inline_mode;
u16 edge;
struct device *pdev;
@@ -383,6 +373,10 @@ struct mlx5e_txqsq {
struct mlx5e_channel *channel;
int txq_ix;
u32 rate_limit;
+ struct mlx5e_txqsq_recover {
+ struct work_struct recover_work;
+ u64 last_recover;
+ } recover;
} ____cacheline_aligned_in_smp;
struct mlx5e_xdpsq {
@@ -432,7 +426,6 @@ struct mlx5e_icosq {
void __iomem *uar_map;
u32 sqn;
u16 edge;
- __be32 mkey_be;
unsigned long state;
/* control path */
@@ -457,16 +450,13 @@ struct mlx5e_wqe_frag_info {
};
struct mlx5e_umr_dma_info {
- __be64 *mtt;
- dma_addr_t mtt_addr;
struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
- struct mlx5e_umr_wqe wqe;
};
struct mlx5e_mpw_info {
struct mlx5e_umr_dma_info umr;
u16 consumed_strides;
- u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
+ DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
};
/* a single cache unit is capable to serve one napi call (for non-striding rq)
@@ -483,9 +473,16 @@ struct mlx5e_page_cache {
struct mlx5e_rq;
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
+typedef struct sk_buff *
+(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
+ u16 cqe_bcnt, u32 head_offset, u32 page_idx);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
+enum mlx5e_rq_flag {
+ MLX5E_RQ_FLAG_XDP_XMIT = BIT(0),
+};
+
struct mlx5e_rq {
/* data path */
struct mlx5_wq_ll wq;
@@ -496,12 +493,12 @@ struct mlx5e_rq {
u32 frag_sz; /* max possible skb frag_sz */
union {
bool page_reuse;
- bool xdp_xmit;
};
} wqe;
struct {
+ struct mlx5e_umr_wqe umr_wqe;
struct mlx5e_mpw_info *info;
- void *mtt_no_align;
+ mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
u16 num_strides;
u8 log_stride_sz;
bool umr_in_progress;
@@ -533,7 +530,9 @@ struct mlx5e_rq {
/* XDP */
struct bpf_prog *xdp_prog;
+ unsigned int hw_mtu;
struct mlx5e_xdpsq xdpsq;
+ DECLARE_BITMAP(flags, 8);
/* control */
struct mlx5_wq_ctrl wq_ctrl;
@@ -766,7 +765,6 @@ struct mlx5e_priv {
struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
u32 tx_rates[MLX5E_MAX_NUM_SQS];
- int hard_mtu;
struct mlx5e_flow_steering fs;
struct mlx5e_vxlan_db vxlan;
@@ -781,7 +779,8 @@ struct mlx5e_priv {
struct net_device *netdev;
struct mlx5e_stats stats;
struct hwtstamp_config tstamp;
- u16 q_counter;
+ u16 q_counter;
+ u16 drop_rq_q_counter;
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx dcbx;
#endif
@@ -831,6 +830,10 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
+bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
+bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params);
+
void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
bool recycle);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
@@ -840,6 +843,12 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
+struct sk_buff *
+mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
+ u16 cqe_bcnt, u32 head_offset, u32 page_idx);
+struct sk_buff *
+mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
+ u16 cqe_bcnt, u32 head_offset, u32 page_idx);
void mlx5e_update_stats(struct mlx5e_priv *priv);
@@ -916,9 +925,9 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
u8 cq_period_mode);
+void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params,
- u8 rq_type);
+ struct mlx5e_params *params);
static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
{
@@ -970,11 +979,6 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}
-static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
-{
- return wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
-}
-
extern const struct ethtool_ops mlx5e_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
@@ -1010,7 +1014,6 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
#endif
-u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
int mlx5e_create_tir(struct mlx5_core_dev *mdev,
struct mlx5e_tir *tir, u32 *in, int inlen);
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
@@ -1061,7 +1064,6 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
void mlx5e_update_stats_work(struct work_struct *work);
-u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
int mlx5e_bits_invert(unsigned long a, int size);
@@ -1102,7 +1104,7 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- u16 max_channels);
+ u16 max_channels, u16 mtu);
u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
void mlx5e_rx_dim_work(struct work_struct *work);
#endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index cc8048f68f11..37fd0245b6c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -203,9 +203,6 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
{
int i, idx = 0;
- if (!data)
- return;
-
mutex_lock(&priv->state_lock);
mlx5e_update_stats(priv);
mutex_unlock(&priv->state_lock);
@@ -223,60 +220,12 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
}
-static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type,
- int num_wqe)
-{
- int packets_per_wqe;
- int stride_size;
- int num_strides;
- int wqe_size;
-
- if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
- return num_wqe;
-
- stride_size = 1 << priv->channels.params.mpwqe_log_stride_sz;
- num_strides = 1 << priv->channels.params.mpwqe_log_num_strides;
- wqe_size = stride_size * num_strides;
-
- packets_per_wqe = wqe_size /
- ALIGN(ETH_DATA_LEN, stride_size);
- return (1 << (order_base_2(num_wqe * packets_per_wqe) - 1));
-}
-
-static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type,
- int num_packets)
-{
- int packets_per_wqe;
- int stride_size;
- int num_strides;
- int wqe_size;
- int num_wqes;
-
- if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
- return num_packets;
-
- stride_size = 1 << priv->channels.params.mpwqe_log_stride_sz;
- num_strides = 1 << priv->channels.params.mpwqe_log_num_strides;
- wqe_size = stride_size * num_strides;
-
- num_packets = (1 << order_base_2(num_packets));
-
- packets_per_wqe = wqe_size /
- ALIGN(ETH_DATA_LEN, stride_size);
- num_wqes = DIV_ROUND_UP(num_packets, packets_per_wqe);
- return 1 << (order_base_2(num_wqes));
-}
-
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
struct ethtool_ringparam *param)
{
- int rq_wq_type = priv->channels.params.rq_wq_type;
-
- param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
- 1 << mlx5_max_log_rq_size(rq_wq_type));
+ param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
- param->rx_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
- 1 << priv->channels.params.log_rq_size);
+ param->rx_pending = 1 << priv->channels.params.log_rq_mtu_frames;
param->tx_pending = 1 << priv->channels.params.log_sq_size;
}
@@ -291,13 +240,9 @@ static void mlx5e_get_ringparam(struct net_device *dev,
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
struct ethtool_ringparam *param)
{
- int rq_wq_type = priv->channels.params.rq_wq_type;
struct mlx5e_channels new_channels = {};
- u32 rx_pending_wqes;
- u32 min_rq_size;
u8 log_rq_size;
u8 log_sq_size;
- u32 num_mtts;
int err = 0;
if (param->rx_jumbo_pending) {
@@ -311,23 +256,10 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
return -EINVAL;
}
- min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
- 1 << mlx5_min_log_rq_size(rq_wq_type));
- rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type,
- param->rx_pending);
-
- if (param->rx_pending < min_rq_size) {
+ if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
netdev_info(priv->netdev, "%s: rx_pending (%d) < min (%d)\n",
__func__, param->rx_pending,
- min_rq_size);
- return -EINVAL;
- }
-
- num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes);
- if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
- !MLX5E_VALID_NUM_MTTS(num_mtts)) {
- netdev_info(priv->netdev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
- __func__, param->rx_pending);
+ 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
return -EINVAL;
}
@@ -338,17 +270,17 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
return -EINVAL;
}
- log_rq_size = order_base_2(rx_pending_wqes);
+ log_rq_size = order_base_2(param->rx_pending);
log_sq_size = order_base_2(param->tx_pending);
- if (log_rq_size == priv->channels.params.log_rq_size &&
+ if (log_rq_size == priv->channels.params.log_rq_mtu_frames &&
log_sq_size == priv->channels.params.log_sq_size)
return 0;
mutex_lock(&priv->state_lock);
new_channels.params = priv->channels.params;
- new_channels.params.log_rq_size = log_rq_size;
+ new_channels.params.log_rq_mtu_frames = log_rq_size;
new_channels.params.log_sq_size = log_sq_size;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
@@ -477,6 +409,9 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
return mlx5e_ethtool_get_coalesce(priv, coal);
}
+#define MLX5E_MAX_COAL_TIME MLX5_MAX_CQ_PERIOD
+#define MLX5E_MAX_COAL_FRAMES MLX5_MAX_CQ_COUNT
+
static void
mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
{
@@ -511,6 +446,20 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
if (!MLX5_CAP_GEN(mdev, cq_moderation))
return -EOPNOTSUPP;
+ if (coal->tx_coalesce_usecs > MLX5E_MAX_COAL_TIME ||
+ coal->rx_coalesce_usecs > MLX5E_MAX_COAL_TIME) {
+ netdev_info(priv->netdev, "%s: maximum coalesce time supported is %lu usecs\n",
+ __func__, MLX5E_MAX_COAL_TIME);
+ return -ERANGE;
+ }
+
+ if (coal->tx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES ||
+ coal->rx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES) {
+ netdev_info(priv->netdev, "%s: maximum coalesced frames supported is %lu\n",
+ __func__, MLX5E_MAX_COAL_FRAMES);
+ return -ERANGE;
+ }
+
mutex_lock(&priv->state_lock);
new_channels.params = priv->channels.params;
@@ -1066,16 +1015,66 @@ static int mlx5e_get_rxnfc(struct net_device *netdev,
return err;
}
+#define MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC 100
+#define MLX5E_PFC_PREVEN_TOUT_MAX_MSEC 8000
+#define MLX5E_PFC_PREVEN_MINOR_PRECENT 85
+#define MLX5E_PFC_PREVEN_TOUT_MIN_MSEC 80
+#define MLX5E_DEVICE_STALL_MINOR_WATERMARK(critical_tout) \
+ max_t(u16, MLX5E_PFC_PREVEN_TOUT_MIN_MSEC, \
+ (critical_tout * MLX5E_PFC_PREVEN_MINOR_PRECENT) / 100)
+
+static int mlx5e_get_pfc_prevention_tout(struct net_device *netdev,
+ u16 *pfc_prevention_tout)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) ||
+ !MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
+ return -EOPNOTSUPP;
+
+ return mlx5_query_port_stall_watermark(mdev, pfc_prevention_tout, NULL);
+}
+
+static int mlx5e_set_pfc_prevention_tout(struct net_device *netdev,
+ u16 pfc_preven)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u16 critical_tout;
+ u16 minor;
+
+ if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) ||
+ !MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
+ return -EOPNOTSUPP;
+
+ critical_tout = (pfc_preven == PFC_STORM_PREVENTION_AUTO) ?
+ MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC :
+ pfc_preven;
+
+ if (critical_tout != PFC_STORM_PREVENTION_DISABLE &&
+ (critical_tout > MLX5E_PFC_PREVEN_TOUT_MAX_MSEC ||
+ critical_tout < MLX5E_PFC_PREVEN_TOUT_MIN_MSEC)) {
+ netdev_info(netdev, "%s: pfc prevention tout not in range (%d-%d)\n",
+ __func__, MLX5E_PFC_PREVEN_TOUT_MIN_MSEC,
+ MLX5E_PFC_PREVEN_TOUT_MAX_MSEC);
+ return -EINVAL;
+ }
+
+ minor = MLX5E_DEVICE_STALL_MINOR_WATERMARK(critical_tout);
+ return mlx5_set_port_stall_watermark(mdev, critical_tout,
+ minor);
+}
+
static int mlx5e_get_tunable(struct net_device *dev,
const struct ethtool_tunable *tuna,
void *data)
{
- const struct mlx5e_priv *priv = netdev_priv(dev);
- int err = 0;
+ int err;
switch (tuna->id) {
- case ETHTOOL_TX_COPYBREAK:
- *(u32 *)data = priv->channels.params.tx_max_inline;
+ case ETHTOOL_PFC_PREVENTION_TOUT:
+ err = mlx5e_get_pfc_prevention_tout(dev, data);
break;
default:
err = -EINVAL;
@@ -1090,34 +1089,13 @@ static int mlx5e_set_tunable(struct net_device *dev,
const void *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_channels new_channels = {};
- int err = 0;
- u32 val;
+ int err;
mutex_lock(&priv->state_lock);
switch (tuna->id) {
- case ETHTOOL_TX_COPYBREAK:
- val = *(u32 *)data;
- if (val > mlx5e_get_max_inline_cap(mdev)) {
- err = -EINVAL;
- break;
- }
-
- new_channels.params = priv->channels.params;
- new_channels.params.tx_max_inline = val;
-
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- priv->channels.params = new_channels.params;
- break;
- }
-
- err = mlx5e_open_channels(priv, &new_channels);
- if (err)
- break;
- mlx5e_switch_priv_channels(priv, &new_channels, NULL);
-
+ case ETHTOOL_PFC_PREVENTION_TOUT:
+ err = mlx5e_set_pfc_prevention_tout(dev, *(u16 *)data);
break;
default:
err = -EINVAL;
@@ -1507,11 +1485,6 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
new_channels.params = priv->channels.params;
MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
- new_channels.params.mpwqe_log_stride_sz =
- MLX5E_MPWQE_STRIDE_SZ(priv->mdev, new_val);
- new_channels.params.mpwqe_log_num_strides =
- MLX5_MPWRQ_LOG_WQE_SZ - new_channels.params.mpwqe_log_stride_sz;
-
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
return 0;
@@ -1549,6 +1522,38 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
return 0;
}
+static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_channels new_channels = {};
+ int err;
+
+ if (enable) {
+ if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
+ return -EOPNOTSUPP;
+ if (!mlx5e_striding_rq_possible(mdev, &priv->channels.params))
+ return -EINVAL;
+ }
+
+ new_channels.params = priv->channels.params;
+
+ MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_STRIDING_RQ, enable);
+ mlx5e_set_rq_type(mdev, &new_channels.params);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ priv->channels.params = new_channels.params;
+ return 0;
+ }
+
+ err = mlx5e_open_channels(priv, &new_channels);
+ if (err)
+ return err;
+
+ mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+ return 0;
+}
+
static int mlx5e_handle_pflag(struct net_device *netdev,
u32 wanted_flags,
enum mlx5e_priv_flag flag,
@@ -1594,6 +1599,12 @@ static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags)
err = mlx5e_handle_pflag(netdev, pflags,
MLX5E_PFLAG_RX_CQE_COMPRESS,
set_pflag_rx_cqe_compress);
+ if (err)
+ goto out;
+
+ err = mlx5e_handle_pflag(netdev, pflags,
+ MLX5E_PFLAG_RX_STRIDING_RQ,
+ set_pflag_rx_striding_rq);
out:
mutex_unlock(&priv->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 7bafa78a6c37..d68f51084f0c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -71,56 +71,145 @@ struct mlx5e_channel_param {
struct mlx5e_cq_param icosq_cq;
};
-static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
+bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
- return MLX5_CAP_GEN(mdev, striding_rq) &&
+ bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) &&
MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
MLX5_CAP_ETH(mdev, reg_umr_sq);
+ u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq);
+ bool inline_umr = MLX5E_UMR_WQE_INLINE_SZ <= max_wqe_sz_cap;
+
+ if (!striding_rq_umr)
+ return false;
+ if (!inline_umr) {
+ mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n",
+ (int)MLX5E_UMR_WQE_INLINE_SZ, max_wqe_sz_cap);
+ return false;
+ }
+ return true;
+}
+
+static u32 mlx5e_mpwqe_get_linear_frag_sz(struct mlx5e_params *params)
+{
+ if (!params->xdp_prog) {
+ u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ u16 rq_headroom = MLX5_RX_HEADROOM + NET_IP_ALIGN;
+
+ return MLX5_SKB_FRAG_SZ(rq_headroom + hw_mtu);
+ }
+
+ return PAGE_SIZE;
+}
+
+static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params)
+{
+ u32 linear_frag_sz = mlx5e_mpwqe_get_linear_frag_sz(params);
+
+ return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
+}
+
+static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ u32 frag_sz = mlx5e_mpwqe_get_linear_frag_sz(params);
+ s8 signed_log_num_strides_param;
+ u8 log_num_strides;
+
+ if (params->lro_en || frag_sz > PAGE_SIZE)
+ return false;
+
+ if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
+ return true;
+
+ log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(frag_sz);
+ signed_log_num_strides_param =
+ (s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
+
+ return signed_log_num_strides_param >= 0;
+}
+
+static u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params)
+{
+ if (params->log_rq_mtu_frames <
+ mlx5e_mpwqe_log_pkts_per_wqe(params) + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
+ return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
+
+ return params->log_rq_mtu_frames - mlx5e_mpwqe_log_pkts_per_wqe(params);
+}
+
+static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
+ return order_base_2(mlx5e_mpwqe_get_linear_frag_sz(params));
+
+ return MLX5E_MPWQE_STRIDE_SZ(mdev,
+ MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
+}
+
+static u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ return MLX5_MPWRQ_LOG_WQE_SZ -
+ mlx5e_mpwqe_get_log_stride_size(mdev, params);
+}
+
+static u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ u16 linear_rq_headroom = params->xdp_prog ?
+ XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
+
+ linear_rq_headroom += NET_IP_ALIGN;
+
+ if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST)
+ return linear_rq_headroom;
+
+ if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
+ return linear_rq_headroom;
+
+ return 0;
}
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params, u8 rq_type)
+ struct mlx5e_params *params)
{
- params->rq_wq_type = rq_type;
params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+ params->log_rq_mtu_frames = is_kdump_kernel() ?
+ MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
+ MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- params->log_rq_size = is_kdump_kernel() ?
- MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
- MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
- params->mpwqe_log_stride_sz = MLX5E_MPWQE_STRIDE_SZ(mdev,
- MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
- params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
- params->mpwqe_log_stride_sz;
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
- params->log_rq_size = is_kdump_kernel() ?
- MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
- MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
- params->rq_headroom = params->xdp_prog ?
- XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
- params->rq_headroom += NET_IP_ALIGN;
-
/* Extra room needed for build_skb */
- params->lro_wqe_sz -= params->rq_headroom +
+ params->lro_wqe_sz -= mlx5e_get_rq_headroom(mdev, params) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
- BIT(params->log_rq_size),
- BIT(params->mpwqe_log_stride_sz),
+ params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
+ BIT(mlx5e_mpwqe_get_log_rq_size(params)) :
+ BIT(params->log_rq_mtu_frames),
+ BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params)),
MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}
-static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev,
+bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
{
- u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
- !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ?
- MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
- MLX5_WQ_TYPE_LINKED_LIST;
- mlx5e_init_rq_type_params(mdev, params, rq_type);
+ return mlx5e_check_fragmented_striding_rq_cap(mdev) &&
+ !MLX5_IPSEC_DEV(mdev) &&
+ !(params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params));
+}
+
+void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
+{
+ params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
+ MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
+ MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
+ MLX5_WQ_TYPE_LINKED_LIST;
}
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
@@ -153,26 +242,6 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
mutex_unlock(&priv->state_lock);
}
-static void mlx5e_tx_timeout_work(struct work_struct *work)
-{
- struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
- tx_timeout_work);
- int err;
-
- rtnl_lock();
- mutex_lock(&priv->state_lock);
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- goto unlock;
- mlx5e_close_locked(priv->netdev);
- err = mlx5e_open_locked(priv->netdev);
- if (err)
- netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
- err);
-unlock:
- mutex_unlock(&priv->state_lock);
- rtnl_unlock();
-}
-
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
int i;
@@ -235,107 +304,38 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
synchronize_irq(pci_irq_vector(priv->mdev->pdev, MLX5_EQ_VEC_ASYNC));
}
-static inline int mlx5e_get_wqe_mtt_sz(void)
-{
- /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
- * To avoid copying garbage after the mtt array, we allocate
- * a little more.
- */
- return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
- MLX5_UMR_MTT_ALIGNMENT);
-}
-
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
struct mlx5e_icosq *sq,
- struct mlx5e_umr_wqe *wqe,
- u16 ix)
+ struct mlx5e_umr_wqe *wqe)
{
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
- struct mlx5_wqe_data_seg *dseg = &wqe->data;
- struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
- u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
- u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);
+ u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS);
cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
ds_cnt);
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
cseg->imm = rq->mkey_be;
- ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
+ ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
ucseg->xlt_octowords =
cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
- ucseg->bsf_octowords =
- cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
-
- dseg->lkey = sq->mkey_be;
- dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
}
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
struct mlx5e_channel *c)
{
int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
- int mtt_sz = mlx5e_get_wqe_mtt_sz();
- int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
- int i;
rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
GFP_KERNEL, cpu_to_node(c->cpu));
if (!rq->mpwqe.info)
- goto err_out;
-
- /* We allocate more than mtt_sz as we will align the pointer */
- rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
- cpu_to_node(c->cpu));
- if (unlikely(!rq->mpwqe.mtt_no_align))
- goto err_free_wqe_info;
-
- for (i = 0; i < wq_sz; i++) {
- struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
-
- wi->umr.mtt = PTR_ALIGN(rq->mpwqe.mtt_no_align + i * mtt_alloc,
- MLX5_UMR_ALIGN);
- wi->umr.mtt_addr = dma_map_single(c->pdev, wi->umr.mtt, mtt_sz,
- PCI_DMA_TODEVICE);
- if (unlikely(dma_mapping_error(c->pdev, wi->umr.mtt_addr)))
- goto err_unmap_mtts;
+ return -ENOMEM;
- mlx5e_build_umr_wqe(rq, &c->icosq, &wi->umr.wqe, i);
- }
+ mlx5e_build_umr_wqe(rq, &c->icosq, &rq->mpwqe.umr_wqe);
return 0;
-
-err_unmap_mtts:
- while (--i >= 0) {
- struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
-
- dma_unmap_single(c->pdev, wi->umr.mtt_addr, mtt_sz,
- PCI_DMA_TODEVICE);
- }
- kfree(rq->mpwqe.mtt_no_align);
-err_free_wqe_info:
- kfree(rq->mpwqe.info);
-
-err_out:
- return -ENOMEM;
-}
-
-static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
-{
- int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
- int mtt_sz = mlx5e_get_wqe_mtt_sz();
- int i;
-
- for (i = 0; i < wq_sz; i++) {
- struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
-
- dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz,
- PCI_DMA_TODEVICE);
- }
- kfree(rq->mpwqe.mtt_no_align);
- kfree(rq->mpwqe.info);
}
static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
@@ -347,9 +347,6 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
u32 *in;
int err;
- if (!MLX5E_VALID_NUM_MTTS(npages))
- return -EINVAL;
-
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -382,6 +379,11 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq
return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
}
+static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
+{
+ return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT;
+}
+
static int mlx5e_alloc_rq(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_rq_param *rqp,
@@ -415,6 +417,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->channel = c;
rq->ix = c->ix;
rq->mdev = mdev;
+ rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
if (IS_ERR(rq->xdp_prog)) {
@@ -428,11 +431,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_rq_wq_destroy;
rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
- rq->buff.headroom = params->rq_headroom;
+ rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params);
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-
rq->post_wqes = mlx5e_post_rx_mpwqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
@@ -450,8 +452,12 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_rq_wq_destroy;
}
- rq->mpwqe.log_stride_sz = params->mpwqe_log_stride_sz;
- rq->mpwqe.num_strides = BIT(params->mpwqe_log_num_strides);
+ rq->mpwqe.skb_from_cqe_mpwrq =
+ mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ?
+ mlx5e_skb_from_cqe_mpwrq_linear :
+ mlx5e_skb_from_cqe_mpwrq_nonlinear;
+ rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params);
+ rq->mpwqe.num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params));
byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
@@ -490,7 +496,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
byte_count = params->lro_en ?
params->lro_wqe_sz :
- MLX5E_SW2HW_MTU(c->priv, c->netdev->mtu);
+ MLX5E_SW2HW_MTU(params, params->sw_mtu);
#ifdef CONFIG_MLX5_EN_IPSEC
if (MLX5_IPSEC_DEV(mdev))
byte_count += MLX5E_METADATA_ETHER_LEN;
@@ -510,9 +516,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
- u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, i) << PAGE_SHIFT;
+ u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);
- wqe->data.addr = cpu_to_be64(dma_offset);
+ wqe->data.addr = cpu_to_be64(dma_offset + rq->buff.headroom);
}
wqe->data.byte_count = cpu_to_be32(byte_count);
@@ -558,7 +564,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- mlx5e_rq_free_mpwqe_info(rq);
+ kfree(rq->mpwqe.info);
mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
@@ -615,8 +621,7 @@ static int mlx5e_create_rq(struct mlx5e_rq *rq,
static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
int next_state)
{
- struct mlx5e_channel *c = rq->channel;
- struct mlx5_core_dev *mdev = c->mdev;
+ struct mlx5_core_dev *mdev = rq->mdev;
void *in;
void *rqc;
@@ -898,7 +903,6 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
struct mlx5_core_dev *mdev = c->mdev;
int err;
- sq->mkey_be = c->mkey_be;
sq->channel = c;
sq->uar_map = mdev->mlx5e_res.bfreg.map;
@@ -953,6 +957,7 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
return 0;
}
+static void mlx5e_sq_recover(struct work_struct *work);
static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
int txq_ix,
struct mlx5e_params *params,
@@ -970,8 +975,8 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->channel = c;
sq->txq_ix = txq_ix;
sq->uar_map = mdev->mlx5e_res.bfreg.map;
- sq->max_inline = params->tx_max_inline;
sq->min_inline_mode = params->tx_min_inline_mode;
+ INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover);
if (MLX5_IPSEC_DEV(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
@@ -1038,6 +1043,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.bfreg.index);
@@ -1156,9 +1162,20 @@ err_free_txqsq:
return err;
}
+static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
+{
+ WARN_ONCE(sq->cc != sq->pc,
+ "SQ 0x%x: cc (0x%x) != pc (0x%x)\n",
+ sq->sqn, sq->cc, sq->pc);
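+ /* Bring the SW producer/consumer counters back to 0 to match the freshly reset HW SQ */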
+ sq->cc = 0;
+ sq->dma_fifo_cc = 0;
+ sq->pc = 0;
+}
+
static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
{
sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
+ clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
netdev_tx_reset_queue(sq->txq);
netif_tx_start_queue(sq->txq);
@@ -1206,6 +1223,107 @@ static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
mlx5e_free_txqsq(sq);
}
+static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
+{
+ unsigned long exp_time = jiffies + msecs_to_jiffies(2000);
+
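+ /* Poll until the consumer counter (cc) catches up with the producer (pc),
+ * i.e. all posted WQEs have completed.
+ */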
+ while (time_before(jiffies, exp_time)) {
+ if (sq->cc == sq->pc)
+ return 0;
+
+ msleep(20);
+ }
+
+ netdev_err(sq->channel->netdev,
+ "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n",
+ sq->sqn, sq->cc, sq->pc);
+
+ return -ETIMEDOUT;
+}
+
+static int mlx5e_sq_to_ready(struct mlx5e_txqsq *sq, int curr_state)
+{
+ struct mlx5_core_dev *mdev = sq->channel->mdev;
+ struct net_device *dev = sq->channel->netdev;
+ struct mlx5e_modify_sq_param msp = {0};
+ int err;
+
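+ /* Move the SQ from its current (error) state to RST, then from RST to RDY */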
+ msp.curr_state = curr_state;
+ msp.next_state = MLX5_SQC_STATE_RST;
+
+ err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
+ if (err) {
+ netdev_err(dev, "Failed to move sq 0x%x to reset\n", sq->sqn);
+ return err;
+ }
+
+ memset(&msp, 0, sizeof(msp));
+ msp.curr_state = MLX5_SQC_STATE_RST;
+ msp.next_state = MLX5_SQC_STATE_RDY;
+
+ err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
+ if (err) {
+ netdev_err(dev, "Failed to move sq 0x%x to ready\n", sq->sqn);
+ return err;
+ }
+
+ return 0;
+}
+
+static void mlx5e_sq_recover(struct work_struct *work)
+{
+ struct mlx5e_txqsq_recover *recover =
+ container_of(work, struct mlx5e_txqsq_recover,
+ recover_work);
+ struct mlx5e_txqsq *sq = container_of(recover, struct mlx5e_txqsq,
+ recover);
+ struct mlx5_core_dev *mdev = sq->channel->mdev;
+ struct net_device *dev = sq->channel->netdev;
+ u8 state;
+ int err;
+
+ err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
+ if (err) {
+ netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
+ sq->sqn, err);
+ return;
+ }
+
+ if (state != MLX5_RQC_STATE_ERR) {
+ netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn);
+ return;
+ }
+
+ netif_tx_disable_queue(sq->txq);
+
+ if (mlx5e_wait_for_sq_flush(sq))
+ return;
+
+ /* If the interval between two consecutive recoveries per SQ is too
+ * short, don't recover, to avoid an infinite loop of ERR_CQE -> recover.
+ * If we reached this state, there is probably a bug that needs to be
+ * fixed. Let's keep the queue closed and let the TX timeout clean up.
+ */
+ if (jiffies_to_msecs(jiffies - recover->last_recover) <
+ MLX5E_SQ_RECOVER_MIN_INTERVAL) {
+ netdev_err(dev, "Recover SQ 0x%x canceled, too many error CQEs\n",
+ sq->sqn);
+ return;
+ }
+
+ /* At this point, no new packets will arrive from the stack as TXQ is
+ * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all
+ * pending WQEs. We can now safely reset the SQ.
+ */
+ if (mlx5e_sq_to_ready(sq, state))
+ return;
+
+ mlx5e_reset_txqsq_cc_pc(sq);
+ sq->stats.recover++;
+ recover->last_recover = jiffies;
+ mlx5e_activate_txqsq(sq);
+}
+
static int mlx5e_open_icosq(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_sq_param *param,
@@ -1750,38 +1868,49 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
struct mlx5e_params *params,
struct mlx5e_rq_param *param)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
void *rqc = param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- MLX5_SET(wq, wq, log_wqe_num_of_strides, params->mpwqe_log_num_strides - 9);
- MLX5_SET(wq, wq, log_wqe_stride_size, params->mpwqe_log_stride_sz - 6);
+ MLX5_SET(wq, wq, log_wqe_num_of_strides,
+ mlx5e_mpwqe_get_log_num_strides(mdev, params) -
+ MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
+ MLX5_SET(wq, wq, log_wqe_stride_size,
+ mlx5e_mpwqe_get_log_stride_size(mdev, params) -
+ MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
+ MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params));
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+ MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
}
MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
- MLX5_SET(wq, wq, log_wq_sz, params->log_rq_size);
- MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
+ MLX5_SET(wq, wq, pd, mdev->mlx5e_res.pdn);
MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
- param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
+ param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
param->wq.linear = 1;
}
-static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
+static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
+ struct mlx5e_rq_param *param)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
void *rqc = param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
+ MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);
+
+ param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
}
static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
@@ -1820,15 +1949,17 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
struct mlx5e_params *params,
struct mlx5e_cq_param *param)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
void *cqc = param->cqc;
u8 log_cq_size;
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- log_cq_size = params->log_rq_size + params->mpwqe_log_num_strides;
+ log_cq_size = mlx5e_mpwqe_get_log_rq_size(params) +
+ mlx5e_mpwqe_get_log_num_strides(mdev, params);
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
- log_cq_size = params->log_rq_size;
+ log_cq_size = params->log_rq_mtu_frames;
}
MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
@@ -2379,10 +2510,10 @@ static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true);
}
-static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
+static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params, u16 mtu)
{
- struct mlx5_core_dev *mdev = priv->mdev;
- u16 hw_mtu = MLX5E_SW2HW_MTU(priv, mtu);
+ u16 hw_mtu = MLX5E_SW2HW_MTU(params, mtu);
int err;
err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
@@ -2394,9 +2525,9 @@ static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
return 0;
}
-static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
+static void mlx5e_query_mtu(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params, u16 *mtu)
{
- struct mlx5_core_dev *mdev = priv->mdev;
u16 hw_mtu = 0;
int err;
@@ -2404,25 +2535,27 @@ static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
if (err || !hw_mtu) /* fallback to port oper mtu */
mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
- *mtu = MLX5E_HW2SW_MTU(priv, hw_mtu);
+ *mtu = MLX5E_HW2SW_MTU(params, hw_mtu);
}
static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
{
+ struct mlx5e_params *params = &priv->channels.params;
struct net_device *netdev = priv->netdev;
+ struct mlx5_core_dev *mdev = priv->mdev;
u16 mtu;
int err;
- err = mlx5e_set_mtu(priv, netdev->mtu);
+ err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
if (err)
return err;
- mlx5e_query_mtu(priv, &mtu);
- if (mtu != netdev->mtu)
+ mlx5e_query_mtu(mdev, params, &mtu);
+ if (mtu != params->sw_mtu)
netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
- __func__, mtu, netdev->mtu);
+ __func__, mtu, params->sw_mtu);
- netdev->mtu = mtu;
+ params->sw_mtu = mtu;
return 0;
}
@@ -2576,6 +2709,9 @@ int mlx5e_open(struct net_device *netdev)
mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
mutex_unlock(&priv->state_lock);
+ if (mlx5e_vxlan_allowed(priv->mdev))
+ udp_tunnel_get_rx_info(netdev);
+
return err;
}
@@ -2641,18 +2777,22 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
struct mlx5e_cq *cq,
struct mlx5e_cq_param *param)
{
+ param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
+ param->wq.db_numa_node = dev_to_node(&mdev->pdev->dev);
+
return mlx5e_alloc_cq_common(mdev, param, cq);
}
-static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
+static int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
struct mlx5e_rq *drop_rq)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_cq_param cq_param = {};
struct mlx5e_rq_param rq_param = {};
struct mlx5e_cq *cq = &drop_rq->cq;
int err;
- mlx5e_build_drop_rq_param(&rq_param);
+ mlx5e_build_drop_rq_param(priv, &rq_param);
err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
if (err)
@@ -2670,6 +2810,10 @@ static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
if (err)
goto err_free_rq;
+ err = mlx5e_modify_rq_state(drop_rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+ if (err)
+ mlx5_core_warn(priv->mdev, "modify_rq_state failed, rx_if_down_packets won't be counted %d\n", err);
+
return 0;
err_free_rq:
@@ -3001,8 +3145,8 @@ static int mlx5e_setup_tc_block(struct net_device *dev,
}
#endif
-int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
- void *type_data)
+static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
{
switch (type) {
#ifdef CONFIG_MLX5_ESWITCH
@@ -3095,20 +3239,28 @@ typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
static int set_feature_lro(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_channels new_channels = {};
+ struct mlx5e_params *old_params;
int err = 0;
bool reset;
mutex_lock(&priv->state_lock);
- reset = (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST);
- reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
+ old_params = &priv->channels.params;
+ reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
- new_channels.params = priv->channels.params;
+ new_channels.params = *old_params;
new_channels.params.lro_en = enable;
+ if (old_params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) {
+ if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params) ==
+ mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params))
+ reset = false;
+ }
+
if (!reset) {
- priv->channels.params = new_channels.params;
+ *old_params = new_channels.params;
err = mlx5e_modify_tirs_lro(priv);
goto out;
}
@@ -3237,24 +3389,20 @@ static int mlx5e_set_features(struct net_device *netdev,
netdev_features_t features)
{
netdev_features_t oper_features = netdev->features;
- int err;
+ int err = 0;
- err = mlx5e_handle_feature(netdev, &oper_features, features,
- NETIF_F_LRO, set_feature_lro);
- err |= mlx5e_handle_feature(netdev, &oper_features, features,
- NETIF_F_HW_VLAN_CTAG_FILTER,
+#define MLX5E_HANDLE_FEATURE(feature, handler) \
+ mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)
+
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
set_feature_cvlan_filter);
- err |= mlx5e_handle_feature(netdev, &oper_features, features,
- NETIF_F_HW_TC, set_feature_tc_num_filters);
- err |= mlx5e_handle_feature(netdev, &oper_features, features,
- NETIF_F_RXALL, set_feature_rx_all);
- err |= mlx5e_handle_feature(netdev, &oper_features, features,
- NETIF_F_RXFCS, set_feature_rx_fcs);
- err |= mlx5e_handle_feature(netdev, &oper_features, features,
- NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
#ifdef CONFIG_RFS_ACCEL
- err |= mlx5e_handle_feature(netdev, &oper_features, features,
- NETIF_F_NTUPLE, set_feature_arfs);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
#endif
if (err) {
@@ -3288,34 +3436,40 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_channels new_channels = {};
- int curr_mtu;
+ struct mlx5e_params *params;
int err = 0;
bool reset;
mutex_lock(&priv->state_lock);
- reset = !priv->channels.params.lro_en &&
- (priv->channels.params.rq_wq_type !=
- MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
+ params = &priv->channels.params;
+ reset = !params->lro_en;
reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
- curr_mtu = netdev->mtu;
- netdev->mtu = new_mtu;
+ new_channels.params = *params;
+ new_channels.params.sw_mtu = new_mtu;
+
+ if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) {
+ u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
+ u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
+
+ reset = reset && (ppw_old != ppw_new);
+ }
if (!reset) {
+ params->sw_mtu = new_mtu;
mlx5e_set_dev_port_mtu(priv);
+ netdev->mtu = params->sw_mtu;
goto out;
}
- new_channels.params = priv->channels.params;
err = mlx5e_open_channels(priv, &new_channels);
- if (err) {
- netdev->mtu = curr_mtu;
+ if (err)
goto out;
- }
mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_set_dev_port_mtu);
+ netdev->mtu = new_channels.params.sw_mtu;
out:
mutex_unlock(&priv->state_lock);
@@ -3605,21 +3759,11 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
struct mlx5e_txqsq *sq)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5_core_dev *mdev = priv->mdev;
- int irqn_not_used, eqn;
- struct mlx5_eq *eq;
+ struct mlx5_eq *eq = sq->cq.mcq.eq;
u32 eqe_count;
- if (mlx5_vector2eqn(mdev, sq->cq.mcq.vector, &eqn, &irqn_not_used))
- return false;
-
- eq = mlx5_eqn2eq(mdev, eqn);
- if (IS_ERR(eq))
- return false;
-
netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
- eqn, eq->cons_index, eq->irqn);
+ eq->eqn, eq->cons_index, eq->irqn);
eqe_count = mlx5_eq_poll_irq_disabled(eq);
if (!eqe_count)
@@ -3630,13 +3774,19 @@ static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
return true;
}
-static void mlx5e_tx_timeout(struct net_device *dev)
+static void mlx5e_tx_timeout_work(struct work_struct *work)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ tx_timeout_work);
+ struct net_device *dev = priv->netdev;
bool reopen_channels = false;
- int i;
+ int i, err;
- netdev_err(dev, "TX timeout detected\n");
+ rtnl_lock();
+ mutex_lock(&priv->state_lock);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto unlock;
for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, i);
@@ -3644,7 +3794,9 @@ static void mlx5e_tx_timeout(struct net_device *dev)
if (!netif_xmit_stopped(dev_queue))
continue;
- netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
+
+ netdev_err(dev,
+ "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
jiffies_to_usecs(jiffies - dev_queue->trans_start));
@@ -3657,8 +3809,27 @@ static void mlx5e_tx_timeout(struct net_device *dev)
}
}
- if (reopen_channels && test_bit(MLX5E_STATE_OPENED, &priv->state))
- schedule_work(&priv->tx_timeout_work);
+ if (!reopen_channels)
+ goto unlock;
+
+ mlx5e_close_locked(dev);
+ err = mlx5e_open_locked(dev);
+ if (err)
+ netdev_err(priv->netdev,
+ "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
+ err);
+
+unlock:
+ mutex_unlock(&priv->state_lock);
+ rtnl_unlock();
+}
+
+static void mlx5e_tx_timeout(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ netdev_err(dev, "TX timeout detected\n");
+ queue_work(priv->wq, &priv->tx_timeout_work);
}
static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
@@ -3708,7 +3879,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
bpf_prog_put(old_prog);
if (reset) /* change RQ type according to priv->xdp_prog */
- mlx5e_set_rq_params(priv->mdev, &priv->channels.params);
+ mlx5e_set_rq_type(priv->mdev, &priv->channels.params);
if (was_opened && reset)
mlx5e_open_locked(netdev);
@@ -3853,15 +4024,6 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
return 0;
}
-u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
-{
- int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
-
- return bf_buf_size -
- sizeof(struct mlx5e_tx_wqe) +
- 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
-}
-
void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
int num_channels)
{
@@ -3901,16 +4063,20 @@ static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
return 0;
}
-static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
+static bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
{
- return (link_speed && pci_bw &&
- (pci_bw < 40000) && (pci_bw < link_speed));
-}
+ u32 link_speed = 0;
+ u32 pci_bw = 0;
-static bool hw_lro_heuristic(u32 link_speed, u32 pci_bw)
-{
- return !(link_speed && pci_bw &&
- (pci_bw <= 16000) && (pci_bw < link_speed));
+ mlx5e_get_max_linkspeed(mdev, &link_speed);
+ mlx5e_get_pci_bw(mdev, &pci_bw);
+ mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
+ link_speed, pci_bw);
+
+#define MLX5E_SLOW_PCI_RATIO (2)
+
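+ /* The PCI link is considered a bottleneck when the port speed exceeds
+ * MLX5E_SLOW_PCI_RATIO times the measured PCI bandwidth.
+ */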
+ return link_speed && pci_bw &&
+ link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
@@ -3962,7 +4128,7 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}
-u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
+static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{
int i;
@@ -3976,20 +4142,15 @@ u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- u16 max_channels)
+ u16 max_channels, u16 mtu)
{
- u8 cq_period_mode = 0;
- u32 link_speed = 0;
- u32 pci_bw = 0;
+ u8 rx_cq_period_mode;
+ params->sw_mtu = mtu;
+ params->hard_mtu = MLX5E_ETH_HARD_MTU;
params->num_channels = max_channels;
params->num_tc = 1;
- mlx5e_get_max_linkspeed(mdev, &link_speed);
- mlx5e_get_pci_bw(mdev, &pci_bw);
- mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
- link_speed, pci_bw);
-
/* SQ */
params->log_sq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
@@ -3999,30 +4160,34 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
params->rx_cqe_compress_def = false;
if (MLX5_CAP_GEN(mdev, cqe_compression) &&
MLX5_CAP_GEN(mdev, vport_group_manager))
- params->rx_cqe_compress_def = cqe_compress_heuristic(link_speed, pci_bw);
+ params->rx_cqe_compress_def = slow_pci_heuristic(mdev);
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
/* RQ */
- mlx5e_set_rq_params(mdev, params);
+ if (mlx5e_striding_rq_possible(mdev, params))
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ,
+ !slow_pci_heuristic(mdev));
+ mlx5e_set_rq_type(mdev, params);
+ mlx5e_init_rq_type_params(mdev, params);
/* HW LRO */
/* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
- params->lro_en = hw_lro_heuristic(link_speed, pci_bw);
+ if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
+ params->lro_en = !slow_pci_heuristic(mdev);
params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
/* CQ moderation params */
- cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
+ rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
- mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
- mlx5e_set_tx_cq_mode_params(params, cq_period_mode);
+ mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
+ mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
/* TX inline */
- params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);
/* RSS */
@@ -4044,9 +4209,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
priv->profile = profile;
priv->ppriv = ppriv;
priv->msglevel = MLX5E_MSG_LEVEL;
- priv->hard_mtu = MLX5E_ETH_HARD_MTU;
- mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
+ mlx5e_build_nic_params(mdev, &priv->channels.params,
+ profile->max_nch(mdev), netdev->mtu);
mutex_init(&priv->state_lock);
@@ -4070,7 +4235,7 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
}
}
-#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH)
+#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
static const struct switchdev_ops mlx5e_switchdev_ops = {
.switchdev_port_attr_get = mlx5e_attr_get,
};
@@ -4105,6 +4270,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->vlan_features |= NETIF_F_RXCSUM;
netdev->vlan_features |= NETIF_F_RXHASH;
+ netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX;
+ netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX;
+
if (!!MLX5_CAP_ETH(mdev, lro_cap))
netdev->vlan_features |= NETIF_F_LRO;
@@ -4176,7 +4344,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
mlx5e_set_netdev_dev_addr(netdev);
-#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH)
+#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
if (MLX5_VPORT_MANAGER(mdev))
netdev->switchdev_ops = &mlx5e_switchdev_ops;
#endif
@@ -4184,7 +4352,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
mlx5e_ipsec_build_netdev(priv);
}
-static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
+static void mlx5e_create_q_counters(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
int err;
@@ -4194,14 +4362,21 @@ static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
priv->q_counter = 0;
}
+
+ err = mlx5_core_alloc_q_counter(mdev, &priv->drop_rq_q_counter);
+ if (err) {
+ mlx5_core_warn(mdev, "alloc drop RQ counter failed, %d\n", err);
+ priv->drop_rq_q_counter = 0;
+ }
}
-static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
+static void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
{
- if (!priv->q_counter)
- return;
+ if (priv->q_counter)
+ mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
- mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
+ if (priv->drop_rq_q_counter)
+ mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter);
}
static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
@@ -4313,7 +4488,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
/* MTU range: 68 - hw-specific max */
netdev->min_mtu = ETH_MIN_MTU;
mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
- netdev->max_mtu = MLX5E_HW2SW_MTU(priv, max_mtu);
+ netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
mlx5e_set_dev_port_mtu(priv);
mlx5_lag_add(mdev, netdev);
@@ -4328,12 +4503,6 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_init_app(priv);
#endif
- /* Device already registered: sync netdev system state */
- if (mlx5e_vxlan_allowed(mdev)) {
- rtnl_lock();
- udp_tunnel_get_rx_info(netdev);
- rtnl_unlock();
- }
queue_work(priv->wq, &priv->set_rx_mode_work);
@@ -4440,18 +4609,18 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
if (err)
goto out;
- err = mlx5e_open_drop_rq(mdev, &priv->drop_rq);
+ mlx5e_create_q_counters(priv);
+
+ err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
if (err) {
mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
- goto err_cleanup_tx;
+ goto err_destroy_q_counters;
}
err = profile->init_rx(priv);
if (err)
goto err_close_drop_rq;
- mlx5e_create_q_counter(priv);
-
if (profile->enable)
profile->enable(priv);
@@ -4460,7 +4629,8 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
-err_cleanup_tx:
+err_destroy_q_counters:
+ mlx5e_destroy_q_counters(priv);
profile->cleanup_tx(priv);
out:
@@ -4477,9 +4647,9 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
profile->disable(priv);
flush_workqueue(priv->wq);
- mlx5e_destroy_q_counter(priv);
profile->cleanup_rx(priv);
mlx5e_close_drop_rq(&priv->drop_rq);
+ mlx5e_destroy_q_counters(priv);
profile->cleanup_tx(priv);
cancel_delayed_work_sync(&priv->update_stats_work);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index ea4b255380a2..d8f68e4d1018 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -44,6 +44,11 @@
#include "en_tc.h"
#include "fs_core.h"
+#define MLX5E_REP_PARAMS_LOG_SQ_SIZE \
+ max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
+#define MLX5E_REP_PARAMS_LOG_RQ_SIZE \
+ max(0x6, MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)
+
static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
static void mlx5e_rep_get_drvinfo(struct net_device *dev,
@@ -209,7 +214,7 @@ static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep,
- u16 *sqns_array, int sqns_num)
+ u32 *sqns_array, int sqns_num)
{
struct mlx5_flow_handle *flow_rule;
struct mlx5e_rep_priv *rpriv;
@@ -255,9 +260,9 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
struct mlx5e_channel *c;
int n, tc, num_sqs = 0;
int err = -ENOMEM;
- u16 *sqs;
+ u32 *sqs;
- sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(u16), GFP_KERNEL);
+ sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(*sqs), GFP_KERNEL);
if (!sqs)
goto out;
@@ -288,7 +293,7 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
{
#if IS_ENABLED(CONFIG_IPV6)
- unsigned long ipv6_interval = NEIGH_VAR(&ipv6_stub->nd_tbl->parms,
+ unsigned long ipv6_interval = NEIGH_VAR(&nd_tbl.parms,
DELAY_PROBE_TIME);
#else
unsigned long ipv6_interval = ~0UL;
@@ -424,7 +429,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
case NETEVENT_NEIGH_UPDATE:
n = ptr;
#if IS_ENABLED(CONFIG_IPV6)
- if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl)
+ if (n->tbl != &nd_tbl && n->tbl != &arp_tbl)
#else
if (n->tbl != &arp_tbl)
#endif
@@ -472,7 +477,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
* done per device delay prob time parameter.
*/
#if IS_ENABLED(CONFIG_IPV6)
- if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl))
+ if (!p->dev || (p->tbl != &nd_tbl && p->tbl != &arp_tbl))
#else
if (!p->dev || p->tbl != &arp_tbl)
#endif
@@ -668,7 +673,6 @@ static int mlx5e_rep_open(struct net_device *dev)
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int err;
mutex_lock(&priv->state_lock);
@@ -676,8 +680,9 @@ static int mlx5e_rep_open(struct net_device *dev)
if (err)
goto unlock;
- if (!mlx5_eswitch_set_vport_state(esw, rep->vport,
- MLX5_ESW_VPORT_ADMIN_STATE_UP))
+ if (!mlx5_modify_vport_admin_state(priv->mdev,
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+ rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP))
netif_carrier_on(dev);
unlock:
@@ -690,11 +695,12 @@ static int mlx5e_rep_close(struct net_device *dev)
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int ret;
mutex_lock(&priv->state_lock);
- (void)mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
+ mlx5_modify_vport_admin_state(priv->mdev,
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+ rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
ret = mlx5e_close_locked(dev);
mutex_unlock(&priv->state_lock);
return ret;
@@ -877,14 +883,14 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
- params->log_sq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+ params->hard_mtu = MLX5E_ETH_HARD_MTU;
+ params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE;
params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
- params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+ params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE;
params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
- params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
params->num_tc = 1;
params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
@@ -899,9 +905,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
-#ifdef CONFIG_NET_SWITCHDEV
netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;
-#endif
netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
netdev->hw_features |= NETIF_F_HW_TC;
@@ -927,8 +931,6 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
priv->channels.params.num_channels = profile->max_nch(mdev);
- priv->hard_mtu = MLX5E_ETH_HARD_MTU;
-
mlx5e_build_rep_params(mdev, &priv->channels.params);
mlx5e_build_rep_netdev(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 80b84f6af2a1..176645762e49 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -36,6 +36,7 @@
#include <linux/tcp.h>
#include <linux/bpf_trace.h>
#include <net/busy_poll.h>
+#include <net/ip6_checksum.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
@@ -295,46 +296,36 @@ void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
mlx5e_free_rx_wqe(rq, wi);
}
-static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
-{
- return rq->mpwqe.num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
-}
-
static inline void mlx5e_add_skb_frag_mpwqe(struct mlx5e_rq *rq,
struct sk_buff *skb,
- struct mlx5e_mpw_info *wi,
- u32 page_idx, u32 frag_offset,
- u32 len)
+ struct mlx5e_dma_info *di,
+ u32 frag_offset, u32 len)
{
unsigned int truesize = ALIGN(len, BIT(rq->mpwqe.log_stride_sz));
dma_sync_single_for_cpu(rq->pdev,
- wi->umr.dma_info[page_idx].addr + frag_offset,
+ di->addr + frag_offset,
len, DMA_FROM_DEVICE);
- wi->skbs_frags[page_idx]++;
+ page_ref_inc(di->page);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- wi->umr.dma_info[page_idx].page, frag_offset,
- len, truesize);
+ di->page, frag_offset, len, truesize);
}
static inline void
mlx5e_copy_skb_header_mpwqe(struct device *pdev,
struct sk_buff *skb,
- struct mlx5e_mpw_info *wi,
- u32 page_idx, u32 offset,
- u32 headlen)
+ struct mlx5e_dma_info *dma_info,
+ u32 offset, u32 headlen)
{
u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset);
- struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[page_idx];
unsigned int len;
/* Aligning len to sizeof(long) optimizes memcpy performance */
len = ALIGN(headlen_pg, sizeof(long));
dma_sync_single_for_cpu(pdev, dma_info->addr + offset, len,
DMA_FROM_DEVICE);
- skb_copy_to_linear_data_offset(skb, 0,
- page_address(dma_info->page) + offset,
- len);
+ skb_copy_to_linear_data(skb, page_address(dma_info->page) + offset, len);
+
if (unlikely(offset + headlen > PAGE_SIZE)) {
dma_info++;
headlen_pg = len;
@@ -347,14 +338,49 @@ mlx5e_copy_skb_header_mpwqe(struct device *pdev,
}
}
-static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
+void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
+{
+ const bool no_xdp_xmit =
+ bitmap_empty(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
+ struct mlx5e_dma_info *dma_info = wi->umr.dma_info;
+ int i;
+
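+ /* Pages handed to XDP_TX are released on their TX completion;
+ * only release the remaining pages here.
+ */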
+ for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++)
+ if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
+ mlx5e_page_release(rq, &dma_info[i], true);
+}
+
+static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
+{
+ struct mlx5_wq_ll *wq = &rq->wq;
+ struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
+
+ rq->mpwqe.umr_in_progress = false;
+
+ mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
+
+ /* ensure wqes are visible to device before updating doorbell record */
+ dma_wmb();
+
+ mlx5_wq_ll_update_db_record(wq);
+}
+
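+/* How many times the ICOSQ producer counter has wrapped, assuming a ring of
+ * BIT(MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE) entries.
+ */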
+static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq)
+{
+ return sq->pc >> MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+}
+
+static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
+ struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
struct mlx5e_icosq *sq = &rq->channel->icosq;
struct mlx5_wq_cyc *wq = &sq->wq;
- struct mlx5e_umr_wqe *wqe;
- u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB);
+ struct mlx5e_umr_wqe *umr_wqe;
+ u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
+ int err;
u16 pi;
+ int i;
/* fill sq edge with nops to avoid wqe wrap around */
while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
@@ -362,90 +388,44 @@ static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
mlx5e_post_nop(wq, sq->sqn, &sq->pc);
}
- wqe = mlx5_wq_cyc_get_wqe(wq, pi);
- memcpy(wqe, &wi->umr.wqe, sizeof(*wqe));
- wqe->ctrl.opmod_idx_opcode =
- cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
- MLX5_OPCODE_UMR);
-
- sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
- sq->pc += num_wqebbs;
- mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
-}
-
-static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
- u16 ix)
-{
- struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
- int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
- struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
- int err;
- int i;
+ umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
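+ /* Copy the constant part of the UMR WQE only during the first passes over
+ * the ICOSQ; afterwards the slot is expected to already hold it.
+ */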
+ if (unlikely(mlx5e_icosq_wrap_cnt(sq) < 2))
+ memcpy(umr_wqe, &rq->mpwqe.umr_wqe,
+ offsetof(struct mlx5e_umr_wqe, inline_mtts));
for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
err = mlx5e_page_alloc_mapped(rq, dma_info);
if (unlikely(err))
goto err_unmap;
- wi->umr.mtt[i] = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
- page_ref_add(dma_info->page, pg_strides);
+ umr_wqe->inline_mtts[i].ptag = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
}
- memset(wi->skbs_frags, 0, sizeof(*wi->skbs_frags) * MLX5_MPWRQ_PAGES_PER_WQE);
+ bitmap_zero(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
wi->consumed_strides = 0;
+ rq->mpwqe.umr_in_progress = true;
+
+ umr_wqe->ctrl.opmod_idx_opcode =
+ cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+ MLX5_OPCODE_UMR);
+ umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
+
+ sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
+ sq->pc += MLX5E_UMR_WQEBBS;
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &umr_wqe->ctrl);
+
return 0;
err_unmap:
while (--i >= 0) {
dma_info--;
- page_ref_sub(dma_info->page, pg_strides);
mlx5e_page_release(rq, dma_info, true);
}
+ rq->stats.buff_alloc_err++;
return err;
}
-void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
-{
- int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
- struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
- int i;
-
- for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
- page_ref_sub(dma_info->page, pg_strides - wi->skbs_frags[i]);
- mlx5e_page_release(rq, dma_info, true);
- }
-}
-
-static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
-{
- struct mlx5_wq_ll *wq = &rq->wq;
- struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
-
- rq->mpwqe.umr_in_progress = false;
-
- mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
-
- /* ensure wqes are visible to device before updating doorbell record */
- dma_wmb();
-
- mlx5_wq_ll_update_db_record(wq);
-}
-
-static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
-{
- int err;
-
- err = mlx5e_alloc_rx_umr_mpwqe(rq, ix);
- if (unlikely(err)) {
- rq->stats.buff_alloc_err++;
- return err;
- }
- rq->mpwqe.umr_in_progress = true;
- mlx5e_post_umr_wqe(rq, ix);
- return 0;
-}
-
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
@@ -544,7 +524,23 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
if (!rq->mpwqe.umr_in_progress)
mlx5e_alloc_rx_mpwqe(rq, wq->head);
- return true;
+ return false;
+}
+
+static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
+{
+ u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
+ u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
+ (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
+
+ tcp->check = 0;
+ tcp->psh = get_cqe_lro_tcppsh(cqe);
+
+ if (tcp_ack) {
+ tcp->ack = 1;
+ tcp->ack_seq = cqe->lro_ack_seq_num;
+ tcp->window = cqe->lro_tcp_win;
+ }
}
static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
@@ -553,14 +549,11 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
struct ethhdr *eth = (struct ethhdr *)(skb->data);
struct tcphdr *tcp;
int network_depth = 0;
+ __wsum check;
__be16 proto;
u16 tot_len;
void *ip_p;
- u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
- u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
- (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
-
proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
tot_len = cqe_bcnt - network_depth;
@@ -577,23 +570,30 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
ipv4->check = 0;
ipv4->check = ip_fast_csum((unsigned char *)ipv4,
ipv4->ihl);
+
+ mlx5e_lro_update_tcp_hdr(cqe, tcp);
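+ /* Rebuild the TCP checksum: fold the rewritten TCP header into the
+ * HW-reported checksum, then add the pseudo-header below.
+ */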
+ check = csum_partial(tcp, tcp->doff * 4,
+ csum_unfold((__force __sum16)cqe->check_sum));
+ /* Almost done, don't forget the pseudo header */
+ tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
+ tot_len - sizeof(struct iphdr),
+ IPPROTO_TCP, check);
} else {
+ u16 payload_len = tot_len - sizeof(struct ipv6hdr);
struct ipv6hdr *ipv6 = ip_p;
tcp = ip_p + sizeof(struct ipv6hdr);
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
ipv6->hop_limit = cqe->lro_min_ttl;
- ipv6->payload_len = cpu_to_be16(tot_len -
- sizeof(struct ipv6hdr));
- }
-
- tcp->psh = get_cqe_lro_tcppsh(cqe);
-
- if (tcp_ack) {
- tcp->ack = 1;
- tcp->ack_seq = cqe->lro_ack_seq_num;
- tcp->window = cqe->lro_tcp_win;
+ ipv6->payload_len = cpu_to_be16(payload_len);
+
+ mlx5e_lro_update_tcp_hdr(cqe, tcp);
+ check = csum_partial(tcp, tcp->doff * 4,
+ csum_unfold((__force __sum16)cqe->check_sum));
+ /* Almost done, don't forget the pseudo header */
+ tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
+ IPPROTO_TCP, check);
}
}
@@ -746,8 +746,7 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
prefetchw(wqe);
- if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE ||
- MLX5E_SW2HW_MTU(rq->channel->priv, rq->netdev->mtu) < dma_len)) {
+ if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || rq->hw_mtu < dma_len)) {
rq->stats.xdp_drop++;
return false;
}
@@ -786,7 +785,7 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
/* move page to reference to sq responsibility,
* and mark so it's not put back in page-cache.
*/
- rq->wqe.xdp_xmit = true;
+ __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
sq->db.di[pi] = *di;
sq->pc++;
@@ -835,6 +834,24 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
}
static inline
+struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
+ u32 frag_size, u16 headroom,
+ u32 cqe_bcnt)
+{
+ struct sk_buff *skb = build_skb(va, frag_size);
+
+ if (unlikely(!skb)) {
+ rq->stats.buff_alloc_err++;
+ return NULL;
+ }
+
+ skb_reserve(skb, headroom);
+ skb_put(skb, cqe_bcnt);
+
+ return skb;
+}
+
+static inline
struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
{
@@ -849,10 +866,8 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
data = va + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
- dma_sync_single_range_for_cpu(rq->pdev,
- di->addr + wi->offset,
- 0, frag_size,
- DMA_FROM_DEVICE);
+ dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
+ frag_size, DMA_FROM_DEVICE);
prefetch(data);
wi->offset += frag_size;
@@ -867,18 +882,13 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
if (consumed)
return NULL; /* page/packet was consumed by XDP */
- skb = build_skb(va, frag_size);
- if (unlikely(!skb)) {
- rq->stats.buff_alloc_err++;
+ skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt);
+ if (unlikely(!skb))
return NULL;
- }
/* queue up for recycling/reuse */
page_ref_inc(di->page);
- skb_reserve(skb, rx_headroom);
- skb_put(skb, cqe_bcnt);
-
return skb;
}
@@ -900,9 +910,8 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
if (!skb) {
/* probably for XDP */
- if (rq->wqe.xdp_xmit) {
+ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
wi->di.page = NULL;
- rq->wqe.xdp_xmit = false;
/* do not return page to cache, it will be returned on XDP_TX completion */
goto wq_ll_pop;
}
@@ -942,9 +951,8 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
if (!skb) {
- if (rq->wqe.xdp_xmit) {
+ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
wi->di.page = NULL;
- rq->wqe.xdp_xmit = false;
/* do not return page to cache, it will be returned on XDP_TX completion */
goto wq_ll_pop;
}
@@ -967,23 +975,28 @@ wq_ll_pop:
}
#endif
-static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq,
- struct mlx5_cqe64 *cqe,
- struct mlx5e_mpw_info *wi,
- u32 cqe_bcnt,
- struct sk_buff *skb)
+struct sk_buff *
+mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
+ u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
- u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
- u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
- u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
- u32 page_idx = wqe_offset >> PAGE_SHIFT;
- u32 head_page_idx = page_idx;
u16 headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt);
+ struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
u32 frag_offset = head_offset + headlen;
- u16 byte_cnt = cqe_bcnt - headlen;
+ u32 byte_cnt = cqe_bcnt - headlen;
+ struct mlx5e_dma_info *head_di = di;
+ struct sk_buff *skb;
+
+ skb = napi_alloc_skb(rq->cq.napi,
+ ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, sizeof(long)));
+ if (unlikely(!skb)) {
+ rq->stats.buff_alloc_err++;
+ return NULL;
+ }
+
+ prefetchw(skb->data);
if (unlikely(frag_offset >= PAGE_SIZE)) {
- page_idx++;
+ di++;
frag_offset -= PAGE_SIZE;
}
@@ -991,18 +1004,59 @@ static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq,
u32 pg_consumed_bytes =
min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
- mlx5e_add_skb_frag_mpwqe(rq, skb, wi, page_idx, frag_offset,
+ mlx5e_add_skb_frag_mpwqe(rq, skb, di, frag_offset,
pg_consumed_bytes);
byte_cnt -= pg_consumed_bytes;
frag_offset = 0;
- page_idx++;
+ di++;
}
/* copy header */
- mlx5e_copy_skb_header_mpwqe(rq->pdev, skb, wi, head_page_idx,
+ mlx5e_copy_skb_header_mpwqe(rq->pdev, skb, head_di,
head_offset, headlen);
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += headlen;
skb->len += headlen;
+
+ return skb;
+}
+
+struct sk_buff *
+mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
+ u16 cqe_bcnt, u32 head_offset, u32 page_idx)
+{
+ struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
+ u16 rx_headroom = rq->buff.headroom;
+ u32 cqe_bcnt32 = cqe_bcnt;
+ struct sk_buff *skb;
+ void *va, *data;
+ u32 frag_size;
+ bool consumed;
+
+ va = page_address(di->page) + head_offset;
+ data = va + rx_headroom;
+ frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
+
+ dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
+ frag_size, DMA_FROM_DEVICE);
+ prefetch(data);
+
+ rcu_read_lock();
+ consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt32);
+ rcu_read_unlock();
+ if (consumed) {
+ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
+ __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
+ return NULL; /* page/packet was consumed by XDP */
+ }
+
+ skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* queue up for recycling/reuse */
+ page_ref_inc(di->page);
+
+ return skb;
}
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
@@ -1010,7 +1064,11 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
u16 wqe_id = be16_to_cpu(cqe->wqe_id);
struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
- struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id);
+ u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
+ u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
+ u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
+ u32 page_idx = wqe_offset >> PAGE_SHIFT;
+ struct mlx5e_rx_wqe *wqe;
struct sk_buff *skb;
u16 cqe_bcnt;
@@ -1026,18 +1084,13 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto mpwrq_cqe_out;
}
- skb = napi_alloc_skb(rq->cq.napi,
- ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD,
- sizeof(long)));
- if (unlikely(!skb)) {
- rq->stats.buff_alloc_err++;
- goto mpwrq_cqe_out;
- }
-
- prefetchw(skb->data);
cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
- mlx5e_mpwqe_fill_rx_skb(rq, cqe, wi, cqe_bcnt, skb);
+ skb = rq->mpwqe.skb_from_cqe_mpwrq(rq, wi, cqe_bcnt, head_offset,
+ page_idx);
+ if (!skb)
+ goto mpwrq_cqe_out;
+
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
napi_gro_receive(rq->cq.napi, skb);
@@ -1045,6 +1098,7 @@ mpwrq_cqe_out:
if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
return;
+ wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id);
mlx5e_free_rx_mpwqe(rq, wi);
mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 5a4608281f38..707976482c09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -216,7 +216,8 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
if (iph->protocol != IPPROTO_UDP)
goto out;
- udph = udp_hdr(skb);
+ /* Don't assume skb_transport_header() was set */
+ udph = (struct udphdr *)((u8 *)iph + 4 * iph->ihl);
if (udph->dest != htons(9))
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 5f0f3493d747..b08c94422907 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -60,6 +60,8 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
@@ -153,6 +155,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
s->tx_queue_dropped += sq_stats->dropped;
+ s->tx_cqe_err += sq_stats->cqe_err;
+ s->tx_recover += sq_stats->recover;
s->tx_xmit_more += sq_stats->xmit_more;
s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
s->tx_csum_none += sq_stats->csum_none;
@@ -170,11 +174,24 @@ static const struct counter_desc q_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};
+static const struct counter_desc drop_rq_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
+};
+
#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
+#define NUM_DROP_RQ_COUNTERS ARRAY_SIZE(drop_rq_stats_desc)
static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv)
{
- return priv->q_counter ? NUM_Q_COUNTERS : 0;
+ int num_stats = 0;
+
+ if (priv->q_counter)
+ num_stats += NUM_Q_COUNTERS;
+
+ if (priv->drop_rq_q_counter)
+ num_stats += NUM_DROP_RQ_COUNTERS;
+
+ return num_stats;
}
static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
@@ -182,7 +199,13 @@ static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
int i;
for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ q_stats_desc[i].format);
+
+ for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ drop_rq_stats_desc[i].format);
+
return idx;
}
@@ -191,7 +214,11 @@ static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
int i;
for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
- data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt, q_stats_desc, i);
+ data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
+ q_stats_desc, i);
+ for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
+ data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
+ drop_rq_stats_desc, i);
return idx;
}
@@ -199,16 +226,76 @@ static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv)
{
struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
- int err;
- if (!priv->q_counter)
- return;
+ if (priv->q_counter &&
+ !mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out,
+ sizeof(out)))
+ qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
+ out, out_of_buffer);
+ if (priv->drop_rq_q_counter &&
+ !mlx5_core_query_q_counter(priv->mdev, priv->drop_rq_q_counter, 0,
+ out, sizeof(out)))
+ qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out, out,
+ out_of_buffer);
+}
+
+#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
+static const struct counter_desc vnic_env_stats_desc[] = {
+ { "rx_steer_missed_packets",
+ VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
+};
+
+#define NUM_VNIC_ENV_COUNTERS ARRAY_SIZE(vnic_env_stats_desc)
+
+static int mlx5e_grp_vnic_env_get_num_stats(struct mlx5e_priv *priv)
+{
+ return MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard) ?
+ NUM_VNIC_ENV_COUNTERS : 0;
+}
+
+static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
+ return idx;
+
+ for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ vnic_env_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_vnic_env_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ int i;
+
+ if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
+ return idx;
- err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out));
- if (err)
+ for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
+ vnic_env_stats_desc, i);
+ return idx;
+}
+
+static void mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv)
+{
+ u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
+ int outlen = MLX5_ST_SZ_BYTES(query_vnic_env_out);
+ u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0};
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
return;
- qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
+ MLX5_SET(query_vnic_env_in, in, opcode,
+ MLX5_CMD_OP_QUERY_VNIC_ENV);
+ MLX5_SET(query_vnic_env_in, in, op_mod, 0);
+ MLX5_SET(query_vnic_env_in, in, other_vport, 0);
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
@@ -754,7 +841,15 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};
+static const struct counter_desc pport_pfc_stall_stats_desc[] = {
+ { "tx_pause_storm_warning_events ", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
+ { "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
+};
+
#define NUM_PPORT_PER_PRIO_PFC_COUNTERS ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
+#define NUM_PPORT_PFC_STALL_COUNTERS(priv) (ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
+ MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
+ MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
{
@@ -790,7 +885,8 @@ static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
{
return (mlx5e_query_global_pause_combined(priv) +
hweight8(mlx5e_query_pfc_combined(priv))) *
- NUM_PPORT_PER_PRIO_PFC_COUNTERS;
+ NUM_PPORT_PER_PRIO_PFC_COUNTERS +
+ NUM_PPORT_PFC_STALL_COUNTERS(priv);
}
static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
@@ -818,6 +914,10 @@ static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
}
}
+ for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_pfc_stall_stats_desc[i].format);
+
return idx;
}
@@ -845,6 +945,10 @@ static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
}
}
+ for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
+ pport_pfc_stall_stats_desc, i);
+
return idx;
}
@@ -1003,6 +1107,8 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
};
static const struct counter_desc ch_stats_desc[] = {
@@ -1095,6 +1201,12 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
.update_stats = mlx5e_grp_q_update_stats,
},
{
+ .get_num_stats = mlx5e_grp_vnic_env_get_num_stats,
+ .fill_strings = mlx5e_grp_vnic_env_fill_strings,
+ .fill_stats = mlx5e_grp_vnic_env_fill_stats,
+ .update_stats = mlx5e_grp_vnic_env_update_stats,
+ },
+ {
.get_num_stats = mlx5e_grp_vport_get_num_stats,
.fill_strings = mlx5e_grp_vport_fill_strings,
.fill_stats = mlx5e_grp_vport_fill_stats,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 0b3320a2b072..53111a2df587 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -78,6 +78,8 @@ struct mlx5e_sw_stats {
u64 tx_queue_wake;
u64 tx_queue_dropped;
u64 tx_xmit_more;
+ u64 tx_cqe_err;
+ u64 tx_recover;
u64 rx_wqe_err;
u64 rx_mpwqe_filler;
u64 rx_buff_alloc_err;
@@ -97,6 +99,11 @@ struct mlx5e_sw_stats {
struct mlx5e_qcounter_stats {
u32 rx_out_of_buffer;
+ u32 rx_if_down_packets;
+};
+
+struct mlx5e_vnic_env_stats {
+ __be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)];
};
#define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \
@@ -192,6 +199,8 @@ struct mlx5e_sq_stats {
u64 stopped;
u64 wake;
u64 dropped;
+ u64 cqe_err;
+ u64 recover;
};
struct mlx5e_ch_stats {
@@ -201,6 +210,7 @@ struct mlx5e_ch_stats {
struct mlx5e_stats {
struct mlx5e_sw_stats sw;
struct mlx5e_qcounter_stats qcnt;
+ struct mlx5e_vnic_env_stats vnic;
struct mlx5e_vport_stats vport;
struct mlx5e_pport_stats pport;
struct rtnl_link_stats64 vf_vport;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index eeff1fac77ef..4197001f9801 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -964,7 +964,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
else if (m_neigh->family == AF_INET6)
- tbl = ipv6_stub->nd_tbl;
+ tbl = &nd_tbl;
#endif
else
return;
@@ -2530,11 +2530,17 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
- if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
- return -EOPNOTSUPP;
-
attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
- attr->vlan = tcf_vlan_push_vid(a);
+ attr->vlan_vid = tcf_vlan_push_vid(a);
+ if (mlx5_eswitch_vlan_actions_supported(priv->mdev)) {
+ attr->vlan_prio = tcf_vlan_push_prio(a);
+ attr->vlan_proto = tcf_vlan_push_proto(a);
+ if (!attr->vlan_proto)
+ attr->vlan_proto = htons(ETH_P_8021Q);
+ } else if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
+ tcf_vlan_push_prio(a)) {
+ return -EOPNOTSUPP;
+ }
} else { /* action is TCA_VLAN_ACT_MODIFY */
return -EOPNOTSUPP;
}
@@ -2608,19 +2614,19 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
if (err != -EAGAIN)
flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+ if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) ||
+ !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
+ kvfree(parse_attr);
+
err = rhashtable_insert_fast(&tc->ht, &flow->node,
tc->ht_params);
- if (err)
- goto err_del_rule;
+ if (err) {
+ mlx5e_tc_del_flow(priv, flow);
+ kfree(flow);
+ }
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH &&
- !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
- kvfree(parse_attr);
return err;
-err_del_rule:
- mlx5e_tc_del_flow(priv, flow);
-
err_free:
kvfree(parse_attr);
kfree(flow);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 569b42a01026..20297108528a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -176,7 +176,7 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
default:
hlen = mlx5e_skb_l2_header_offset(skb);
}
- return min_t(u16, hlen, skb->len);
+ return min_t(u16, hlen, skb_headlen(skb));
}
static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
@@ -417,6 +417,18 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
return mlx5e_sq_xmit(sq, skb, wqe, pi);
}
+static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
+ struct mlx5_err_cqe *err_cqe)
+{
+ u32 ci = mlx5_cqwq_get_ci(&sq->cq.wq);
+
+ netdev_err(sq->channel->netdev,
+ "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
+ sq->cq.mcq.cqn, ci, sq->sqn, err_cqe->syndrome,
+ err_cqe->vendor_err_synd);
+ mlx5_dump_err_cqe(sq->cq.mdev, err_cqe);
+}
+
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
struct mlx5e_txqsq *sq;
@@ -456,6 +468,17 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
wqe_counter = be16_to_cpu(cqe->wqe_counter);
+ if (unlikely(cqe->op_own >> 4 == MLX5_CQE_REQ_ERR)) {
+ if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
+ &sq->state)) {
+ mlx5e_dump_error_cqe(sq,
+ (struct mlx5_err_cqe *)cqe);
+ queue_work(cq->channel->priv->wq,
+ &sq->recover.recover_work);
+ }
+ sq->stats.cqe_err++;
+ }
+
do {
struct mlx5e_tx_wqe_info *wi;
struct sk_buff *skb;
@@ -509,7 +532,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
netdev_tx_completed_queue(sq->txq, npkts, nbytes);
if (netif_tx_queue_stopped(sq->txq) &&
- mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM)) {
+ mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
+ MLX5E_SQ_STOP_ROOM) &&
+ !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
netif_tx_wake_queue(sq->txq);
sq->stats.wake++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 964cd8c4fdcc..332bc56306bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1529,6 +1529,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
+ /* Create steering drop counters for ingress and egress ACLs */
+ if (vport_num && esw->mode == SRIOV_LEGACY)
+ esw_vport_create_drop_counters(vport);
+
/* Restore old vport configuration */
esw_apply_vport_conf(esw, vport);
@@ -1545,10 +1549,6 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
if (!vport_num)
vport->info.trusted = true;
- /* create steering drop counters for ingress and egress ACLs */
- if (vport_num && esw->mode == SRIOV_LEGACY)
- esw_vport_create_drop_counters(vport);
-
esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
@@ -2096,17 +2096,19 @@ unlock:
return err;
}
-static void mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
- int vport_idx,
- struct mlx5_vport_drop_stats *stats)
+static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
+ int vport_idx,
+ struct mlx5_vport_drop_stats *stats)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
struct mlx5_vport *vport = &esw->vports[vport_idx];
+ u64 rx_discard_vport_down, tx_discard_vport_down;
u64 bytes = 0;
u16 idx = 0;
+ int err = 0;
if (!vport->enabled || esw->mode != SRIOV_LEGACY)
- return;
+ return 0;
if (vport->egress.drop_counter) {
idx = vport->egress.drop_counter->id;
@@ -2117,6 +2119,23 @@ static void mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
idx = vport->ingress.drop_counter->id;
mlx5_fc_query(dev, idx, &stats->tx_dropped, &bytes);
}
+
+ if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
+ !MLX5_CAP_GEN(dev, transmit_discard_vport_down))
+ return 0;
+
+ err = mlx5_query_vport_down_stats(dev, vport_idx,
+ &rx_discard_vport_down,
+ &tx_discard_vport_down);
+ if (err)
+ return err;
+
+ if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
+ stats->rx_dropped += rx_discard_vport_down;
+ if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
+ stats->tx_dropped += tx_discard_vport_down;
+
+ return 0;
}
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
@@ -2180,7 +2199,9 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
vf_stats->broadcast =
MLX5_GET_CTR(out, received_eth_broadcast.packets);
- mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
+ err = mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
+ if (err)
+ goto free_out;
vf_stats->rx_dropped = stats.rx_dropped;
vf_stats->tx_dropped = stats.tx_dropped;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 98d2177d0806..4cd773fa55e3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -227,15 +227,14 @@ enum {
SET_VLAN_INSERT = BIT(1)
};
-#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP 0x4000
-#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x8000
-
struct mlx5_esw_flow_attr {
struct mlx5_eswitch_rep *in_rep;
struct mlx5_eswitch_rep *out_rep;
int action;
- u16 vlan;
+ __be16 vlan_proto;
+ u16 vlan_vid;
+ u8 vlan_prio;
bool vlan_handled;
u32 encap_id;
u32 mod_hdr_id;
@@ -258,6 +257,12 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
int vport, u16 vlan, u8 qos, u8 set_flags);
+static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
+ MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);
+}
+
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
#define esw_info(dev, format, ...) \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 0a8303c1b52f..35e256eb2f6e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -58,8 +58,16 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (esw->mode != SRIOV_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
- /* per flow vlan pop/push is emulated, don't set that into the firmware */
- flow_act.action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
+ flow_act.action = attr->action;
+ /* if per flow vlan pop/push is emulated, don't program it into the firmware */
+ if (!mlx5_eswitch_vlan_actions_supported(esw->dev))
+ flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
+ else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
+ flow_act.vlan.ethtype = ntohs(attr->vlan_proto);
+ flow_act.vlan.vid = attr->vlan_vid;
+ flow_act.vlan.prio = attr->vlan_prio;
+ }
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
@@ -88,10 +96,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_id = attr->mod_hdr_id;
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
flow_act.encap_id = attr->encap_id;
rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
@@ -185,7 +193,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
/* protects against (1) setting rules with different vlans to push and
* (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
*/
- if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
+ if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid))
goto out_notsupp;
return 0;
@@ -202,6 +210,10 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
bool push, pop, fwd;
int err = 0;
+ /* nop if we're in vlan push/pop non-emulation mode */
+ if (mlx5_eswitch_vlan_actions_supported(esw->dev))
+ return 0;
+
push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
@@ -239,11 +251,11 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
if (vport->vlan_refcount)
goto skip_set_push;
- err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
+ err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid, 0,
SET_VLAN_INSERT | SET_VLAN_STRIP);
if (err)
goto out;
- vport->vlan = attr->vlan;
+ vport->vlan = attr->vlan_vid;
skip_set_push:
vport->vlan_refcount++;
}
@@ -261,6 +273,10 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
bool push, pop, fwd;
int err = 0;
+ /* nop if we're in vlan push/pop non-emulation mode */
+ if (mlx5_eswitch_vlan_actions_supported(esw->dev))
+ return 0;
+
if (!attr->vlan_handled)
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 4f1568528738..0f5da499a223 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -1061,8 +1061,9 @@ static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev,
rule->ctx = mlx5_fpga_ipsec_fs_create_sa_ctx(dev, fte, is_egress);
if (IS_ERR(rule->ctx)) {
+ int err = PTR_ERR(rule->ctx);
kfree(rule);
- return PTR_ERR(rule->ctx);
+ return err;
}
rule->fte = fte;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 645f83cac34d..ef5afd7c9325 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -317,7 +317,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
struct mlx5_flow_rule *dst;
- void *in_flow_context;
+ void *in_flow_context, *vlan;
void *in_match_value;
void *in_dests;
u32 *in;
@@ -340,11 +340,19 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
+
MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag);
MLX5_SET(flow_context, in_flow_context, action, fte->action.action);
MLX5_SET(flow_context, in_flow_context, encap_id, fte->action.encap_id);
MLX5_SET(flow_context, in_flow_context, modify_header_id,
fte->action.modify_id);
+
+ vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
+
+ MLX5_SET(vlan, vlan, ethtype, fte->action.vlan.ethtype);
+ MLX5_SET(vlan, vlan, vid, fte->action.vlan.vid);
+ MLX5_SET(vlan, vlan, prio, fte->action.vlan.prio);
+
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
match_value);
memcpy(in_match_value, &fte->val, sizeof(fte->val));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 09aad58c8e06..de51e7c39bc8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1438,7 +1438,10 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_ENCAP |
- MLX5_FLOW_CONTEXT_ACTION_DECAP))
+ MLX5_FLOW_CONTEXT_ACTION_DECAP |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH))
return true;
return false;
@@ -1646,7 +1649,6 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
list_for_each_entry(iter, match_head, list) {
nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT);
- ida_pre_get(&iter->g->fte_allocator, GFP_KERNEL);
}
search_again_locked:
@@ -1768,8 +1770,11 @@ search_again_locked:
/* Collect all fgs which has a matching match_criteria */
err = build_match_list(&match_head, ft, spec);
- if (err)
+ if (err) {
+ if (take_write)
+ up_write_ref_node(&ft->node);
return ERR_PTR(err);
+ }
if (!take_write)
up_read_ref_node(&ft->node);
@@ -1778,8 +1783,11 @@ search_again_locked:
dest_num, version);
free_match_list(&match_head);
if (!IS_ERR(rule) ||
- (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN))
+ (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
+ if (take_write)
+ up_write_ref_node(&ft->node);
return rule;
+ }
if (!take_write) {
nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 17ec55874714..afd9f4fa22f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -183,6 +183,9 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN(dev, debug))
+ mlx5_core_get_caps(dev, MLX5_CAP_DEBUG);
+
if (MLX5_CAP_GEN(dev, pcam_reg))
mlx5_get_pcam_reg(dev);
@@ -248,7 +251,7 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
force_state = MLX5_GET(teardown_hca_out, out, force_state);
if (force_state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
- mlx5_core_err(dev, "teardown with force mode failed\n");
+ mlx5_core_warn(dev, "teardown with force mode failed, doing normal teardown\n");
return -EIO;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index f953378bd13d..af3bb2f7a504 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -56,14 +56,17 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
{
/* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
- mlx5e_init_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST);
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, false);
+ mlx5e_set_rq_type(mdev, params);
+ mlx5e_init_rq_type_params(mdev, params);
/* RQ size in ipoib by default is 512 */
- params->log_rq_size = is_kdump_kernel() ?
+ params->log_rq_mtu_frames = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE;
params->lro_en = false;
+ params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
}
/* Called directly after IPoIB netdevice was created to initialize SW structs */
@@ -79,10 +82,10 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
priv->netdev = netdev;
priv->profile = profile;
priv->ppriv = ppriv;
- priv->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
mutex_init(&priv->state_lock);
- mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
+ mlx5e_build_nic_params(mdev, &priv->channels.params,
+ profile->max_nch(mdev), netdev->mtu);
mlx5i_build_nic_params(mdev, &priv->channels.params);
mlx5e_timestamp_init(priv);
@@ -366,25 +369,27 @@ static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
struct mlx5e_channels new_channels = {};
- int curr_mtu;
+ struct mlx5e_params *params;
int err = 0;
mutex_lock(&priv->state_lock);
- curr_mtu = netdev->mtu;
- netdev->mtu = new_mtu;
+ params = &priv->channels.params;
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ params->sw_mtu = new_mtu;
+ netdev->mtu = params->sw_mtu;
goto out;
+ }
- new_channels.params = priv->channels.params;
+ new_channels.params = *params;
+ new_channels.params.sw_mtu = new_mtu;
err = mlx5e_open_channels(priv, &new_channels);
- if (err) {
- netdev->mtu = curr_mtu;
+ if (err)
goto out;
- }
mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+ netdev->mtu = new_channels.params.sw_mtu;
out:
mutex_unlock(&priv->state_lock);
@@ -538,7 +543,7 @@ static int mlx5i_detach_mcast(struct net_device *netdev, struct ib_device *hca,
err = mlx5_core_detach_mcg(mdev, gid, ipriv->qp.qpn);
if (err)
- mlx5_core_dbg(mdev, "failed dettaching QPN 0x%x, MGID %pI6\n",
+ mlx5_core_dbg(mdev, "failed detaching QPN 0x%x, MGID %pI6\n",
ipriv->qp.qpn, gid->raw);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index b69e9d847a6b..54a188f41f90 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -290,7 +290,7 @@ static void mlx5i_pkey_init(struct mlx5_core_dev *mdev,
netdev->ethtool_ops = &mlx5i_pkey_ethtool_ops;
/* Use dummy rqs */
- priv->channels.params.log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+ priv->channels.params.log_rq_mtu_frames = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
}
/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index e159243e0fcf..857035583ccd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -34,6 +34,7 @@
#include <linux/highmem.h>
#include <rdma/mlx5-abi.h>
#include "en.h"
+#include "clock.h"
enum {
MLX5_CYCLES_SHIFT = 23
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 08c33657677c..13b6f66310c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -552,7 +552,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
MLX5_SET(cmd_hca_cap,
set_hca_cap,
cache_line_128byte,
- cache_line_size() == 128 ? 1 : 0);
+ cache_line_size() >= 128 ? 1 : 0);
if (MLX5_CAP_GEN_MAX(dev, dct))
MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 4e25f2b2e0bc..7d001fe6e631 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -50,6 +50,11 @@ extern uint mlx5_core_debug_mask;
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
+#define mlx5_core_dbg_once(__dev, format, ...) \
+ dev_dbg_once(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
+
#define mlx5_core_dbg_mask(__dev, mask, format, ...) \
do { \
if ((mask) & mlx5_core_debug_mask) \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index c37d00cd472a..fa9d0760dd36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -483,6 +483,17 @@ int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_core_query_ib_ppcnt);
+static int mlx5_query_pfcc_reg(struct mlx5_core_dev *dev, u32 *out,
+ u32 out_size)
+{
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
+
+ MLX5_SET(pfcc_reg, in, local_port, 1);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ out_size, MLX5_REG_PFCC, 0, 0);
+}
+
int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
{
u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
@@ -500,13 +511,10 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
int mlx5_query_port_pause(struct mlx5_core_dev *dev,
u32 *rx_pause, u32 *tx_pause)
{
- u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
int err;
- MLX5_SET(pfcc_reg, in, local_port, 1);
- err = mlx5_core_access_reg(dev, in, sizeof(in), out,
- sizeof(out), MLX5_REG_PFCC, 0, 0);
+ err = mlx5_query_pfcc_reg(dev, out, sizeof(out));
if (err)
return err;
@@ -520,6 +528,49 @@ int mlx5_query_port_pause(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_pause);
+int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
+ u16 stall_critical_watermark,
+ u16 stall_minor_watermark)
+{
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
+ u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+
+ MLX5_SET(pfcc_reg, in, local_port, 1);
+ MLX5_SET(pfcc_reg, in, pptx_mask_n, 1);
+ MLX5_SET(pfcc_reg, in, pprx_mask_n, 1);
+ MLX5_SET(pfcc_reg, in, ppan_mask_n, 1);
+ MLX5_SET(pfcc_reg, in, critical_stall_mask, 1);
+ MLX5_SET(pfcc_reg, in, minor_stall_mask, 1);
+ MLX5_SET(pfcc_reg, in, device_stall_critical_watermark,
+ stall_critical_watermark);
+ MLX5_SET(pfcc_reg, in, device_stall_minor_watermark, stall_minor_watermark);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PFCC, 0, 1);
+}
+
+int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev,
+ u16 *stall_critical_watermark,
+ u16 *stall_minor_watermark)
+{
+ u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+ int err;
+
+ err = mlx5_query_pfcc_reg(dev, out, sizeof(out));
+ if (err)
+ return err;
+
+ if (stall_critical_watermark)
+ *stall_critical_watermark = MLX5_GET(pfcc_reg, out,
+ device_stall_critical_watermark);
+
+ if (stall_minor_watermark)
+ *stall_minor_watermark = MLX5_GET(pfcc_reg, out,
+ device_stall_minor_watermark);
+
+ return 0;
+}
+
int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx)
{
u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
@@ -538,13 +589,10 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_pfc);
int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
{
- u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
int err;
- MLX5_SET(pfcc_reg, in, local_port, 1);
- err = mlx5_core_access_reg(dev, in, sizeof(in), out,
- sizeof(out), MLX5_REG_PFCC, 0, 0);
+ err = mlx5_query_pfcc_reg(dev, out, sizeof(out));
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 9e38343a951f..dae1c5c5d27c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -157,6 +157,31 @@ int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out)
}
EXPORT_SYMBOL(mlx5_core_query_sq);
+int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state)
+{
+ void *out;
+ void *sqc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(query_sq_out);
+ out = kvzalloc(inlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_core_query_sq(dev, sqn, out);
+ if (err)
+ goto out;
+
+ sqc = MLX5_ADDR_OF(query_sq_out, out, sq_context);
+ *state = MLX5_GET(sqc, sqc, state);
+
+out:
+ kvfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_query_sq_state);
+
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tirn)
{
@@ -329,27 +354,6 @@ int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn)
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
-int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out)
-{
- u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {0};
- void *srqc;
- void *xrc_srqc;
- int err;
-
- MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ);
- MLX5_SET(query_xrc_srq_in, in, xrc_srqn, xsrqn);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out,
- MLX5_ST_SZ_BYTES(query_xrc_srq_out));
- if (!err) {
- xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, out,
- xrc_srq_context_entry);
- srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
- memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));
- }
-
- return err;
-}
-
int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
{
u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index dfe36cf6fbea..177e076b8d17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -1070,6 +1070,32 @@ free:
}
EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);
+int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
+ u64 *rx_discard_vport_down,
+ u64 *tx_discard_vport_down)
+{
+ u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0};
+ int err;
+
+ MLX5_SET(query_vnic_env_in, in, opcode,
+ MLX5_CMD_OP_QUERY_VNIC_ENV);
+ MLX5_SET(query_vnic_env_in, in, op_mod, 0);
+ MLX5_SET(query_vnic_env_in, in, vport_number, vport);
+ if (vport)
+ MLX5_SET(query_vnic_env_in, in, other_vport, 1);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ *rx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
+ vport_env.receive_discard_vport_down);
+ *tx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
+ vport_env.transmit_discard_vport_down);
+ return 0;
+}
+
int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
u8 other_vport, u8 port_num,
int vf,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index d56eea310509..f4d9c9975ac3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -76,6 +76,8 @@ config MLXSW_SPECTRUM
depends on PSAMPLE || PSAMPLE=n
depends on BRIDGE || BRIDGE=n
depends on IPV6 || IPV6=n
+ depends on NET_IPGRE || NET_IPGRE=n
+ depends on IPV6_GRE || IPV6_GRE=n
select PARMAN
select MLXFW
default m
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 9463c3fa254f..0cadcabfe86f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -20,7 +20,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_cnt.o spectrum_fid.o \
spectrum_ipip.o spectrum_acl_flex_actions.o \
spectrum_mr.o spectrum_mr_tcam.o \
- spectrum_qdisc.o
+ spectrum_qdisc.o spectrum_span.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 3529b545675d..93ea56620a24 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1008,6 +1008,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const char *device_kind = mlxsw_bus_info->device_kind;
struct mlxsw_core *mlxsw_core;
struct mlxsw_driver *mlxsw_driver;
+ struct mlxsw_res *res;
size_t alloc_size;
int err;
@@ -1032,8 +1033,8 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
mlxsw_core->bus_priv = bus_priv;
mlxsw_core->bus_info = mlxsw_bus_info;
- err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
- &mlxsw_core->res);
+ res = mlxsw_driver->res_query_enabled ? &mlxsw_core->res : NULL;
+ err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, res);
if (err)
goto err_bus_init;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 5ddafd74dc00..092d39399f3c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -235,8 +235,7 @@ struct mlxsw_config_profile {
used_max_pkey:1,
used_ar_sec:1,
used_adaptive_routing_group_cap:1,
- used_kvd_split_data:1; /* indicate for the kvd's values */
-
+ used_kvd_sizes:1;
u8 max_vepa_channels;
u16 max_mid;
u16 max_pgt;
@@ -256,10 +255,8 @@ struct mlxsw_config_profile {
u16 adaptive_routing_group_cap;
u8 arn;
u32 kvd_linear_size;
- u16 kvd_hash_granularity;
u8 kvd_hash_single_parts;
u8 kvd_hash_double_parts;
- u8 resource_query_enable;
struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
};
@@ -316,6 +313,7 @@ struct mlxsw_driver {
u64 *p_linear_size);
u8 txhdr_len;
const struct mlxsw_config_profile *profile;
+ bool res_query_enabled;
};
int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
@@ -326,14 +324,14 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
enum mlxsw_res_id res_id);
-#define MLXSW_CORE_RES_VALID(res, short_res_id) \
- mlxsw_core_res_valid(res, MLXSW_RES_ID_##short_res_id)
+#define MLXSW_CORE_RES_VALID(mlxsw_core, short_res_id) \
+ mlxsw_core_res_valid(mlxsw_core, MLXSW_RES_ID_##short_res_id)
u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
enum mlxsw_res_id res_id);
-#define MLXSW_CORE_RES_GET(res, short_res_id) \
- mlxsw_core_res_get(res, MLXSW_RES_ID_##short_res_id)
+#define MLXSW_CORE_RES_GET(mlxsw_core, short_res_id) \
+ mlxsw_core_res_get(mlxsw_core, MLXSW_RES_ID_##short_res_id)
#define MLXSW_BUS_F_TXRX BIT(0)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index b698fb481b2e..3c0d882ba183 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -1,6 +1,6 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017, 2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
@@ -443,6 +443,17 @@ int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id)
}
EXPORT_SYMBOL(mlxsw_afa_block_jump);
+int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block)
+{
+ if (block->finished)
+ return -EINVAL;
+ mlxsw_afa_set_goto_set(block->cur_set,
+ MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0);
+ block->finished = true;
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_terminate);
+
static struct mlxsw_afa_fwd_entry *
mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port)
{
@@ -838,7 +849,6 @@ struct mlxsw_afa_mirror {
struct mlxsw_afa_resource resource;
int span_id;
u8 local_in_port;
- u8 local_out_port;
bool ingress;
};
@@ -848,7 +858,7 @@ mlxsw_afa_mirror_destroy(struct mlxsw_afa_block *block,
{
block->afa->ops->mirror_del(block->afa->ops_priv,
mirror->local_in_port,
- mirror->local_out_port,
+ mirror->span_id,
mirror->ingress);
kfree(mirror);
}
@@ -864,9 +874,8 @@ mlxsw_afa_mirror_destructor(struct mlxsw_afa_block *block,
}
static struct mlxsw_afa_mirror *
-mlxsw_afa_mirror_create(struct mlxsw_afa_block *block,
- u8 local_in_port, u8 local_out_port,
- bool ingress)
+mlxsw_afa_mirror_create(struct mlxsw_afa_block *block, u8 local_in_port,
+ const struct net_device *out_dev, bool ingress)
{
struct mlxsw_afa_mirror *mirror;
int err;
@@ -876,13 +885,12 @@ mlxsw_afa_mirror_create(struct mlxsw_afa_block *block,
return ERR_PTR(-ENOMEM);
err = block->afa->ops->mirror_add(block->afa->ops_priv,
- local_in_port, local_out_port,
+ local_in_port, out_dev,
ingress, &mirror->span_id);
if (err)
goto err_mirror_add;
mirror->ingress = ingress;
- mirror->local_out_port = local_out_port;
mirror->local_in_port = local_in_port;
mirror->resource.destructor = mlxsw_afa_mirror_destructor;
mlxsw_afa_resource_add(block, &mirror->resource);
@@ -909,13 +917,13 @@ mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
}
int
-mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
- u8 local_in_port, u8 local_out_port, bool ingress)
+mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, u8 local_in_port,
+ const struct net_device *out_dev, bool ingress)
{
struct mlxsw_afa_mirror *mirror;
int err;
- mirror = mlxsw_afa_mirror_create(block, local_in_port, local_out_port,
+ mirror = mlxsw_afa_mirror_create(block, local_in_port, out_dev,
ingress);
if (IS_ERR(mirror))
return PTR_ERR(mirror);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index 43132293475c..3a155d104384 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -36,6 +36,7 @@
#define _MLXSW_CORE_ACL_FLEX_ACTIONS_H
#include <linux/types.h>
+#include <linux/netdevice.h>
struct mlxsw_afa;
struct mlxsw_afa_block;
@@ -48,9 +49,10 @@ struct mlxsw_afa_ops {
void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index);
int (*counter_index_get)(void *priv, unsigned int *p_counter_index);
void (*counter_index_put)(void *priv, unsigned int counter_index);
- int (*mirror_add)(void *priv, u8 locol_in_port, u8 local_out_port,
+ int (*mirror_add)(void *priv, u8 local_in_port,
+ const struct net_device *out_dev,
bool ingress, int *p_span_id);
- void (*mirror_del)(void *priv, u8 locol_in_port, u8 local_out_port,
+ void (*mirror_del)(void *priv, u8 local_in_port, int span_id,
bool ingress);
};
@@ -65,12 +67,14 @@ char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block);
u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block);
int mlxsw_afa_block_continue(struct mlxsw_afa_block *block);
int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
+int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block);
int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block);
int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id);
int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
u16 trap_id);
int mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
- u8 local_in_port, u8 local_out_port,
+ u8 local_in_port,
+ const struct net_device *out_dev,
bool ingress);
int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
u8 local_port, bool in_port);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index f6963b0b4a55..122506daa586 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -107,20 +107,20 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12),
MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3),
MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9),
- MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x14, 0, 8),
- MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x14, 9, 2),
- MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x14, 11, 6),
- MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32),
- MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32),
- MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8),
- MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x20, 8),
- MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x28, 8),
- MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x30, 8),
MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16),
MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16),
+ MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8),
+ MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2),
+ MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6),
+ MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x20, 0, 32),
+ MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x24, 0, 32),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x20, 8),
+ MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x28, 8),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x30, 8),
+ MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x38, 8),
};
-#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x38
+#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40
struct mlxsw_afk_element_inst { /* element instance in actual block */
const struct mlxsw_afk_element_info *info;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index ab710e37af99..84185f8dfbae 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -218,32 +218,32 @@ static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
switch (attr_type) {
case MLXSW_HWMON_ATTR_TYPE_TEMP:
mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_temp_show;
- mlxsw_hwmon_attr->dev_attr.attr.mode = S_IRUGO;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
"temp%u_input", num + 1);
break;
case MLXSW_HWMON_ATTR_TYPE_TEMP_MAX:
mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_temp_max_show;
- mlxsw_hwmon_attr->dev_attr.attr.mode = S_IRUGO;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
"temp%u_highest", num + 1);
break;
case MLXSW_HWMON_ATTR_TYPE_TEMP_RST:
mlxsw_hwmon_attr->dev_attr.store = mlxsw_hwmon_temp_rst_store;
- mlxsw_hwmon_attr->dev_attr.attr.mode = S_IWUSR;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0200;
snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
"temp%u_reset_history", num + 1);
break;
case MLXSW_HWMON_ATTR_TYPE_FAN_RPM:
mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_fan_rpm_show;
- mlxsw_hwmon_attr->dev_attr.attr.mode = S_IRUGO;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
"fan%u_input", num + 1);
break;
case MLXSW_HWMON_ATTR_TYPE_PWM:
mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_pwm_show;
mlxsw_hwmon_attr->dev_attr.store = mlxsw_hwmon_pwm_store;
- mlxsw_hwmon_attr->dev_attr.attr.mode = S_IWUSR | S_IRUGO;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0644;
snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
"pwm%u", num + 1);
break;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 85faa87bf42d..3a9381977d6d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1015,16 +1015,14 @@ mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
}
static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
- struct mlxsw_res *res,
- u8 query_enabled)
+ struct mlxsw_res *res)
{
int index, i;
u64 data;
u16 id;
int err;
- /* Not all the versions support resources query */
- if (!query_enabled)
+ if (!res)
return 0;
mlxsw_cmd_mbox_zero(mbox);
@@ -1164,7 +1162,7 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
mbox, profile->adaptive_routing_group_cap);
}
- if (MLXSW_RES_VALID(res, KVD_SIZE)) {
+ if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) {
err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
if (err)
return err;
@@ -1376,8 +1374,7 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_boardinfo;
- err = mlxsw_pci_resources_query(mlxsw_pci, mbox, res,
- profile->resource_query_enable);
+ err = mlxsw_pci_resources_query(mlxsw_pci, mbox, res);
if (err)
goto err_query_resources;
@@ -1519,8 +1516,7 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
u8 *p_status)
{
struct mlxsw_pci *mlxsw_pci = bus_priv;
- dma_addr_t in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
- dma_addr_t out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
+ dma_addr_t in_mapaddr = 0, out_mapaddr = 0;
bool evreq = mlxsw_pci->cmd.nopoll;
unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
@@ -1532,11 +1528,15 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
if (err)
return err;
- if (in_mbox)
+ if (in_mbox) {
memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
+ in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
+ }
mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));
+ if (out_mbox)
+ out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 0e08be41c8e0..6218231e379e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -1,11 +1,11 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/reg.h
- * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2015-2016 Ido Schimmel <idosch@mellanox.com>
* Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
* Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
* Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
- * Copyright (c) 2017 Petr Machata <petrm@mellanox.com>
+ * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -2872,6 +2872,14 @@ static inline void mlxsw_reg_pmtu_pack(char *payload, u8 local_port,
MLXSW_REG_DEFINE(ptys, MLXSW_REG_PTYS_ID, MLXSW_REG_PTYS_LEN);
+/* an_disable_admin
+ * Auto negotiation disable administrative configuration
+ * 0 - Device doesn't support AN disable.
+ * 1 - Device supports AN disable.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptys, an_disable_admin, 0x00, 30, 1);
+
/* reg_ptys_local_port
* Local port number.
* Access: Index
@@ -3000,12 +3008,13 @@ MLXSW_ITEM32(reg, ptys, ib_proto_oper, 0x28, 0, 16);
MLXSW_ITEM32(reg, ptys, eth_proto_lp_advertise, 0x30, 0, 32);
static inline void mlxsw_reg_ptys_eth_pack(char *payload, u8 local_port,
- u32 proto_admin)
+ u32 proto_admin, bool autoneg)
{
MLXSW_REG_ZERO(ptys, payload);
mlxsw_reg_ptys_local_port_set(payload, local_port);
mlxsw_reg_ptys_proto_mask_set(payload, MLXSW_REG_PTYS_PROTO_MASK_ETH);
mlxsw_reg_ptys_eth_proto_admin_set(payload, proto_admin);
+ mlxsw_reg_ptys_an_disable_admin_set(payload, !autoneg);
}
static inline void mlxsw_reg_ptys_eth_unpack(char *payload,
@@ -4216,6 +4225,12 @@ MLXSW_ITEM32(reg, ritr, ipv6, 0x00, 28, 1);
*/
MLXSW_ITEM32(reg, ritr, ipv4_mc, 0x00, 27, 1);
+/* reg_ritr_ipv6_mc
+ * IPv6 multicast routing enable.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv6_mc, 0x00, 26, 1);
+
enum mlxsw_reg_ritr_if_type {
/* VLAN interface. */
MLXSW_REG_RITR_VLAN_IF,
@@ -4281,6 +4296,14 @@ MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1);
*/
MLXSW_ITEM32(reg, ritr, ipv4_mc_fe, 0x04, 27, 1);
+/* reg_ritr_ipv6_mc_fe
+ * IPv6 Multicast Forwarding Enable.
+ * When disabled, forwarding is blocked but local traffic (traps and IP to me)
+ * will be enabled.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv6_mc_fe, 0x04, 26, 1);
+
/* reg_ritr_lb_en
* Loop-back filter enable for unicast packets.
* If the flag is set then loop-back filter for unicast packets is
@@ -4504,12 +4527,14 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
mlxsw_reg_ritr_ipv4_set(payload, 1);
mlxsw_reg_ritr_ipv6_set(payload, 1);
mlxsw_reg_ritr_ipv4_mc_set(payload, 1);
+ mlxsw_reg_ritr_ipv6_mc_set(payload, 1);
mlxsw_reg_ritr_type_set(payload, type);
mlxsw_reg_ritr_op_set(payload, op);
mlxsw_reg_ritr_rif_set(payload, rif);
mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
mlxsw_reg_ritr_ipv6_fe_set(payload, 1);
mlxsw_reg_ritr_ipv4_mc_fe_set(payload, 1);
+ mlxsw_reg_ritr_ipv6_mc_fe_set(payload, 1);
mlxsw_reg_ritr_lb_en_set(payload, 1);
mlxsw_reg_ritr_virtual_router_set(payload, vr_id);
mlxsw_reg_ritr_mtu_set(payload, mtu);
@@ -6293,30 +6318,34 @@ MLXSW_ITEM32(reg, rmft2, irif_mask, 0x08, 24, 1);
*/
MLXSW_ITEM32(reg, rmft2, irif, 0x08, 0, 16);
-/* reg_rmft2_dip4
- * Destination IPv4 address
+/* reg_rmft2_dip{4,6}
+ * Destination IPv4/6 address
* Access: RW
*/
+MLXSW_ITEM_BUF(reg, rmft2, dip6, 0x10, 16);
MLXSW_ITEM32(reg, rmft2, dip4, 0x1C, 0, 32);
-/* reg_rmft2_dip4_mask
+/* reg_rmft2_dip{4,6}_mask
* A bit that is set directs the TCAM to compare the corresponding bit in key. A
* bit that is clear directs the TCAM to ignore the corresponding bit in key.
* Access: RW
*/
+MLXSW_ITEM_BUF(reg, rmft2, dip6_mask, 0x20, 16);
MLXSW_ITEM32(reg, rmft2, dip4_mask, 0x2C, 0, 32);
-/* reg_rmft2_sip4
- * Source IPv4 address
+/* reg_rmft2_sip{4,6}
+ * Source IPv4/6 address
* Access: RW
*/
+MLXSW_ITEM_BUF(reg, rmft2, sip6, 0x30, 16);
MLXSW_ITEM32(reg, rmft2, sip4, 0x3C, 0, 32);
-/* reg_rmft2_sip4_mask
+/* reg_rmft2_sip{4,6}_mask
* A bit that is set directs the TCAM to compare the corresponding bit in key. A
* bit that is clear directs the TCAM to ignore the corresponding bit in key.
* Access: RW
*/
+MLXSW_ITEM_BUF(reg, rmft2, sip6_mask, 0x40, 16);
MLXSW_ITEM32(reg, rmft2, sip4_mask, 0x4C, 0, 32);
/* reg_rmft2_flexible_action_set
@@ -6334,26 +6363,52 @@ MLXSW_ITEM_BUF(reg, rmft2, flexible_action_set, 0x80,
MLXSW_REG_FLEX_ACTION_SET_LEN);
static inline void
-mlxsw_reg_rmft2_ipv4_pack(char *payload, bool v, u16 offset, u16 virtual_router,
- enum mlxsw_reg_rmft2_irif_mask irif_mask, u16 irif,
- u32 dip4, u32 dip4_mask, u32 sip4, u32 sip4_mask,
- const char *flexible_action_set)
+mlxsw_reg_rmft2_common_pack(char *payload, bool v, u16 offset,
+ u16 virtual_router,
+ enum mlxsw_reg_rmft2_irif_mask irif_mask, u16 irif,
+ const char *flex_action_set)
{
MLXSW_REG_ZERO(rmft2, payload);
mlxsw_reg_rmft2_v_set(payload, v);
- mlxsw_reg_rmft2_type_set(payload, MLXSW_REG_RMFT2_TYPE_IPV4);
mlxsw_reg_rmft2_op_set(payload, MLXSW_REG_RMFT2_OP_READ_WRITE);
mlxsw_reg_rmft2_offset_set(payload, offset);
mlxsw_reg_rmft2_virtual_router_set(payload, virtual_router);
mlxsw_reg_rmft2_irif_mask_set(payload, irif_mask);
mlxsw_reg_rmft2_irif_set(payload, irif);
+ if (flex_action_set)
+ mlxsw_reg_rmft2_flexible_action_set_memcpy_to(payload,
+ flex_action_set);
+}
+
+static inline void
+mlxsw_reg_rmft2_ipv4_pack(char *payload, bool v, u16 offset, u16 virtual_router,
+ enum mlxsw_reg_rmft2_irif_mask irif_mask, u16 irif,
+ u32 dip4, u32 dip4_mask, u32 sip4, u32 sip4_mask,
+ const char *flexible_action_set)
+{
+ mlxsw_reg_rmft2_common_pack(payload, v, offset, virtual_router,
+ irif_mask, irif, flexible_action_set);
+ mlxsw_reg_rmft2_type_set(payload, MLXSW_REG_RMFT2_TYPE_IPV4);
mlxsw_reg_rmft2_dip4_set(payload, dip4);
mlxsw_reg_rmft2_dip4_mask_set(payload, dip4_mask);
mlxsw_reg_rmft2_sip4_set(payload, sip4);
mlxsw_reg_rmft2_sip4_mask_set(payload, sip4_mask);
- if (flexible_action_set)
- mlxsw_reg_rmft2_flexible_action_set_memcpy_to(payload,
- flexible_action_set);
+}
+
+static inline void
+mlxsw_reg_rmft2_ipv6_pack(char *payload, bool v, u16 offset, u16 virtual_router,
+ enum mlxsw_reg_rmft2_irif_mask irif_mask, u16 irif,
+ struct in6_addr dip6, struct in6_addr dip6_mask,
+ struct in6_addr sip6, struct in6_addr sip6_mask,
+ const char *flexible_action_set)
+{
+ mlxsw_reg_rmft2_common_pack(payload, v, offset, virtual_router,
+ irif_mask, irif, flexible_action_set);
+ mlxsw_reg_rmft2_type_set(payload, MLXSW_REG_RMFT2_TYPE_IPV6);
+ mlxsw_reg_rmft2_dip6_memcpy_to(payload, (void *)&dip6);
+ mlxsw_reg_rmft2_dip6_mask_memcpy_to(payload, (void *)&dip6_mask);
+ mlxsw_reg_rmft2_sip6_memcpy_to(payload, (void *)&sip6);
+ mlxsw_reg_rmft2_sip6_mask_memcpy_to(payload, (void *)&sip6_mask);
}
/* MFCR - Management Fan Control Register
@@ -6772,8 +6827,104 @@ MLXSW_ITEM32(reg, mpat, qos, 0x04, 26, 1);
*/
MLXSW_ITEM32(reg, mpat, be, 0x04, 25, 1);
+enum mlxsw_reg_mpat_span_type {
+ /* Local SPAN Ethernet.
+ * The original packet is not encapsulated.
+ */
+ MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH = 0x0,
+
+ /* Encapsulated Remote SPAN Ethernet L3 GRE.
+ * The packet is encapsulated with GRE header.
+ */
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3 = 0x3,
+};
+
+/* reg_mpat_span_type
+ * SPAN type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, span_type, 0x04, 0, 4);
+
+/* Remote SPAN - Ethernet VLAN
+ * - - - - - - - - - - - - - -
+ */
+
+/* reg_mpat_eth_rspan_vid
+ * Encapsulation header VLAN ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_vid, 0x18, 0, 12);
+
+/* Encapsulated Remote SPAN - Ethernet L2
+ * - - - - - - - - - - - - - - - - - - -
+ */
+
+enum mlxsw_reg_mpat_eth_rspan_version {
+ MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER = 15,
+};
+
+/* reg_mpat_eth_rspan_version
+ * RSPAN mirror header version.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_version, 0x10, 18, 4);
+
+/* reg_mpat_eth_rspan_mac
+ * Destination MAC address.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, mpat, eth_rspan_mac, 0x12, 6);
+
+/* reg_mpat_eth_rspan_tp
+ * Tag Packet. Indicates whether the mirroring header should be VLAN tagged.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_tp, 0x18, 16, 1);
+
+/* Encapsulated Remote SPAN - Ethernet L3
+ * - - - - - - - - - - - - - - - - - - -
+ */
+
+enum mlxsw_reg_mpat_eth_rspan_protocol {
+ MLXSW_REG_MPAT_ETH_RSPAN_PROTOCOL_IPV4,
+ MLXSW_REG_MPAT_ETH_RSPAN_PROTOCOL_IPV6,
+};
+
+/* reg_mpat_eth_rspan_protocol
+ * SPAN encapsulation protocol.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_protocol, 0x18, 24, 4);
+
+/* reg_mpat_eth_rspan_ttl
+ * Encapsulation header Time-to-Live/HopLimit.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_ttl, 0x1C, 4, 8);
+
+/* reg_mpat_eth_rspan_smac
+ * Source MAC address
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, mpat, eth_rspan_smac, 0x22, 6);
+
+/* reg_mpat_eth_rspan_dip*
+ * Destination IP address. The IP version is configured by protocol.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_dip4, 0x4C, 0, 32);
+MLXSW_ITEM_BUF(reg, mpat, eth_rspan_dip6, 0x40, 16);
+
+/* reg_mpat_eth_rspan_sip*
+ * Source IP address. The IP version is configured by protocol.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_sip4, 0x5C, 0, 32);
+MLXSW_ITEM_BUF(reg, mpat, eth_rspan_sip6, 0x50, 16);
+
static inline void mlxsw_reg_mpat_pack(char *payload, u8 pa_id,
- u16 system_port, bool e)
+ u16 system_port, bool e,
+ enum mlxsw_reg_mpat_span_type span_type)
{
MLXSW_REG_ZERO(mpat, payload);
mlxsw_reg_mpat_pa_id_set(payload, pa_id);
@@ -6781,6 +6932,49 @@ static inline void mlxsw_reg_mpat_pack(char *payload, u8 pa_id,
mlxsw_reg_mpat_e_set(payload, e);
mlxsw_reg_mpat_qos_set(payload, 1);
mlxsw_reg_mpat_be_set(payload, 1);
+ mlxsw_reg_mpat_span_type_set(payload, span_type);
+}
+
+static inline void mlxsw_reg_mpat_eth_rspan_pack(char *payload, u16 vid)
+{
+ mlxsw_reg_mpat_eth_rspan_vid_set(payload, vid);
+}
+
+static inline void
+mlxsw_reg_mpat_eth_rspan_l2_pack(char *payload,
+ enum mlxsw_reg_mpat_eth_rspan_version version,
+ const char *mac,
+ bool tp)
+{
+ mlxsw_reg_mpat_eth_rspan_version_set(payload, version);
+ mlxsw_reg_mpat_eth_rspan_mac_memcpy_to(payload, mac);
+ mlxsw_reg_mpat_eth_rspan_tp_set(payload, tp);
+}
+
+static inline void
+mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(char *payload, u8 ttl,
+ const char *smac,
+ u32 sip, u32 dip)
+{
+ mlxsw_reg_mpat_eth_rspan_ttl_set(payload, ttl);
+ mlxsw_reg_mpat_eth_rspan_smac_memcpy_to(payload, smac);
+ mlxsw_reg_mpat_eth_rspan_protocol_set(payload,
+ MLXSW_REG_MPAT_ETH_RSPAN_PROTOCOL_IPV4);
+ mlxsw_reg_mpat_eth_rspan_sip4_set(payload, sip);
+ mlxsw_reg_mpat_eth_rspan_dip4_set(payload, dip);
+}
+
+static inline void
+mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(char *payload, u8 ttl,
+ const char *smac,
+ struct in6_addr sip, struct in6_addr dip)
+{
+ mlxsw_reg_mpat_eth_rspan_ttl_set(payload, ttl);
+ mlxsw_reg_mpat_eth_rspan_smac_memcpy_to(payload, smac);
+ mlxsw_reg_mpat_eth_rspan_protocol_set(payload,
+ MLXSW_REG_MPAT_ETH_RSPAN_PROTOCOL_IPV6);
+ mlxsw_reg_mpat_eth_rspan_sip6_memcpy_to(payload, (void *)&sip);
+ mlxsw_reg_mpat_eth_rspan_dip6_memcpy_to(payload, (void *)&dip);
}
/* MPAR - Monitoring Port Analyzer Register
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 3dcc58d61506..53fffd09d133 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1,6 +1,6 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum.c
- * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
* Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
* Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
@@ -71,11 +71,12 @@
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
+#include "spectrum_span.h"
#include "../mlxfw/mlxfw.h"
#define MLXSW_FWREV_MAJOR 13
-#define MLXSW_FWREV_MINOR 1530
-#define MLXSW_FWREV_SUBMINOR 152
+#define MLXSW_FWREV_MINOR 1620
+#define MLXSW_FWREV_SUBMINOR 192
#define MLXSW_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
#define MLXSW_SP_FW_FILENAME \
@@ -487,327 +488,6 @@ static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
return 0;
}
-static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
-{
- int i;
-
- if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
- return -EIO;
-
- mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
- MAX_SPAN);
- mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
- sizeof(struct mlxsw_sp_span_entry),
- GFP_KERNEL);
- if (!mlxsw_sp->span.entries)
- return -ENOMEM;
-
- for (i = 0; i < mlxsw_sp->span.entries_count; i++)
- INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);
-
- return 0;
-}
-
-static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
-{
- int i;
-
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
-
- WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
- }
- kfree(mlxsw_sp->span.entries);
-}
-
-static struct mlxsw_sp_span_entry *
-mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
-{
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- struct mlxsw_sp_span_entry *span_entry;
- char mpat_pl[MLXSW_REG_MPAT_LEN];
- u8 local_port = port->local_port;
- int index;
- int i;
- int err;
-
- /* find a free entry to use */
- index = -1;
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- if (!mlxsw_sp->span.entries[i].used) {
- index = i;
- span_entry = &mlxsw_sp->span.entries[i];
- break;
- }
- }
- if (index < 0)
- return NULL;
-
- /* create a new port analyzer entry for local_port */
- mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
- if (err)
- return NULL;
-
- span_entry->used = true;
- span_entry->id = index;
- span_entry->ref_count = 1;
- span_entry->local_port = local_port;
- return span_entry;
-}
-
-static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_span_entry *span_entry)
-{
- u8 local_port = span_entry->local_port;
- char mpat_pl[MLXSW_REG_MPAT_LEN];
- int pa_id = span_entry->id;
-
- mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
- span_entry->used = false;
-}
-
-struct mlxsw_sp_span_entry *
-mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port)
-{
- int i;
-
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
-
- if (curr->used && curr->local_port == local_port)
- return curr;
- }
- return NULL;
-}
-
-static struct mlxsw_sp_span_entry
-*mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
-{
- struct mlxsw_sp_span_entry *span_entry;
-
- span_entry = mlxsw_sp_span_entry_find(port->mlxsw_sp,
- port->local_port);
- if (span_entry) {
- /* Already exists, just take a reference */
- span_entry->ref_count++;
- return span_entry;
- }
-
- return mlxsw_sp_span_entry_create(port);
-}
-
-static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_span_entry *span_entry)
-{
- WARN_ON(!span_entry->ref_count);
- if (--span_entry->ref_count == 0)
- mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
- return 0;
-}
-
-static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
-{
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- struct mlxsw_sp_span_inspected_port *p;
- int i;
-
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
-
- list_for_each_entry(p, &curr->bound_ports_list, list)
- if (p->local_port == port->local_port &&
- p->type == MLXSW_SP_SPAN_EGRESS)
- return true;
- }
-
- return false;
-}
-
-static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
- int mtu)
-{
- return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
-}
-
-static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
-{
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char sbib_pl[MLXSW_REG_SBIB_LEN];
- int err;
-
- /* If port is egress mirrored, the shared buffer size should be
- * updated according to the mtu value
- */
- if (mlxsw_sp_span_is_egress_mirror(port)) {
- u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);
-
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
- if (err) {
- netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
- return err;
- }
- }
-
- return 0;
-}
-
-static struct mlxsw_sp_span_inspected_port *
-mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
- struct mlxsw_sp_span_entry *span_entry)
-{
- struct mlxsw_sp_span_inspected_port *p;
-
- list_for_each_entry(p, &span_entry->bound_ports_list, list)
- if (port->local_port == p->local_port)
- return p;
- return NULL;
-}
-
-static int
-mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
- struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type,
- bool bind)
-{
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char mpar_pl[MLXSW_REG_MPAR_LEN];
- int pa_id = span_entry->id;
-
- /* bind the port to the SPAN entry */
- mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
- (enum mlxsw_reg_mpar_i_e) type, bind, pa_id);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
-}
-
-static int
-mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
- struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type,
- bool bind)
-{
- struct mlxsw_sp_span_inspected_port *inspected_port;
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char sbib_pl[MLXSW_REG_SBIB_LEN];
- int err;
-
- /* if it is an egress SPAN, bind a shared buffer to it */
- if (type == MLXSW_SP_SPAN_EGRESS) {
- u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
- port->dev->mtu);
-
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
- if (err) {
- netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
- return err;
- }
- }
-
- if (bind) {
- err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
- true);
- if (err)
- goto err_port_bind;
- }
-
- inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
- if (!inspected_port) {
- err = -ENOMEM;
- goto err_inspected_port_alloc;
- }
- inspected_port->local_port = port->local_port;
- inspected_port->type = type;
- list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
-
- return 0;
-
-err_inspected_port_alloc:
- if (bind)
- mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
- false);
-err_port_bind:
- if (type == MLXSW_SP_SPAN_EGRESS) {
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
- }
- return err;
-}
-
-static void
-mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
- struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type,
- bool bind)
-{
- struct mlxsw_sp_span_inspected_port *inspected_port;
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char sbib_pl[MLXSW_REG_SBIB_LEN];
-
- inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
- if (!inspected_port)
- return;
-
- if (bind)
- mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
- false);
- /* remove the SBIB buffer if it was egress SPAN */
- if (type == MLXSW_SP_SPAN_EGRESS) {
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
- }
-
- mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
-
- list_del(&inspected_port->list);
- kfree(inspected_port);
-}
-
-int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
- struct mlxsw_sp_port *to,
- enum mlxsw_sp_span_type type, bool bind)
-{
- struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
- struct mlxsw_sp_span_entry *span_entry;
- int err;
-
- span_entry = mlxsw_sp_span_entry_get(to);
- if (!span_entry)
- return -ENOENT;
-
- netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
- span_entry->id);
-
- err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
- if (err)
- goto err_port_bind;
-
- return 0;
-
-err_port_bind:
- mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
- return err;
-}
-
-void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, u8 destination_port,
- enum mlxsw_sp_span_type type, bool bind)
-{
- struct mlxsw_sp_span_entry *span_entry;
-
- span_entry = mlxsw_sp_span_entry_find(from->mlxsw_sp,
- destination_port);
- if (!span_entry) {
- netdev_err(from->dev, "no span entry found\n");
- return;
- }
-
- netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
- span_entry->id);
- mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
-}
-
static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool enable, u32 rate)
{
@@ -1360,6 +1040,16 @@ mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
xstats->tail_drop[i] =
mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
}
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
+ i, ppcnt_pl);
+ if (err)
+ continue;
+
+ xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
+ xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
+ }
}
static void update_stats_cache(struct work_struct *work)
@@ -1459,6 +1149,7 @@ mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
}
mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
+ mlxsw_sp_port_vlan->ref_count = 1;
mlxsw_sp_port_vlan->vid = vid;
list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
@@ -1486,8 +1177,10 @@ mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
- if (mlxsw_sp_port_vlan)
+ if (mlxsw_sp_port_vlan) {
+ mlxsw_sp_port_vlan->ref_count++;
return mlxsw_sp_port_vlan;
+ }
return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
}
@@ -1496,6 +1189,9 @@ void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ if (--mlxsw_sp_port_vlan->ref_count != 0)
+ return;
+
if (mlxsw_sp_port_vlan->bridge_port)
mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
else if (fid)
@@ -1578,7 +1274,6 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
bool ingress)
{
enum mlxsw_sp_span_type span_type;
- struct mlxsw_sp_port *to_port;
struct net_device *to_dev;
to_dev = tcf_mirred_dev(a);
@@ -1587,17 +1282,10 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
return -EINVAL;
}
- if (!mlxsw_sp_port_dev_check(to_dev)) {
- netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port");
- return -EOPNOTSUPP;
- }
- to_port = netdev_priv(to_dev);
-
- mirror->to_local_port = to_port->local_port;
mirror->ingress = ingress;
span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type,
- true);
+ return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_dev, span_type,
+ true, &mirror->span_id);
}
static void
@@ -1608,7 +1296,7 @@ mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
span_type = mirror->ingress ?
MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->to_local_port,
+ mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
span_type, true);
}
@@ -2702,7 +2390,7 @@ static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
int err;
autoneg = mlxsw_sp_port->link.autoneg;
- mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
+ mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
if (err)
return err;
@@ -2736,7 +2424,7 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
bool autoneg;
int err;
- mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
+ mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
if (err)
return err;
@@ -2754,7 +2442,7 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
}
mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
- eth_proto_new);
+ eth_proto_new, autoneg);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
if (err)
return err;
@@ -2965,7 +2653,7 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
- eth_proto_admin);
+ eth_proto_admin, mlxsw_sp_port->link.autoneg);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
@@ -3692,6 +3380,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
/* Multicast Router Traps */
MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
+ MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
@@ -3995,14 +3684,24 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_afa_init;
}
+ err = mlxsw_sp_span_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
+ goto err_span_init;
+ }
+
+ /* Initialize router after SPAN is initialized, so that the FIB and
+ * neighbor event handlers can issue SPAN respin.
+ */
err = mlxsw_sp_router_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
goto err_router_init;
}
- /* Initialize netdevice notifier after router is initialized, so that
- * the event handler can use router structures.
+ /* Initialize netdevice notifier after router and SPAN are initialized,
+ * so that the event handler can use router structures and call SPAN
+ * respin.
*/
mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
@@ -4011,12 +3710,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_netdev_notifier;
}
- err = mlxsw_sp_span_init(mlxsw_sp);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
- goto err_span_init;
- }
-
err = mlxsw_sp_acl_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
@@ -4042,12 +3735,12 @@ err_ports_create:
err_dpipe_init:
mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
- mlxsw_sp_span_fini(mlxsw_sp);
-err_span_init:
unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
err_netdev_notifier:
mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
+ mlxsw_sp_span_fini(mlxsw_sp);
+err_span_init:
mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
mlxsw_sp_counter_pool_fini(mlxsw_sp);
@@ -4073,9 +3766,9 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_ports_remove(mlxsw_sp);
mlxsw_sp_dpipe_fini(mlxsw_sp);
mlxsw_sp_acl_fini(mlxsw_sp);
- mlxsw_sp_span_fini(mlxsw_sp);
unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
mlxsw_sp_router_fini(mlxsw_sp);
+ mlxsw_sp_span_fini(mlxsw_sp);
mlxsw_sp_afa_fini(mlxsw_sp);
mlxsw_sp_counter_pool_fini(mlxsw_sp);
mlxsw_sp_switchdev_fini(mlxsw_sp);
@@ -4087,12 +3780,8 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
}
static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
- .used_max_vepa_channels = 1,
- .max_vepa_channels = 0,
.used_max_mid = 1,
.max_mid = MLXSW_SP_MID_MAX,
- .used_max_pgt = 1,
- .max_pgt = 0,
.used_flood_tables = 1,
.used_flood_mode = 1,
.flood_mode = 3,
@@ -4104,8 +3793,7 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
.max_ib_mc = 0,
.used_max_pkey = 1,
.max_pkey = 0,
- .used_kvd_split_data = 1,
- .kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
+ .used_kvd_sizes = 1,
.kvd_hash_single_parts = 59,
.kvd_hash_double_parts = 41,
.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
@@ -4115,73 +3803,8 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
.type = MLXSW_PORT_SWID_TYPE_ETH,
}
},
- .resource_query_enable = 1,
};
-static bool
-mlxsw_sp_resource_kvd_granularity_validate(struct netlink_ext_ack *extack,
- u64 size)
-{
- const struct mlxsw_config_profile *profile;
-
- profile = &mlxsw_sp_config_profile;
- if (size % profile->kvd_hash_granularity) {
- NL_SET_ERR_MSG_MOD(extack, "resource set with wrong granularity");
- return false;
- }
- return true;
-}
-
-static int
-mlxsw_sp_resource_kvd_size_validate(struct devlink *devlink, u64 size,
- struct netlink_ext_ack *extack)
-{
- NL_SET_ERR_MSG_MOD(extack, "kvd size cannot be changed");
- return -EINVAL;
-}
-
-static int
-mlxsw_sp_resource_kvd_linear_size_validate(struct devlink *devlink, u64 size,
- struct netlink_ext_ack *extack)
-{
- if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
- return -EINVAL;
-
- return 0;
-}
-
-static int
-mlxsw_sp_resource_kvd_hash_single_size_validate(struct devlink *devlink, u64 size,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
-
- if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
- return -EINVAL;
-
- if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE)) {
- NL_SET_ERR_MSG_MOD(extack, "hash single size is smaller than minimum");
- return -EINVAL;
- }
- return 0;
-}
-
-static int
-mlxsw_sp_resource_kvd_hash_double_size_validate(struct devlink *devlink, u64 size,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
-
- if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
- return -EINVAL;
-
- if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) {
- NL_SET_ERR_MSG_MOD(extack, "hash double size is smaller than minimum");
- return -EINVAL;
- }
- return 0;
-}
-
static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
@@ -4190,30 +3813,16 @@ static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink)
return mlxsw_sp_kvdl_occ_get(mlxsw_sp);
}
-static struct devlink_resource_ops mlxsw_sp_resource_kvd_ops = {
- .size_validate = mlxsw_sp_resource_kvd_size_validate,
-};
-
-static struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = {
- .size_validate = mlxsw_sp_resource_kvd_linear_size_validate,
+static const struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = {
.occ_get = mlxsw_sp_resource_kvd_linear_occ_get,
};
-static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_single_ops = {
- .size_validate = mlxsw_sp_resource_kvd_hash_single_size_validate,
-};
-
-static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = {
- .size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate,
-};
-
-static struct devlink_resource_size_params mlxsw_sp_kvd_size_params;
-static struct devlink_resource_size_params mlxsw_sp_linear_size_params;
-static struct devlink_resource_size_params mlxsw_sp_hash_single_size_params;
-static struct devlink_resource_size_params mlxsw_sp_hash_double_size_params;
-
static void
-mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core)
+mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
+ struct devlink_resource_size_params *kvd_size_params,
+ struct devlink_resource_size_params *linear_size_params,
+ struct devlink_resource_size_params *hash_double_size_params,
+ struct devlink_resource_size_params *hash_single_size_params)
{
u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
KVD_SINGLE_MIN_SIZE);
@@ -4222,37 +3831,35 @@ mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core)
u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
u32 linear_size_min = 0;
- /* KVD top resource */
- mlxsw_sp_kvd_size_params.size_min = kvd_size;
- mlxsw_sp_kvd_size_params.size_max = kvd_size;
- mlxsw_sp_kvd_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
- mlxsw_sp_kvd_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
-
- /* Linear part init */
- mlxsw_sp_linear_size_params.size_min = linear_size_min;
- mlxsw_sp_linear_size_params.size_max = kvd_size - single_size_min -
- double_size_min;
- mlxsw_sp_linear_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
- mlxsw_sp_linear_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
-
- /* Hash double part init */
- mlxsw_sp_hash_double_size_params.size_min = double_size_min;
- mlxsw_sp_hash_double_size_params.size_max = kvd_size - single_size_min -
- linear_size_min;
- mlxsw_sp_hash_double_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
- mlxsw_sp_hash_double_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
-
- /* Hash single part init */
- mlxsw_sp_hash_single_size_params.size_min = single_size_min;
- mlxsw_sp_hash_single_size_params.size_max = kvd_size - double_size_min -
- linear_size_min;
- mlxsw_sp_hash_single_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
- mlxsw_sp_hash_single_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+ devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
+ MLXSW_SP_KVD_GRANULARITY,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ devlink_resource_size_params_init(linear_size_params, linear_size_min,
+ kvd_size - single_size_min -
+ double_size_min,
+ MLXSW_SP_KVD_GRANULARITY,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ devlink_resource_size_params_init(hash_double_size_params,
+ double_size_min,
+ kvd_size - single_size_min -
+ linear_size_min,
+ MLXSW_SP_KVD_GRANULARITY,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ devlink_resource_size_params_init(hash_single_size_params,
+ single_size_min,
+ kvd_size - double_size_min -
+ linear_size_min,
+ MLXSW_SP_KVD_GRANULARITY,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
}
static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_resource_size_params hash_single_size_params;
+ struct devlink_resource_size_params hash_double_size_params;
+ struct devlink_resource_size_params linear_size_params;
+ struct devlink_resource_size_params kvd_size_params;
u32 kvd_size, single_size, double_size, linear_size;
const struct mlxsw_config_profile *profile;
int err;
@@ -4261,48 +3868,55 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
return -EIO;
- mlxsw_sp_resource_size_params_prepare(mlxsw_core);
+ mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
+ &linear_size_params,
+ &hash_double_size_params,
+ &hash_single_size_params);
+
kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
- true, kvd_size,
- MLXSW_SP_RESOURCE_KVD,
+ kvd_size, MLXSW_SP_RESOURCE_KVD,
DEVLINK_RESOURCE_ID_PARENT_TOP,
- &mlxsw_sp_kvd_size_params,
- &mlxsw_sp_resource_kvd_ops);
+ &kvd_size_params,
+ NULL);
if (err)
return err;
linear_size = profile->kvd_linear_size;
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
- false, linear_size,
+ linear_size,
MLXSW_SP_RESOURCE_KVD_LINEAR,
MLXSW_SP_RESOURCE_KVD,
- &mlxsw_sp_linear_size_params,
+ &linear_size_params,
&mlxsw_sp_resource_kvd_linear_ops);
if (err)
return err;
+ err = mlxsw_sp_kvdl_resources_register(mlxsw_core);
+ if (err)
+ return err;
+
double_size = kvd_size - linear_size;
double_size *= profile->kvd_hash_double_parts;
double_size /= profile->kvd_hash_double_parts +
profile->kvd_hash_single_parts;
- double_size = rounddown(double_size, profile->kvd_hash_granularity);
+ double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
- false, double_size,
+ double_size,
MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
MLXSW_SP_RESOURCE_KVD,
- &mlxsw_sp_hash_double_size_params,
- &mlxsw_sp_resource_kvd_hash_double_ops);
+ &hash_double_size_params,
+ NULL);
if (err)
return err;
single_size = kvd_size - double_size - linear_size;
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
- false, single_size,
+ single_size,
MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
MLXSW_SP_RESOURCE_KVD,
- &mlxsw_sp_hash_single_size_params,
- &mlxsw_sp_resource_kvd_hash_single_ops);
+ &hash_single_size_params,
+ NULL);
if (err)
return err;
@@ -4319,8 +3933,7 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
int err;
if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
- !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
- !profile->used_kvd_split_data)
+ !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
return -EIO;
/* The hash part is what is left of the kvd without the
@@ -4346,7 +3959,7 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
double_size /= profile->kvd_hash_double_parts +
profile->kvd_hash_single_parts;
*p_double_size = rounddown(double_size,
- profile->kvd_hash_granularity);
+ MLXSW_SP_KVD_GRANULARITY);
}
err = devlink_resource_size_get(devlink,
@@ -4388,6 +4001,7 @@ static struct mlxsw_driver mlxsw_sp_driver = {
.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp_config_profile,
+ .res_query_enabled = true,
};
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
@@ -4556,13 +4170,11 @@ mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
u16 lag_id;
if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
- NL_SET_ERR_MSG(extack,
- "spectrum: Exceeded number of supported LAG devices");
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
return false;
}
if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
- NL_SET_ERR_MSG(extack,
- "spectrum: LAG device using unsupported Tx type");
+ NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
return false;
}
return true;
@@ -4804,8 +4416,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
!netif_is_lag_master(upper_dev) &&
!netif_is_bridge_master(upper_dev) &&
!netif_is_ovs_master(upper_dev)) {
- NL_SET_ERR_MSG(extack,
- "spectrum: Unknown upper device type");
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
return -EINVAL;
}
if (!info->linking)
@@ -4814,8 +4425,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
(!netif_is_bridge_master(upper_dev) ||
!mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
upper_dev))) {
- NL_SET_ERR_MSG(extack,
- "spectrum: Enslaving a port to a device that already has an upper device is not supported");
+ NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
return -EINVAL;
}
if (netif_is_lag_master(upper_dev) &&
@@ -4823,24 +4433,20 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
info->upper_info, extack))
return -EINVAL;
if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
- NL_SET_ERR_MSG(extack,
- "spectrum: Master device is a LAG master and this device has a VLAN");
+ NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
return -EINVAL;
}
if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
!netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
- NL_SET_ERR_MSG(extack,
- "spectrum: Can not put a VLAN on a LAG port");
+ NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
return -EINVAL;
}
if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
- NL_SET_ERR_MSG(extack,
- "spectrum: Master device is an OVS master and this device has a VLAN");
+ NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
return -EINVAL;
}
if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
- NL_SET_ERR_MSG(extack,
- "spectrum: Can not put a VLAN on an OVS port");
+ NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
return -EINVAL;
}
break;
@@ -4953,7 +4559,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
case NETDEV_PRECHANGEUPPER:
upper_dev = info->upper_dev;
if (!netif_is_bridge_master(upper_dev)) {
- NL_SET_ERR_MSG(extack, "spectrum: VLAN devices only support bridge and VRF uppers");
+ NL_SET_ERR_MSG_MOD(extack, "VLAN devices only support bridge and VRF uppers");
return -EINVAL;
}
if (!info->linking)
@@ -4962,7 +4568,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
(!netif_is_bridge_master(upper_dev) ||
!mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
upper_dev))) {
- NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported");
+ NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
return -EINVAL;
}
break;
@@ -5040,10 +4646,18 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct mlxsw_sp_span_entry *span_entry;
struct mlxsw_sp *mlxsw_sp;
int err = 0;
mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
+ if (event == NETDEV_UNREGISTER) {
+ span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
+ if (span_entry)
+ mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
+ }
+ mlxsw_sp_span_respin(mlxsw_sp);
+
if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
event, ptr);
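Among the spectrum.c changes above, mlxsw_sp_port_vlan_get()/_put() now reference-count the per-port VLAN entry, so nested users of the same {port, VID} pair share a single object. The fragment below is an illustrative, hypothetical caller showing the intended get/put pairing; the error-return convention of _get() is not visible in this hunk and is assumed.

/* Illustrative only: balanced use of the now reference-counted port-VLAN
 * API. Whether _get() reports errors via ERR_PTR() or NULL is assumed here.
 */
static int example_port_vlan_user(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* The first get for a VID creates the entry with ref_count == 1;
	 * later gets only bump the counter.
	 */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid);
	if (IS_ERR_OR_NULL(mlxsw_sp_port_vlan))
		return -ENOENT;

	/* ... use the {port, VID} binding ... */

	/* Every get must be balanced by a put; the entry is torn down only
	 * when the last reference is dropped.
	 */
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
	return 0;
}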
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index bdd8f94a452c..82820ba43728 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -70,16 +70,23 @@
#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR "linear"
#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE "hash_single"
#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE "hash_double"
+#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES "singles"
+#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS "chunks"
+#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS "large_chunks"
enum mlxsw_sp_resource_id {
- MLXSW_SP_RESOURCE_KVD,
+ MLXSW_SP_RESOURCE_KVD = 1,
MLXSW_SP_RESOURCE_KVD_LINEAR,
MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
};
struct mlxsw_sp_port;
struct mlxsw_sp_rif;
+struct mlxsw_sp_span_entry;
struct mlxsw_sp_upper {
struct net_device *dev;
@@ -111,32 +118,13 @@ struct mlxsw_sp_mid {
unsigned long *ports_in_mid; /* bits array */
};
-enum mlxsw_sp_span_type {
- MLXSW_SP_SPAN_EGRESS,
- MLXSW_SP_SPAN_INGRESS
-};
-
-struct mlxsw_sp_span_inspected_port {
- struct list_head list;
- enum mlxsw_sp_span_type type;
- u8 local_port;
-};
-
-struct mlxsw_sp_span_entry {
- u8 local_port;
- bool used;
- struct list_head bound_ports_list;
- int ref_count;
- int id;
-};
-
enum mlxsw_sp_port_mall_action_type {
MLXSW_SP_PORT_MALL_MIRROR,
MLXSW_SP_PORT_MALL_SAMPLE,
};
struct mlxsw_sp_port_mall_mirror_tc_entry {
- u8 to_local_port;
+ int span_id;
bool ingress;
};
@@ -211,6 +199,7 @@ struct mlxsw_sp_port_vlan {
struct list_head list;
struct mlxsw_sp_port *mlxsw_sp_port;
struct mlxsw_sp_fid *fid;
+ unsigned int ref_count;
u16 vid;
struct mlxsw_sp_bridge_port *bridge_port;
struct list_head bridge_vlan_node;
@@ -222,6 +211,8 @@ struct mlxsw_sp_port_xstats {
u64 wred_drop[TC_MAX_QUEUE];
u64 tail_drop[TC_MAX_QUEUE];
u64 backlog[TC_MAX_QUEUE];
+ u64 tx_bytes[IEEE_8021QAZ_MAX_TCS];
+ u64 tx_packets[IEEE_8021QAZ_MAX_TCS];
};
struct mlxsw_sp_port {
@@ -259,6 +250,7 @@ struct mlxsw_sp_port {
struct mlxsw_sp_port_sample *sample;
struct list_head vlans_list;
struct mlxsw_sp_qdisc *root_qdisc;
+ struct mlxsw_sp_qdisc *tclass_qdiscs;
unsigned acl_rule_count;
struct mlxsw_sp_acl_block *ing_acl_block;
struct mlxsw_sp_acl_block *eg_acl_block;
@@ -396,16 +388,6 @@ struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev);
-int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
- struct mlxsw_sp_port *to,
- enum mlxsw_sp_span_type type,
- bool bind);
-void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from,
- u8 destination_port,
- enum mlxsw_sp_span_type type,
- bool bind);
-struct mlxsw_sp_span_entry *
-mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port);
/* spectrum_dcb.c */
#ifdef CONFIG_MLXSW_SPECTRUM_DCB
@@ -461,6 +443,7 @@ int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
unsigned int entry_count,
unsigned int *p_alloc_size);
u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core);
struct mlxsw_sp_acl_rule_info {
unsigned int priority;
@@ -552,6 +535,7 @@ void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
u16 group_id);
+int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
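spectrum.h above now declares mlxsw_sp_kvdl_resources_register(), which mlxsw_sp_resources_register() calls to publish the "singles", "chunks" and "large_chunks" sub-resources under the KVD linear parent. Its body lives in spectrum_kvdl.c and is not part of this excerpt; the sketch below only illustrates, under stated assumptions, how one such sub-resource could be registered with the post-patch devlink_resource_register() signature. The size expression and the absence of an occupancy callback are assumptions, not taken from the patch.

/* Sketch, not the actual spectrum_kvdl.c implementation: registering the
 * "singles" KVD-linear sub-resource under the linear parent. The size is
 * assumed to be derived from the existing part bounds.
 */
static int example_kvdl_singles_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params size_params;
	u64 size = MLXSW_SP_KVDL_SINGLE_END - MLXSW_SP_KVDL_SINGLE_BASE + 1;

	devlink_resource_size_params_init(&size_params, 0, size, 1,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	return devlink_resource_register(devlink,
					 MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
					 size,
					 MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
					 MLXSW_SP_RESOURCE_KVD_LINEAR,
					 &size_params, NULL);
}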
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 0897a5435cc2..79b1fa27a9a4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -160,6 +160,13 @@ bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block)
return block->disable_count;
}
+static bool
+mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
+{
+ /* We hold a reference on ruleset ourselves */
+ return ruleset->ref_count == 2;
+}
+
static int
mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
@@ -341,21 +348,8 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_ht_insert;
- if (!chain_index) {
- /* We only need ruleset with chain index 0, the implicit one,
- * to be directly bound to device. The rest of the rulesets
- * are bound by "Goto action set".
- */
- err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
- if (err)
- goto err_ruleset_bind;
- }
-
return ruleset;
-err_ruleset_bind:
- rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
- mlxsw_sp_acl_ruleset_ht_params);
err_ht_insert:
ops->ruleset_del(mlxsw_sp, ruleset->priv);
err_ops_ruleset_add:
@@ -369,12 +363,8 @@ static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset)
{
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
- struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
- u32 chain_index = ruleset->ht_key.chain_index;
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
- if (!chain_index)
- mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
mlxsw_sp_acl_ruleset_ht_params);
ops->ruleset_del(mlxsw_sp, ruleset->priv);
@@ -528,6 +518,11 @@ int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
return mlxsw_afa_block_jump(rulei->act_block, group_id);
}
+int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei)
+{
+ return mlxsw_afa_block_terminate(rulei->act_block);
+}
+
int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
{
return mlxsw_afa_block_append_drop(rulei->act_block);
@@ -572,7 +567,6 @@ int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
struct net_device *out_dev)
{
struct mlxsw_sp_acl_block_binding *binding;
- struct mlxsw_sp_port *out_port;
struct mlxsw_sp_port *in_port;
if (!list_is_singular(&block->binding_list))
@@ -581,16 +575,10 @@ int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
binding = list_first_entry(&block->binding_list,
struct mlxsw_sp_acl_block_binding, list);
in_port = binding->mlxsw_sp_port;
- if (!mlxsw_sp_port_dev_check(out_dev))
- return -EINVAL;
-
- out_port = netdev_priv(out_dev);
- if (out_port->mlxsw_sp != mlxsw_sp)
- return -EINVAL;
return mlxsw_afa_block_append_mirror(rulei->act_block,
in_port->local_port,
- out_port->local_port,
+ out_dev,
binding->ingress);
}
@@ -695,10 +683,25 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_rhashtable_insert;
+ if (!ruleset->ht_key.chain_index &&
+ mlxsw_sp_acl_ruleset_is_singular(ruleset)) {
+ /* We only need ruleset with chain index 0, the implicit
+ * one, to be directly bound to device. The rest of the
+ * rulesets are bound by "Goto action set".
+ */
+ err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset,
+ ruleset->ht_key.block);
+ if (err)
+ goto err_ruleset_block_bind;
+ }
+
list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
ruleset->ht_key.block->rule_count++;
return 0;
+err_ruleset_block_bind:
+ rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
+ mlxsw_sp_acl_rule_ht_params);
err_rhashtable_insert:
ops->rule_del(mlxsw_sp, rule->priv);
return err;
@@ -712,6 +715,10 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
ruleset->ht_key.block->rule_count--;
list_del(&rule->list);
+ if (!ruleset->ht_key.chain_index &&
+ mlxsw_sp_acl_ruleset_is_singular(ruleset))
+ mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset,
+ ruleset->ht_key.block);
rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
mlxsw_sp_acl_rule_ht_params);
ops->rule_del(mlxsw_sp, rule->priv);
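The spectrum_acl.c hunks above defer binding of the implicit chain-0 ruleset: instead of binding it to the device at ruleset creation, mlxsw_sp_acl_rule_add() binds it when the first rule is inserted and mlxsw_sp_acl_rule_del() unbinds it when the last rule is removed, with mlxsw_sp_acl_ruleset_is_singular() detecting those transitions. The sketch below shows the caller's view using only the two entry points visible in this diff; the wrapper function itself is hypothetical.

/* Illustrative only: the caller's sequence is unchanged, only the point at
 * which the chain-0 ruleset is (un)bound to the device has moved.
 */
static int example_acl_rule_lifetime(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule *rule)
{
	int err;

	/* If this is the first rule of the implicit chain-0 ruleset,
	 * mlxsw_sp_acl_rule_add() now also binds the ruleset to the device.
	 */
	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		return err;

	/* ... rule in use ... */

	/* Removing the last rule also unbinds the chain-0 ruleset. */
	mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
	return 0;
}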
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
index 6ca6894125f0..510ce48d87f7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
@@ -1,6 +1,6 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017, 2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
* Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
*
@@ -35,6 +35,7 @@
#include "spectrum_acl_flex_actions.h"
#include "core_acl_flex_actions.h"
+#include "spectrum_span.h"
#define MLXSW_SP_KVDL_ACT_EXT_SIZE 1
@@ -125,40 +126,23 @@ mlxsw_sp_act_counter_index_put(void *priv, unsigned int counter_index)
}
static int
-mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port, u8 local_out_port,
+mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port,
+ const struct net_device *out_dev,
bool ingress, int *p_span_id)
{
- struct mlxsw_sp_port *in_port, *out_port;
- struct mlxsw_sp_span_entry *span_entry;
+ struct mlxsw_sp_port *in_port;
struct mlxsw_sp *mlxsw_sp = priv;
enum mlxsw_sp_span_type type;
- int err;
type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- out_port = mlxsw_sp->ports[local_out_port];
in_port = mlxsw_sp->ports[local_in_port];
- err = mlxsw_sp_span_mirror_add(in_port, out_port, type, false);
- if (err)
- return err;
-
- span_entry = mlxsw_sp_span_entry_find(mlxsw_sp, local_out_port);
- if (!span_entry) {
- err = -ENOENT;
- goto err_span_entry_find;
- }
-
- *p_span_id = span_entry->id;
- return 0;
-
-err_span_entry_find:
- mlxsw_sp_span_mirror_del(in_port, local_out_port, type, false);
- return err;
+ return mlxsw_sp_span_mirror_add(in_port, out_dev, type,
+ false, p_span_id);
}
static void
-mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, u8 local_out_port,
- bool ingress)
+mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, int span_id, bool ingress)
{
struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_port *in_port;
@@ -167,7 +151,7 @@ mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, u8 local_out_port,
type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
in_port = mlxsw_sp->ports[local_in_port];
- mlxsw_sp_span_mirror_del(in_port, local_out_port, type, false);
+ mlxsw_sp_span_mirror_del(in_port, span_id, type, false);
}
static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h
index 2726192836ad..bd6d552d95b9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h
@@ -33,8 +33,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H
-#define _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H
+#ifndef _MLXSW_SPECTRUM_ACL_FLEX_ACTIONS_H
+#define _MLXSW_SPECTRUM_ACL_FLEX_ACTIONS_H
#include "spectrum.h"
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index c6e180c2be1e..ad1b548e3cac 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -228,10 +228,6 @@ mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
- if (err)
- goto err_group_update;
-
err = rhashtable_init(&group->chunk_ht,
&mlxsw_sp_acl_tcam_chunk_ht_params);
if (err)
@@ -240,7 +236,6 @@ mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
return 0;
err_rhashtable_init:
-err_group_update:
mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 93728c694e6d..0a9adc5962fb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -385,13 +385,13 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_SB_CM(10000, 0, 0),
+ MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index bbd238e50f05..54262af4e98f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -112,11 +112,11 @@ static const int mlxsw_sp_sfgc_bc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
[MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1,
[MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1,
[MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1,
+ [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1,
};
static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
[MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1,
- [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1,
};
static const int *mlxsw_sp_packet_type_sfgc_types[] = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 6ce00e28d4ea..89dbf569dff5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -65,7 +65,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
if (is_tcf_gact_ok(a)) {
- err = mlxsw_sp_acl_rulei_act_continue(rulei);
+ err = mlxsw_sp_acl_rulei_act_terminate(rulei);
if (err)
return err;
} else if (is_tcf_gact_shot(a)) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 7502e53447bd..98d896c14b87 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -1,7 +1,7 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Petr Machata <petrm@mellanox.com>
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -33,126 +33,125 @@
*/
#include <net/ip_tunnels.h>
+#include <net/ip6_tunnel.h>
#include "spectrum_ipip.h"
struct ip_tunnel_parm
-mlxsw_sp_ipip_netdev_parms(const struct net_device *ol_dev)
+mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev)
{
struct ip_tunnel *tun = netdev_priv(ol_dev);
return tun->parms;
}
-static bool mlxsw_sp_ipip_parms_has_ikey(struct ip_tunnel_parm parms)
+struct __ip6_tnl_parm
+mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev)
+{
+ struct ip6_tnl *tun = netdev_priv(ol_dev);
+
+ return tun->parms;
+}
+
+static bool mlxsw_sp_ipip_parms4_has_ikey(struct ip_tunnel_parm parms)
{
return !!(parms.i_flags & TUNNEL_KEY);
}
-static bool mlxsw_sp_ipip_parms_has_okey(struct ip_tunnel_parm parms)
+static bool mlxsw_sp_ipip_parms4_has_okey(struct ip_tunnel_parm parms)
{
return !!(parms.o_flags & TUNNEL_KEY);
}
-static u32 mlxsw_sp_ipip_parms_ikey(struct ip_tunnel_parm parms)
+static u32 mlxsw_sp_ipip_parms4_ikey(struct ip_tunnel_parm parms)
{
- return mlxsw_sp_ipip_parms_has_ikey(parms) ?
+ return mlxsw_sp_ipip_parms4_has_ikey(parms) ?
be32_to_cpu(parms.i_key) : 0;
}
-static u32 mlxsw_sp_ipip_parms_okey(struct ip_tunnel_parm parms)
+static u32 mlxsw_sp_ipip_parms4_okey(struct ip_tunnel_parm parms)
{
- return mlxsw_sp_ipip_parms_has_okey(parms) ?
+ return mlxsw_sp_ipip_parms4_has_okey(parms) ?
be32_to_cpu(parms.o_key) : 0;
}
-static __be32 mlxsw_sp_ipip_parms_saddr4(struct ip_tunnel_parm parms)
+static union mlxsw_sp_l3addr
+mlxsw_sp_ipip_parms4_saddr(struct ip_tunnel_parm parms)
{
- return parms.iph.saddr;
+ return (union mlxsw_sp_l3addr) { .addr4 = parms.iph.saddr };
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms_saddr(enum mlxsw_sp_l3proto proto,
- struct ip_tunnel_parm parms)
+mlxsw_sp_ipip_parms6_saddr(struct __ip6_tnl_parm parms)
{
- switch (proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- return (union mlxsw_sp_l3addr) {
- .addr4 = mlxsw_sp_ipip_parms_saddr4(parms),
- };
- case MLXSW_SP_L3_PROTO_IPV6:
- break;
- }
-
- WARN_ON(1);
- return (union mlxsw_sp_l3addr) {
- .addr4 = 0,
- };
+ return (union mlxsw_sp_l3addr) { .addr6 = parms.laddr };
}
-static __be32 mlxsw_sp_ipip_parms_daddr4(struct ip_tunnel_parm parms)
+static union mlxsw_sp_l3addr
+mlxsw_sp_ipip_parms4_daddr(struct ip_tunnel_parm parms)
{
- return parms.iph.daddr;
+ return (union mlxsw_sp_l3addr) { .addr4 = parms.iph.daddr };
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms_daddr(enum mlxsw_sp_l3proto proto,
- struct ip_tunnel_parm parms)
+mlxsw_sp_ipip_parms6_daddr(struct __ip6_tnl_parm parms)
+{
+ return (union mlxsw_sp_l3addr) { .addr6 = parms.raddr };
+}
+
+union mlxsw_sp_l3addr
+mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
+ const struct net_device *ol_dev)
{
+ struct ip_tunnel_parm parms4;
+ struct __ip6_tnl_parm parms6;
+
switch (proto) {
case MLXSW_SP_L3_PROTO_IPV4:
- return (union mlxsw_sp_l3addr) {
- .addr4 = mlxsw_sp_ipip_parms_daddr4(parms),
- };
+ parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
+ return mlxsw_sp_ipip_parms4_saddr(parms4);
case MLXSW_SP_L3_PROTO_IPV6:
- break;
+ parms6 = mlxsw_sp_ipip_netdev_parms6(ol_dev);
+ return mlxsw_sp_ipip_parms6_saddr(parms6);
}
WARN_ON(1);
- return (union mlxsw_sp_l3addr) {
- .addr4 = 0,
- };
-}
-
-static bool mlxsw_sp_ipip_netdev_has_ikey(const struct net_device *ol_dev)
-{
- return mlxsw_sp_ipip_parms_has_ikey(mlxsw_sp_ipip_netdev_parms(ol_dev));
+ return (union mlxsw_sp_l3addr) {0};
}
-static bool mlxsw_sp_ipip_netdev_has_okey(const struct net_device *ol_dev)
+static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev)
{
- return mlxsw_sp_ipip_parms_has_okey(mlxsw_sp_ipip_netdev_parms(ol_dev));
-}
-static u32 mlxsw_sp_ipip_netdev_ikey(const struct net_device *ol_dev)
-{
- return mlxsw_sp_ipip_parms_ikey(mlxsw_sp_ipip_netdev_parms(ol_dev));
-}
+ struct ip_tunnel_parm parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
-static u32 mlxsw_sp_ipip_netdev_okey(const struct net_device *ol_dev)
-{
- return mlxsw_sp_ipip_parms_okey(mlxsw_sp_ipip_netdev_parms(ol_dev));
+ return mlxsw_sp_ipip_parms4_daddr(parms4).addr4;
}
-union mlxsw_sp_l3addr
-mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
+static union mlxsw_sp_l3addr
+mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto,
const struct net_device *ol_dev)
{
- return mlxsw_sp_ipip_parms_saddr(proto,
- mlxsw_sp_ipip_netdev_parms(ol_dev));
-}
+ struct ip_tunnel_parm parms4;
+ struct __ip6_tnl_parm parms6;
-static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev)
-{
- return mlxsw_sp_ipip_parms_daddr4(mlxsw_sp_ipip_netdev_parms(ol_dev));
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
+ return mlxsw_sp_ipip_parms4_daddr(parms4);
+ case MLXSW_SP_L3_PROTO_IPV6:
+ parms6 = mlxsw_sp_ipip_netdev_parms6(ol_dev);
+ return mlxsw_sp_ipip_parms6_daddr(parms6);
+ }
+
+ WARN_ON(1);
+ return (union mlxsw_sp_l3addr) {0};
}
-static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto,
- const struct net_device *ol_dev)
+bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr)
{
- return mlxsw_sp_ipip_parms_daddr(proto,
- mlxsw_sp_ipip_netdev_parms(ol_dev));
+ union mlxsw_sp_l3addr naddr = {0};
+
+ return !memcmp(&addr, &naddr, sizeof(naddr));
}
static int
@@ -176,12 +175,17 @@ mlxsw_sp_ipip_fib_entry_op_gre4_rtdp(struct mlxsw_sp *mlxsw_sp,
u32 tunnel_index,
struct mlxsw_sp_ipip_entry *ipip_entry)
{
- bool has_ikey = mlxsw_sp_ipip_netdev_has_ikey(ipip_entry->ol_dev);
u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
- u32 ikey = mlxsw_sp_ipip_netdev_ikey(ipip_entry->ol_dev);
char rtdp_pl[MLXSW_REG_RTDP_LEN];
+ struct ip_tunnel_parm parms;
unsigned int type_check;
+ bool has_ikey;
u32 daddr4;
+ u32 ikey;
+
+ parms = mlxsw_sp_ipip_netdev_parms4(ipip_entry->ol_dev);
+ has_ikey = mlxsw_sp_ipip_parms4_has_ikey(parms);
+ ikey = mlxsw_sp_ipip_parms4_ikey(parms);
mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index);
@@ -243,15 +247,14 @@ static bool mlxsw_sp_ipip_tunnel_complete(enum mlxsw_sp_l3proto proto,
{
union mlxsw_sp_l3addr saddr = mlxsw_sp_ipip_netdev_saddr(proto, ol_dev);
union mlxsw_sp_l3addr daddr = mlxsw_sp_ipip_netdev_daddr(proto, ol_dev);
- union mlxsw_sp_l3addr naddr = {0};
/* Tunnels with unset local or remote address are valid in Linux and
* used for lightweight tunnels (LWT) and Non-Broadcast Multi-Access
* (NBMA) tunnels. In principle these can be offloaded, but the driver
* currently doesn't support this. So punt.
*/
- return memcmp(&saddr, &naddr, sizeof(naddr)) &&
- memcmp(&daddr, &naddr, sizeof(naddr));
+ return !mlxsw_sp_l3addr_is_zero(saddr) &&
+ !mlxsw_sp_l3addr_is_zero(daddr);
}
static bool mlxsw_sp_ipip_can_offload_gre4(const struct mlxsw_sp *mlxsw_sp,
@@ -273,14 +276,15 @@ static struct mlxsw_sp_rif_ipip_lb_config
mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp,
const struct net_device *ol_dev)
{
+ struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
- lb_ipipt = mlxsw_sp_ipip_netdev_has_okey(ol_dev) ?
+ lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(parms) ?
MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_KEY_IN_IP :
MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_IN_IP;
return (struct mlxsw_sp_rif_ipip_lb_config){
.lb_ipipt = lb_ipipt,
- .okey = mlxsw_sp_ipip_netdev_okey(ol_dev),
+ .okey = mlxsw_sp_ipip_parms4_okey(parms),
.ul_protocol = MLXSW_SP_L3_PROTO_IPV4,
.saddr = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV4,
ol_dev),
@@ -300,16 +304,12 @@ mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp,
bool update_nhs = false;
int err = 0;
- new_parms = mlxsw_sp_ipip_netdev_parms(ipip_entry->ol_dev);
+ new_parms = mlxsw_sp_ipip_netdev_parms4(ipip_entry->ol_dev);
- new_saddr = mlxsw_sp_ipip_parms_saddr(MLXSW_SP_L3_PROTO_IPV4,
- new_parms);
- old_saddr = mlxsw_sp_ipip_parms_saddr(MLXSW_SP_L3_PROTO_IPV4,
- ipip_entry->parms);
- new_daddr = mlxsw_sp_ipip_parms_daddr(MLXSW_SP_L3_PROTO_IPV4,
- new_parms);
- old_daddr = mlxsw_sp_ipip_parms_daddr(MLXSW_SP_L3_PROTO_IPV4,
- ipip_entry->parms);
+ new_saddr = mlxsw_sp_ipip_parms4_saddr(new_parms);
+ old_saddr = mlxsw_sp_ipip_parms4_saddr(ipip_entry->parms4);
+ new_daddr = mlxsw_sp_ipip_parms4_daddr(new_parms);
+ old_daddr = mlxsw_sp_ipip_parms4_daddr(ipip_entry->parms4);
if (!mlxsw_sp_l3addr_eq(&new_saddr, &old_saddr)) {
u16 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
@@ -326,14 +326,14 @@ mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp,
}
update_tunnel = true;
- } else if ((mlxsw_sp_ipip_parms_okey(ipip_entry->parms) !=
- mlxsw_sp_ipip_parms_okey(new_parms)) ||
- ipip_entry->parms.link != new_parms.link) {
+ } else if ((mlxsw_sp_ipip_parms4_okey(ipip_entry->parms4) !=
+ mlxsw_sp_ipip_parms4_okey(new_parms)) ||
+ ipip_entry->parms4.link != new_parms.link) {
update_tunnel = true;
} else if (!mlxsw_sp_l3addr_eq(&new_daddr, &old_daddr)) {
update_nhs = true;
- } else if (mlxsw_sp_ipip_parms_ikey(ipip_entry->parms) !=
- mlxsw_sp_ipip_parms_ikey(new_parms)) {
+ } else if (mlxsw_sp_ipip_parms4_ikey(ipip_entry->parms4) !=
+ mlxsw_sp_ipip_parms4_ikey(new_parms)) {
update_decap = true;
}
@@ -350,7 +350,7 @@ mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp,
false, false, false,
extack);
- ipip_entry->parms = new_parms;
+ ipip_entry->parms4 = new_parms;
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
index 04b08d9d76e9..6909d867bb59 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
@@ -1,7 +1,7 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Petr Machata <petrm@mellanox.com>
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -37,14 +37,19 @@
#include "spectrum_router.h"
#include <net/ip_fib.h>
+#include <linux/if_tunnel.h>
struct ip_tunnel_parm
-mlxsw_sp_ipip_netdev_parms(const struct net_device *ol_dev);
+mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev);
+struct __ip6_tnl_parm
+mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev);
union mlxsw_sp_l3addr
mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
const struct net_device *ol_dev);
+bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr);
+
enum mlxsw_sp_ipip_type {
MLXSW_SP_IPIP_TYPE_GRE4,
MLXSW_SP_IPIP_TYPE_MAX,
@@ -56,7 +61,9 @@ struct mlxsw_sp_ipip_entry {
struct mlxsw_sp_rif_ipip_lb *ol_lb;
struct mlxsw_sp_fib_entry *decap_fib_entry;
struct list_head ipip_list_node;
- struct ip_tunnel_parm parms;
+ union {
+ struct ip_tunnel_parm parms4;
+ };
};
struct mlxsw_sp_ipip_ops {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
index 55f9d2d70f9e..8796db44dcc3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -55,24 +55,47 @@
#define MLXSW_SP_KVDL_LARGE_CHUNKS_END \
(MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP_KVDL_LARGE_CHUNKS_BASE - 1)
-#define MLXSW_SP_CHUNK_MAX 32
-#define MLXSW_SP_LARGE_CHUNK_MAX 512
+#define MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE 1
+#define MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE 32
+#define MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE 512
struct mlxsw_sp_kvdl_part_info {
unsigned int part_index;
unsigned int start_index;
unsigned int end_index;
unsigned int alloc_size;
+ enum mlxsw_sp_resource_id resource_id;
};
+enum mlxsw_sp_kvdl_part_id {
+ MLXSW_SP_KVDL_PART_ID_SINGLE,
+ MLXSW_SP_KVDL_PART_ID_CHUNKS,
+ MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS,
+};
+
+#define MLXSW_SP_KVDL_PART_INFO(id) \
+[MLXSW_SP_KVDL_PART_ID_##id] = { \
+ .start_index = MLXSW_SP_KVDL_##id##_BASE, \
+ .end_index = MLXSW_SP_KVDL_##id##_END, \
+ .alloc_size = MLXSW_SP_KVDL_##id##_ALLOC_SIZE, \
+ .resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_##id, \
+}
+
+static const struct mlxsw_sp_kvdl_part_info mlxsw_sp_kvdl_parts_info[] = {
+ MLXSW_SP_KVDL_PART_INFO(SINGLE),
+ MLXSW_SP_KVDL_PART_INFO(CHUNKS),
+ MLXSW_SP_KVDL_PART_INFO(LARGE_CHUNKS),
+};
+
+#define MLXSW_SP_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp_kvdl_parts_info)
+
struct mlxsw_sp_kvdl_part {
- struct list_head list;
- const struct mlxsw_sp_kvdl_part_info *info;
+ struct mlxsw_sp_kvdl_part_info info;
unsigned long usage[0]; /* Entries */
};
struct mlxsw_sp_kvdl {
- struct list_head parts_list;
+ struct mlxsw_sp_kvdl_part *parts[MLXSW_SP_KVDL_PARTS_INFO_LEN];
};
static struct mlxsw_sp_kvdl_part *
@@ -80,11 +103,13 @@ mlxsw_sp_kvdl_alloc_size_part(struct mlxsw_sp_kvdl *kvdl,
unsigned int alloc_size)
{
struct mlxsw_sp_kvdl_part *part, *min_part = NULL;
+ int i;
- list_for_each_entry(part, &kvdl->parts_list, list) {
- if (alloc_size <= part->info->alloc_size &&
+ for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) {
+ part = kvdl->parts[i];
+ if (alloc_size <= part->info.alloc_size &&
(!min_part ||
- part->info->alloc_size <= min_part->info->alloc_size))
+ part->info.alloc_size <= min_part->info.alloc_size))
min_part = part;
}
@@ -95,10 +120,12 @@ static struct mlxsw_sp_kvdl_part *
mlxsw_sp_kvdl_index_part(struct mlxsw_sp_kvdl *kvdl, u32 kvdl_index)
{
struct mlxsw_sp_kvdl_part *part;
+ int i;
- list_for_each_entry(part, &kvdl->parts_list, list) {
- if (kvdl_index >= part->info->start_index &&
- kvdl_index <= part->info->end_index)
+ for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) {
+ part = kvdl->parts[i];
+ if (kvdl_index >= part->info.start_index &&
+ kvdl_index <= part->info.end_index)
return part;
}
@@ -122,7 +149,7 @@ mlxsw_sp_kvdl_index_entry_index(const struct mlxsw_sp_kvdl_part_info *info,
static int mlxsw_sp_kvdl_part_alloc(struct mlxsw_sp_kvdl_part *part,
u32 *p_kvdl_index)
{
- const struct mlxsw_sp_kvdl_part_info *info = part->info;
+ const struct mlxsw_sp_kvdl_part_info *info = &part->info;
unsigned int entry_index, nr_entries;
nr_entries = (info->end_index - info->start_index + 1) /
@@ -132,8 +159,7 @@ static int mlxsw_sp_kvdl_part_alloc(struct mlxsw_sp_kvdl_part *part,
return -ENOBUFS;
__set_bit(entry_index, part->usage);
- *p_kvdl_index = mlxsw_sp_entry_index_kvdl_index(part->info,
- entry_index);
+ *p_kvdl_index = mlxsw_sp_entry_index_kvdl_index(info, entry_index);
return 0;
}
@@ -141,10 +167,10 @@ static int mlxsw_sp_kvdl_part_alloc(struct mlxsw_sp_kvdl_part *part,
static void mlxsw_sp_kvdl_part_free(struct mlxsw_sp_kvdl_part *part,
u32 kvdl_index)
{
+ const struct mlxsw_sp_kvdl_part_info *info = &part->info;
unsigned int entry_index;
- entry_index = mlxsw_sp_kvdl_index_entry_index(part->info,
- kvdl_index);
+ entry_index = mlxsw_sp_kvdl_index_entry_index(info, kvdl_index);
__clear_bit(entry_index, part->usage);
}
@@ -183,135 +209,212 @@ int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
if (IS_ERR(part))
return PTR_ERR(part);
- *p_alloc_size = part->info->alloc_size;
+ *p_alloc_size = part->info.alloc_size;
return 0;
}
-static const struct mlxsw_sp_kvdl_part_info kvdl_parts_info[] = {
- {
- .part_index = 0,
- .start_index = MLXSW_SP_KVDL_SINGLE_BASE,
- .end_index = MLXSW_SP_KVDL_SINGLE_END,
- .alloc_size = 1,
- },
- {
- .part_index = 1,
- .start_index = MLXSW_SP_KVDL_CHUNKS_BASE,
- .end_index = MLXSW_SP_KVDL_CHUNKS_END,
- .alloc_size = MLXSW_SP_CHUNK_MAX,
- },
- {
- .part_index = 2,
- .start_index = MLXSW_SP_KVDL_LARGE_CHUNKS_BASE,
- .end_index = MLXSW_SP_KVDL_LARGE_CHUNKS_END,
- .alloc_size = MLXSW_SP_LARGE_CHUNK_MAX,
- },
-};
-
-static struct mlxsw_sp_kvdl_part *
-mlxsw_sp_kvdl_part_find(struct mlxsw_sp *mlxsw_sp, unsigned int part_index)
+static void mlxsw_sp_kvdl_part_update(struct mlxsw_sp_kvdl_part *part,
+ struct mlxsw_sp_kvdl_part *part_prev,
+ unsigned int size)
{
- struct mlxsw_sp_kvdl_part *part;
- list_for_each_entry(part, &mlxsw_sp->kvdl->parts_list, list) {
- if (part->info->part_index == part_index)
- return part;
+ if (!part_prev) {
+ part->info.end_index = size - 1;
+ } else {
+ part->info.start_index = part_prev->info.end_index + 1;
+ part->info.end_index = part->info.start_index + size - 1;
}
-
- return NULL;
}
-static int mlxsw_sp_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
- unsigned int part_index)
+static struct mlxsw_sp_kvdl_part *
+mlxsw_sp_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_kvdl_part_info *info,
+ struct mlxsw_sp_kvdl_part *part_prev)
{
- const struct mlxsw_sp_kvdl_part_info *info;
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp_kvdl_part *part;
+ bool need_update = true;
unsigned int nr_entries;
size_t usage_size;
+ u64 resource_size;
+ int err;
- info = &kvdl_parts_info[part_index];
+ err = devlink_resource_size_get(devlink, info->resource_id,
+ &resource_size);
+ if (err) {
+ need_update = false;
+ resource_size = info->end_index - info->start_index + 1;
+ }
- nr_entries = (info->end_index - info->start_index + 1) /
- info->alloc_size;
+ nr_entries = div_u64(resource_size, info->alloc_size);
usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long);
part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
if (!part)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
- part->info = info;
- list_add(&part->list, &mlxsw_sp->kvdl->parts_list);
+ memcpy(&part->info, info, sizeof(part->info));
- return 0;
+ if (need_update)
+ mlxsw_sp_kvdl_part_update(part, part_prev, resource_size);
+ return part;
}
-static void mlxsw_sp_kvdl_part_fini(struct mlxsw_sp *mlxsw_sp,
- unsigned int part_index)
+static void mlxsw_sp_kvdl_part_fini(struct mlxsw_sp_kvdl_part *part)
{
- struct mlxsw_sp_kvdl_part *part;
-
- part = mlxsw_sp_kvdl_part_find(mlxsw_sp, part_index);
- if (!part)
- return;
-
- list_del(&part->list);
kfree(part);
}
static int mlxsw_sp_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
+ const struct mlxsw_sp_kvdl_part_info *info;
+ struct mlxsw_sp_kvdl_part *part_prev = NULL;
int err, i;
- INIT_LIST_HEAD(&mlxsw_sp->kvdl->parts_list);
-
- for (i = 0; i < ARRAY_SIZE(kvdl_parts_info); i++) {
- err = mlxsw_sp_kvdl_part_init(mlxsw_sp, i);
- if (err)
+ for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) {
+ info = &mlxsw_sp_kvdl_parts_info[i];
+ kvdl->parts[i] = mlxsw_sp_kvdl_part_init(mlxsw_sp, info,
+ part_prev);
+ if (IS_ERR(kvdl->parts[i])) {
+ err = PTR_ERR(kvdl->parts[i]);
goto err_kvdl_part_init;
+ }
+ part_prev = kvdl->parts[i];
}
-
return 0;
err_kvdl_part_init:
for (i--; i >= 0; i--)
- mlxsw_sp_kvdl_part_fini(mlxsw_sp, i);
+ mlxsw_sp_kvdl_part_fini(kvdl->parts[i]);
return err;
}
static void mlxsw_sp_kvdl_parts_fini(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
int i;
- for (i = ARRAY_SIZE(kvdl_parts_info) - 1; i >= 0; i--)
- mlxsw_sp_kvdl_part_fini(mlxsw_sp, i);
+ for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++)
+ mlxsw_sp_kvdl_part_fini(kvdl->parts[i]);
}
static u64 mlxsw_sp_kvdl_part_occ(struct mlxsw_sp_kvdl_part *part)
{
+ const struct mlxsw_sp_kvdl_part_info *info = &part->info;
unsigned int nr_entries;
int bit = -1;
u64 occ = 0;
- nr_entries = (part->info->end_index -
- part->info->start_index + 1) /
- part->info->alloc_size;
+ nr_entries = (info->end_index -
+ info->start_index + 1) /
+ info->alloc_size;
while ((bit = find_next_bit(part->usage, nr_entries, bit + 1))
< nr_entries)
- occ += part->info->alloc_size;
+ occ += info->alloc_size;
return occ;
}
u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp)
{
- struct mlxsw_sp_kvdl_part *part;
u64 occ = 0;
+ int i;
- list_for_each_entry(part, &mlxsw_sp->kvdl->parts_list, list)
- occ += mlxsw_sp_kvdl_part_occ(part);
+ for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++)
+ occ += mlxsw_sp_kvdl_part_occ(mlxsw_sp->kvdl->parts[i]);
return occ;
}
+static u64 mlxsw_sp_kvdl_single_occ_get(struct devlink *devlink)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_kvdl_part *part;
+
+ part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_SINGLE];
+ return mlxsw_sp_kvdl_part_occ(part);
+}
+
+static u64 mlxsw_sp_kvdl_chunks_occ_get(struct devlink *devlink)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_kvdl_part *part;
+
+ part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_CHUNKS];
+ return mlxsw_sp_kvdl_part_occ(part);
+}
+
+static u64 mlxsw_sp_kvdl_large_chunks_occ_get(struct devlink *devlink)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_kvdl_part *part;
+
+ part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS];
+ return mlxsw_sp_kvdl_part_occ(part);
+}
+
+static const struct devlink_resource_ops mlxsw_sp_kvdl_single_ops = {
+ .occ_get = mlxsw_sp_kvdl_single_occ_get,
+};
+
+static const struct devlink_resource_ops mlxsw_sp_kvdl_chunks_ops = {
+ .occ_get = mlxsw_sp_kvdl_chunks_occ_get,
+};
+
+static const struct devlink_resource_ops mlxsw_sp_kvdl_chunks_large_ops = {
+ .occ_get = mlxsw_sp_kvdl_large_chunks_occ_get,
+};
+
+int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ static struct devlink_resource_size_params size_params;
+ u32 kvdl_max_size;
+ int err;
+
+ kvdl_max_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
+ MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) -
+ MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE);
+
+ devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
+ MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
+ MLXSW_SP_KVDL_SINGLE_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params,
+ &mlxsw_sp_kvdl_single_ops);
+ if (err)
+ return err;
+
+ devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
+ MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS,
+ MLXSW_SP_KVDL_CHUNKS_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params,
+ &mlxsw_sp_kvdl_chunks_ops);
+ if (err)
+ return err;
+
+ devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
+ MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS,
+ MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &size_params,
+ &mlxsw_sp_kvdl_chunks_large_ops);
+ return err;
+}
+
int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_kvdl *kvdl;
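
Note on the KVDL rework above: the list of parts becomes a fixed, macro-generated array, devlink resource sizes may override the default part boundaries, and occupancy is still tracked per part with a usage bitmap where each bit covers one alloc_size-sized block. A compilable userspace sketch of that allocation scheme — names and boundaries are made up, and the real driver uses find_first_zero_bit() on a variable-length bitmap rather than a single word:

#include <stdio.h>

#define SLOTS 16 /* slots per part in this toy example */

struct kvdl_part {
    unsigned int start_index; /* first linear index covered by the part */
    unsigned int alloc_size;  /* entries handed out per allocation */
    unsigned int usage;       /* bit i set => slot i is allocated */
};

static int part_alloc(struct kvdl_part *part, unsigned int *kvdl_index)
{
    unsigned int slot;

    for (slot = 0; slot < SLOTS; slot++) {
        if (!(part->usage & (1u << slot))) {
            part->usage |= 1u << slot;
            *kvdl_index = part->start_index + slot * part->alloc_size;
            return 0;
        }
    }
    return -1; /* part exhausted */
}

static void part_free(struct kvdl_part *part, unsigned int kvdl_index)
{
    unsigned int slot = (kvdl_index - part->start_index) / part->alloc_size;

    part->usage &= ~(1u << slot);
}

int main(void)
{
    /* Three parts with growing granularity, mirroring the SINGLE / CHUNKS /
     * LARGE_CHUNKS split; the boundaries here are invented. */
    struct kvdl_part parts[] = {
        { .start_index = 0,   .alloc_size = 1 },
        { .start_index = 16,  .alloc_size = 32 },
        { .start_index = 528, .alloc_size = 512 },
    };
    unsigned int index;

    if (!part_alloc(&parts[1], &index))
        printf("allocated a 32-entry chunk at linear index %u\n", index);
    part_free(&parts[1], index);
    return 0;
}
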
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
index d20b143de3b4..a82539609d49 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -33,6 +33,7 @@
*/
#include <linux/rhashtable.h>
+#include <net/ipv6.h>
#include "spectrum_mr.h"
#include "spectrum_router.h"
@@ -47,6 +48,11 @@ struct mlxsw_sp_mr {
/* priv has to be always the last item */
};
+struct mlxsw_sp_mr_vif;
+struct mlxsw_sp_mr_vif_ops {
+ bool (*is_regular)(const struct mlxsw_sp_mr_vif *vif);
+};
+
struct mlxsw_sp_mr_vif {
struct net_device *dev;
const struct mlxsw_sp_rif *rif;
@@ -61,6 +67,9 @@ struct mlxsw_sp_mr_vif {
* instance is used as an ingress VIF
*/
struct list_head route_ivif_list;
+
+ /* Protocol specific operations for a VIF */
+ const struct mlxsw_sp_mr_vif_ops *ops;
};
struct mlxsw_sp_mr_route_vif_entry {
@@ -70,6 +79,17 @@ struct mlxsw_sp_mr_route_vif_entry {
struct mlxsw_sp_mr_route *mr_route;
};
+struct mlxsw_sp_mr_table;
+struct mlxsw_sp_mr_table_ops {
+ bool (*is_route_valid)(const struct mlxsw_sp_mr_table *mr_table,
+ const struct mr_mfc *mfc);
+ void (*key_create)(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mr_mfc *mfc);
+ bool (*is_route_starg)(const struct mlxsw_sp_mr_table *mr_table,
+ const struct mlxsw_sp_mr_route *mr_route);
+};
+
struct mlxsw_sp_mr_table {
struct list_head node;
enum mlxsw_sp_l3proto proto;
@@ -78,6 +98,7 @@ struct mlxsw_sp_mr_table {
struct mlxsw_sp_mr_vif vifs[MAXVIFS];
struct list_head route_list;
struct rhashtable route_ht;
+ const struct mlxsw_sp_mr_table_ops *ops;
char catchall_route_priv[0];
/* catchall_route_priv has to be always the last item */
};
@@ -88,7 +109,7 @@ struct mlxsw_sp_mr_route {
struct mlxsw_sp_mr_route_key key;
enum mlxsw_sp_mr_route_action route_action;
u16 min_mtu;
- struct mfc_cache *mfc4;
+ struct mr_mfc *mfc;
void *route_priv;
const struct mlxsw_sp_mr_table *mr_table;
/* A list of route_vif_entry structs that point to the egress VIFs */
@@ -104,14 +125,9 @@ static const struct rhashtable_params mlxsw_sp_mr_route_ht_params = {
.automatic_shrinking = true,
};
-static bool mlxsw_sp_mr_vif_regular(const struct mlxsw_sp_mr_vif *vif)
-{
- return !(vif->vif_flags & (VIFF_TUNNEL | VIFF_REGISTER));
-}
-
static bool mlxsw_sp_mr_vif_valid(const struct mlxsw_sp_mr_vif *vif)
{
- return mlxsw_sp_mr_vif_regular(vif) && vif->dev && vif->rif;
+ return vif->ops->is_regular(vif) && vif->dev && vif->rif;
}
static bool mlxsw_sp_mr_vif_exists(const struct mlxsw_sp_mr_vif *vif)
@@ -122,18 +138,9 @@ static bool mlxsw_sp_mr_vif_exists(const struct mlxsw_sp_mr_vif *vif)
static bool
mlxsw_sp_mr_route_ivif_in_evifs(const struct mlxsw_sp_mr_route *mr_route)
{
- vifi_t ivif;
+ vifi_t ivif = mr_route->mfc->mfc_parent;
- switch (mr_route->mr_table->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- ivif = mr_route->mfc4->mfc_parent;
- return mr_route->mfc4->mfc_un.res.ttls[ivif] != 255;
- case MLXSW_SP_L3_PROTO_IPV6:
- /* fall through */
- default:
- WARN_ON_ONCE(1);
- }
- return false;
+ return mr_route->mfc->mfc_un.res.ttls[ivif] != 255;
}
static int
@@ -149,19 +156,6 @@ mlxsw_sp_mr_route_valid_evifs_num(const struct mlxsw_sp_mr_route *mr_route)
return valid_evifs;
}
-static bool mlxsw_sp_mr_route_starg(const struct mlxsw_sp_mr_route *mr_route)
-{
- switch (mr_route->mr_table->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- return mr_route->key.source_mask.addr4 == htonl(INADDR_ANY);
- case MLXSW_SP_L3_PROTO_IPV6:
- /* fall through */
- default:
- WARN_ON_ONCE(1);
- }
- return false;
-}
-
static enum mlxsw_sp_mr_route_action
mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route)
{
@@ -174,7 +168,8 @@ mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route)
/* The kernel does not match a (*,G) route whose ingress interface is
* not one of the egress interfaces, so trap these kinds of routes.
*/
- if (mlxsw_sp_mr_route_starg(mr_route) &&
+ if (mr_route->mr_table->ops->is_route_starg(mr_route->mr_table,
+ mr_route) &&
!mlxsw_sp_mr_route_ivif_in_evifs(mr_route))
return MLXSW_SP_MR_ROUTE_ACTION_TRAP;
@@ -195,25 +190,11 @@ mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route)
static enum mlxsw_sp_mr_route_prio
mlxsw_sp_mr_route_prio(const struct mlxsw_sp_mr_route *mr_route)
{
- return mlxsw_sp_mr_route_starg(mr_route) ?
+ return mr_route->mr_table->ops->is_route_starg(mr_route->mr_table,
+ mr_route) ?
MLXSW_SP_MR_ROUTE_PRIO_STARG : MLXSW_SP_MR_ROUTE_PRIO_SG;
}
-static void mlxsw_sp_mr_route4_key(struct mlxsw_sp_mr_table *mr_table,
- struct mlxsw_sp_mr_route_key *key,
- const struct mfc_cache *mfc)
-{
- bool starg = (mfc->mfc_origin == htonl(INADDR_ANY));
-
- memset(key, 0, sizeof(*key));
- key->vrid = mr_table->vr_id;
- key->proto = mr_table->proto;
- key->group.addr4 = mfc->mfc_mcastgrp;
- key->group_mask.addr4 = htonl(0xffffffff);
- key->source.addr4 = mfc->mfc_origin;
- key->source_mask.addr4 = htonl(starg ? 0 : 0xffffffff);
-}
-
static int mlxsw_sp_mr_route_evif_link(struct mlxsw_sp_mr_route *mr_route,
struct mlxsw_sp_mr_vif *mr_vif)
{
@@ -343,8 +324,8 @@ static void mlxsw_sp_mr_route_erase(struct mlxsw_sp_mr_table *mr_table,
}
static struct mlxsw_sp_mr_route *
-mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table,
- struct mfc_cache *mfc)
+mlxsw_sp_mr_route_create(struct mlxsw_sp_mr_table *mr_table,
+ struct mr_mfc *mfc)
{
struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;
struct mlxsw_sp_mr_route *mr_route;
@@ -356,12 +337,13 @@ mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table,
if (!mr_route)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&mr_route->evif_list);
- mlxsw_sp_mr_route4_key(mr_table, &mr_route->key, mfc);
/* Find min_mtu and link iVIF and eVIFs */
mr_route->min_mtu = ETH_MAX_MTU;
- ipmr_cache_hold(mfc);
- mr_route->mfc4 = mfc;
+ mr_cache_hold(mfc);
+ mr_route->mfc = mfc;
+ mr_table->ops->key_create(mr_table, &mr_route->key, mr_route->mfc);
+
mr_route->mr_table = mr_table;
for (i = 0; i < MAXVIFS; i++) {
if (mfc->mfc_un.res.ttls[i] != 255) {
@@ -374,59 +356,38 @@ mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table,
mr_route->min_mtu = mr_table->vifs[i].dev->mtu;
}
}
- mlxsw_sp_mr_route_ivif_link(mr_route, &mr_table->vifs[mfc->mfc_parent]);
+ mlxsw_sp_mr_route_ivif_link(mr_route,
+ &mr_table->vifs[mfc->mfc_parent]);
mr_route->route_action = mlxsw_sp_mr_route_action(mr_route);
return mr_route;
err:
- ipmr_cache_put(mfc);
+ mr_cache_put(mfc);
list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
mlxsw_sp_mr_route_evif_unlink(rve);
kfree(mr_route);
return ERR_PTR(err);
}
-static void mlxsw_sp_mr_route4_destroy(struct mlxsw_sp_mr_table *mr_table,
- struct mlxsw_sp_mr_route *mr_route)
+static void mlxsw_sp_mr_route_destroy(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route *mr_route)
{
struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;
mlxsw_sp_mr_route_ivif_unlink(mr_route);
- ipmr_cache_put(mr_route->mfc4);
+ mr_cache_put(mr_route->mfc);
list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
mlxsw_sp_mr_route_evif_unlink(rve);
kfree(mr_route);
}
-static void mlxsw_sp_mr_route_destroy(struct mlxsw_sp_mr_table *mr_table,
- struct mlxsw_sp_mr_route *mr_route)
-{
- switch (mr_table->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- mlxsw_sp_mr_route4_destroy(mr_table, mr_route);
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- /* fall through */
- default:
- WARN_ON_ONCE(1);
- }
-}
-
static void mlxsw_sp_mr_mfc_offload_set(struct mlxsw_sp_mr_route *mr_route,
bool offload)
{
- switch (mr_route->mr_table->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- if (offload)
- mr_route->mfc4->mfc_flags |= MFC_OFFLOAD;
- else
- mr_route->mfc4->mfc_flags &= ~MFC_OFFLOAD;
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- /* fall through */
- default:
- WARN_ON_ONCE(1);
- }
+ if (offload)
+ mr_route->mfc->mfc_flags |= MFC_OFFLOAD;
+ else
+ mr_route->mfc->mfc_flags &= ~MFC_OFFLOAD;
}
static void mlxsw_sp_mr_mfc_offload_update(struct mlxsw_sp_mr_route *mr_route)
@@ -448,25 +409,18 @@ static void __mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
mlxsw_sp_mr_route_destroy(mr_table, mr_route);
}
-int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table,
- struct mfc_cache *mfc, bool replace)
+int mlxsw_sp_mr_route_add(struct mlxsw_sp_mr_table *mr_table,
+ struct mr_mfc *mfc, bool replace)
{
struct mlxsw_sp_mr_route *mr_orig_route = NULL;
struct mlxsw_sp_mr_route *mr_route;
int err;
- /* If the route is a (*,*) route, abort, as these kind of routes are
- * used for proxy routes.
- */
- if (mfc->mfc_origin == htonl(INADDR_ANY) &&
- mfc->mfc_mcastgrp == htonl(INADDR_ANY)) {
- dev_warn(mr_table->mlxsw_sp->bus_info->dev,
- "Offloading proxy routes is not supported.\n");
+ if (!mr_table->ops->is_route_valid(mr_table, mfc))
return -EINVAL;
- }
/* Create a new route */
- mr_route = mlxsw_sp_mr_route4_create(mr_table, mfc);
+ mr_route = mlxsw_sp_mr_route_create(mr_table, mfc);
if (IS_ERR(mr_route))
return PTR_ERR(mr_route);
@@ -511,7 +465,7 @@ int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table,
&mr_orig_route->ht_node,
mlxsw_sp_mr_route_ht_params);
list_del(&mr_orig_route->node);
- mlxsw_sp_mr_route4_destroy(mr_table, mr_orig_route);
+ mlxsw_sp_mr_route_destroy(mr_table, mr_orig_route);
}
mlxsw_sp_mr_mfc_offload_update(mr_route);
@@ -524,17 +478,17 @@ err_rhashtable_insert:
list_del(&mr_route->node);
err_no_orig_route:
err_duplicate_route:
- mlxsw_sp_mr_route4_destroy(mr_table, mr_route);
+ mlxsw_sp_mr_route_destroy(mr_table, mr_route);
return err;
}
-void mlxsw_sp_mr_route4_del(struct mlxsw_sp_mr_table *mr_table,
- struct mfc_cache *mfc)
+void mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
+ struct mr_mfc *mfc)
{
struct mlxsw_sp_mr_route *mr_route;
struct mlxsw_sp_mr_route_key key;
- mlxsw_sp_mr_route4_key(mr_table, &key, mfc);
+ mr_table->ops->key_create(mr_table, &key, mfc);
mr_route = rhashtable_lookup_fast(&mr_table->route_ht, &key,
mlxsw_sp_mr_route_ht_params);
if (mr_route)
@@ -839,6 +793,125 @@ void mlxsw_sp_mr_rif_mtu_update(struct mlxsw_sp_mr_table *mr_table,
}
}
+/* Protocol-specific functions */
+static bool
+mlxsw_sp_mr_route4_validate(const struct mlxsw_sp_mr_table *mr_table,
+ const struct mr_mfc *c)
+{
+ struct mfc_cache *mfc = (struct mfc_cache *) c;
+
+	/* If the route is a (*,*) route, abort, as these kinds of routes are
+	 * used for proxy routes.
+	 */
+ if (mfc->mfc_origin == htonl(INADDR_ANY) &&
+ mfc->mfc_mcastgrp == htonl(INADDR_ANY)) {
+ dev_warn(mr_table->mlxsw_sp->bus_info->dev,
+ "Offloading proxy routes is not supported.\n");
+ return false;
+ }
+ return true;
+}
+
+static void mlxsw_sp_mr_route4_key(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mr_mfc *c)
+{
+ const struct mfc_cache *mfc = (struct mfc_cache *) c;
+ bool starg;
+
+ starg = (mfc->mfc_origin == htonl(INADDR_ANY));
+
+ memset(key, 0, sizeof(*key));
+ key->vrid = mr_table->vr_id;
+ key->proto = MLXSW_SP_L3_PROTO_IPV4;
+ key->group.addr4 = mfc->mfc_mcastgrp;
+ key->group_mask.addr4 = htonl(0xffffffff);
+ key->source.addr4 = mfc->mfc_origin;
+ key->source_mask.addr4 = htonl(starg ? 0 : 0xffffffff);
+}
+
+static bool mlxsw_sp_mr_route4_starg(const struct mlxsw_sp_mr_table *mr_table,
+ const struct mlxsw_sp_mr_route *mr_route)
+{
+ return mr_route->key.source_mask.addr4 == htonl(INADDR_ANY);
+}
+
+static bool mlxsw_sp_mr_vif4_is_regular(const struct mlxsw_sp_mr_vif *vif)
+{
+ return !(vif->vif_flags & (VIFF_TUNNEL | VIFF_REGISTER));
+}
+
+static bool
+mlxsw_sp_mr_route6_validate(const struct mlxsw_sp_mr_table *mr_table,
+ const struct mr_mfc *c)
+{
+ struct mfc6_cache *mfc = (struct mfc6_cache *) c;
+
+	/* If the route is a (*,*) route, abort, as these kinds of routes are
+	 * used for proxy routes.
+	 */
+ if (ipv6_addr_any(&mfc->mf6c_origin) &&
+ ipv6_addr_any(&mfc->mf6c_mcastgrp)) {
+ dev_warn(mr_table->mlxsw_sp->bus_info->dev,
+ "Offloading proxy routes is not supported.\n");
+ return false;
+ }
+ return true;
+}
+
+static void mlxsw_sp_mr_route6_key(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mr_mfc *c)
+{
+ const struct mfc6_cache *mfc = (struct mfc6_cache *) c;
+
+ memset(key, 0, sizeof(*key));
+ key->vrid = mr_table->vr_id;
+ key->proto = MLXSW_SP_L3_PROTO_IPV6;
+ key->group.addr6 = mfc->mf6c_mcastgrp;
+ memset(&key->group_mask.addr6, 0xff, sizeof(key->group_mask.addr6));
+ key->source.addr6 = mfc->mf6c_origin;
+ if (!ipv6_addr_any(&mfc->mf6c_origin))
+ memset(&key->source_mask.addr6, 0xff,
+ sizeof(key->source_mask.addr6));
+}
+
+static bool mlxsw_sp_mr_route6_starg(const struct mlxsw_sp_mr_table *mr_table,
+ const struct mlxsw_sp_mr_route *mr_route)
+{
+ return ipv6_addr_any(&mr_route->key.source_mask.addr6);
+}
+
+static bool mlxsw_sp_mr_vif6_is_regular(const struct mlxsw_sp_mr_vif *vif)
+{
+ return !(vif->vif_flags & MIFF_REGISTER);
+}
+
+static struct
+mlxsw_sp_mr_vif_ops mlxsw_sp_mr_vif_ops_arr[] = {
+ {
+ .is_regular = mlxsw_sp_mr_vif4_is_regular,
+ },
+ {
+ .is_regular = mlxsw_sp_mr_vif6_is_regular,
+ },
+};
+
+static struct
+mlxsw_sp_mr_table_ops mlxsw_sp_mr_table_ops_arr[] = {
+ {
+ .is_route_valid = mlxsw_sp_mr_route4_validate,
+ .key_create = mlxsw_sp_mr_route4_key,
+ .is_route_starg = mlxsw_sp_mr_route4_starg,
+ },
+ {
+ .is_route_valid = mlxsw_sp_mr_route6_validate,
+ .key_create = mlxsw_sp_mr_route6_key,
+ .is_route_starg = mlxsw_sp_mr_route6_starg,
+ },
+
+};
+
struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
u32 vr_id,
enum mlxsw_sp_l3proto proto)
@@ -847,6 +920,7 @@ struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
.prio = MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
.key = {
.vrid = vr_id,
+ .proto = proto,
},
.value = {
.route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP,
@@ -865,6 +939,7 @@ struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
mr_table->vr_id = vr_id;
mr_table->mlxsw_sp = mlxsw_sp;
mr_table->proto = proto;
+ mr_table->ops = &mlxsw_sp_mr_table_ops_arr[proto];
INIT_LIST_HEAD(&mr_table->route_list);
err = rhashtable_init(&mr_table->route_ht,
@@ -875,6 +950,7 @@ struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < MAXVIFS; i++) {
INIT_LIST_HEAD(&mr_table->vifs[i].route_evif_list);
INIT_LIST_HEAD(&mr_table->vifs[i].route_ivif_list);
+ mr_table->vifs[i].ops = &mlxsw_sp_mr_vif_ops_arr[proto];
}
err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
@@ -941,18 +1017,10 @@ static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets,
&bytes);
- switch (mr_route->mr_table->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- if (mr_route->mfc4->mfc_un.res.pkt != packets)
- mr_route->mfc4->mfc_un.res.lastuse = jiffies;
- mr_route->mfc4->mfc_un.res.pkt = packets;
- mr_route->mfc4->mfc_un.res.bytes = bytes;
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- /* fall through */
- default:
- WARN_ON_ONCE(1);
- }
+ if (mr_route->mfc->mfc_un.res.pkt != packets)
+ mr_route->mfc->mfc_un.res.lastuse = jiffies;
+ mr_route->mfc->mfc_un.res.pkt = packets;
+ mr_route->mfc->mfc_un.res.bytes = bytes;
}
static void mlxsw_sp_mr_stats_update(struct work_struct *work)
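
Note on the spectrum_mr.c changes above: the per-protocol switch statements are converted into ops lookups — each VIF and each MR table carries a pointer into an ops array indexed by the L3 protocol, so common code such as mlxsw_sp_mr_vif_valid() no longer needs to know the address family. A small self-contained sketch of the same dispatch pattern; names and flag values below are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

enum l3proto { L3_PROTO_IPV4, L3_PROTO_IPV6, L3_PROTO_MAX };

#define VIFF_TUNNEL   0x1
#define VIFF_REGISTER 0x2
#define MIFF_REGISTER 0x4

struct vif;

struct vif_ops {
    bool (*is_regular)(const struct vif *vif);
};

struct vif {
    unsigned long flags;
    const struct vif_ops *ops;
};

static bool vif4_is_regular(const struct vif *vif)
{
    return !(vif->flags & (VIFF_TUNNEL | VIFF_REGISTER));
}

static bool vif6_is_regular(const struct vif *vif)
{
    return !(vif->flags & MIFF_REGISTER);
}

static const struct vif_ops vif_ops_arr[L3_PROTO_MAX] = {
    [L3_PROTO_IPV4] = { .is_regular = vif4_is_regular },
    [L3_PROTO_IPV6] = { .is_regular = vif6_is_regular },
};

int main(void)
{
    struct vif vif = {
        .flags = MIFF_REGISTER,
        .ops = &vif_ops_arr[L3_PROTO_IPV6],
    };

    /* Common code stays protocol-agnostic. */
    printf("regular: %d\n", vif.ops->is_regular(&vif)); /* 0 */
    return 0;
}
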
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
index 5d26a122af49..7c864a86811d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
@@ -36,6 +36,7 @@
#define _MLXSW_SPECTRUM_MCROUTER_H
#include <linux/mroute.h>
+#include <linux/mroute6.h>
#include "spectrum_router.h"
#include "spectrum.h"
@@ -109,10 +110,10 @@ struct mlxsw_sp_mr_table;
int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_mr_ops *mr_ops);
void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table,
- struct mfc_cache *mfc, bool replace);
-void mlxsw_sp_mr_route4_del(struct mlxsw_sp_mr_table *mr_table,
- struct mfc_cache *mfc);
+int mlxsw_sp_mr_route_add(struct mlxsw_sp_mr_table *mr_table,
+ struct mr_mfc *mfc, bool replace);
+void mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
+ struct mr_mfc *mfc);
int mlxsw_sp_mr_vif_add(struct mlxsw_sp_mr_table *mr_table,
struct net_device *dev, vifi_t vif_index,
unsigned long vif_flags,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
index 4c7f32d4288d..4f4c0d311883 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
@@ -51,7 +51,7 @@ struct mlxsw_sp_mr_tcam_region {
};
struct mlxsw_sp_mr_tcam {
- struct mlxsw_sp_mr_tcam_region ipv4_tcam_region;
+ struct mlxsw_sp_mr_tcam_region tcam_regions[MLXSW_SP_L3_PROTO_MAX];
};
/* This struct maps to one RIGR2 register entry */
@@ -316,20 +316,37 @@ static int mlxsw_sp_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp,
mlxsw_afa_block_first_set(afa_block));
break;
case MLXSW_SP_L3_PROTO_IPV6:
- default:
- WARN_ON_ONCE(1);
+ mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, true, parman_item->index,
+ key->vrid,
+ MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
+ key->group.addr6,
+ key->group_mask.addr6,
+ key->source.addr6,
+ key->source_mask.addr6,
+ mlxsw_afa_block_first_set(afa_block));
}
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
}
static int mlxsw_sp_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp, int vrid,
+ struct mlxsw_sp_mr_route_key *key,
struct parman_item *parman_item)
{
+ struct in6_addr zero_addr = IN6ADDR_ANY_INIT;
char rmft2_pl[MLXSW_REG_RMFT2_LEN];
- mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index, vrid,
- 0, 0, 0, 0, 0, 0, NULL);
+ switch (key->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index,
+ vrid, 0, 0, 0, 0, 0, 0, NULL);
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, false, parman_item->index,
+ vrid, 0, 0, zero_addr, zero_addr,
+ zero_addr, zero_addr, NULL);
+ break;
+ }
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
}
@@ -353,27 +370,30 @@ mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+static struct mlxsw_sp_mr_tcam_region *
+mlxsw_sp_mr_tcam_protocol_region(struct mlxsw_sp_mr_tcam *mr_tcam,
+ enum mlxsw_sp_l3proto proto)
+{
+ return &mr_tcam->tcam_regions[proto];
+}
+
static int
mlxsw_sp_mr_tcam_route_parman_item_add(struct mlxsw_sp_mr_tcam *mr_tcam,
struct mlxsw_sp_mr_tcam_route *route,
enum mlxsw_sp_mr_route_prio prio)
{
- struct parman_prio *parman_prio = NULL;
+ struct mlxsw_sp_mr_tcam_region *tcam_region;
int err;
- switch (route->key.proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- parman_prio = &mr_tcam->ipv4_tcam_region.parman_prios[prio];
- err = parman_item_add(mr_tcam->ipv4_tcam_region.parman,
- parman_prio, &route->parman_item);
- if (err)
- return err;
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- default:
- WARN_ON_ONCE(1);
- }
- route->parman_prio = parman_prio;
+ tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam,
+ route->key.proto);
+ err = parman_item_add(tcam_region->parman,
+ &tcam_region->parman_prios[prio],
+ &route->parman_item);
+ if (err)
+ return err;
+
+ route->parman_prio = &tcam_region->parman_prios[prio];
return 0;
}
@@ -381,15 +401,13 @@ static void
mlxsw_sp_mr_tcam_route_parman_item_remove(struct mlxsw_sp_mr_tcam *mr_tcam,
struct mlxsw_sp_mr_tcam_route *route)
{
- switch (route->key.proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- parman_item_remove(mr_tcam->ipv4_tcam_region.parman,
- route->parman_prio, &route->parman_item);
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- default:
- WARN_ON_ONCE(1);
- }
+ struct mlxsw_sp_mr_tcam_region *tcam_region;
+
+ tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam,
+ route->key.proto);
+
+ parman_item_remove(tcam_region->parman,
+ route->parman_prio, &route->parman_item);
}
static int
@@ -462,7 +480,7 @@ static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
mlxsw_sp_mr_tcam_route_remove(mlxsw_sp, route->key.vrid,
- &route->parman_item);
+ &route->key, &route->parman_item);
mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
@@ -806,21 +824,42 @@ mlxsw_sp_mr_tcam_region_fini(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
+ struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
+ u32 rtar_key;
+ int err;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES) ||
!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES))
return -EIO;
- return mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
- &mr_tcam->ipv4_tcam_region,
- MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST);
+ rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST;
+ err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
+ &region[MLXSW_SP_L3_PROTO_IPV4],
+ rtar_key);
+ if (err)
+ return err;
+
+ rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST;
+ err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
+ &region[MLXSW_SP_L3_PROTO_IPV6],
+ rtar_key);
+ if (err)
+ goto err_ipv6_region_init;
+
+ return 0;
+
+err_ipv6_region_init:
+ mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
+ return err;
}
static void mlxsw_sp_mr_tcam_fini(void *priv)
{
struct mlxsw_sp_mr_tcam *mr_tcam = priv;
+ struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
- mlxsw_sp_mr_tcam_region_fini(&mr_tcam->ipv4_tcam_region);
+ mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV6]);
+ mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]);
}
const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops = {
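
Note on the spectrum_mr_tcam.c changes above: a second TCAM region is added for IPv6 multicast, indexed by the same protocol enum, and the init path unwinds already-initialised regions in reverse order on failure. A toy sketch of that init/fini ordering — the region contents are stand-ins:

#include <stdio.h>

enum l3proto { L3_PROTO_IPV4, L3_PROTO_IPV6, L3_PROTO_MAX };

struct region { int initialised; };

static int region_init(struct region *r, enum l3proto proto)
{
    r->initialised = 1;
    printf("init region %d\n", proto);
    return 0; /* make this non-zero to exercise the unwind path */
}

static void region_fini(struct region *r, enum l3proto proto)
{
    r->initialised = 0;
    printf("fini region %d\n", proto);
}

static int tcam_init(struct region *regions)
{
    int err;

    err = region_init(&regions[L3_PROTO_IPV4], L3_PROTO_IPV4);
    if (err)
        return err;

    err = region_init(&regions[L3_PROTO_IPV6], L3_PROTO_IPV6);
    if (err)
        goto err_ipv6_region_init;

    return 0;

err_ipv6_region_init:
    /* unwind in reverse order of initialisation */
    region_fini(&regions[L3_PROTO_IPV4], L3_PROTO_IPV4);
    return err;
}

static void tcam_fini(struct region *regions)
{
    region_fini(&regions[L3_PROTO_IPV6], L3_PROTO_IPV6);
    region_fini(&regions[L3_PROTO_IPV4], L3_PROTO_IPV4);
}

int main(void)
{
    struct region regions[L3_PROTO_MAX] = { { 0 }, { 0 } };

    if (tcam_init(regions))
        return 1;
    tcam_fini(regions);
    return 0;
}
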
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index 0b7670459051..91262b0573e3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -42,6 +42,8 @@
#include "reg.h"
#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - band - 1)
+#define MLXSW_SP_PRIO_CHILD_TO_TCLASS(child) \
+ MLXSW_SP_PRIO_BAND_TO_TCLASS((child - 1))
enum mlxsw_sp_qdisc_type {
MLXSW_SP_QDISC_NO_QDISC,
@@ -76,6 +78,7 @@ struct mlxsw_sp_qdisc_ops {
struct mlxsw_sp_qdisc {
u32 handle;
u8 tclass_num;
+ u8 prio_bitmap;
union {
struct red_stats red;
} xstats_base;
@@ -99,6 +102,44 @@ mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
mlxsw_sp_qdisc->handle == handle;
}
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
+ bool root_only)
+{
+ int tclass, child_index;
+
+ if (parent == TC_H_ROOT)
+ return mlxsw_sp_port->root_qdisc;
+
+ if (root_only || !mlxsw_sp_port->root_qdisc ||
+ !mlxsw_sp_port->root_qdisc->ops ||
+ TC_H_MAJ(parent) != mlxsw_sp_port->root_qdisc->handle ||
+ TC_H_MIN(parent) > IEEE_8021QAZ_MAX_TCS)
+ return NULL;
+
+ child_index = TC_H_MIN(parent);
+ tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
+ return &mlxsw_sp_port->tclass_qdiscs[tclass];
+}
+
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle)
+{
+ int i;
+
+ if (mlxsw_sp_port->root_qdisc->handle == handle)
+ return mlxsw_sp_port->root_qdisc;
+
+ if (mlxsw_sp_port->root_qdisc->handle == TC_H_UNSPEC)
+ return NULL;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ if (mlxsw_sp_port->tclass_qdiscs[i].handle == handle)
+ return &mlxsw_sp_port->tclass_qdiscs[i];
+
+ return NULL;
+}
+
static int
mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
@@ -185,6 +226,23 @@ mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
return -EOPNOTSUPP;
}
+static void
+mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
+ u8 prio_bitmap, u64 *tx_packets,
+ u64 *tx_bytes)
+{
+ int i;
+
+ *tx_packets = 0;
+ *tx_bytes = 0;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ if (prio_bitmap & BIT(i)) {
+ *tx_packets += xstats->tx_packets[i];
+ *tx_bytes += xstats->tx_bytes[i];
+ }
+ }
+}
+
static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
int tclass_num, u32 min, u32 max,
@@ -230,17 +288,16 @@ mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
- struct rtnl_link_stats64 *stats;
struct red_stats *red_base;
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
- stats = &mlxsw_sp_port->periodic_hw_stats.stats;
stats_base = &mlxsw_sp_qdisc->stats_base;
red_base = &mlxsw_sp_qdisc->xstats_base.red;
- stats_base->tx_packets = stats->tx_packets;
- stats_base->tx_bytes = stats->tx_bytes;
-
+ mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
+ mlxsw_sp_qdisc->prio_bitmap,
+ &stats_base->tx_packets,
+ &stats_base->tx_bytes);
red_base->prob_mark = xstats->ecn;
red_base->prob_drop = xstats->wred_drop[tclass_num];
red_base->pdrop = xstats->tail_drop[tclass_num];
@@ -255,6 +312,12 @@ static int
mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
+ struct mlxsw_sp_qdisc *root_qdisc = mlxsw_sp_port->root_qdisc;
+
+ if (root_qdisc != mlxsw_sp_qdisc)
+ root_qdisc->stats_base.backlog -=
+ mlxsw_sp_qdisc->stats_base.backlog;
+
return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
mlxsw_sp_qdisc->tclass_num);
}
@@ -319,6 +382,7 @@ mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
mlxsw_sp_qdisc->stats_base.backlog);
p->qstats->backlog -= backlog;
+ mlxsw_sp_qdisc->stats_base.backlog = 0;
}
static int
@@ -357,14 +421,16 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
- struct rtnl_link_stats64 *stats;
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
- stats = &mlxsw_sp_port->periodic_hw_stats.stats;
stats_base = &mlxsw_sp_qdisc->stats_base;
- tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
- tx_packets = stats->tx_packets - stats_base->tx_packets;
+ mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
+ mlxsw_sp_qdisc->prio_bitmap,
+ &tx_packets, &tx_bytes);
+ tx_bytes = tx_bytes - stats_base->tx_bytes;
+ tx_packets = tx_packets - stats_base->tx_packets;
+
overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
stats_base->overlimits;
drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
@@ -406,11 +472,10 @@ int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- if (p->parent != TC_H_ROOT)
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
+ if (!mlxsw_sp_qdisc)
return -EOPNOTSUPP;
- mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc;
-
if (p->command == TC_RED_REPLACE)
return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
mlxsw_sp_qdisc,
@@ -441,9 +506,13 @@ mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
{
int i;
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
MLXSW_SP_PORT_DEFAULT_TCLASS);
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
+ &mlxsw_sp_port->tclass_qdiscs[i]);
+ mlxsw_sp_port->tclass_qdiscs[i].prio_bitmap = 0;
+ }
return 0;
}
@@ -467,16 +536,41 @@ mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
void *params)
{
struct tc_prio_qopt_offload_params *p = params;
- int tclass, i;
+ struct mlxsw_sp_qdisc *child_qdisc;
+ int tclass, i, band, backlog;
+ u8 old_priomap;
int err;
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->priomap[i]);
- err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, tclass);
- if (err)
- return err;
+ for (band = 0; band < p->bands; band++) {
+ tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
+ child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
+ old_priomap = child_qdisc->prio_bitmap;
+ child_qdisc->prio_bitmap = 0;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ if (p->priomap[i] == band) {
+ child_qdisc->prio_bitmap |= BIT(i);
+ if (BIT(i) & old_priomap)
+ continue;
+ err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port,
+ i, tclass);
+ if (err)
+ return err;
+ }
+ }
+ if (old_priomap != child_qdisc->prio_bitmap &&
+ child_qdisc->ops && child_qdisc->ops->clean_stats) {
+ backlog = child_qdisc->stats_base.backlog;
+ child_qdisc->ops->clean_stats(mlxsw_sp_port,
+ child_qdisc);
+ child_qdisc->stats_base.backlog = backlog;
+ }
+ }
+ for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
+ tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
+ child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
+ child_qdisc->prio_bitmap = 0;
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
}
-
return 0;
}
@@ -513,6 +607,7 @@ mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
drops += xstats->tail_drop[i];
+ drops += xstats->wred_drop[i];
backlog += xstats->backlog[i];
}
drops = drops - stats_base->drops;
@@ -548,8 +643,10 @@ mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
stats_base->tx_bytes = stats->tx_bytes;
stats_base->drops = 0;
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
stats_base->drops += xstats->tail_drop[i];
+ stats_base->drops += xstats->wred_drop[i];
+ }
mlxsw_sp_qdisc->stats_base.backlog = 0;
}
@@ -564,15 +661,48 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
};
+/* Grafting is not supported in mlxsw. It will result in un-offloading of the
+ * grafted qdisc as well as the qdisc previously at the graft location.
+ * (However, if the qdisc is grafted to the location it already occupies, the
+ * graft is ignored completely and won't cause un-offloading.)
+ */
+static int
+mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_prio_qopt_offload_graft_params *p)
+{
+ int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->band);
+ struct mlxsw_sp_qdisc *old_qdisc;
+
+ /* Check if the grafted qdisc is already in its "new" location. If so -
+ * nothing needs to be done.
+ */
+ if (p->band < IEEE_8021QAZ_MAX_TCS &&
+ mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle)
+ return 0;
+
+ /* See if the grafted qdisc is already offloaded on any tclass. If so,
+ * unoffload it.
+ */
+ old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port,
+ p->child_handle);
+ if (old_qdisc)
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
+
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
+ &mlxsw_sp_port->tclass_qdiscs[tclass_num]);
+ return -EOPNOTSUPP;
+}
+
int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_prio_qopt_offload *p)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- if (p->parent != TC_H_ROOT)
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
+ if (!mlxsw_sp_qdisc)
return -EOPNOTSUPP;
- mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc;
if (p->command == TC_PRIO_REPLACE)
return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
mlxsw_sp_qdisc,
@@ -589,6 +719,9 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
case TC_PRIO_STATS:
return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
&p->stats);
+ case TC_PRIO_GRAFT:
+ return mlxsw_sp_qdisc_prio_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->graft_params);
default:
return -EOPNOTSUPP;
}
@@ -596,17 +729,36 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
- mlxsw_sp_port->root_qdisc = kzalloc(sizeof(*mlxsw_sp_port->root_qdisc),
- GFP_KERNEL);
- if (!mlxsw_sp_port->root_qdisc)
- return -ENOMEM;
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+ int i;
+ mlxsw_sp_qdisc = kzalloc(sizeof(*mlxsw_sp_qdisc), GFP_KERNEL);
+ if (!mlxsw_sp_qdisc)
+ goto err_root_qdisc_init;
+
+ mlxsw_sp_port->root_qdisc = mlxsw_sp_qdisc;
+ mlxsw_sp_port->root_qdisc->prio_bitmap = 0xff;
mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
+ mlxsw_sp_qdisc = kzalloc(sizeof(*mlxsw_sp_qdisc) * IEEE_8021QAZ_MAX_TCS,
+ GFP_KERNEL);
+ if (!mlxsw_sp_qdisc)
+ goto err_tclass_qdiscs_init;
+
+ mlxsw_sp_port->tclass_qdiscs = mlxsw_sp_qdisc;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ mlxsw_sp_port->tclass_qdiscs[i].tclass_num = i;
+
return 0;
+
+err_tclass_qdiscs_init:
+ kfree(mlxsw_sp_port->root_qdisc);
+err_root_qdisc_init:
+ return -ENOMEM;
}
void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
+ kfree(mlxsw_sp_port->tclass_qdiscs);
kfree(mlxsw_sp_port->root_qdisc);
}
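
Note on the spectrum_qdisc.c changes above: they hinge on two small mappings — a PRIO band maps to a hardware traffic class counted down from the top (and a child qdisc's minor number is band + 1), and per-qdisc byte/packet statistics are summed over the priorities recorded in the qdisc's prio_bitmap. A standalone sketch with invented counter values:

#include <stdint.h>
#include <stdio.h>

#define MAX_TCS 8
#define PRIO_BAND_TO_TCLASS(band)   (MAX_TCS - (band) - 1)
#define PRIO_CHILD_TO_TCLASS(child) PRIO_BAND_TO_TCLASS((child) - 1)

static void bstats_per_priority_get(const uint64_t *tx_bytes_per_prio,
                                    uint8_t prio_bitmap, uint64_t *tx_bytes)
{
    int i;

    *tx_bytes = 0;
    for (i = 0; i < MAX_TCS; i++)
        if (prio_bitmap & (1u << i))
            *tx_bytes += tx_bytes_per_prio[i];
}

int main(void)
{
    uint64_t per_prio[MAX_TCS] = { 100, 200, 300, 0, 0, 0, 0, 0 };
    uint64_t bytes;

    printf("band 0 -> tclass %d\n", PRIO_BAND_TO_TCLASS(0));   /* 7 */
    printf("child 1 -> tclass %d\n", PRIO_CHILD_TO_TCLASS(1)); /* 7 */

    bstats_per_priority_get(per_prio, 0x03, &bytes); /* priorities 0 and 1 */
    printf("bytes=%llu\n", (unsigned long long)bytes); /* 300 */
    return 0;
}
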
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index f0b25baba09a..1904c0323d39 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1,10 +1,10 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
- * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
* Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
* Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
- * Copyright (c) 2017 Petr Machata <petrm@mellanox.com>
+ * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -70,6 +70,7 @@
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
+#include "spectrum_span.h"
struct mlxsw_sp_fib;
struct mlxsw_sp_vr;
@@ -466,7 +467,7 @@ struct mlxsw_sp_vr {
unsigned int rif_count;
struct mlxsw_sp_fib *fib4;
struct mlxsw_sp_fib *fib6;
- struct mlxsw_sp_mr_table *mr4_table;
+ struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
};
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
@@ -710,7 +711,9 @@ static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
- return !!vr->fib4 || !!vr->fib6 || !!vr->mr4_table;
+ return !!vr->fib4 || !!vr->fib6 ||
+ !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
+ !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
}
static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
@@ -788,45 +791,61 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
u32 tb_id,
struct netlink_ext_ack *extack)
{
+ struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
+ struct mlxsw_sp_fib *fib4;
+ struct mlxsw_sp_fib *fib6;
struct mlxsw_sp_vr *vr;
int err;
vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
if (!vr) {
- NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
return ERR_PTR(-EBUSY);
}
- vr->fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
- if (IS_ERR(vr->fib4))
- return ERR_CAST(vr->fib4);
- vr->fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
- if (IS_ERR(vr->fib6)) {
- err = PTR_ERR(vr->fib6);
+ fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
+ if (IS_ERR(fib4))
+ return ERR_CAST(fib4);
+ fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
+ if (IS_ERR(fib6)) {
+ err = PTR_ERR(fib6);
goto err_fib6_create;
}
- vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
- MLXSW_SP_L3_PROTO_IPV4);
- if (IS_ERR(vr->mr4_table)) {
- err = PTR_ERR(vr->mr4_table);
- goto err_mr_table_create;
+ mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
+ MLXSW_SP_L3_PROTO_IPV4);
+ if (IS_ERR(mr4_table)) {
+ err = PTR_ERR(mr4_table);
+ goto err_mr4_table_create;
}
+ mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
+ MLXSW_SP_L3_PROTO_IPV6);
+ if (IS_ERR(mr6_table)) {
+ err = PTR_ERR(mr6_table);
+ goto err_mr6_table_create;
+ }
+
+ vr->fib4 = fib4;
+ vr->fib6 = fib6;
+ vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
+ vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
vr->tb_id = tb_id;
return vr;
-err_mr_table_create:
- mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
- vr->fib6 = NULL;
+err_mr6_table_create:
+ mlxsw_sp_mr_table_destroy(mr4_table);
+err_mr4_table_create:
+ mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
err_fib6_create:
- mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
- vr->fib4 = NULL;
+ mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
return ERR_PTR(err);
}
static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_vr *vr)
{
- mlxsw_sp_mr_table_destroy(vr->mr4_table);
- vr->mr4_table = NULL;
+ mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
+ vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
+ mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
+ vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
vr->fib6 = NULL;
mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
@@ -849,7 +868,8 @@ static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
list_empty(&vr->fib6->node_list) &&
- mlxsw_sp_mr_table_empty(vr->mr4_table))
+ mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
+ mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
mlxsw_sp_vr_destroy(mlxsw_sp, vr);
}
@@ -1020,9 +1040,11 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_ipip_type ipipt,
struct net_device *ol_dev)
{
+ const struct mlxsw_sp_ipip_ops *ipip_ops;
struct mlxsw_sp_ipip_entry *ipip_entry;
struct mlxsw_sp_ipip_entry *ret = NULL;
+ ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
if (!ipip_entry)
return ERR_PTR(-ENOMEM);
@@ -1036,7 +1058,15 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
ipip_entry->ipipt = ipipt;
ipip_entry->ol_dev = ol_dev;
- ipip_entry->parms = mlxsw_sp_ipip_netdev_parms(ol_dev);
+
+ switch (ipip_ops->ul_proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ WARN_ON(1);
+ break;
+ }
return ipip_entry;
@@ -1376,6 +1406,55 @@ mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
decap_fib_entry);
}
+static int
+mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
+ struct mlxsw_sp_vr *ul_vr, bool enable)
+{
+ struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
+ struct mlxsw_sp_rif *rif = &lb_rif->common;
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ u32 saddr4;
+
+ switch (lb_cf.ul_protocol) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
+ mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
+ rif->rif_index, rif->vr_id, rif->dev->mtu);
+ mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
+ MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
+ ul_vr->id, saddr4, lb_cf.okey);
+ break;
+
+ case MLXSW_SP_L3_PROTO_IPV6:
+ return -EAFNOSUPPORT;
+ }
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ol_dev)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+ struct mlxsw_sp_rif_ipip_lb *lb_rif;
+ struct mlxsw_sp_vr *ul_vr;
+ int err = 0;
+
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+ if (ipip_entry) {
+ lb_rif = ipip_entry->ol_lb;
+ ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
+ err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
+ if (err)
+ goto out;
+ lb_rif->common.mtu = ol_dev->mtu;
+ }
+
+out:
+ return err;
+}
+
static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
struct net_device *ol_dev)
{
@@ -1656,6 +1735,8 @@ int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
extack = info->extack;
return mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
ol_dev, extack);
+ case NETDEV_CHANGEMTU:
+ return mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
}
return 0;
}
@@ -2316,6 +2397,8 @@ static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
read_unlock_bh(&n->lock);
rtnl_lock();
+ mlxsw_sp_span_respin(mlxsw_sp);
+
entry_connected = nud_state & NUD_VALID && !dead;
neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
if (!entry_connected && !neigh_entry)
@@ -2413,7 +2496,8 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
mlxsw_core_schedule_work(&net_work->work);
mlxsw_sp_port_dev_put(mlxsw_sp_port);
break;
- case NETEVENT_MULTIPATH_HASH_UPDATE:
+ case NETEVENT_IPV4_MPATH_HASH_UPDATE:
+ case NETEVENT_IPV6_MPATH_HASH_UPDATE:
net = ptr;
if (!net_eq(net, &init_net))
@@ -3790,6 +3874,9 @@ mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
int i;
+ if (!list_is_singular(&nh_grp->fib_list))
+ return;
+
for (i = 0; i < nh_grp->count; i++) {
struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
@@ -5357,10 +5444,20 @@ static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+static struct mlxsw_sp_mr_table *
+mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
+{
+ if (family == RTNL_FAMILY_IPMR)
+ return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
+ else
+ return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
+}
+
static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
struct mfc_entry_notifier_info *men_info,
bool replace)
{
+ struct mlxsw_sp_mr_table *mrt;
struct mlxsw_sp_vr *vr;
if (mlxsw_sp->router->aborted)
@@ -5370,12 +5467,14 @@ static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
if (IS_ERR(vr))
return PTR_ERR(vr);
- return mlxsw_sp_mr_route4_add(vr->mr4_table, men_info->mfc, replace);
+ mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
+ return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
}
static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
struct mfc_entry_notifier_info *men_info)
{
+ struct mlxsw_sp_mr_table *mrt;
struct mlxsw_sp_vr *vr;
if (mlxsw_sp->router->aborted)
@@ -5385,7 +5484,8 @@ static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
if (WARN_ON(!vr))
return;
- mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc);
+ mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
+ mlxsw_sp_mr_route_del(mrt, men_info->mfc);
mlxsw_sp_vr_put(mlxsw_sp, vr);
}
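
Note on the notifier hunks around here: the multicast table is now selected by rtnetlink family — RTNL_FAMILY_IPMR notifications land in the IPv4 table and RTNL_FAMILY_IP6MR in the IPv6 one. A minimal sketch; the family constants are the rtnetlink values from include/uapi/linux/rtnetlink.h, the table type is a stand-in:

#include <stdio.h>

#define RTNL_FAMILY_IPMR  128
#define RTNL_FAMILY_IP6MR 129

enum l3proto { L3_PROTO_IPV4, L3_PROTO_IPV6, L3_PROTO_MAX };

struct mr_table { const char *name; };

static struct mr_table *family_to_table(struct mr_table *tables, int family)
{
    if (family == RTNL_FAMILY_IPMR)
        return &tables[L3_PROTO_IPV4];
    return &tables[L3_PROTO_IPV6];
}

int main(void)
{
    struct mr_table tables[L3_PROTO_MAX] = {
        [L3_PROTO_IPV4] = { "ipv4 multicast table" },
        [L3_PROTO_IPV6] = { "ipv6 multicast table" },
    };

    printf("%s\n", family_to_table(tables, RTNL_FAMILY_IP6MR)->name);
    return 0;
}
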
@@ -5393,6 +5493,7 @@ static int
mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
struct vif_entry_notifier_info *ven_info)
{
+ struct mlxsw_sp_mr_table *mrt;
struct mlxsw_sp_rif *rif;
struct mlxsw_sp_vr *vr;
@@ -5403,8 +5504,9 @@ mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
if (IS_ERR(vr))
return PTR_ERR(vr);
+ mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
- return mlxsw_sp_mr_vif_add(vr->mr4_table, ven_info->dev,
+ return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
ven_info->vif_index,
ven_info->vif_flags, rif);
}
@@ -5413,6 +5515,7 @@ static void
mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
struct vif_entry_notifier_info *ven_info)
{
+ struct mlxsw_sp_mr_table *mrt;
struct mlxsw_sp_vr *vr;
if (mlxsw_sp->router->aborted)
@@ -5422,7 +5525,8 @@ mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
if (WARN_ON(!vr))
return;
- mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index);
+ mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
+ mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
mlxsw_sp_vr_put(mlxsw_sp, vr);
}
@@ -5514,7 +5618,7 @@ static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
{
- int i;
+ int i, j;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
@@ -5522,7 +5626,8 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
if (!mlxsw_sp_vr_is_used(vr))
continue;
- mlxsw_sp_mr_table_flush(vr->mr4_table);
+ for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
+ mlxsw_sp_mr_table_flush(vr->mr_table[j]);
mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
/* If virtual router was only used for IPv4, then it's no
@@ -5572,6 +5677,8 @@ static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
/* Protect internal structures from changes */
rtnl_lock();
+ mlxsw_sp_span_respin(mlxsw_sp);
+
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
case FIB_EVENT_ENTRY_APPEND: /* fall through */
@@ -5614,6 +5721,8 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
int err;
rtnl_lock();
+ mlxsw_sp_span_respin(mlxsw_sp);
+
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
case FIB_EVENT_ENTRY_ADD:
@@ -5657,11 +5766,11 @@ static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
replace);
if (err)
mlxsw_sp_router_fib_abort(mlxsw_sp);
- ipmr_cache_put(fib_work->men_info.mfc);
+ mr_cache_put(fib_work->men_info.mfc);
break;
case FIB_EVENT_ENTRY_DEL:
mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
- ipmr_cache_put(fib_work->men_info.mfc);
+ mr_cache_put(fib_work->men_info.mfc);
break;
case FIB_EVENT_VIF_ADD:
err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
@@ -5741,7 +5850,7 @@ mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
- ipmr_cache_hold(fib_work->men_info.mfc);
+ mr_cache_hold(fib_work->men_info.mfc);
break;
case FIB_EVENT_VIF_ADD: /* fall through */
case FIB_EVENT_VIF_DEL:
@@ -5783,10 +5892,14 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event,
if (!ipmr_rule_default(rule) && !rule->l3mdev)
err = -1;
break;
+ case RTNL_FAMILY_IP6MR:
+ if (!ip6mr_rule_default(rule) && !rule->l3mdev)
+ err = -1;
+ break;
}
if (err < 0)
- NL_SET_ERR_MSG(extack, "spectrum: FIB rules not supported. Aborting offload");
+ NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported. Aborting offload");
return err;
}
@@ -5802,7 +5915,8 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
if (!net_eq(info->net, &init_net) ||
(info->family != AF_INET && info->family != AF_INET6 &&
- info->family != RTNL_FAMILY_IPMR))
+ info->family != RTNL_FAMILY_IPMR &&
+ info->family != RTNL_FAMILY_IP6MR))
return NOTIFY_DONE;
router = container_of(nb, struct mlxsw_sp_router, fib_nb);
@@ -5832,6 +5946,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
mlxsw_sp_router_fib6_event(fib_work, info);
break;
+ case RTNL_FAMILY_IP6MR:
case RTNL_FAMILY_IPMR:
INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
mlxsw_sp_router_fibmr_event(fib_work, info);
@@ -6013,7 +6128,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif;
struct mlxsw_sp_vr *vr;
u16 rif_index;
- int err;
+ int i, err;
type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
ops = mlxsw_sp->router->rif_ops_arr[type];
@@ -6025,7 +6140,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
if (err) {
- NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported router interfaces");
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
goto err_rif_index_alloc;
}
@@ -6053,9 +6168,11 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_configure;
- err = mlxsw_sp_mr_rif_add(vr->mr4_table, rif);
- if (err)
- goto err_mr_rif_add;
+ for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
+ err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
+ if (err)
+ goto err_mr_rif_add;
+ }
mlxsw_sp_rif_counters_alloc(rif);
mlxsw_sp->router->rifs[rif_index] = rif;
@@ -6063,6 +6180,8 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
return rif;
err_mr_rif_add:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
ops->deconfigure(rif);
err_configure:
if (fid)
@@ -6082,13 +6201,15 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
struct mlxsw_sp_fid *fid = rif->fid;
struct mlxsw_sp_vr *vr;
+ int i;
mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
vr = &mlxsw_sp->router->vrs[rif->vr_id];
mlxsw_sp->router->rifs[rif->rif_index] = NULL;
mlxsw_sp_rif_counters_free(rif);
- mlxsw_sp_mr_rif_del(vr->mr4_table, rif);
+ for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
+ mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
ops->deconfigure(rif);
if (fid)
/* Loopback RIFs are not associated with a FID. */
@@ -6495,13 +6616,16 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
if (rif->mtu != dev->mtu) {
struct mlxsw_sp_vr *vr;
+ int i;
/* The RIF is relevant only to its mr_table instance, as unlike
* unicast routing, in multicast routing a RIF cannot be shared
* between several multicast routing tables.
*/
vr = &mlxsw_sp->router->vrs[rif->vr_id];
- mlxsw_sp_mr_rif_mtu_update(vr->mr4_table, rif, dev->mtu);
+ for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
+ mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
+ rif, dev->mtu);
}
ether_addr_copy(rif->addr, dev->dev_addr);
@@ -6837,33 +6961,6 @@ mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
}
static int
-mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
- struct mlxsw_sp_vr *ul_vr, bool enable)
-{
- struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
- struct mlxsw_sp_rif *rif = &lb_rif->common;
- struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
- char ritr_pl[MLXSW_REG_RITR_LEN];
- u32 saddr4;
-
- switch (lb_cf.ul_protocol) {
- case MLXSW_SP_L3_PROTO_IPV4:
- saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
- mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
- rif->rif_index, rif->vr_id, rif->dev->mtu);
- mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
- MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
- ul_vr->id, saddr4, lb_cf.okey);
- break;
-
- case MLXSW_SP_L3_PROTO_IPV6:
- return -EAFNOSUPPORT;
- }
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
-}
-
-static int
mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
@@ -7006,13 +7103,25 @@ static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
static void mlxsw_sp_mp6_hash_init(char *recr2_pl)
{
+ bool only_l3 = !ip6_multipath_hash_policy(&init_net);
+
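+	/* Match the kernel's IPv6 multipath hash policy: hash on the flow
+	 * label when only L3 fields are considered, otherwise hash on the
+	 * TCP/UDP ports.
+	 */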
mlxsw_sp_mp_hash_header_set(recr2_pl,
MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
- mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
+ if (only_l3) {
+ mlxsw_sp_mp_hash_field_set(recr2_pl,
+ MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
+ } else {
+ mlxsw_sp_mp_hash_header_set(recr2_pl,
+ MLXSW_REG_RECR2_TCP_UDP_EN_IPV6);
+ mlxsw_sp_mp_hash_field_set(recr2_pl,
+ MLXSW_REG_RECR2_TCP_UDP_SPORT);
+ mlxsw_sp_mp_hash_field_set(recr2_pl,
+ MLXSW_REG_RECR2_TCP_UDP_DPORT);
+ }
}
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 1fb82246ce96..a01edcf56797 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -41,6 +41,7 @@
enum mlxsw_sp_l3proto {
MLXSW_SP_L3_PROTO_IPV4,
MLXSW_SP_L3_PROTO_IPV6,
+#define MLXSW_SP_L3_PROTO_MAX (MLXSW_SP_L3_PROTO_IPV6 + 1)
};
union mlxsw_sp_l3addr {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
new file mode 100644
index 000000000000..65a77708ff61
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -0,0 +1,824 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2018 Petr Machata <petrm@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/list.h>
+#include <net/arp.h>
+#include <net/gre.h>
+#include <net/ndisc.h>
+#include <net/ip6_tunnel.h>
+
+#include "spectrum.h"
+#include "spectrum_span.h"
+#include "spectrum_ipip.h"
+
+int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
+ return -EIO;
+
+ mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ MAX_SPAN);
+ mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
+ sizeof(struct mlxsw_sp_span_entry),
+ GFP_KERNEL);
+ if (!mlxsw_sp->span.entries)
+ return -ENOMEM;
+
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+
+ INIT_LIST_HEAD(&curr->bound_ports_list);
+ curr->id = i;
+ }
+
+ return 0;
+}
+
+void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+
+ WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
+ }
+ kfree(mlxsw_sp->span.entries);
+}
+
+static int
+mlxsw_sp_span_entry_phys_parms(const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp)
+{
+ sparmsp->dest_port = netdev_priv(to_dev);
+ return 0;
+}
+
+static int
+mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms)
+{
+ struct mlxsw_sp_port *dest_port = sparms.dest_port;
+ struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
+ u8 local_port = dest_port->local_port;
+ char mpat_pl[MLXSW_REG_MPAT_LEN];
+ int pa_id = span_entry->id;
+
+	/* Create a new port analyzer entry for local_port. */
+ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
+ MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
+}
+
+static void
+mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
+ enum mlxsw_reg_mpat_span_type span_type)
+{
+ struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
+ struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
+ u8 local_port = dest_port->local_port;
+ char mpat_pl[MLXSW_REG_MPAT_LEN];
+ int pa_id = span_entry->id;
+
+ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
+}
+
+static void
+mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
+{
+ mlxsw_sp_span_entry_deconfigure_common(span_entry,
+ MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
+}
+
+static const
+struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
+ .can_handle = mlxsw_sp_port_dev_check,
+ .parms = mlxsw_sp_span_entry_phys_parms,
+ .configure = mlxsw_sp_span_entry_phys_configure,
+ .deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
+};
+
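+/* Resolve the MAC address of the next hop towards the mirror target:
+ * look up (or create) the neighbour entry for the gateway, kick off
+ * resolution, and copy its hardware address if it is currently valid.
+ */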
+static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
+ const void *pkey,
+ struct net_device *l3edev,
+ unsigned char dmac[ETH_ALEN])
+{
+ struct neighbour *neigh = neigh_lookup(tbl, pkey, l3edev);
+ int err = 0;
+
+ if (!neigh) {
+ neigh = neigh_create(tbl, pkey, l3edev);
+ if (IS_ERR(neigh))
+ return PTR_ERR(neigh);
+ }
+
+ neigh_event_send(neigh, NULL);
+
+ read_lock_bh(&neigh->lock);
+ if ((neigh->nud_state & NUD_VALID) && !neigh->dead)
+ memcpy(dmac, neigh->ha, ETH_ALEN);
+ else
+ err = -ENOENT;
+ read_unlock_bh(&neigh->lock);
+
+ neigh_release(neigh);
+ return err;
+}
+
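+/* A SPAN entry without a destination port is kept in software but does
+ * not mirror anything in hardware.
+ */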
+static int
+mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
+{
+ sparmsp->dest_port = NULL;
+ return 0;
+}
+
+static __maybe_unused int
+mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *l3edev,
+ union mlxsw_sp_l3addr saddr,
+ union mlxsw_sp_l3addr daddr,
+ union mlxsw_sp_l3addr gw,
+ __u8 ttl,
+ struct neigh_table *tbl,
+ struct mlxsw_sp_span_parms *sparmsp)
+{
+ unsigned char dmac[ETH_ALEN];
+
+ if (mlxsw_sp_l3addr_is_zero(gw))
+ gw = daddr;
+
+ if (!l3edev || !mlxsw_sp_port_dev_check(l3edev) ||
+ mlxsw_sp_span_dmac(tbl, &gw, l3edev, dmac))
+ return mlxsw_sp_span_entry_unoffloadable(sparmsp);
+
+ sparmsp->dest_port = netdev_priv(l3edev);
+ sparmsp->ttl = ttl;
+ memcpy(sparmsp->dmac, dmac, ETH_ALEN);
+ memcpy(sparmsp->smac, l3edev->dev_addr, ETH_ALEN);
+ sparmsp->saddr = saddr;
+ sparmsp->daddr = daddr;
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_NET_IPGRE)
+static struct net_device *
+mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
+ __be32 *saddrp, __be32 *daddrp)
+{
+ struct ip_tunnel *tun = netdev_priv(to_dev);
+ struct net_device *dev = NULL;
+ struct ip_tunnel_parm parms;
+ struct rtable *rt = NULL;
+ struct flowi4 fl4;
+
+ /* We assume "dev" stays valid after rt is put. */
+ ASSERT_RTNL();
+
+ parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
+ ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
+ 0, 0, parms.link, tun->fwmark);
+
+ rt = ip_route_output_key(tun->net, &fl4);
+ if (IS_ERR(rt))
+ return NULL;
+
+ if (rt->rt_type != RTN_UNICAST)
+ goto out;
+
+ dev = rt->dst.dev;
+ *saddrp = fl4.saddr;
+ *daddrp = rt->rt_gateway;
+
+out:
+ ip_rt_put(rt);
+ return dev;
+}
+
+static int
+mlxsw_sp_span_entry_gretap4_parms(const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp)
+{
+ struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
+ union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
+ union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
+ bool inherit_tos = tparm.iph.tos & 0x1;
+ bool inherit_ttl = !tparm.iph.ttl;
+ union mlxsw_sp_l3addr gw = daddr;
+ struct net_device *l3edev;
+
+ if (!(to_dev->flags & IFF_UP) ||
+ /* Reject tunnels with GRE keys, checksums, etc. */
+ tparm.i_flags || tparm.o_flags ||
+ /* Require a fixed TTL and a TOS copied from the mirrored packet. */
+ inherit_ttl || !inherit_tos ||
+ /* A destination address may not be "any". */
+ mlxsw_sp_l3addr_is_zero(daddr))
+ return mlxsw_sp_span_entry_unoffloadable(sparmsp);
+
+ l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
+ return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
+ tparm.iph.ttl,
+ &arp_tbl, sparmsp);
+}
+
+static int
+mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms)
+{
+ struct mlxsw_sp_port *dest_port = sparms.dest_port;
+ struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
+ u8 local_port = dest_port->local_port;
+ char mpat_pl[MLXSW_REG_MPAT_LEN];
+ int pa_id = span_entry->id;
+
+	/* Create a new port analyzer entry for local_port. */
+ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
+ mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
+ MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
+ sparms.dmac, false);
+ mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
+ sparms.ttl, sparms.smac,
+ be32_to_cpu(sparms.saddr.addr4),
+ be32_to_cpu(sparms.daddr.addr4));
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
+}
+
+static void
+mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
+{
+ mlxsw_sp_span_entry_deconfigure_common(span_entry,
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
+}
+
+static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
+ .can_handle = is_gretap_dev,
+ .parms = mlxsw_sp_span_entry_gretap4_parms,
+ .configure = mlxsw_sp_span_entry_gretap4_configure,
+ .deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
+};
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6_GRE)
+static struct net_device *
+mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
+ struct in6_addr *saddrp,
+ struct in6_addr *daddrp)
+{
+ struct ip6_tnl *t = netdev_priv(to_dev);
+ struct flowi6 fl6 = t->fl.u.ip6;
+ struct net_device *dev = NULL;
+ struct dst_entry *dst;
+ struct rt6_info *rt6;
+
+ /* We assume "dev" stays valid after dst is released. */
+ ASSERT_RTNL();
+
+ fl6.flowi6_mark = t->parms.fwmark;
+ if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
+ return NULL;
+
+ dst = ip6_route_output(t->net, NULL, &fl6);
+ if (!dst || dst->error)
+ goto out;
+
+ rt6 = container_of(dst, struct rt6_info, dst);
+
+ dev = dst->dev;
+ *saddrp = fl6.saddr;
+ *daddrp = rt6->rt6i_gateway;
+
+out:
+ dst_release(dst);
+ return dev;
+}
+
+static int
+mlxsw_sp_span_entry_gretap6_parms(const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp)
+{
+ struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
+ bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
+ union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr };
+ union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr };
+ bool inherit_ttl = !tparm.hop_limit;
+ union mlxsw_sp_l3addr gw = daddr;
+ struct net_device *l3edev;
+
+ if (!(to_dev->flags & IFF_UP) ||
+ /* Reject tunnels with GRE keys, checksums, etc. */
+ tparm.i_flags || tparm.o_flags ||
+ /* Require a fixed TTL and a TOS copied from the mirrored packet. */
+ inherit_ttl || !inherit_tos ||
+ /* A destination address may not be "any". */
+ mlxsw_sp_l3addr_is_zero(daddr))
+ return mlxsw_sp_span_entry_unoffloadable(sparmsp);
+
+ l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
+ return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
+ tparm.hop_limit,
+ &nd_tbl, sparmsp);
+}
+
+static int
+mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms)
+{
+ struct mlxsw_sp_port *dest_port = sparms.dest_port;
+ struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
+ u8 local_port = dest_port->local_port;
+ char mpat_pl[MLXSW_REG_MPAT_LEN];
+ int pa_id = span_entry->id;
+
+	/* Create a new port analyzer entry for local_port. */
+ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
+ mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
+ MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
+ sparms.dmac, false);
+ mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
+ sparms.saddr.addr6,
+ sparms.daddr.addr6);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
+}
+
+static void
+mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
+{
+ mlxsw_sp_span_entry_deconfigure_common(span_entry,
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
+}
+
+static const
+struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
+ .can_handle = is_ip6gretap_dev,
+ .parms = mlxsw_sp_span_entry_gretap6_parms,
+ .configure = mlxsw_sp_span_entry_gretap6_configure,
+ .deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
+};
+#endif
+
+static const
+struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = {
+ &mlxsw_sp_span_entry_ops_phys,
+#if IS_ENABLED(CONFIG_NET_IPGRE)
+ &mlxsw_sp_span_entry_ops_gretap4,
+#endif
+#if IS_ENABLED(CONFIG_IPV6_GRE)
+ &mlxsw_sp_span_entry_ops_gretap6,
+#endif
+};
+
+static int
+mlxsw_sp_span_entry_nop_parms(const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp)
+{
+ return mlxsw_sp_span_entry_unoffloadable(sparmsp);
+}
+
+static int
+mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms)
+{
+ return 0;
+}
+
+static void
+mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
+{
+}
+
+static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
+ .parms = mlxsw_sp_span_entry_nop_parms,
+ .configure = mlxsw_sp_span_entry_nop_configure,
+ .deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
+};
+
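+/* Program the SPAN entry into the device. If the mirror target resolves
+ * to a port of a different mlxsw instance, or if the configure callback
+ * fails, fall back to an unoffloaded entry by clearing dest_port.
+ */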
+static void
+mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms)
+{
+ if (sparms.dest_port) {
+ if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
+ netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance",
+ sparms.dest_port->dev->name);
+ sparms.dest_port = NULL;
+ } else if (span_entry->ops->configure(span_entry, sparms)) {
+ netdev_err(span_entry->to_dev, "Failed to offload mirror to %s",
+ sparms.dest_port->dev->name);
+ sparms.dest_port = NULL;
+ }
+ }
+
+ span_entry->parms = sparms;
+}
+
+static void
+mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry)
+{
+ if (span_entry->parms.dest_port)
+ span_entry->ops->deconfigure(span_entry);
+}
+
+static struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev,
+ const struct mlxsw_sp_span_entry_ops *ops,
+ struct mlxsw_sp_span_parms sparms)
+{
+ struct mlxsw_sp_span_entry *span_entry = NULL;
+ int i;
+
+ /* find a free entry to use */
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ if (!mlxsw_sp->span.entries[i].ref_count) {
+ span_entry = &mlxsw_sp->span.entries[i];
+ break;
+ }
+ }
+ if (!span_entry)
+ return NULL;
+
+ span_entry->ops = ops;
+ span_entry->ref_count = 1;
+ span_entry->to_dev = to_dev;
+ mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);
+
+ return span_entry;
+}
+
+static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp_span_entry *span_entry)
+{
+ mlxsw_sp_span_entry_deconfigure(span_entry);
+}
+
+struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev)
+{
+ int i;
+
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+
+ if (curr->ref_count && curr->to_dev == to_dev)
+ return curr;
+ }
+ return NULL;
+}
+
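+/* Tear down the hardware mirror and switch the entry to the no-op
+ * callbacks, so it stays allocated and referenced but no longer mirrors
+ * any traffic.
+ */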
+void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_span_entry *span_entry)
+{
+ mlxsw_sp_span_entry_deconfigure(span_entry);
+ span_entry->ops = &mlxsw_sp_span_entry_ops_nop;
+}
+
+static struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
+{
+ int i;
+
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+
+ if (curr->ref_count && curr->id == span_id)
+ return curr;
+ }
+ return NULL;
+}
+
+static struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev,
+ const struct mlxsw_sp_span_entry_ops *ops,
+ struct mlxsw_sp_span_parms sparms)
+{
+ struct mlxsw_sp_span_entry *span_entry;
+
+ span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, to_dev);
+ if (span_entry) {
+ /* Already exists, just take a reference */
+ span_entry->ref_count++;
+ return span_entry;
+ }
+
+ return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
+}
+
+static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_span_entry *span_entry)
+{
+ WARN_ON(!span_entry->ref_count);
+ if (--span_entry->ref_count == 0)
+ mlxsw_sp_span_entry_destroy(span_entry);
+ return 0;
+}
+
+static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
+{
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ struct mlxsw_sp_span_inspected_port *p;
+ int i;
+
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+
+ list_for_each_entry(p, &curr->bound_ports_list, list)
+ if (p->local_port == port->local_port &&
+ p->type == MLXSW_SP_SPAN_EGRESS)
+ return true;
+ }
+
+ return false;
+}
+
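+/* Size the mirror shared buffer as 2.5 times the MTU, converted to
+ * cells, plus one extra cell.
+ */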
+static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
+ int mtu)
+{
+ return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
+}
+
+int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
+{
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
+ int err;
+
+ /* If port is egress mirrored, the shared buffer size should be
+ * updated according to the mtu value
+ */
+ if (mlxsw_sp_span_is_egress_mirror(port)) {
+ u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);
+
+ mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ if (err) {
+ netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static struct mlxsw_sp_span_inspected_port *
+mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_span_entry *span_entry,
+ enum mlxsw_sp_span_type type,
+ struct mlxsw_sp_port *port,
+ bool bind)
+{
+ struct mlxsw_sp_span_inspected_port *p;
+
+ list_for_each_entry(p, &span_entry->bound_ports_list, list)
+ if (type == p->type &&
+ port->local_port == p->local_port &&
+ bind == p->bound)
+ return p;
+ return NULL;
+}
+
+static int
+mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
+ struct mlxsw_sp_span_entry *span_entry,
+ enum mlxsw_sp_span_type type,
+ bool bind)
+{
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ char mpar_pl[MLXSW_REG_MPAR_LEN];
+ int pa_id = span_entry->id;
+
+ /* bind the port to the SPAN entry */
+ mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
+ (enum mlxsw_reg_mpar_i_e)type, bind, pa_id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
+}
+
+static int
+mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
+ struct mlxsw_sp_span_entry *span_entry,
+ enum mlxsw_sp_span_type type,
+ bool bind)
+{
+ struct mlxsw_sp_span_inspected_port *inspected_port;
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
+ int i;
+ int err;
+
+ /* A given (source port, direction) can only be bound to one analyzer,
+ * so if a binding is requested, check for conflicts.
+ */
+ if (bind)
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr =
+ &mlxsw_sp->span.entries[i];
+
+ if (mlxsw_sp_span_entry_bound_port_find(curr, type,
+ port, bind))
+ return -EEXIST;
+ }
+
+ /* if it is an egress SPAN, bind a shared buffer to it */
+ if (type == MLXSW_SP_SPAN_EGRESS) {
+ u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
+ port->dev->mtu);
+
+ mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ if (err) {
+ netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
+ return err;
+ }
+ }
+
+ if (bind) {
+ err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
+ true);
+ if (err)
+ goto err_port_bind;
+ }
+
+ inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
+ if (!inspected_port) {
+ err = -ENOMEM;
+ goto err_inspected_port_alloc;
+ }
+ inspected_port->local_port = port->local_port;
+ inspected_port->type = type;
+ inspected_port->bound = bind;
+ list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
+
+ return 0;
+
+err_inspected_port_alloc:
+ if (bind)
+ mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
+ false);
+err_port_bind:
+ if (type == MLXSW_SP_SPAN_EGRESS) {
+ mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ }
+ return err;
+}
+
+static void
+mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
+ struct mlxsw_sp_span_entry *span_entry,
+ enum mlxsw_sp_span_type type,
+ bool bind)
+{
+ struct mlxsw_sp_span_inspected_port *inspected_port;
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
+
+ inspected_port = mlxsw_sp_span_entry_bound_port_find(span_entry, type,
+ port, bind);
+ if (!inspected_port)
+ return;
+
+ if (bind)
+ mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
+ false);
+ /* remove the SBIB buffer if it was egress SPAN */
+ if (type == MLXSW_SP_SPAN_EGRESS) {
+ mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ }
+
+ mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
+
+ list_del(&inspected_port->list);
+ kfree(inspected_port);
+}
+
+static const struct mlxsw_sp_span_entry_ops *
+mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i)
+ if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev))
+ return mlxsw_sp_span_entry_types[i];
+
+ return NULL;
+}
+
+int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
+ const struct net_device *to_dev,
+ enum mlxsw_sp_span_type type, bool bind,
+ int *p_span_id)
+{
+ struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
+ const struct mlxsw_sp_span_entry_ops *ops;
+ struct mlxsw_sp_span_parms sparms = {NULL};
+ struct mlxsw_sp_span_entry *span_entry;
+ int err;
+
+ ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
+ if (!ops) {
+ netdev_err(to_dev, "Cannot mirror to %s", to_dev->name);
+ return -EOPNOTSUPP;
+ }
+
+ err = ops->parms(to_dev, &sparms);
+ if (err)
+ return err;
+
+ span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
+ if (!span_entry)
+ return -ENOENT;
+
+ netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
+ span_entry->id);
+
+ err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
+ if (err)
+ goto err_port_bind;
+
+ *p_span_id = span_entry->id;
+ return 0;
+
+err_port_bind:
+ mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
+ return err;
+}
+
+void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
+ enum mlxsw_sp_span_type type, bool bind)
+{
+ struct mlxsw_sp_span_entry *span_entry;
+
+ span_entry = mlxsw_sp_span_entry_find_by_id(from->mlxsw_sp, span_id);
+ if (!span_entry) {
+ netdev_err(from->dev, "no span entry found\n");
+ return;
+ }
+
+ netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
+ span_entry->id);
+ mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
+}
+
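+/* Re-resolve the parameters of every active SPAN entry, e.g. after a
+ * routing change, and reprogram the entries whose resolved parameters
+ * differ from what is currently configured.
+ */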
+void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+ int err;
+
+ ASSERT_RTNL();
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+ struct mlxsw_sp_span_parms sparms = {NULL};
+
+ if (!curr->ref_count)
+ continue;
+
+ err = curr->ops->parms(curr->to_dev, &sparms);
+ if (err)
+ continue;
+
+ if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
+ mlxsw_sp_span_entry_deconfigure(curr);
+ mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
+ }
+ }
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
new file mode 100644
index 000000000000..4b87ec20e658
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -0,0 +1,107 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_SPECTRUM_SPAN_H
+#define _MLXSW_SPECTRUM_SPAN_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#include "spectrum_router.h"
+
+struct mlxsw_sp;
+struct mlxsw_sp_port;
+
+enum mlxsw_sp_span_type {
+ MLXSW_SP_SPAN_EGRESS,
+ MLXSW_SP_SPAN_INGRESS
+};
+
+struct mlxsw_sp_span_inspected_port {
+ struct list_head list;
+ enum mlxsw_sp_span_type type;
+ u8 local_port;
+
+ /* Whether this is a directly bound mirror (port-to-port) or an ACL. */
+ bool bound;
+};
+
+struct mlxsw_sp_span_parms {
+ struct mlxsw_sp_port *dest_port; /* NULL for unoffloaded SPAN. */
+ unsigned int ttl;
+ unsigned char dmac[ETH_ALEN];
+ unsigned char smac[ETH_ALEN];
+ union mlxsw_sp_l3addr daddr;
+ union mlxsw_sp_l3addr saddr;
+};
+
+struct mlxsw_sp_span_entry_ops;
+
+struct mlxsw_sp_span_entry {
+ const struct net_device *to_dev;
+ const struct mlxsw_sp_span_entry_ops *ops;
+ struct mlxsw_sp_span_parms parms;
+ struct list_head bound_ports_list;
+ int ref_count;
+ int id;
+};
+
+struct mlxsw_sp_span_entry_ops {
+ bool (*can_handle)(const struct net_device *to_dev);
+ int (*parms)(const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp);
+ int (*configure)(struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms);
+ void (*deconfigure)(struct mlxsw_sp_span_entry *span_entry);
+};
+
+int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp);
+
+int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
+ const struct net_device *to_dev,
+ enum mlxsw_sp_span_type type,
+ bool bind, int *p_span_id);
+void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
+ enum mlxsw_sp_span_type type, bool bind);
+struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev);
+
+void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_span_entry *span_entry);
+
+int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 593ad31be749..c11c9a635866 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1203,6 +1203,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
bool dynamic)
{
char *sfd_pl;
+ u8 num_rec;
int err;
sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -1212,9 +1213,16 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
mac, fid, action, local_port);
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
- kfree(sfd_pl);
+ if (err)
+ goto out;
+
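+	/* The device reports back the number of records it actually
+	 * processed, so a mismatch with the requested count means the
+	 * entry was not offloaded.
+	 */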
+ if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ err = -EBUSY;
+out:
+ kfree(sfd_pl);
return err;
}
@@ -1239,6 +1247,7 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
bool adding, bool dynamic)
{
char *sfd_pl;
+ u8 num_rec;
int err;
sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -1249,9 +1258,16 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
lag_vid, lag_id);
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
- kfree(sfd_pl);
+ if (err)
+ goto out;
+
+ if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ err = -EBUSY;
+out:
+ kfree(sfd_pl);
return err;
}
@@ -1296,6 +1312,7 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
u16 fid, u16 mid_idx, bool adding)
{
char *sfd_pl;
+ u8 num_rec;
int err;
sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -1305,7 +1322,15 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
+ num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
+ if (err)
+ goto out;
+
+ if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ err = -EBUSY;
+
+out:
kfree(sfd_pl);
return err;
}
@@ -1819,7 +1844,7 @@ mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
if (is_vlan_dev(bridge_port->dev)) {
- NL_SET_ERR_MSG(extack, "spectrum: Can not enslave a VLAN device to a VLAN-aware bridge");
+ NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
return -EINVAL;
}
@@ -1882,20 +1907,16 @@ mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ struct net_device *dev = bridge_port->dev;
u16 vid;
- if (!is_vlan_dev(bridge_port->dev)) {
- NL_SET_ERR_MSG(extack, "spectrum: Only VLAN devices can be enslaved to a VLAN-unaware bridge");
- return -EINVAL;
- }
- vid = vlan_dev_vlan_id(bridge_port->dev);
-
+ vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
if (WARN_ON(!mlxsw_sp_port_vlan))
return -EINVAL;
if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
- NL_SET_ERR_MSG(extack, "spectrum: Can not bridge VLAN uppers of the same port");
+ NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
return -EINVAL;
}
@@ -1912,8 +1933,10 @@ mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- u16 vid = vlan_dev_vlan_id(bridge_port->dev);
+ struct net_device *dev = bridge_port->dev;
+ u16 vid;
+ vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
if (WARN_ON(!mlxsw_sp_port_vlan))
return;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchib.c b/drivers/net/ethernet/mellanox/mlxsw/switchib.c
index ab7a29846bfa..c698ec4fd9d4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchib.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchib.c
@@ -510,7 +510,6 @@ static const struct mlxsw_config_profile mlxsw_sib_config_profile = {
.type = MLXSW_PORT_SWID_TYPE_IB,
}
},
- .resource_query_enable = 0,
};
static struct mlxsw_driver mlxsw_sib_driver = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index f3c29bbf07e2..a655c5850aa6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -789,7 +789,7 @@ mlxsw_sx_port_get_link_ksettings(struct net_device *dev,
u32 supported, advertising, lp_advertising;
int err;
- mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
+ mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0, false);
err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
if (err) {
netdev_err(dev, "Failed to get proto");
@@ -879,7 +879,7 @@ mlxsw_sx_port_set_link_ksettings(struct net_device *dev,
mlxsw_sx_to_ptys_advert_link(advertising) :
mlxsw_sx_to_ptys_speed(speed);
- mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
+ mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0, false);
err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
if (err) {
netdev_err(dev, "Failed to get proto");
@@ -897,7 +897,7 @@ mlxsw_sx_port_set_link_ksettings(struct net_device *dev,
return 0;
mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
- eth_proto_new);
+ eth_proto_new, true);
err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
if (err) {
netdev_err(dev, "Failed to set proto admin");
@@ -1029,7 +1029,7 @@ mlxsw_sx_port_speed_by_width_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 width)
eth_proto_admin = mlxsw_sx_to_ptys_upper_speed(upper_speed);
mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
- eth_proto_admin);
+ eth_proto_admin, true);
return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
}
@@ -1706,7 +1706,6 @@ static const struct mlxsw_config_profile mlxsw_sx_config_profile = {
.type = MLXSW_PORT_SWID_TYPE_IB,
}
},
- .resource_query_enable = 0,
};
static struct mlxsw_driver mlxsw_sx_driver = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index ec6cef8267ae..399e9d6993f7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -77,6 +77,7 @@ enum {
MLXSW_TRAP_ID_IPV6_DHCP = 0x69,
MLXSW_TRAP_ID_IPV6_ALL_ROUTERS_LINK = 0x6F,
MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70,
+ MLXSW_TRAP_ID_IPV6_PIM = 0x79,
MLXSW_TRAP_ID_IPV4_BGP = 0x88,
MLXSW_TRAP_ID_IPV6_BGP = 0x89,
MLXSW_TRAP_ID_L3_IPV6_ROUTER_SOLICITATION = 0x8A,