author     David S. Miller <davem@davemloft.net>  2016-05-04 21:04:49 +0300
committer  David S. Miller <davem@davemloft.net>  2016-05-04 21:04:49 +0300
commit     3f7496aa72d3411579f996eb3998d1441906e99d (patch)
tree       0e1b42ca6fa9b263db4d97e84b86c45cf1f0893f
parent     27540247c595b0244b96d4f65c8a09db3f2c1aa8 (diff)
parent     1edc57e2b3d3bf8672bb1553dbd541cc94f54937 (diff)
Merge branch 'mlx5-sriov-updates'
Saeed Mahameed says:

====================
Mellanox 100G ethernet SRIOV upgrades

This series introduces new features and upgrades for mlx5 ethernet
SRIOV, while the first patch provides a fix for a compilation issue
introduced by the previous aRFS series when CONFIG_RFS_ACCEL=y and
CONFIG_MLX5_CORE_EN=n.

Changes from V0:
- 1st patch: Don't add a new Kconfig flag. Instead, compile out
  en_arfs.c contents when CONFIG_RFS_ACCEL=n.

SRIOV upgrades:
- Use synchronize_irq instead of the vport events spin_lock.
- Fix a memory leak in the error flow.
- Full VST support.
- Spoofcheck support.
- Trusted VF promiscuous and allmulti support.

VST and spoofcheck in detail:
- Add low-level firmware command support for creating ACL (Access
  Control List) flow tables. ACLs are regular flow tables, with the
  one exception that they are bound to a specific e-Switch vport (VF)
  and can be one of two types:
    > egress ACL: filters traffic going from the e-Switch to the VF.
    > ingress ACL: filters traffic going from the VF to the e-Switch.
- Ingress/Egress ACLs (per vport) for VF VST mode filtering.
- Ingress/Egress ACLs (per vport) for VF spoofcheck filtering.
- Ingress/Egress ACLs (per vport) configuration (the resulting
  allow/drop logic is sketched right after this message):
    > Created only when at least one of (VST, spoofcheck) is configured.
    > if (!spoofchk && !vst) allow all traffic, i.e. no ACLs.
    > if (spoofchk && vst) allow only untagged traffic with
      smac=original mac sent from the VF.
    > if (spoofchk && !vst) allow only traffic with smac=original mac
      sent from the VF.
    > if (!spoofchk && vst) allow only untagged traffic.

Trusted VF promiscuous and allmulti support in detail:
- Added two flow groups for allmulti and promisc VFs to the e-Switch
  FDB table:
    > Allmulti group: one rule that forwards any mcast traffic coming
      from either the uplink or the VF/PF vports.
    > Promisc group: one rule that forwards all unmatched traffic
      coming from the uplink.
- Add vport context change event handling for promisc and allmulti.
  If the VF is trusted, respect the request and:
    > on an allmulti request: add the vport to the allmulti group and
      to all other L2 mcast addresses in the FDB table.
    > on a promisc request: add the vport to the promisc group.
    > Note: a promisc VF can only see traffic that was not explicitly
      matched to or requested by any other VF.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
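For orientation, the VST/spoofcheck matrix above reduces to the decision
logic below. This is an illustrative, self-contained sketch (the function
name and parameters are hypothetical); the actual rules are installed as
flow table entries in esw_vport_ingress_config()/esw_vport_egress_config()
further down in this diff.

#include <stdbool.h>

/* Hypothetical helper mirroring the ACL matrix from the cover letter:
 * given a vport's VST/spoofcheck settings and a packet's properties,
 * decide whether the ACLs would let the packet through.
 */
static bool acl_allows(bool vst, bool spoofchk, bool tagged, bool smac_ok)
{
	if (!vst && !spoofchk)		/* no ACLs installed: allow everything */
		return true;
	if (vst && spoofchk)		/* untagged AND smac == original mac */
		return !tagged && smac_ok;
	if (spoofchk)			/* smac == original mac */
		return smac_ok;
	return !tagged;			/* VST only: untagged traffic */
}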
 drivers/net/ethernet/mellanox/mlx5/core/Makefile    |   3
 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c   |   3
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c   |  17
 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c   | 976
 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h   |  43
 drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c    |  33
 drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h    |   1
 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c   |  85
 drivers/net/ethernet/mellanox/mlx5/core/fs_core.h   |   7
 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h |   2
 include/linux/mlx5/device.h                         |  12
 include/linux/mlx5/driver.h                         |   2
 include/linux/mlx5/fs.h                             |   7
 13 files changed, 1118 insertions(+), 73 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 679e18ffb3a6..b531d4f3c00b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -6,7 +6,6 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \
en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \
- en_txrx.o en_clock.o vxlan.o en_tc.o
+ en_txrx.o en_clock.o vxlan.o en_tc.o en_arfs.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o
-mlx5_core-$(CONFIG_RFS_ACCEL) += en_arfs.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index b4ae0fe15878..3515e78ba68f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -30,6 +30,8 @@
* SOFTWARE.
*/
+#ifdef CONFIG_RFS_ACCEL
+
#include <linux/hash.h>
#include <linux/mlx5/fs.h>
#include <linux/ip.h>
@@ -747,3 +749,4 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
spin_unlock_bh(&arfs->arfs_lock);
return arfs_rule->filter_id;
}
+#endif
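The approach in this first patch (compiling en_arfs.o unconditionally and
guarding its contents with #ifdef CONFIG_RFS_ACCEL) typically pairs with
inline stubs in a header so call sites build either way. A hedged sketch of
that pattern; the declaration shown is an assumption for illustration, not
taken from this diff:

/* Common kernel compile-out pattern (illustrative; the real mlx5 header
 * may differ): real prototypes when the feature is on, no-op inline
 * stubs when it is off, so callers need no #ifdefs of their own.
 */
#ifdef CONFIG_RFS_ACCEL
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
#else
static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
{
	return 0;	/* nothing to set up when aRFS is compiled out */
}
#endif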
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index b435c7b36cfb..1c70e518b5c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2438,6 +2438,21 @@ static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
vlan, qos);
}
+static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
+}
+
+static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
+}
static int mlx5_vport_link2ifla(u8 esw_link)
{
switch (esw_link) {
@@ -2607,6 +2622,8 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
#endif
.ndo_set_vf_mac = mlx5e_set_vf_mac,
.ndo_set_vf_vlan = mlx5e_set_vf_vlan,
+ .ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk,
+ .ndo_set_vf_trust = mlx5e_set_vf_trust,
.ndo_get_vf_config = mlx5e_get_vf_config,
.ndo_set_vf_link_state = mlx5e_set_vf_link_state,
.ndo_get_vf_stats = mlx5e_get_vf_stats,
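Note the "vf + 1" translation in the two new handlers above: the e-Switch
numbers the PF as vport 0, so VF n corresponds to vport n + 1. A one-line
sketch of that convention (the helper name is hypothetical; the driver
open-codes the addition):

/* Hypothetical helper: map a 0-based VF index to its e-Switch vport
 * number. Vport 0 is reserved for the PF, so VF n is vport n + 1.
 */
static inline int vf_to_vport(int vf)
{
	return vf + 1;
}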
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index ff91bb5e1c43..b84a6918a700 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -77,16 +77,20 @@ struct vport_addr {
u8 action;
u32 vport;
struct mlx5_flow_rule *flow_rule; /* SRIOV only */
+ /* A flag indicating that mac was added due to mc promiscuous vport */
+ bool mc_promisc;
};
enum {
UC_ADDR_CHANGE = BIT(0),
MC_ADDR_CHANGE = BIT(1),
+ PROMISC_CHANGE = BIT(3),
};
/* Vport context events */
#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
- MC_ADDR_CHANGE)
+ MC_ADDR_CHANGE | \
+ PROMISC_CHANGE)
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
u32 events_mask)
@@ -116,6 +120,9 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
if (events_mask & MC_ADDR_CHANGE)
MLX5_SET(nic_vport_context, nic_vport_ctx,
event_on_mc_address_change, 1);
+ if (events_mask & PROMISC_CHANGE)
+ MLX5_SET(nic_vport_context, nic_vport_ctx,
+ event_on_promisc_change, 1);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
@@ -323,30 +330,45 @@ static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index)
/* E-Switch FDB */
static struct mlx5_flow_rule *
-esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
+__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
+ u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{
- int match_header = MLX5_MATCH_OUTER_HEADERS;
- struct mlx5_flow_destination dest;
+ int match_header = (is_zero_ether_addr(mac_c) ? 0 :
+ MLX5_MATCH_OUTER_HEADERS);
struct mlx5_flow_rule *flow_rule = NULL;
+ struct mlx5_flow_destination dest;
+ void *mv_misc = NULL;
+ void *mc_misc = NULL;
+ u8 *dmac_v = NULL;
+ u8 *dmac_c = NULL;
u32 *match_v;
u32 *match_c;
- u8 *dmac_v;
- u8 *dmac_c;
+ if (rx_rule)
+ match_header |= MLX5_MATCH_MISC_PARAMETERS;
match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
if (!match_v || !match_c) {
pr_warn("FDB: Failed to alloc match parameters\n");
goto out;
}
+
dmac_v = MLX5_ADDR_OF(fte_match_param, match_v,
outer_headers.dmac_47_16);
dmac_c = MLX5_ADDR_OF(fte_match_param, match_c,
outer_headers.dmac_47_16);
- ether_addr_copy(dmac_v, mac);
- /* Match criteria mask */
- memset(dmac_c, 0xff, 6);
+ if (match_header & MLX5_MATCH_OUTER_HEADERS) {
+ ether_addr_copy(dmac_v, mac_v);
+ ether_addr_copy(dmac_c, mac_c);
+ }
+
+ if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
+ mv_misc = MLX5_ADDR_OF(fte_match_param, match_v, misc_parameters);
+ mc_misc = MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters);
+ MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT);
+ MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
+ }
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport_num = vport;
@@ -373,6 +395,39 @@ out:
return flow_rule;
}
+static struct mlx5_flow_rule *
+esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
+{
+ u8 mac_c[ETH_ALEN];
+
+ eth_broadcast_addr(mac_c);
+ return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
+}
+
+static struct mlx5_flow_rule *
+esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
+{
+ u8 mac_c[ETH_ALEN];
+ u8 mac_v[ETH_ALEN];
+
+ eth_zero_addr(mac_c);
+ eth_zero_addr(mac_v);
+ mac_c[0] = 0x01;
+ mac_v[0] = 0x01;
+ return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
+}
+
+static struct mlx5_flow_rule *
+esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
+{
+ u8 mac_c[ETH_ALEN];
+ u8 mac_v[ETH_ALEN];
+
+ eth_zero_addr(mac_c);
+ eth_zero_addr(mac_v);
+ return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
+}
+
static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -407,28 +462,74 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
esw_warn(dev, "Failed to create FDB Table err %d\n", err);
goto out;
}
+ esw->fdb_table.fdb = fdb;
+ /* Addresses group : Full match unicast/multicast addresses */
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
MLX5_MATCH_OUTER_HEADERS);
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
+ /* Preserve 2 entries for allmulti and promisc rules */
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
eth_broadcast_addr(dmac);
-
g = mlx5_create_flow_group(fdb, flow_group_in);
if (IS_ERR_OR_NULL(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create flow group err(%d)\n", err);
goto out;
}
-
esw->fdb_table.addr_grp = g;
- esw->fdb_table.fdb = fdb;
+
+ /* Allmulti group : One rule that forwards any mcast traffic */
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
+ eth_zero_addr(dmac);
+ dmac[0] = 0x01;
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR_OR_NULL(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
+ goto out;
+ }
+ esw->fdb_table.allmulti_grp = g;
+
+ /* Promiscuous group :
+ * One rule that forwards all unmatched traffic from previous groups
+ */
+ eth_zero_addr(dmac);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR_OR_NULL(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
+ goto out;
+ }
+ esw->fdb_table.promisc_grp = g;
+
out:
+ if (err) {
+ if (!IS_ERR_OR_NULL(esw->fdb_table.allmulti_grp)) {
+ mlx5_destroy_flow_group(esw->fdb_table.allmulti_grp);
+ esw->fdb_table.allmulti_grp = NULL;
+ }
+ if (!IS_ERR_OR_NULL(esw->fdb_table.addr_grp)) {
+ mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
+ esw->fdb_table.addr_grp = NULL;
+ }
+ if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) {
+ mlx5_destroy_flow_table(esw->fdb_table.fdb);
+ esw->fdb_table.fdb = NULL;
+ }
+ }
+
kfree(flow_group_in);
- if (err && !IS_ERR_OR_NULL(fdb))
- mlx5_destroy_flow_table(fdb);
return err;
}
@@ -438,10 +539,14 @@ static void esw_destroy_fdb_table(struct mlx5_eswitch *esw)
return;
esw_debug(esw->dev, "Destroy FDB Table\n");
+ mlx5_destroy_flow_group(esw->fdb_table.promisc_grp);
+ mlx5_destroy_flow_group(esw->fdb_table.allmulti_grp);
mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
mlx5_destroy_flow_table(esw->fdb_table.fdb);
esw->fdb_table.fdb = NULL;
esw->fdb_table.addr_grp = NULL;
+ esw->fdb_table.allmulti_grp = NULL;
+ esw->fdb_table.promisc_grp = NULL;
}
/* E-Switch vport UC/MC lists management */
@@ -511,6 +616,52 @@ static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
return 0;
}
+static void update_allmulti_vports(struct mlx5_eswitch *esw,
+ struct vport_addr *vaddr,
+ struct esw_mc_addr *esw_mc)
+{
+ u8 *mac = vaddr->node.addr;
+ u32 vport_idx = 0;
+
+ for (vport_idx = 0; vport_idx < esw->total_vports; vport_idx++) {
+ struct mlx5_vport *vport = &esw->vports[vport_idx];
+ struct hlist_head *vport_hash = vport->mc_list;
+ struct vport_addr *iter_vaddr =
+ l2addr_hash_find(vport_hash,
+ mac,
+ struct vport_addr);
+ if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
+ vaddr->vport == vport_idx)
+ continue;
+ switch (vaddr->action) {
+ case MLX5_ACTION_ADD:
+ if (iter_vaddr)
+ continue;
+ iter_vaddr = l2addr_hash_add(vport_hash, mac,
+ struct vport_addr,
+ GFP_KERNEL);
+ if (!iter_vaddr) {
+ esw_warn(esw->dev,
+ "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
+ mac, vport_idx);
+ continue;
+ }
+ iter_vaddr->vport = vport_idx;
+ iter_vaddr->flow_rule =
+ esw_fdb_set_vport_rule(esw,
+ mac,
+ vport_idx);
+ break;
+ case MLX5_ACTION_DEL:
+ if (!iter_vaddr)
+ continue;
+ mlx5_del_flow_rule(iter_vaddr->flow_rule);
+ l2addr_hash_del(iter_vaddr);
+ break;
+ }
+ }
+}
+
static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
struct hlist_head *hash = esw->mc_table;
@@ -531,8 +682,17 @@ static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);
+
+ /* Add this multicast mac to all the mc promiscuous vports */
+ update_allmulti_vports(esw, vaddr, esw_mc);
+
add:
- esw_mc->refcnt++;
+ /* If the multicast mac is added as a result of mc promiscuous vport,
+ * don't increment the multicast ref count
+ */
+ if (!vaddr->mc_promisc)
+ esw_mc->refcnt++;
+
/* Forward MC MAC to vport */
vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
esw_debug(esw->dev,
@@ -568,9 +728,15 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
mlx5_del_flow_rule(vaddr->flow_rule);
vaddr->flow_rule = NULL;
- if (--esw_mc->refcnt)
+ /* If the multicast mac is added as a result of mc promiscuous vport,
+ * don't decrement the multicast ref count.
+ */
+ if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
return 0;
+ /* Remove this multicast mac from all the mc promiscuous vports */
+ update_allmulti_vports(esw, vaddr, esw_mc);
+
if (esw_mc->uplink_rule)
mlx5_del_flow_rule(esw_mc->uplink_rule);
@@ -643,10 +809,13 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
addr->action = MLX5_ACTION_DEL;
}
+ if (!vport->enabled)
+ goto out;
+
err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
mac_list, &size);
if (err)
- return;
+ goto out;
esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
vport_num, is_uc ? "UC" : "MC", size);
@@ -660,6 +829,24 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
if (addr) {
addr->action = MLX5_ACTION_NONE;
+ /* If this mac was previously added because of allmulti
+ * promiscuous rx mode, it's now converted to be the original
+ * vport mac.
+ */
+ if (addr->mc_promisc) {
+ struct esw_mc_addr *esw_mc =
+ l2addr_hash_find(esw->mc_table,
+ mac_list[i],
+ struct esw_mc_addr);
+ if (!esw_mc) {
+ esw_warn(esw->dev,
+ "Failed to MAC(%pM) in mcast DB\n",
+ mac_list[i]);
+ continue;
+ }
+ esw_mc->refcnt++;
+ addr->mc_promisc = false;
+ }
continue;
}
@@ -674,13 +861,121 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
addr->vport = vport_num;
addr->action = MLX5_ACTION_ADD;
}
+out:
kfree(mac_list);
}
-static void esw_vport_change_handler(struct work_struct *work)
+/* Sync vport UC/MC list from vport context
+ * Must be called after esw_update_vport_addr_list
+ */
+static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num)
+{
+ struct mlx5_vport *vport = &esw->vports[vport_num];
+ struct l2addr_node *node;
+ struct vport_addr *addr;
+ struct hlist_head *hash;
+ struct hlist_node *tmp;
+ int hi;
+
+ hash = vport->mc_list;
+
+ for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
+ u8 *mac = node->addr;
+
+ addr = l2addr_hash_find(hash, mac, struct vport_addr);
+ if (addr) {
+ if (addr->action == MLX5_ACTION_DEL)
+ addr->action = MLX5_ACTION_NONE;
+ continue;
+ }
+ addr = l2addr_hash_add(hash, mac, struct vport_addr,
+ GFP_KERNEL);
+ if (!addr) {
+ esw_warn(esw->dev,
+ "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
+ mac, vport_num);
+ continue;
+ }
+ addr->vport = vport_num;
+ addr->action = MLX5_ACTION_ADD;
+ addr->mc_promisc = true;
+ }
+}
+
+/* Apply vport rx mode to HW FDB table */
+static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
+ bool promisc, bool mc_promisc)
+{
+ struct esw_mc_addr *allmulti_addr = esw->mc_promisc;
+ struct mlx5_vport *vport = &esw->vports[vport_num];
+
+ if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
+ goto promisc;
+
+ if (mc_promisc) {
+ vport->allmulti_rule =
+ esw_fdb_set_vport_allmulti_rule(esw, vport_num);
+ if (!allmulti_addr->uplink_rule)
+ allmulti_addr->uplink_rule =
+ esw_fdb_set_vport_allmulti_rule(esw,
+ UPLINK_VPORT);
+ allmulti_addr->refcnt++;
+ } else if (vport->allmulti_rule) {
+ mlx5_del_flow_rule(vport->allmulti_rule);
+ vport->allmulti_rule = NULL;
+
+ if (--allmulti_addr->refcnt > 0)
+ goto promisc;
+
+ if (allmulti_addr->uplink_rule)
+ mlx5_del_flow_rule(allmulti_addr->uplink_rule);
+ allmulti_addr->uplink_rule = NULL;
+ }
+
+promisc:
+ if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
+ return;
+
+ if (promisc) {
+ vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw,
+ vport_num);
+ } else if (vport->promisc_rule) {
+ mlx5_del_flow_rule(vport->promisc_rule);
+ vport->promisc_rule = NULL;
+ }
+}
+
+/* Sync vport rx mode from vport context */
+static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num)
+{
+ struct mlx5_vport *vport = &esw->vports[vport_num];
+ int promisc_all = 0;
+ int promisc_uc = 0;
+ int promisc_mc = 0;
+ int err;
+
+ err = mlx5_query_nic_vport_promisc(esw->dev,
+ vport_num,
+ &promisc_uc,
+ &promisc_mc,
+ &promisc_all);
+ if (err)
+ return;
+ esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
+ vport_num, promisc_all, promisc_mc);
+
+ if (!vport->trusted || !vport->enabled) {
+ promisc_uc = 0;
+ promisc_mc = 0;
+ promisc_all = 0;
+ }
+
+ esw_apply_vport_rx_mode(esw, vport_num, promisc_all,
+ (promisc_all || promisc_mc));
+}
+
+static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
{
- struct mlx5_vport *vport =
- container_of(work, struct mlx5_vport, vport_change_handler);
struct mlx5_core_dev *dev = vport->dev;
struct mlx5_eswitch *esw = dev->priv.eswitch;
u8 mac[ETH_ALEN];
@@ -699,6 +994,15 @@ static void esw_vport_change_handler(struct work_struct *work)
if (vport->enabled_events & MC_ADDR_CHANGE) {
esw_update_vport_addr_list(esw, vport->vport,
MLX5_NVPRT_LIST_TYPE_MC);
+ }
+
+ if (vport->enabled_events & PROMISC_CHANGE) {
+ esw_update_vport_rx_mode(esw, vport->vport);
+ if (!IS_ERR_OR_NULL(vport->allmulti_rule))
+ esw_update_vport_mc_promisc(esw, vport->vport);
+ }
+
+ if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) {
esw_apply_vport_addr_list(esw, vport->vport,
MLX5_NVPRT_LIST_TYPE_MC);
}
@@ -709,15 +1013,477 @@ static void esw_vport_change_handler(struct work_struct *work)
vport->enabled_events);
}
+static void esw_vport_change_handler(struct work_struct *work)
+{
+ struct mlx5_vport *vport =
+ container_of(work, struct mlx5_vport, vport_change_handler);
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+
+ mutex_lock(&esw->state_lock);
+ esw_vport_change_handle_locked(vport);
+ mutex_unlock(&esw->state_lock);
+}
+
+static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *vlan_grp = NULL;
+ struct mlx5_flow_group *drop_grp = NULL;
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_table *acl;
+ void *match_criteria;
+ u32 *flow_group_in;
+ /* The egress acl table contains 2 rules:
+ * 1)Allow traffic with vlan_tag=vst_vlan_id
+ * 2)Drop all other traffic.
+ */
+ int table_size = 2;
+ int err = 0;
+
+ if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support) ||
+ !IS_ERR_OR_NULL(vport->egress.acl))
+ return;
+
+ esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
+ vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
+
+ root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
+ return;
+ }
+
+ flow_group_in = mlx5_vzalloc(inlen);
+ if (!flow_group_in)
+ return;
+
+ acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
+ if (IS_ERR_OR_NULL(acl)) {
+ err = PTR_ERR(acl);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
+
+ vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR_OR_NULL(vlan_grp)) {
+ err = PTR_ERR(vlan_grp);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+ drop_grp = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR_OR_NULL(drop_grp)) {
+ err = PTR_ERR(drop_grp);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+
+ vport->egress.acl = acl;
+ vport->egress.drop_grp = drop_grp;
+ vport->egress.allowed_vlans_grp = vlan_grp;
+out:
+ kfree(flow_group_in);
+ if (err && !IS_ERR_OR_NULL(vlan_grp))
+ mlx5_destroy_flow_group(vlan_grp);
+ if (err && !IS_ERR_OR_NULL(acl))
+ mlx5_destroy_flow_table(acl);
+}
+
+static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
+ mlx5_del_flow_rule(vport->egress.allowed_vlan);
+
+ if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
+ mlx5_del_flow_rule(vport->egress.drop_rule);
+
+ vport->egress.allowed_vlan = NULL;
+ vport->egress.drop_rule = NULL;
+}
+
+static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (IS_ERR_OR_NULL(vport->egress.acl))
+ return;
+
+ esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);
+
+ esw_vport_cleanup_egress_rules(esw, vport);
+ mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
+ mlx5_destroy_flow_group(vport->egress.drop_grp);
+ mlx5_destroy_flow_table(vport->egress.acl);
+ vport->egress.allowed_vlans_grp = NULL;
+ vport->egress.drop_grp = NULL;
+ vport->egress.acl = NULL;
+}
+
+static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_table *acl;
+ struct mlx5_flow_group *g;
+ void *match_criteria;
+ u32 *flow_group_in;
+ /* The ingress acl table contains 4 groups
+ * (2 active rules at the same time -
+ * 1 allow rule from one of the first 3 groups.
+ * 1 drop rule from the last group):
+ * 1)Allow untagged traffic with smac=original mac.
+ * 2)Allow untagged traffic.
+ * 3)Allow traffic with smac=original mac.
+ * 4)Drop all other traffic.
+ */
+ int table_size = 4;
+ int err = 0;
+
+ if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support) ||
+ !IS_ERR_OR_NULL(vport->ingress.acl))
+ return;
+
+ esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
+ vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
+
+ root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
+ return;
+ }
+
+ flow_group_in = mlx5_vzalloc(inlen);
+ if (!flow_group_in)
+ return;
+
+ acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
+ if (IS_ERR_OR_NULL(acl)) {
+ err = PTR_ERR(acl);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+ vport->ingress.acl = acl;
+
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
+
+ g = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR_OR_NULL(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+ vport->ingress.allow_untagged_spoofchk_grp = g;
+
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+
+ g = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR_OR_NULL(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+ vport->ingress.allow_untagged_only_grp = g;
+
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
+
+ g = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR_OR_NULL(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+ vport->ingress.allow_spoofchk_only_grp = g;
+
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
+
+ g = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR_OR_NULL(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+ vport->ingress.drop_grp = g;
+
+out:
+ if (err) {
+ if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
+ mlx5_destroy_flow_group(
+ vport->ingress.allow_spoofchk_only_grp);
+ if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
+ mlx5_destroy_flow_group(
+ vport->ingress.allow_untagged_only_grp);
+ if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
+ mlx5_destroy_flow_group(
+ vport->ingress.allow_untagged_spoofchk_grp);
+ if (!IS_ERR_OR_NULL(vport->ingress.acl))
+ mlx5_destroy_flow_table(vport->ingress.acl);
+ }
+
+ kfree(flow_group_in);
+}
+
+static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
+ mlx5_del_flow_rule(vport->ingress.drop_rule);
+
+ if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
+ mlx5_del_flow_rule(vport->ingress.allow_rule);
+
+ vport->ingress.drop_rule = NULL;
+ vport->ingress.allow_rule = NULL;
+}
+
+static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (IS_ERR_OR_NULL(vport->ingress.acl))
+ return;
+
+ esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
+
+ esw_vport_cleanup_ingress_rules(esw, vport);
+ mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
+ mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
+ mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
+ mlx5_destroy_flow_group(vport->ingress.drop_grp);
+ mlx5_destroy_flow_table(vport->ingress.acl);
+ vport->ingress.acl = NULL;
+ vport->ingress.drop_grp = NULL;
+ vport->ingress.allow_spoofchk_only_grp = NULL;
+ vport->ingress.allow_untagged_only_grp = NULL;
+ vport->ingress.allow_untagged_spoofchk_grp = NULL;
+}
+
+static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ u8 smac[ETH_ALEN];
+ u32 *match_v;
+ u32 *match_c;
+ int err = 0;
+ u8 *smac_v;
+
+ if (vport->spoofchk) {
+ err = mlx5_query_nic_vport_mac_address(esw->dev, vport->vport, smac);
+ if (err) {
+ esw_warn(esw->dev,
+ "vport[%d] configure ingress rules failed, query smac failed, err(%d)\n",
+ vport->vport, err);
+ return err;
+ }
+
+ if (!is_valid_ether_addr(smac)) {
+ mlx5_core_warn(esw->dev,
+ "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
+ vport->vport);
+ return -EPERM;
+ }
+ }
+
+ esw_vport_cleanup_ingress_rules(esw, vport);
+
+ if (!vport->vlan && !vport->qos && !vport->spoofchk) {
+ esw_vport_disable_ingress_acl(esw, vport);
+ return 0;
+ }
+
+ esw_vport_enable_ingress_acl(esw, vport);
+
+ esw_debug(esw->dev,
+ "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
+ vport->vport, vport->vlan, vport->qos);
+
+ match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ if (!match_v || !match_c) {
+ err = -ENOMEM;
+ esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+
+ if (vport->vlan || vport->qos)
+ MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag);
+
+ if (vport->spoofchk) {
+ MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_15_0);
+ smac_v = MLX5_ADDR_OF(fte_match_param,
+ match_v,
+ outer_headers.smac_47_16);
+ ether_addr_copy(smac_v, smac);
+ }
+
+ vport->ingress.allow_rule =
+ mlx5_add_flow_rule(vport->ingress.acl,
+ MLX5_MATCH_OUTER_HEADERS,
+ match_c,
+ match_v,
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW,
+ 0, NULL);
+ if (IS_ERR_OR_NULL(vport->ingress.allow_rule)) {
+ err = PTR_ERR(vport->ingress.allow_rule);
+ pr_warn("vport[%d] configure ingress allow rule, err(%d)\n",
+ vport->vport, err);
+ vport->ingress.allow_rule = NULL;
+ goto out;
+ }
+
+ memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+ memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+ vport->ingress.drop_rule =
+ mlx5_add_flow_rule(vport->ingress.acl,
+ 0,
+ match_c,
+ match_v,
+ MLX5_FLOW_CONTEXT_ACTION_DROP,
+ 0, NULL);
+ if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) {
+ err = PTR_ERR(vport->ingress.drop_rule);
+ pr_warn("vport[%d] configure ingress drop rule, err(%d)\n",
+ vport->vport, err);
+ vport->ingress.drop_rule = NULL;
+ goto out;
+ }
+
+out:
+ if (err)
+ esw_vport_cleanup_ingress_rules(esw, vport);
+
+ kfree(match_v);
+ kfree(match_c);
+ return err;
+}
+
+static int esw_vport_egress_config(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ u32 *match_v;
+ u32 *match_c;
+ int err = 0;
+
+ esw_vport_cleanup_egress_rules(esw, vport);
+
+ if (!vport->vlan && !vport->qos) {
+ esw_vport_disable_egress_acl(esw, vport);
+ return 0;
+ }
+
+ esw_vport_enable_egress_acl(esw, vport);
+
+ esw_debug(esw->dev,
+ "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
+ vport->vport, vport->vlan, vport->qos);
+
+ match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ if (!match_v || !match_c) {
+ err = -ENOMEM;
+ esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+
+ /* Allowed vlan rule */
+ MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.first_vid);
+ MLX5_SET(fte_match_param, match_v, outer_headers.first_vid, vport->vlan);
+
+ vport->egress.allowed_vlan =
+ mlx5_add_flow_rule(vport->egress.acl,
+ MLX5_MATCH_OUTER_HEADERS,
+ match_c,
+ match_v,
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW,
+ 0, NULL);
+ if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
+ err = PTR_ERR(vport->egress.allowed_vlan);
+ pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
+ vport->vport, err);
+ vport->egress.allowed_vlan = NULL;
+ goto out;
+ }
+
+ /* Drop others rule (star rule) */
+ memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+ memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+ vport->egress.drop_rule =
+ mlx5_add_flow_rule(vport->egress.acl,
+ 0,
+ match_c,
+ match_v,
+ MLX5_FLOW_CONTEXT_ACTION_DROP,
+ 0, NULL);
+ if (IS_ERR_OR_NULL(vport->egress.drop_rule)) {
+ err = PTR_ERR(vport->egress.drop_rule);
+ pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n",
+ vport->vport, err);
+ vport->egress.drop_rule = NULL;
+ }
+out:
+ kfree(match_v);
+ kfree(match_c);
+ return err;
+}
+
static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
int enable_events)
{
struct mlx5_vport *vport = &esw->vports[vport_num];
- unsigned long flags;
+ mutex_lock(&esw->state_lock);
WARN_ON(vport->enabled);
esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
+
+ if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */
+ esw_vport_ingress_config(esw, vport);
+ esw_vport_egress_config(esw, vport);
+ }
+
mlx5_modify_vport_admin_state(esw->dev,
MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
vport_num,
@@ -725,53 +1491,32 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
/* Sync with current vport context */
vport->enabled_events = enable_events;
- esw_vport_change_handler(&vport->vport_change_handler);
+ esw_vport_change_handle_locked(vport);
- spin_lock_irqsave(&vport->lock, flags);
vport->enabled = true;
- spin_unlock_irqrestore(&vport->lock, flags);
+
+ /* only PF is trusted by default */
+ vport->trusted = (vport_num) ? false : true;
arm_vport_context_events_cmd(esw->dev, vport_num, enable_events);
esw->enabled_vports++;
esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
-}
-
-static void esw_cleanup_vport(struct mlx5_eswitch *esw, u16 vport_num)
-{
- struct mlx5_vport *vport = &esw->vports[vport_num];
- struct l2addr_node *node;
- struct vport_addr *addr;
- struct hlist_node *tmp;
- int hi;
-
- for_each_l2hash_node(node, tmp, vport->uc_list, hi) {
- addr = container_of(node, struct vport_addr, node);
- addr->action = MLX5_ACTION_DEL;
- }
- esw_apply_vport_addr_list(esw, vport_num, MLX5_NVPRT_LIST_TYPE_UC);
-
- for_each_l2hash_node(node, tmp, vport->mc_list, hi) {
- addr = container_of(node, struct vport_addr, node);
- addr->action = MLX5_ACTION_DEL;
- }
- esw_apply_vport_addr_list(esw, vport_num, MLX5_NVPRT_LIST_TYPE_MC);
+ mutex_unlock(&esw->state_lock);
}
static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
{
struct mlx5_vport *vport = &esw->vports[vport_num];
- unsigned long flags;
if (!vport->enabled)
return;
esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
/* Mark this vport as disabled to discard new events */
- spin_lock_irqsave(&vport->lock, flags);
vport->enabled = false;
- vport->enabled_events = 0;
- spin_unlock_irqrestore(&vport->lock, flags);
+
+ synchronize_irq(mlx5_get_msix_vec(esw->dev, MLX5_EQ_VEC_ASYNC));
mlx5_modify_vport_admin_state(esw->dev,
MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
@@ -781,9 +1526,19 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
flush_workqueue(esw->work_queue);
/* Disable events from this vport */
arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
- /* We don't assume VFs will cleanup after themselves */
- esw_cleanup_vport(esw, vport_num);
+ mutex_lock(&esw->state_lock);
+ /* We don't assume VFs will cleanup after themselves.
+ * Calling vport change handler while vport is disabled will cleanup
+ * the vport resources.
+ */
+ esw_vport_change_handle_locked(vport);
+ vport->enabled_events = 0;
+ if (vport_num) {
+ esw_vport_disable_egress_acl(esw, vport);
+ esw_vport_disable_ingress_acl(esw, vport);
+ }
esw->enabled_vports--;
+ mutex_unlock(&esw->state_lock);
}
/* Public E-Switch API */
@@ -802,6 +1557,12 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs)
return -ENOTSUPP;
}
+ if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
+ esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");
+
+ if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
+ esw_warn(esw->dev, "E-Switch engress ACL is not supported by FW\n");
+
esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d)\n", nvfs);
esw_disable_vport(esw, 0);
@@ -824,6 +1585,7 @@ abort:
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
{
+ struct esw_mc_addr *mc_promisc;
int i;
if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
@@ -833,9 +1595,14 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
esw_info(esw->dev, "disable SRIOV: active vports(%d)\n",
esw->enabled_vports);
+ mc_promisc = esw->mc_promisc;
+
for (i = 0; i < esw->total_vports; i++)
esw_disable_vport(esw, i);
+ if (mc_promisc && mc_promisc->uplink_rule)
+ mlx5_del_flow_rule(mc_promisc->uplink_rule);
+
esw_destroy_fdb_table(esw);
/* VPORT 0 (PF) must be enabled back with non-sriov configuration */
@@ -845,7 +1612,8 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
- int total_vports = 1 + pci_sriov_get_totalvfs(dev->pdev);
+ int total_vports = MLX5_TOTAL_VPORTS(dev);
+ struct esw_mc_addr *mc_promisc;
struct mlx5_eswitch *esw;
int vport_num;
int err;
@@ -874,6 +1642,13 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
}
esw->l2_table.size = l2_table_size;
+ mc_promisc = kzalloc(sizeof(*mc_promisc), GFP_KERNEL);
+ if (!mc_promisc) {
+ err = -ENOMEM;
+ goto abort;
+ }
+ esw->mc_promisc = mc_promisc;
+
esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
if (!esw->work_queue) {
err = -ENOMEM;
@@ -887,6 +1662,8 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
goto abort;
}
+ mutex_init(&esw->state_lock);
+
for (vport_num = 0; vport_num < total_vports; vport_num++) {
struct mlx5_vport *vport = &esw->vports[vport_num];
@@ -894,7 +1671,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
vport->dev = dev;
INIT_WORK(&vport->vport_change_handler,
esw_vport_change_handler);
- spin_lock_init(&vport->lock);
}
esw->total_vports = total_vports;
@@ -925,6 +1701,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
kfree(esw->l2_table.bitmap);
+ kfree(esw->mc_promisc);
kfree(esw->vports);
kfree(esw);
}
@@ -942,10 +1719,8 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
}
vport = &esw->vports[vport_num];
- spin_lock(&vport->lock);
if (vport->enabled)
queue_work(esw->work_queue, &vport->vport_change_handler);
- spin_unlock(&vport->lock);
}
/* Vport Administration */
@@ -957,12 +1732,22 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
int vport, u8 mac[ETH_ALEN])
{
int err = 0;
+ struct mlx5_vport *evport;
if (!ESW_ALLOWED(esw))
return -EPERM;
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
+ evport = &esw->vports[vport];
+
+ if (evport->spoofchk && !is_valid_ether_addr(mac)) {
+ mlx5_core_warn(esw->dev,
+ "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
+ vport);
+ return -EPERM;
+ }
+
err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
if (err) {
mlx5_core_warn(esw->dev,
@@ -971,6 +1756,11 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
return err;
}
+ mutex_lock(&esw->state_lock);
+ if (evport->enabled)
+ err = esw_vport_ingress_config(esw, evport);
+ mutex_unlock(&esw->state_lock);
+
return err;
}
@@ -990,6 +1780,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int vport, struct ifla_vf_info *ivi)
{
+ struct mlx5_vport *evport;
u16 vlan;
u8 qos;
@@ -998,6 +1789,8 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
+ evport = &esw->vports[vport];
+
memset(ivi, 0, sizeof(*ivi));
ivi->vf = vport - 1;
@@ -1008,7 +1801,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
query_esw_vport_cvlan(esw->dev, vport, &vlan, &qos);
ivi->vlan = vlan;
ivi->qos = qos;
- ivi->spoofchk = 0;
+ ivi->spoofchk = evport->spoofchk;
return 0;
}
@@ -1016,6 +1809,8 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
int vport, u16 vlan, u8 qos)
{
+ struct mlx5_vport *evport;
+ int err = 0;
int set = 0;
if (!ESW_ALLOWED(esw))
@@ -1026,7 +1821,72 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
if (vlan || qos)
set = 1;
- return modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
+ evport = &esw->vports[vport];
+
+ err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
+ if (err)
+ return err;
+
+ mutex_lock(&esw->state_lock);
+ evport->vlan = vlan;
+ evport->qos = qos;
+ if (evport->enabled) {
+ err = esw_vport_ingress_config(esw, evport);
+ if (err)
+ goto out;
+ err = esw_vport_egress_config(esw, evport);
+ }
+
+out:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
+ int vport, bool spoofchk)
+{
+ struct mlx5_vport *evport;
+ bool pschk;
+ int err = 0;
+
+ if (!ESW_ALLOWED(esw))
+ return -EPERM;
+ if (!LEGAL_VPORT(esw, vport))
+ return -EINVAL;
+
+ evport = &esw->vports[vport];
+
+ mutex_lock(&esw->state_lock);
+ pschk = evport->spoofchk;
+ evport->spoofchk = spoofchk;
+ if (evport->enabled)
+ err = esw_vport_ingress_config(esw, evport);
+ if (err)
+ evport->spoofchk = pschk;
+ mutex_unlock(&esw->state_lock);
+
+ return err;
+}
+
+int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
+ int vport, bool setting)
+{
+ struct mlx5_vport *evport;
+
+ if (!ESW_ALLOWED(esw))
+ return -EPERM;
+ if (!LEGAL_VPORT(esw, vport))
+ return -EINVAL;
+
+ evport = &esw->vports[vport];
+
+ mutex_lock(&esw->state_lock);
+ evport->trusted = setting;
+ if (evport->enabled)
+ esw_vport_change_handle_locked(evport);
+ mutex_unlock(&esw->state_lock);
+
+ return 0;
}
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
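The administrative setters above (mac, vlan, spoofchk, trust) and the vport
change handler all run under esw->state_lock, so the cached vport
configuration and the installed ACL rules cannot drift apart. A condensed
sketch of the pattern they share (hypothetical function name, error
handling trimmed):

/* Hypothetical setter condensing the locking pattern used by
 * mlx5_eswitch_set_vport_spoofchk() and friends: update the cached
 * state under state_lock, then re-apply the ingress ACL only if the
 * vport is currently enabled.
 */
static int example_set_vport_flag(struct mlx5_eswitch *esw, int vport, bool on)
{
	struct mlx5_vport *evport = &esw->vports[vport];
	int err = 0;

	mutex_lock(&esw->state_lock);
	evport->spoofchk = on;
	if (evport->enabled)
		err = esw_vport_ingress_config(esw, evport);
	mutex_unlock(&esw->state_lock);
	return err;
}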
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 3416a428f70f..fd6800256d4a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -88,18 +88,40 @@ struct l2addr_node {
kfree(ptr); \
})
+struct vport_ingress {
+ struct mlx5_flow_table *acl;
+ struct mlx5_flow_group *allow_untagged_spoofchk_grp;
+ struct mlx5_flow_group *allow_spoofchk_only_grp;
+ struct mlx5_flow_group *allow_untagged_only_grp;
+ struct mlx5_flow_group *drop_grp;
+ struct mlx5_flow_rule *allow_rule;
+ struct mlx5_flow_rule *drop_rule;
+};
+
+struct vport_egress {
+ struct mlx5_flow_table *acl;
+ struct mlx5_flow_group *allowed_vlans_grp;
+ struct mlx5_flow_group *drop_grp;
+ struct mlx5_flow_rule *allowed_vlan;
+ struct mlx5_flow_rule *drop_rule;
+};
+
struct mlx5_vport {
struct mlx5_core_dev *dev;
int vport;
struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE];
+ struct mlx5_flow_rule *promisc_rule;
+ struct mlx5_flow_rule *allmulti_rule;
struct work_struct vport_change_handler;
- /* This spinlock protects access to vport data, between
- * "esw_vport_disable" and ongoing interrupt "mlx5_eswitch_vport_event"
- * once vport marked as disabled new interrupts are discarded.
- */
- spinlock_t lock; /* vport events sync */
+ struct vport_ingress ingress;
+ struct vport_egress egress;
+
+ u16 vlan;
+ u8 qos;
+ bool spoofchk;
+ bool trusted;
bool enabled;
u16 enabled_events;
};
@@ -113,6 +135,8 @@ struct mlx5_l2_table {
struct mlx5_eswitch_fdb {
void *fdb;
struct mlx5_flow_group *addr_grp;
+ struct mlx5_flow_group *allmulti_grp;
+ struct mlx5_flow_group *promisc_grp;
};
struct mlx5_eswitch {
@@ -124,6 +148,11 @@ struct mlx5_eswitch {
struct mlx5_vport *vports;
int total_vports;
int enabled_vports;
+ /* Synchronize between vport change events
+ * and async SRIOV admin state changes
+ */
+ struct mutex state_lock;
+ struct esw_mc_addr *mc_promisc;
};
/* E-Switch API */
@@ -138,6 +167,10 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
int vport, int link_state);
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
int vport, u16 vlan, u8 qos);
+int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
+ int vport, bool spoofchk);
+int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
+ int vport_num, bool setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index f46f1db0fc00..9797768891ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -50,6 +50,10 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
+ if (ft->vport) {
+ MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
+ MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
+ }
memset(out, 0, sizeof(out));
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
@@ -57,6 +61,7 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
}
int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
+ u16 vport,
enum fs_flow_table_type type, unsigned int level,
unsigned int log_size, struct mlx5_flow_table
*next_ft, unsigned int *table_id)
@@ -77,6 +82,10 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
MLX5_SET(create_flow_table_in, in, table_type, type);
MLX5_SET(create_flow_table_in, in, level, level);
MLX5_SET(create_flow_table_in, in, log_size, log_size);
+ if (vport) {
+ MLX5_SET(create_flow_table_in, in, vport_number, vport);
+ MLX5_SET(create_flow_table_in, in, other_vport, 1);
+ }
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
@@ -101,6 +110,10 @@ int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
MLX5_CMD_OP_DESTROY_FLOW_TABLE);
MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
+ if (ft->vport) {
+ MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
+ MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
+ }
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
sizeof(out));
@@ -120,6 +133,10 @@ int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
MLX5_CMD_OP_MODIFY_FLOW_TABLE);
MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
MLX5_SET(modify_flow_table_in, in, table_id, ft->id);
+ if (ft->vport) {
+ MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
+ MLX5_SET(modify_flow_table_in, in, other_vport, 1);
+ }
MLX5_SET(modify_flow_table_in, in, modify_field_select,
MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
if (next_ft) {
@@ -148,6 +165,10 @@ int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
MLX5_CMD_OP_CREATE_FLOW_GROUP);
MLX5_SET(create_flow_group_in, in, table_type, ft->type);
MLX5_SET(create_flow_group_in, in, table_id, ft->id);
+ if (ft->vport) {
+ MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
+ MLX5_SET(create_flow_group_in, in, other_vport, 1);
+ }
err = mlx5_cmd_exec_check_status(dev, in,
inlen, out,
@@ -174,6 +195,10 @@ int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
+ if (ft->vport) {
+ MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
+ MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
+ }
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
sizeof(out));
@@ -207,6 +232,10 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(set_fte_in, in, table_type, ft->type);
MLX5_SET(set_fte_in, in, table_id, ft->id);
MLX5_SET(set_fte_in, in, flow_index, fte->index);
+ if (ft->vport) {
+ MLX5_SET(set_fte_in, in, vport_number, ft->vport);
+ MLX5_SET(set_fte_in, in, other_vport, 1);
+ }
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
@@ -285,6 +314,10 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
MLX5_SET(delete_fte_in, in, table_type, ft->type);
MLX5_SET(delete_fte_in, in, table_id, ft->id);
MLX5_SET(delete_fte_in, in, flow_index, index);
+ if (ft->vport) {
+ MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
+ MLX5_SET(delete_fte_in, in, other_vport, 1);
+ }
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 9814d4784803..c97b4a03eeed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -34,6 +34,7 @@
#define _MLX5_FS_CMD_
int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
+ u16 vport,
enum fs_flow_table_type type, unsigned int level,
unsigned int log_size, struct mlx5_flow_table
*next_ft, unsigned int *table_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 4d78d5a48af3..659a6980cda2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -457,7 +457,7 @@ static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in)
return fg;
}
-static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte,
+static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
enum fs_flow_table_type table_type)
{
struct mlx5_flow_table *ft;
@@ -469,6 +469,7 @@ static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte,
ft->level = level;
ft->node.type = FS_TYPE_FLOW_TABLE;
ft->type = table_type;
+ ft->vport = vport;
ft->max_fte = max_fte;
INIT_LIST_HEAD(&ft->fwd_rules);
mutex_init(&ft->lock);
@@ -700,9 +701,9 @@ static void list_add_flow_table(struct mlx5_flow_table *ft,
list_add(&ft->node.list, prev);
}
-struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
- int prio, int max_fte,
- u32 level)
+static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+ u16 vport, int prio,
+ int max_fte, u32 level)
{
struct mlx5_flow_table *next_ft = NULL;
struct mlx5_flow_table *ft;
@@ -732,6 +733,7 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
*/
level += fs_prio->start_level;
ft = alloc_flow_table(level,
+ vport,
roundup_pow_of_two(max_fte),
root->table_type);
if (!ft) {
@@ -742,7 +744,7 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
tree_init_node(&ft->node, 1, del_flow_table);
log_table_sz = ilog2(ft->max_fte);
next_ft = find_next_chained_ft(fs_prio);
- err = mlx5_cmd_create_flow_table(root->dev, ft->type, ft->level,
+ err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->type, ft->level,
log_table_sz, next_ft, &ft->id);
if (err)
goto free_ft;
@@ -766,6 +768,20 @@ unlock_root:
return ERR_PTR(err);
}
+struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+ int prio, int max_fte,
+ u32 level)
+{
+ return __mlx5_create_flow_table(ns, 0, prio, max_fte, level);
+}
+
+struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
+ int prio, int max_fte,
+ u32 level, u16 vport)
+{
+ return __mlx5_create_flow_table(ns, vport, prio, max_fte, level);
+}
+
struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
int prio,
int num_flow_table_entries,
@@ -1319,6 +1335,16 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
return &dev->priv.fdb_root_ns->ns;
else
return NULL;
+ case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
+ if (dev->priv.esw_egress_root_ns)
+ return &dev->priv.esw_egress_root_ns->ns;
+ else
+ return NULL;
+ case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
+ if (dev->priv.esw_ingress_root_ns)
+ return &dev->priv.esw_ingress_root_ns->ns;
+ else
+ return NULL;
default:
return NULL;
}
@@ -1699,6 +1725,8 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
cleanup_root_ns(dev);
cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
+ cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns);
+ cleanup_single_prio_root_ns(dev, dev->priv.esw_ingress_root_ns);
}
static int init_fdb_root_ns(struct mlx5_core_dev *dev)
@@ -1719,6 +1747,38 @@ static int init_fdb_root_ns(struct mlx5_core_dev *dev)
}
}
+static int init_egress_acl_root_ns(struct mlx5_core_dev *dev)
+{
+ struct fs_prio *prio;
+
+ dev->priv.esw_egress_root_ns = create_root_ns(dev, FS_FT_ESW_EGRESS_ACL);
+ if (!dev->priv.esw_egress_root_ns)
+ return -ENOMEM;
+
+ /* create 1 prio */
+ prio = fs_create_prio(&dev->priv.esw_egress_root_ns->ns, 0, MLX5_TOTAL_VPORTS(dev));
+ if (IS_ERR(prio))
+ return PTR_ERR(prio);
+ else
+ return 0;
+}
+
+static int init_ingress_acl_root_ns(struct mlx5_core_dev *dev)
+{
+ struct fs_prio *prio;
+
+ dev->priv.esw_ingress_root_ns = create_root_ns(dev, FS_FT_ESW_INGRESS_ACL);
+ if (!dev->priv.esw_ingress_root_ns)
+ return -ENOMEM;
+
+ /* create 1 prio */
+ prio = fs_create_prio(&dev->priv.esw_ingress_root_ns->ns, 0, MLX5_TOTAL_VPORTS(dev));
+ if (IS_ERR(prio))
+ return PTR_ERR(prio);
+ else
+ return 0;
+}
+
int mlx5_init_fs(struct mlx5_core_dev *dev)
{
int err = 0;
@@ -1731,8 +1791,21 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
err = init_fdb_root_ns(dev);
if (err)
- cleanup_root_ns(dev);
+ goto err;
+ }
+ if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
+ err = init_egress_acl_root_ns(dev);
+ if (err)
+ goto err;
+ }
+ if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
+ err = init_ingress_acl_root_ns(dev);
+ if (err)
+ goto err;
}
+ return 0;
+err:
+ mlx5_cleanup_fs(dev);
return err;
}
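Tying the flow steering changes together: a consumer first resolves one of
the two new ACL namespaces, then creates a table bound to a specific
e-Switch vport. A minimal usage sketch modeled on
esw_vport_enable_ingress_acl() earlier in this series (hypothetical wrapper
name, error handling trimmed):

/* Hypothetical wrapper showing the new per-vport flow table API:
 * resolve the ingress ACL namespace and create a table bound to the
 * given e-Switch vport (prio 0, level 0).
 */
static struct mlx5_flow_table *
example_create_vport_acl(struct mlx5_core_dev *dev, u16 vport, int size)
{
	struct mlx5_flow_namespace *root_ns;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
	if (!root_ns)
		return NULL;

	return mlx5_create_vport_flow_table(root_ns, 0, size, 0, vport);
}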
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index d607e564f454..8e76cc505f5a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -45,8 +45,10 @@ enum fs_node_type {
};
enum fs_flow_table_type {
- FS_FT_NIC_RX = 0x0,
- FS_FT_FDB = 0X4,
+ FS_FT_NIC_RX = 0x0,
+ FS_FT_ESW_EGRESS_ACL = 0x2,
+ FS_FT_ESW_INGRESS_ACL = 0x3,
+ FS_FT_FDB = 0X4,
};
enum fs_fte_status {
@@ -79,6 +81,7 @@ struct mlx5_flow_rule {
struct mlx5_flow_table {
struct fs_node node;
u32 id;
+ u16 vport;
unsigned int max_fte;
unsigned int level;
enum fs_flow_table_type type;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 0b0b226c789e..482604bd051c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -42,6 +42,8 @@
#define DRIVER_VERSION "3.0-1"
#define DRIVER_RELDATE "January 2015"
+#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs(mdev->pdev))
+
extern int mlx5_core_debug_mask;
#define mlx5_core_dbg(__dev, format, ...) \
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 8fecd6d6f814..ee0d5a937f02 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1349,6 +1349,18 @@ enum mlx5_cap_type {
#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)
+#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)
+
+#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)
+
+#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)
+
+#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)
+
#define MLX5_CAP_ESW(mdev, cap) \
MLX5_GET(e_switch_cap, \
mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index d5529449ef47..9613143f0561 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -518,6 +518,8 @@ struct mlx5_priv {
unsigned long pci_dev_data;
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_root_namespace *fdb_root_ns;
+ struct mlx5_flow_root_namespace *esw_egress_root_ns;
+ struct mlx5_flow_root_namespace *esw_ingress_root_ns;
};
enum mlx5_device_state {
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 165ff4f9cc6a..6467569ad76e 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -58,6 +58,8 @@ enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_LEFTOVERS,
MLX5_FLOW_NAMESPACE_ANCHOR,
MLX5_FLOW_NAMESPACE_FDB,
+ MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+ MLX5_FLOW_NAMESPACE_ESW_INGRESS,
};
struct mlx5_flow_table;
@@ -90,6 +92,11 @@ mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
int prio,
int num_flow_table_entries,
u32 level);
+struct mlx5_flow_table *
+mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
+ int prio,
+ int num_flow_table_entries,
+ u32 level, u16 vport);
int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);
/* inbox should be set with the following values: