author     Jason Gunthorpe <jgg@nvidia.com>    2022-12-09 22:52:17 +0300
committer  Jason Gunthorpe <jgg@nvidia.com>    2022-12-09 22:52:17 +0300
commit     d69e8c63fcbbf695ff7ff2c6d26efead23cfbb3a (patch)
tree       4d714ecd331233069ab718989bb017dfd934e129 /drivers/net/ethernet/mellanox
parent     6cfe7bd0dfd33033683639039b5608d6534c19eb (diff)
parent     76dcd734eca23168cb008912c0f69ff408905235 (diff)
Merge tag 'v6.1-rc8' into rdma.git for-next
For dependencies in following patches

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Diffstat (limited to 'drivers/net/ethernet/mellanox')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 62
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h | 45
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c | 31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c | 92
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c | 141
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c | 100
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c | 88
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 2
32 files changed, 541 insertions(+), 313 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index b149e601f673..48cfaa7eaf50 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -697,7 +697,8 @@ static int mlx4_create_zones(struct mlx4_dev *dev,
err = mlx4_bitmap_init(*bitmap + k, 1,
MLX4_QP_TABLE_RAW_ETH_SIZE - 1, 0,
0);
- mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);
+ if (!err)
+ mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);
}
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 46ba4c2faad2..e7a894ba5c3e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -45,6 +45,8 @@
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/tout.h"
+#define CREATE_TRACE_POINTS
+#include "diag/cmd_tracepoint.h"
enum {
CMD_IF_REV = 5,
@@ -785,27 +787,14 @@ EXPORT_SYMBOL(mlx5_cmd_out_err);
static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
{
u16 opcode, op_mod;
- u32 syndrome;
- u8 status;
u16 uid;
- int err;
-
- syndrome = MLX5_GET(mbox_out, out, syndrome);
- status = MLX5_GET(mbox_out, out, status);
opcode = MLX5_GET(mbox_in, in, opcode);
op_mod = MLX5_GET(mbox_in, in, op_mod);
uid = MLX5_GET(mbox_in, in, uid);
- err = cmd_status_to_err(status);
-
if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
mlx5_cmd_out_err(dev, opcode, op_mod, out);
- else
- mlx5_core_dbg(dev,
- "%s(0x%x) op_mod(0x%x) uid(%d) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n",
- mlx5_command_str(opcode), opcode, op_mod, uid,
- cmd_status_str(status), status, syndrome, err);
}
int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out)
@@ -1016,6 +1005,7 @@ static void cmd_work_handler(struct work_struct *work)
cmd_ent_get(ent);
set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
+ cmd_ent_get(ent); /* for the _real_ FW event on completion */
/* Skip sending command to fw if internal error */
if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) {
ent->ret = -ENXIO;
@@ -1023,7 +1013,6 @@ static void cmd_work_handler(struct work_struct *work)
return;
}
- cmd_ent_get(ent); /* for the _real_ FW event on completion */
/* ring doorbell after the descriptor is valid */
mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
wmb();
@@ -1508,8 +1497,8 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
return -EFAULT;
err = sscanf(outlen_str, "%d", &outlen);
- if (err < 0)
- return err;
+ if (err != 1)
+ return -EINVAL;
ptr = kzalloc(outlen, GFP_KERNEL);
if (!ptr)
@@ -1672,8 +1661,8 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
cmd_ent_put(ent); /* timeout work was canceled */
if (!forced || /* Real FW completion */
- pci_channel_offline(dev->pdev) || /* FW is inaccessible */
- dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ mlx5_cmd_is_down(dev) || /* No real FW completion is expected */
+ !opcode_allowed(cmd, ent->op))
cmd_ent_put(ent);
ent->ts2 = ktime_get_ns();
@@ -1770,12 +1759,17 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev)
struct mlx5_cmd *cmd = &dev->cmd;
int i;
- for (i = 0; i < cmd->max_reg_cmds; i++)
- while (down_trylock(&cmd->sem))
+ for (i = 0; i < cmd->max_reg_cmds; i++) {
+ while (down_trylock(&cmd->sem)) {
mlx5_cmd_trigger_completions(dev);
+ cond_resched();
+ }
+ }
- while (down_trylock(&cmd->pages_sem))
+ while (down_trylock(&cmd->pages_sem)) {
mlx5_cmd_trigger_completions(dev);
+ cond_resched();
+ }
/* Unlock cmdif */
up(&cmd->pages_sem);
@@ -1887,6 +1881,16 @@ out_in:
return err;
}
+static void mlx5_cmd_err_trace(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out)
+{
+ u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
+ u8 status = MLX5_GET(mbox_out, out, status);
+
+ trace_mlx5_cmd(mlx5_command_str(opcode), opcode, op_mod,
+ cmd_status_str(status), status, syndrome,
+ cmd_status_to_err(status));
+}
+
static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
u32 syndrome, int err)
{
@@ -1909,7 +1913,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
}
/* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
-static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, void *out)
+static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, u16 op_mod, void *out)
{
u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
u8 status = MLX5_GET(mbox_out, out, status);
@@ -1917,8 +1921,10 @@ static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, void *
if (err == -EREMOTEIO) /* -EREMOTEIO is preserved */
err = -EIO;
- if (!err && status != MLX5_CMD_STAT_OK)
+ if (!err && status != MLX5_CMD_STAT_OK) {
err = -EREMOTEIO;
+ mlx5_cmd_err_trace(dev, opcode, op_mod, out);
+ }
cmd_status_log(dev, opcode, status, syndrome, err);
return err;
@@ -1946,9 +1952,9 @@ int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int
{
int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
u16 opcode = MLX5_GET(mbox_in, in, opcode);
+ u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
- err = cmd_status_err(dev, err, opcode, out);
- return err;
+ return cmd_status_err(dev, err, opcode, op_mod, out);
}
EXPORT_SYMBOL(mlx5_cmd_do);
@@ -1992,8 +1998,9 @@ int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
{
int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
u16 opcode = MLX5_GET(mbox_in, in, opcode);
+ u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
- err = cmd_status_err(dev, err, opcode, out);
+ err = cmd_status_err(dev, err, opcode, op_mod, out);
return mlx5_cmd_check(dev, err, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec_polling);
@@ -2029,7 +2036,7 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work)
struct mlx5_async_ctx *ctx;
ctx = work->ctx;
- status = cmd_status_err(ctx->dev, status, work->opcode, work->out);
+ status = cmd_status_err(ctx->dev, status, work->opcode, work->op_mod, work->out);
work->user_callback(status, work);
if (atomic_dec_and_test(&ctx->num_inflight))
complete(&ctx->inflight_done);
@@ -2044,6 +2051,7 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
work->ctx = ctx;
work->user_callback = callback;
work->opcode = MLX5_GET(mbox_in, in, opcode);
+ work->op_mod = MLX5_GET(mbox_in, in, op_mod);
work->out = out;
if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
return -EIO;
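The outlen_write() fix in this file hinges on sscanf()'s return convention: it returns the number of conversions that matched, never a negative errno, so the old `if (err < 0)` check let a parse failure (return value 0) through with a garbage outlen. A minimal userspace sketch of the corrected check (illustrative, not from the patch):

    #include <stdio.h>

    /* sscanf() reports how many conversions matched; bad input yields 0. */
    static int parse_outlen(const char *buf, int *outlen)
    {
            if (sscanf(buf, "%d", outlen) != 1)
                    return -1;      /* the kernel hunk returns -EINVAL here */
            return 0;
    }

    int main(void)
    {
            int outlen;

            printf("%d\n", parse_outlen("128", &outlen));   /* 0, outlen == 128 */
            printf("%d\n", parse_outlen("abc", &outlen));   /* -1: no match */
            return 0;
    }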
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h
new file mode 100644
index 000000000000..406ebe17405f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mlx5
+
+#if !defined(_MLX5_CMD_TP_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MLX5_CMD_TP_H_
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+TRACE_EVENT(mlx5_cmd,
+ TP_PROTO(const char *command_str, u16 opcode, u16 op_mod,
+ const char *status_str, u8 status, u32 syndrome, int err),
+ TP_ARGS(command_str, opcode, op_mod, status_str, status, syndrome, err),
+ TP_STRUCT__entry(__string(command_str, command_str)
+ __field(u16, opcode)
+ __field(u16, op_mod)
+ __string(status_str, status_str)
+ __field(u8, status)
+ __field(u32, syndrome)
+ __field(int, err)
+ ),
+ TP_fast_assign(__assign_str(command_str, command_str);
+ __entry->opcode = opcode;
+ __entry->op_mod = op_mod;
+ __assign_str(status_str, status_str);
+ __entry->status = status;
+ __entry->syndrome = syndrome;
+ __entry->err = err;
+ ),
+ TP_printk("%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x), err(%d)",
+ __get_str(command_str), __entry->opcode, __entry->op_mod,
+ __get_str(status_str), __entry->status, __entry->syndrome,
+ __entry->err)
+);
+
+#endif /* _MLX5_CMD_TP_H_ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ./diag
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cmd_tracepoint
+#include <trace/define_trace.h>
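The header above follows the standard kernel tracepoint layout, and the cmd.c hunk earlier in this diff shows the one consumer-side requirement: exactly one translation unit defines CREATE_TRACE_POINTS before including the header, so <trace/define_trace.h> emits the tracepoint bodies; every other file includes it plainly and gets only the declarations. Both sides, using the calls already present in this diff:

    /* In exactly one .c file (cmd.c in this series): */
    #define CREATE_TRACE_POINTS
    #include "diag/cmd_tracepoint.h"

    /* Anywhere the event fires (this mirrors mlx5_cmd_err_trace() above): */
    trace_mlx5_cmd(mlx5_command_str(opcode), opcode, op_mod,
                   cmd_status_str(status), status, syndrome,
                   cmd_status_to_err(status));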
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 978a2bb8e122..21831386b26e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -638,7 +638,7 @@ static void mlx5_tracer_handle_timestamp_trace(struct mlx5_fw_tracer *tracer,
trace_timestamp = (timestamp_event.timestamp & MASK_52_7) |
(str_frmt->timestamp & MASK_6_0);
else
- trace_timestamp = ((timestamp_event.timestamp & MASK_52_7) - 1) |
+ trace_timestamp = ((timestamp_event.timestamp - 1) & MASK_52_7) |
(str_frmt->timestamp & MASK_6_0);
mlx5_tracer_print_trace(str_frmt, dev, trace_timestamp);
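The one-line fw_tracer change reorders a decrement and a mask: subtracting after masking lets the borrow land in the low 7 bits that MASK_52_7 had just cleared, corrupting the MASK_6_0 field OR-ed in next; subtracting first lets the borrow propagate through the full value, and the mask then re-clears the low bits. A standalone demo, assuming MASK_52_7 spans bits 52..7 (the constant's definition isn't shown in this diff):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint64_t MASK_52_7 =
                    (((uint64_t)1 << 53) - 1) & ~(uint64_t)0x7f;
            uint64_t ts = 0x80;     /* low 7 bits clear, like a masked timestamp */

            uint64_t before = (ts & MASK_52_7) - 1; /* 0x7f: spills into bits 6..0 */
            uint64_t after  = (ts - 1) & MASK_52_7; /* 0x00: bits 6..0 stay clear  */

            printf("old: %#llx  new: %#llx\n",
                   (unsigned long long)before, (unsigned long long)after);
            return 0;
    }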
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 39ef2a2561a3..8099a21e674c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -164,6 +164,36 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr
return err;
}
+static int
+mlx5_esw_bridge_changeupper_validate_netdev(void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct net_device *upper = info->upper_dev;
+ struct net_device *lower;
+ struct list_head *iter;
+
+ if (!netif_is_bridge_master(upper) || !netif_is_lag_master(dev))
+ return 0;
+
+ netdev_for_each_lower_dev(dev, lower, iter) {
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_priv *priv;
+
+ if (!mlx5e_eswitch_rep(lower))
+ continue;
+
+ priv = netdev_priv(lower);
+ mdev = priv->mdev;
+ if (!mlx5_lag_is_active(mdev))
+ return -EAGAIN;
+ if (!mlx5_lag_is_shared_fdb(mdev))
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
@@ -171,6 +201,7 @@ static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
switch (event) {
case NETDEV_PRECHANGEUPPER:
+ err = mlx5_esw_bridge_changeupper_validate_netdev(ptr);
break;
case NETDEV_CHANGEUPPER:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
index 305fde62a78d..3337241cfd84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
@@ -6,70 +6,42 @@
#include "en/tc_priv.h"
#include "mlx5_core.h"
-/* Must be aligned with enum flow_action_id. */
static struct mlx5e_tc_act *tc_acts_fdb[NUM_FLOW_ACTIONS] = {
- &mlx5e_tc_act_accept,
- &mlx5e_tc_act_drop,
- &mlx5e_tc_act_trap,
- &mlx5e_tc_act_goto,
- &mlx5e_tc_act_mirred,
- &mlx5e_tc_act_mirred,
- &mlx5e_tc_act_redirect_ingress,
- NULL, /* FLOW_ACTION_MIRRED_INGRESS, */
- &mlx5e_tc_act_vlan,
- &mlx5e_tc_act_vlan,
- &mlx5e_tc_act_vlan_mangle,
- &mlx5e_tc_act_tun_encap,
- &mlx5e_tc_act_tun_decap,
- &mlx5e_tc_act_pedit,
- &mlx5e_tc_act_pedit,
- &mlx5e_tc_act_csum,
- NULL, /* FLOW_ACTION_MARK, */
- &mlx5e_tc_act_ptype,
- NULL, /* FLOW_ACTION_PRIORITY, */
- NULL, /* FLOW_ACTION_WAKE, */
- NULL, /* FLOW_ACTION_QUEUE, */
- &mlx5e_tc_act_sample,
- &mlx5e_tc_act_police,
- &mlx5e_tc_act_ct,
- NULL, /* FLOW_ACTION_CT_METADATA, */
- &mlx5e_tc_act_mpls_push,
- &mlx5e_tc_act_mpls_pop,
- NULL, /* FLOW_ACTION_MPLS_MANGLE, */
- NULL, /* FLOW_ACTION_GATE, */
- NULL, /* FLOW_ACTION_PPPOE_PUSH, */
- NULL, /* FLOW_ACTION_JUMP, */
- NULL, /* FLOW_ACTION_PIPE, */
- &mlx5e_tc_act_vlan,
- &mlx5e_tc_act_vlan,
+ [FLOW_ACTION_ACCEPT] = &mlx5e_tc_act_accept,
+ [FLOW_ACTION_DROP] = &mlx5e_tc_act_drop,
+ [FLOW_ACTION_TRAP] = &mlx5e_tc_act_trap,
+ [FLOW_ACTION_GOTO] = &mlx5e_tc_act_goto,
+ [FLOW_ACTION_REDIRECT] = &mlx5e_tc_act_mirred,
+ [FLOW_ACTION_MIRRED] = &mlx5e_tc_act_mirred,
+ [FLOW_ACTION_REDIRECT_INGRESS] = &mlx5e_tc_act_redirect_ingress,
+ [FLOW_ACTION_VLAN_PUSH] = &mlx5e_tc_act_vlan,
+ [FLOW_ACTION_VLAN_POP] = &mlx5e_tc_act_vlan,
+ [FLOW_ACTION_VLAN_MANGLE] = &mlx5e_tc_act_vlan_mangle,
+ [FLOW_ACTION_TUNNEL_ENCAP] = &mlx5e_tc_act_tun_encap,
+ [FLOW_ACTION_TUNNEL_DECAP] = &mlx5e_tc_act_tun_decap,
+ [FLOW_ACTION_MANGLE] = &mlx5e_tc_act_pedit,
+ [FLOW_ACTION_ADD] = &mlx5e_tc_act_pedit,
+ [FLOW_ACTION_CSUM] = &mlx5e_tc_act_csum,
+ [FLOW_ACTION_PTYPE] = &mlx5e_tc_act_ptype,
+ [FLOW_ACTION_SAMPLE] = &mlx5e_tc_act_sample,
+ [FLOW_ACTION_POLICE] = &mlx5e_tc_act_police,
+ [FLOW_ACTION_CT] = &mlx5e_tc_act_ct,
+ [FLOW_ACTION_MPLS_PUSH] = &mlx5e_tc_act_mpls_push,
+ [FLOW_ACTION_MPLS_POP] = &mlx5e_tc_act_mpls_pop,
+ [FLOW_ACTION_VLAN_PUSH_ETH] = &mlx5e_tc_act_vlan,
+ [FLOW_ACTION_VLAN_POP_ETH] = &mlx5e_tc_act_vlan,
};
-/* Must be aligned with enum flow_action_id. */
static struct mlx5e_tc_act *tc_acts_nic[NUM_FLOW_ACTIONS] = {
- &mlx5e_tc_act_accept,
- &mlx5e_tc_act_drop,
- NULL, /* FLOW_ACTION_TRAP, */
- &mlx5e_tc_act_goto,
- &mlx5e_tc_act_mirred_nic,
- NULL, /* FLOW_ACTION_MIRRED, */
- NULL, /* FLOW_ACTION_REDIRECT_INGRESS, */
- NULL, /* FLOW_ACTION_MIRRED_INGRESS, */
- NULL, /* FLOW_ACTION_VLAN_PUSH, */
- NULL, /* FLOW_ACTION_VLAN_POP, */
- NULL, /* FLOW_ACTION_VLAN_MANGLE, */
- NULL, /* FLOW_ACTION_TUNNEL_ENCAP, */
- NULL, /* FLOW_ACTION_TUNNEL_DECAP, */
- &mlx5e_tc_act_pedit,
- &mlx5e_tc_act_pedit,
- &mlx5e_tc_act_csum,
- &mlx5e_tc_act_mark,
- NULL, /* FLOW_ACTION_PTYPE, */
- NULL, /* FLOW_ACTION_PRIORITY, */
- NULL, /* FLOW_ACTION_WAKE, */
- NULL, /* FLOW_ACTION_QUEUE, */
- NULL, /* FLOW_ACTION_SAMPLE, */
- NULL, /* FLOW_ACTION_POLICE, */
- &mlx5e_tc_act_ct,
+ [FLOW_ACTION_ACCEPT] = &mlx5e_tc_act_accept,
+ [FLOW_ACTION_DROP] = &mlx5e_tc_act_drop,
+ [FLOW_ACTION_GOTO] = &mlx5e_tc_act_goto,
+ [FLOW_ACTION_REDIRECT] = &mlx5e_tc_act_mirred_nic,
+ [FLOW_ACTION_MANGLE] = &mlx5e_tc_act_pedit,
+ [FLOW_ACTION_ADD] = &mlx5e_tc_act_pedit,
+ [FLOW_ACTION_CSUM] = &mlx5e_tc_act_csum,
+ [FLOW_ACTION_MARK] = &mlx5e_tc_act_mark,
+ [FLOW_ACTION_CT] = &mlx5e_tc_act_ct,
};
/**
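Both tables above trade positional initialization, which had to mirror enum flow_action_id exactly (NULL placeholders and all), for designated initializers keyed by the enum itself: entries may appear in any order, new enum values no longer shift the table, and anything unlisted is implicitly NULL, i.e. unsupported. The idiom in miniature (names are illustrative):

    enum demo_action { ACT_ACCEPT, ACT_DROP, ACT_MARK, NUM_DEMO_ACTIONS };

    typedef int (*act_handler_t)(void);

    static int handle_accept(void) { return 0; }
    static int handle_drop(void)   { return 1; }

    /* Unlisted entries (ACT_MARK here) are zero-initialized to NULL, so the
     * lookup site treats NULL as "not offloadable" with no placeholder rows.
     */
    static act_handler_t demo_handlers[NUM_DEMO_ACTIONS] = {
            [ACT_ACCEPT] = handle_accept,
            [ACT_DROP]   = handle_drop,
    };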
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 5aff97914367..ff73d25bc6eb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -224,15 +224,16 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
list_for_each_entry(flow, flow_list, tmp_list) {
if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW))
continue;
- spec = &flow->attr->parse_attr->spec;
-
- /* update from encap rule to slow path rule */
- rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
attr = mlx5e_tc_get_encap_attr(flow);
esw_attr = attr->esw_attr;
/* mark the flow's encap dest as non-valid */
esw_attr->dests[flow->tmp_entry_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
+ esw_attr->dests[flow->tmp_entry_index].pkt_reformat = NULL;
+
+ /* update from encap rule to slow path rule */
+ spec = &flow->attr->parse_attr->spec;
+ rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -251,6 +252,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
/* we know that the encap is valid */
e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
+ e->pkt_reformat = NULL;
}
static void mlx5e_take_tmp_flow(struct mlx5e_tc_flow *flow,
@@ -762,8 +764,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
int out_index,
struct netlink_ext_ack *extack,
- struct net_device **encap_dev,
- bool *encap_valid)
+ struct net_device **encap_dev)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_flow_parse_attr *parse_attr;
@@ -878,9 +879,8 @@ attach_flow:
if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
attr->esw_attr->dests[out_index].pkt_reformat = e->pkt_reformat;
attr->esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
- *encap_valid = true;
} else {
- *encap_valid = false;
+ flow_flag_set(flow, SLOW);
}
mutex_unlock(&esw->offloads.encap_tbl_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h
index d542b8476491..8ad273dde40e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h
@@ -17,8 +17,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
int out_index,
struct netlink_ext_ack *extack,
- struct net_device **encap_dev,
- bool *encap_valid);
+ struct net_device **encap_dev);
int mlx5e_attach_decap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index cb164b62f543..853f312cd757 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -11,6 +11,27 @@
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
+/* IPSEC inline data includes:
+ * 1. ESP trailer: up to 255 bytes of padding, 1 byte for pad length, 1 byte for
+ * next header.
+ * 2. ESP authentication data: 16 bytes for ICV.
+ */
+#define MLX5E_MAX_TX_IPSEC_DS DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + \
+ 255 + 1 + 1 + 16, MLX5_SEND_WQE_DS)
+
+/* 366 should be big enough to cover all L2, L3 and L4 headers with possible
+ * encapsulations.
+ */
+#define MLX5E_MAX_TX_INLINE_DS DIV_ROUND_UP(366 - INL_HDR_START_SZ + VLAN_HLEN, \
+ MLX5_SEND_WQE_DS)
+
+/* Sync the calculation with mlx5e_sq_calc_wqe_attr. */
+#define MLX5E_MAX_TX_WQEBBS DIV_ROUND_UP(MLX5E_TX_WQE_EMPTY_DS_COUNT + \
+ MLX5E_MAX_TX_INLINE_DS + \
+ MLX5E_MAX_TX_IPSEC_DS + \
+ MAX_SKB_FRAGS + 1, \
+ MLX5_SEND_WQEBB_NUM_DS)
+
#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
static inline
@@ -424,6 +445,8 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
{
+ WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < mlx5e_get_max_sq_wqebbs(mdev));
+
/* A WQE must not cross the page boundary, hence two conditions:
* 1. Its size must not exceed the page size.
* 2. If the WQE size is X, and the space remaining in a page is less
@@ -436,7 +459,6 @@ static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_si
"wqe_size %u is greater than max SQ WQEBBs %u",
wqe_size, mlx5e_get_max_sq_wqebbs(mdev));
-
return MLX5E_STOP_ROOM(wqe_size);
}
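As a sanity check of the new bound, assuming the usual values MLX5_SEND_WQE_DS = 16 bytes, 4 DS per 64-byte WQEBB, INL_HDR_START_SZ = 2, VLAN_HLEN = 4, sizeof(struct mlx5_wqe_inline_seg) = 4, MLX5E_TX_WQE_EMPTY_DS_COUNT = 3 and MAX_SKB_FRAGS = 17 (none of these constants are defined in this diff, so they may differ per tree):

    MLX5E_MAX_TX_IPSEC_DS  = DIV_ROUND_UP(4 + 255 + 1 + 1 + 16, 16) = 18 DS
    MLX5E_MAX_TX_INLINE_DS = DIV_ROUND_UP(366 - 2 + 4, 16)          = 23 DS
    MLX5E_MAX_TX_WQEBBS    = DIV_ROUND_UP(3 + 23 + 18 + 17 + 1, 4)  = 16 WQEBBs

Under those assumptions a worst-case WQE is 16 * 64 = 1024 bytes, comfortably within one page — which is what the WARN_ON_ONCE added to mlx5e_stop_room_for_wqe() and the MLX5E_MAX_TX_WQEBBS capability check in en_main.c (later in this diff) guard against.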
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 4685c652c97e..20507ef2f956 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -117,7 +117,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
xdpi.page.rq = rq;
dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf);
- dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_TO_DEVICE);
+ dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_BIDIRECTIONAL);
if (unlikely(xdp_frame_has_frags(xdpf))) {
sinfo = xdp_get_shared_info_from_frame(xdpf);
@@ -131,7 +131,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
skb_frag_off(frag);
len = skb_frag_size(frag);
dma_sync_single_for_device(sq->pdev, addr, len,
- DMA_TO_DEVICE);
+ DMA_BIDIRECTIONAL);
}
}
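The two DMA_TO_DEVICE → DMA_BIDIRECTIONAL changes follow the DMA API rule that every dma_sync_single_for_{device,cpu}() call must pass the direction the buffer was mapped with; RX pages recycled into XDP_TX are written by the device on receive and read back on transmit, so the mapping — and therefore every sync — must be bidirectional. The en_rx.c hunks later in this diff make the same point by syncing with rq->buff.map_dir. A hedged sketch of the mapping side (simplified, names illustrative):

    static int demo_map_rx_page(struct device *dev, struct page *page,
                                dma_addr_t *addr)
    {
            /* The device writes RX data into the page and later reads it
             * back for XDP_TX, so neither DMA_FROM_DEVICE nor DMA_TO_DEVICE
             * alone fits the page's whole lifetime.
             */
            *addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
            return dma_mapping_error(dev, *addr) ? -ENOMEM : 0;
    }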
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
index 285d32d2fd08..d7c020f72401 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
@@ -365,7 +365,7 @@ void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs)
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++)
accel_fs_tcp_destroy_table(fs, i);
- kfree(accel_tcp);
+ kvfree(accel_tcp);
mlx5e_fs_set_accel_tcp(fs, NULL);
}
@@ -397,7 +397,7 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs)
err_destroy_tables:
while (--i >= 0)
accel_fs_tcp_destroy_table(fs, i);
- kfree(accel_tcp);
+ kvfree(accel_tcp);
mlx5e_fs_set_accel_tcp(fs, NULL);
return err;
}
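The kfree → kvfree swaps matter because the accel_tcp object is allocated with kvzalloc(), which may fall back to vmalloc() under memory pressure; vmalloc memory handed to kfree() corrupts the slab. The pairing rule in short:

    void *buf = kvzalloc(size, GFP_KERNEL);  /* slab *or* vmalloc */
    if (!buf)
            return -ENOMEM;
    /* ... use buf ... */
    kvfree(buf);                             /* handles both; plain kfree() does not */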
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index 2ef36cb9555a..f900709639f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -368,15 +368,15 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
obj_attrs.aso_pdn = macsec->aso.pdn;
obj_attrs.epn_state = sa->epn_state;
- if (is_tx) {
- obj_attrs.ssci = cpu_to_be32((__force u32)ctx->sa.tx_sa->ssci);
- key = &ctx->sa.tx_sa->key;
- } else {
- obj_attrs.ssci = cpu_to_be32((__force u32)ctx->sa.rx_sa->ssci);
- key = &ctx->sa.rx_sa->key;
+ key = (is_tx) ? &ctx->sa.tx_sa->key : &ctx->sa.rx_sa->key;
+
+ if (sa->epn_state.epn_enabled) {
+ obj_attrs.ssci = (is_tx) ? cpu_to_be32((__force u32)ctx->sa.tx_sa->ssci) :
+ cpu_to_be32((__force u32)ctx->sa.rx_sa->ssci);
+
+ memcpy(&obj_attrs.salt, &key->salt, sizeof(key->salt));
}
- memcpy(&obj_attrs.salt, &key->salt, sizeof(key->salt));
obj_attrs.replay_window = ctx->secy->replay_window;
obj_attrs.replay_protect = ctx->secy->replay_protect;
@@ -427,15 +427,15 @@ mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head *list, sci_t sci)
return NULL;
}
-static int mlx5e_macsec_update_rx_sa(struct mlx5e_macsec *macsec,
- struct mlx5e_macsec_sa *rx_sa,
- bool active)
+static int macsec_rx_sa_active_update(struct macsec_context *ctx,
+ struct mlx5e_macsec_sa *rx_sa,
+ bool active)
{
- struct mlx5_core_dev *mdev = macsec->mdev;
- struct mlx5_macsec_obj_attrs attrs = {};
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec *macsec = priv->macsec;
int err = 0;
- if (rx_sa->active != active)
+ if (rx_sa->active == active)
return 0;
rx_sa->active = active;
@@ -444,13 +444,11 @@ static int mlx5e_macsec_update_rx_sa(struct mlx5e_macsec *macsec,
return 0;
}
- attrs.sci = cpu_to_be64((__force u64)rx_sa->sci);
- attrs.enc_key_id = rx_sa->enc_key_id;
- err = mlx5e_macsec_create_object(mdev, &attrs, false, &rx_sa->macsec_obj_id);
+ err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
if (err)
- return err;
+ rx_sa->active = false;
- return 0;
+ return err;
}
static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx)
@@ -476,6 +474,11 @@ static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx)
return false;
}
+ if (!ctx->secy->tx_sc.encrypt) {
+ netdev_err(netdev, "MACsec offload: encrypt off isn't supported\n");
+ return false;
+ }
+
return true;
}
@@ -620,6 +623,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
if (tx_sa->active == ctx_tx_sa->active)
goto out;
+ tx_sa->active = ctx_tx_sa->active;
if (tx_sa->assoc_num != tx_sc->encoding_sa)
goto out;
@@ -635,8 +639,6 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
}
-
- tx_sa->active = ctx_tx_sa->active;
out:
mutex_unlock(&macsec->lock);
@@ -736,9 +738,14 @@ static int mlx5e_macsec_add_rxsc(struct macsec_context *ctx)
sc_xarray_element->rx_sc = rx_sc;
err = xa_alloc(&macsec->sc_xarray, &sc_xarray_element->fs_id, sc_xarray_element,
- XA_LIMIT(1, USHRT_MAX), GFP_KERNEL);
- if (err)
+ XA_LIMIT(1, MLX5_MACEC_RX_FS_ID_MAX), GFP_KERNEL);
+ if (err) {
+ if (err == -EBUSY)
+ netdev_err(ctx->netdev,
+ "MACsec offload: unable to create entry for RX SC (%d Rx SCs already allocated)\n",
+ MLX5_MACEC_RX_FS_ID_MAX);
goto destroy_sc_xarray_elemenet;
+ }
rx_sc->md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
if (!rx_sc->md_dst) {
@@ -798,16 +805,16 @@ static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx)
goto out;
}
- rx_sc->active = ctx_rx_sc->active;
if (rx_sc->active == ctx_rx_sc->active)
goto out;
+ rx_sc->active = ctx_rx_sc->active;
for (i = 0; i < MACSEC_NUM_AN; ++i) {
rx_sa = rx_sc->rx_sa[i];
if (!rx_sa)
continue;
- err = mlx5e_macsec_update_rx_sa(macsec, rx_sa, rx_sa->active && ctx_rx_sc->active);
+ err = macsec_rx_sa_active_update(ctx, rx_sa, rx_sa->active && ctx_rx_sc->active);
if (err)
goto out;
}
@@ -818,16 +825,43 @@ out:
return err;
}
+static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct mlx5e_macsec_rx_sc *rx_sc)
+{
+ struct mlx5e_macsec_sa *rx_sa;
+ int i;
+
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ rx_sa = rx_sc->rx_sa[i];
+ if (!rx_sa)
+ continue;
+
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
+
+ kfree(rx_sa);
+ rx_sc->rx_sa[i] = NULL;
+ }
+
+ /* At this point the relevant MACsec offload Rx rule already removed at
+ * mlx5e_macsec_cleanup_sa need to wait for datapath to finish current
+ * Rx related data propagating using xa_erase which uses rcu to sync,
+ * once fs_id is erased then this rx_sc is hidden from datapath.
+ */
+ list_del_rcu(&rx_sc->rx_sc_list_element);
+ xa_erase(&macsec->sc_xarray, rx_sc->sc_xarray_element->fs_id);
+ metadata_dst_free(rx_sc->md_dst);
+ kfree(rx_sc->sc_xarray_element);
+ kfree_rcu(rx_sc);
+}
+
static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx)
{
struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
struct mlx5e_macsec_device *macsec_device;
struct mlx5e_macsec_rx_sc *rx_sc;
- struct mlx5e_macsec_sa *rx_sa;
struct mlx5e_macsec *macsec;
struct list_head *list;
int err = 0;
- int i;
mutex_lock(&priv->macsec->lock);
@@ -849,31 +883,7 @@ static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx)
goto out;
}
- for (i = 0; i < MACSEC_NUM_AN; ++i) {
- rx_sa = rx_sc->rx_sa[i];
- if (!rx_sa)
- continue;
-
- mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
- mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
-
- kfree(rx_sa);
- rx_sc->rx_sa[i] = NULL;
- }
-
-/*
- * At this point the relevant MACsec offload Rx rule already removed at
- * mlx5e_macsec_cleanup_sa need to wait for datapath to finish current
- * Rx related data propagating using xa_erase which uses rcu to sync,
- * once fs_id is erased then this rx_sc is hidden from datapath.
- */
- list_del_rcu(&rx_sc->rx_sc_list_element);
- xa_erase(&macsec->sc_xarray, rx_sc->sc_xarray_element->fs_id);
- metadata_dst_free(rx_sc->md_dst);
- kfree(rx_sc->sc_xarray_element);
-
- kfree_rcu(rx_sc);
-
+ macsec_del_rxsc_ctx(macsec, rx_sc);
out:
mutex_unlock(&macsec->lock);
@@ -1015,7 +1025,7 @@ static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
goto out;
}
- err = mlx5e_macsec_update_rx_sa(macsec, rx_sa, ctx_rx_sa->active);
+ err = macsec_rx_sa_active_update(ctx, rx_sa, ctx_rx_sa->active);
out:
mutex_unlock(&macsec->lock);
@@ -1155,7 +1165,7 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
continue;
if (rx_sa->active) {
- err = mlx5e_macsec_init_sa(ctx, rx_sa, false, false);
+ err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
if (err)
goto out;
}
@@ -1234,7 +1244,6 @@ static int mlx5e_macsec_del_secy(struct macsec_context *ctx)
struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
struct mlx5e_macsec_device *macsec_device;
struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
- struct mlx5e_macsec_sa *rx_sa;
struct mlx5e_macsec_sa *tx_sa;
struct mlx5e_macsec *macsec;
struct list_head *list;
@@ -1263,28 +1272,15 @@ static int mlx5e_macsec_del_secy(struct macsec_context *ctx)
}
list = &macsec_device->macsec_rx_sc_list_head;
- list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
- for (i = 0; i < MACSEC_NUM_AN; ++i) {
- rx_sa = rx_sc->rx_sa[i];
- if (!rx_sa)
- continue;
-
- mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
- mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
- kfree(rx_sa);
- rx_sc->rx_sa[i] = NULL;
- }
-
- list_del_rcu(&rx_sc->rx_sc_list_element);
-
- kfree_rcu(rx_sc);
- }
+ list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element)
+ macsec_del_rxsc_ctx(macsec, rx_sc);
kfree(macsec_device->dev_addr);
macsec_device->dev_addr = NULL;
list_del_rcu(&macsec_device->macsec_device_list_element);
--macsec->num_of_devices;
+ kfree(macsec_device);
out:
mutex_unlock(&macsec->lock);
@@ -1536,6 +1532,8 @@ static void macsec_async_event(struct work_struct *work)
async_work = container_of(work, struct mlx5e_macsec_async_work, work);
macsec = async_work->macsec;
+ mutex_lock(&macsec->lock);
+
mdev = async_work->mdev;
obj_id = async_work->obj_id;
macsec_sa = get_macsec_tx_sa_from_obj_id(macsec, obj_id);
@@ -1557,6 +1555,7 @@ static void macsec_async_event(struct work_struct *work)
out_async_work:
kfree(async_work);
+ mutex_unlock(&macsec->lock);
}
static int macsec_obj_change_event(struct notifier_block *nb, unsigned long event, void *data)
@@ -1745,7 +1744,7 @@ void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
if (!macsec)
return;
- fs_id = MLX5_MACSEC_METADATA_HANDLE(macsec_meta_data);
+ fs_id = MLX5_MACSEC_RX_METADAT_HANDLE(macsec_meta_data);
rcu_read_lock();
sc_xarray_element = xa_load(&macsec->sc_xarray, fs_id);
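The macsec.c changes above bound the fs_id allocator so the id always fits in the 16 metadata bits the Rx rule writes (see the macsec_fs.c hunk below), and report the exhaustion case explicitly. The resulting lifecycle, sketched with the xarray API and the names from this diff:

    u32 fs_id;
    int err;

    err = xa_alloc(&macsec->sc_xarray, &fs_id, sc_xarray_element,
                   XA_LIMIT(1, MLX5_MACEC_RX_FS_ID_MAX), GFP_KERNEL);
    if (err)        /* -EBUSY once the whole 1..USHRT_MAX range is in use */
            return err;

    /* The datapath resolves the id locklessly under RCU ... */
    rcu_read_lock();
    sc_xarray_element = xa_load(&macsec->sc_xarray, fs_id);
    rcu_read_unlock();

    /* ... and teardown erases the id first, hiding the SC from the
     * datapath before the structures are freed (see macsec_del_rxsc_ctx).
     */
    xa_erase(&macsec->sc_xarray, fs_id);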
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h
index d580b4a91253..347380a2cd9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h
@@ -10,9 +10,11 @@
#include <net/macsec.h>
#include <net/dst_metadata.h>
-/* Bit31 - 30: MACsec marker, Bit3-0: MACsec id */
+/* Bit31 - 30: MACsec marker, Bit15-0: MACsec id */
+#define MLX5_MACEC_RX_FS_ID_MAX USHRT_MAX /* Must be power of two */
+#define MLX5_MACSEC_RX_FS_ID_MASK MLX5_MACEC_RX_FS_ID_MAX
#define MLX5_MACSEC_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x1)
-#define MLX5_MACSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(3, 0))
+#define MLX5_MACSEC_RX_METADAT_HANDLE(metadata) ((metadata) & MLX5_MACSEC_RX_FS_ID_MASK)
struct mlx5e_priv;
struct mlx5e_macsec;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c
index 1ac0cf04e811..5b658a5588c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c
@@ -250,7 +250,7 @@ static int macsec_fs_tx_create(struct mlx5e_macsec_fs *macsec_fs)
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
u32 *flow_group_in;
- int err = 0;
+ int err;
ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_EGRESS_MACSEC);
if (!ns)
@@ -261,8 +261,10 @@ static int macsec_fs_tx_create(struct mlx5e_macsec_fs *macsec_fs)
return -ENOMEM;
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
- if (!flow_group_in)
+ if (!flow_group_in) {
+ err = -ENOMEM;
goto out_spec;
+ }
tx_tables = &tx_fs->tables;
ft_crypto = &tx_tables->ft_crypto;
@@ -898,7 +900,7 @@ static int macsec_fs_rx_create(struct mlx5e_macsec_fs *macsec_fs)
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
u32 *flow_group_in;
- int err = 0;
+ int err;
ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);
if (!ns)
@@ -909,8 +911,10 @@ static int macsec_fs_rx_create(struct mlx5e_macsec_fs *macsec_fs)
return -ENOMEM;
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
- if (!flow_group_in)
+ if (!flow_group_in) {
+ err = -ENOMEM;
goto free_spec;
+ }
rx_tables = &rx_fs->tables;
ft_crypto = &rx_tables->ft_crypto;
@@ -1142,10 +1146,10 @@ macsec_fs_rx_add_rule(struct mlx5e_macsec_fs *macsec_fs,
ft_crypto = &rx_tables->ft_crypto;
/* Set bit[31 - 30] macsec marker - 0x01 */
- /* Set bit[3-0] fs id */
+ /* Set bit[15-0] fs id */
MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
- MLX5_SET(set_action_in, action, data, fs_id | BIT(30));
+ MLX5_SET(set_action_in, action, data, MLX5_MACSEC_RX_METADAT_HANDLE(fs_id) | BIT(30));
MLX5_SET(set_action_in, action, offset, 0);
MLX5_SET(set_action_in, action, length, 32);
@@ -1205,6 +1209,7 @@ macsec_fs_rx_add_rule(struct mlx5e_macsec_fs *macsec_fs,
rx_rule->rule[1] = rule;
}
+ kvfree(spec);
return macsec_rule;
err:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 24aa25da482b..1728e197558d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -35,7 +35,6 @@
#include "en.h"
#include "en/port.h"
#include "en/params.h"
-#include "en/xsk/pool.h"
#include "en/ptp.h"
#include "lib/clock.h"
#include "en/fs_ethtool.h"
@@ -412,15 +411,8 @@ void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
struct ethtool_channels *ch)
{
mutex_lock(&priv->state_lock);
-
ch->max_combined = priv->max_nch;
ch->combined_count = priv->channels.params.num_channels;
- if (priv->xsk.refcnt) {
- /* The upper half are XSK queues. */
- ch->max_combined *= 2;
- ch->combined_count *= 2;
- }
-
mutex_unlock(&priv->state_lock);
}
@@ -454,16 +446,6 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
mutex_lock(&priv->state_lock);
- /* Don't allow changing the number of channels if there is an active
- * XSK, because the numeration of the XSK and regular RQs will change.
- */
- if (priv->xsk.refcnt) {
- err = -EINVAL;
- netdev_err(priv->netdev, "%s: AF_XDP is active, cannot change the number of channels\n",
- __func__);
- goto out;
- }
-
/* Don't allow changing the number of channels if HTB offload is active,
* because the numeration of the QoS SQs will change, while per-queue
* qdiscs are attached.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 364f04309149..5e41dfdf79c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -206,10 +206,11 @@ static void mlx5e_disable_blocking_events(struct mlx5e_priv *priv)
static u16 mlx5e_mpwrq_umr_octowords(u32 entries, enum mlx5e_mpwrq_umr_mode umr_mode)
{
u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
+ u32 sz;
- WARN_ON_ONCE(entries * umr_entry_size % MLX5_OCTWORD);
+ sz = ALIGN(entries * umr_entry_size, MLX5_UMR_MTT_ALIGNMENT);
- return entries * umr_entry_size / MLX5_OCTWORD;
+ return sz / MLX5_OCTWORD;
}
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
@@ -5694,6 +5695,13 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
mlx5e_fs_set_state_destroy(priv->fs,
!test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ /* Validate the max_wqe_size_sq capability. */
+ if (WARN_ON_ONCE(mlx5e_get_max_sq_wqebbs(priv->mdev) < MLX5E_MAX_TX_WQEBBS)) {
+ mlx5_core_warn(priv->mdev, "MLX5E: Max SQ WQEBBs firmware capability: %u, needed %lu\n",
+ mlx5e_get_max_sq_wqebbs(priv->mdev), MLX5E_MAX_TX_WQEBBS);
+ return -EIO;
+ }
+
/* max number of channels may have changed */
max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
if (priv->channels.params.num_channels > max_nch) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 58084650151f..a61a43fc8d5c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -266,7 +266,7 @@ static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq, union mlx5e_alloc_uni
addr = page_pool_get_dma_addr(au->page);
/* Non-XSK always uses PAGE_SIZE. */
- dma_sync_single_for_device(rq->pdev, addr, PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_sync_single_for_device(rq->pdev, addr, PAGE_SIZE, rq->buff.map_dir);
return true;
}
@@ -282,8 +282,7 @@ static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, union mlx5e_alloc_u
return -ENOMEM;
/* Non-XSK always uses PAGE_SIZE. */
- addr = dma_map_page_attrs(rq->pdev, au->page, 0, PAGE_SIZE,
- rq->buff.map_dir, DMA_ATTR_SKIP_CPU_SYNC);
+ addr = dma_map_page(rq->pdev, au->page, 0, PAGE_SIZE, rq->buff.map_dir);
if (unlikely(dma_mapping_error(rq->pdev, addr))) {
page_pool_recycle_direct(rq->page_pool, au->page);
au->page = NULL;
@@ -427,14 +426,15 @@ mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
{
dma_addr_t addr = page_pool_get_dma_addr(au->page);
- dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len,
+ rq->buff.map_dir);
page_ref_inc(au->page);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
au->page, frag_offset, len, truesize);
}
static inline void
-mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
+mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb,
struct page *page, dma_addr_t addr,
int offset_from, int dma_offset, u32 headlen)
{
@@ -442,7 +442,8 @@ mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
/* Aligning len to sizeof(long) optimizes memcpy performance */
unsigned int len = ALIGN(headlen, sizeof(long));
- dma_sync_single_for_cpu(pdev, addr + dma_offset, len, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(rq->pdev, addr + dma_offset, len,
+ rq->buff.map_dir);
skb_copy_to_linear_data(skb, from, len);
}
@@ -1538,7 +1539,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
addr = page_pool_get_dma_addr(au->page);
dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
- frag_size, DMA_FROM_DEVICE);
+ frag_size, rq->buff.map_dir);
net_prefetch(data);
prog = rcu_dereference(rq->xdp_prog);
@@ -1587,7 +1588,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
addr = page_pool_get_dma_addr(au->page);
dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
- rq->buff.frame0_sz, DMA_FROM_DEVICE);
+ rq->buff.frame0_sz, rq->buff.map_dir);
net_prefetchw(va); /* xdp_frame data area */
net_prefetch(va + rx_headroom);
@@ -1608,7 +1609,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
addr = page_pool_get_dma_addr(au->page);
dma_sync_single_for_cpu(rq->pdev, addr + wi->offset,
- frag_consumed_bytes, DMA_FROM_DEVICE);
+ frag_consumed_bytes, rq->buff.map_dir);
if (!xdp_buff_has_frags(&xdp)) {
/* Init on the first fragment to avoid cold cache access
@@ -1905,7 +1906,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
mlx5e_fill_skb_data(skb, rq, au, byte_cnt, frag_offset);
/* copy header */
addr = page_pool_get_dma_addr(head_au->page);
- mlx5e_copy_skb_header(rq->pdev, skb, head_au->page, addr,
+ mlx5e_copy_skb_header(rq, skb, head_au->page, addr,
head_offset, head_offset, headlen);
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += headlen;
@@ -1939,7 +1940,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
addr = page_pool_get_dma_addr(au->page);
dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset,
- frag_size, DMA_FROM_DEVICE);
+ frag_size, rq->buff.map_dir);
net_prefetch(data);
prog = rcu_dereference(rq->xdp_prog);
@@ -1987,7 +1988,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) {
/* build SKB around header */
- dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, DMA_FROM_DEVICE);
+ dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir);
prefetchw(hdr);
prefetch(data);
skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
@@ -2009,7 +2010,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
}
prefetchw(skb->data);
- mlx5e_copy_skb_header(rq->pdev, skb, head->page, head->addr,
+ mlx5e_copy_skb_header(rq, skb, head->page, head->addr,
head_offset + rx_headroom,
rx_headroom, head_size);
/* skb linear part was allocated with headlen and aligned to long */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index dd6fea9e9a5b..bd9936af4582 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1634,7 +1634,6 @@ set_encap_dests(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr,
struct netlink_ext_ack *extack,
- bool *encap_valid,
bool *vf_tun)
{
struct mlx5e_tc_flow_parse_attr *parse_attr;
@@ -1651,7 +1650,6 @@ set_encap_dests(struct mlx5e_priv *priv,
parse_attr = attr->parse_attr;
esw_attr = attr->esw_attr;
*vf_tun = false;
- *encap_valid = true;
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
struct net_device *out_dev;
@@ -1668,7 +1666,7 @@ set_encap_dests(struct mlx5e_priv *priv,
goto out;
}
err = mlx5e_attach_encap(priv, flow, attr, out_dev, out_index,
- extack, &encap_dev, encap_valid);
+ extack, &encap_dev);
dev_put(out_dev);
if (err)
goto out;
@@ -1732,8 +1730,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_esw_flow_attr *esw_attr;
- bool vf_tun, encap_valid;
u32 max_prio, max_chain;
+ bool vf_tun;
int err = 0;
parse_attr = attr->parse_attr;
@@ -1823,7 +1821,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
esw_attr->int_port = int_port;
}
- err = set_encap_dests(priv, flow, attr, extack, &encap_valid, &vf_tun);
+ err = set_encap_dests(priv, flow, attr, extack, &vf_tun);
if (err)
goto err_out;
@@ -1853,7 +1851,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
* (1) there's no error
* (2) there's an encap action and we don't have valid neigh
*/
- if (!encap_valid || flow_flag_test(flow, SLOW))
+ if (flow_flag_test(flow, SLOW))
flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
else
flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
@@ -3633,10 +3631,14 @@ mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
attr2->action = 0;
attr2->flags = 0;
attr2->parse_attr = parse_attr;
- attr2->esw_attr->out_count = 0;
- attr2->esw_attr->split_count = 0;
attr2->dest_chain = 0;
attr2->dest_ft = NULL;
+
+ if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
+ attr2->esw_attr->out_count = 0;
+ attr2->esw_attr->split_count = 0;
+ }
+
return attr2;
}
@@ -3755,7 +3757,7 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
struct mlx5e_post_act *post_act = get_post_action(flow->priv);
struct mlx5_flow_attr *attr, *next_attr = NULL;
struct mlx5e_post_act_handle *handle;
- bool vf_tun, encap_valid = true;
+ bool vf_tun;
int err;
/* This is going in reverse order as needed.
@@ -3777,13 +3779,10 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
if (list_is_last(&attr->list, &flow->attrs))
break;
- err = set_encap_dests(flow->priv, flow, attr, extack, &encap_valid, &vf_tun);
+ err = set_encap_dests(flow->priv, flow, attr, extack, &vf_tun);
if (err)
goto out_free;
- if (!encap_valid)
- flow_flag_set(flow, SLOW);
-
err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack);
if (err)
goto out_free;
@@ -4758,12 +4757,6 @@ int mlx5e_policer_validate(const struct flow_action *action,
return -EOPNOTSUPP;
}
- if (act->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack,
- "QoS offload not support packets per second");
- return -EOPNOTSUPP;
- }
-
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 6adca01fbdc9..f7897ddb29c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -305,6 +305,8 @@ static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_at
u16 ds_cnt_inl = 0;
u16 ds_cnt_ids = 0;
+ /* Sync the calculation with MLX5E_MAX_TX_WQEBBS. */
+
if (attr->insz)
ds_cnt_ids = DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + attr->insz,
MLX5_SEND_WQE_DS);
@@ -317,6 +319,9 @@ static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_at
inl += VLAN_HLEN;
ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
+ if (WARN_ON_ONCE(ds_cnt_inl > MLX5E_MAX_TX_INLINE_DS))
+ netdev_warn(skb->dev, "ds_cnt_inl = %u > max %u\n", ds_cnt_inl,
+ (u16)MLX5E_MAX_TX_INLINE_DS);
ds_cnt += ds_cnt_inl;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index c59107fa9e6d..374e3fbdc2cf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1362,6 +1362,9 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
devl_rate_nodes_destroy(devlink);
}
+ /* Destroy legacy fdb when disabling sriov in legacy mode. */
+ if (esw->mode == MLX5_ESWITCH_LEGACY)
+ mlx5_eswitch_disable_locked(esw);
esw->esw_funcs.num_vfs = 0;
@@ -1387,12 +1390,14 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw)
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
esw->esw_funcs.num_vfs, esw->enabled_vports);
- esw->fdb_table.flags &= ~MLX5_ESW_FDB_CREATED;
- if (esw->mode == MLX5_ESWITCH_OFFLOADS)
- esw_offloads_disable(esw);
- else if (esw->mode == MLX5_ESWITCH_LEGACY)
- esw_legacy_disable(esw);
- mlx5_esw_acls_ns_cleanup(esw);
+ if (esw->fdb_table.flags & MLX5_ESW_FDB_CREATED) {
+ esw->fdb_table.flags &= ~MLX5_ESW_FDB_CREATED;
+ if (esw->mode == MLX5_ESWITCH_OFFLOADS)
+ esw_offloads_disable(esw);
+ else if (esw->mode == MLX5_ESWITCH_LEGACY)
+ esw_legacy_disable(esw);
+ mlx5_esw_acls_ns_cleanup(esw);
+ }
if (esw->mode == MLX5_ESWITCH_OFFLOADS)
devl_rate_nodes_destroy(devlink);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index f68dc2d0dbe6..3029bc1c0dd0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -736,6 +736,14 @@ void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
struct mlx5_eswitch *slave_esw);
int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);
+static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw)
+{
+ if (mlx5_esw_allowed(esw))
+ return esw->esw_funcs.num_vfs;
+
+ return 0;
+}
+
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 4e50df3139c6..8c6c9bcb3dc3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -433,7 +433,7 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
mlx5_lag_mpesw_is_activated(esw->dev))
dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
}
- if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
+ if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {
if (pkt_reformat) {
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
@@ -2310,7 +2310,7 @@ out_free:
static int esw_offloads_start(struct mlx5_eswitch *esw,
struct netlink_ext_ack *extack)
{
- int err, err1;
+ int err;
esw->mode = MLX5_ESWITCH_OFFLOADS;
err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs);
@@ -2318,11 +2318,6 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch to offloads");
esw->mode = MLX5_ESWITCH_LEGACY;
- err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
- if (err1) {
- NL_SET_ERR_MSG_MOD(extack,
- "Failed setting eswitch back to legacy");
- }
mlx5_rescan_drivers(esw->dev);
}
if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
@@ -3389,19 +3384,19 @@ err_metadata:
static int esw_offloads_stop(struct mlx5_eswitch *esw,
struct netlink_ext_ack *extack)
{
- int err, err1;
+ int err;
esw->mode = MLX5_ESWITCH_LEGACY;
+
+ /* If changing from switchdev to legacy mode without sriov enabled,
+ * no need to create legacy fdb.
+ */
+ if (!mlx5_sriov_is_enabled(esw->dev))
+ return 0;
+
err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
- if (err) {
+ if (err)
NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
- esw->mode = MLX5_ESWITCH_OFFLOADS;
- err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
- if (err1) {
- NL_SET_ERR_MSG_MOD(extack,
- "Failed setting eswitch back to offloads");
- }
- }
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index ee568bf34ae2..edd910258314 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -30,9 +30,9 @@ mlx5_eswitch_termtbl_hash(struct mlx5_flow_act *flow_act,
sizeof(dest->vport.num), hash);
hash = jhash((const void *)&dest->vport.vhca_id,
sizeof(dest->vport.num), hash);
- if (dest->vport.pkt_reformat)
- hash = jhash(dest->vport.pkt_reformat,
- sizeof(*dest->vport.pkt_reformat),
+ if (flow_act->pkt_reformat)
+ hash = jhash(flow_act->pkt_reformat,
+ sizeof(*flow_act->pkt_reformat),
hash);
return hash;
}
@@ -53,9 +53,11 @@ mlx5_eswitch_termtbl_cmp(struct mlx5_flow_act *flow_act1,
if (ret)
return ret;
- return dest1->vport.pkt_reformat && dest2->vport.pkt_reformat ?
- memcmp(dest1->vport.pkt_reformat, dest2->vport.pkt_reformat,
- sizeof(*dest1->vport.pkt_reformat)) : 0;
+ if (flow_act1->pkt_reformat && flow_act2->pkt_reformat)
+ return memcmp(flow_act1->pkt_reformat, flow_act2->pkt_reformat,
+ sizeof(*flow_act1->pkt_reformat));
+
+ return !(flow_act1->pkt_reformat == flow_act2->pkt_reformat);
}
static int
@@ -310,6 +312,8 @@ revert_changes:
for (curr_dest = 0; curr_dest < num_vport_dests; curr_dest++) {
struct mlx5_termtbl_handle *tt = attr->dests[curr_dest].termtbl;
+ attr->dests[curr_dest].termtbl = NULL;
+
/* search for the destination associated with the
* current term table
*/
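The added `attr->dests[curr_dest].termtbl = NULL;` makes the unwind idempotent: once a term-table reference is handed back, clearing the pointer ensures any later pass over the same dests array skips it instead of releasing it twice. The generic shape of the pattern:

    for (i = 0; i < n; i++) {
            if (!objs[i])
                    continue;       /* already released */
            release(objs[i]);
            objs[i] = NULL;         /* make repeated unwinds harmless */
    }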
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 07c583996c29..1e46f9afa40e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -9,7 +9,8 @@ enum {
MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,
MLX5_FW_RESET_FLAGS_PENDING_COMP,
- MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS
+ MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS,
+ MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED
};
struct mlx5_fw_reset {
@@ -152,7 +153,8 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
mlx5_unload_one(dev);
if (mlx5_health_wait_pci_up(dev))
mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
- mlx5_load_one(dev, false);
+ else
+ mlx5_load_one(dev, false);
devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
@@ -405,7 +407,7 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
err = mlx5_pci_link_toggle(dev);
if (err) {
mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, no reset done, err %d\n", err);
- goto done;
+ set_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags);
}
mlx5_enter_error_state(dev, true);
@@ -481,6 +483,10 @@ int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev)
goto out;
}
err = fw_reset->ret;
+ if (test_and_clear_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags)) {
+ mlx5_unload_one_devl_locked(dev);
+ mlx5_load_one_devl_locked(dev, false);
+ }
out:
clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
return err;
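The new RELOAD_REQUIRED flag is a one-shot handoff between contexts: when the PCI link toggle fails, the reset-now handler cannot safely reload the device itself, so it records the fact with set_bit(), and the process-context waiter consumes it with test_and_clear_bit(), whose atomic read-and-clear guarantees the reload runs at most once. In miniature, using the calls from the hunks above:

    /* event / error context: just record the request */
    set_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags);

    /* process context, after the reset completes: consume it exactly once */
    if (test_and_clear_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED,
                           &fw_reset->reset_flags)) {
            mlx5_unload_one_devl_locked(dev);
            mlx5_load_one_devl_locked(dev, false);
    }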
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index a9f4ede4a9bf..32c3e0a649a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -228,9 +228,8 @@ static void mlx5_ldev_free(struct kref *ref)
if (ldev->nb.notifier_call)
unregister_netdevice_notifier_net(&init_net, &ldev->nb);
mlx5_lag_mp_cleanup(ldev);
- mlx5_lag_mpesw_cleanup(ldev);
- cancel_work_sync(&ldev->mpesw_work);
destroy_workqueue(ldev->wq);
+ mlx5_lag_mpesw_cleanup(ldev);
mutex_destroy(&ldev->lock);
kfree(ldev);
}
@@ -701,10 +700,13 @@ static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
return false;
#ifdef CONFIG_MLX5_ESWITCH
- dev = ldev->pf[MLX5_LAG_P1].dev;
- if ((mlx5_sriov_is_enabled(dev)) && !is_mdev_switchdev_mode(dev))
- return false;
+ for (i = 0; i < ldev->ports; i++) {
+ dev = ldev->pf[i].dev;
+ if (mlx5_eswitch_num_vfs(dev->priv.eswitch) && !is_mdev_switchdev_mode(dev))
+ return false;
+ }
+ dev = ldev->pf[MLX5_LAG_P1].dev;
mode = mlx5_eswitch_mode(dev);
for (i = 0; i < ldev->ports; i++)
if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index ce2ce8ccbd70..f30ac2de639f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -50,6 +50,19 @@ struct lag_tracker {
enum netdev_lag_hash hash_type;
};
+enum mpesw_op {
+ MLX5_MPESW_OP_ENABLE,
+ MLX5_MPESW_OP_DISABLE,
+};
+
+struct mlx5_mpesw_work_st {
+ struct work_struct work;
+ struct mlx5_lag *lag;
+ enum mpesw_op op;
+ struct completion comp;
+ int result;
+};
+
/* LAG data of a ConnectX card.
* It serves both its phys functions.
*/
@@ -66,7 +79,6 @@ struct mlx5_lag {
struct lag_tracker tracker;
struct workqueue_struct *wq;
struct delayed_work bond_work;
- struct work_struct mpesw_work;
struct notifier_block nb;
struct lag_mp lag_mp;
struct mlx5_lag_port_sel port_sel;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index f643202b29c6..c17e8f1ec914 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -7,63 +7,95 @@
#include "eswitch.h"
#include "lib/mlx5.h"
-void mlx5_mpesw_work(struct work_struct *work)
+static int add_mpesw_rule(struct mlx5_lag *ldev)
{
- struct mlx5_lag *ldev = container_of(work, struct mlx5_lag, mpesw_work);
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int err;
- mutex_lock(&ldev->lock);
- mlx5_disable_lag(ldev);
- mutex_unlock(&ldev->lock);
-}
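+ /* Only the first rule reference activates LAG in MPESW mode */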
+ if (atomic_add_return(1, &ldev->lag_mpesw.mpesw_rule_count) != 1)
+ return 0;
-static void mlx5_lag_disable_mpesw(struct mlx5_core_dev *dev)
-{
- struct mlx5_lag *ldev = dev->priv.lag;
+ if (ldev->mode != MLX5_LAG_MODE_NONE) {
+ err = -EINVAL;
+ goto out_err;
+ }
- if (!queue_work(ldev->wq, &ldev->mpesw_work))
- mlx5_core_warn(dev, "failed to queue work\n");
+ err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, false);
+ if (err) {
+ mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err);
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ atomic_dec(&ldev->lag_mpesw.mpesw_rule_count);
+ return err;
}
-void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev)
+static void del_mpesw_rule(struct mlx5_lag *ldev)
{
- struct mlx5_lag *ldev = dev->priv.lag;
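+ /* Dropping the last rule reference tears down the MPESW LAG */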
+ if (!atomic_dec_return(&ldev->lag_mpesw.mpesw_rule_count) &&
+ ldev->mode == MLX5_LAG_MODE_MPESW)
+ mlx5_disable_lag(ldev);
+}
- if (!ldev)
- return;
+static void mlx5_mpesw_work(struct work_struct *work)
+{
+ struct mlx5_mpesw_work_st *mpesww = container_of(work, struct mlx5_mpesw_work_st, work);
+ struct mlx5_lag *ldev = mpesww->lag;
mutex_lock(&ldev->lock);
- if (!atomic_dec_return(&ldev->lag_mpesw.mpesw_rule_count) &&
- ldev->mode == MLX5_LAG_MODE_MPESW)
- mlx5_lag_disable_mpesw(dev);
+ if (mpesww->op == MLX5_MPESW_OP_ENABLE)
+ mpesww->result = add_mpesw_rule(ldev);
+ else if (mpesww->op == MLX5_MPESW_OP_DISABLE)
+ del_mpesw_rule(ldev);
mutex_unlock(&ldev->lock);
+
+ complete(&mpesww->comp);
}
-int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)
+static int mlx5_lag_mpesw_queue_work(struct mlx5_core_dev *dev,
+ enum mpesw_op op)
{
struct mlx5_lag *ldev = dev->priv.lag;
+ struct mlx5_mpesw_work_st *work;
int err = 0;
if (!ldev)
return 0;
- mutex_lock(&ldev->lock);
- if (atomic_add_return(1, &ldev->lag_mpesw.mpesw_rule_count) != 1)
- goto out;
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ return -ENOMEM;
- if (ldev->mode != MLX5_LAG_MODE_NONE) {
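+ /* Run the op on the LAG workqueue and wait for its completion, so it
+  * is serialized with other LAG state changes without holding locks here.
+  */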
+ INIT_WORK(&work->work, mlx5_mpesw_work);
+ init_completion(&work->comp);
+ work->op = op;
+ work->lag = ldev;
+
+ if (!queue_work(ldev->wq, &work->work)) {
+ mlx5_core_warn(dev, "failed to queue mpesw work\n");
err = -EINVAL;
goto out;
}
-
- err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, false);
- if (err)
- mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err);
-
+ wait_for_completion(&work->comp);
+ err = work->result;
out:
- mutex_unlock(&ldev->lock);
+ kfree(work);
return err;
}
+void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev)
+{
+ mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_DISABLE);
+}
+
+int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)
+{
+ return mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_ENABLE);
+}
+
int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev)
{
struct mlx5_lag *ldev = mdev->priv.lag;
@@ -71,12 +103,9 @@ int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev)
if (!netif_is_bond_master(out_dev) || !ldev)
return 0;
- mutex_lock(&ldev->lock);
- if (ldev->mode == MLX5_LAG_MODE_MPESW) {
- mutex_unlock(&ldev->lock);
+ if (ldev->mode == MLX5_LAG_MODE_MPESW)
return -EOPNOTSUPP;
- }
- mutex_unlock(&ldev->lock);
+
return 0;
}
@@ -90,11 +119,10 @@ bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev)
void mlx5_lag_mpesw_init(struct mlx5_lag *ldev)
{
- INIT_WORK(&ldev->mpesw_work, mlx5_mpesw_work);
atomic_set(&ldev->lag_mpesw.mpesw_rule_count, 0);
}
void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev)
{
- cancel_delayed_work_sync(&ldev->bond_work);
+ WARN_ON(atomic_read(&ldev->lag_mpesw.mpesw_rule_count));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
index be4abcb8fcd5..88e8daffcf92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
@@ -12,7 +12,6 @@ struct lag_mpesw {
atomic_t mpesw_rule_count;
};
-void mlx5_mpesw_work(struct work_struct *work);
int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev);
bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev);
#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 283c4cc28944..e58775a7d955 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1798,7 +1798,8 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
res = state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
- mlx5_pci_trace(dev, "Exit, result = %d, %s\n", res, result2str(res));
+ mlx5_core_info(dev, "%s Device state = %d pci_status: %d. Exit, result = %d, %s\n",
+ __func__, dev->state, dev->pci_status, res, result2str(res));
return res;
}
@@ -1837,7 +1838,8 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
int err;
- mlx5_pci_trace(dev, "Enter\n");
+ mlx5_core_info(dev, "%s Device state = %d pci_status: %d. Enter\n",
+ __func__, dev->state, dev->pci_status);
err = mlx5_pci_enable_device(dev);
if (err) {
@@ -1859,7 +1861,8 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
res = PCI_ERS_RESULT_RECOVERED;
out:
- mlx5_pci_trace(dev, "Exit, err = %d, result = %d, %s\n", err, res, result2str(res));
+ mlx5_core_info(dev, "%s Device state = %d pci_status: %d. Exit, err = %d, result = %d, %s\n",
+ __func__, dev->state, dev->pci_status, err, res, result2str(res));
return res;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
index 7da012ff0d41..8e2abbab05f0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -18,6 +18,10 @@ struct mlx5_sf_dev_table {
phys_addr_t base_address;
u64 sf_bar_length;
struct notifier_block nb;
+ struct mutex table_lock; /* Serializes sf life cycle and vhca state change handler */
+ struct workqueue_struct *active_wq;
+ struct work_struct work;
+ u8 stop_active_wq:1;
struct mlx5_core_dev *dev;
};
@@ -168,6 +172,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
return 0;
sf_index = event->function_id - base_id;
+ mutex_lock(&table->table_lock);
sf_dev = xa_load(&table->devices, sf_index);
switch (event->new_vhca_state) {
case MLX5_VHCA_STATE_INVALID:
@@ -191,6 +196,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
default:
break;
}
+ mutex_unlock(&table->table_lock);
return 0;
}
@@ -215,6 +221,78 @@ static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table)
return 0;
}
+static void mlx5_sf_dev_add_active_work(struct work_struct *work)
+{
+ struct mlx5_sf_dev_table *table = container_of(work, struct mlx5_sf_dev_table, work);
+ u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
+ struct mlx5_core_dev *dev = table->dev;
+ u16 max_functions;
+ u16 function_id;
+ u16 sw_func_id;
+ int err = 0;
+ u8 state;
+ int i;
+
+ max_functions = mlx5_sf_max_functions(dev);
+ function_id = MLX5_CAP_GEN(dev, sf_base_id);
+ for (i = 0; i < max_functions; i++, function_id++) {
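+ /* Bail out if the table is being destroyed */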
+ if (table->stop_active_wq)
+ return;
+ err = mlx5_cmd_query_vhca_state(dev, function_id, out, sizeof(out));
+ if (err)
+ /* A failure of a specific vhca doesn't mean others will
+ * fail as well.
+ */
+ continue;
+ state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state);
+ if (state != MLX5_VHCA_STATE_ACTIVE)
+ continue;
+
+ sw_func_id = MLX5_GET(query_vhca_state_out, out, vhca_state_context.sw_function_id);
+ mutex_lock(&table->table_lock);
+ /* Don't probe a device which is already probed */
+ if (!xa_load(&table->devices, i))
+ mlx5_sf_dev_add(dev, i, function_id, sw_func_id);
+ /* There is a race where the SF becomes inactive after the
+ * query above, e.g. the query returns that the SF is active
+ * and the eswitch manager then sets it to inactive.
+ * This case cannot be handled in SW, since the SF is probed
+ * on one system while the inactivation is done on another.
+ * If the inactivation happens after the SF performs init_hca(),
+ * the SF will be fully probed and then removed. If it happens
+ * before init_hca(), the SF probe will fail.
+ */
+ mutex_unlock(&table->table_lock);
+ }
+}
+
+/* In case SFs are generated externally, probe active SFs */
+static int mlx5_sf_dev_queue_active_work(struct mlx5_sf_dev_table *table)
+{
+ if (MLX5_CAP_GEN(table->dev, eswitch_manager))
+ return 0; /* the table is local */
+
+ /* Use a workqueue to probe active SFs, which can be numerous
+ * and may take minutes to probe.
+ */
+ table->active_wq = create_singlethread_workqueue("mlx5_active_sf");
+ if (!table->active_wq)
+ return -ENOMEM;
+ INIT_WORK(&table->work, &mlx5_sf_dev_add_active_work);
+ queue_work(table->active_wq, &table->work);
+ return 0;
+}
+
+static void mlx5_sf_dev_destroy_active_work(struct mlx5_sf_dev_table *table)
+{
+ if (table->active_wq) {
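+ /* Tell the worker to stop, then flush it via destroy_workqueue() */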
+ table->stop_active_wq = true;
+ destroy_workqueue(table->active_wq);
+ }
+}
+
void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
{
struct mlx5_sf_dev_table *table;
@@ -240,11 +318,17 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
table->base_address = pci_resource_start(dev->pdev, 2);
table->max_sfs = max_sfs;
xa_init(&table->devices);
+ mutex_init(&table->table_lock);
dev->priv.sf_dev_table = table;
err = mlx5_vhca_event_notifier_register(dev, &table->nb);
if (err)
goto vhca_err;
+
+ err = mlx5_sf_dev_queue_active_work(table);
+ if (err)
+ goto add_active_err;
+
err = mlx5_sf_dev_vhca_arm_all(table);
if (err)
goto arm_err;
@@ -252,6 +336,8 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
return;
arm_err:
+ mlx5_sf_dev_destroy_active_work(table);
+add_active_err:
mlx5_vhca_event_notifier_unregister(dev, &table->nb);
vhca_err:
table->max_sfs = 0;
@@ -279,7 +365,9 @@ void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
if (!table)
return;
+ mlx5_sf_dev_destroy_active_work(table);
mlx5_vhca_event_notifier_unregister(dev, &table->nb);
+ mutex_destroy(&table->table_lock);
/* Now that the event handler is not running, it is safe to destroy
* the sf device without a race.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
index 31d443dd8386..f68461b13391 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
@@ -46,7 +46,7 @@ static int dr_table_set_miss_action_nic(struct mlx5dr_domain *dmn,
int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
struct mlx5dr_action *action)
{
- int ret;
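+ /* Returned unchanged if nothing below overwrites it */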
+ int ret = -EOPNOTSUPP;
if (action && action->action_type != DR_ACTION_TYP_FT)
return -EOPNOTSUPP;
@@ -67,6 +67,9 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
goto out;
}
+ if (ret)
+ goto out;
+
/* Release old action */
if (tbl->miss_action)
refcount_dec(&tbl->miss_action->refcount);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 4efccd942fb8..1290b2d3eae6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -3470,6 +3470,8 @@ mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
u16 vid;
vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
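+ /* Nothing to delete if the entry was never offloaded */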
+ if (!vxlan_fdb_info->offloaded)
+ return;
bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
if (!bridge_device)