 24 files changed, 665 insertions(+), 207 deletions(-)
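Before the per-file hunks, a short orientation: the series routes all users of the per-device debugfs directory through a new exported accessor, mlx5_debugfs_get_dev_root(), groups the driver's debugfs dentries under priv.dbg, and adds failure counters ("failed", "failed_mbox_status", "last_failed_errno", "last_failed_mbox_status") plus a "pages" directory. The following is a minimal sketch of the resulting usage pattern, not code from the patch; it assumes the accessor is declared in linux/mlx5/driver.h, and the "foo" component and its counter are purely illustrative.

/*
 * Sketch only: how an mlx5 sub-component could hang its debugfs files off
 * the per-device root via the accessor exported by this series, instead of
 * dereferencing dev->priv.dbg_root directly.
 */
#include <linux/debugfs.h>
#include <linux/mlx5/driver.h>	/* mlx5_core_dev, mlx5_debugfs_get_dev_root() */

static struct dentry *foo_dbg_root;
static u64 foo_failed;

static void foo_debugfs_init(struct mlx5_core_dev *dev)
{
	/* Directory is created under the per-device mlx5 debugfs root */
	foo_dbg_root = debugfs_create_dir("foo", mlx5_debugfs_get_dev_root(dev));

	/* Read-only counter, same style as the new "failed" command stats files */
	debugfs_create_u64("failed", 0400, foo_dbg_root, &foo_failed);
}

static void foo_debugfs_cleanup(void)
{
	debugfs_remove_recursive(foo_dbg_root);
}

Keeping callers behind the accessor is what lets the series relocate the dentry from priv.dbg_root to priv.dbg.dbg_root without touching out-of-module users such as mlx5_ib, as the hunks below show.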
diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c index 0b61df52332a..290ea8ac3838 100644 --- a/drivers/infiniband/hw/mlx5/cong.c +++ b/drivers/infiniband/hw/mlx5/cong.c @@ -433,8 +433,7 @@ void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num) dev->port[port_num].dbg_cc_params = dbg_cc_params; - dbg_cc_params->root = debugfs_create_dir("cc_params", - mdev->priv.dbg_root); + dbg_cc_params->root = debugfs_create_dir("cc_params", mlx5_debugfs_get_dev_root(mdev)); for (i = 0; i < MLX5_IB_DBG_CC_MAX; i++) { dbg_cc_params->params[i].offset = i; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 85f526c861e9..32a0ea820573 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -4178,7 +4178,7 @@ static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev) if (!mlx5_debugfs_root) return 0; - root = debugfs_create_dir("delay_drop", dev->mdev->priv.dbg_root); + root = debugfs_create_dir("delay_drop", mlx5_debugfs_get_dev_root(dev->mdev)); dev->delay_drop.dir_debugfs = root; debugfs_create_atomic_t("num_timeout_events", 0400, root, diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 06e4b8cea6bd..32cb7068f0ca 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -696,7 +696,7 @@ static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev) if (!mlx5_debugfs_root || dev->is_rep) return; - cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root); + cache->root = debugfs_create_dir("mr_cache", mlx5_debugfs_get_dev_root(dev->mdev)); for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) { ent = &cache->ent[i]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index a7170ab3af97..b94cca45eb78 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -103,6 +103,7 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o steering/dr_icm_pool.o steering/dr_buddy.o \ steering/dr_ste.o steering/dr_send.o \ steering/dr_ste_v0.o steering/dr_ste_v1.o \ + steering/dr_ste_v2.o \ steering/dr_cmd.o steering/dr_fw.o \ steering/dr_action.o steering/fs_dr.o \ steering/dr_dbg.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 823d5808d5a0..3b61224e9a3a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -1543,7 +1543,7 @@ static void create_debugfs_files(struct mlx5_core_dev *dev) { struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; - dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root); + dbg->dbg_root = debugfs_create_dir("cmd", mlx5_debugfs_get_dev_root(dev)); debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops); debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops); @@ -1877,16 +1877,38 @@ out_in: return err; } +static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status, int err) +{ + struct mlx5_cmd_stats *stats; + + if (!err) + return; + + stats = &dev->cmd.stats[opcode]; + spin_lock_irq(&stats->lock); + stats->failed++; + if (err < 0) + stats->last_failed_errno = -err; + if (err == -EREMOTEIO) { + stats->failed_mbox_status++; + stats->last_failed_mbox_status = status; + } + spin_unlock_irq(&stats->lock); +} + /* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */ -static int cmd_status_err(int 
err, void *out) +static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, void *out) { - if (err) /* -EREMOTEIO is preserved */ - return err == -EREMOTEIO ? -EIO : err; + u8 status = MLX5_GET(mbox_out, out, status); - if (MLX5_GET(mbox_out, out, status) != MLX5_CMD_STAT_OK) - return -EREMOTEIO; + if (err == -EREMOTEIO) /* -EREMOTEIO is preserved */ + err = -EIO; - return 0; + if (!err && status != MLX5_CMD_STAT_OK) + err = -EREMOTEIO; + + cmd_status_log(dev, opcode, status, err); + return err; } /** @@ -1910,8 +1932,10 @@ static int cmd_status_err(int err, void *out) int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size) { int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false); + u16 opcode = MLX5_GET(mbox_in, in, opcode); - return cmd_status_err(err, out); + err = cmd_status_err(dev, err, opcode, out); + return err; } EXPORT_SYMBOL(mlx5_cmd_do); @@ -1954,8 +1978,9 @@ int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size) { int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true); + u16 opcode = MLX5_GET(mbox_in, in, opcode); - err = cmd_status_err(err, out); + err = cmd_status_err(dev, err, opcode, out); return mlx5_cmd_check(dev, err, in, out); } EXPORT_SYMBOL(mlx5_cmd_exec_polling); @@ -1991,7 +2016,7 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work) struct mlx5_async_ctx *ctx; ctx = work->ctx; - status = cmd_status_err(status, work->out); + status = cmd_status_err(ctx->dev, status, work->opcode, work->out); work->user_callback(status, work); if (atomic_dec_and_test(&ctx->num_inflight)) wake_up(&ctx->wait); @@ -2005,6 +2030,7 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, work->ctx = ctx; work->user_callback = callback; + work->opcode = MLX5_GET(mbox_in, in, opcode); work->out = out; if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight))) return -EIO; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 10d195042ab5..d69bac93a83b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -99,26 +99,32 @@ void mlx5_unregister_debugfs(void) debugfs_remove(mlx5_debugfs_root); } +struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev) +{ + return dev->priv.dbg.dbg_root; +} +EXPORT_SYMBOL(mlx5_debugfs_get_dev_root); + void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev) { - dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root); + dev->priv.dbg.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg.dbg_root); } EXPORT_SYMBOL(mlx5_qp_debugfs_init); void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev) { - debugfs_remove_recursive(dev->priv.qp_debugfs); + debugfs_remove_recursive(dev->priv.dbg.qp_debugfs); } EXPORT_SYMBOL(mlx5_qp_debugfs_cleanup); void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev) { - dev->priv.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg_root); + dev->priv.dbg.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg.dbg_root); } void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev) { - debugfs_remove_recursive(dev->priv.eq_debugfs); + debugfs_remove_recursive(dev->priv.dbg.eq_debugfs); } static ssize_t average_read(struct file *filp, char __user *buf, size_t count, @@ -168,8 +174,8 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev) const char *namep; int i; - cmd = &dev->priv.cmdif_debugfs; - *cmd = debugfs_create_dir("commands", 
dev->priv.dbg_root); + cmd = &dev->priv.dbg.cmdif_debugfs; + *cmd = debugfs_create_dir("commands", dev->priv.dbg.dbg_root); for (i = 0; i < MLX5_CMD_OP_MAX; i++) { stats = &dev->cmd.stats[i]; @@ -180,23 +186,51 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev) debugfs_create_file("average", 0400, stats->root, stats, &stats_fops); debugfs_create_u64("n", 0400, stats->root, &stats->n); + debugfs_create_u64("failed", 0400, stats->root, &stats->failed); + debugfs_create_u64("failed_mbox_status", 0400, stats->root, + &stats->failed_mbox_status); + debugfs_create_u32("last_failed_errno", 0400, stats->root, + &stats->last_failed_errno); + debugfs_create_u8("last_failed_mbox_status", 0400, stats->root, + &stats->last_failed_mbox_status); } } } void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev) { - debugfs_remove_recursive(dev->priv.cmdif_debugfs); + debugfs_remove_recursive(dev->priv.dbg.cmdif_debugfs); } void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev) { - dev->priv.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg_root); + dev->priv.dbg.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg.dbg_root); } void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev) { - debugfs_remove_recursive(dev->priv.cq_debugfs); + debugfs_remove_recursive(dev->priv.dbg.cq_debugfs); +} + +void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev) +{ + struct dentry *pages; + + dev->priv.dbg.pages_debugfs = debugfs_create_dir("pages", dev->priv.dbg.dbg_root); + pages = dev->priv.dbg.pages_debugfs; + + debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages); + debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.vfs_pages); + debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.host_pf_pages); + debugfs_create_u32("fw_pages_alloc_failed", 0400, pages, &dev->priv.fw_pages_alloc_failed); + debugfs_create_u32("fw_pages_give_dropped", 0400, pages, &dev->priv.give_pages_dropped); + debugfs_create_u32("fw_pages_reclaim_discard", 0400, pages, + &dev->priv.reclaim_pages_discard); +} + +void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev) +{ + debugfs_remove_recursive(dev->priv.dbg.pages_debugfs); } static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, @@ -441,7 +475,7 @@ int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp) if (!mlx5_debugfs_root) return 0; - err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.qp_debugfs, + err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.dbg.qp_debugfs, &qp->dbg, qp->qpn, qp_fields, ARRAY_SIZE(qp_fields), qp); if (err) @@ -468,7 +502,7 @@ int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq) if (!mlx5_debugfs_root) return 0; - err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.eq_debugfs, + err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.dbg.eq_debugfs, &eq->dbg, eq->eqn, eq_fields, ARRAY_SIZE(eq_fields), eq); if (err) @@ -493,7 +527,7 @@ int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) if (!mlx5_debugfs_root) return 0; - err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.cq_debugfs, + err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.dbg.cq_debugfs, &cq->dbg, cq->cqn, cq_fields, ARRAY_SIZE(cq_fields), cq); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 40416e0a8bb1..e3fc15ae7bb1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -3410,7 +3410,7 @@ mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr 
*attr, if (!attr2 || !parse_attr) { kvfree(parse_attr); kfree(attr2); - return attr2; + return NULL; } memcpy(attr2, attr, attr_sz); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 98be7050aa8d..d8d36477b97f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1487,8 +1487,8 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) INIT_LIST_HEAD(&priv->pgdir_list); priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev)); - priv->dbg_root = debugfs_create_dir(dev_name(dev->device), - mlx5_debugfs_root); + priv->dbg.dbg_root = debugfs_create_dir(dev_name(dev->device), + mlx5_debugfs_root); INIT_LIST_HEAD(&priv->traps); err = mlx5_tout_init(dev); @@ -1524,7 +1524,7 @@ err_pagealloc_init: err_health_init: mlx5_tout_cleanup(dev); err_timeout_init: - debugfs_remove(dev->priv.dbg_root); + debugfs_remove(dev->priv.dbg.dbg_root); mutex_destroy(&priv->pgdir_mutex); mutex_destroy(&priv->alloc_mutex); mutex_destroy(&priv->bfregs.wc_head.lock); @@ -1542,7 +1542,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev) mlx5_pagealloc_cleanup(dev); mlx5_health_cleanup(dev); mlx5_tout_cleanup(dev); - debugfs_remove_recursive(dev->priv.dbg_root); + debugfs_remove_recursive(dev->priv.dbg.dbg_root); mutex_destroy(&priv->pgdir_mutex); mutex_destroy(&priv->alloc_mutex); mutex_destroy(&priv->bfregs.wc_head.lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index f6b5451328fc..e0543b860144 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -327,11 +327,12 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id, } static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, - int notify_fail, bool ec_function) + int event, bool ec_function) { u32 function = get_function(func_id, ec_function); u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0}; int inlen = MLX5_ST_SZ_BYTES(manage_pages_in); + int notify_fail = event; u64 addr; int err; u32 *in; @@ -351,8 +352,10 @@ retry: if (err) { if (err == -ENOMEM) err = alloc_system_page(dev, function); - if (err) + if (err) { + dev->priv.fw_pages_alloc_failed += (npages - i); goto out_4k; + } goto retry; } @@ -365,11 +368,20 @@ retry: MLX5_SET(manage_pages_in, in, input_num_entries, npages); MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function); - err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); + err = mlx5_cmd_do(dev, in, inlen, out, sizeof(out)); + if (err == -EREMOTEIO) { + notify_fail = 0; + /* if triggered by FW and failed by FW ignore */ + if (event) { + err = 0; + goto out_dropped; + } + } if (err) { + err = mlx5_cmd_check(dev, err, in, out); mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err); - goto out_4k; + goto out_dropped; } dev->priv.fw_pages += npages; @@ -384,6 +396,8 @@ retry: kvfree(in); return 0; +out_dropped: + dev->priv.give_pages_dropped += npages; out_4k: for (i--; i >= 0; i--) free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function); @@ -455,7 +469,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev, u32 i = 0; if (!mlx5_cmd_is_down(dev)) - return mlx5_cmd_exec(dev, in, in_size, out, out_size); + return mlx5_cmd_do(dev, in, in_size, out, out_size); /* No hard feelings, we want our pages back! 
*/ npages = MLX5_GET(manage_pages_in, in, input_num_entries); @@ -479,7 +493,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev, } static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, - int *nclaimed, bool ec_function) + int *nclaimed, bool event, bool ec_function) { u32 function = get_function(func_id, ec_function); int outlen = MLX5_ST_SZ_BYTES(manage_pages_out); @@ -507,6 +521,14 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, func_id, npages, outlen); err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen); if (err) { + npages = MLX5_GET(manage_pages_in, in, input_num_entries); + dev->priv.reclaim_pages_discard += npages; + } + /* if triggered by FW event and failed by FW then ignore */ + if (event && err == -EREMOTEIO) + err = 0; + if (err) { + err = mlx5_cmd_check(dev, err, in, out); mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err); goto out_free; } @@ -546,7 +568,7 @@ static void pages_work_handler(struct work_struct *work) release_all_pages(dev, req->func_id, req->ec_function); else if (req->npages < 0) err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL, - req->ec_function); + true, req->ec_function); else if (req->npages > 0) err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function); @@ -645,7 +667,7 @@ static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev, int err; err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(), - &nclaimed, mlx5_core_is_ecpf(dev)); + &nclaimed, false, mlx5_core_is_ecpf(dev)); if (err) { mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n", err, func_id); @@ -700,12 +722,14 @@ int mlx5_pagealloc_init(struct mlx5_core_dev *dev) return -ENOMEM; xa_init(&dev->priv.page_root_xa); + mlx5_pages_debugfs_init(dev); return 0; } void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev) { + mlx5_pages_debugfs_cleanup(dev); xa_destroy(&dev->priv.page_root_xa); destroy_workqueue(dev->priv.pg_wq); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index c61a5e83c78c..743422acc3d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -669,15 +669,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, case DR_ACTION_TYP_VPORT: attr.hit_gvmi = action->vport->caps->vhca_gvmi; dest_action = action; - if (rx_rule) { - if (action->vport->caps->num == MLX5_VPORT_UPLINK) { - mlx5dr_dbg(dmn, "Device doesn't support Loopback on WIRE vport\n"); - return -EOPNOTSUPP; - } - attr.final_icm_addr = action->vport->caps->icm_address_rx; - } else { - attr.final_icm_addr = action->vport->caps->icm_address_tx; - } + attr.final_icm_addr = rx_rule ? 
+ action->vport->caps->icm_address_rx : + action->vport->caps->icm_address_tx; break; case DR_ACTION_TYP_POP_VLAN: if (!rx_rule && !(dmn->ste_ctx->actions_caps & diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c index 2784cd59fefe..2e8b109fb34e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c @@ -630,7 +630,7 @@ void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn) } dmn->dump_info.steering_debugfs = - debugfs_create_dir("steering", dev->priv.dbg_root); + debugfs_create_dir("steering", mlx5_debugfs_get_dev_root(dev)); dmn->dump_info.fdb_debugfs = debugfs_create_dir("fdb", dmn->dump_info.steering_debugfs); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c index 5fa7f9d6d8b9..fc6ae49b5ecc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c @@ -8,7 +8,7 @@ #define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \ ((dmn)->info.caps.dmn_type##_sw_owner || \ ((dmn)->info.caps.dmn_type##_sw_owner_v2 && \ - (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX)) + (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_7)) static void dr_domain_init_csum_recalc_fts(struct mlx5dr_domain *dmn) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c index 38971fe1dfe1..a4b5b415df90 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c @@ -47,6 +47,11 @@ static bool dr_mask_is_ttl_set(struct mlx5dr_match_spec *spec) return spec->ttl_hoplimit; } +static bool dr_mask_is_ipv4_ihl_set(struct mlx5dr_match_spec *spec) +{ + return spec->ipv4_ihl; +} + #define DR_MASK_IS_L2_DST(_spec, _misc, _inner_outer) (_spec.first_vid || \ (_spec).first_cfi || (_spec).first_prio || (_spec).cvlan_tag || \ (_spec).svlan_tag || (_spec).dmac_47_16 || (_spec).dmac_15_0 || \ @@ -103,7 +108,7 @@ dr_mask_is_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3) static bool dr_matcher_supp_vxlan_gpe(struct mlx5dr_cmd_caps *caps) { - return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) || + return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) || (caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED); } @@ -144,7 +149,7 @@ static bool dr_mask_is_tnl_geneve_tlv_opt_exist_set(struct mlx5dr_match_misc *mi static bool dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps) { - return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) || + return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) || (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_ENABLED); } @@ -261,13 +266,13 @@ static bool dr_mask_is_tnl_gtpu_any(struct mlx5dr_match_param *mask, static int dr_matcher_supp_icmp_v4(struct mlx5dr_cmd_caps *caps) { - return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) || + return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) || (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED); } static int dr_matcher_supp_icmp_v6(struct mlx5dr_cmd_caps *caps) { - return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) || + return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) || (caps->flex_protocols & 
MLX5_FLEX_PARSER_ICMP_V6_ENABLED); } @@ -507,7 +512,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++], &mask, inner, rx); - if (dr_mask_is_ttl_set(&mask.outer)) + if (dr_mask_is_ttl_set(&mask.outer) || + dr_mask_is_ipv4_ihl_set(&mask.outer)) mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++], &mask, inner, rx); } @@ -614,7 +620,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++], &mask, inner, rx); - if (dr_mask_is_ttl_set(&mask.inner)) + if (dr_mask_is_ttl_set(&mask.inner) || + dr_mask_is_ipv4_ihl_set(&mask.inner)) mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++], &mask, inner, rx); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c index 187e29b409b6..518e949847a3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c @@ -523,8 +523,8 @@ void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_ste_actions_attr *attr, u32 *added_stes) { - ste_ctx->set_actions_tx(dmn, action_type_set, hw_ste_arr, - attr, added_stes); + ste_ctx->set_actions_tx(dmn, action_type_set, ste_ctx->actions_caps, + hw_ste_arr, attr, added_stes); } void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx, @@ -534,8 +534,8 @@ void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_ste_actions_attr *attr, u32 *added_stes) { - ste_ctx->set_actions_rx(dmn, action_type_set, hw_ste_arr, - attr, added_stes); + ste_ctx->set_actions_rx(dmn, action_type_set, ste_ctx->actions_caps, + hw_ste_arr, attr, added_stes); } const struct mlx5dr_ste_action_modify_field * @@ -793,6 +793,7 @@ static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec, bo spec->tcp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_sport, clr); spec->tcp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_dport, clr); + spec->ipv4_ihl = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ipv4_ihl, clr); spec->ttl_hoplimit = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ttl_hoplimit, clr); spec->udp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_sport, clr); @@ -1360,15 +1361,14 @@ void mlx5dr_ste_build_tnl_header_0_1(struct mlx5dr_ste_ctx *ste_ctx, ste_ctx->build_tnl_header_0_1_init(sb, mask); } -static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = { - [MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0, - [MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1, -}; - struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version) { - if (version > MLX5_STEERING_FORMAT_CONNECTX_6DX) - return NULL; + if (version == MLX5_STEERING_FORMAT_CONNECTX_5) + return mlx5dr_ste_get_ctx_v0(); + else if (version == MLX5_STEERING_FORMAT_CONNECTX_6DX) + return mlx5dr_ste_get_ctx_v1(); + else if (version == MLX5_STEERING_FORMAT_CONNECTX_7) + return mlx5dr_ste_get_ctx_v2(); - return mlx5dr_ste_ctx_arr[version]; + return NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h index ca8fa32b8680..17513baff9b0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h @@ -161,11 +161,13 @@ struct mlx5dr_ste_ctx { u32 actions_caps; void (*set_actions_rx)(struct mlx5dr_domain *dmn, u8 *action_type_set, + u32 actions_caps, u8 *hw_ste_arr, struct 
mlx5dr_ste_actions_attr *attr, u32 *added_stes); void (*set_actions_tx)(struct mlx5dr_domain *dmn, u8 *action_type_set, + u32 actions_caps, u8 *hw_ste_arr, struct mlx5dr_ste_actions_attr *attr, u32 *added_stes); @@ -197,7 +199,8 @@ struct mlx5dr_ste_ctx { void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size); }; -extern struct mlx5dr_ste_ctx ste_ctx_v0; -extern struct mlx5dr_ste_ctx ste_ctx_v1; +struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v0(void); +struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void); +struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void); #endif /* _DR_STE_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c index 2d62950f7a29..5a322335f204 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c @@ -408,6 +408,7 @@ static void dr_ste_v0_arr_init_next(u8 **last_ste, static void dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn, u8 *action_type_set, + u32 actions_caps, u8 *last_ste, struct mlx5dr_ste_actions_attr *attr, u32 *added_stes) @@ -477,6 +478,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn, static void dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn, u8 *action_type_set, + u32 actions_caps, u8 *last_ste, struct mlx5dr_ste_actions_attr *attr, u32 *added_stes) @@ -1152,6 +1154,7 @@ dr_ste_v0_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value, struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer; DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit); + DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, ihl, spec, ipv4_ihl); return 0; } @@ -1897,7 +1900,7 @@ static void dr_ste_v0_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb, sb->ste_build_tag_func = &dr_ste_v0_build_tnl_header_0_1_tag; } -struct mlx5dr_ste_ctx ste_ctx_v0 = { +static struct mlx5dr_ste_ctx ste_ctx_v0 = { /* Builders */ .build_eth_l2_src_dst_init = &dr_ste_v0_build_eth_l2_src_dst_init, .build_eth_l3_ipv6_src_init = &dr_ste_v0_build_eth_l3_ipv6_src_init, @@ -1950,3 +1953,8 @@ struct mlx5dr_ste_ctx ste_ctx_v0 = { .set_action_copy = &dr_ste_v0_set_action_copy, .set_action_decap_l3_list = &dr_ste_v0_set_action_decap_l3_list, }; + +struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v0(void) +{ + return &ste_ctx_v0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c index 6ca06800f1d9..fcb962c6db2e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c @@ -3,7 +3,7 @@ #include <linux/types.h> #include "mlx5_ifc_dr_ste_v1.h" -#include "dr_ste.h" +#include "dr_ste_v1.h" #define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \ ((inner) ? 
DR_STE_V1_LU_TYPE_##lookup_type##_I : \ @@ -121,12 +121,12 @@ enum { DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70, DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b, DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c, - DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2 = 0x8c, - DR_STE_V1_ACTION_MDFY_FLD_REGISTER_3 = 0x8d, - DR_STE_V1_ACTION_MDFY_FLD_REGISTER_4 = 0x8e, - DR_STE_V1_ACTION_MDFY_FLD_REGISTER_5 = 0x8f, - DR_STE_V1_ACTION_MDFY_FLD_REGISTER_6 = 0x90, - DR_STE_V1_ACTION_MDFY_FLD_REGISTER_7 = 0x91, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0 = 0x8c, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1 = 0x8d, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0 = 0x8e, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1 = 0x8f, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0 = 0x90, + DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91, }; static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = { @@ -223,22 +223,22 @@ static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field .hw_field = DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31, }, [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = { - .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_6, .start = 0, .end = 31, + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31, }, [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = { - .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_7, .start = 0, .end = 31, + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31, }, [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = { - .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_4, .start = 0, .end = 31, + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31, }, [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = { - .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_5, .start = 0, .end = 31, + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31, }, [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = { - .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2, .start = 0, .end = 31, + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31, }, [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = { - .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_3, .start = 0, .end = 31, + .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31, }, [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = { .hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31, @@ -262,7 +262,7 @@ static void dr_ste_v1_set_entry_type(u8 *hw_ste_p, u8 entry_type) MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, entry_type); } -static void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr) +void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr) { u64 index = miss_addr >> 6; @@ -270,7 +270,7 @@ static void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr) MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6, index); } -static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p) +u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p) { u64 index = ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) | @@ -279,12 +279,12 @@ static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p) return index << 6; } -static void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask) +void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask) { MLX5_SET(ste_match_bwc_v1, hw_ste_p, byte_mask, byte_mask); } -static u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p) +u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p) { return MLX5_GET(ste_match_bwc_v1, hw_ste_p, byte_mask); } @@ -295,13 +295,13 @@ static void dr_ste_v1_set_lu_type(u8 *hw_ste_p, u16 lu_type) MLX5_SET(ste_match_bwc_v1, 
hw_ste_p, match_definer_ctx_idx, lu_type & 0xFF); } -static void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type) +void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type) { MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_entry_format, lu_type >> 8); MLX5_SET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx, lu_type & 0xFF); } -static u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p) +u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p) { u8 mode = MLX5_GET(ste_match_bwc_v1, hw_ste_p, next_entry_format); u8 index = MLX5_GET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx); @@ -314,7 +314,7 @@ static void dr_ste_v1_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi) MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi); } -static void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size) +void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size) { u64 index = (icm_addr >> 5) | ht_size; @@ -322,8 +322,7 @@ static void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size) MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_31_5_size, index); } -static void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, - bool is_rx, u16 gvmi) +void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi) { dr_ste_v1_set_lu_type(hw_ste_p, lu_type); dr_ste_v1_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE); @@ -333,8 +332,7 @@ static void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_63_48, gvmi); } -static void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, - u32 ste_size) +void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size) { u8 *tag = hw_ste_p + DR_STE_SIZE_CTRL; u8 *mask = tag + DR_STE_SIZE_TAG; @@ -511,11 +509,12 @@ static void dr_ste_v1_arr_init_next_match(u8 **last_ste, memset(action, 0, MLX5_FLD_SZ_BYTES(ste_mask_and_match_v1, action)); } -static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, - u8 *action_type_set, - u8 *last_ste, - struct mlx5dr_ste_actions_attr *attr, - u32 *added_stes) +void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, + u8 *action_type_set, + u32 actions_caps, + u8 *last_ste, + struct mlx5dr_ste_actions_attr *attr, + u32 *added_stes) { u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action); u8 action_sz = DR_STE_ACTION_DOUBLE_SZ; @@ -533,7 +532,10 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count); action_sz -= DR_STE_ACTION_SINGLE_SZ; action += DR_STE_ACTION_SINGLE_SZ; - allow_modify_hdr = false; + + /* Check if vlan_pop and modify_hdr on same STE is supported */ + if (!(actions_caps & DR_STE_CTX_ACTION_CAP_POP_MDFY)) + allow_modify_hdr = false; } if (action_type_set[DR_ACTION_TYP_CTR]) @@ -631,11 +633,12 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1); } -static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, - u8 *action_type_set, - u8 *last_ste, - struct mlx5dr_ste_actions_attr *attr, - u32 *added_stes) +void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, + u8 *action_type_set, + u32 actions_caps, + u8 *last_ste, + struct mlx5dr_ste_actions_attr *attr, + u32 *added_stes) { u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action); u8 action_sz = DR_STE_ACTION_DOUBLE_SZ; @@ -677,13 +680,16 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi); action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); action_sz = 
DR_STE_ACTION_TRIPLE_SZ; - allow_modify_hdr = false; - allow_ctr = false; } dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count); action_sz -= DR_STE_ACTION_SINGLE_SZ; action += DR_STE_ACTION_SINGLE_SZ; + allow_ctr = false; + + /* Check if vlan_pop and modify_hdr on same STE is supported */ + if (!(actions_caps & DR_STE_CTX_ACTION_CAP_POP_MDFY)) + allow_modify_hdr = false; } if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) { @@ -731,9 +737,9 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); action_sz = DR_STE_ACTION_TRIPLE_SZ; allow_modify_hdr = true; - allow_ctr = false; } dr_ste_v1_set_counter_id(last_ste, attr->ctr_id); + allow_ctr = false; } if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) { @@ -800,11 +806,11 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1); } -static void dr_ste_v1_set_action_set(u8 *d_action, - u8 hw_field, - u8 shifter, - u8 length, - u32 data) +void dr_ste_v1_set_action_set(u8 *d_action, + u8 hw_field, + u8 shifter, + u8 length, + u32 data) { shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET; MLX5_SET(ste_double_action_set_v1, d_action, action_id, DR_STE_V1_ACTION_ID_SET); @@ -814,11 +820,11 @@ static void dr_ste_v1_set_action_set(u8 *d_action, MLX5_SET(ste_double_action_set_v1, d_action, inline_data, data); } -static void dr_ste_v1_set_action_add(u8 *d_action, - u8 hw_field, - u8 shifter, - u8 length, - u32 data) +void dr_ste_v1_set_action_add(u8 *d_action, + u8 hw_field, + u8 shifter, + u8 length, + u32 data) { shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET; MLX5_SET(ste_double_action_add_v1, d_action, action_id, DR_STE_V1_ACTION_ID_ADD); @@ -828,12 +834,12 @@ static void dr_ste_v1_set_action_add(u8 *d_action, MLX5_SET(ste_double_action_add_v1, d_action, add_value, data); } -static void dr_ste_v1_set_action_copy(u8 *d_action, - u8 dst_hw_field, - u8 dst_shifter, - u8 dst_len, - u8 src_hw_field, - u8 src_shifter) +void dr_ste_v1_set_action_copy(u8 *d_action, + u8 dst_hw_field, + u8 dst_shifter, + u8 dst_len, + u8 src_hw_field, + u8 src_shifter) { dst_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET; src_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET; @@ -848,11 +854,11 @@ static void dr_ste_v1_set_action_copy(u8 *d_action, #define DR_STE_DECAP_L3_ACTION_NUM 8 #define DR_STE_L2_HDR_MAX_SZ 20 -static int dr_ste_v1_set_action_decap_l3_list(void *data, - u32 data_sz, - u8 *hw_action, - u32 hw_action_sz, - u16 *used_hw_action_num) +int dr_ste_v1_set_action_decap_l3_list(void *data, + u32 data_sz, + u8 *hw_action, + u32 hw_action_sz, + u16 *used_hw_action_num) { u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {}; void *data_ptr = padded_data; @@ -977,8 +983,8 @@ static int dr_ste_v1_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask); @@ -1001,8 +1007,8 @@ static int dr_ste_v1_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask); @@ 
-1025,8 +1031,8 @@ static int dr_ste_v1_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask); @@ -1060,8 +1066,8 @@ static int dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *va return 0; } -static void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask); @@ -1079,8 +1085,8 @@ static void dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_vlan_id, mask, first_vid); DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_cfi, mask, first_cfi); DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_priority, mask, first_prio); - DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, ip_fragmented, mask, frag); // ? - DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, l3_ethertype, mask, ethertype); // ? + DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, ip_fragmented, mask, frag); + DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, l3_ethertype, mask, ethertype); DR_STE_SET_ONES(eth_l2_src_v1, bit_mask, l3_type, mask, ip_version); if (mask->svlan_tag || mask->cvlan_tag) { @@ -1201,8 +1207,8 @@ static int dr_ste_v1_build_eth_l2_src_tag(struct mlx5dr_match_param *value, return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag); } -static void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask); @@ -1234,8 +1240,8 @@ static int dr_ste_v1_build_eth_l2_dst_tag(struct mlx5dr_match_param *value, return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag); } -static void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_l2_dst_bit_mask(mask, sb->inner, sb->bit_mask); @@ -1314,8 +1320,8 @@ static int dr_ste_v1_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask); @@ -1331,12 +1337,13 @@ static int dr_ste_v1_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value struct mlx5dr_match_spec *spec = sb->inner ? 
&value->inner : &value->outer; DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, time_to_live, spec, ttl_hoplimit); + DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, ihl, spec, ipv4_ihl); return 0; } -static void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask); @@ -1375,8 +1382,8 @@ static int dr_ste_v1_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask); @@ -1399,8 +1406,8 @@ static int dr_ste_v1_build_mpls_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_mpls_tag(mask, sb, sb->bit_mask); @@ -1426,8 +1433,8 @@ static int dr_ste_v1_build_tnl_gre_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_tnl_gre_tag(mask, sb, sb->bit_mask); @@ -1471,8 +1478,8 @@ static int dr_ste_v1_build_tnl_mpls_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_tnl_mpls_tag(mask, sb, sb->bit_mask); @@ -1506,8 +1513,8 @@ static int dr_ste_v1_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *valu return 0; } -static void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask); @@ -1547,8 +1554,8 @@ static int dr_ste_v1_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *valu return 0; } -static void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask); @@ -1594,8 +1601,8 @@ static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_icmp_tag(mask, sb, sb->bit_mask); @@ -1616,8 +1623,8 @@ static int dr_ste_v1_build_general_purpose_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_general_purpose_tag(mask, sb, sb->bit_mask); @@ -1643,8 +1650,8 @@ static 
int dr_ste_v1_build_eth_l4_misc_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_eth_l4_misc_tag(mask, sb, sb->bit_mask); @@ -1673,9 +1680,8 @@ dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value, return 0; } -static void -dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask); @@ -1703,9 +1709,8 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value, return 0; } -static void -dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask); @@ -1726,8 +1731,8 @@ static int dr_ste_v1_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER; dr_ste_v1_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask); @@ -1749,8 +1754,8 @@ static int dr_ste_v1_build_register_0_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_register_0_tag(mask, sb, sb->bit_mask); @@ -1773,8 +1778,8 @@ static int dr_ste_v1_build_register_1_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_register_1_tag(mask, sb, sb->bit_mask); @@ -1837,8 +1842,8 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask); @@ -1892,8 +1897,8 @@ static int dr_ste_v1_build_felx_parser_tag(struct mlx5dr_match_param *value, return 0; } -static void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0; dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask); @@ -1901,8 +1906,8 @@ static void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb, sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag; } -static void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_flex_parser_1_init(struct 
mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1; dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask); @@ -1926,7 +1931,7 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *va return 0; } -static void +void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask) { @@ -1959,7 +1964,7 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(struct mlx5dr_match_par return 0; } -static void +void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask) { @@ -1982,8 +1987,8 @@ static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *v return 0; } -static void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask) +void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask) { dr_ste_v1_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask); @@ -2008,7 +2013,7 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value, return 0; } -static void +void dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask) { @@ -2035,7 +2040,7 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value, return 0; } -static void +void dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask) { @@ -2046,7 +2051,7 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb, sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag; } -struct mlx5dr_ste_ctx ste_ctx_v1 = { +static struct mlx5dr_ste_ctx ste_ctx_v1 = { /* Builders */ .build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init, .build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init, @@ -2091,7 +2096,8 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = { /* Actions */ .actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP | DR_STE_CTX_ACTION_CAP_RX_PUSH | - DR_STE_CTX_ACTION_CAP_RX_ENCAP, + DR_STE_CTX_ACTION_CAP_RX_ENCAP | + DR_STE_CTX_ACTION_CAP_POP_MDFY, .set_actions_rx = &dr_ste_v1_set_actions_rx, .set_actions_tx = &dr_ste_v1_set_actions_tx, .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v1_action_modify_field_arr), @@ -2103,3 +2109,8 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = { /* Send */ .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend, }; + +struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void) +{ + return &ste_ctx_v1; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h new file mode 100644 index 000000000000..8a1d49790c6e --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
*/ + +#ifndef _DR_STE_V1_ +#define _DR_STE_V1_ + +#include "dr_types.h" +#include "dr_ste.h" + +void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr); +u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p); +void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask); +u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p); +void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type); +u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p); +void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size); +void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi); +void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size); +void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, u8 *action_type_set, + u32 actions_caps, u8 *last_ste, + struct mlx5dr_ste_actions_attr *attr, u32 *added_stes); +void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, u8 *action_type_set, + u32 actions_caps, u8 *last_ste, + struct mlx5dr_ste_actions_attr *attr, u32 *added_stes); +void dr_ste_v1_set_action_set(u8 *d_action, u8 hw_field, u8 shifter, + u8 length, u32 data); +void dr_ste_v1_set_action_add(u8 *d_action, u8 hw_field, u8 shifter, + u8 length, u32 data); +void dr_ste_v1_set_action_copy(u8 *d_action, u8 dst_hw_field, u8 dst_shifter, + u8 dst_len, u8 src_hw_field, u8 src_shifter); +int dr_ste_v1_set_action_decap_l3_list(void *data, u32 data_sz, u8 *hw_action, + u32 hw_action_sz, u16 *used_hw_action_num); +void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void 
dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); +void dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask); + +#endif /* _DR_STE_V1_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c new file mode 100644 index 000000000000..c60fddd125d2 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#include "dr_ste_v1.h" + +enum { + DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0 = 0x00, + DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1 = 0x01, + DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2 = 0x02, + DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08, + DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09, + DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e, + DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0 = 0x18, + DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1 = 0x19, + DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40, + DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e, + DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f, + DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e, + DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f, + DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f, + DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70, + DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b, + DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0 = 0x90, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1 = 0x91, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0 = 0x92, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1 = 0x93, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0 = 0x94, + DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1 = 0x95, +}; + +static const struct mlx5dr_ste_action_modify_field dr_ste_v2_action_modify_field_arr[] = { + [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15, + }, + 
[MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP, + }, + [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, + }, + [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP, + }, + [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15, + .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6, + }, + [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, + }, + [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31, + .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31, + }, + 
[MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15, + }, + [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31, + }, + [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = { + .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15, + }, +}; + +static struct mlx5dr_ste_ctx ste_ctx_v2 = { + /* Builders */ + .build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init, + .build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init, + .build_eth_l3_ipv6_dst_init = &dr_ste_v1_build_eth_l3_ipv6_dst_init, + .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init, + .build_eth_l2_src_init = &dr_ste_v1_build_eth_l2_src_init, + .build_eth_l2_dst_init = &dr_ste_v1_build_eth_l2_dst_init, + .build_eth_l2_tnl_init = &dr_ste_v1_build_eth_l2_tnl_init, + .build_eth_l3_ipv4_misc_init = &dr_ste_v1_build_eth_l3_ipv4_misc_init, + .build_eth_ipv6_l3_l4_init = &dr_ste_v1_build_eth_ipv6_l3_l4_init, + .build_mpls_init = &dr_ste_v1_build_mpls_init, + .build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init, + .build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init, + .build_tnl_mpls_over_udp_init = &dr_ste_v1_build_tnl_mpls_over_udp_init, + .build_tnl_mpls_over_gre_init = &dr_ste_v1_build_tnl_mpls_over_gre_init, + .build_icmp_init = &dr_ste_v1_build_icmp_init, + .build_general_purpose_init = &dr_ste_v1_build_general_purpose_init, + .build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init, + .build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init, + .build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init, + .build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init, + .build_tnl_geneve_tlv_opt_exist_init = + &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init, + .build_register_0_init = &dr_ste_v1_build_register_0_init, + .build_register_1_init = &dr_ste_v1_build_register_1_init, + .build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init, + .build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init, + .build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init, + .build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init, + .build_tnl_header_0_1_init = &dr_ste_v1_build_tnl_header_0_1_init, + .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init, + .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init, + + /* Getters and 
Setters */ + .ste_init = &dr_ste_v1_init, + .set_next_lu_type = &dr_ste_v1_set_next_lu_type, + .get_next_lu_type = &dr_ste_v1_get_next_lu_type, + .set_miss_addr = &dr_ste_v1_set_miss_addr, + .get_miss_addr = &dr_ste_v1_get_miss_addr, + .set_hit_addr = &dr_ste_v1_set_hit_addr, + .set_byte_mask = &dr_ste_v1_set_byte_mask, + .get_byte_mask = &dr_ste_v1_get_byte_mask, + + /* Actions */ + .actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP | + DR_STE_CTX_ACTION_CAP_RX_PUSH | + DR_STE_CTX_ACTION_CAP_RX_ENCAP, + .set_actions_rx = &dr_ste_v1_set_actions_rx, + .set_actions_tx = &dr_ste_v1_set_actions_tx, + .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v2_action_modify_field_arr), + .modify_field_arr = dr_ste_v2_action_modify_field_arr, + .set_action_set = &dr_ste_v1_set_action_set, + .set_action_add = &dr_ste_v1_set_action_add, + .set_action_copy = &dr_ste_v1_set_action_copy, + .set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list, + + /* Send */ + .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend, +}; + +struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void) +{ + return &ste_ctx_v2; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 55fcb751e24a..88092fabf55b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -91,6 +91,7 @@ enum mlx5dr_ste_ctx_action_cap { DR_STE_CTX_ACTION_CAP_TX_POP = 1 << 0, DR_STE_CTX_ACTION_CAP_RX_PUSH = 1 << 1, DR_STE_CTX_ACTION_CAP_RX_ENCAP = 1 << 2, + DR_STE_CTX_ACTION_CAP_POP_MDFY = 1 << 3, }; enum { @@ -555,7 +556,9 @@ struct mlx5dr_match_spec { */ u32 tcp_dport:16; - u32 reserved_auto1:24; + u32 reserved_auto1:16; + u32 ipv4_ihl:4; + u32 reserved_auto2:4; u32 ttl_hoplimit:8; /* UDP source port.;tcp and udp sport/dport are mutually exclusive */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index 57ffcab7294c..045b0cf90063 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -758,7 +758,7 @@ static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns, enum fs_flow_table_type ft_type) { if (ft_type != FS_FT_FDB || - MLX5_CAP_GEN(ns->dev, steering_format_version) != MLX5_STEERING_FORMAT_CONNECTX_6DX) + MLX5_CAP_GEN(ns->dev, steering_format_version) == MLX5_STEERING_FORMAT_CONNECTX_5) return 0; return MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX | MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h index dfa223415fe2..03efbdf3fec3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h @@ -136,7 +136,7 @@ mlx5dr_is_supported(struct mlx5_core_dev *dev) (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) || (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) && (MLX5_CAP_GEN(dev, steering_format_version) <= - MLX5_STEERING_FORMAT_CONNECTX_6DX))); + MLX5_STEERING_FORMAT_CONNECTX_7))); } /* buddy functions & structure */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index d3b1a6a1f8d2..00a914b0716e 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -264,6 +264,14 @@ enum { struct mlx5_cmd_stats { u64 sum; u64 n; + /* number of times command failed */ + u64 failed; + /* number of times command 
failed on bad status returned by FW */ + u64 failed_mbox_status; + /* last command failed returned errno */ + u32 last_failed_errno; + /* last bad status returned by FW */ + u8 last_failed_mbox_status; struct dentry *root; /* protect command average calculations */ spinlock_t lock; @@ -543,6 +551,15 @@ struct mlx5_adev { int idx; }; +struct mlx5_debugfs_entries { + struct dentry *dbg_root; + struct dentry *qp_debugfs; + struct dentry *eq_debugfs; + struct dentry *cq_debugfs; + struct dentry *cmdif_debugfs; + struct dentry *pages_debugfs; +}; + struct mlx5_ft_pool; struct mlx5_priv { /* IRQ table valid only for real pci devices PF or VF */ @@ -553,21 +570,19 @@ struct mlx5_priv { struct mlx5_nb pg_nb; struct workqueue_struct *pg_wq; struct xarray page_root_xa; - int fw_pages; + u32 fw_pages; atomic_t reg_pages; struct list_head free_list; - int vfs_pages; - int host_pf_pages; + u32 vfs_pages; + u32 host_pf_pages; + u32 fw_pages_alloc_failed; + u32 give_pages_dropped; + u32 reclaim_pages_discard; struct mlx5_core_health health; struct list_head traps; - /* start: qp staff */ - struct dentry *qp_debugfs; - struct dentry *eq_debugfs; - struct dentry *cq_debugfs; - struct dentry *cmdif_debugfs; - /* end: qp staff */ + struct mlx5_debugfs_entries dbg; /* start: alloc staff */ /* protect buffer allocation according to numa node */ @@ -577,7 +592,6 @@ struct mlx5_priv { struct mutex pgdir_mutex; struct list_head pgdir_list; /* end: alloc staff */ - struct dentry *dbg_root; struct list_head ctx_list; spinlock_t ctx_lock; @@ -955,6 +969,7 @@ typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context); struct mlx5_async_work { struct mlx5_async_ctx *ctx; mlx5_async_cbk_t user_callback; + u16 opcode; /* cmd opcode */ void *out; /* pointer to the cmd output buffer */ }; @@ -1015,6 +1030,8 @@ int mlx5_pagealloc_init(struct mlx5_core_dev *dev); void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); void mlx5_pagealloc_start(struct mlx5_core_dev *dev); void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); +void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev); void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, s32 npages, bool ec_function); int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); @@ -1029,6 +1046,7 @@ int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn); int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); +struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev); void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev); void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index ea65131835ab..bcf60ede6fcc 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -493,7 +493,10 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits { u8 tcp_sport[0x10]; u8 tcp_dport[0x10]; - u8 reserved_at_c0[0x18]; + u8 reserved_at_c0[0x10]; + u8 ipv4_ihl[0x4]; + u8 reserved_at_c4[0x4]; + u8 ttl_hoplimit[0x8]; u8 udp_sport[0x10]; @@ -1343,6 +1346,7 @@ enum mlx5_fc_bulk_alloc_bitmask { enum { MLX5_STEERING_FORMAT_CONNECTX_5 = 0, MLX5_STEERING_FORMAT_CONNECTX_6DX = 1, + MLX5_STEERING_FORMAT_CONNECTX_7 = 2, }; struct mlx5_ifc_cmd_hca_cap_bits { @@ -1419,8 +1423,9 @@ struct mlx5_ifc_cmd_hca_cap_bits 
{ + u8 reserved_at_130[0xa]; + u8 log_max_ra_res_dc[0x6]; - u8 reserved_at_140[0x6]; + u8 reserved_at_140[0x5]; + u8 release_all_pages[0x1]; + u8 must_not_use[0x1]; u8 reserved_at_147[0x2]; u8 roce_accl[0x1]; u8 log_max_ra_req_qp[0x6];
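Editor's note on the dr_ste_v2.c hunk above: the new ste_ctx_v2 reuses the v1 builders, getters/setters and action helpers and differs mainly in its modify-header field array and action capability flags. The sketch below is not part of the patch; it only illustrates how a caller could map the device's steering_format_version capability to the matching STE context. Only mlx5dr_ste_get_ctx_v2() and the MLX5_STEERING_FORMAT_* values come from this series; the dispatch helper name and the v0/v1 accessors are assumed for illustration.

	#include <linux/mlx5/driver.h>
	#include <linux/mlx5/mlx5_ifc.h>
	#include "dr_types.h"
	#include "dr_ste.h"

	/* Minimal sketch, not part of the patch: pick the STE context that
	 * matches the device's steering_format_version capability.
	 * mlx5dr_ste_get_ctx_v2() is provided by dr_ste_v2.c above; the
	 * helper name and the v0/v1 accessors are assumed for illustration.
	 */
	static struct mlx5dr_ste_ctx *pick_ste_ctx(struct mlx5_core_dev *mdev)
	{
		switch (MLX5_CAP_GEN(mdev, steering_format_version)) {
		case MLX5_STEERING_FORMAT_CONNECTX_5:
			return mlx5dr_ste_get_ctx_v0();	/* assumed accessor */
		case MLX5_STEERING_FORMAT_CONNECTX_6DX:
			return mlx5dr_ste_get_ctx_v1();	/* assumed accessor */
		case MLX5_STEERING_FORMAT_CONNECTX_7:
			return mlx5dr_ste_get_ctx_v2();	/* added by dr_ste_v2.c */
		default:
			return NULL;			/* unsupported format */
		}
	}

This also matches the mlx5dr_is_supported() and fs_dr.c hunks above, which relax their checks from "exactly ConnectX-6DX" to "anything newer than ConnectX-5, up to ConnectX-7".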
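Editor's note on the dr_types.h and mlx5_ifc.h hunks: a 4-bit ipv4_ihl field is carved out of previously reserved space next to ttl_hoplimit in the layer-2/4 match set. The following sketch is not part of the patch; it shows, under the assumption of caller-allocated fte_match_param buffers and a hypothetical function name, how a match on the new field could be expressed with the standard MLX5_SET/MLX5_ADDR_OF helpers.

	#include <linux/mlx5/device.h>
	#include <linux/mlx5/mlx5_ifc.h>

	/* Minimal sketch, not part of the patch: match outer IPv4 packets
	 * carrying no IP options (IHL == 5) via the ipv4_ihl field this
	 * series adds to fte_match_set_lyr_2_4.  match_criteria/match_value
	 * are assumed to be caller-allocated fte_match_param blobs.
	 */
	static void match_on_ipv4_ihl(u32 *match_criteria, u32 *match_value)
	{
		void *headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
					       outer_headers);
		void *headers_v = MLX5_ADDR_OF(fte_match_param, match_value,
					       outer_headers);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ipv4_ihl, 0xf);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_ihl, 5);
	}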
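Editor's note on the include/linux/mlx5/driver.h hunks: the per-device debugfs dentries are grouped into struct mlx5_debugfs_entries and reached through the new mlx5_debugfs_get_dev_root() accessor rather than through mlx5_priv fields. The sketch below is not part of the patch; the component and function names are hypothetical, and it only illustrates how a sub-component would create its directory via the accessor.

	#include <linux/debugfs.h>
	#include <linux/mlx5/driver.h>

	/* Minimal sketch, not part of the patch: a hypothetical component
	 * creates its debugfs directory under the per-device root returned
	 * by mlx5_debugfs_get_dev_root() instead of dereferencing the (now
	 * private) dentries inside mlx5_priv.
	 */
	static struct dentry *example_component_debugfs_init(struct mlx5_core_dev *dev)
	{
		return debugfs_create_dir("example_component",	/* hypothetical name */
					  mlx5_debugfs_get_dev_root(dev));
	}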