author	Paolo Abeni <pabeni@redhat.com>	2025-03-13 14:40:45 +0300
committer	Paolo Abeni <pabeni@redhat.com>	2025-03-13 14:40:46 +0300
commit	89d75c4c67aca1573aff905e72131a10847c5fda (patch)
tree	1a80a5e04eca02c255362480151e3ed876430fb7 /include/linux
parent	5a1dddd2944492edb1e61c5db1cc386e5419fcf4 (diff)
parent	f550694e88b7b13b647777f889e03e544d9db60c (diff)
download	linux-89d75c4c67aca1573aff905e72131a10847c5fda.tar.xz
Merge branch 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux
Tariq Toukan says:

====================
mlx5-next updates 2025-03-10

The following pull-request contains common mlx5 updates for your
*net-next* tree.

Please pull and let me know of any problem.

* 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux:
  net/mlx5: Add IFC bits for PPCNT recovery counters group
  net/mlx5: fs, add RDMA TRANSPORT steering domain support
  net/mlx5: Query ADV_RDMA capabilities
  net/mlx5: Limit non-privileged commands
  net/mlx5: Allow the throttle mechanism to be more dynamic
  net/mlx5: Add RDMA_CTRL HW capabilities
====================

Link: https://patch.msgid.link/1741608293-41436-1-git-send-email-tariqt@nvidia.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/mlx5/device.h12
-rw-r--r--include/linux/mlx5/driver.h6
-rw-r--r--include/linux/mlx5/fs.h10
-rw-r--r--include/linux/mlx5/mlx5_ifc.h63
4 files changed, 85 insertions, 6 deletions
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index fd37f4e54d76..904804e995aa 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1251,6 +1251,7 @@ enum mlx5_cap_type {
MLX5_CAP_GENERAL_2 = 0x20,
MLX5_CAP_PORT_SELECTION = 0x25,
MLX5_CAP_ADV_VIRTUALIZATION = 0x26,
+ MLX5_CAP_ADV_RDMA = 0x28,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -1345,6 +1346,12 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap)
+#define MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_RX(mdev, cap) \
+ MLX5_CAP_ADV_RDMA(mdev, rdma_transport_rx_flow_table_properties.cap)
+
+#define MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_TX(mdev, cap) \
+ MLX5_CAP_ADV_RDMA(mdev, rdma_transport_tx_flow_table_properties.cap)
+
#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)
@@ -1384,6 +1391,10 @@ enum mlx5_qcam_feature_groups {
MLX5_GET(adv_virtualization_cap, \
mdev->caps.hca[MLX5_CAP_ADV_VIRTUALIZATION]->cur, cap)
+#define MLX5_CAP_ADV_RDMA(mdev, cap) \
+ MLX5_GET(adv_rdma_cap, \
+ mdev->caps.hca[MLX5_CAP_ADV_RDMA]->cur, cap)
+
#define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \
MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap)
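For orientation, the new MLX5_CAP_ADV_RDMA accessor nests under the ADV_RDMA capability page the same way the existing flow-table macros nest under the NIC and ESW pages. A minimal sketch (helper name invented, not from this patch) of probing RDMA TRANSPORT RX table support:

	/* Sketch: check the adv_rdma HCA cap bit before touching the
	 * MLX5_CAP_ADV_RDMA page, then read ft_support from the new RX
	 * flow-table properties. */
	static bool mlx5_rdma_transport_rx_supported(struct mlx5_core_dev *mdev)
	{
		if (!MLX5_CAP_GEN(mdev, adv_rdma))
			return false;

		return MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_RX(mdev, ft_support);
	}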
@@ -1506,6 +1517,7 @@ enum {
MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12,
MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP = 0x13,
MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16,
+ MLX5_PHYSICAL_LAYER_RECOVERY_GROUP = 0x1a,
MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
MLX5_INFINIBAND_EXTENDED_PORT_COUNTERS_GROUP = 0x21,
};
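The new 0x1a group slots into the existing PPCNT machinery. A minimal sketch (assuming local port 1 and eliding error handling) that gates on the matching PCAM feature bit added later in this series:

	static u32 mlx5_query_total_recovery_events(struct mlx5_core_dev *mdev)
	{
		u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
		u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
		void *cnt;

		if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_recovery_counters))
			return 0;

		MLX5_SET(ppcnt_reg, in, local_port, 1);
		MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_RECOVERY_GROUP);
		mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
				     MLX5_REG_PPCNT, 0, 0);

		cnt = MLX5_ADDR_OF(ppcnt_reg, out, counter_set);
		return MLX5_GET(phys_layer_recovery_cntrs, cnt,
				total_successful_recovery_events);
	}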
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 46bd7550adf8..d1dfbad9a447 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -304,6 +304,8 @@ struct mlx5_cmd {
struct semaphore sem;
struct semaphore pages_sem;
struct semaphore throttle_sem;
+ struct semaphore unprivileged_sem;
+ struct xarray privileged_uids;
} vars;
enum mlx5_cmdif_state state;
void *cmd_alloc_buf;
@@ -964,6 +966,8 @@ struct mlx5_async_work {
mlx5_async_cbk_t user_callback;
u16 opcode; /* cmd opcode */
u16 op_mod; /* cmd op_mod */
+ u8 throttle_locked:1;
+ u8 unpriv_locked:1;
void *out; /* pointer to the cmd output buffer */
};
@@ -994,6 +998,8 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
void *out, int out_size);
bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);
+int mlx5_cmd_add_privileged_uid(struct mlx5_core_dev *dev, u16 uid);
+void mlx5_cmd_remove_privileged_uid(struct mlx5_core_dev *dev, u16 uid);
void mlx5_core_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev);
void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *mdev);
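The two new exports pair with the unprivileged_sem/privileged_uids state added to struct mlx5_cmd above: per the "Limit non-privileged commands" patch, commands issued by UIDs absent from the xarray contend on the new semaphore. A minimal sketch of a hypothetical caller registering a privileged user context:

	/* Sketch: register the uid before issuing commands on its behalf ... */
	err = mlx5_cmd_add_privileged_uid(mdev, uid);
	if (err)
		return err;

	/* ... and drop the registration on context teardown. */
	mlx5_cmd_remove_privileged_uid(mdev, uid);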
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 01cb72d68c23..fd62b2b1611d 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -40,6 +40,7 @@
#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
+#define MLX5_RDMA_TRANSPORT_BYPASS_PRIO 0
#define MLX5_FS_MAX_POOL_SIZE BIT(30)
enum mlx5_flow_destination_type {
@@ -110,6 +111,8 @@ enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC,
MLX5_FLOW_NAMESPACE_RDMA_RX_MACSEC,
MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC,
+ MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX,
+ MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX,
};
enum {
@@ -194,9 +197,9 @@ struct mlx5_flow_namespace *
mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type);
struct mlx5_flow_namespace *
-mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
- enum mlx5_flow_namespace_type type,
- int vport);
+mlx5_get_flow_vport_namespace(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type type,
+ int vport_idx);
struct mlx5_flow_table_attr {
int prio;
@@ -204,6 +207,7 @@ struct mlx5_flow_table_attr {
u32 level;
u32 flags;
u16 uid;
+ u16 vport;
struct mlx5_flow_table *next_ft;
struct {
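Taken together, the fs.h changes let a caller reach the new per-vport steering domains: the renamed mlx5_get_flow_vport_namespace() now serves more than the ACL namespaces, and the new vport attribute carries the target vport into table creation. A minimal sketch (priority, sizing, and the exact vport plumbing are illustrative):

	static struct mlx5_flow_table *
	example_rdma_transport_rx_table(struct mlx5_core_dev *dev, u16 vport_idx)
	{
		struct mlx5_flow_table_attr ft_attr = {};
		struct mlx5_flow_namespace *ns;

		ns = mlx5_get_flow_vport_namespace(dev,
						   MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX,
						   vport_idx);
		if (!ns)
			return ERR_PTR(-EOPNOTSUPP);

		ft_attr.prio = MLX5_RDMA_TRANSPORT_BYPASS_PRIO;
		ft_attr.max_fte = 1;
		ft_attr.vport = vport_idx;

		return mlx5_create_flow_table(ns, &ft_attr);
	}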
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index cc2875e843f7..2c09df4ee574 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1570,6 +1570,8 @@ enum {
enum {
MLX5_UCTX_CAP_RAW_TX = 1UL << 0,
MLX5_UCTX_CAP_INTERNAL_DEV_RES = 1UL << 1,
+ MLX5_UCTX_CAP_RDMA_CTRL = 1UL << 3,
+ MLX5_UCTX_CAP_RDMA_CTRL_OTHER_VHCA = 1UL << 4,
};
#define MLX5_FC_BULK_SIZE_FACTOR 128
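The two new UCTX bits advertise whether a user context may be created with RDMA_CTRL privileges, locally or toward another VHCA. A minimal sketch checking them against the uctx_cap field of cmd_hca_cap, as the existing bits are checked:

	bool rdma_ctrl = MLX5_CAP_GEN(mdev, uctx_cap) &
			 MLX5_UCTX_CAP_RDMA_CTRL;
	bool rdma_ctrl_other_vhca = MLX5_CAP_GEN(mdev, uctx_cap) &
				    MLX5_UCTX_CAP_RDMA_CTRL_OTHER_VHCA;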
@@ -1991,7 +1993,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 max_geneve_tlv_options[0x8];
u8 reserved_at_568[0x3];
u8 max_geneve_tlv_option_data_len[0x5];
- u8 reserved_at_570[0x9];
+ u8 reserved_at_570[0x1];
+ u8 adv_rdma[0x1];
+ u8 reserved_at_572[0x7];
u8 adv_virtualization[0x1];
u8 reserved_at_57a[0x6];
@@ -2140,7 +2144,8 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 log_min_mkey_entity_size[0x5];
u8 reserved_at_1b0[0x10];
- u8 reserved_at_1c0[0x60];
+ u8 general_obj_types_127_64[0x40];
+ u8 reserved_at_200[0x20];
u8 reserved_at_220[0x1];
u8 sw_vhca_id_valid[0x1];
@@ -2640,6 +2645,12 @@ struct mlx5_ifc_field_select_802_1qau_rp_bits {
u8 field_select_8021qaurp[0x20];
};
+struct mlx5_ifc_phys_layer_recovery_cntrs_bits {
+ u8 total_successful_recovery_events[0x20];
+
+ u8 reserved_at_20[0x7a0];
+};
+
struct mlx5_ifc_phys_layer_cntrs_bits {
u8 time_since_last_clear_high[0x20];
@@ -4841,6 +4852,7 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
struct mlx5_ifc_ib_ext_port_cntrs_grp_data_layout_bits ib_ext_port_cntrs_grp_data_layout;
struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
struct mlx5_ifc_phys_layer_statistical_cntrs_bits phys_layer_statistical_cntrs;
+ struct mlx5_ifc_phys_layer_recovery_cntrs_bits phys_layer_recovery_cntrs;
u8 reserved_at_0[0x7c0];
};
@@ -10579,7 +10591,9 @@ struct mlx5_ifc_mtutc_reg_bits {
};
struct mlx5_ifc_pcam_enhanced_features_bits {
- u8 reserved_at_0[0x1d];
+ u8 reserved_at_0[0x10];
+ u8 ppcnt_recovery_counters[0x1];
+ u8 reserved_at_11[0xc];
u8 fec_200G_per_lane_in_pplm[0x1];
u8 reserved_at_1e[0x2a];
u8 fec_100G_per_lane_in_pplm[0x1];
@@ -12495,12 +12509,17 @@ enum {
};
enum {
+ MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_RDMA_CTRL = BIT_ULL(0x13),
+};
+
+enum {
MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc,
MLX5_GENERAL_OBJECT_TYPES_IPSEC = 0x13,
MLX5_GENERAL_OBJECT_TYPES_SAMPLER = 0x20,
MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = 0x24,
MLX5_GENERAL_OBJECT_TYPES_MACSEC = 0x27,
MLX5_GENERAL_OBJECT_TYPES_INT_KEK = 0x47,
+ MLX5_GENERAL_OBJECT_TYPES_RDMA_CTRL = 0x53,
MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS = 0xff15,
};
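The two enums are consistent with each other: bit 0x13 of the new general_obj_types_127_64 window maps to object type 0x40 + 0x13 = 0x53, i.e. MLX5_GENERAL_OBJECT_TYPES_RDMA_CTRL. A minimal support check (assuming the existing MLX5_CAP_GEN_2_64 accessor for cmd_hca_cap_2):

	static bool mlx5_rdma_ctrl_obj_supported(struct mlx5_core_dev *mdev)
	{
		return MLX5_CAP_GEN_2_64(mdev, general_obj_types_127_64) &
		       MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_RDMA_CTRL;
	}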
@@ -13068,6 +13087,44 @@ struct mlx5_ifc_load_vhca_state_out_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_adv_rdma_cap_bits {
+ u8 rdma_transport_manager[0x1];
+ u8 rdma_transport_manager_other_eswitch[0x1];
+ u8 reserved_at_2[0x1e];
+
+ u8 rcx_type[0x8];
+ u8 reserved_at_28[0x2];
+ u8 ps_entry_log_max_value[0x6];
+ u8 reserved_at_30[0x6];
+ u8 qp_max_ps_num_entry[0xa];
+
+ u8 mp_max_num_queues[0x8];
+ u8 ps_user_context_max_log_size[0x8];
+ u8 message_based_qp_and_striding_wq[0x8];
+ u8 reserved_at_58[0x8];
+
+ u8 max_receive_send_message_size_stride[0x10];
+ u8 reserved_at_70[0x10];
+
+ u8 max_receive_send_message_size_byte[0x20];
+
+ u8 reserved_at_a0[0x160];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits rdma_transport_rx_flow_table_properties;
+
+ struct mlx5_ifc_flow_table_prop_layout_bits rdma_transport_tx_flow_table_properties;
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits rdma_transport_rx_ft_field_support_2;
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits rdma_transport_tx_ft_field_support_2;
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits rdma_transport_rx_ft_field_bitmask_support_2;
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits rdma_transport_tx_ft_field_bitmask_support_2;
+
+ u8 reserved_at_800[0x3800];
+};
+
struct mlx5_ifc_adv_virtualization_cap_bits {
u8 reserved_at_0[0x3];
u8 pg_track_log_max_num[0x5];
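The new adv_rdma_cap fields are reachable through the MLX5_CAP_ADV_RDMA accessor added in device.h above, so a consumer of the RDMA TRANSPORT domains would presumably gate on the manager bit first (illustrative fragment, not from this patch):

	/* Only an RDMA transport manager owns the new steering domains; the
	 * _other_eswitch variant additionally spans other eswitches. */
	if (!MLX5_CAP_ADV_RDMA(mdev, rdma_transport_manager))
		return -EOPNOTSUPP;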