Diffstat (limited to 'include')
 include/linux/mlx4/device.h               |   4
 include/linux/mlx5/device.h               |  12
 include/linux/mlx5/driver.h               |  15
 include/linux/mlx5/fs_helpers.h           |   8
 include/linux/mlx5/mlx5_ifc.h             | 102
 include/rdma/ib_addr.h                    |   9
 include/rdma/ib_cache.h                   |  29
 include/rdma/ib_sa.h                      |  13
 include/rdma/ib_verbs.h                   | 212
 include/rdma/rdma_cm.h                    |  44
 include/rdma/rdma_vt.h                    |   2
 include/rdma/restrack.h                   |  26
 include/rdma/uverbs_ioctl.h               | 153
 include/rdma/uverbs_named_ioctl.h         |  90
 include/rdma/uverbs_std_types.h           |  34
 include/uapi/rdma/bnxt_re-abi.h           |  21
 include/uapi/rdma/cxgb3-abi.h             |  17
 include/uapi/rdma/cxgb4-abi.h             |  29
 include/uapi/rdma/hfi/hfi1_ioctl.h        |  32
 include/uapi/rdma/hfi/hfi1_user.h         |  10
 include/uapi/rdma/hns-abi.h               |  22
 include/uapi/rdma/i40iw-abi.h             | 107
 include/uapi/rdma/ib_user_cm.h            |  48
 include/uapi/rdma/ib_user_ioctl_cmds.h    | 134
 include/uapi/rdma/ib_user_ioctl_verbs.h   |  96
 include/uapi/rdma/ib_user_mad.h           |   4
 include/uapi/rdma/ib_user_verbs.h         | 195
 include/uapi/rdma/mlx4-abi.h              |  52
 include/uapi/rdma/mlx5-abi.h              |  72
 include/uapi/rdma/mlx5_user_ioctl_cmds.h  |  48
 include/uapi/rdma/mlx5_user_ioctl_verbs.h |  43
 include/uapi/rdma/mthca-abi.h             |  10
 include/uapi/rdma/nes-abi.h               |   6
 include/uapi/rdma/ocrdma-abi.h            |  36
 include/uapi/rdma/qedr-abi.h              |  20
 include/uapi/rdma/rdma_netlink.h          |  51
 include/uapi/rdma/rdma_user_cm.h          |  49
 include/uapi/rdma/rdma_user_ioctl.h       |  38
 include/uapi/rdma/rdma_user_ioctl_cmds.h  |  99
 include/uapi/rdma/rdma_user_rxe.h         |  58
 include/uapi/rdma/vmw_pvrdma-abi.h        |  49
 41 files changed, 1569 insertions(+), 530 deletions(-)
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index a9b5fed8f7c6..81d0799b6091 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -257,10 +257,6 @@ enum {
};
enum {
- MLX4_USER_DEV_CAP_LARGE_CQE = 1L << 0
-};
-
-enum {
MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0,
MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1,
MLX4_FUNC_CAP_DMFS_A0_STATIC = 1L << 2
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 12758595459b..2bc27f8c5b87 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1017,6 +1017,8 @@ enum mlx5_cap_type {
MLX5_CAP_VECTOR_CALC,
MLX5_CAP_QOS,
MLX5_CAP_DEBUG,
+ MLX5_CAP_RESERVED_14,
+ MLX5_CAP_DEV_MEM,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -1168,6 +1170,12 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP64_FPGA(mdev, cap) \
MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)
+#define MLX5_CAP_DEV_MEM(mdev, cap)\
+ MLX5_GET(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)
+
+#define MLX5_CAP64_DEV_MEM(mdev, cap)\
+ MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)
+
enum {
MLX5_CMD_STAT_OK = 0x0,
MLX5_CMD_STAT_INT_ERR = 0x1,
@@ -1211,8 +1219,8 @@ static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}
-#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 8
-#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 8
+#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16
+#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16
#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index cded85ab6fe4..767d193c269a 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -591,8 +591,14 @@ struct mlx5_eswitch;
struct mlx5_lag;
struct mlx5_pagefault;
+struct mlx5_rate_limit {
+ u32 rate;
+ u32 max_burst_sz;
+ u16 typical_pkt_sz;
+};
+
struct mlx5_rl_entry {
- u32 rate;
+ struct mlx5_rate_limit rl;
u16 index;
u16 refcount;
};
@@ -1107,9 +1113,12 @@ int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
-int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index);
-void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate);
+int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
+ struct mlx5_rate_limit *rl);
+void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
+bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
+ struct mlx5_rate_limit *rl_1);
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
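[Editor note: the following is an illustrative, hedged sketch, not part of the patch. It shows how a caller might move from the old rate-only API to the struct-based one declared above; only the struct mlx5_rate_limit layout and the function signatures come from this hunk, the mdev pointer, the rate value and the error policy are assumptions.]

    #include <linux/mlx5/driver.h>

    static int example_set_pacing(struct mlx5_core_dev *mdev, u16 *rl_index)
    {
            struct mlx5_rate_limit rl = {
                    .rate           = 100000, /* same value the old u32 argument carried */
                    .max_burst_sz   = 0,      /* new field, left at 0 here */
                    .typical_pkt_sz = 0,      /* new field, left at 0 here */
            };

            if (!mlx5_rl_is_in_range(mdev, rl.rate))
                    return -EINVAL;

            /* Old: mlx5_rl_add_rate(mdev, rl.rate, rl_index);
             * New: the whole descriptor is passed and the index is returned. */
            return mlx5_rl_add_rate(mdev, rl_index, &rl);
    }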
diff --git a/include/linux/mlx5/fs_helpers.h b/include/linux/mlx5/fs_helpers.h
index 7b476bbae731..9db21cd0e92c 100644
--- a/include/linux/mlx5/fs_helpers.h
+++ b/include/linux/mlx5/fs_helpers.h
@@ -38,6 +38,14 @@
#define MLX5_FS_IPV4_VERSION 4
#define MLX5_FS_IPV6_VERSION 6
+static inline bool mlx5_fs_is_ipsec_flow(const u32 *match_c)
+{
+ void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ misc_parameters);
+
+ return MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
+}
+
static inline bool _mlx5_fs_is_outer_ipproto_flow(const u32 *match_c,
const u32 *match_v, u8 match)
{
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index d25011f84815..1aad455538f4 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -92,6 +92,8 @@ enum {
MLX5_CMD_OP_DESTROY_MKEY = 0x202,
MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203,
MLX5_CMD_OP_PAGE_FAULT_RESUME = 0x204,
+ MLX5_CMD_OP_ALLOC_MEMIC = 0x205,
+ MLX5_CMD_OP_DEALLOC_MEMIC = 0x206,
MLX5_CMD_OP_CREATE_EQ = 0x301,
MLX5_CMD_OP_DESTROY_EQ = 0x302,
MLX5_CMD_OP_QUERY_EQ = 0x303,
@@ -575,7 +577,10 @@ struct mlx5_ifc_qos_cap_bits {
u8 esw_scheduling[0x1];
u8 esw_bw_share[0x1];
u8 esw_rate_limit[0x1];
- u8 reserved_at_4[0x1c];
+ u8 reserved_at_4[0x1];
+ u8 packet_pacing_burst_bound[0x1];
+ u8 packet_pacing_typical_size[0x1];
+ u8 reserved_at_7[0x19];
u8 reserved_at_20[0x20];
@@ -669,6 +674,24 @@ struct mlx5_ifc_roce_cap_bits {
u8 reserved_at_100[0x700];
};
+struct mlx5_ifc_device_mem_cap_bits {
+ u8 memic[0x1];
+ u8 reserved_at_1[0x1f];
+
+ u8 reserved_at_20[0xb];
+ u8 log_min_memic_alloc_size[0x5];
+ u8 reserved_at_30[0x8];
+ u8 log_max_memic_addr_alignment[0x8];
+
+ u8 memic_bar_start_addr[0x40];
+
+ u8 memic_bar_size[0x20];
+
+ u8 max_memic_size[0x20];
+
+ u8 reserved_at_c0[0x740];
+};
+
enum {
MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0,
MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2,
@@ -883,7 +906,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 ets[0x1];
u8 nic_flow_table[0x1];
u8 eswitch_flow_table[0x1];
- u8 early_vf_enable[0x1];
+ u8 device_memory[0x1];
u8 mcam_reg[0x1];
u8 pcam_reg[0x1];
u8 local_ca_ack_delay[0x5];
@@ -927,7 +950,11 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_202[0x1];
u8 ipoib_enhanced_offloads[0x1];
u8 ipoib_basic_offloads[0x1];
- u8 reserved_at_205[0x5];
+ u8 reserved_at_205[0x1];
+ u8 repeated_block_disabled[0x1];
+ u8 umr_modify_entity_size_disabled[0x1];
+ u8 umr_modify_atomic_disabled[0x1];
+ u8 umr_indirect_mkey_disabled[0x1];
u8 umr_fence[0x2];
u8 reserved_at_20c[0x3];
u8 drain_sigerr[0x1];
@@ -2748,12 +2775,17 @@ enum {
MLX5_MKC_ACCESS_MODE_MTT = 0x1,
MLX5_MKC_ACCESS_MODE_KLMS = 0x2,
MLX5_MKC_ACCESS_MODE_KSM = 0x3,
+ MLX5_MKC_ACCESS_MODE_MEMIC = 0x5,
};
struct mlx5_ifc_mkc_bits {
u8 reserved_at_0[0x1];
u8 free[0x1];
- u8 reserved_at_2[0xd];
+ u8 reserved_at_2[0x1];
+ u8 access_mode_4_2[0x3];
+ u8 reserved_at_6[0x7];
+ u8 relaxed_ordering_write[0x1];
+ u8 reserved_at_e[0x1];
u8 small_fence_on_rdma_read_response[0x1];
u8 umr_en[0x1];
u8 a[0x1];
@@ -2761,7 +2793,7 @@ struct mlx5_ifc_mkc_bits {
u8 rr[0x1];
u8 lw[0x1];
u8 lr[0x1];
- u8 access_mode[0x2];
+ u8 access_mode_1_0[0x2];
u8 reserved_at_18[0x8];
u8 qpn[0x18];
@@ -7397,7 +7429,12 @@ struct mlx5_ifc_set_pp_rate_limit_in_bits {
u8 rate_limit[0x20];
- u8 reserved_at_a0[0x160];
+ u8 burst_upper_bound[0x20];
+
+ u8 reserved_at_c0[0x10];
+ u8 typical_packet_size[0x10];
+
+ u8 reserved_at_e0[0x120];
};
struct mlx5_ifc_access_register_out_bits {
@@ -8951,4 +8988,57 @@ struct mlx5_ifc_destroy_vport_lag_in_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_alloc_memic_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_30[0x20];
+
+ u8 reserved_at_40[0x18];
+ u8 log_memic_addr_alignment[0x8];
+
+ u8 range_start_addr[0x40];
+
+ u8 range_size[0x20];
+
+ u8 memic_size[0x20];
+};
+
+struct mlx5_ifc_alloc_memic_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 memic_start_addr[0x40];
+};
+
+struct mlx5_ifc_dealloc_memic_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+
+ u8 memic_start_addr[0x40];
+
+ u8 memic_size[0x20];
+
+ u8 reserved_at_e0[0x20];
+};
+
+struct mlx5_ifc_dealloc_memic_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
#endif /* MLX5_IFC_H */
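[Editor note: the alloc/dealloc MEMIC layouts above pair with the MLX5_CMD_OP_ALLOC_MEMIC / MLX5_CMD_OP_DEALLOC_MEMIC opcodes added earlier in this file. Below is a hedged sketch of how a driver might drive the allocation path using the generic mlx5 command helpers; the alignment, range values and error handling are illustrative, only the field names and opcode come from the patch.]

    #include <linux/mlx5/driver.h>
    #include <linux/mlx5/mlx5_ifc.h>

    static int example_alloc_memic(struct mlx5_core_dev *dev, u64 range_start,
                                   u32 range_size, u32 alloc_size, u64 *addr)
    {
            u32 in[MLX5_ST_SZ_DW(alloc_memic_in)] = {};
            u32 out[MLX5_ST_SZ_DW(alloc_memic_out)] = {};
            int err;

            MLX5_SET(alloc_memic_in, in, opcode, MLX5_CMD_OP_ALLOC_MEMIC);
            MLX5_SET(alloc_memic_in, in, log_memic_addr_alignment, 12); /* 4K, illustrative */
            MLX5_SET64(alloc_memic_in, in, range_start_addr, range_start);
            MLX5_SET(alloc_memic_in, in, range_size, range_size);
            MLX5_SET(alloc_memic_in, in, memic_size, alloc_size);

            err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
            if (err)
                    return err;

            *addr = MLX5_GET64(alloc_memic_out, out, memic_start_addr);
            return 0;
    }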
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 415e09960017..a08cc7278980 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -119,10 +119,6 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
struct rdma_dev_addr *addr, void *context),
void *context);
-int rdma_resolve_ip_route(struct sockaddr *src_addr,
- const struct sockaddr *dst_addr,
- struct rdma_dev_addr *addr);
-
void rdma_addr_cancel(struct rdma_dev_addr *addr);
void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
@@ -133,11 +129,6 @@ int rdma_addr_size(struct sockaddr *addr);
int rdma_addr_size_in6(struct sockaddr_in6 *addr);
int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr);
-int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
- const union ib_gid *dgid,
- u8 *dmac, const struct net_device *ndev,
- int *hoplimit);
-
static inline u16 ib_addr_get_pkey(struct rdma_dev_addr *dev_addr)
{
return ((u16)dev_addr->broadcast[8] << 8) | (u16)dev_addr->broadcast[9];
diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h
index 385ec88ee9e5..eb49cc8d1f95 100644
--- a/include/rdma/ib_cache.h
+++ b/include/rdma/ib_cache.h
@@ -55,20 +55,6 @@ int ib_get_cached_gid(struct ib_device *device,
union ib_gid *gid,
struct ib_gid_attr *attr);
-/**
- * ib_find_cached_gid - Returns the port number and GID table index where
- * a specified GID value occurs.
- * @device: The device to query.
- * @gid: The GID value to search for.
- * @gid_type: The GID type to search for.
- * @ndev: In RoCE, the net device of the device. NULL means ignore.
- * @port_num: The port number of the device where the GID value was found.
- * @index: The index into the cached GID table where the GID was found. This
- * parameter may be NULL.
- *
- * ib_find_cached_gid() searches for the specified GID value in
- * the local software cache.
- */
int ib_find_cached_gid(struct ib_device *device,
const union ib_gid *gid,
enum ib_gid_type gid_type,
@@ -76,21 +62,6 @@ int ib_find_cached_gid(struct ib_device *device,
u8 *port_num,
u16 *index);
-/**
- * ib_find_cached_gid_by_port - Returns the GID table index where a specified
- * GID value occurs
- * @device: The device to query.
- * @gid: The GID value to search for.
- * @gid_type: The GID type to search for.
- * @port_num: The port number of the device where the GID value sould be
- * searched.
- * @ndev: In RoCE, the net device of the device. Null means ignore.
- * @index: The index into the cached GID table where the GID was found. This
- * parameter may be NULL.
- *
- * ib_find_cached_gid() searches for the specified GID value in
- * the local software cache.
- */
int ib_find_cached_gid_by_port(struct ib_device *device,
const union ib_gid *gid,
enum ib_gid_type gid_type,
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index 811cfcfcbe3d..bacb144f7780 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -163,7 +163,15 @@ struct sa_path_rec_ib {
u8 raw_traffic;
};
+/**
+ * struct sa_path_rec_roce - RoCE specific portion of the path record entry
+ * @route_resolved: When set, it indicates that this route is already
+ * resolved for this path record entry.
+ * @dmac: Destination mac address for the given DGID entry
+ * of the path record entry.
+ */
struct sa_path_rec_roce {
+ bool route_resolved;
u8 dmac[ETH_ALEN];
/* ignored in IB */
int ifindex;
@@ -590,6 +598,11 @@ static inline bool sa_path_is_roce(struct sa_path_rec *rec)
(rec->rec_type == SA_PATH_REC_TYPE_ROCE_V2));
}
+static inline bool sa_path_is_opa(struct sa_path_rec *rec)
+{
+ return (rec->rec_type == SA_PATH_REC_TYPE_OPA);
+}
+
static inline void sa_path_set_slid(struct sa_path_rec *rec, u32 slid)
{
if (rec->rec_type == SA_PATH_REC_TYPE_IB)
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 6eb174753acf..9fc8a825aa28 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -64,6 +64,8 @@
#include <linux/cgroup_rdma.h>
#include <uapi/rdma/ib_user_verbs.h>
#include <rdma/restrack.h>
+#include <uapi/rdma/rdma_user_ioctl.h>
+#include <uapi/rdma/ib_user_ioctl_verbs.h>
#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
@@ -90,8 +92,11 @@ enum ib_gid_type {
#define ROCE_V2_UDP_DPORT 4791
struct ib_gid_attr {
- enum ib_gid_type gid_type;
struct net_device *ndev;
+ struct ib_device *device;
+ enum ib_gid_type gid_type;
+ u16 index;
+ u8 port_num;
};
enum rdma_node_type {
@@ -316,6 +321,18 @@ struct ib_cq_caps {
u16 max_cq_moderation_period;
};
+struct ib_dm_mr_attr {
+ u64 length;
+ u64 offset;
+ u32 access_flags;
+};
+
+struct ib_dm_alloc_attr {
+ u64 length;
+ u32 alignment;
+ u32 flags;
+};
+
struct ib_device_attr {
u64 fw_ver;
__be64 sys_image_guid;
@@ -367,6 +384,7 @@ struct ib_device_attr {
u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */
struct ib_tm_caps tm_caps;
struct ib_cq_caps cq_caps;
+ u64 max_dm_size;
};
enum ib_mtu {
@@ -469,6 +487,9 @@ enum ib_port_speed {
/**
* struct rdma_hw_stats
+ * @lock - Mutex to protect parallel write access to lifespan and values
+ * of counters, which are 64 bits and not guaranteed to be written
+ * atomically on 32-bit systems.
* @timestamp - Used by the core code to track when the last update was
* @lifespan - Used by the core code to determine how old the counters
* should be before being updated again. Stored in jiffies, defaults
@@ -484,6 +505,7 @@ enum ib_port_speed {
* filled in by the drivers get_stats routine
*/
struct rdma_hw_stats {
+ struct mutex lock; /* Protect lifespan and values[] */
unsigned long timestamp;
unsigned long lifespan;
const char * const *names;
@@ -1755,6 +1777,14 @@ struct ib_qp {
struct rdma_restrack_entry res;
};
+struct ib_dm {
+ struct ib_device *device;
+ u32 length;
+ u32 flags;
+ struct ib_uobject *uobject;
+ atomic_t usecnt;
+};
+
struct ib_mr {
struct ib_device *device;
struct ib_pd *pd;
@@ -1768,6 +1798,13 @@ struct ib_mr {
struct ib_uobject *uobject; /* user */
struct list_head qp_entry; /* FR */
};
+
+ struct ib_dm *dm;
+
+ /*
+ * Implementation details of the RDMA core, don't use in drivers:
+ */
+ struct rdma_restrack_entry res;
};
struct ib_mw {
@@ -1810,6 +1847,7 @@ enum ib_flow_spec_type {
/* L3 header*/
IB_FLOW_SPEC_IPV4 = 0x30,
IB_FLOW_SPEC_IPV6 = 0x31,
+ IB_FLOW_SPEC_ESP = 0x34,
/* L4 headers*/
IB_FLOW_SPEC_TCP = 0x40,
IB_FLOW_SPEC_UDP = 0x41,
@@ -1818,6 +1856,7 @@ enum ib_flow_spec_type {
/* Actions */
IB_FLOW_SPEC_ACTION_TAG = 0x1000,
IB_FLOW_SPEC_ACTION_DROP = 0x1001,
+ IB_FLOW_SPEC_ACTION_HANDLE = 0x1002,
};
#define IB_FLOW_SPEC_LAYER_MASK 0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 8
@@ -1835,7 +1874,8 @@ enum ib_flow_domain {
enum ib_flow_flags {
IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
- IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 2 /* Must be last */
+ IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
+ IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 3 /* Must be last */
};
struct ib_flow_eth_filter {
@@ -1940,6 +1980,20 @@ struct ib_flow_spec_tunnel {
struct ib_flow_tunnel_filter mask;
};
+struct ib_flow_esp_filter {
+ __be32 spi;
+ __be32 seq;
+ /* Must be last */
+ u8 real_sz[0];
+};
+
+struct ib_flow_spec_esp {
+ u32 type;
+ u16 size;
+ struct ib_flow_esp_filter val;
+ struct ib_flow_esp_filter mask;
+};
+
struct ib_flow_spec_action_tag {
enum ib_flow_spec_type type;
u16 size;
@@ -1951,6 +2005,12 @@ struct ib_flow_spec_action_drop {
u16 size;
};
+struct ib_flow_spec_action_handle {
+ enum ib_flow_spec_type type;
+ u16 size;
+ struct ib_flow_action *act;
+};
+
union ib_flow_spec {
struct {
u32 type;
@@ -1962,8 +2022,10 @@ union ib_flow_spec {
struct ib_flow_spec_tcp_udp tcp_udp;
struct ib_flow_spec_ipv6 ipv6;
struct ib_flow_spec_tunnel tunnel;
+ struct ib_flow_spec_esp esp;
struct ib_flow_spec_action_tag flow_tag;
struct ib_flow_spec_action_drop drop;
+ struct ib_flow_spec_action_handle action;
};
struct ib_flow_attr {
@@ -1984,6 +2046,64 @@ struct ib_flow {
struct ib_uobject *uobject;
};
+enum ib_flow_action_type {
+ IB_FLOW_ACTION_UNSPECIFIED,
+ IB_FLOW_ACTION_ESP = 1,
+};
+
+struct ib_flow_action_attrs_esp_keymats {
+ enum ib_uverbs_flow_action_esp_keymat protocol;
+ union {
+ struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
+ } keymat;
+};
+
+struct ib_flow_action_attrs_esp_replays {
+ enum ib_uverbs_flow_action_esp_replay protocol;
+ union {
+ struct ib_uverbs_flow_action_esp_replay_bmp bmp;
+ } replay;
+};
+
+enum ib_flow_action_attrs_esp_flags {
+ /* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
+ * This is done in order to share the same flags between user-space and
+ * kernel and spare an unnecessary translation.
+ */
+
+ /* Kernel flags */
+ IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED = 1ULL << 32,
+ IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS = 1ULL << 33,
+};
+
+struct ib_flow_spec_list {
+ struct ib_flow_spec_list *next;
+ union ib_flow_spec spec;
+};
+
+struct ib_flow_action_attrs_esp {
+ struct ib_flow_action_attrs_esp_keymats *keymat;
+ struct ib_flow_action_attrs_esp_replays *replay;
+ struct ib_flow_spec_list *encap;
+ /* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
+ * Value of 0 is a valid value.
+ */
+ u32 esn;
+ u32 spi;
+ u32 seq;
+ u32 tfc_pad;
+ /* Use enum ib_flow_action_attrs_esp_flags */
+ u64 flags;
+ u64 hard_limit_pkts;
+};
+
+struct ib_flow_action {
+ struct ib_device *device;
+ struct ib_uobject *uobject;
+ enum ib_flow_action_type type;
+ atomic_t usecnt;
+};
+
struct ib_mad_hdr;
struct ib_grh;
@@ -2060,6 +2180,8 @@ struct ib_port_pkey_list {
struct list_head pkey_list;
};
+struct uverbs_attr_bundle;
+
struct ib_device {
/* Do not access @dma_device directly from ULP nor from HW drivers. */
struct device *dma_device;
@@ -2127,37 +2249,36 @@ struct ib_device {
*/
struct net_device *(*get_netdev)(struct ib_device *device,
u8 port_num);
+ /* query_gid should return the GID value for @device when the
+ * @port_num link layer is either IB or iWARP. It is a no-op if the
+ * @port_num port is a RoCE link layer.
+ */
int (*query_gid)(struct ib_device *device,
u8 port_num, int index,
union ib_gid *gid);
- /* When calling add_gid, the HW vendor's driver should
- * add the gid of device @device at gid index @index of
- * port @port_num to be @gid. Meta-info of that gid (for example,
- * the network device related to this gid is available
- * at @attr. @context allows the HW vendor driver to store extra
- * information together with a GID entry. The HW vendor may allocate
- * memory to contain this information and store it in @context when a
- * new GID entry is written to. Params are consistent until the next
- * call of add_gid or delete_gid. The function should return 0 on
+ /* When calling add_gid, the HW vendor's driver should add the gid
+ * of device of port at gid index available at @attr. Meta-info of
+ * that gid (for example, the network device related to this gid) is
+ * available at @attr. @context allows the HW vendor driver to store
+ * extra information together with a GID entry. The HW vendor driver may
+ * allocate memory to contain this information and store it in @context
+ * when a new GID entry is written to. Params are consistent until the
+ * next call of add_gid or delete_gid. The function should return 0 on
* success or error otherwise. The function could be called
- * concurrently for different ports. This function is only called
- * when roce_gid_table is used.
+ * concurrently for different ports. This function is only called when
+ * roce_gid_table is used.
*/
- int (*add_gid)(struct ib_device *device,
- u8 port_num,
- unsigned int index,
- const union ib_gid *gid,
+ int (*add_gid)(const union ib_gid *gid,
const struct ib_gid_attr *attr,
void **context);
/* When calling del_gid, the HW vendor's driver should delete the
- * gid of device @device at gid index @index of port @port_num.
+ * gid of device @device at gid index gid_index of port port_num
+ * available in @attr.
* Upon the deletion of a GID entry, the HW vendor must free any
* allocated memory. The caller will clear @context afterwards.
* This function is only called when roce_gid_table is used.
*/
- int (*del_gid)(struct ib_device *device,
- u8 port_num,
- unsigned int index,
+ int (*del_gid)(const struct ib_gid_attr *attr,
void **context);
int (*query_pkey)(struct ib_device *device,
u8 port_num, u16 index, u16 *pkey);
@@ -2315,6 +2436,21 @@ struct ib_device {
struct ib_rwq_ind_table_init_attr *init_attr,
struct ib_udata *udata);
int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
+ struct ib_flow_action * (*create_flow_action_esp)(struct ib_device *device,
+ const struct ib_flow_action_attrs_esp *attr,
+ struct uverbs_attr_bundle *attrs);
+ int (*destroy_flow_action)(struct ib_flow_action *action);
+ int (*modify_flow_action_esp)(struct ib_flow_action *action,
+ const struct ib_flow_action_attrs_esp *attr,
+ struct uverbs_attr_bundle *attrs);
+ struct ib_dm * (*alloc_dm)(struct ib_device *device,
+ struct ib_ucontext *context,
+ struct ib_dm_alloc_attr *attr,
+ struct uverbs_attr_bundle *attrs);
+ int (*dealloc_dm)(struct ib_dm *dm);
+ struct ib_mr * (*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
+ struct ib_dm_mr_attr *attr,
+ struct uverbs_attr_bundle *attrs);
/**
* rdma netdev operation
*
@@ -2376,6 +2512,7 @@ struct ib_device {
int comp_vector);
struct uverbs_root_spec *specs_root;
+ enum rdma_driver_id driver_id;
};
struct ib_client {
@@ -2435,11 +2572,9 @@ static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len
return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}
-static inline bool ib_is_udata_cleared(struct ib_udata *udata,
- size_t offset,
- size_t len)
+static inline bool ib_is_buffer_cleared(const void __user *p,
+ size_t len)
{
- const void __user *p = udata->inbuf + offset;
bool ret;
u8 *buf;
@@ -2455,6 +2590,13 @@ static inline bool ib_is_udata_cleared(struct ib_udata *udata,
return ret;
}
+static inline bool ib_is_udata_cleared(struct ib_udata *udata,
+ size_t offset,
+ size_t len)
+{
+ return ib_is_buffer_cleared(udata->inbuf + offset, len);
+}
+
/**
* ib_modify_qp_is_ok - Check that the supplied attribute mask
* contains all required attributes and no attributes not allowed for
@@ -2471,9 +2613,9 @@ static inline bool ib_is_udata_cleared(struct ib_udata *udata,
* transition from cur_state to next_state is allowed by the IB spec,
* and that the attribute mask supplied is allowed for the transition.
*/
-int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
- enum ib_qp_type type, enum ib_qp_attr_mask mask,
- enum rdma_link_layer ll);
+bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
+ enum ib_qp_type type, enum ib_qp_attr_mask mask,
+ enum rdma_link_layer ll);
void ib_register_event_handler(struct ib_event_handler *event_handler);
void ib_unregister_event_handler(struct ib_event_handler *event_handler);
@@ -2848,7 +2990,7 @@ int ib_modify_port(struct ib_device *device,
struct ib_port_modify *port_modify);
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
- struct net_device *ndev, u8 *port_num, u16 *index);
+ u8 *port_num, u16 *index);
int ib_find_pkey(struct ib_device *device,
u8 port_num, u16 pkey, u16 *index);
@@ -3217,18 +3359,6 @@ static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
}
/**
- * ib_peek_cq - Returns the number of unreaped completions currently
- * on the specified CQ.
- * @cq: The CQ to peek.
- * @wc_cnt: A minimum number of unreaped completions to check for.
- *
- * If the number of unreaped completions is greater than or equal to wc_cnt,
- * this function returns wc_cnt, otherwise, it returns the actual number of
- * unreaped completions.
- */
-int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
-
-/**
* ib_req_notify_cq - Request completion notification on a CQ.
* @cq: The CQ to generate an event for.
* @flags:
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 6538a5cc27b6..690934733ba7 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -38,6 +38,7 @@
#include <linux/in6.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_sa.h>
+#include <uapi/rdma/rdma_user_cm.h>
/*
* Upon receiving a device removal event, users must destroy the associated
@@ -64,14 +65,6 @@ enum rdma_cm_event_type {
const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event);
-enum rdma_port_space {
- RDMA_PS_SDP = 0x0001,
- RDMA_PS_IPOIB = 0x0002,
- RDMA_PS_IB = 0x013F,
- RDMA_PS_TCP = 0x0106,
- RDMA_PS_UDP = 0x0111,
-};
-
#define RDMA_IB_IP_PS_MASK 0xFFFFFFFFFFFF0000ULL
#define RDMA_IB_IP_PS_TCP 0x0000000001060000ULL
#define RDMA_IB_IP_PS_UDP 0x0000000001110000ULL
@@ -120,20 +113,6 @@ struct rdma_cm_event {
} param;
};
-enum rdma_cm_state {
- RDMA_CM_IDLE,
- RDMA_CM_ADDR_QUERY,
- RDMA_CM_ADDR_RESOLVED,
- RDMA_CM_ROUTE_QUERY,
- RDMA_CM_ROUTE_RESOLVED,
- RDMA_CM_CONNECT,
- RDMA_CM_DISCONNECT,
- RDMA_CM_ADDR_BOUND,
- RDMA_CM_LISTEN,
- RDMA_CM_DEVICE_REMOVAL,
- RDMA_CM_DESTROYING
-};
-
struct rdma_cm_id;
/**
@@ -152,11 +131,17 @@ struct rdma_cm_id {
struct ib_qp *qp;
rdma_cm_event_handler event_handler;
struct rdma_route route;
- enum rdma_port_space ps;
+ enum rdma_ucm_port_space ps;
enum ib_qp_type qp_type;
u8 port_num;
};
+struct rdma_cm_id *__rdma_create_id(struct net *net,
+ rdma_cm_event_handler event_handler,
+ void *context, enum rdma_ucm_port_space ps,
+ enum ib_qp_type qp_type,
+ const char *caller);
+
/**
* rdma_create_id - Create an RDMA identifier.
*
@@ -169,10 +154,9 @@ struct rdma_cm_id {
*
* The id holds a reference on the network namespace until it is destroyed.
*/
-struct rdma_cm_id *rdma_create_id(struct net *net,
- rdma_cm_event_handler event_handler,
- void *context, enum rdma_port_space ps,
- enum ib_qp_type qp_type);
+#define rdma_create_id(net, event_handler, context, ps, qp_type) \
+ __rdma_create_id((net), (event_handler), (context), (ps), (qp_type), \
+ KBUILD_MODNAME)
/**
* rdma_destroy_id - Destroys an RDMA identifier.
@@ -284,6 +268,9 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
*/
int rdma_listen(struct rdma_cm_id *id, int backlog);
+int __rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
+ const char *caller);
+
/**
* rdma_accept - Called to accept a connection request or response.
* @id: Connection identifier associated with the request.
@@ -299,7 +286,8 @@ int rdma_listen(struct rdma_cm_id *id, int backlog);
* state of the qp associated with the id is modified to error, such that any
* previously posted receive buffers would be flushed.
*/
-int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
+#define rdma_accept(id, conn_param) \
+ __rdma_accept((id), (conn_param), KBUILD_MODNAME)
/**
* rdma_notify - Notifies the RDMA CM of an asynchronous event that has
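[Editor note: with this change rdma_create_id() and rdma_accept() become macros that forward KBUILD_MODNAME to the new __rdma_create_id()/__rdma_accept() helpers, so call sites keep their old shape while the core can attribute kernel-owned CM IDs to a module. A small hedged sketch follows; the handler, port space and QP type are chosen arbitrarily for illustration.]

    #include <rdma/rdma_cm.h>

    static int example_cm_handler(struct rdma_cm_id *id,
                                  struct rdma_cm_event *event)
    {
            return 0;       /* illustrative no-op handler */
    }

    static struct rdma_cm_id *example_create_id(struct net *net)
    {
            /* Expands to __rdma_create_id(net, example_cm_handler, NULL,
             *                             RDMA_PS_TCP, IB_QPT_RC, KBUILD_MODNAME) */
            return rdma_create_id(net, example_cm_handler, NULL,
                                  RDMA_PS_TCP, IB_QPT_RC);
    }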
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index 4118324a0310..3f4c187e435d 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -538,7 +538,7 @@ static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
struct rvt_dev_info *rvt_alloc_device(size_t size, int nports);
void rvt_dealloc_device(struct rvt_dev_info *rdi);
-int rvt_register_device(struct rvt_dev_info *rvd);
+int rvt_register_device(struct rvt_dev_info *rvd, u32 driver_id);
void rvt_unregister_device(struct rvt_dev_info *rvd);
int rvt_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr);
int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port,
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
index 2cdf8dcf4bdc..f3b3e3576f6a 100644
--- a/include/rdma/restrack.h
+++ b/include/rdma/restrack.h
@@ -11,6 +11,7 @@
#include <linux/sched.h>
#include <linux/kref.h>
#include <linux/completion.h>
+#include <linux/sched/task.h>
/**
* enum rdma_restrack_type - HW objects to track
@@ -29,6 +30,14 @@ enum rdma_restrack_type {
*/
RDMA_RESTRACK_QP,
/**
+ * @RDMA_RESTRACK_CM_ID: Connection Manager ID (CM_ID)
+ */
+ RDMA_RESTRACK_CM_ID,
+ /**
+ * @RDMA_RESTRACK_MR: Memory Region (MR)
+ */
+ RDMA_RESTRACK_MR,
+ /**
* @RDMA_RESTRACK_MAX: Last entry, used for array dclarations
*/
RDMA_RESTRACK_MAX
@@ -146,8 +155,23 @@ static inline bool rdma_is_kernel_res(struct rdma_restrack_entry *res)
int __must_check rdma_restrack_get(struct rdma_restrack_entry *res);
/**
- * rdma_restrack_put() - relase resource
+ * rdma_restrack_put() - release resource
* @res: resource entry
*/
int rdma_restrack_put(struct rdma_restrack_entry *res);
+
+/**
+ * rdma_restrack_set_task() - set the task for this resource
+ * @res: resource entry
+ * @task: task struct
+ */
+static inline void rdma_restrack_set_task(struct rdma_restrack_entry *res,
+ struct task_struct *task)
+{
+ if (res->task)
+ put_task_struct(res->task);
+ get_task_struct(task);
+ res->task = task;
+}
+
#endif /* _RDMA_RESTRACK_H_ */
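[Editor note: rdma_restrack_set_task() above swaps the task reference held by a tracked resource. A minimal hedged sketch of its use; the resource object is assumed to come from the owning core/ULP code.]

    /* restrack.h now pulls in <linux/sched/task.h>, so get_task_struct()/
     * put_task_struct() and "current" are available to callers. */
    #include <rdma/restrack.h>

    static void example_reown_res(struct rdma_restrack_entry *res)
    {
            /* Releases any previously attached task and pins "current". */
            rdma_restrack_set_task(res, current);
    }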
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h
index 38287d9d23a1..4a4201d997a7 100644
--- a/include/rdma/uverbs_ioctl.h
+++ b/include/rdma/uverbs_ioctl.h
@@ -37,6 +37,7 @@
#include <linux/uaccess.h>
#include <rdma/rdma_user_ioctl.h>
#include <rdma/ib_user_ioctl_verbs.h>
+#include <rdma/ib_user_ioctl_cmds.h>
/*
* =======================================
@@ -50,6 +51,7 @@ enum uverbs_attr_type {
UVERBS_ATTR_TYPE_PTR_OUT,
UVERBS_ATTR_TYPE_IDR,
UVERBS_ATTR_TYPE_FD,
+ UVERBS_ATTR_TYPE_ENUM_IN,
};
enum uverbs_obj_access {
@@ -61,15 +63,32 @@ enum uverbs_obj_access {
enum {
UVERBS_ATTR_SPEC_F_MANDATORY = 1U << 0,
- /* Support extending attributes by length */
- UVERBS_ATTR_SPEC_F_MIN_SZ = 1U << 1,
+ /* Support extending attributes by length, validate all unknown size == zero */
+ UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO = 1U << 1,
};
+/* Specification of a single attribute inside the ioctl message */
struct uverbs_attr_spec {
- enum uverbs_attr_type type;
union {
- u16 len;
+ /* Header shared by all following union members - to reduce space. */
struct {
+ enum uverbs_attr_type type;
+ /* Combination of bits from enum UVERBS_ATTR_SPEC_F_XXXX */
+ u8 flags;
+ };
+ struct {
+ enum uverbs_attr_type type;
+ /* Combination of bits from enum UVERBS_ATTR_SPEC_F_XXXX */
+ u8 flags;
+ /* Current known size to kernel */
+ u16 len;
+ /* User isn't allowed to provide something < min_len */
+ u16 min_len;
+ } ptr;
+ struct {
+ enum uverbs_attr_type type;
+ /* Combination of bits from enum UVERBS_ATTR_SPEC_F_XXXX */
+ u8 flags;
/*
* higher bits mean the namespace and lower bits mean
* the type id within the namespace.
@@ -77,9 +96,19 @@ struct uverbs_attr_spec {
u16 obj_type;
u8 access;
} obj;
+ struct {
+ enum uverbs_attr_type type;
+ /* Combination of bits from enum UVERBS_ATTR_SPEC_F_XXXX */
+ u8 flags;
+ u8 num_elems;
+ /*
+ * The enum attribute can select one of the attributes
+ * contained in the ids array. Currently only PTR_IN
+ * attributes are supported in the ids array.
+ */
+ const struct uverbs_attr_spec *ids;
+ } enum_def;
};
- /* Combination of bits from enum UVERBS_ATTR_SPEC_F_XXXX */
- u8 flags;
};
struct uverbs_attr_spec_hash {
@@ -164,30 +193,45 @@ struct uverbs_object_tree_def {
};
#define UA_FLAGS(_flags) .flags = _flags
-#define __UVERBS_ATTR0(_id, _len, _type, ...) \
+#define __UVERBS_ATTR0(_id, _type, _fld, _attr, ...) \
((const struct uverbs_attr_def) \
- {.id = _id, .attr = {.type = _type, {.len = _len}, .flags = 0, } })
-#define __UVERBS_ATTR1(_id, _len, _type, _flags) \
+ {.id = _id, .attr = {{._fld = {.type = _type, _attr, .flags = 0, } }, } })
+#define __UVERBS_ATTR1(_id, _type, _fld, _attr, _extra1, ...) \
((const struct uverbs_attr_def) \
- {.id = _id, .attr = {.type = _type, {.len = _len}, _flags, } })
-#define __UVERBS_ATTR(_id, _len, _type, _flags, _n, ...) \
- __UVERBS_ATTR##_n(_id, _len, _type, _flags)
+ {.id = _id, .attr = {{._fld = {.type = _type, _attr, _extra1 } },} })
+#define __UVERBS_ATTR2(_id, _type, _fld, _attr, _extra1, _extra2) \
+ ((const struct uverbs_attr_def) \
+ {.id = _id, .attr = {{._fld = {.type = _type, _attr, _extra1, _extra2 } },} })
+#define __UVERBS_ATTR(_id, _type, _fld, _attr, _extra1, _extra2, _n, ...) \
+ __UVERBS_ATTR##_n(_id, _type, _fld, _attr, _extra1, _extra2)
+
+#define UVERBS_ATTR_TYPE(_type) \
+ .min_len = sizeof(_type), .len = sizeof(_type)
+#define UVERBS_ATTR_STRUCT(_type, _last) \
+ .min_len = ((uintptr_t)(&((_type *)0)->_last + 1)), .len = sizeof(_type)
+#define UVERBS_ATTR_SIZE(_min_len, _len) \
+ .min_len = _min_len, .len = _len
+
/*
* In new compiler, UVERBS_ATTR could be simplified by declaring it as
* [_id] = {.type = _type, .len = _len, ##__VA_ARGS__}
* But since we support older compilers too, we need the more complex code.
*/
-#define UVERBS_ATTR(_id, _len, _type, ...) \
- __UVERBS_ATTR(_id, _len, _type, ##__VA_ARGS__, 1, 0)
+#define UVERBS_ATTR(_id, _type, _fld, _attr, ...) \
+ __UVERBS_ATTR(_id, _type, _fld, _attr, ##__VA_ARGS__, 2, 1, 0)
#define UVERBS_ATTR_PTR_IN_SZ(_id, _len, ...) \
- UVERBS_ATTR(_id, _len, UVERBS_ATTR_TYPE_PTR_IN, ##__VA_ARGS__)
+ UVERBS_ATTR(_id, UVERBS_ATTR_TYPE_PTR_IN, ptr, _len, ##__VA_ARGS__)
/* If sizeof(_type) <= sizeof(u64), this will be inlined rather than a pointer */
#define UVERBS_ATTR_PTR_IN(_id, _type, ...) \
- UVERBS_ATTR_PTR_IN_SZ(_id, sizeof(_type), ##__VA_ARGS__)
+ UVERBS_ATTR_PTR_IN_SZ(_id, _type, ##__VA_ARGS__)
#define UVERBS_ATTR_PTR_OUT_SZ(_id, _len, ...) \
- UVERBS_ATTR(_id, _len, UVERBS_ATTR_TYPE_PTR_OUT, ##__VA_ARGS__)
+ UVERBS_ATTR(_id, UVERBS_ATTR_TYPE_PTR_OUT, ptr, _len, ##__VA_ARGS__)
#define UVERBS_ATTR_PTR_OUT(_id, _type, ...) \
- UVERBS_ATTR_PTR_OUT_SZ(_id, sizeof(_type), ##__VA_ARGS__)
+ UVERBS_ATTR_PTR_OUT_SZ(_id, _type, ##__VA_ARGS__)
+#define UVERBS_ATTR_ENUM_IN(_id, _enum_arr, ...) \
+ UVERBS_ATTR(_id, UVERBS_ATTR_TYPE_ENUM_IN, enum_def, \
+ .ids = (_enum_arr), \
+ .num_elems = ARRAY_SIZE(_enum_arr), ##__VA_ARGS__)
/*
* In new compiler, UVERBS_ATTR_IDR (and FD) could be simplified by declaring
@@ -202,15 +246,13 @@ struct uverbs_object_tree_def {
#define ___UVERBS_ATTR_OBJ0(_id, _obj_class, _obj_type, _access, ...)\
((const struct uverbs_attr_def) \
{.id = _id, \
- .attr = {.type = _obj_class, \
- {.obj = {.obj_type = _obj_type, .access = _access } },\
- .flags = 0} })
+ .attr = { {.obj = {.type = _obj_class, .obj_type = _obj_type, \
+ .access = _access, .flags = 0 } }, } })
#define ___UVERBS_ATTR_OBJ1(_id, _obj_class, _obj_type, _access, _flags)\
((const struct uverbs_attr_def) \
{.id = _id, \
- .attr = {.type = _obj_class, \
- {.obj = {.obj_type = _obj_type, .access = _access} }, \
- _flags} })
+ .attr = { {.obj = {.type = _obj_class, .obj_type = _obj_type, \
+ .access = _access, _flags} }, } })
#define ___UVERBS_ATTR_OBJ(_id, _obj_class, _obj_type, _access, _flags, \
_n, ...) \
___UVERBS_ATTR_OBJ##_n(_id, _obj_class, _obj_type, _access, _flags)
@@ -229,6 +271,11 @@ struct uverbs_object_tree_def {
#define DECLARE_UVERBS_ATTR_SPEC(_name, ...) \
const struct uverbs_attr_def _name = __VA_ARGS__
+#define DECLARE_UVERBS_ENUM(_name, ...) \
+ const struct uverbs_enum_spec _name = { \
+ .len = ARRAY_SIZE(((struct uverbs_attr_spec[]){__VA_ARGS__})),\
+ .ids = {__VA_ARGS__}, \
+ }
#define _UVERBS_METHOD_ATTRS_SZ(...) \
(sizeof((const struct uverbs_attr_def * const []){__VA_ARGS__}) /\
sizeof(const struct uverbs_attr_def *))
@@ -280,6 +327,7 @@ struct uverbs_ptr_attr {
u16 len;
/* Combination of bits from enum UVERBS_ATTR_F_XXXX */
u16 flags;
+ u8 enum_id;
};
struct uverbs_obj_attr {
@@ -336,6 +384,8 @@ static inline bool uverbs_attr_is_valid(const struct uverbs_attr_bundle *attrs_b
idx & ~UVERBS_ID_NS_MASK);
}
+#define IS_UVERBS_COPY_ERR(_ret) ((_ret) && (_ret) != -ENOENT)
+
static inline const struct uverbs_attr *uverbs_attr_get(const struct uverbs_attr_bundle *attrs_bundle,
u16 idx)
{
@@ -347,6 +397,29 @@ static inline const struct uverbs_attr *uverbs_attr_get(const struct uverbs_attr
return &attrs_bundle->hash[idx_bucket].attrs[idx & ~UVERBS_ID_NS_MASK];
}
+static inline int uverbs_attr_get_enum_id(const struct uverbs_attr_bundle *attrs_bundle,
+ u16 idx)
+{
+ const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx);
+
+ if (IS_ERR(attr))
+ return PTR_ERR(attr);
+
+ return attr->ptr_attr.enum_id;
+}
+
+static inline void *uverbs_attr_get_obj(const struct uverbs_attr_bundle *attrs_bundle,
+ u16 idx)
+{
+ struct ib_uobject *uobj =
+ uverbs_attr_get(attrs_bundle, idx)->obj_attr.uobject;
+
+ if (IS_ERR(uobj))
+ return uobj;
+
+ return uobj->object;
+}
+
static inline int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle,
size_t idx, const void *from, size_t size)
{
@@ -385,8 +458,8 @@ static inline int _uverbs_copy_from(void *to,
/*
* Validation ensures attr->ptr_attr.len >= size. If the caller is
- * using UVERBS_ATTR_SPEC_F_MIN_SZ then it must call copy_from with
- * the right size.
+ * using UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO then it must call
+ * uverbs_copy_from_or_zero.
*/
if (unlikely(size < attr->ptr_attr.len))
return -EINVAL;
@@ -400,9 +473,37 @@ static inline int _uverbs_copy_from(void *to,
return 0;
}
+static inline int _uverbs_copy_from_or_zero(void *to,
+ const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx,
+ size_t size)
+{
+ const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx);
+ size_t min_size;
+
+ if (IS_ERR(attr))
+ return PTR_ERR(attr);
+
+ min_size = min_t(size_t, size, attr->ptr_attr.len);
+
+ if (uverbs_attr_ptr_is_inline(attr))
+ memcpy(to, &attr->ptr_attr.data, min_size);
+ else if (copy_from_user(to, u64_to_user_ptr(attr->ptr_attr.data),
+ min_size))
+ return -EFAULT;
+
+ if (size > min_size)
+ memset(to + min_size, 0, size - min_size);
+
+ return 0;
+}
+
#define uverbs_copy_from(to, attrs_bundle, idx) \
_uverbs_copy_from(to, attrs_bundle, idx, sizeof(*to))
+#define uverbs_copy_from_or_zero(to, attrs_bundle, idx) \
+ _uverbs_copy_from_or_zero(to, attrs_bundle, idx, sizeof(*to))
+
/* =================================================
* Definitions -> Specs infrastructure
* =================================================
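[Editor note: the MIN_SZ flag is renamed to UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO and pairs with uverbs_copy_from_or_zero(): the kernel copies min(kernel size, user size) and zero-fills the remainder, so older user-space passing a shorter struct keeps working. A hedged sketch follows; "struct example_cmd" and "attr_id" are hypothetical, the copy/zero-fill behaviour is exactly what _uverbs_copy_from_or_zero() above implements.]

    /* A matching spec would be declared along the lines of:
     *   UVERBS_ATTR_PTR_IN(attr_id,
     *                      UVERBS_ATTR_STRUCT(struct example_cmd, new_field),
     *                      UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY |
     *                               UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO))
     */
    #include <rdma/uverbs_ioctl.h>

    struct example_cmd {
            __u32 flags;
            __u32 new_field;        /* older user-space may not send this */
    };

    static int example_parse(const struct uverbs_attr_bundle *attrs, u16 attr_id)
    {
            struct example_cmd cmd;
            int ret;

            ret = uverbs_copy_from_or_zero(&cmd, attrs, attr_id);
            if (ret)
                    return ret;

            /* new_field reads as 0 when the user passed the short struct */
            return cmd.new_field ? -EOPNOTSUPP : 0;
    }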
diff --git a/include/rdma/uverbs_named_ioctl.h b/include/rdma/uverbs_named_ioctl.h
new file mode 100644
index 000000000000..c5bb4ebdb0b0
--- /dev/null
+++ b/include/rdma/uverbs_named_ioctl.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _UVERBS_NAMED_IOCTL_
+#define _UVERBS_NAMED_IOCTL_
+
+#include <rdma/uverbs_ioctl.h>
+
+#ifndef UVERBS_MODULE_NAME
+#error "Please #define UVERBS_MODULE_NAME before including rdma/uverbs_named_ioctl.h"
+#endif
+
+#define _UVERBS_PASTE(x, y) x ## y
+#define _UVERBS_NAME(x, y) _UVERBS_PASTE(x, y)
+#define UVERBS_METHOD(id) _UVERBS_NAME(UVERBS_MODULE_NAME, _method_##id)
+#define UVERBS_HANDLER(id) _UVERBS_NAME(UVERBS_MODULE_NAME, _handler_##id)
+
+#define DECLARE_UVERBS_NAMED_METHOD(id, ...) \
+ DECLARE_UVERBS_METHOD(UVERBS_METHOD(id), id, UVERBS_HANDLER(id), ##__VA_ARGS__)
+
+#define DECLARE_UVERBS_NAMED_METHOD_WITH_HANDLER(id, handler, ...) \
+ DECLARE_UVERBS_METHOD(UVERBS_METHOD(id), id, handler, ##__VA_ARGS__)
+
+#define DECLARE_UVERBS_NAMED_METHOD_NO_OVERRIDE(id, handler, ...) \
+ DECLARE_UVERBS_METHOD(UVERBS_METHOD(id), id, NULL, ##__VA_ARGS__)
+
+#define DECLARE_UVERBS_NAMED_OBJECT(id, ...) \
+ DECLARE_UVERBS_OBJECT(UVERBS_OBJECT(id), id, ##__VA_ARGS__)
+
+#define _UVERBS_COMP_NAME(x, y, z) _UVERBS_NAME(_UVERBS_NAME(x, y), z)
+
+#define UVERBS_NO_OVERRIDE NULL
+
+/* This declares a parsing tree with one object and one method. This is usually
+ * used for merging driver attributes to the common attributes. The driver has
+ * a chance to override the handler and type attrs of the original object.
+ * The __VA_ARGS__ just contains a list of attributes.
+ */
+#define ADD_UVERBS_ATTRIBUTES(_name, _object, _method, _type_attrs, _handler, ...) \
+static DECLARE_UVERBS_METHOD(_UVERBS_COMP_NAME(UVERBS_MODULE_NAME, \
+ _method_, _name), \
+ _method, _handler, ##__VA_ARGS__); \
+ \
+static DECLARE_UVERBS_OBJECT(_UVERBS_COMP_NAME(UVERBS_MODULE_NAME, \
+ _object_, _name), \
+ _object, _type_attrs, \
+ &_UVERBS_COMP_NAME(UVERBS_MODULE_NAME, \
+ _method_, _name)); \
+ \
+static DECLARE_UVERBS_OBJECT_TREE(_name, \
+ &_UVERBS_COMP_NAME(UVERBS_MODULE_NAME, \
+ _object_, _name))
+
+/* A very common use case is that the driver doesn't override the handler and
+ * type_attrs. Therefore, we provide a simplified macro for this common case.
+ */
+#define ADD_UVERBS_ATTRIBUTES_SIMPLE(_name, _object, _method, ...) \
+ ADD_UVERBS_ATTRIBUTES(_name, _object, _method, UVERBS_NO_OVERRIDE, \
+ UVERBS_NO_OVERRIDE, ##__VA_ARGS__)
+
+#endif
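[Editor note: a short hedged sketch of how a driver is expected to consume the new header; the method name and handler body are hypothetical, and the handler prototype is assumed from the uverbs ioctl infrastructure of this series rather than taken from this patch.]

    #define UVERBS_MODULE_NAME example_drv
    #include <rdma/uverbs_named_ioctl.h>

    /* With the define above:
     *   UVERBS_METHOD(FOO)  -> example_drv_method_FOO
     *   UVERBS_HANDLER(FOO) -> example_drv_handler_FOO
     * so DECLARE_UVERBS_NAMED_METHOD(FOO, ...attrs...) would emit the method
     * object and wire it to the handler defined below. */

    static int UVERBS_HANDLER(FOO)(struct ib_device *ib_dev,
                                   struct ib_uverbs_file *file,
                                   struct uverbs_attr_bundle *attrs)
    {
            return 0;       /* illustrative no-op */
    }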
diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h
index 5f8e20bbd67c..9d56cdb84655 100644
--- a/include/rdma/uverbs_std_types.h
+++ b/include/rdma/uverbs_std_types.h
@@ -37,26 +37,10 @@
#include <rdma/uverbs_ioctl.h>
#include <rdma/ib_user_ioctl_verbs.h>
+#define UVERBS_OBJECT(id) uverbs_object_##id
+
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
-extern const struct uverbs_object_def uverbs_object_comp_channel;
-extern const struct uverbs_object_def uverbs_object_cq;
-extern const struct uverbs_object_def uverbs_object_qp;
-extern const struct uverbs_object_def uverbs_object_rwq_ind_table;
-extern const struct uverbs_object_def uverbs_object_wq;
-extern const struct uverbs_object_def uverbs_object_srq;
-extern const struct uverbs_object_def uverbs_object_ah;
-extern const struct uverbs_object_def uverbs_object_flow;
-extern const struct uverbs_object_def uverbs_object_mr;
-extern const struct uverbs_object_def uverbs_object_mw;
-extern const struct uverbs_object_def uverbs_object_pd;
-extern const struct uverbs_object_def uverbs_object_xrcd;
-extern const struct uverbs_object_def uverbs_object_device;
-
-extern const struct uverbs_object_tree_def uverbs_default_objects;
-static inline const struct uverbs_object_tree_def *uverbs_default_get_objects(void)
-{
- return &uverbs_default_objects;
-}
+const struct uverbs_object_tree_def *uverbs_default_get_objects(void);
#else
static inline const struct uverbs_object_tree_def *uverbs_default_get_objects(void)
{
@@ -72,22 +56,22 @@ static inline struct ib_uobject *__uobj_get(const struct uverbs_obj_type *type,
return rdma_lookup_get_uobject(type, ucontext, id, write);
}
-#define uobj_get_type(_object) uverbs_object_##_object.type_attrs
+#define uobj_get_type(_object) UVERBS_OBJECT(_object).type_attrs
#define uobj_get_read(_type, _id, _ucontext) \
- __uobj_get(_type, false, _ucontext, _id)
+ __uobj_get(uobj_get_type(_type), false, _ucontext, _id)
-#define uobj_get_obj_read(_object, _id, _ucontext) \
+#define uobj_get_obj_read(_object, _type, _id, _ucontext) \
({ \
struct ib_uobject *__uobj = \
- __uobj_get(uverbs_object_##_object.type_attrs, \
+ __uobj_get(uobj_get_type(_type), \
false, _ucontext, _id); \
\
(struct ib_##_object *)(IS_ERR(__uobj) ? NULL : __uobj->object);\
})
#define uobj_get_write(_type, _id, _ucontext) \
- __uobj_get(_type, true, _ucontext, _id)
+ __uobj_get(uobj_get_type(_type), true, _ucontext, _id)
static inline void uobj_put_read(struct ib_uobject *uobj)
{
@@ -124,7 +108,7 @@ static inline struct ib_uobject *__uobj_alloc(const struct uverbs_obj_type *type
}
#define uobj_alloc(_type, ucontext) \
- __uobj_alloc(_type, ucontext)
+ __uobj_alloc(uobj_get_type(_type), ucontext)
#endif
diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h
index db54115be044..a7a6111e50c7 100644
--- a/include/uapi/rdma/bnxt_re-abi.h
+++ b/include/uapi/rdma/bnxt_re-abi.h
@@ -53,15 +53,20 @@ struct bnxt_re_uctx_resp {
__u32 rsvd;
};
+/*
+ * This struct is placed after the ib_uverbs_alloc_pd_resp struct, which is
+ * not 8 byte aligned. To avoid undesired padding in various cases we have to
+ * mark this struct as packed.
+ */
struct bnxt_re_pd_resp {
__u32 pdid;
__u32 dpi;
__u64 dbr;
-};
+} __attribute__((packed, aligned(4)));
struct bnxt_re_cq_req {
- __u64 cq_va;
- __u64 cq_handle;
+ __aligned_u64 cq_va;
+ __aligned_u64 cq_handle;
};
struct bnxt_re_cq_resp {
@@ -72,9 +77,9 @@ struct bnxt_re_cq_resp {
};
struct bnxt_re_qp_req {
- __u64 qpsva;
- __u64 qprva;
- __u64 qp_handle;
+ __aligned_u64 qpsva;
+ __aligned_u64 qprva;
+ __aligned_u64 qp_handle;
};
struct bnxt_re_qp_resp {
@@ -83,8 +88,8 @@ struct bnxt_re_qp_resp {
};
struct bnxt_re_srq_req {
- __u64 srqva;
- __u64 srq_handle;
+ __aligned_u64 srqva;
+ __aligned_u64 srq_handle;
};
struct bnxt_re_srq_resp {
diff --git a/include/uapi/rdma/cxgb3-abi.h b/include/uapi/rdma/cxgb3-abi.h
index d5745e43ae85..9acb4b7a6246 100644
--- a/include/uapi/rdma/cxgb3-abi.h
+++ b/include/uapi/rdma/cxgb3-abi.h
@@ -41,21 +41,21 @@
* Make sure that all structs defined in this file remain laid out so
* that they pack the same way on 32-bit and 64-bit architectures (to
* avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
+ * In particular do not use pointer types -- pass pointers in __aligned_u64
* instead.
*/
struct iwch_create_cq_req {
- __u64 user_rptr_addr;
+ __aligned_u64 user_rptr_addr;
};
struct iwch_create_cq_resp_v0 {
- __u64 key;
+ __aligned_u64 key;
__u32 cqid;
__u32 size_log2;
};
struct iwch_create_cq_resp {
- __u64 key;
+ __aligned_u64 key;
__u32 cqid;
__u32 size_log2;
__u32 memsize;
@@ -63,8 +63,8 @@ struct iwch_create_cq_resp {
};
struct iwch_create_qp_resp {
- __u64 key;
- __u64 db_key;
+ __aligned_u64 key;
+ __aligned_u64 db_key;
__u32 qpid;
__u32 size_log2;
__u32 sq_size_log2;
@@ -74,4 +74,9 @@ struct iwch_create_qp_resp {
struct iwch_reg_user_mr_resp {
__u32 pbl_addr;
};
+
+struct iwch_alloc_pd_resp {
+ __u32 pdid;
+};
+
#endif /* CXGB3_ABI_USER_H */
diff --git a/include/uapi/rdma/cxgb4-abi.h b/include/uapi/rdma/cxgb4-abi.h
index 05f71f1bc119..1fefd0140c26 100644
--- a/include/uapi/rdma/cxgb4-abi.h
+++ b/include/uapi/rdma/cxgb4-abi.h
@@ -41,13 +41,13 @@
* Make sure that all structs defined in this file remain laid out so
* that they pack the same way on 32-bit and 64-bit architectures (to
* avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
+ * In particular do not use pointer types -- pass pointers in __aligned_u64
* instead.
*/
struct c4iw_create_cq_resp {
- __u64 key;
- __u64 gts_key;
- __u64 memsize;
+ __aligned_u64 key;
+ __aligned_u64 gts_key;
+ __aligned_u64 memsize;
__u32 cqid;
__u32 size;
__u32 qid_mask;
@@ -59,13 +59,13 @@ enum {
};
struct c4iw_create_qp_resp {
- __u64 ma_sync_key;
- __u64 sq_key;
- __u64 rq_key;
- __u64 sq_db_gts_key;
- __u64 rq_db_gts_key;
- __u64 sq_memsize;
- __u64 rq_memsize;
+ __aligned_u64 ma_sync_key;
+ __aligned_u64 sq_key;
+ __aligned_u64 rq_key;
+ __aligned_u64 sq_db_gts_key;
+ __aligned_u64 rq_db_gts_key;
+ __aligned_u64 sq_memsize;
+ __aligned_u64 rq_memsize;
__u32 sqid;
__u32 rqid;
__u32 sq_size;
@@ -75,8 +75,13 @@ struct c4iw_create_qp_resp {
};
struct c4iw_alloc_ucontext_resp {
- __u64 status_page_key;
+ __aligned_u64 status_page_key;
__u32 status_page_size;
__u32 reserved; /* explicit padding (optional for i386) */
};
+
+struct c4iw_alloc_pd_resp {
+ __u32 pdid;
+};
+
#endif /* CXGB4_ABI_USER_H */
diff --git a/include/uapi/rdma/hfi/hfi1_ioctl.h b/include/uapi/rdma/hfi/hfi1_ioctl.h
index 9de78c5ee913..8f3d9fe7b141 100644
--- a/include/uapi/rdma/hfi/hfi1_ioctl.h
+++ b/include/uapi/rdma/hfi/hfi1_ioctl.h
@@ -79,7 +79,7 @@ struct hfi1_user_info {
};
struct hfi1_ctxt_info {
- __u64 runtime_flags; /* chip/drv runtime flags (HFI1_CAP_*) */
+ __aligned_u64 runtime_flags; /* chip/drv runtime flags (HFI1_CAP_*) */
__u32 rcvegr_size; /* size of each eager buffer */
__u16 num_active; /* number of active units */
__u16 unit; /* unit (chip) assigned to caller */
@@ -98,9 +98,9 @@ struct hfi1_ctxt_info {
struct hfi1_tid_info {
/* virtual address of first page in transfer */
- __u64 vaddr;
+ __aligned_u64 vaddr;
/* pointer to tid array. this array is big enough */
- __u64 tidlist;
+ __aligned_u64 tidlist;
/* number of tids programmed by this request */
__u32 tidcnt;
/* length of transfer buffer programmed by this request */
@@ -131,23 +131,23 @@ struct hfi1_base_info {
*/
__u32 bthqp;
/* PIO credit return address, */
- __u64 sc_credits_addr;
+ __aligned_u64 sc_credits_addr;
/*
* Base address of write-only pio buffers for this process.
* Each buffer has sendpio_credits*64 bytes.
*/
- __u64 pio_bufbase_sop;
+ __aligned_u64 pio_bufbase_sop;
/*
* Base address of write-only pio buffers for this process.
* Each buffer has sendpio_credits*64 bytes.
*/
- __u64 pio_bufbase;
+ __aligned_u64 pio_bufbase;
/* address where receive buffer queue is mapped into */
- __u64 rcvhdr_bufbase;
+ __aligned_u64 rcvhdr_bufbase;
/* base address of Eager receive buffers. */
- __u64 rcvegr_bufbase;
+ __aligned_u64 rcvegr_bufbase;
/* base address of SDMA completion ring */
- __u64 sdma_comp_bufbase;
+ __aligned_u64 sdma_comp_bufbase;
/*
* User register base for init code, not to be used directly by
* protocol or applications. Always maps real chip register space.
@@ -155,20 +155,20 @@ struct hfi1_base_info {
* ur_rcvhdrhead, ur_rcvhdrtail, ur_rcvegrhead, ur_rcvegrtail,
* ur_rcvtidflow
*/
- __u64 user_regbase;
+ __aligned_u64 user_regbase;
/* notification events */
- __u64 events_bufbase;
+ __aligned_u64 events_bufbase;
/* status page */
- __u64 status_bufbase;
+ __aligned_u64 status_bufbase;
/* rcvhdrtail update */
- __u64 rcvhdrtail_base;
+ __aligned_u64 rcvhdrtail_base;
/*
* shared memory pages for subctxts if ctxt is shared; these cover
* all the processes in the group sharing a single context.
* all have enough space for the num_subcontexts value on this job.
*/
- __u64 subctxt_uregbase;
- __u64 subctxt_rcvegrbuf;
- __u64 subctxt_rcvhdrbuf;
+ __aligned_u64 subctxt_uregbase;
+ __aligned_u64 subctxt_rcvegrbuf;
+ __aligned_u64 subctxt_rcvhdrbuf;
};
#endif /* _LINIUX__HFI1_IOCTL_H */
diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h
index 791bea2f8297..c6a984c0c881 100644
--- a/include/uapi/rdma/hfi/hfi1_user.h
+++ b/include/uapi/rdma/hfi/hfi1_user.h
@@ -177,8 +177,8 @@ struct hfi1_sdma_comp_entry {
* Device status and notifications from driver to user-space.
*/
struct hfi1_status {
- __u64 dev; /* device/hw status bits */
- __u64 port; /* port state and status bits */
+ __aligned_u64 dev; /* device/hw status bits */
+ __aligned_u64 port; /* port state and status bits */
char freezemsg[0];
};
@@ -219,7 +219,7 @@ struct sdma_req_info {
* in charge of managing its own ring.
*/
__u16 comp_idx;
-} __packed;
+} __attribute__((__packed__));
/*
* SW KDETH header.
@@ -230,7 +230,7 @@ struct hfi1_kdeth_header {
__le16 jkey;
__le16 hcrc;
__le32 swdata[7];
-} __packed;
+} __attribute__((__packed__));
/*
* Structure describing the headers that User space uses. The
@@ -241,7 +241,7 @@ struct hfi1_pkt_header {
__be16 lrh[4];
__be32 bth[3];
struct hfi1_kdeth_header kdeth;
-} __packed;
+} __attribute__((__packed__));
/*
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
index a9c03b0eed57..7092c8de4bd8 100644
--- a/include/uapi/rdma/hns-abi.h
+++ b/include/uapi/rdma/hns-abi.h
@@ -37,19 +37,35 @@
#include <linux/types.h>
struct hns_roce_ib_create_cq {
- __u64 buf_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
+};
+
+struct hns_roce_ib_create_cq_resp {
+ __aligned_u64 cqn; /* Only 32 bits used, 64 for compat */
+ __aligned_u64 cap_flags;
};
struct hns_roce_ib_create_qp {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u8 log_sq_bb_count;
__u8 log_sq_stride;
__u8 sq_no_prefetch;
__u8 reserved[5];
};
+struct hns_roce_ib_create_qp_resp {
+ __aligned_u64 cap_flags;
+};
+
struct hns_roce_ib_alloc_ucontext_resp {
__u32 qp_tab_size;
+ __u32 reserved;
};
+
+struct hns_roce_ib_alloc_pd_resp {
+ __u32 pdn;
+};
+
#endif /* HNS_ABI_USER_H */
diff --git a/include/uapi/rdma/i40iw-abi.h b/include/uapi/rdma/i40iw-abi.h
new file mode 100644
index 000000000000..79890baa6fdb
--- /dev/null
+++ b/include/uapi/rdma/i40iw-abi.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2006 - 2016 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef I40IW_ABI_H
+#define I40IW_ABI_H
+
+#include <linux/types.h>
+
+#define I40IW_ABI_VER 5
+
+struct i40iw_alloc_ucontext_req {
+ __u32 reserved32;
+ __u8 userspace_ver;
+ __u8 reserved8[3];
+};
+
+struct i40iw_alloc_ucontext_resp {
+ __u32 max_pds; /* maximum pds allowed for this user process */
+ __u32 max_qps; /* maximum qps allowed for this user process */
+ __u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */
+ __u8 kernel_ver;
+ __u8 reserved[3];
+};
+
+struct i40iw_alloc_pd_resp {
+ __u32 pd_id;
+ __u8 reserved[4];
+};
+
+struct i40iw_create_cq_req {
+ __aligned_u64 user_cq_buffer;
+ __aligned_u64 user_shadow_area;
+};
+
+struct i40iw_create_qp_req {
+ __aligned_u64 user_wqe_buffers;
+ __aligned_u64 user_compl_ctx;
+
+ /* UDA QP PHB */
+ __aligned_u64 user_sq_phb; /* place for VA of the sq phb buff */
+ __aligned_u64 user_rq_phb; /* place for VA of the rq phb buff */
+};
+
+enum i40iw_memreg_type {
+ IW_MEMREG_TYPE_MEM = 0x0000,
+ IW_MEMREG_TYPE_QP = 0x0001,
+ IW_MEMREG_TYPE_CQ = 0x0002,
+};
+
+struct i40iw_mem_reg_req {
+ __u16 reg_type; /* Memory, QP or CQ */
+ __u16 cq_pages;
+ __u16 rq_pages;
+ __u16 sq_pages;
+};
+
+struct i40iw_create_cq_resp {
+ __u32 cq_id;
+ __u32 cq_size;
+ __u32 mmap_db_index;
+ __u32 reserved;
+};
+
+struct i40iw_create_qp_resp {
+ __u32 qp_id;
+ __u32 actual_sq_size;
+ __u32 actual_rq_size;
+ __u32 i40iw_drv_opt;
+ __u16 push_idx;
+ __u8 lsmm;
+ __u8 rsvd2;
+};
+
+#endif
diff --git a/include/uapi/rdma/ib_user_cm.h b/include/uapi/rdma/ib_user_cm.h
index f4041bdc4d08..4a8f9562f7cd 100644
--- a/include/uapi/rdma/ib_user_cm.h
+++ b/include/uapi/rdma/ib_user_cm.h
@@ -73,8 +73,8 @@ struct ib_ucm_cmd_hdr {
};
struct ib_ucm_create_id {
- __u64 uid;
- __u64 response;
+ __aligned_u64 uid;
+ __aligned_u64 response;
};
struct ib_ucm_create_id_resp {
@@ -82,7 +82,7 @@ struct ib_ucm_create_id_resp {
};
struct ib_ucm_destroy_id {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 reserved;
};
@@ -92,7 +92,7 @@ struct ib_ucm_destroy_id_resp {
};
struct ib_ucm_attr_id {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 reserved;
};
@@ -105,7 +105,7 @@ struct ib_ucm_attr_id_resp {
};
struct ib_ucm_init_qp_attr {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 qp_state;
};
@@ -123,7 +123,7 @@ struct ib_ucm_notify {
};
struct ib_ucm_private_data {
- __u64 data;
+ __aligned_u64 data;
__u32 id;
__u8 len;
__u8 reserved[3];
@@ -135,9 +135,9 @@ struct ib_ucm_req {
__u32 qp_type;
__u32 psn;
__be64 sid;
- __u64 data;
- __u64 primary_path;
- __u64 alternate_path;
+ __aligned_u64 data;
+ __aligned_u64 primary_path;
+ __aligned_u64 alternate_path;
__u8 len;
__u8 peer_to_peer;
__u8 responder_resources;
@@ -153,8 +153,8 @@ struct ib_ucm_req {
};
struct ib_ucm_rep {
- __u64 uid;
- __u64 data;
+ __aligned_u64 uid;
+ __aligned_u64 data;
__u32 id;
__u32 qpn;
__u32 psn;
@@ -172,15 +172,15 @@ struct ib_ucm_rep {
struct ib_ucm_info {
__u32 id;
__u32 status;
- __u64 info;
- __u64 data;
+ __aligned_u64 info;
+ __aligned_u64 data;
__u8 info_len;
__u8 data_len;
__u8 reserved[6];
};
struct ib_ucm_mra {
- __u64 data;
+ __aligned_u64 data;
__u32 id;
__u8 len;
__u8 timeout;
@@ -188,8 +188,8 @@ struct ib_ucm_mra {
};
struct ib_ucm_lap {
- __u64 path;
- __u64 data;
+ __aligned_u64 path;
+ __aligned_u64 data;
__u32 id;
__u8 len;
__u8 reserved[3];
@@ -199,8 +199,8 @@ struct ib_ucm_sidr_req {
__u32 id;
__u32 timeout;
__be64 sid;
- __u64 data;
- __u64 path;
+ __aligned_u64 data;
+ __aligned_u64 path;
__u16 reserved_pkey;
__u8 len;
__u8 max_cm_retries;
@@ -212,8 +212,8 @@ struct ib_ucm_sidr_rep {
__u32 qpn;
__u32 qkey;
__u32 status;
- __u64 info;
- __u64 data;
+ __aligned_u64 info;
+ __aligned_u64 data;
__u8 info_len;
__u8 data_len;
__u8 reserved[6];
@@ -222,9 +222,9 @@ struct ib_ucm_sidr_rep {
* event notification ABI structures.
*/
struct ib_ucm_event_get {
- __u64 response;
- __u64 data;
- __u64 info;
+ __aligned_u64 response;
+ __aligned_u64 data;
+ __aligned_u64 info;
__u8 data_len;
__u8 info_len;
__u8 reserved[6];
@@ -303,7 +303,7 @@ struct ib_ucm_sidr_rep_event_resp {
#define IB_UCM_PRES_ALTERNATE 0x08
struct ib_ucm_event_resp {
- __u64 uid;
+ __aligned_u64 uid;
__u32 id;
__u32 event;
__u32 present;
diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h
new file mode 100644
index 000000000000..83e3890eef20
--- /dev/null
+++ b/include/uapi/rdma/ib_user_ioctl_cmds.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef IB_USER_IOCTL_CMDS_H
+#define IB_USER_IOCTL_CMDS_H
+
+#define UVERBS_ID_NS_MASK 0xF000
+#define UVERBS_ID_NS_SHIFT 12
+
+#define UVERBS_UDATA_DRIVER_DATA_NS 1
+#define UVERBS_UDATA_DRIVER_DATA_FLAG (1UL << UVERBS_ID_NS_SHIFT)
+
+enum uverbs_default_objects {
+ UVERBS_OBJECT_DEVICE, /* No instances of DEVICE are allowed */
+ UVERBS_OBJECT_PD,
+ UVERBS_OBJECT_COMP_CHANNEL,
+ UVERBS_OBJECT_CQ,
+ UVERBS_OBJECT_QP,
+ UVERBS_OBJECT_SRQ,
+ UVERBS_OBJECT_AH,
+ UVERBS_OBJECT_MR,
+ UVERBS_OBJECT_MW,
+ UVERBS_OBJECT_FLOW,
+ UVERBS_OBJECT_XRCD,
+ UVERBS_OBJECT_RWQ_IND_TBL,
+ UVERBS_OBJECT_WQ,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_OBJECT_DM,
+};
+
+enum {
+ UVERBS_ATTR_UHW_IN = UVERBS_UDATA_DRIVER_DATA_FLAG,
+ UVERBS_ATTR_UHW_OUT,
+};
+
+enum uverbs_attrs_create_cq_cmd_attr_ids {
+ UVERBS_ATTR_CREATE_CQ_HANDLE,
+ UVERBS_ATTR_CREATE_CQ_CQE,
+ UVERBS_ATTR_CREATE_CQ_USER_HANDLE,
+ UVERBS_ATTR_CREATE_CQ_COMP_CHANNEL,
+ UVERBS_ATTR_CREATE_CQ_COMP_VECTOR,
+ UVERBS_ATTR_CREATE_CQ_FLAGS,
+ UVERBS_ATTR_CREATE_CQ_RESP_CQE,
+};
+
+enum uverbs_attrs_destroy_cq_cmd_attr_ids {
+ UVERBS_ATTR_DESTROY_CQ_HANDLE,
+ UVERBS_ATTR_DESTROY_CQ_RESP,
+};
+
+enum uverbs_attrs_create_flow_action_esp {
+ UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE,
+ UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS,
+ UVERBS_ATTR_FLOW_ACTION_ESP_ESN,
+ UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT,
+ UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY,
+ UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP,
+};
+
+enum uverbs_attrs_destroy_flow_action_esp {
+ UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE,
+};
+
+enum uverbs_methods_cq {
+ UVERBS_METHOD_CQ_CREATE,
+ UVERBS_METHOD_CQ_DESTROY,
+};
+
+enum uverbs_methods_actions_flow_action_ops {
+ UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
+ UVERBS_METHOD_FLOW_ACTION_DESTROY,
+ UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY,
+};
+
+enum uverbs_attrs_alloc_dm_cmd_attr_ids {
+ UVERBS_ATTR_ALLOC_DM_HANDLE,
+ UVERBS_ATTR_ALLOC_DM_LENGTH,
+ UVERBS_ATTR_ALLOC_DM_ALIGNMENT,
+};
+
+enum uverbs_attrs_free_dm_cmd_attr_ids {
+ UVERBS_ATTR_FREE_DM_HANDLE,
+};
+
+enum uverbs_methods_dm {
+ UVERBS_METHOD_DM_ALLOC,
+ UVERBS_METHOD_DM_FREE,
+};
+
+enum uverbs_attrs_reg_dm_mr_cmd_attr_ids {
+ UVERBS_ATTR_REG_DM_MR_HANDLE,
+ UVERBS_ATTR_REG_DM_MR_OFFSET,
+ UVERBS_ATTR_REG_DM_MR_LENGTH,
+ UVERBS_ATTR_REG_DM_MR_PD_HANDLE,
+ UVERBS_ATTR_REG_DM_MR_ACCESS_FLAGS,
+ UVERBS_ATTR_REG_DM_MR_DM_HANDLE,
+ UVERBS_ATTR_REG_DM_MR_RESP_LKEY,
+ UVERBS_ATTR_REG_DM_MR_RESP_RKEY,
+};
+
+enum uverbs_methods_mr {
+ UVERBS_METHOD_DM_MR_REG,
+};
+
+#endif
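
ib_user_ioctl_cmds.h also takes over the namespace split that previously lived in rdma_user_ioctl.h: IDs below UVERBS_UDATA_DRIVER_DATA_FLAG (1 << UVERBS_ID_NS_SHIFT) belong to the common verbs namespace, and driver-private attribute IDs start at that flag. A hedged sketch of how a driver header allocates its own IDs follows; the enum and attribute names are made up, but the pattern matches mlx5_user_ioctl_cmds.h later in this diff.

#include <rdma/ib_user_ioctl_cmds.h>

enum example_driver_create_cq_attrs {
	/* 0 .. (1 << UVERBS_ID_NS_SHIFT) - 1 is reserved for common verbs */
	EXAMPLE_ATTR_CREATE_CQ_PRIVATE_FLAGS = (1U << UVERBS_ID_NS_SHIFT),
	EXAMPLE_ATTR_CREATE_CQ_PRIVATE_BUF,
};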
diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h
index 842792eae383..04e46ea517d3 100644
--- a/include/uapi/rdma/ib_user_ioctl_verbs.h
+++ b/include/uapi/rdma/ib_user_ioctl_verbs.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
/*
- * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2017-2018, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -33,52 +34,69 @@
#ifndef IB_USER_IOCTL_VERBS_H
#define IB_USER_IOCTL_VERBS_H
-#include <rdma/rdma_user_ioctl.h>
-
-#define UVERBS_UDATA_DRIVER_DATA_NS 1
-#define UVERBS_UDATA_DRIVER_DATA_FLAG (1UL << UVERBS_ID_NS_SHIFT)
-
-enum uverbs_default_objects {
- UVERBS_OBJECT_DEVICE, /* No instances of DEVICE are allowed */
- UVERBS_OBJECT_PD,
- UVERBS_OBJECT_COMP_CHANNEL,
- UVERBS_OBJECT_CQ,
- UVERBS_OBJECT_QP,
- UVERBS_OBJECT_SRQ,
- UVERBS_OBJECT_AH,
- UVERBS_OBJECT_MR,
- UVERBS_OBJECT_MW,
- UVERBS_OBJECT_FLOW,
- UVERBS_OBJECT_XRCD,
- UVERBS_OBJECT_RWQ_IND_TBL,
- UVERBS_OBJECT_WQ,
- UVERBS_OBJECT_LAST,
+#include <linux/types.h>
+
+#ifndef RDMA_UAPI_PTR
+#define RDMA_UAPI_PTR(_type, _name) __aligned_u64 _name
+#endif
+
+enum ib_uverbs_flow_action_esp_keymat {
+ IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM,
};
-enum {
- UVERBS_UHW_IN = UVERBS_UDATA_DRIVER_DATA_FLAG,
- UVERBS_UHW_OUT,
+enum ib_uverbs_flow_action_esp_keymat_aes_gcm_iv_algo {
+ IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ,
};
-enum uverbs_create_cq_cmd_attr_ids {
- CREATE_CQ_HANDLE,
- CREATE_CQ_CQE,
- CREATE_CQ_USER_HANDLE,
- CREATE_CQ_COMP_CHANNEL,
- CREATE_CQ_COMP_VECTOR,
- CREATE_CQ_FLAGS,
- CREATE_CQ_RESP_CQE,
+struct ib_uverbs_flow_action_esp_keymat_aes_gcm {
+ __aligned_u64 iv;
+ __u32 iv_algo; /* Use enum ib_uverbs_flow_action_esp_keymat_aes_gcm_iv_algo */
+
+ __u32 salt;
+ __u32 icv_len;
+
+ __u32 key_len;
+ __u32 aes_key[256 / 32];
};
-enum uverbs_destroy_cq_cmd_attr_ids {
- DESTROY_CQ_HANDLE,
- DESTROY_CQ_RESP,
+enum ib_uverbs_flow_action_esp_replay {
+ IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE,
+ IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP,
};
-enum uverbs_actions_cq_ops {
- UVERBS_CQ_CREATE,
- UVERBS_CQ_DESTROY,
+struct ib_uverbs_flow_action_esp_replay_bmp {
+ __u32 size;
};
-#endif
+enum ib_uverbs_flow_action_esp_flags {
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_INLINE_CRYPTO = 0UL << 0, /* Default */
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_FULL_OFFLOAD = 1UL << 0,
+
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_TUNNEL = 0UL << 1, /* Default */
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_TRANSPORT = 1UL << 1,
+
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_DECRYPT = 0UL << 2, /* Default */
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT = 1UL << 2,
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW = 1UL << 3,
+};
+
+struct ib_uverbs_flow_action_esp_encap {
+ /* This struct represents a list of pointers to flow_xxxx_filter that
+ * encapsulates the payload in ESP tunnel mode.
+ */
+ RDMA_UAPI_PTR(void *, val_ptr); /* pointer to a flow_xxxx_filter */
+ RDMA_UAPI_PTR(struct ib_uverbs_flow_action_esp_encap *, next_ptr);
+ __u16 len; /* Len of the filter struct val_ptr points to */
+ __u16 type; /* Use flow_spec_type enum */
+};
+
+struct ib_uverbs_flow_action_esp {
+ __u32 spi;
+ __u32 seq;
+ __u32 tfc_pad;
+ __u32 flags;
+ __aligned_u64 hard_limit_pkts;
+};
+
+#endif
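
RDMA_UAPI_PTR keeps pointer-carrying ABI fields at a fixed 64-bit width, so the same struct layout works for 32- and 64-bit userspace; the application stores the pointer value through a uintptr_t cast and the kernel reads it back as a user address. A rough sketch of filling ib_uverbs_flow_action_esp_encap this way, assuming ib_uverbs_flow_ipv4_filter from ib_user_verbs.h as the filter; the type value is passed in rather than hard-coded because it must come from the flow_spec_type enum.

#include <stdint.h>
#include <string.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_user_ioctl_verbs.h>

static void fill_encap(struct ib_uverbs_flow_action_esp_encap *encap,
		       struct ib_uverbs_flow_ipv4_filter *ipv4,
		       __u16 ipv4_spec_type)
{
	memset(encap, 0, sizeof(*encap));
	encap->val_ptr = (uintptr_t)ipv4;	/* filter this entry points at */
	encap->next_ptr = 0;			/* last entry in the list */
	encap->len = sizeof(*ipv4);
	encap->type = ipv4_spec_type;		/* value from the flow_spec_type enum */
}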
diff --git a/include/uapi/rdma/ib_user_mad.h b/include/uapi/rdma/ib_user_mad.h
index 330a3c5f1aa8..ef92118dad97 100644
--- a/include/uapi/rdma/ib_user_mad.h
+++ b/include/uapi/rdma/ib_user_mad.h
@@ -143,7 +143,7 @@ struct ib_user_mad_hdr {
*/
struct ib_user_mad {
struct ib_user_mad_hdr hdr;
- __u64 data[0];
+ __aligned_u64 data[0];
};
/*
@@ -225,7 +225,7 @@ struct ib_user_mad_reg_req2 {
__u8 mgmt_class_version;
__u16 res;
__u32 flags;
- __u64 method_mask[2];
+ __aligned_u64 method_mask[2];
__u32 oui;
__u8 rmpp_version;
__u8 reserved[3];
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 04d0e67b1312..9be07394fdbe 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -117,13 +117,13 @@ enum {
*/
struct ib_uverbs_async_event_desc {
- __u64 element;
+ __aligned_u64 element;
__u32 event_type; /* enum ib_event_type */
__u32 reserved;
};
struct ib_uverbs_comp_event_desc {
- __u64 cq_handle;
+ __aligned_u64 cq_handle;
};
struct ib_uverbs_cq_moderation_caps {
@@ -141,10 +141,7 @@ struct ib_uverbs_cq_moderation_caps {
*/
#define IB_USER_VERBS_CMD_COMMAND_MASK 0xff
-#define IB_USER_VERBS_CMD_FLAGS_MASK 0xff000000u
-#define IB_USER_VERBS_CMD_FLAGS_SHIFT 24
-
-#define IB_USER_VERBS_CMD_FLAG_EXTENDED 0x80
+#define IB_USER_VERBS_CMD_FLAG_EXTENDED 0x80000000u
struct ib_uverbs_cmd_hdr {
__u32 command;
@@ -153,15 +150,15 @@ struct ib_uverbs_cmd_hdr {
};
struct ib_uverbs_ex_cmd_hdr {
- __u64 response;
+ __aligned_u64 response;
__u16 provider_in_words;
__u16 provider_out_words;
__u32 cmd_hdr_reserved;
};
struct ib_uverbs_get_context {
- __u64 response;
- __u64 driver_data[0];
+ __aligned_u64 response;
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_get_context_resp {
@@ -170,16 +167,16 @@ struct ib_uverbs_get_context_resp {
};
struct ib_uverbs_query_device {
- __u64 response;
- __u64 driver_data[0];
+ __aligned_u64 response;
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_query_device_resp {
- __u64 fw_ver;
+ __aligned_u64 fw_ver;
__be64 node_guid;
__be64 sys_image_guid;
- __u64 max_mr_size;
- __u64 page_size_cap;
+ __aligned_u64 max_mr_size;
+ __aligned_u64 page_size_cap;
__u32 vendor_id;
__u32 vendor_part_id;
__u32 hw_ver;
@@ -224,7 +221,7 @@ struct ib_uverbs_ex_query_device {
};
struct ib_uverbs_odp_caps {
- __u64 general_caps;
+ __aligned_u64 general_caps;
struct {
__u32 rc_odp_caps;
__u32 uc_odp_caps;
@@ -263,21 +260,22 @@ struct ib_uverbs_ex_query_device_resp {
__u32 comp_mask;
__u32 response_length;
struct ib_uverbs_odp_caps odp_caps;
- __u64 timestamp_mask;
- __u64 hca_core_clock; /* in KHZ */
- __u64 device_cap_flags_ex;
+ __aligned_u64 timestamp_mask;
+ __aligned_u64 hca_core_clock; /* in KHZ */
+ __aligned_u64 device_cap_flags_ex;
struct ib_uverbs_rss_caps rss_caps;
__u32 max_wq_type_rq;
__u32 raw_packet_caps;
struct ib_uverbs_tm_caps tm_caps;
struct ib_uverbs_cq_moderation_caps cq_moderation_caps;
+ __aligned_u64 max_dm_size;
};
struct ib_uverbs_query_port {
- __u64 response;
+ __aligned_u64 response;
__u8 port_num;
__u8 reserved[7];
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_query_port_resp {
@@ -305,8 +303,8 @@ struct ib_uverbs_query_port_resp {
};
struct ib_uverbs_alloc_pd {
- __u64 response;
- __u64 driver_data[0];
+ __aligned_u64 response;
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_alloc_pd_resp {
@@ -318,10 +316,10 @@ struct ib_uverbs_dealloc_pd {
};
struct ib_uverbs_open_xrcd {
- __u64 response;
+ __aligned_u64 response;
__u32 fd;
__u32 oflags;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_open_xrcd_resp {
@@ -333,13 +331,13 @@ struct ib_uverbs_close_xrcd {
};
struct ib_uverbs_reg_mr {
- __u64 response;
- __u64 start;
- __u64 length;
- __u64 hca_va;
+ __aligned_u64 response;
+ __aligned_u64 start;
+ __aligned_u64 length;
+ __aligned_u64 hca_va;
__u32 pd_handle;
__u32 access_flags;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_reg_mr_resp {
@@ -349,12 +347,12 @@ struct ib_uverbs_reg_mr_resp {
};
struct ib_uverbs_rereg_mr {
- __u64 response;
+ __aligned_u64 response;
__u32 mr_handle;
__u32 flags;
- __u64 start;
- __u64 length;
- __u64 hca_va;
+ __aligned_u64 start;
+ __aligned_u64 length;
+ __aligned_u64 hca_va;
__u32 pd_handle;
__u32 access_flags;
};
@@ -369,7 +367,7 @@ struct ib_uverbs_dereg_mr {
};
struct ib_uverbs_alloc_mw {
- __u64 response;
+ __aligned_u64 response;
__u32 pd_handle;
__u8 mw_type;
__u8 reserved[3];
@@ -385,7 +383,7 @@ struct ib_uverbs_dealloc_mw {
};
struct ib_uverbs_create_comp_channel {
- __u64 response;
+ __aligned_u64 response;
};
struct ib_uverbs_create_comp_channel_resp {
@@ -393,13 +391,13 @@ struct ib_uverbs_create_comp_channel_resp {
};
struct ib_uverbs_create_cq {
- __u64 response;
- __u64 user_handle;
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
__u32 cqe;
__u32 comp_vector;
__s32 comp_channel;
__u32 reserved;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
enum ib_uverbs_ex_create_cq_flags {
@@ -408,7 +406,7 @@ enum ib_uverbs_ex_create_cq_flags {
};
struct ib_uverbs_ex_create_cq {
- __u64 user_handle;
+ __aligned_u64 user_handle;
__u32 cqe;
__u32 comp_vector;
__s32 comp_channel;
@@ -429,26 +427,26 @@ struct ib_uverbs_ex_create_cq_resp {
};
struct ib_uverbs_resize_cq {
- __u64 response;
+ __aligned_u64 response;
__u32 cq_handle;
__u32 cqe;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_resize_cq_resp {
__u32 cqe;
__u32 reserved;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_poll_cq {
- __u64 response;
+ __aligned_u64 response;
__u32 cq_handle;
__u32 ne;
};
struct ib_uverbs_wc {
- __u64 wr_id;
+ __aligned_u64 wr_id;
__u32 status;
__u32 opcode;
__u32 vendor_err;
@@ -480,7 +478,7 @@ struct ib_uverbs_req_notify_cq {
};
struct ib_uverbs_destroy_cq {
- __u64 response;
+ __aligned_u64 response;
__u32 cq_handle;
__u32 reserved;
};
@@ -549,8 +547,8 @@ struct ib_uverbs_qp_attr {
};
struct ib_uverbs_create_qp {
- __u64 response;
- __u64 user_handle;
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
__u32 pd_handle;
__u32 send_cq_handle;
__u32 recv_cq_handle;
@@ -564,7 +562,7 @@ struct ib_uverbs_create_qp {
__u8 qp_type;
__u8 is_srq;
__u8 reserved;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
enum ib_uverbs_create_qp_mask {
@@ -590,7 +588,7 @@ enum {
};
struct ib_uverbs_ex_create_qp {
- __u64 user_handle;
+ __aligned_u64 user_handle;
__u32 pd_handle;
__u32 send_cq_handle;
__u32 recv_cq_handle;
@@ -611,13 +609,13 @@ struct ib_uverbs_ex_create_qp {
};
struct ib_uverbs_open_qp {
- __u64 response;
- __u64 user_handle;
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
__u32 pd_handle;
__u32 qpn;
__u8 qp_type;
__u8 reserved[7];
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
/* also used for open response */
@@ -658,10 +656,10 @@ struct ib_uverbs_qp_dest {
};
struct ib_uverbs_query_qp {
- __u64 response;
+ __aligned_u64 response;
__u32 qp_handle;
__u32 attr_mask;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_query_qp_resp {
@@ -695,7 +693,7 @@ struct ib_uverbs_query_qp_resp {
__u8 alt_timeout;
__u8 sq_sig_all;
__u8 reserved[5];
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_modify_qp {
@@ -725,7 +723,7 @@ struct ib_uverbs_modify_qp {
__u8 alt_port_num;
__u8 alt_timeout;
__u8 reserved[2];
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_ex_modify_qp {
@@ -743,7 +741,7 @@ struct ib_uverbs_ex_modify_qp_resp {
};
struct ib_uverbs_destroy_qp {
- __u64 response;
+ __aligned_u64 response;
__u32 qp_handle;
__u32 reserved;
};
@@ -759,13 +757,13 @@ struct ib_uverbs_destroy_qp_resp {
* document the ABI.
*/
struct ib_uverbs_sge {
- __u64 addr;
+ __aligned_u64 addr;
__u32 length;
__u32 lkey;
};
struct ib_uverbs_send_wr {
- __u64 wr_id;
+ __aligned_u64 wr_id;
__u32 num_sge;
__u32 opcode;
__u32 send_flags;
@@ -775,14 +773,14 @@ struct ib_uverbs_send_wr {
} ex;
union {
struct {
- __u64 remote_addr;
+ __aligned_u64 remote_addr;
__u32 rkey;
__u32 reserved;
} rdma;
struct {
- __u64 remote_addr;
- __u64 compare_add;
- __u64 swap;
+ __aligned_u64 remote_addr;
+ __aligned_u64 compare_add;
+ __aligned_u64 swap;
__u32 rkey;
__u32 reserved;
} atomic;
@@ -796,7 +794,7 @@ struct ib_uverbs_send_wr {
};
struct ib_uverbs_post_send {
- __u64 response;
+ __aligned_u64 response;
__u32 qp_handle;
__u32 wr_count;
__u32 sge_count;
@@ -809,13 +807,13 @@ struct ib_uverbs_post_send_resp {
};
struct ib_uverbs_recv_wr {
- __u64 wr_id;
+ __aligned_u64 wr_id;
__u32 num_sge;
__u32 reserved;
};
struct ib_uverbs_post_recv {
- __u64 response;
+ __aligned_u64 response;
__u32 qp_handle;
__u32 wr_count;
__u32 sge_count;
@@ -828,7 +826,7 @@ struct ib_uverbs_post_recv_resp {
};
struct ib_uverbs_post_srq_recv {
- __u64 response;
+ __aligned_u64 response;
__u32 srq_handle;
__u32 wr_count;
__u32 sge_count;
@@ -841,8 +839,8 @@ struct ib_uverbs_post_srq_recv_resp {
};
struct ib_uverbs_create_ah {
- __u64 response;
- __u64 user_handle;
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
__u32 pd_handle;
__u32 reserved;
struct ib_uverbs_ah_attr attr;
@@ -861,7 +859,7 @@ struct ib_uverbs_attach_mcast {
__u32 qp_handle;
__u16 mlid;
__u16 reserved;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_detach_mcast {
@@ -869,7 +867,7 @@ struct ib_uverbs_detach_mcast {
__u32 qp_handle;
__u16 mlid;
__u16 reserved;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_flow_spec_hdr {
@@ -877,7 +875,7 @@ struct ib_uverbs_flow_spec_hdr {
__u16 size;
__u16 reserved;
/* followed by flow_spec */
- __u64 flow_spec_data[0];
+ __aligned_u64 flow_spec_data[0];
};
struct ib_uverbs_flow_eth_filter {
@@ -987,6 +985,19 @@ struct ib_uverbs_flow_spec_action_drop {
};
};
+struct ib_uverbs_flow_spec_action_handle {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ __u32 handle;
+ __u32 reserved1;
+};
+
struct ib_uverbs_flow_tunnel_filter {
__be32 tunnel_id;
};
@@ -1004,6 +1015,24 @@ struct ib_uverbs_flow_spec_tunnel {
struct ib_uverbs_flow_tunnel_filter mask;
};
+struct ib_uverbs_flow_spec_esp_filter {
+ __u32 spi;
+ __u32 seq;
+};
+
+struct ib_uverbs_flow_spec_esp {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ struct ib_uverbs_flow_spec_esp_filter val;
+ struct ib_uverbs_flow_spec_esp_filter mask;
+};
+
struct ib_uverbs_flow_attr {
__u32 type;
__u16 size;
@@ -1036,18 +1065,18 @@ struct ib_uverbs_destroy_flow {
};
struct ib_uverbs_create_srq {
- __u64 response;
- __u64 user_handle;
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
__u32 pd_handle;
__u32 max_wr;
__u32 max_sge;
__u32 srq_limit;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_create_xsrq {
- __u64 response;
- __u64 user_handle;
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
__u32 srq_type;
__u32 pd_handle;
__u32 max_wr;
@@ -1056,7 +1085,7 @@ struct ib_uverbs_create_xsrq {
__u32 max_num_tags;
__u32 xrcd_handle;
__u32 cq_handle;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_create_srq_resp {
@@ -1071,14 +1100,14 @@ struct ib_uverbs_modify_srq {
__u32 attr_mask;
__u32 max_wr;
__u32 srq_limit;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_query_srq {
- __u64 response;
+ __aligned_u64 response;
__u32 srq_handle;
__u32 reserved;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_query_srq_resp {
@@ -1089,7 +1118,7 @@ struct ib_uverbs_query_srq_resp {
};
struct ib_uverbs_destroy_srq {
- __u64 response;
+ __aligned_u64 response;
__u32 srq_handle;
__u32 reserved;
};
@@ -1101,7 +1130,7 @@ struct ib_uverbs_destroy_srq_resp {
struct ib_uverbs_ex_create_wq {
__u32 comp_mask;
__u32 wq_type;
- __u64 user_handle;
+ __aligned_u64 user_handle;
__u32 pd_handle;
__u32 cq_handle;
__u32 max_wr;
diff --git a/include/uapi/rdma/mlx4-abi.h b/include/uapi/rdma/mlx4-abi.h
index 7f9c37346613..04f64bc4045f 100644
--- a/include/uapi/rdma/mlx4-abi.h
+++ b/include/uapi/rdma/mlx4-abi.h
@@ -59,6 +59,10 @@ struct mlx4_ib_alloc_ucontext_resp_v3 {
__u16 bf_regs_per_page;
};
+enum {
+ MLX4_USER_DEV_CAP_LARGE_CQE = 1L << 0,
+};
+
struct mlx4_ib_alloc_ucontext_resp {
__u32 dev_caps;
__u32 qp_tab_size;
@@ -73,8 +77,8 @@ struct mlx4_ib_alloc_pd_resp {
};
struct mlx4_ib_create_cq {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
};
struct mlx4_ib_create_cq_resp {
@@ -83,12 +87,12 @@ struct mlx4_ib_create_cq_resp {
};
struct mlx4_ib_resize_cq {
- __u64 buf_addr;
+ __aligned_u64 buf_addr;
};
struct mlx4_ib_create_srq {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
};
struct mlx4_ib_create_srq_resp {
@@ -97,7 +101,7 @@ struct mlx4_ib_create_srq_resp {
};
struct mlx4_ib_create_qp_rss {
- __u64 rx_hash_fields_mask; /* Use enum mlx4_ib_rx_hash_fields */
+ __aligned_u64 rx_hash_fields_mask; /* Use enum mlx4_ib_rx_hash_fields */
__u8 rx_hash_function; /* Use enum mlx4_ib_rx_hash_function_flags */
__u8 reserved[7];
__u8 rx_hash_key[40];
@@ -106,8 +110,8 @@ struct mlx4_ib_create_qp_rss {
};
struct mlx4_ib_create_qp {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u8 log_sq_bb_count;
__u8 log_sq_stride;
__u8 sq_no_prefetch;
@@ -116,8 +120,8 @@ struct mlx4_ib_create_qp {
};
struct mlx4_ib_create_wq {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u8 log_range_size;
__u8 reserved[3];
__u32 comp_mask;
@@ -156,4 +160,32 @@ enum mlx4_ib_rx_hash_fields {
MLX4_IB_RX_HASH_INNER = 1ULL << 31,
};
+struct mlx4_ib_rss_caps {
+ __aligned_u64 rx_hash_fields_mask; /* enum mlx4_ib_rx_hash_fields */
+ __u8 rx_hash_function; /* enum mlx4_ib_rx_hash_function_flags */
+ __u8 reserved[7];
+};
+
+enum query_device_resp_mask {
+ MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
+};
+
+struct mlx4_ib_tso_caps {
+	__u32 max_tso; /* Maximum TSO payload size in bytes */
+	/* The corresponding bit will be set if the QP type from
+	 * 'enum ib_qp_type' is supported.
+ */
+ __u32 supported_qpts;
+};
+
+struct mlx4_uverbs_ex_query_device_resp {
+ __u32 comp_mask;
+ __u32 response_length;
+ __aligned_u64 hca_core_clock_offset;
+ __u32 max_inl_recv_sz;
+ __u32 reserved;
+ struct mlx4_ib_rss_caps rss_caps;
+ struct mlx4_ib_tso_caps tso_caps;
+};
+
#endif /* MLX4_ABI_USER_H */
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index 1111aa4e7c1e..cb4a02c4a1ce 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -84,7 +84,7 @@ struct mlx5_ib_alloc_ucontext_req_v2 {
__u8 reserved0;
__u16 reserved1;
__u32 reserved2;
- __u64 lib_caps;
+ __aligned_u64 lib_caps;
};
enum mlx5_ib_alloc_ucontext_resp_mask {
@@ -107,6 +107,14 @@ enum mlx5_user_inline_mode {
MLX5_USER_INLINE_MODE_TCP_UDP,
};
+enum {
+ MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM = 1 << 0,
+ MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA = 1 << 1,
+ MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING = 1 << 2,
+ MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD = 1 << 3,
+ MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN = 1 << 4,
+};
+
struct mlx5_ib_alloc_ucontext_resp {
__u32 qp_tab_size;
__u32 bf_reg_size;
@@ -118,14 +126,14 @@ struct mlx5_ib_alloc_ucontext_resp {
__u32 max_recv_wr;
__u32 max_srq_recv_wr;
__u16 num_ports;
- __u16 reserved1;
+ __u16 flow_action_flags;
__u32 comp_mask;
__u32 response_length;
__u8 cqe_version;
__u8 cmds_supp_uhw;
__u8 eth_min_inline;
__u8 clock_info_versions;
- __u64 hca_core_clock_offset;
+ __aligned_u64 hca_core_clock_offset;
__u32 log_uar_size;
__u32 num_uars_per_page;
__u32 num_dyn_bfregs;
@@ -147,7 +155,7 @@ struct mlx5_ib_tso_caps {
};
struct mlx5_ib_rss_caps {
- __u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
+ __aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
__u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
__u8 reserved[7];
};
@@ -163,6 +171,10 @@ struct mlx5_ib_cqe_comp_caps {
__u32 supported_format; /* enum mlx5_ib_cqe_comp_res_format */
};
+enum mlx5_ib_packet_pacing_cap_flags {
+ MLX5_IB_PP_SUPPORT_BURST = 1 << 0,
+};
+
struct mlx5_packet_pacing_caps {
__u32 qp_rate_limit_min;
__u32 qp_rate_limit_max; /* In kpbs */
@@ -172,7 +184,8 @@ struct mlx5_packet_pacing_caps {
* supported_qpts |= 1 << IB_QPT_RAW_PACKET
*/
__u32 supported_qpts;
- __u32 reserved;
+ __u8 cap_flags; /* enum mlx5_ib_packet_pacing_cap_flags */
+ __u8 reserved[3];
};
enum mlx5_ib_mpw_caps {
@@ -243,8 +256,8 @@ enum mlx5_ib_create_cq_flags {
};
struct mlx5_ib_create_cq {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u32 cqe_size;
__u8 cqe_comp_en;
__u8 cqe_comp_res_format;
@@ -257,15 +270,15 @@ struct mlx5_ib_create_cq_resp {
};
struct mlx5_ib_resize_cq {
- __u64 buf_addr;
+ __aligned_u64 buf_addr;
__u16 cqe_size;
__u16 reserved0;
__u32 reserved1;
};
struct mlx5_ib_create_srq {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u32 flags;
__u32 reserved0; /* explicit padding (optional on i386) */
__u32 uidx;
@@ -278,8 +291,8 @@ struct mlx5_ib_create_srq_resp {
};
struct mlx5_ib_create_qp {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u32 sq_wqe_count;
__u32 rq_wqe_count;
__u32 rq_wqe_shift;
@@ -287,8 +300,8 @@ struct mlx5_ib_create_qp {
__u32 uidx;
__u32 bfreg_index;
union {
- __u64 sq_buf_addr;
- __u64 access_key;
+ __aligned_u64 sq_buf_addr;
+ __aligned_u64 access_key;
};
};
@@ -314,12 +327,13 @@ enum mlx5_rx_hash_fields {
MLX5_RX_HASH_DST_PORT_TCP = 1 << 5,
MLX5_RX_HASH_SRC_PORT_UDP = 1 << 6,
MLX5_RX_HASH_DST_PORT_UDP = 1 << 7,
+ MLX5_RX_HASH_IPSEC_SPI = 1 << 8,
/* Save bits for future fields */
MLX5_RX_HASH_INNER = (1UL << 31),
};
struct mlx5_ib_create_qp_rss {
- __u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
+ __aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
__u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
__u8 rx_key_len; /* valid only for Toeplitz */
__u8 reserved[6];
@@ -330,6 +344,7 @@ struct mlx5_ib_create_qp_rss {
struct mlx5_ib_create_qp_resp {
__u32 bfreg_index;
+ __u32 reserved;
};
struct mlx5_ib_alloc_mw {
@@ -344,8 +359,8 @@ enum mlx5_ib_create_wq_mask {
};
struct mlx5_ib_create_wq {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u32 rq_wqe_count;
__u32 rq_wqe_shift;
__u32 user_index;
@@ -362,6 +377,18 @@ struct mlx5_ib_create_ah_resp {
__u8 reserved[6];
};
+struct mlx5_ib_burst_info {
+ __u32 max_burst_sz;
+ __u16 typical_pkt_sz;
+ __u16 reserved;
+};
+
+struct mlx5_ib_modify_qp {
+ __u32 comp_mask;
+ struct mlx5_ib_burst_info burst_info;
+ __u32 reserved;
+};
+
struct mlx5_ib_modify_qp_resp {
__u32 response_length;
__u32 dctn;
@@ -385,13 +412,13 @@ struct mlx5_ib_modify_wq {
struct mlx5_ib_clock_info {
__u32 sign;
__u32 resv;
- __u64 nsec;
- __u64 cycles;
- __u64 frac;
+ __aligned_u64 nsec;
+ __aligned_u64 cycles;
+ __aligned_u64 frac;
__u32 mult;
__u32 shift;
- __u64 mask;
- __u64 overflow_period;
+ __aligned_u64 mask;
+ __aligned_u64 overflow_period;
};
enum mlx5_ib_mmap_cmd {
@@ -403,6 +430,7 @@ enum mlx5_ib_mmap_cmd {
MLX5_IB_MMAP_CORE_CLOCK = 5,
MLX5_IB_MMAP_ALLOC_WC = 6,
MLX5_IB_MMAP_CLOCK_INFO = 7,
+ MLX5_IB_MMAP_DEVICE_MEM = 8,
};
enum {
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
new file mode 100644
index 000000000000..f7d685ef2d1f
--- /dev/null
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_USER_IOCTL_CMDS_H
+#define MLX5_USER_IOCTL_CMDS_H
+
+#include <rdma/ib_user_ioctl_cmds.h>
+
+enum mlx5_ib_create_flow_action_attrs {
+ /* This attribute belong to the driver namespace */
+ MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum mlx5_ib_alloc_dm_attrs {
+ MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
+};
+
+#endif
diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
new file mode 100644
index 000000000000..8a2fb33f3ed4
--- /dev/null
+++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_USER_IOCTL_VERBS_H
+#define MLX5_USER_IOCTL_VERBS_H
+
+#include <linux/types.h>
+
+enum mlx5_ib_uapi_flow_action_flags {
+ MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA = 1 << 0,
+};
+
+#endif
+
diff --git a/include/uapi/rdma/mthca-abi.h b/include/uapi/rdma/mthca-abi.h
index 3020d8a907a7..ac756cd9e807 100644
--- a/include/uapi/rdma/mthca-abi.h
+++ b/include/uapi/rdma/mthca-abi.h
@@ -74,8 +74,8 @@ struct mthca_reg_mr {
struct mthca_create_cq {
__u32 lkey;
__u32 pdn;
- __u64 arm_db_page;
- __u64 set_db_page;
+ __aligned_u64 arm_db_page;
+ __aligned_u64 set_db_page;
__u32 arm_db_index;
__u32 set_db_index;
};
@@ -93,7 +93,7 @@ struct mthca_resize_cq {
struct mthca_create_srq {
__u32 lkey;
__u32 db_index;
- __u64 db_page;
+ __aligned_u64 db_page;
};
struct mthca_create_srq_resp {
@@ -104,8 +104,8 @@ struct mthca_create_srq_resp {
struct mthca_create_qp {
__u32 lkey;
__u32 reserved;
- __u64 sq_db_page;
- __u64 rq_db_page;
+ __aligned_u64 sq_db_page;
+ __aligned_u64 rq_db_page;
__u32 sq_db_index;
__u32 rq_db_index;
};
diff --git a/include/uapi/rdma/nes-abi.h b/include/uapi/rdma/nes-abi.h
index f5b2437aab28..35bfd4015d07 100644
--- a/include/uapi/rdma/nes-abi.h
+++ b/include/uapi/rdma/nes-abi.h
@@ -72,14 +72,14 @@ struct nes_alloc_pd_resp {
};
struct nes_create_cq_req {
- __u64 user_cq_buffer;
+ __aligned_u64 user_cq_buffer;
__u32 mcrqf;
__u8 reserved[4];
};
struct nes_create_qp_req {
- __u64 user_wqe_buffers;
- __u64 user_qp_buffer;
+ __aligned_u64 user_wqe_buffers;
+ __aligned_u64 user_qp_buffer;
};
enum iwnes_memreg_type {
diff --git a/include/uapi/rdma/ocrdma-abi.h b/include/uapi/rdma/ocrdma-abi.h
index ad64a3cea1cd..284d47b41f6e 100644
--- a/include/uapi/rdma/ocrdma-abi.h
+++ b/include/uapi/rdma/ocrdma-abi.h
@@ -55,17 +55,17 @@ struct ocrdma_alloc_ucontext_resp {
__u32 wqe_size;
__u32 max_inline_data;
__u32 dpp_wqe_size;
- __u64 ah_tbl_page;
+ __aligned_u64 ah_tbl_page;
__u32 ah_tbl_len;
__u32 rqe_size;
__u8 fw_ver[32];
/* for future use/new features in progress */
- __u64 rsvd1;
- __u64 rsvd2;
+ __aligned_u64 rsvd1;
+ __aligned_u64 rsvd2;
};
struct ocrdma_alloc_pd_ureq {
- __u64 rsvd1;
+ __u32 rsvd[2];
};
struct ocrdma_alloc_pd_uresp {
@@ -73,7 +73,7 @@ struct ocrdma_alloc_pd_uresp {
__u32 dpp_enabled;
__u32 dpp_page_addr_hi;
__u32 dpp_page_addr_lo;
- __u64 rsvd1;
+ __u32 rsvd[2];
};
struct ocrdma_create_cq_ureq {
@@ -87,13 +87,13 @@ struct ocrdma_create_cq_uresp {
__u32 page_size;
__u32 num_pages;
__u32 max_hw_cqe;
- __u64 page_addr[MAX_CQ_PAGES];
- __u64 db_page_addr;
+ __aligned_u64 page_addr[MAX_CQ_PAGES];
+ __aligned_u64 db_page_addr;
__u32 db_page_size;
__u32 phase_change;
/* for future use/new features in progress */
- __u64 rsvd1;
- __u64 rsvd2;
+ __aligned_u64 rsvd1;
+ __aligned_u64 rsvd2;
};
#define MAX_QP_PAGES 8
@@ -115,9 +115,9 @@ struct ocrdma_create_qp_uresp {
__u32 rq_page_size;
__u32 num_sq_pages;
__u32 num_rq_pages;
- __u64 sq_page_addr[MAX_QP_PAGES];
- __u64 rq_page_addr[MAX_QP_PAGES];
- __u64 db_page_addr;
+ __aligned_u64 sq_page_addr[MAX_QP_PAGES];
+ __aligned_u64 rq_page_addr[MAX_QP_PAGES];
+ __aligned_u64 db_page_addr;
__u32 db_page_size;
__u32 dpp_credit;
__u32 dpp_offset;
@@ -126,8 +126,8 @@ struct ocrdma_create_qp_uresp {
__u32 db_sq_offset;
__u32 db_rq_offset;
__u32 db_shift;
- __u64 rsvd[11];
-} __packed;
+ __aligned_u64 rsvd[11];
+};
struct ocrdma_create_srq_uresp {
__u16 rq_dbid;
@@ -137,16 +137,16 @@ struct ocrdma_create_srq_uresp {
__u32 rq_page_size;
__u32 num_rq_pages;
- __u64 rq_page_addr[MAX_QP_PAGES];
- __u64 db_page_addr;
+ __aligned_u64 rq_page_addr[MAX_QP_PAGES];
+ __aligned_u64 db_page_addr;
__u32 db_page_size;
__u32 num_rqe_allocated;
__u32 db_rq_offset;
__u32 db_shift;
- __u64 rsvd2;
- __u64 rsvd3;
+ __aligned_u64 rsvd2;
+ __aligned_u64 rsvd3;
};
#endif /* OCRDMA_ABI_USER_H */
diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h
index 261c6db4623e..8ba098900e9a 100644
--- a/include/uapi/rdma/qedr-abi.h
+++ b/include/uapi/rdma/qedr-abi.h
@@ -40,7 +40,7 @@
/* user kernel communication data structures. */
struct qedr_alloc_ucontext_resp {
- __u64 db_pa;
+ __aligned_u64 db_pa;
__u32 db_size;
__u32 max_send_wr;
@@ -53,24 +53,27 @@ struct qedr_alloc_ucontext_resp {
__u8 dpm_enabled;
__u8 wids_enabled;
__u16 wid_count;
+ __u32 reserved;
};
struct qedr_alloc_pd_ureq {
- __u64 rsvd1;
+ __aligned_u64 rsvd1;
};
struct qedr_alloc_pd_uresp {
__u32 pd_id;
+ __u32 reserved;
};
struct qedr_create_cq_ureq {
- __u64 addr;
- __u64 len;
+ __aligned_u64 addr;
+ __aligned_u64 len;
};
struct qedr_create_cq_uresp {
__u32 db_offset;
__u16 icid;
+ __u16 reserved;
};
struct qedr_create_qp_ureq {
@@ -79,17 +82,17 @@ struct qedr_create_qp_ureq {
/* SQ */
/* user space virtual address of SQ buffer */
- __u64 sq_addr;
+ __aligned_u64 sq_addr;
/* length of SQ buffer */
- __u64 sq_len;
+ __aligned_u64 sq_len;
/* RQ */
/* user space virtual address of RQ buffer */
- __u64 rq_addr;
+ __aligned_u64 rq_addr;
/* length of RQ buffer */
- __u64 rq_len;
+ __aligned_u64 rq_len;
};
struct qedr_create_qp_uresp {
@@ -105,6 +108,7 @@ struct qedr_create_qp_uresp {
__u16 rq_icid;
__u32 rq_db2_offset;
+ __u32 reserved;
};
#endif /* __QEDR_USER_H__ */
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index 4c77e2a7b07e..0ce0943fc808 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -238,6 +238,14 @@ enum rdma_nldev_command {
RDMA_NLDEV_CMD_RES_QP_GET, /* can dump */
+ RDMA_NLDEV_CMD_RES_CM_ID_GET, /* can dump */
+
+ RDMA_NLDEV_CMD_RES_CQ_GET, /* can dump */
+
+ RDMA_NLDEV_CMD_RES_MR_GET, /* can dump */
+
+ RDMA_NLDEV_CMD_RES_PD_GET, /* can dump */
+
RDMA_NLDEV_NUM_OPS
};
@@ -350,6 +358,49 @@ enum rdma_nldev_attr {
*/
RDMA_NLDEV_ATTR_RES_KERN_NAME, /* string */
+ RDMA_NLDEV_ATTR_RES_CM_ID, /* nested table */
+ RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY, /* nested table */
+ /*
+ * rdma_cm_id port space.
+ */
+ RDMA_NLDEV_ATTR_RES_PS, /* u32 */
+ /*
+ * Source and destination socket addresses
+ */
+ RDMA_NLDEV_ATTR_RES_SRC_ADDR, /* __kernel_sockaddr_storage */
+ RDMA_NLDEV_ATTR_RES_DST_ADDR, /* __kernel_sockaddr_storage */
+
+ RDMA_NLDEV_ATTR_RES_CQ, /* nested table */
+ RDMA_NLDEV_ATTR_RES_CQ_ENTRY, /* nested table */
+ RDMA_NLDEV_ATTR_RES_CQE, /* u32 */
+ RDMA_NLDEV_ATTR_RES_USECNT, /* u64 */
+ RDMA_NLDEV_ATTR_RES_POLL_CTX, /* u8 */
+
+ RDMA_NLDEV_ATTR_RES_MR, /* nested table */
+ RDMA_NLDEV_ATTR_RES_MR_ENTRY, /* nested table */
+ RDMA_NLDEV_ATTR_RES_RKEY, /* u32 */
+ RDMA_NLDEV_ATTR_RES_LKEY, /* u32 */
+ RDMA_NLDEV_ATTR_RES_IOVA, /* u64 */
+ RDMA_NLDEV_ATTR_RES_MRLEN, /* u64 */
+
+ RDMA_NLDEV_ATTR_RES_PD, /* nested table */
+ RDMA_NLDEV_ATTR_RES_PD_ENTRY, /* nested table */
+ RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY, /* u32 */
+ RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, /* u32 */
+
+ /*
+	 * Provides the logical name and index of the netdevice which is
+	 * connected to the physical port. This information is relevant
+	 * for RoCE and iWARP.
+	 *
+	 * The netdevices which are associated with containers are
+	 * supposed to be exported together with the GID table once it
+	 * is exposed through netlink, because the associated
+	 * netdevices are properties of GIDs.
+ */
+ RDMA_NLDEV_ATTR_NDEV_INDEX, /* u32 */
+ RDMA_NLDEV_ATTR_NDEV_NAME, /* string */
+
RDMA_NLDEV_ATTR_MAX
};
#endif /* _UAPI_RDMA_NETLINK_H */
diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h
index c83ef0026079..e1269024af47 100644
--- a/include/uapi/rdma/rdma_user_cm.h
+++ b/include/uapi/rdma/rdma_user_cm.h
@@ -70,6 +70,14 @@ enum {
RDMA_USER_CM_CMD_JOIN_MCAST
};
+/* See IBTA Annex A11, service ID bytes 4 & 5 */
+enum rdma_ucm_port_space {
+ RDMA_PS_IPOIB = 0x0002,
+ RDMA_PS_IB = 0x013F,
+ RDMA_PS_TCP = 0x0106,
+ RDMA_PS_UDP = 0x0111,
+};
+
/*
* command ABI structures.
*/
@@ -80,9 +88,9 @@ struct rdma_ucm_cmd_hdr {
};
struct rdma_ucm_create_id {
- __u64 uid;
- __u64 response;
- __u16 ps;
+ __aligned_u64 uid;
+ __aligned_u64 response;
+ __u16 ps; /* use enum rdma_ucm_port_space */
__u8 qp_type;
__u8 reserved[5];
};
@@ -92,7 +100,7 @@ struct rdma_ucm_create_id_resp {
};
struct rdma_ucm_destroy_id {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 reserved;
};
@@ -102,7 +110,7 @@ struct rdma_ucm_destroy_id_resp {
};
struct rdma_ucm_bind_ip {
- __u64 response;
+ __aligned_u64 response;
struct sockaddr_in6 addr;
__u32 id;
};
@@ -143,13 +151,13 @@ enum {
};
struct rdma_ucm_query {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 option;
};
struct rdma_ucm_query_route_resp {
- __u64 node_guid;
+ __aligned_u64 node_guid;
struct ib_user_path_rec ib_route[2];
struct sockaddr_in6 src_addr;
struct sockaddr_in6 dst_addr;
@@ -159,7 +167,7 @@ struct rdma_ucm_query_route_resp {
};
struct rdma_ucm_query_addr_resp {
- __u64 node_guid;
+ __aligned_u64 node_guid;
__u8 port_num;
__u8 reserved;
__u16 pkey;
@@ -210,7 +218,7 @@ struct rdma_ucm_listen {
};
struct rdma_ucm_accept {
- __u64 uid;
+ __aligned_u64 uid;
struct rdma_ucm_conn_param conn_param;
__u32 id;
__u32 reserved;
@@ -228,7 +236,7 @@ struct rdma_ucm_disconnect {
};
struct rdma_ucm_init_qp_attr {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 qp_state;
};
@@ -239,8 +247,8 @@ struct rdma_ucm_notify {
};
struct rdma_ucm_join_ip_mcast {
- __u64 response; /* rdma_ucm_create_id_resp */
- __u64 uid;
+ __aligned_u64 response; /* rdma_ucm_create_id_resp */
+ __aligned_u64 uid;
struct sockaddr_in6 addr;
__u32 id;
};
@@ -253,8 +261,8 @@ enum {
};
struct rdma_ucm_join_mcast {
- __u64 response; /* rdma_ucma_create_id_resp */
- __u64 uid;
+ __aligned_u64 response; /* rdma_ucma_create_id_resp */
+ __aligned_u64 uid;
__u32 id;
__u16 addr_size;
__u16 join_flags;
@@ -262,18 +270,23 @@ struct rdma_ucm_join_mcast {
};
struct rdma_ucm_get_event {
- __u64 response;
+ __aligned_u64 response;
};
struct rdma_ucm_event_resp {
- __u64 uid;
+ __aligned_u64 uid;
__u32 id;
__u32 event;
__u32 status;
+ /*
+ * NOTE: This union is not aligned to 8 bytes so none of the union
+ * members may contain a u64 or anything with higher alignment than 4.
+ */
union {
struct rdma_ucm_conn_param conn;
struct rdma_ucm_ud_param ud;
} param;
+ __u32 reserved;
};
/* Option levels */
@@ -291,7 +304,7 @@ enum {
};
struct rdma_ucm_set_option {
- __u64 optval;
+ __aligned_u64 optval;
__u32 id;
__u32 level;
__u32 optname;
@@ -299,7 +312,7 @@ struct rdma_ucm_set_option {
};
struct rdma_ucm_migrate_id {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 fd;
};
diff --git a/include/uapi/rdma/rdma_user_ioctl.h b/include/uapi/rdma/rdma_user_ioctl.h
index 46de0885e800..d223f4164a0f 100644
--- a/include/uapi/rdma/rdma_user_ioctl.h
+++ b/include/uapi/rdma/rdma_user_ioctl.h
@@ -34,49 +34,13 @@
#ifndef RDMA_USER_IOCTL_H
#define RDMA_USER_IOCTL_H
-#include <linux/types.h>
-#include <linux/ioctl.h>
#include <rdma/ib_user_mad.h>
#include <rdma/hfi/hfi1_ioctl.h>
+#include <rdma/rdma_user_ioctl_cmds.h>
-/* Documentation/ioctl/ioctl-number.txt */
-#define RDMA_IOCTL_MAGIC 0x1b
/* Legacy name, for user space applications which already use it */
#define IB_IOCTL_MAGIC RDMA_IOCTL_MAGIC
-#define RDMA_VERBS_IOCTL \
- _IOWR(RDMA_IOCTL_MAGIC, 1, struct ib_uverbs_ioctl_hdr)
-
-#define UVERBS_ID_NS_MASK 0xF000
-#define UVERBS_ID_NS_SHIFT 12
-
-enum {
- /* User input */
- UVERBS_ATTR_F_MANDATORY = 1U << 0,
- /*
- * Valid output bit should be ignored and considered set in
- * mandatory fields. This bit is kernel output.
- */
- UVERBS_ATTR_F_VALID_OUTPUT = 1U << 1,
-};
-
-struct ib_uverbs_attr {
- __u16 attr_id; /* command specific type attribute */
- __u16 len; /* only for pointers */
- __u16 flags; /* combination of UVERBS_ATTR_F_XXXX */
- __u16 reserved;
- __aligned_u64 data; /* ptr to command, inline data or idr/fd */
-};
-
-struct ib_uverbs_ioctl_hdr {
- __u16 length;
- __u16 object_id;
- __u16 method_id;
- __u16 num_attrs;
- __aligned_u64 reserved;
- struct ib_uverbs_attr attrs[0];
-};
-
/*
 * General block assignments
 * It is closed on purpose; do not expose it to user space
diff --git a/include/uapi/rdma/rdma_user_ioctl_cmds.h b/include/uapi/rdma/rdma_user_ioctl_cmds.h
new file mode 100644
index 000000000000..1da5a1e1f3a8
--- /dev/null
+++ b/include/uapi/rdma/rdma_user_ioctl_cmds.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RDMA_USER_IOCTL_CMDS_H
+#define RDMA_USER_IOCTL_CMDS_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* Documentation/ioctl/ioctl-number.txt */
+#define RDMA_IOCTL_MAGIC 0x1b
+#define RDMA_VERBS_IOCTL \
+ _IOWR(RDMA_IOCTL_MAGIC, 1, struct ib_uverbs_ioctl_hdr)
+
+enum {
+ /* User input */
+ UVERBS_ATTR_F_MANDATORY = 1U << 0,
+ /*
+ * Valid output bit should be ignored and considered set in
+ * mandatory fields. This bit is kernel output.
+ */
+ UVERBS_ATTR_F_VALID_OUTPUT = 1U << 1,
+};
+
+struct ib_uverbs_attr {
+ __u16 attr_id; /* command specific type attribute */
+ __u16 len; /* only for pointers */
+ __u16 flags; /* combination of UVERBS_ATTR_F_XXXX */
+ union {
+ struct {
+ __u8 elem_id;
+ __u8 reserved;
+ } enum_data;
+ __u16 reserved;
+ } attr_data;
+ __aligned_u64 data; /* ptr to command, inline data or idr/fd */
+};
+
+struct ib_uverbs_ioctl_hdr {
+ __u16 length;
+ __u16 object_id;
+ __u16 method_id;
+ __u16 num_attrs;
+ __aligned_u64 reserved1;
+ __u32 driver_id;
+ __u32 reserved2;
+ struct ib_uverbs_attr attrs[0];
+};
+
+enum rdma_driver_id {
+ RDMA_DRIVER_UNKNOWN,
+ RDMA_DRIVER_MLX5,
+ RDMA_DRIVER_MLX4,
+ RDMA_DRIVER_CXGB3,
+ RDMA_DRIVER_CXGB4,
+ RDMA_DRIVER_MTHCA,
+ RDMA_DRIVER_BNXT_RE,
+ RDMA_DRIVER_OCRDMA,
+ RDMA_DRIVER_NES,
+ RDMA_DRIVER_I40IW,
+ RDMA_DRIVER_VMW_PVRDMA,
+ RDMA_DRIVER_QEDR,
+ RDMA_DRIVER_HNS,
+ RDMA_DRIVER_USNIC,
+ RDMA_DRIVER_RXE,
+ RDMA_DRIVER_HFI1,
+ RDMA_DRIVER_QIB,
+};
+
+#endif
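
rdma_user_ioctl_cmds.h now owns the wire format of the extensible ioctl: a header naming the object, method and driver, followed by an array of attributes whose data field carries either an inline value or a user pointer. Below is a sketch, not the libibverbs implementation, of destroying a CQ through this interface; it assumes an already-open uverbs command fd, uses RDMA_DRIVER_UNKNOWN as a generic driver id, and the exact attribute set a method accepts is ultimately defined by the kernel's method specification.

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <rdma/rdma_user_ioctl_cmds.h>
#include <rdma/ib_user_ioctl_cmds.h>
#include <rdma/ib_user_verbs.h>

static int destroy_cq(int cmd_fd, __u32 cq_handle)
{
	struct ib_uverbs_destroy_cq_resp resp = {0};
	struct ib_uverbs_ioctl_hdr *hdr;
	struct ib_uverbs_attr *attr;
	size_t len = sizeof(*hdr) + 2 * sizeof(*attr);
	int ret;

	hdr = calloc(1, len);
	if (!hdr)
		return -1;

	hdr->length = len;
	hdr->object_id = UVERBS_OBJECT_CQ;
	hdr->method_id = UVERBS_METHOD_CQ_DESTROY;
	hdr->num_attrs = 2;
	hdr->driver_id = RDMA_DRIVER_UNKNOWN;

	attr = hdr->attrs;
	attr[0].attr_id = UVERBS_ATTR_DESTROY_CQ_HANDLE;
	attr[0].flags = UVERBS_ATTR_F_MANDATORY;
	attr[0].data = cq_handle;		/* object handle passed inline */

	attr[1].attr_id = UVERBS_ATTR_DESTROY_CQ_RESP;
	attr[1].flags = UVERBS_ATTR_F_MANDATORY;
	attr[1].len = sizeof(resp);
	attr[1].data = (uintptr_t)&resp;	/* kernel fills events_reported */

	ret = ioctl(cmd_fd, RDMA_VERBS_IOCTL, hdr);
	free(hdr);
	return ret;
}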
diff --git a/include/uapi/rdma/rdma_user_rxe.h b/include/uapi/rdma/rdma_user_rxe.h
index bdeea948b2f3..1f8a9e7daea4 100644
--- a/include/uapi/rdma/rdma_user_rxe.h
+++ b/include/uapi/rdma/rdma_user_rxe.h
@@ -35,6 +35,9 @@
#define RDMA_USER_RXE_H
#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/in6.h>
union rxe_gid {
__u8 raw[16];
@@ -55,16 +58,17 @@ struct rxe_global_route {
struct rxe_av {
__u8 port_num;
__u8 network_type;
+ __u16 reserved1;
+ __u32 reserved2;
struct rxe_global_route grh;
union {
- struct sockaddr _sockaddr;
struct sockaddr_in _sockaddr_in;
struct sockaddr_in6 _sockaddr_in6;
} sgid_addr, dgid_addr;
};
struct rxe_send_wr {
- __u64 wr_id;
+ __aligned_u64 wr_id;
__u32 num_sge;
__u32 opcode;
__u32 send_flags;
@@ -74,36 +78,42 @@ struct rxe_send_wr {
} ex;
union {
struct {
- __u64 remote_addr;
+ __aligned_u64 remote_addr;
__u32 rkey;
+ __u32 reserved;
} rdma;
struct {
- __u64 remote_addr;
- __u64 compare_add;
- __u64 swap;
+ __aligned_u64 remote_addr;
+ __aligned_u64 compare_add;
+ __aligned_u64 swap;
__u32 rkey;
+ __u32 reserved;
} atomic;
struct {
__u32 remote_qpn;
__u32 remote_qkey;
__u16 pkey_index;
} ud;
+ /* reg is only used by the kernel and is not part of the uapi */
struct {
- struct ib_mr *mr;
+ union {
+ struct ib_mr *mr;
+ __aligned_u64 reserved;
+ };
__u32 key;
- int access;
+ __u32 access;
} reg;
} wr;
};
struct rxe_sge {
- __u64 addr;
+ __aligned_u64 addr;
__u32 length;
__u32 lkey;
};
struct mminfo {
- __u64 offset;
+ __aligned_u64 offset;
__u32 size;
__u32 pad;
};
@@ -114,6 +124,7 @@ struct rxe_dma_info {
__u32 cur_sge;
__u32 num_sge;
__u32 sge_offset;
+ __u32 reserved;
union {
__u8 inline_data[0];
struct rxe_sge sge[0];
@@ -125,7 +136,7 @@ struct rxe_send_wqe {
struct rxe_av av;
__u32 status;
__u32 state;
- __u64 iova;
+ __aligned_u64 iova;
__u32 mask;
__u32 first_psn;
__u32 last_psn;
@@ -136,10 +147,33 @@ struct rxe_send_wqe {
};
struct rxe_recv_wqe {
- __u64 wr_id;
+ __aligned_u64 wr_id;
__u32 num_sge;
__u32 padding;
struct rxe_dma_info dma;
};
+struct rxe_create_cq_resp {
+ struct mminfo mi;
+};
+
+struct rxe_resize_cq_resp {
+ struct mminfo mi;
+};
+
+struct rxe_create_qp_resp {
+ struct mminfo rq_mi;
+ struct mminfo sq_mi;
+};
+
+struct rxe_create_srq_resp {
+ struct mminfo mi;
+ __u32 srq_num;
+ __u32 reserved;
+};
+
+struct rxe_modify_srq_cmd {
+ __aligned_u64 mmap_info_addr;
+};
+
#endif /* RDMA_USER_RXE_H */
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
index 02ca0d0f1eb7..d13fd490b66d 100644
--- a/include/uapi/rdma/vmw_pvrdma-abi.h
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -143,7 +143,7 @@ struct pvrdma_alloc_pd_resp {
};
struct pvrdma_create_cq {
- __u64 buf_addr;
+ __aligned_u64 buf_addr;
__u32 buf_size;
__u32 reserved;
};
@@ -154,13 +154,13 @@ struct pvrdma_create_cq_resp {
};
struct pvrdma_resize_cq {
- __u64 buf_addr;
+ __aligned_u64 buf_addr;
__u32 buf_size;
__u32 reserved;
};
struct pvrdma_create_srq {
- __u64 buf_addr;
+ __aligned_u64 buf_addr;
__u32 buf_size;
__u32 reserved;
};
@@ -171,25 +171,25 @@ struct pvrdma_create_srq_resp {
};
struct pvrdma_create_qp {
- __u64 rbuf_addr;
- __u64 sbuf_addr;
+ __aligned_u64 rbuf_addr;
+ __aligned_u64 sbuf_addr;
__u32 rbuf_size;
__u32 sbuf_size;
- __u64 qp_addr;
+ __aligned_u64 qp_addr;
};
/* PVRDMA masked atomic compare and swap */
struct pvrdma_ex_cmp_swap {
- __u64 swap_val;
- __u64 compare_val;
- __u64 swap_mask;
- __u64 compare_mask;
+ __aligned_u64 swap_val;
+ __aligned_u64 compare_val;
+ __aligned_u64 swap_mask;
+ __aligned_u64 compare_mask;
};
/* PVRDMA masked atomic fetch and add */
struct pvrdma_ex_fetch_add {
- __u64 add_val;
- __u64 field_boundary;
+ __aligned_u64 add_val;
+ __aligned_u64 field_boundary;
};
/* PVRDMA address vector. */
@@ -207,14 +207,14 @@ struct pvrdma_av {
/* PVRDMA scatter/gather entry */
struct pvrdma_sge {
- __u64 addr;
+ __aligned_u64 addr;
__u32 length;
__u32 lkey;
};
/* PVRDMA receive queue work request */
struct pvrdma_rq_wqe_hdr {
- __u64 wr_id; /* wr id */
+ __aligned_u64 wr_id; /* wr id */
__u32 num_sge; /* size of s/g array */
__u32 total_len; /* reserved */
};
@@ -222,7 +222,7 @@ struct pvrdma_rq_wqe_hdr {
/* PVRDMA send queue work request */
struct pvrdma_sq_wqe_hdr {
- __u64 wr_id; /* wr id */
+ __aligned_u64 wr_id; /* wr id */
__u32 num_sge; /* size of s/g array */
__u32 total_len; /* reserved */
__u32 opcode; /* operation type */
@@ -234,19 +234,19 @@ struct pvrdma_sq_wqe_hdr {
__u32 reserved;
union {
struct {
- __u64 remote_addr;
+ __aligned_u64 remote_addr;
__u32 rkey;
__u8 reserved[4];
} rdma;
struct {
- __u64 remote_addr;
- __u64 compare_add;
- __u64 swap;
+ __aligned_u64 remote_addr;
+ __aligned_u64 compare_add;
+ __aligned_u64 swap;
__u32 rkey;
__u32 reserved;
} atomic;
struct {
- __u64 remote_addr;
+ __aligned_u64 remote_addr;
__u32 log_arg_sz;
__u32 rkey;
union {
@@ -255,13 +255,14 @@ struct pvrdma_sq_wqe_hdr {
} wr_data;
} masked_atomics;
struct {
- __u64 iova_start;
- __u64 pl_pdir_dma;
+ __aligned_u64 iova_start;
+ __aligned_u64 pl_pdir_dma;
__u32 page_shift;
__u32 page_list_len;
__u32 length;
__u32 access_flags;
__u32 rkey;
+ __u32 reserved;
} fast_reg;
struct {
__u32 remote_qpn;
@@ -274,8 +275,8 @@ struct pvrdma_sq_wqe_hdr {
/* Completion queue element. */
struct pvrdma_cqe {
- __u64 wr_id;
- __u64 qp;
+ __aligned_u64 wr_id;
+ __aligned_u64 qp;
__u32 opcode;
__u32 status;
__u32 byte_len;