author     Linus Torvalds <torvalds@linux-foundation.org>  2018-01-31 23:05:10 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-01-31 23:05:10 +0300
commit     7b1cd95d65eb3b1e13f8a90eb757e0ea232c7899 (patch)
tree       cbc3ec5d45b04666c24f7c0b1df04a85d29c7d0f /include
parent     2155e69a9d9acd42488ef994a4e1ff535438c128 (diff)
parent     e7996a9a77fc669387da43ff4823b91cc4872bd0 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull RDMA subsystem updates from Jason Gunthorpe:
 "Overall this cycle did not have any major excitement, and did not
  require any shared branch with netdev.

  Lots of driver updates, particularly of the scale-up and performance
  variety. The largest body of core work was Parav's patches fixing and
  restructuring some of the core code to make way for future RDMA
  containerization.

  Summary:

   - misc small driver fixups to
     bnxt_re/hfi1/qib/hns/ocrdma/rdmavt/vmw_pvrdma/nes

   - several major feature adds to bnxt_re driver: SRIOV VF RoCE
     support, HugePages support, extended hardware stats support, and
     SRQ support

   - a notable number of fixes to the i40iw driver from debugging scale
     up testing

   - more work to enable the new hip08 chip in the hns driver

   - misc small ULP fixups to srp/srpt/ipoib

   - preparation for srp initiator and target to support the RDMA-CM
     protocol for connections

   - add RDMA-CM support to srp initiator, srp target is still a WIP

   - fixes for a couple of places where ipoib could spam the dmesg log

   - fix encode/decode of FDR/EDR data rates in the core

   - many patches from Parav with ongoing work to clean up
     inconsistencies and bugs in RoCE support around the rdma_cm

   - mlx5 driver support for the userspace features 'thread domain',
     'wallclock timestamps' and 'DV Direct Connected transport'.
     Support for the firmware dual port RoCE capability

   - core support for more than 32 rdma devices in the char dev
     allocation

   - kernel doc updates from Randy Dunlap

   - new netlink uAPI for inspecting RDMA objects similar in spirit to 'ss'

   - one minor change to the kobject code acked by Greg KH"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (259 commits)
  RDMA/nldev: Provide detailed QP information
  RDMA/nldev: Provide global resource utilization
  RDMA/core: Add resource tracking for create and destroy PDs
  RDMA/core: Add resource tracking for create and destroy CQs
  RDMA/core: Add resource tracking for create and destroy QPs
  RDMA/restrack: Add general infrastructure to track RDMA resources
  RDMA/core: Save kernel caller name when creating PD and CQ objects
  RDMA/core: Use the MODNAME instead of the function name for pd callers
  RDMA: Move enum ib_cq_creation_flags to uapi headers
  IB/rxe: Change RDMA_RXE kconfig to use select
  IB/qib: remove qib_keys.c
  IB/mthca: remove mthca_user.h
  RDMA/cm: Fix access to uninitialized variable
  RDMA/cma: Use existing netif_is_bond_master function
  IB/core: Avoid SGID attributes query while converting GID from OPA to IB
  RDMA/mlx5: Avoid memory leak in case of XRCD dealloc failure
  IB/umad: Fix use of unprotected device pointer
  IB/iser: Combine substrings for three messages
  IB/iser: Delete an unnecessary variable initialisation in iser_send_data_out()
  IB/iser: Delete an error message for a failed memory allocation in iser_send_data_out()
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/linux/mlx5/device.h          16
-rw-r--r--  include/linux/mlx5/driver.h          43
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h        32
-rw-r--r--  include/linux/mlx5/qp.h              12
-rw-r--r--  include/linux/mlx5/vport.h            4
-rw-r--r--  include/rdma/ib_addr.h               38
-rw-r--r--  include/rdma/ib_sa.h                 10
-rw-r--r--  include/rdma/ib_verbs.h              64
-rw-r--r--  include/rdma/opa_addr.h              16
-rw-r--r--  include/rdma/rdma_cm.h               19
-rw-r--r--  include/rdma/rdma_cm_ib.h             8
-rw-r--r--  include/rdma/rdma_vt.h               31
-rw-r--r--  include/rdma/restrack.h             157
-rw-r--r--  include/scsi/srp.h                   17
-rw-r--r--  include/uapi/rdma/bnxt_re-abi.h       9
-rw-r--r--  include/uapi/rdma/ib_user_verbs.h    11
-rw-r--r--  include/uapi/rdma/mlx4-abi.h          7
-rw-r--r--  include/uapi/rdma/mlx5-abi.h         53
-rw-r--r--  include/uapi/rdma/rdma_netlink.h     49
-rw-r--r--  include/uapi/rdma/vmw_pvrdma-abi.h   12
20 files changed, 521 insertions, 87 deletions
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 409ffb14298a..e5258ee4e38b 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -79,6 +79,11 @@
<< __mlx5_dw_bit_off(typ, fld))); \
} while (0)
+#define MLX5_ARRAY_SET(typ, p, fld, idx, v) do { \
+ BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 32); \
+ MLX5_SET(typ, p, fld[idx], v); \
+} while (0)
+
#define MLX5_SET_TO_ONES(typ, p, fld) do { \
BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
@@ -244,6 +249,8 @@ enum {
MLX5_NON_FP_BFREGS_PER_UAR,
MLX5_UARS_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
MLX5_NON_FP_BFREGS_IN_PAGE = MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE,
+ MLX5_MIN_DYN_BFREGS = 512,
+ MLX5_MAX_DYN_BFREGS = 1024,
};
enum {
@@ -284,6 +291,7 @@ enum {
MLX5_EVENT_QUEUE_TYPE_QP = 0,
MLX5_EVENT_QUEUE_TYPE_RQ = 1,
MLX5_EVENT_QUEUE_TYPE_SQ = 2,
+ MLX5_EVENT_QUEUE_TYPE_DCT = 6,
};
enum mlx5_event {
@@ -319,6 +327,8 @@ enum mlx5_event {
MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,
+ MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c,
+
MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
};
@@ -611,6 +621,11 @@ struct mlx5_eqe_pps {
u8 rsvd2[12];
} __packed;
+struct mlx5_eqe_dct {
+ __be32 reserved[6];
+ __be32 dctn;
+};
+
union ev_data {
__be32 raw[7];
struct mlx5_eqe_cmd cmd;
@@ -626,6 +641,7 @@ union ev_data {
struct mlx5_eqe_vport_change vport_change;
struct mlx5_eqe_port_module port_module;
struct mlx5_eqe_pps pps;
+ struct mlx5_eqe_dct dct;
} __packed;
struct mlx5_eqe {
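As a rough illustration of the new MLX5_ARRAY_SET() helper (a sketch, not a call site from this diff), this is roughly how the mlx5 core can fill a 32-bit array field of a firmware command layout, here the sw_owner_id[] array that INIT_HCA gains later in this series:

static void init_hca_set_sw_owner_id(void *in, const u32 *owner_id)
{
	int i;

	/* sw_owner_id is laid out as 4 consecutive dwords in init_hca_in */
	for (i = 0; i < 4; i++)
		MLX5_ARRAY_SET(init_hca_in, in, sw_owner_id, i, owner_id[i]);
}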
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index a0610427e168..fb7e8b205eb9 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -155,6 +155,13 @@ enum mlx5_dcbx_oper_mode {
MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
};
+enum mlx5_dct_atomic_mode {
+ MLX5_ATOMIC_MODE_DCT_OFF = 20,
+ MLX5_ATOMIC_MODE_DCT_NONE = 0 << MLX5_ATOMIC_MODE_DCT_OFF,
+ MLX5_ATOMIC_MODE_DCT_IB_COMP = 1 << MLX5_ATOMIC_MODE_DCT_OFF,
+ MLX5_ATOMIC_MODE_DCT_CX = 2 << MLX5_ATOMIC_MODE_DCT_OFF,
+};
+
enum {
MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
@@ -231,6 +238,9 @@ struct mlx5_bfreg_info {
u32 ver;
bool lib_uar_4k;
u32 num_sys_pages;
+ u32 num_static_sys_pages;
+ u32 total_num_bfregs;
+ u32 num_dyn_bfregs;
};
struct mlx5_cmd_first {
@@ -430,6 +440,7 @@ enum mlx5_res_type {
MLX5_RES_SRQ = 3,
MLX5_RES_XSRQ = 4,
MLX5_RES_XRQ = 5,
+ MLX5_RES_DCT = MLX5_EVENT_QUEUE_TYPE_DCT,
};
struct mlx5_core_rsc_common {
@@ -788,6 +799,7 @@ struct mlx5_clock {
u32 nominal_c_mult;
unsigned long overflow_period;
struct delayed_work overflow_work;
+ struct mlx5_core_dev *mdev;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
struct mlx5_pps pps_info;
@@ -826,7 +838,7 @@ struct mlx5_core_dev {
struct mlx5e_resources mlx5e_res;
struct {
struct mlx5_rsvd_gids reserved_gids;
- atomic_t roce_en;
+ u32 roce_en;
} roce;
#ifdef CONFIG_MLX5_FPGA
struct mlx5_fpga_device *fpga;
@@ -835,6 +847,8 @@ struct mlx5_core_dev {
struct cpu_rmap *rmap;
#endif
struct mlx5_clock clock;
+ struct mlx5_ib_clock_info *clock_info;
+ struct page *clock_info_page;
};
struct mlx5_db {
@@ -1103,7 +1117,7 @@ void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
u8 roce_version, u8 roce_l3_type, const u8 *gid,
- const u8 *mac, bool vlan, u16 vlan_id);
+ const u8 *mac, bool vlan, u16 vlan_id, u8 port_num);
static inline int fw_initializing(struct mlx5_core_dev *dev)
{
@@ -1225,6 +1239,31 @@ static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
return !!(dev->priv.rl_table.max_size);
}
+static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) &&
+ MLX5_CAP_GEN(dev, num_vhca_ports) <= 1;
+}
+
+static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, num_vhca_ports) > 1;
+}
+
+static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev)
+{
+ return mlx5_core_is_mp_slave(dev) ||
+ mlx5_core_is_mp_master(dev);
+}
+
+static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
+{
+ if (!mlx5_core_mp_enabled(dev))
+ return 1;
+
+ return MLX5_CAP_GEN(dev, native_port_num);
+}
+
enum {
MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 1391a82da98e..7ac7bd76c7af 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -502,7 +502,7 @@ struct mlx5_ifc_ads_bits {
u8 dei_cfi[0x1];
u8 eth_prio[0x3];
u8 sl[0x4];
- u8 port[0x8];
+ u8 vhca_port_num[0x8];
u8 rmac_47_32[0x10];
u8 rmac_31_0[0x20];
@@ -794,7 +794,10 @@ enum {
};
struct mlx5_ifc_cmd_hca_cap_bits {
- u8 reserved_at_0[0x80];
+ u8 reserved_at_0[0x30];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_40[0x40];
u8 log_max_srq_sz[0x8];
u8 log_max_qp_sz[0x8];
@@ -1067,7 +1070,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_5f8[0x3];
u8 log_max_xrq[0x5];
- u8 reserved_at_600[0x200];
+ u8 affiliate_nic_vport_criteria[0x8];
+ u8 native_port_num[0x8];
+ u8 num_vhca_ports[0x8];
+ u8 reserved_at_618[0x6];
+ u8 sw_owner_id[0x1];
+ u8 reserved_at_61f[0x1e1];
};
enum mlx5_flow_destination_type {
@@ -2616,7 +2624,12 @@ struct mlx5_ifc_nic_vport_context_bits {
u8 event_on_mc_address_change[0x1];
u8 event_on_uc_address_change[0x1];
- u8 reserved_at_40[0xf0];
+ u8 reserved_at_40[0xc];
+
+ u8 affiliation_criteria[0x4];
+ u8 affiliated_vhca_id[0x10];
+
+ u8 reserved_at_60[0xd0];
u8 mtu[0x10];
@@ -3259,7 +3272,8 @@ struct mlx5_ifc_set_roce_address_in_bits {
u8 op_mod[0x10];
u8 roce_address_index[0x10];
- u8 reserved_at_50[0x10];
+ u8 reserved_at_50[0xc];
+ u8 vhca_port_num[0x4];
u8 reserved_at_60[0x20];
@@ -3879,7 +3893,8 @@ struct mlx5_ifc_query_roce_address_in_bits {
u8 op_mod[0x10];
u8 roce_address_index[0x10];
- u8 reserved_at_50[0x10];
+ u8 reserved_at_50[0xc];
+ u8 vhca_port_num[0x4];
u8 reserved_at_60[0x20];
};
@@ -5311,7 +5326,9 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits {
};
struct mlx5_ifc_modify_nic_vport_field_select_bits {
- u8 reserved_at_0[0x14];
+ u8 reserved_at_0[0x12];
+ u8 affiliation[0x1];
+ u8 reserved_at_e[0x1];
u8 disable_uc_local_lb[0x1];
u8 disable_mc_local_lb[0x1];
u8 node_guid[0x1];
@@ -5532,6 +5549,7 @@ struct mlx5_ifc_init_hca_in_bits {
u8 op_mod[0x10];
u8 reserved_at_40[0x40];
+ u8 sw_owner_id[4][0x20];
};
struct mlx5_ifc_init2rtr_qp_out_bits {
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 62af7512dabb..4778d41085d4 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -473,6 +473,11 @@ struct mlx5_core_qp {
int pid;
};
+struct mlx5_core_dct {
+ struct mlx5_core_qp mqp;
+ struct completion drained;
+};
+
struct mlx5_qp_path {
u8 fl_free_ar;
u8 rsvd3;
@@ -549,6 +554,9 @@ static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev,
return radix_tree_lookup(&dev->priv.mkey_table.tree, key);
}
+int mlx5_core_create_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *qp,
+ u32 *in, int inlen);
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
u32 *in,
@@ -558,8 +566,12 @@ int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
struct mlx5_core_qp *qp);
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp);
+int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct);
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
u32 *out, int outlen);
+int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
+ u32 *out, int outlen);
int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
u32 timeout_usec);
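The DCT helpers above pair with the new MLX5_EVENT_TYPE_DCT_DRAINED event and the drained completion in struct mlx5_core_dct. A hedged sketch of the expected lifecycle (the real caller is the mlx5 IB driver):

static int example_dct_lifecycle(struct mlx5_core_dev *dev,
				 struct mlx5_core_dct *dct,
				 u32 *in, int inlen)
{
	int err;

	err = mlx5_core_create_dct(dev, dct, in, inlen);
	if (err)
		return err;

	/* ... accept DC connections, exchange traffic ... */

	/* teardown is expected to drain the DCT and wait on dct->drained,
	 * completed from the DCT_DRAINED EQ event, before the final destroy */
	return mlx5_core_destroy_dct(dev, dct);
}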
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index aaa0bb9e7655..64e193e87394 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -116,4 +116,8 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
struct mlx5_hca_vport_context *req);
int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable);
int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status);
+
+int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
+ struct mlx5_core_dev *port_mdev);
+int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev);
#endif /* __MLX5_VPORT_H__ */
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 18c564f60e93..d656809f1217 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -94,7 +94,7 @@ struct rdma_dev_addr {
* The dev_addr->net field must be initialized.
*/
int rdma_translate_ip(const struct sockaddr *addr,
- struct rdma_dev_addr *dev_addr, u16 *vlan_id);
+ struct rdma_dev_addr *dev_addr);
/**
* rdma_resolve_ip - Resolve source and destination IP addresses to
@@ -131,10 +131,9 @@ void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
int rdma_addr_size(struct sockaddr *addr);
-int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id);
int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
const union ib_gid *dgid,
- u8 *smac, u16 *vlan_id, int *if_index,
+ u8 *dmac, const struct net_device *ndev,
int *hoplimit);
static inline u16 ib_addr_get_pkey(struct rdma_dev_addr *dev_addr)
@@ -198,34 +197,15 @@ static inline void rdma_gid2ip(struct sockaddr *out, const union ib_gid *gid)
}
}
-static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr,
- union ib_gid *gid)
-{
- struct net_device *dev;
- struct in_device *ip4;
-
- dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
- if (dev) {
- ip4 = in_dev_get(dev);
- if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address)
- ipv6_addr_set_v4mapped(ip4->ifa_list->ifa_address,
- (struct in6_addr *)gid);
-
- if (ip4)
- in_dev_put(ip4);
-
- dev_put(dev);
- }
-}
-
+/*
+ * rdma_get/set_sgid/dgid() APIs are applicable to IB and iWarp.
+ * They are not applicable to RoCE.
+ * RoCE GIDs are derived from the IP addresses.
+ */
static inline void rdma_addr_get_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid)
{
- if (dev_addr->transport == RDMA_TRANSPORT_IB &&
- dev_addr->dev_type != ARPHRD_INFINIBAND)
- iboe_addr_get_sgid(dev_addr, gid);
- else
- memcpy(gid, dev_addr->src_dev_addr +
- rdma_addr_gid_offset(dev_addr), sizeof *gid);
+ memcpy(gid, dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr),
+ sizeof(*gid));
}
static inline void rdma_addr_set_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid)
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index 1f7f604db5aa..811cfcfcbe3d 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -549,12 +549,12 @@ int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
struct rdma_ah_attr *ah_attr);
/**
- * ib_init_ah_from_path - Initialize address handle attributes based on an SA
- * path record.
+ * ib_init_ah_attr_from_path - Initialize address handle attributes based on
+ * an SA path record.
*/
-int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
- struct sa_path_rec *rec,
- struct rdma_ah_attr *ah_attr);
+int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
+ struct sa_path_rec *rec,
+ struct rdma_ah_attr *ah_attr);
/**
* ib_sa_pack_path - Convert a path record from struct ib_sa_path_rec
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index fd84cda5ed7c..5263c86fd103 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -63,6 +63,7 @@
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <uapi/rdma/ib_user_verbs.h>
+#include <rdma/restrack.h>
#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
@@ -300,11 +301,6 @@ struct ib_tm_caps {
u32 max_sge;
};
-enum ib_cq_creation_flags {
- IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
- IB_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1,
-};
-
struct ib_cq_init_attr {
unsigned int cqe;
int comp_vector;
@@ -983,9 +979,9 @@ struct ib_wc {
u32 invalidate_rkey;
} ex;
u32 src_qp;
+ u32 slid;
int wc_flags;
u16 pkey_index;
- u32 slid;
u8 sl;
u8 dlid_path_bits;
u8 port_num; /* valid only for DR SMPs on switches */
@@ -1082,6 +1078,7 @@ enum ib_qp_type {
IB_QPT_XRC_INI = 9,
IB_QPT_XRC_TGT,
IB_QPT_MAX,
+ IB_QPT_DRIVER = 0xFF,
/* Reserve a range for qp types internal to the low level driver.
* These qp types will not be visible at the IB core layer, so the
* IB_QPT_MAX usages should not be affected in the core layer
@@ -1529,6 +1526,7 @@ struct ib_pd {
* Implementation details of the RDMA core, don't use in drivers:
*/
struct ib_mr *__internal_mr;
+ struct rdma_restrack_entry res;
};
struct ib_xrcd {
@@ -1538,6 +1536,10 @@ struct ib_xrcd {
struct mutex tgt_qp_mutex;
struct list_head tgt_qp_list;
+ /*
+ * Implementation details of the RDMA core, don't use in drivers:
+ */
+ struct rdma_restrack_entry res;
};
struct ib_ah {
@@ -1569,6 +1571,10 @@ struct ib_cq {
struct irq_poll iop;
struct work_struct work;
};
+ /*
+ * Implementation details of the RDMA core, don't use in drivers:
+ */
+ struct rdma_restrack_entry res;
};
struct ib_srq {
@@ -1745,6 +1751,11 @@ struct ib_qp {
struct ib_rwq_ind_table *rwq_ind_tbl;
struct ib_qp_security *qp_sec;
u8 port;
+
+ /*
+ * Implementation details of the RDMA core, don't use in drivers:
+ */
+ struct rdma_restrack_entry res;
};
struct ib_mr {
@@ -2351,6 +2362,10 @@ struct ib_device {
#endif
u32 index;
+ /*
+ * Implementation details of the RDMA core, don't use in drivers
+ */
+ struct rdma_restrack_root res;
/**
* The following mandatory functions are used only at device
@@ -2836,8 +2851,7 @@ int ib_modify_port(struct ib_device *device,
struct ib_port_modify *port_modify);
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
- enum ib_gid_type gid_type, struct net_device *ndev,
- u8 *port_num, u16 *index);
+ struct net_device *ndev, u8 *port_num, u16 *index);
int ib_find_pkey(struct ib_device *device,
u8 port_num, u16 pkey, u16 *index);
@@ -2858,7 +2872,7 @@ enum ib_pd_flags {
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
const char *caller);
#define ib_alloc_pd(device, flags) \
- __ib_alloc_pd((device), (flags), __func__)
+ __ib_alloc_pd((device), (flags), KBUILD_MODNAME)
void ib_dealloc_pd(struct ib_pd *pd);
/**
@@ -2905,7 +2919,7 @@ int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
/**
- * ib_init_ah_from_wc - Initializes address handle attributes from a
+ * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
* work completion.
* @device: Device on which the received message arrived.
* @port_num: Port on which the received message arrived.
@@ -2915,9 +2929,9 @@ int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
* @ah_attr: Returned attributes that can be used when creating an address
* handle for replying to the message.
*/
-int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
- const struct ib_wc *wc, const struct ib_grh *grh,
- struct rdma_ah_attr *ah_attr);
+int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
+ const struct ib_wc *wc, const struct ib_grh *grh,
+ struct rdma_ah_attr *ah_attr);
/**
* ib_create_ah_from_wc - Creates an address handle associated with the
@@ -3135,8 +3149,12 @@ static inline int ib_post_recv(struct ib_qp *qp,
return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}
-struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
- int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx);
+struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
+ int nr_cqe, int comp_vector,
+ enum ib_poll_context poll_ctx, const char *caller);
+#define ib_alloc_cq(device, priv, nr_cqe, comp_vect, poll_ctx) \
+ __ib_alloc_cq((device), (priv), (nr_cqe), (comp_vect), (poll_ctx), KBUILD_MODNAME)
+
void ib_free_cq(struct ib_cq *cq);
int ib_process_cq_direct(struct ib_cq *cq, int budget);
@@ -3560,8 +3578,11 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
/**
* ib_alloc_xrcd - Allocates an XRC domain.
* @device: The device on which to allocate the XRC domain.
+ * @caller: Module name for kernel consumers
*/
-struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);
+struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
+#define ib_alloc_xrcd(device) \
+ __ib_alloc_xrcd((device), KBUILD_MODNAME)
/**
* ib_dealloc_xrcd - Deallocates an XRC domain.
@@ -3793,8 +3814,7 @@ static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
u32 port_num)
{
- if ((rdma_protocol_roce(dev, port_num)) ||
- (rdma_protocol_iwarp(dev, port_num)))
+ if (rdma_protocol_roce(dev, port_num))
return RDMA_AH_ATTR_TYPE_ROCE;
else if ((rdma_protocol_ib(dev, port_num)) &&
(rdma_cap_opa_ah(dev, port_num)))
@@ -3850,4 +3870,12 @@ ib_get_vector_affinity(struct ib_device *device, int comp_vector)
}
+/**
+ * rdma_roce_rescan_device - Rescan all of the network devices in the system
+ * and add their gids, as needed, to the relevant RoCE devices.
+ *
+ * @device: the rdma device
+ */
+void rdma_roce_rescan_device(struct ib_device *ibdev);
+
#endif /* IB_VERBS_H */
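With the __ib_alloc_pd()/__ib_alloc_cq() changes above, kernel ULPs keep their existing call sites; the wrappers now pass KBUILD_MODNAME so restrack can attribute PDs and CQs to the owning module. A minimal sketch for a hypothetical module "my_ulp" (not from this tree):

static int my_ulp_setup(struct ib_device *device)
{
	struct ib_pd *pd;
	struct ib_cq *cq;

	/* Recorded with kern_name "my_ulp" in the restrack database */
	pd = ib_alloc_pd(device, 0);
	if (IS_ERR(pd))
		return PTR_ERR(pd);

	cq = ib_alloc_cq(device, NULL, 16, 0, IB_POLL_SOFTIRQ);
	if (IS_ERR(cq)) {
		ib_dealloc_pd(pd);
		return PTR_ERR(cq);
	}

	/* ... */

	ib_free_cq(cq);
	ib_dealloc_pd(pd);
	return 0;
}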
diff --git a/include/rdma/opa_addr.h b/include/rdma/opa_addr.h
index f68fca296631..2bbb7a67e643 100644
--- a/include/rdma/opa_addr.h
+++ b/include/rdma/opa_addr.h
@@ -114,4 +114,20 @@ static inline u32 opa_get_mcast_base(u32 nr_top_bits)
return (be32_to_cpu(OPA_LID_PERMISSIVE) << (32 - nr_top_bits));
}
+/* Check for a valid unicast LID for non-SM traffic types */
+static inline bool rdma_is_valid_unicast_lid(struct rdma_ah_attr *attr)
+{
+ if (attr->type == RDMA_AH_ATTR_TYPE_IB) {
+ if (!rdma_ah_get_dlid(attr) ||
+ rdma_ah_get_dlid(attr) >=
+ be32_to_cpu(IB_MULTICAST_LID_BASE))
+ return false;
+ } else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) {
+ if (!rdma_ah_get_dlid(attr) ||
+ rdma_ah_get_dlid(attr) >=
+ opa_get_mcast_base(OPA_MCAST_NR))
+ return false;
+ }
+ return true;
+}
#endif /* OPA_ADDR_H */
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 3d2eed3c4e75..6538a5cc27b6 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -413,4 +413,23 @@ bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason);
const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
struct rdma_cm_event *ev, u8 *data_len);
+/**
+ * rdma_read_gids - Return the SGID and DGID used for establishing
+ * connection. This can be used after rdma_resolve_addr()
+ * on the client side. It can also be used on a new connection
+ * on the server side. This is applicable to IB, RoCE, iWarp.
+ * If cm_id is not bound yet to the RDMA device, it doesn't
+ * copy any SGID or DGID to the given pointers.
+ * @id: Communication identifier whose GIDs are queried.
+ * @sgid: Pointer to SGID where SGID will be returned. It is optional.
+ * @dgid: Pointer to DGID where DGID will be returned. It is optional.
+ * Note: This API should not be used by any new ULPs or new code.
+ * Instead, users interested in querying GIDs should refer to path record
+ * of the rdma_cm_id to query the GIDs.
+ * This API is provided for compatibility for existing users.
+ */
+
+void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid,
+ union ib_gid *dgid);
+
#endif /* RDMA_CM_H */
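A short usage sketch for the compatibility helper above, e.g. from a ULP's CM event handler once the cm_id is bound to a device (assumed consumer code, not part of this diff):

static void example_log_gids(struct rdma_cm_id *id)
{
	union ib_gid sgid, dgid;

	/* Either pointer may be NULL if only one of the GIDs is needed */
	rdma_read_gids(id, &sgid, &dgid);
	pr_debug("sgid %pI6 dgid %pI6\n", sgid.raw, dgid.raw);
}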
diff --git a/include/rdma/rdma_cm_ib.h b/include/rdma/rdma_cm_ib.h
index 6947a6ba2557..6a69d71a21a5 100644
--- a/include/rdma/rdma_cm_ib.h
+++ b/include/rdma/rdma_cm_ib.h
@@ -36,17 +36,17 @@
#include <rdma/rdma_cm.h>
/**
- * rdma_set_ib_paths - Manually sets the path records used to establish a
+ * rdma_set_ib_path - Manually sets the path record used to establish a
* connection.
* @id: Connection identifier associated with the request.
* @path_rec: Reference to the path record
*
* This call permits a user to specify routing information for rdma_cm_id's
- * bound to Infiniband devices. It is called on the client side of a
+ * bound to InfiniBand devices. It is called on the client side of a
* connection and replaces the call to rdma_resolve_route.
*/
-int rdma_set_ib_paths(struct rdma_cm_id *id,
- struct sa_path_rec *path_rec, int num_paths);
+int rdma_set_ib_path(struct rdma_cm_id *id,
+ struct sa_path_rec *path_rec);
/* Global qkey for UDP QPs and multicast groups. */
#define RDMA_UDP_QKEY 0x01234567
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index 1ba84a78f1c5..4118324a0310 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -228,13 +228,6 @@ struct rvt_driver_provided {
int (*port_callback)(struct ib_device *, u8, struct kobject *);
/*
- * Returns a string to represent the device for which is being
- * registered. This is primarily used for error and debug messages on
- * the console.
- */
- const char * (*get_card_name)(struct rvt_dev_info *rdi);
-
- /*
* Returns a pointer to the underlying hardware's PCI device. This is
* used to display information as to what hardware is being referenced
* in an output message
@@ -419,6 +412,30 @@ struct rvt_dev_info {
};
+/**
+ * rvt_set_ibdev_name - Craft an IB device name from client info
+ * @rdi: pointer to the client rvt_dev_info structure
+ * @name: client specific name
+ * @unit: client specific unit number.
+ */
+static inline void rvt_set_ibdev_name(struct rvt_dev_info *rdi,
+ const char *fmt, const char *name,
+ const int unit)
+{
+ snprintf(rdi->ibdev.name, sizeof(rdi->ibdev.name), fmt, name, unit);
+}
+
+/**
+ * rvt_get_ibdev_name - return the IB name
+ * @rdi: rdmavt device
+ *
+ * Return the registered name of the device.
+ */
+static inline const char *rvt_get_ibdev_name(const struct rvt_dev_info *rdi)
+{
+ return rdi->ibdev.name;
+}
+
static inline struct rvt_pd *ibpd_to_rvtpd(struct ib_pd *ibpd)
{
return container_of(ibpd, struct rvt_pd, ibpd);
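Illustrative use of the naming helpers added above (hypothetical base name and unit; the real callers are the rdmavt client drivers):

static void example_name_device(struct rvt_dev_info *rdi, int unit)
{
	/* Produces e.g. "hfi1_0"; format, base name and unit come from the client */
	rvt_set_ibdev_name(rdi, "%s_%d", "hfi1", unit);
	pr_info("registered %s\n", rvt_get_ibdev_name(rdi));
}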
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
new file mode 100644
index 000000000000..c2d81167c858
--- /dev/null
+++ b/include/rdma/restrack.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef _RDMA_RESTRACK_H_
+#define _RDMA_RESTRACK_H_
+
+#include <linux/typecheck.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/kref.h>
+#include <linux/completion.h>
+
+/**
+ * enum rdma_restrack_type - HW objects to track
+ */
+enum rdma_restrack_type {
+ /**
+ * @RDMA_RESTRACK_PD: Protection domain (PD)
+ */
+ RDMA_RESTRACK_PD,
+ /**
+ * @RDMA_RESTRACK_CQ: Completion queue (CQ)
+ */
+ RDMA_RESTRACK_CQ,
+ /**
+ * @RDMA_RESTRACK_QP: Queue pair (QP)
+ */
+ RDMA_RESTRACK_QP,
+ /**
+ * @RDMA_RESTRACK_XRCD: XRC domain (XRCD)
+ */
+ RDMA_RESTRACK_XRCD,
+ /**
+ * @RDMA_RESTRACK_MAX: Last entry, used for array declarations
+ */
+ RDMA_RESTRACK_MAX
+};
+
+#define RDMA_RESTRACK_HASH_BITS 8
+/**
+ * struct rdma_restrack_root - main resource tracking management
+ * entity, per-device
+ */
+struct rdma_restrack_root {
+ /*
+ * @rwsem: Read/write lock to protect lists
+ */
+ struct rw_semaphore rwsem;
+ /**
+ * @hash: global database for all resources per-device
+ */
+ DECLARE_HASHTABLE(hash, RDMA_RESTRACK_HASH_BITS);
+};
+
+/**
+ * struct rdma_restrack_entry - metadata per-entry
+ */
+struct rdma_restrack_entry {
+ /**
+ * @valid: validity indicator
+ *
+ * The entries are filled during rdma_restrack_add() and
+ * freeing them may be attempted during rdma_restrack_del().
+ *
+ * As an example for that, see mlx5 QPs with type MLX5_IB_QPT_HW_GSI
+ */
+ bool valid;
+ /*
+ * @kref: Protect destroy of the resource
+ */
+ struct kref kref;
+ /*
+ * @comp: Signals that all consumers of the resource have completed their work
+ */
+ struct completion comp;
+ /**
+ * @task: owner of resource tracking entity
+ *
+ * There are two types of entities: created by user and created
+ * by kernel.
+ *
+ * This is relevant for the entities created by users.
+ * For the entities created by kernel, this pointer will be NULL.
+ */
+ struct task_struct *task;
+ /**
+ * @kern_name: name of owner for the kernel created entities.
+ */
+ const char *kern_name;
+ /**
+ * @node: hash table entry
+ */
+ struct hlist_node node;
+ /**
+ * @type: various objects in restrack database
+ */
+ enum rdma_restrack_type type;
+};
+
+/**
+ * rdma_restrack_init() - initialize resource tracking
+ * @res: resource tracking root
+ */
+void rdma_restrack_init(struct rdma_restrack_root *res);
+
+/**
+ * rdma_restrack_clean() - clean resource tracking
+ * @res: resource tracking root
+ */
+void rdma_restrack_clean(struct rdma_restrack_root *res);
+
+/**
+ * rdma_restrack_count() - the current usage of specific object
+ * @res: resource entry
+ * @type: actual type of object to operate
+ * @ns: PID namespace
+ */
+int rdma_restrack_count(struct rdma_restrack_root *res,
+ enum rdma_restrack_type type,
+ struct pid_namespace *ns);
+
+/**
+ * rdma_restrack_add() - add object to the resource tracking database
+ * @res: resource entry
+ */
+void rdma_restrack_add(struct rdma_restrack_entry *res);
+
+/**
+ * rdma_restrack_del() - delete object from the resource tracking database
+ * @res: resource entry
+ * @type: actual type of object to operate
+ */
+void rdma_restrack_del(struct rdma_restrack_entry *res);
+
+/**
+ * rdma_is_kernel_res() - check the owner of resource
+ * @res: resource entry
+ */
+static inline bool rdma_is_kernel_res(struct rdma_restrack_entry *res)
+{
+ return !res->task;
+}
+
+/**
+ * rdma_restrack_get() - take a reference to protect the resource from release
+ * @res: resource entry
+ */
+int __must_check rdma_restrack_get(struct rdma_restrack_entry *res);
+
+/**
+ * rdma_restrack_put() - release resource
+ * @res: resource entry
+ */
+int rdma_restrack_put(struct rdma_restrack_entry *res);
+#endif /* _RDMA_RESTRACK_H_ */
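Roughly how the core wires an object into this database, assuming the entry is embedded in the object as with the res fields added to ib_pd/ib_cq/ib_qp in this series (a sketch, not the exact core code):

static void example_track_pd(struct ib_pd *pd, const char *caller)
{
	pd->res.type = RDMA_RESTRACK_PD;
	pd->res.kern_name = caller;	/* e.g. KBUILD_MODNAME of the creator */
	rdma_restrack_add(&pd->res);
}

static void example_untrack_pd(struct ib_pd *pd)
{
	/* Waits until all rdma_restrack_get() holders drop their references */
	rdma_restrack_del(&pd->res);
}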
diff --git a/include/scsi/srp.h b/include/scsi/srp.h
index 5be834de491a..c16a3c9a4d9b 100644
--- a/include/scsi/srp.h
+++ b/include/scsi/srp.h
@@ -129,6 +129,23 @@ struct srp_login_req {
u8 target_port_id[16];
};
+/**
+ * struct srp_login_req_rdma - RDMA/CM login parameters.
+ *
+ * RDMA/CM over InfiniBand can only carry 92 - 36 = 56 bytes of private
+ * data. The %srp_login_req_rdma structure contains the same information as
+ * %srp_login_req but with the reserved data removed.
+ */
+struct srp_login_req_rdma {
+ u64 tag;
+ __be16 req_buf_fmt;
+ u8 req_flags;
+ u8 opcode;
+ __be32 req_it_iu_len;
+ u8 initiator_port_id[16];
+ u8 target_port_id[16];
+};
+
/*
* The SRP spec defines the size of the LOGIN_RSP structure to be 52
* bytes, so it needs to be packed to avoid having it padded to 56
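For reference, the reduced login request fits the 56-byte budget mentioned above: 8 + 2 + 1 + 1 + 4 + 16 + 16 = 48 bytes, with no implicit padding since every member is naturally aligned. A compile-time check along these lines (illustrative, not part of the patch) would pin that down:

static inline void srp_check_login_req_rdma_size(void)
{
	BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) > 56);
}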
diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h
index 398a514ee446..db54115be044 100644
--- a/include/uapi/rdma/bnxt_re-abi.h
+++ b/include/uapi/rdma/bnxt_re-abi.h
@@ -82,6 +82,15 @@ struct bnxt_re_qp_resp {
__u32 rsvd;
};
+struct bnxt_re_srq_req {
+ __u64 srqva;
+ __u64 srq_handle;
+};
+
+struct bnxt_re_srq_resp {
+ __u32 srqid;
+};
+
enum bnxt_re_shpg_offt {
BNXT_RE_BEG_RESV_OFFT = 0x00,
BNXT_RE_AVID_OFFT = 0x10,
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 7e11bb8651b6..04d0e67b1312 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -402,13 +402,18 @@ struct ib_uverbs_create_cq {
__u64 driver_data[0];
};
+enum ib_uverbs_ex_create_cq_flags {
+ IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
+ IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1,
+};
+
struct ib_uverbs_ex_create_cq {
__u64 user_handle;
__u32 cqe;
__u32 comp_vector;
__s32 comp_channel;
__u32 comp_mask;
- __u32 flags;
+ __u32 flags; /* bitmask of ib_uverbs_ex_create_cq_flags */
__u32 reserved;
};
@@ -449,7 +454,7 @@ struct ib_uverbs_wc {
__u32 vendor_err;
__u32 byte_len;
union {
- __u32 imm_data;
+ __be32 imm_data;
__u32 invalidate_rkey;
} ex;
__u32 qp_num;
@@ -765,7 +770,7 @@ struct ib_uverbs_send_wr {
__u32 opcode;
__u32 send_flags;
union {
- __u32 imm_data;
+ __be32 imm_data;
__u32 invalidate_rkey;
} ex;
union {
diff --git a/include/uapi/rdma/mlx4-abi.h b/include/uapi/rdma/mlx4-abi.h
index 224b52b6279c..7f9c37346613 100644
--- a/include/uapi/rdma/mlx4-abi.h
+++ b/include/uapi/rdma/mlx4-abi.h
@@ -97,8 +97,8 @@ struct mlx4_ib_create_srq_resp {
};
struct mlx4_ib_create_qp_rss {
- __u64 rx_hash_fields_mask;
- __u8 rx_hash_function;
+ __u64 rx_hash_fields_mask; /* Use enum mlx4_ib_rx_hash_fields */
+ __u8 rx_hash_function; /* Use enum mlx4_ib_rx_hash_function_flags */
__u8 reserved[7];
__u8 rx_hash_key[40];
__u32 comp_mask;
@@ -152,7 +152,8 @@ enum mlx4_ib_rx_hash_fields {
MLX4_IB_RX_HASH_SRC_PORT_TCP = 1 << 4,
MLX4_IB_RX_HASH_DST_PORT_TCP = 1 << 5,
MLX4_IB_RX_HASH_SRC_PORT_UDP = 1 << 6,
- MLX4_IB_RX_HASH_DST_PORT_UDP = 1 << 7
+ MLX4_IB_RX_HASH_DST_PORT_UDP = 1 << 7,
+ MLX4_IB_RX_HASH_INNER = 1ULL << 31,
};
#endif /* MLX4_ABI_USER_H */
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index a33e0517d3fd..1111aa4e7c1e 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -41,6 +41,9 @@ enum {
MLX5_QP_FLAG_SIGNATURE = 1 << 0,
MLX5_QP_FLAG_SCATTER_CQE = 1 << 1,
MLX5_QP_FLAG_TUNNEL_OFFLOADS = 1 << 2,
+ MLX5_QP_FLAG_BFREG_INDEX = 1 << 3,
+ MLX5_QP_FLAG_TYPE_DCT = 1 << 4,
+ MLX5_QP_FLAG_TYPE_DCI = 1 << 5,
};
enum {
@@ -121,10 +124,12 @@ struct mlx5_ib_alloc_ucontext_resp {
__u8 cqe_version;
__u8 cmds_supp_uhw;
__u8 eth_min_inline;
- __u8 reserved2;
+ __u8 clock_info_versions;
__u64 hca_core_clock_offset;
__u32 log_uar_size;
__u32 num_uars_per_page;
+ __u32 num_dyn_bfregs;
+ __u32 reserved3;
};
struct mlx5_ib_alloc_pd_resp {
@@ -280,8 +285,11 @@ struct mlx5_ib_create_qp {
__u32 rq_wqe_shift;
__u32 flags;
__u32 uidx;
- __u32 reserved0;
- __u64 sq_buf_addr;
+ __u32 bfreg_index;
+ union {
+ __u64 sq_buf_addr;
+ __u64 access_key;
+ };
};
/* RX Hash function flags */
@@ -307,7 +315,7 @@ enum mlx5_rx_hash_fields {
MLX5_RX_HASH_SRC_PORT_UDP = 1 << 6,
MLX5_RX_HASH_DST_PORT_UDP = 1 << 7,
/* Save bits for future fields */
- MLX5_RX_HASH_INNER = 1 << 31
+ MLX5_RX_HASH_INNER = (1UL << 31),
};
struct mlx5_ib_create_qp_rss {
@@ -354,6 +362,11 @@ struct mlx5_ib_create_ah_resp {
__u8 reserved[6];
};
+struct mlx5_ib_modify_qp_resp {
+ __u32 response_length;
+ __u32 dctn;
+};
+
struct mlx5_ib_create_wq_resp {
__u32 response_length;
__u32 reserved;
@@ -368,4 +381,36 @@ struct mlx5_ib_modify_wq {
__u32 comp_mask;
__u32 reserved;
};
+
+struct mlx5_ib_clock_info {
+ __u32 sign;
+ __u32 resv;
+ __u64 nsec;
+ __u64 cycles;
+ __u64 frac;
+ __u32 mult;
+ __u32 shift;
+ __u64 mask;
+ __u64 overflow_period;
+};
+
+enum mlx5_ib_mmap_cmd {
+ MLX5_IB_MMAP_REGULAR_PAGE = 0,
+ MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1,
+ MLX5_IB_MMAP_WC_PAGE = 2,
+ MLX5_IB_MMAP_NC_PAGE = 3,
+ /* 5 is chosen in order to be compatible with old versions of libmlx5 */
+ MLX5_IB_MMAP_CORE_CLOCK = 5,
+ MLX5_IB_MMAP_ALLOC_WC = 6,
+ MLX5_IB_MMAP_CLOCK_INFO = 7,
+};
+
+enum {
+ MLX5_IB_CLOCK_INFO_KERNEL_UPDATING = 1,
+};
+
+/* Bit indexes for the mlx5_alloc_ucontext_resp.clock_info_versions bitmap */
+enum {
+ MLX5_IB_CLOCK_INFO_V1 = 0,
+};
#endif /* MLX5_ABI_USER_H */
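The clock-info page is intended to be mmap()ed read-only via MLX5_IB_MMAP_CLOCK_INFO and read with a seqlock-style retry keyed off the sign field; the kernel flags in-progress updates with MLX5_IB_CLOCK_INFO_KERNEL_UPDATING. A hedged userspace sketch of the snapshot step only (memory barriers and the cycles-to-nanoseconds conversion are left to the provider library):

static void read_clock_info_snapshot(const volatile struct mlx5_ib_clock_info *p,
				     struct mlx5_ib_clock_info *snap)
{
	__u32 sign;

	do {
		do {
			sign = p->sign;
		} while (sign & MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);
		memcpy(snap, (const void *)p, sizeof(*snap));
	} while (p->sign != sign);	/* retry if the kernel updated meanwhile */
}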
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index cc002e316d09..17e59bec169e 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -236,6 +236,10 @@ enum rdma_nldev_command {
RDMA_NLDEV_CMD_PORT_NEW,
RDMA_NLDEV_CMD_PORT_DEL,
+ RDMA_NLDEV_CMD_RES_GET, /* can dump */
+
+ RDMA_NLDEV_CMD_RES_QP_GET, /* can dump */
+
RDMA_NLDEV_NUM_OPS
};
@@ -303,6 +307,51 @@ enum rdma_nldev_attr {
RDMA_NLDEV_ATTR_DEV_NODE_TYPE, /* u8 */
+ RDMA_NLDEV_ATTR_RES_SUMMARY, /* nested table */
+ RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY, /* nested table */
+ RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, /* string */
+ RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, /* u64 */
+
+ RDMA_NLDEV_ATTR_RES_QP, /* nested table */
+ RDMA_NLDEV_ATTR_RES_QP_ENTRY, /* nested table */
+ /*
+ * Local QPN
+ */
+ RDMA_NLDEV_ATTR_RES_LQPN, /* u32 */
+ /*
+ * Remote QPN,
+ * Applicable for RC and UC only, per IBTA 11.2.5.3 QUERY QUEUE PAIR
+ */
+ RDMA_NLDEV_ATTR_RES_RQPN, /* u32 */
+ /*
+ * Receive Queue PSN,
+ * Applicable for RC and UC only, per IBTA 11.2.5.3 QUERY QUEUE PAIR
+ */
+ RDMA_NLDEV_ATTR_RES_RQ_PSN, /* u32 */
+ /*
+ * Send Queue PSN
+ */
+ RDMA_NLDEV_ATTR_RES_SQ_PSN, /* u32 */
+ RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE, /* u8 */
+ /*
+ * QP types as visible to RDMA/core, the reserved QPT
+ * are not exported through this interface.
+ */
+ RDMA_NLDEV_ATTR_RES_TYPE, /* u8 */
+ RDMA_NLDEV_ATTR_RES_STATE, /* u8 */
+ /*
+ * Process ID which created the object; for objects of
+ * kernel origin, no PID is reported.
+ */
+ RDMA_NLDEV_ATTR_RES_PID, /* u32 */
+ /*
+ * The name of the kernel entity which created the resource.
+ * It will exist only for kernel objects.
+ * For user created objects, the user is supposed
+ * to read the /proc/PID/comm file.
+ */
+ RDMA_NLDEV_ATTR_RES_KERN_NAME, /* string */
+
RDMA_NLDEV_ATTR_MAX
};
#endif /* _UAPI_RDMA_NETLINK_H */
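A minimal userspace sketch of the new inspection interface mentioned in the merge description ("similar in spirit to 'ss'"): open a NETLINK_RDMA socket and dump RDMA_NLDEV_CMD_RES_GET; attribute parsing of the RDMA_NLDEV_ATTR_RES_SUMMARY replies is omitted. RDMA_NL_GET_TYPE() and RDMA_NL_NLDEV come from the installed rdma_netlink uapi header.

#include <linux/netlink.h>
#include <rdma/rdma_netlink.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int dump_rdma_resource_summary(void)
{
	struct nlmsghdr req;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_RDMA);

	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.nlmsg_len = NLMSG_LENGTH(0);
	req.nlmsg_type = RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET);
	req.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;

	if (send(fd, &req, req.nlmsg_len, 0) < 0) {
		close(fd);
		return -1;
	}
	/* recv() here and walk the NLMSG_* stream; each message carries
	 * an RDMA_NLDEV_ATTR_RES_SUMMARY nested table per device. */
	close(fd);
	return 0;
}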
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
index aaa352f2f110..02ca0d0f1eb7 100644
--- a/include/uapi/rdma/vmw_pvrdma-abi.h
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -52,12 +52,14 @@
#define PVRDMA_UVERBS_ABI_VERSION 3 /* ABI Version. */
#define PVRDMA_UAR_HANDLE_MASK 0x00FFFFFF /* Bottom 24 bits. */
#define PVRDMA_UAR_QP_OFFSET 0 /* QP doorbell. */
-#define PVRDMA_UAR_QP_SEND BIT(30) /* Send bit. */
-#define PVRDMA_UAR_QP_RECV BIT(31) /* Recv bit. */
+#define PVRDMA_UAR_QP_SEND (1 << 30) /* Send bit. */
+#define PVRDMA_UAR_QP_RECV (1 << 31) /* Recv bit. */
#define PVRDMA_UAR_CQ_OFFSET 4 /* CQ doorbell. */
-#define PVRDMA_UAR_CQ_ARM_SOL BIT(29) /* Arm solicited bit. */
-#define PVRDMA_UAR_CQ_ARM BIT(30) /* Arm bit. */
-#define PVRDMA_UAR_CQ_POLL BIT(31) /* Poll bit. */
+#define PVRDMA_UAR_CQ_ARM_SOL (1 << 29) /* Arm solicited bit. */
+#define PVRDMA_UAR_CQ_ARM (1 << 30) /* Arm bit. */
+#define PVRDMA_UAR_CQ_POLL (1 << 31) /* Poll bit. */
+#define PVRDMA_UAR_SRQ_OFFSET 8 /* SRQ doorbell. */
+#define PVRDMA_UAR_SRQ_RECV (1 << 30) /* Recv bit. */
enum pvrdma_wr_opcode {
PVRDMA_WR_RDMA_WRITE,