author     Jiri Kosina <jkosina@suse.cz>   2021-11-05 14:10:27 +0300
committer  Jiri Kosina <jkosina@suse.cz>   2021-11-05 14:10:27 +0300
commit     820e9906cf64142169134f35b996108303cf22ca (patch)
tree       89f8831fb39c59aba208395d91e25ab4b26f473f /include/rdma
parent     b9865081a56a5cd01cd7c9735911709ff82bd8df (diff)
parent     2ea5999d07d2a0ab6ad92ccf65524707f2c5e456 (diff)
Merge branch 'for-5.16/asus' into for-linus
Diffstat (limited to 'include/rdma')
-rw-r--r--  include/rdma/ib_sa.h      24
-rw-r--r--  include/rdma/ib_umem.h    11
-rw-r--r--  include/rdma/ib_verbs.h   74
-rw-r--r--  include/rdma/rdmavt_qp.h   2
4 files changed, 71 insertions(+), 40 deletions(-)
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index ba3c808a3789..3634d4cc7a56 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -366,20 +366,6 @@ struct ib_sa_mcmember_rec {
#define IB_DEFAULT_SERVICE_LEASE 0xFFFFFFFF
-struct ib_sa_service_rec {
- u64 id;
- union ib_gid gid;
- __be16 pkey;
- /* reserved */
- u32 lease;
- u8 key[16];
- u8 name[64];
- u8 data8[16];
- u16 data16[8];
- u32 data32[4];
- u64 data64[2];
-};
-
#define IB_SA_GUIDINFO_REC_LID IB_SA_COMP_MASK(0)
#define IB_SA_GUIDINFO_REC_BLOCK_NUM IB_SA_COMP_MASK(1)
#define IB_SA_GUIDINFO_REC_RES1 IB_SA_COMP_MASK(2)
@@ -430,16 +416,6 @@ int ib_sa_path_rec_get(struct ib_sa_client *client, struct ib_device *device,
void *context),
void *context, struct ib_sa_query **query);
-int ib_sa_service_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u32 port_num, u8 method,
- struct ib_sa_service_rec *rec,
- ib_sa_comp_mask comp_mask, unsigned long timeout_ms,
- gfp_t gfp_mask,
- void (*callback)(int status,
- struct ib_sa_service_rec *resp,
- void *context),
- void *context, struct ib_sa_query **sa_query);
-
struct ib_sa_multicast {
struct ib_sa_mcmember_rec rec;
ib_sa_comp_mask comp_mask;
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 676c57f5ca80..5ae9dff74dac 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -26,9 +26,7 @@ struct ib_umem {
u32 is_odp : 1;
u32 is_dmabuf : 1;
struct work_struct work;
- struct sg_table sg_head;
- int nmap;
- unsigned int sg_nents;
+ struct sg_append_table sgt_append;
};
struct ib_umem_dmabuf {
@@ -56,7 +54,7 @@ static inline int ib_umem_offset(struct ib_umem *umem)
static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
unsigned long pgsz)
{
- return (sg_dma_address(umem->sg_head.sgl) + ib_umem_offset(umem)) &
+ return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
(pgsz - 1);
}
@@ -77,7 +75,8 @@ static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
struct ib_umem *umem,
unsigned long pgsz)
{
- __rdma_block_iter_start(biter, umem->sg_head.sgl, umem->nmap, pgsz);
+ __rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
+ umem->sgt_append.sgt.nents, pgsz);
}
/**
@@ -128,7 +127,7 @@ static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
unsigned long pgsz_bitmap,
u64 pgoff_bitmask)
{
- struct scatterlist *sg = umem->sg_head.sgl;
+ struct scatterlist *sg = umem->sgt_append.sgt.sgl;
dma_addr_t dma_addr;
dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
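For illustration only (not part of the patch): driver code that walks a umem through the block-iterator macros is unaffected by the move from sg_head/nmap/sg_nents to the single sgt_append table, since the iterator now starts from umem->sgt_append.sgt internally. A minimal sketch, with mydrv_fill_pas() and the pas[] page-address array being hypothetical names:

#include <rdma/ib_umem.h>

static void mydrv_fill_pas(struct ib_umem *umem, u64 *pas,
                           unsigned long page_size)
{
        struct ib_block_iter biter;
        int i = 0;

        /* Iterates umem->sgt_append.sgt under the hood after this patch. */
        rdma_umem_for_each_dma_block(umem, &biter, page_size)
                pas[i++] = rdma_block_iter_dma_address(&biter);
}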
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 371df1c80aeb..4b50d9a3018a 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2268,8 +2268,13 @@ struct iw_cm_conn_param;
!__same_type(((struct drv_struct *)NULL)->member, \
struct ib_struct)))
-#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \
- ((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))
+#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \
+ ((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
+ gfp, false))
+
+#define rdma_zalloc_drv_obj_numa(ib_dev, ib_type) \
+ ((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
+ GFP_KERNEL, true))
#define rdma_zalloc_drv_obj(ib_dev, ib_type) \
rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
@@ -2435,9 +2440,8 @@ struct ib_device_ops {
struct ib_udata *udata);
int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
- struct ib_qp *(*create_qp)(struct ib_pd *pd,
- struct ib_qp_init_attr *qp_init_attr,
- struct ib_udata *udata);
+ int (*create_qp)(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr,
+ struct ib_udata *udata);
int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_udata *udata);
int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
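A minimal sketch of the new driver contract, assuming a hypothetical mydrv provider: the core now allocates the QP container (sized via the DECLARE_RDMA_OBJ_SIZE(ib_qp) entry added later in this diff) and ->create_qp only initializes it, returning 0 or a negative errno instead of a pointer:

#include <rdma/ib_verbs.h>

struct mydrv_qp {                       /* hypothetical driver QP */
        struct ib_qp ibqp;              /* core object must be embedded */
        enum ib_qp_state state;         /* hypothetical driver field */
};

static int mydrv_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
                           struct ib_udata *udata)
{
        struct mydrv_qp *qp = container_of(ibqp, struct mydrv_qp, ibqp);

        if (attr->qp_type != IB_QPT_RC && attr->qp_type != IB_QPT_UD)
                return -EOPNOTSUPP;

        qp->state = IB_QPS_RESET;
        return 0;       /* on error just return -errno; the core frees ibqp */
}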
@@ -2635,11 +2639,18 @@ struct ib_device_ops {
int (*query_ucontext)(struct ib_ucontext *context,
struct uverbs_attr_bundle *attrs);
+ /*
+ * Provide NUMA node. This API exists for rdmavt/hfi1 only.
+ * Everyone else relies on the Linux memory management model.

+ */
+ int (*get_numa_node)(struct ib_device *dev);
+
DECLARE_RDMA_OBJ_SIZE(ib_ah);
DECLARE_RDMA_OBJ_SIZE(ib_counters);
DECLARE_RDMA_OBJ_SIZE(ib_cq);
DECLARE_RDMA_OBJ_SIZE(ib_mw);
DECLARE_RDMA_OBJ_SIZE(ib_pd);
+ DECLARE_RDMA_OBJ_SIZE(ib_qp);
DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table);
DECLARE_RDMA_OBJ_SIZE(ib_srq);
DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
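A hedged sketch of how a provider would wire up the additions above, reusing the hypothetical mydrv_qp/mydrv_create_qp from the previous sketch. Only rdmavt/hfi1 is expected to implement ->get_numa_node; other drivers leave it NULL. Registration would still go through ib_set_device_ops() as today.

struct mydrv_device {                   /* hypothetical driver structure */
        struct ib_device ibdev;
        int numa_node;                  /* recorded at probe time */
};

static int mydrv_get_numa_node(struct ib_device *ibdev)
{
        return container_of(ibdev, struct mydrv_device, ibdev)->numa_node;
}

static const struct ib_device_ops mydrv_dev_ops = {
        .owner          = THIS_MODULE,
        .create_qp      = mydrv_create_qp,      /* new int-returning form */
        .get_numa_node  = mydrv_get_numa_node,  /* optional */
        INIT_RDMA_OBJ_SIZE(ib_qp, mydrv_qp, ibqp),
};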
@@ -2746,6 +2757,15 @@ struct ib_device {
u32 lag_flags;
};
+static inline void *rdma_zalloc_obj(struct ib_device *dev, size_t size,
+ gfp_t gfp, bool is_numa_aware)
+{
+ if (is_numa_aware && dev->ops.get_numa_node)
+ return kzalloc_node(size, gfp, dev->ops.get_numa_node(dev));
+
+ return kzalloc(size, gfp);
+}
+
struct ib_client_nl_info;
struct ib_client {
const char *name;
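Illustrative only: what the NUMA-aware path buys the core. With ->get_numa_node set, rdma_zalloc_drv_obj_numa() lands the driver object on the device's node via kzalloc_node(); otherwise rdma_zalloc_obj() degrades to a plain kzalloc(). The surrounding example_alloc_qp() function is hypothetical:

static struct ib_qp *example_alloc_qp(struct ib_device *dev)
{
        /* Uses dev->ops.size_ib_qp, declared by the driver. */
        struct ib_qp *qp = rdma_zalloc_drv_obj_numa(dev, ib_qp);

        if (!qp)
                return ERR_PTR(-ENOMEM);
        qp->device = dev;
        return qp;
}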
@@ -3668,13 +3688,21 @@ static inline int ib_post_srq_recv(struct ib_srq *srq,
bad_recv_wr ? : &dummy);
}
-struct ib_qp *ib_create_named_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *qp_init_attr,
- const char *caller);
+struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd,
+ struct ib_qp_init_attr *qp_init_attr,
+ const char *caller);
+/**
+ * ib_create_qp - Creates a kernel QP associated with the specified protection
+ * domain.
+ * @pd: The protection domain associated with the QP.
+ * @init_attr: A list of initial attributes required to create the
+ * QP. If QP creation succeeds, then the attributes are updated to
+ * the actual capabilities of the created QP.
+ */
static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr)
{
- return ib_create_named_qp(pd, init_attr, KBUILD_MODNAME);
+ return ib_create_qp_kernel(pd, init_attr, KBUILD_MODNAME);
}
/**
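A kernel ULP is unaffected by the ib_create_named_qp() -> ib_create_qp_kernel() rename: it keeps calling ib_create_qp(), which now expands to ib_create_qp_kernel(pd, &attr, KBUILD_MODNAME). A sketch with illustrative attribute values only:

#include <rdma/ib_verbs.h>

static struct ib_qp *example_create_rc_qp(struct ib_pd *pd, struct ib_cq *cq)
{
        struct ib_qp_init_attr attr = {
                .send_cq        = cq,
                .recv_cq        = cq,
                .qp_type        = IB_QPT_RC,
                .sq_sig_type    = IB_SIGNAL_REQ_WR,
                .cap            = {
                        .max_send_wr    = 16,
                        .max_recv_wr    = 16,
                        .max_send_sge   = 1,
                        .max_recv_sge   = 1,
                },
        };

        return ib_create_qp(pd, &attr);
}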
@@ -4058,6 +4086,34 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
}
/**
+ * ib_dma_map_sgtable_attrs - Map a scatter/gather table to DMA addresses
+ * @dev: The device for which the DMA addresses are to be created
+ * @sgt: The sg_table object describing the buffer
+ * @direction: The direction of the DMA
+ * @dma_attrs: Optional DMA attributes for the map operation
+ */
+static inline int ib_dma_map_sgtable_attrs(struct ib_device *dev,
+ struct sg_table *sgt,
+ enum dma_data_direction direction,
+ unsigned long dma_attrs)
+{
+ if (ib_uses_virt_dma(dev)) {
+ ib_dma_virt_map_sg(dev, sgt->sgl, sgt->orig_nents);
+ return 0;
+ }
+ return dma_map_sgtable(dev->dma_device, sgt, direction, dma_attrs);
+}
+
+static inline void ib_dma_unmap_sgtable_attrs(struct ib_device *dev,
+ struct sg_table *sgt,
+ enum dma_data_direction direction,
+ unsigned long dma_attrs)
+{
+ if (!ib_uses_virt_dma(dev))
+ dma_unmap_sgtable(dev->dma_device, sgt, direction, dma_attrs);
+}
+
+/**
* ib_dma_map_sg - Map a scatter/gather list to DMA addresses
* @dev: The device for which the DMA addresses are to be created
* @sg: The array of scatter/gather entries
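Illustrative only: mapping an already-built sg_table with the new helpers. For a software/virt-DMA device (rxe/siw-style) the map just fills in dma_address and the unmap is a no-op, mirroring the ib_uses_virt_dma() branches above. example_map_sgt() is a hypothetical caller:

#include <rdma/ib_verbs.h>

static int example_map_sgt(struct ib_device *dev, struct sg_table *sgt)
{
        int ret;

        ret = ib_dma_map_sgtable_attrs(dev, sgt, DMA_BIDIRECTIONAL, 0);
        if (ret)
                return ret;

        /* ... post sends/recvs using sg_dma_address(sgt->sgl) ... */

        ib_dma_unmap_sgtable_attrs(dev, sgt, DMA_BIDIRECTIONAL, 0);
        return 0;
}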
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 8275954f5ce6..2e58d5e6ac0e 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -444,7 +444,7 @@ struct rvt_qp {
/*
* This sge list MUST be last. Do not add anything below here.
*/
- struct rvt_sge r_sg_list[] /* verified SGEs */
+ struct rvt_sge *r_sg_list /* verified SGEs */
____cacheline_aligned_in_smp;
};