author     Jason Gunthorpe <jgg@nvidia.com>   2020-11-15 14:43:11 +0300
committer  Jason Gunthorpe <jgg@nvidia.com>   2020-11-16 23:53:30 +0300
commit     8a7904a672a1d33c848e5129f886ee69e0773a2e
tree       d8cb36bb7a7da1682f46adac04f7aa36b205276e /drivers/infiniband/hw/mlx5/srq.c
parent     878f7b31c3a7f3e48c6601ea373b8688e7e308e0
RDMA/mlx5: Lower setting the umem's PAS for SRQ
Some of the SRQ types are created using a WQ, and the WQ requires a
different parameter set to mlx5_umem_find_best_quantized_pgoff() as it
has a 5 bit page_offset.

Add the umem to the mlx5_srq_attr and defer computing the PAS data
until the code has figured out what kind of mailbox to use. Compute the
PAS directly from the umem for each of the four unique mailbox types.

This also avoids allocating memory to store the user PAS, instead it is
written directly to the mailbox as in most other cases.

Fixes: 01949d0109ee ("net/mlx5_core: Enable XRCs and SRQs when using ISSI > 0")
Link: https://lore.kernel.org/r/20201115114311.136250-8-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
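For context (not part of the commit itself), below is a rough sketch of what the deferred, mailbox-side PAS computation can look like. The helper name set_srqc_pas_sketch() and the overall flow are illustrative assumptions, not the actual srq_cmd.c code; only mlx5_umem_find_best_quantized_pgoff(), mlx5_ib_populate_pas(), the srqc log_page_size/page_offset fields and the new in->umem member are taken from this diff, and the usual mlx5_ib driver headers are assumed.

/*
 * Illustrative sketch only. Once the SRQC mailbox layout is known,
 * quantize the page offset and write the PAS straight into the command
 * mailbox from in->umem, with no intermediate in->pas allocation.
 */
static int set_srqc_pas_sketch(struct mlx5_srq_attr *in, void *srqc,
			       __be64 *pas)
{
	unsigned int page_offset_quantized;
	unsigned int page_size;

	if (!in->umem)
		return 0;	/* kernel-owned SRQ, nothing to map */

	/* srqc quantizes page_offset in units of 64 (see the removed code) */
	page_size = mlx5_umem_find_best_quantized_pgoff(
		in->umem, srqc, log_page_size, MLX5_ADAPTER_PAGE_SHIFT,
		page_offset, 64, &page_offset_quantized);
	if (!page_size)
		return -EINVAL;

	MLX5_SET(srqc, srqc, log_page_size,
		 order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(srqc, srqc, page_offset, page_offset_quantized);

	/* the PAS goes directly into the mailbox, as in most other cases */
	mlx5_ib_populate_pas(in->umem, page_size, pas, 0);
	return 0;
}

A WQ-backed SRQ type would do the same against the wq layout with its 5 bit page_offset, which is why the computation has to wait until the mailbox type is known.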
Diffstat (limited to 'drivers/infiniband/hw/mlx5/srq.c')
-rw-r--r--   drivers/infiniband/hw/mlx5/srq.c   27
1 file changed, 2 insertions(+), 25 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 7dfdc9e54866..fab6736e4d6a 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -51,8 +51,6 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
udata, struct mlx5_ib_ucontext, ibucontext);
size_t ucmdlen;
int err;
- unsigned int page_offset_quantized;
- unsigned int page_size;
u32 uidx = MLX5_IB_DEFAULT_UIDX;
ucmdlen = min(udata->inlen, sizeof(ucmd));
@@ -84,32 +82,14 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
err = PTR_ERR(srq->umem);
return err;
}
-
- page_size = mlx5_umem_find_best_quantized_pgoff(
- srq->umem, srqc, log_page_size, MLX5_ADAPTER_PAGE_SHIFT,
- page_offset, 64, &page_offset_quantized);
- if (!page_size) {
- mlx5_ib_warn(dev, "bad offset\n");
- goto err_umem;
- }
-
- in->pas = kvcalloc(ib_umem_num_dma_blocks(srq->umem, page_size),
- sizeof(*in->pas), GFP_KERNEL);
- if (!in->pas) {
- err = -ENOMEM;
- goto err_umem;
- }
-
- mlx5_ib_populate_pas(srq->umem, page_size, in->pas, 0);
+ in->umem = srq->umem;
err = mlx5_ib_db_map_user(ucontext, udata, ucmd.db_addr, &srq->db);
if (err) {
mlx5_ib_dbg(dev, "map doorbell failed\n");
- goto err_in;
+ goto err_umem;
}
- in->log_page_size = order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT;
- in->page_offset = page_offset_quantized;
in->uid = (in->type != IB_SRQT_XRC) ? to_mpd(pd)->uid : 0;
if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
in->type != IB_SRQT_BASIC)
@@ -117,9 +97,6 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
in->user_index = uidx;

return 0;
-err_in:
- kvfree(in->pas);
-
err_umem:
ib_umem_release(srq->umem);