author     Saeed Mahameed <saeedm@nvidia.com>    2021-10-27 22:45:37 +0300
committer  Saeed Mahameed <saeedm@nvidia.com>    2021-10-28 07:33:28 +0300
commit     573bce9e675b0654e18a338ca9a64187fc19806f
tree       fe4fbc94bd65edc21c81e627b89e143aceea22e5 /drivers/infiniband/hw/mlx5/odp.c
parent     f25c0515c521375154c62c72447869f40218c861
parent     60dd57c7479418e2bc902143eb46a2fdcfeecbbb
Merge branch 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux into net-next
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Diffstat (limited to 'drivers/infiniband/hw/mlx5/odp.c')
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 38
1 file changed, 10 insertions(+), 28 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 81147d774dd2..b1e2725f4586 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -430,7 +430,7 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
 	mr->umem = &odp->umem;
 	mr->ibmr.lkey = mr->mmkey.key;
 	mr->ibmr.rkey = mr->mmkey.key;
-	mr->mmkey.iova = idx * MLX5_IMR_MTT_SIZE;
+	mr->ibmr.iova = idx * MLX5_IMR_MTT_SIZE;
 	mr->parent = imr;
 	odp->private = mr;
 
@@ -500,7 +500,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
 	}
 
 	imr->ibmr.pd = &pd->ibpd;
-	imr->mmkey.iova = 0;
+	imr->ibmr.iova = 0;
 	imr->umem = &umem_odp->umem;
 	imr->ibmr.lkey = imr->mmkey.key;
 	imr->ibmr.rkey = imr->mmkey.key;
@@ -738,7 +738,7 @@ static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
 {
 	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
 
-	if (unlikely(io_virt < mr->mmkey.iova))
+	if (unlikely(io_virt < mr->ibmr.iova))
 		return -EFAULT;
 
 	if (mr->umem->is_dmabuf)
@@ -747,7 +747,7 @@ static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
 	if (!odp->is_implicit_odp) {
 		u64 user_va;
 
-		if (check_add_overflow(io_virt - mr->mmkey.iova,
+		if (check_add_overflow(io_virt - mr->ibmr.iova,
 				       (u64)odp->umem.address, &user_va))
 			return -EFAULT;
 		if (unlikely(user_va >= ib_umem_end(odp) ||
@@ -788,7 +788,7 @@ struct pf_frame {
 	int depth;
 };
 
-static bool mkey_is_eq(struct mlx5_core_mkey *mmkey, u32 key)
+static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key)
 {
 	if (!mmkey)
 		return false;
@@ -797,21 +797,6 @@ static bool mkey_is_eq(struct mlx5_core_mkey *mmkey, u32 key)
 	return mmkey->key == key;
 }
 
-static int get_indirect_num_descs(struct mlx5_core_mkey *mmkey)
-{
-	struct mlx5_ib_mw *mw;
-	struct mlx5_ib_devx_mr *devx_mr;
-
-	if (mmkey->type == MLX5_MKEY_MW) {
-		mw = container_of(mmkey, struct mlx5_ib_mw, mmkey);
-		return mw->ndescs;
-	}
-
-	devx_mr = container_of(mmkey, struct mlx5_ib_devx_mr,
-			       mmkey);
-	return devx_mr->ndescs;
-}
-
 /*
  * Handle a single data segment in a page-fault WQE or RDMA region.
  *
@@ -831,12 +816,11 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
 {
 	int npages = 0, ret, i, outlen, cur_outlen = 0, depth = 0;
 	struct pf_frame *head = NULL, *frame;
-	struct mlx5_core_mkey *mmkey;
+	struct mlx5_ib_mkey *mmkey;
 	struct mlx5_ib_mr *mr;
 	struct mlx5_klm *pklm;
 	u32 *out = NULL;
 	size_t offset;
-	int ndescs;
 
 	io_virt += *bytes_committed;
 	bcnt -= *bytes_committed;
@@ -885,8 +869,6 @@ next_mr:
 
 	case MLX5_MKEY_MW:
 	case MLX5_MKEY_INDIRECT_DEVX:
-		ndescs = get_indirect_num_descs(mmkey);
-
 		if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) {
 			mlx5_ib_dbg(dev, "indirection level exceeded\n");
 			ret = -EFAULT;
@@ -894,7 +876,7 @@ next_mr:
 		}
 
 		outlen = MLX5_ST_SZ_BYTES(query_mkey_out) +
-			sizeof(*pklm) * (ndescs - 2);
+			sizeof(*pklm) * (mmkey->ndescs - 2);
 
 		if (outlen > cur_outlen) {
 			kfree(out);
@@ -909,14 +891,14 @@ next_mr:
 		pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out,
 						       bsf0_klm0_pas_mtt0_1);
 
-		ret = mlx5_core_query_mkey(dev->mdev, mmkey, out, outlen);
+		ret = mlx5_core_query_mkey(dev->mdev, mmkey->key, out, outlen);
 		if (ret)
 			goto end;
 
 		offset = io_virt - MLX5_GET64(query_mkey_out, out,
 					      memory_key_mkey_entry.start_addr);
 
-		for (i = 0; bcnt && i < ndescs; i++, pklm++) {
+		for (i = 0; bcnt && i < mmkey->ndescs; i++, pklm++) {
 			if (offset >= be32_to_cpu(pklm->bcount)) {
 				offset -= be32_to_cpu(pklm->bcount);
 				continue;
@@ -1704,8 +1686,8 @@ get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
 		    u32 lkey)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
-	struct mlx5_core_mkey *mmkey;
 	struct mlx5_ib_mr *mr = NULL;
+	struct mlx5_ib_mkey *mmkey;
 
 	xa_lock(&dev->odp_mkeys);
 	mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
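
The common thread in these hunks is a consolidation of per-mkey state: the MR's IOVA moves from the driver-private mmkey to the generic ibmr, and the indirect-mkey descriptor count is now read straight from the renamed struct mlx5_ib_mkey (mmkey->ndescs) rather than being recovered through a per-type container_of() lookup in get_indirect_num_descs(). The standalone C sketch below models that second change with simplified stand-in structs; the layouts here are illustrative assumptions, not the kernel's actual mlx5 definitions.

	/*
	 * Standalone sketch of the descriptor-count consolidation above:
	 * instead of resolving ndescs through a per-type container_of()
	 * lookup, the count is stored on the mkey itself. The structs are
	 * simplified stand-ins, not the real mlx5 definitions.
	 */
	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	enum mkey_type { MKEY_MW, MKEY_INDIRECT_DEVX };

	/* Old style: the core mkey knows nothing about descriptor counts. */
	struct old_core_mkey { unsigned int key; enum mkey_type type; };
	struct old_mw        { unsigned int ndescs; struct old_core_mkey mmkey; };
	struct old_devx_mr   { unsigned int ndescs; struct old_core_mkey mmkey; };

	static unsigned int old_get_indirect_num_descs(struct old_core_mkey *mmkey)
	{
		/* One branch and one container_of() per mkey type. */
		if (mmkey->type == MKEY_MW)
			return container_of(mmkey, struct old_mw, mmkey)->ndescs;
		return container_of(mmkey, struct old_devx_mr, mmkey)->ndescs;
	}

	/* New style: ndescs lives on the mkey, so callers just read it. */
	struct new_ib_mkey { unsigned int key; enum mkey_type type; unsigned int ndescs; };

	static unsigned int new_get_indirect_num_descs(struct new_ib_mkey *mmkey)
	{
		return mmkey->ndescs;
	}

	int main(void)
	{
		struct old_mw mw = { .ndescs = 4, .mmkey = { .key = 1, .type = MKEY_MW } };
		struct new_ib_mkey mk = { .key = 1, .type = MKEY_MW, .ndescs = 4 };

		printf("old: %u descriptors\n", old_get_indirect_num_descs(&mw.mmkey));
		printf("new: %u descriptors\n", new_get_indirect_num_descs(&mk));
		return 0;
	}

With the count carried on the mkey itself, the page-fault path above can size the query_mkey_out buffer and walk the klm array without knowing whether the indirect mkey belongs to an MW or a DEVX object, which is what lets the helper and the local ndescs variable be deleted.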