Diffstat (limited to 'drivers/infiniband/hw/mlx5/mr.c')
-rw-r--r--	drivers/infiniband/hw/mlx5/mr.c	81
1 file changed, 42 insertions(+), 39 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 44a0ee6bd9f1..6fa0a83c19de 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -147,7 +147,7 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 			break;
 		}
 		mr->order = ent->order;
-		mr->allocated_from_cache = 1;
+		mr->allocated_from_cache = true;
 		mr->dev = dev;
 
 		MLX5_SET(mkc, mkc, free, 1);
@@ -661,12 +661,21 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
 static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
 					  struct ib_pd *pd)
 {
+	struct mlx5_ib_dev *dev = to_mdev(pd->device);
+
 	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
 	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
 	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
 	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
 	MLX5_SET(mkc, mkc, lr, 1);
 
+	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
+		MLX5_SET(mkc, mkc, relaxed_ordering_write,
+			 !!(acc & IB_ACCESS_RELAXED_ORDERING));
+	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
+		MLX5_SET(mkc, mkc, relaxed_ordering_read,
+			 !!(acc & IB_ACCESS_RELAXED_ORDERING));
+
 	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
 	MLX5_SET(mkc, mkc, qpn, 0xffffff);
 	MLX5_SET64(mkc, mkc, start_addr, start_addr);
@@ -867,36 +876,6 @@ static struct mlx5_ib_mr *alloc_mr_from_cache(
 	return mr;
 }
 
-static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
-			       void *xlt, int page_shift, size_t size,
-			       int flags)
-{
-	struct mlx5_ib_dev *dev = mr->dev;
-	struct ib_umem *umem = mr->umem;
-
-	if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
-		if (!umr_can_use_indirect_mkey(dev))
-			return -EPERM;
-		mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
-		return npages;
-	}
-
-	npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);
-
-	if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
-		__mlx5_ib_populate_pas(dev, umem, page_shift,
-				       idx, npages, xlt,
-				       MLX5_IB_MTT_PRESENT);
-		/* Clear padding after the pages
-		 * brought from the umem.
-		 */
-		memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
-		       size - npages * sizeof(struct mlx5_mtt));
-	}
-
-	return npages;
-}
-
 #define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
 			    MLX5_UMR_MTT_ALIGNMENT)
 #define MLX5_SPARE_UMR_CHUNK 0x10000
@@ -920,6 +899,7 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 	size_t pages_mapped = 0;
 	size_t pages_to_map = 0;
 	size_t pages_iter = 0;
+	size_t size_to_map = 0;
 	gfp_t gfp;
 	bool use_emergency_page = false;
 
@@ -966,6 +946,15 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 		goto free_xlt;
 	}
 
+	if (mr->umem->is_odp) {
+		if (!(flags & MLX5_IB_UPD_XLT_INDIRECT)) {
+			struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+			size_t max_pages = ib_umem_odp_num_pages(odp) - idx;
+
+			pages_to_map = min_t(size_t, pages_to_map, max_pages);
+		}
+	}
+
 	sg.addr = dma;
 	sg.lkey = dev->umrc.pd->local_dma_lkey;
 
@@ -988,14 +977,22 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 	     pages_mapped < pages_to_map && !err;
 	     pages_mapped += pages_iter, idx += pages_iter) {
 		npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
+		size_to_map = npages * desc_size;
 		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
-		npages = populate_xlt(mr, idx, npages, xlt,
-				      page_shift, size, flags);
-
+		if (mr->umem->is_odp) {
+			mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
+		} else {
+			__mlx5_ib_populate_pas(dev, mr->umem, page_shift, idx,
+					       npages, xlt,
+					       MLX5_IB_MTT_PRESENT);
+			/* Clear padding after the pages
+			 * brought from the umem.
+			 */
+			memset(xlt + size_to_map, 0, size - size_to_map);
+		}
 		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);
 
-		sg.length = ALIGN(npages * desc_size,
-				  MLX5_UMR_MTT_ALIGNMENT);
+		sg.length = ALIGN(size_to_map, MLX5_UMR_MTT_ALIGNMENT);
 
 		if (pages_mapped + pages_iter >= pages_to_map) {
 			if (flags & MLX5_IB_UPD_XLT_ENABLE)
@@ -1074,6 +1071,12 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
 	MLX5_SET(mkc, mkc, free, !populate);
 	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
+		MLX5_SET(mkc, mkc, relaxed_ordering_write,
+			 !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
+	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
+		MLX5_SET(mkc, mkc, relaxed_ordering_read,
+			 !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
 	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
 	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
 	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
@@ -1264,7 +1267,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (err < 0)
 		return ERR_PTR(err);
 
-	use_umr = mlx5_ib_can_use_umr(dev, true);
+	use_umr = mlx5_ib_can_use_umr(dev, true, access_flags);
 
 	if (order <= mr_cache_max_order(dev) && use_umr) {
 		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
@@ -1431,7 +1434,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 		goto err;
 	}
 
-	if (!mlx5_ib_can_use_umr(dev, true) ||
+	if (!mlx5_ib_can_use_umr(dev, true, access_flags) ||
 	    (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len))) {
 		/*
 		 * UMR can't be used - MKey needs to be replaced.
@@ -1452,7 +1455,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 			goto err;
 		}
 
-		mr->allocated_from_cache = false;
+		mr->allocated_from_cache = false;
 	} else {
 		/*
 		 * Send a UMR WQE
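
For context (not part of this patch): a minimal userspace sketch of how a consumer would opt into the relaxed-ordering path this diff wires up in the mkey context. It assumes rdma-core v28 or later, which exposes IBV_ACCESS_RELAXED_ORDERING as an optional access flag; since the kernel side gates the mkc bits on the relaxed_ordering_write/relaxed_ordering_read HCA caps, the flag is a pure optimization hint and registration still succeeds on hardware without the capability.

/*
 * Hypothetical example, not from the patch: register an MR with
 * relaxed PCIe ordering via libibverbs (assumes rdma-core >= v28
 * for IBV_ACCESS_RELAXED_ORDERING). Build with: gcc demo.c -libverbs
 */
#include <stdio.h>
#include <stdlib.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **devs = ibv_get_device_list(NULL);
	struct ibv_context *ctx;
	struct ibv_pd *pd;
	struct ibv_mr *mr;
	size_t len = 1 << 20;
	void *buf;

	if (!devs || !devs[0])
		return 1;
	ctx = ibv_open_device(devs[0]);
	if (!ctx)
		return 1;
	pd = ibv_alloc_pd(ctx);
	buf = aligned_alloc(4096, len);
	if (!pd || !buf)
		return 1;

	/*
	 * IB_ACCESS_RELAXED_ORDERING in the kernel corresponds to this
	 * verbs flag; it is an optional hint, so the call behaves the
	 * same on HCAs that lack the relaxed_ordering_* caps.
	 */
	mr = ibv_reg_mr(pd, buf, len,
			IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE |
			IBV_ACCESS_RELAXED_ORDERING);
	if (!mr) {
		perror("ibv_reg_mr");
		return 1;
	}
	printf("registered: lkey=0x%x rkey=0x%x\n", mr->lkey, mr->rkey);

	ibv_dereg_mr(mr);
	ibv_dealloc_pd(pd);
	ibv_close_device(ctx);
	ibv_free_device_list(devs);
	free(buf);
	return 0;
}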