author     Yishai Hadas <yishaih@nvidia.com>        2020-09-30 19:38:28 +0300
committer  Jason Gunthorpe <jgg@nvidia.com>         2020-10-01 22:44:44 +0300
commit     a03bfc37d59de316436c46f5691c5a972ed57c82 (patch)
tree       e6bcbb94a36904422bf2ab61fd8969df5db6966d /drivers/infiniband
parent     677cf51f71c97bcf98852aa2077d7289bc73e3b3 (diff)
download   linux-a03bfc37d59de316436c46f5691c5a972ed57c82.tar.xz
RDMA/mlx5: Sync device with CPU pages upon ODP MR registration
Sync the device with the CPU pages upon ODP MR registration. mlx5 already
has to zero the HW's version of the PAS list, so it may as well deliver a
PAS list that matches the current CPU page table configuration.
Link: https://lore.kernel.org/r/20200930163828.1336747-5-leon@kernel.org
Signed-off-by: Yishai Hadas <yishaih@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
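[Editorial context, not part of the patch: the registration-time snapshot affects any consumer that creates an ODP MR through the normal verbs path. The sketch below is a minimal libibverbs user that pre-faults a buffer and then registers it with IBV_ACCESS_ON_DEMAND; with this change, pages already present in the CPU page table at ibv_reg_mr() time are delivered to the device up front rather than resolved by device page faults on first HCA access. The device choice (first in the list), the 4 MiB buffer size, and the 4 KiB touch stride are illustrative assumptions.]

/*
 * Minimal userspace sketch (assumes libibverbs and an ODP-capable
 * device). Not part of this patch; it only exercises the registration
 * path the patch accelerates.
 */
#include <stdio.h>
#include <stdlib.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **devs = ibv_get_device_list(NULL);
	if (!devs || !devs[0])
		return 1;

	struct ibv_context *ctx = ibv_open_device(devs[0]);
	if (!ctx)
		return 1;
	struct ibv_pd *pd = ibv_alloc_pd(ctx);
	if (!pd)
		return 1;

	size_t len = 4UL << 20;	/* arbitrary 4 MiB buffer */
	char *buf = malloc(len);
	if (!buf)
		return 1;

	/*
	 * Touch every page so the CPU page table is populated before
	 * registration; these are exactly the pages the new snapshot
	 * delivers to the device's PAS list.
	 */
	for (size_t i = 0; i < len; i += 4096)
		buf[i] = 0;

	struct ibv_mr *mr = ibv_reg_mr(pd, buf, len,
				       IBV_ACCESS_LOCAL_WRITE |
				       IBV_ACCESS_ON_DEMAND);
	if (!mr)
		perror("ibv_reg_mr(ODP)");
	else
		ibv_dereg_mr(mr);

	ibv_dealloc_pd(pd);
	ibv_close_device(ctx);
	ibv_free_device_list(devs);
	free(buf);
	return 0;
}

Before this commit, the same registration would zap the device translation (MLX5_IB_UPD_XLT_ZAP) and resolve every page through ODP faults; after it, already-present pages are mapped at registration time through the snapshot path.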
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h |  5
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c      | 11
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c     | 22
3 files changed, 32 insertions(+), 6 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 6ab3efb75b21..b1f2b34e5955 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1283,6 +1283,7 @@ void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
 int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
 			       enum ib_uverbs_advise_mr_advice advice,
 			       u32 flags, struct ib_sge *sg_list, u32 num_sge);
+int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr, bool enable);
 #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
 static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
 {
@@ -1304,6 +1305,10 @@ mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
 {
 	return -EOPNOTSUPP;
 }
+static inline int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr, bool enable)
+{
+	return -EOPNOTSUPP;
+}
 #endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
 
 extern const struct mmu_interval_notifier_ops mlx5_mn_ops;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 151b14038765..b261797b258f 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1421,7 +1421,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	mr->umem = umem;
 	set_mr_fields(dev, mr, npages, length, access_flags);
 
-	if (xlt_with_umr) {
+	if (xlt_with_umr && !(access_flags & IB_ACCESS_ON_DEMAND)) {
 		/*
 		 * If the MR was created with reg_create then it will be
 		 * configured properly but left disabled. It is safe to go ahead
@@ -1429,9 +1429,6 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		 */
 		int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
 
-		if (access_flags & IB_ACCESS_ON_DEMAND)
-			update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;
-
 		err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
 					 update_xlt_flags);
 		if (err) {
@@ -1451,6 +1448,12 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 			dereg_mr(dev, mr);
 			return ERR_PTR(err);
 		}
+
+		err = mlx5_ib_init_odp_mr(mr, xlt_with_umr);
+		if (err) {
+			dereg_mr(dev, mr);
+			return ERR_PTR(err);
+		}
 	}
 
 	return &mr->ibmr;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index d01fdec05b89..5c853ec1b0d8 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -666,6 +666,7 @@ void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr)
 
 #define MLX5_PF_FLAGS_DOWNGRADE BIT(1)
 #define MLX5_PF_FLAGS_SNAPSHOT BIT(2)
+#define MLX5_PF_FLAGS_ENABLE BIT(3)
 static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
 			     u64 user_va, size_t bcnt, u32 *bytes_mapped,
 			     u32 flags)
@@ -675,6 +676,10 @@ static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
 	u64 access_mask;
 	u64 start_idx;
 	bool fault = !(flags & MLX5_PF_FLAGS_SNAPSHOT);
+	u32 xlt_flags = MLX5_IB_UPD_XLT_ATOMIC;
+
+	if (flags & MLX5_PF_FLAGS_ENABLE)
+		xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;
 
 	page_shift = odp->page_shift;
 	start_idx = (user_va - ib_umem_start(odp)) >> page_shift;
@@ -691,8 +696,7 @@ static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
 	 * No need to check whether the MTTs really belong to this MR, since
 	 * ib_umem_odp_map_dma_and_lock already checks this.
 	 */
-	ret = mlx5_ib_update_xlt(mr, start_idx, np, page_shift,
-				 MLX5_IB_UPD_XLT_ATOMIC);
+	ret = mlx5_ib_update_xlt(mr, start_idx, np, page_shift, xlt_flags);
 	mutex_unlock(&odp->umem_mutex);
 
 	if (ret < 0) {
@@ -827,6 +831,20 @@ static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
 				  flags);
 }
 
+int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr, bool enable)
+{
+	u32 flags = MLX5_PF_FLAGS_SNAPSHOT;
+	int ret;
+
+	if (enable)
+		flags |= MLX5_PF_FLAGS_ENABLE;
+
+	ret = pagefault_real_mr(mr, to_ib_umem_odp(mr->umem),
+				mr->umem->address, mr->umem->length, NULL,
+				flags);
+	return ret >= 0 ? 0 : ret;
+}
+
 struct pf_frame {
 	struct pf_frame *next;
 	u32 key;