diff options
| author    | Leon Romanovsky <leonro@nvidia.com> | 2026-03-23 23:10:18 +0300 |
|-----------|-------------------------------------|---------------------------|
| committer | Leon Romanovsky <leon@kernel.org>   | 2026-03-30 20:47:45 +0300 |
| commit    | 179b32095854d44749dd535502f05d95bbf43775 (patch) | |
| tree      | dfda9d51050e78097a44a9e48e817593a7a77646 | |
| parent    | e6fd2491789745ed8c3df86a660dfa7207129d22 (diff) | |
| download  | linux-179b32095854d44749dd535502f05d95bbf43775.tar.xz | |
RDMA/umem: Use consistent DMA attributes when unmapping entries
The DMA API expects that mapping and unmapping use the same DMA
attributes. The RDMA umem code did not meet this requirement, so fix
the mismatch.
Fixes: f03d9fadfe13 ("RDMA/core: Add weak ordering dma attr to dma mapping")
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
 drivers/infiniband/core/umem.c | 13 ++++++-------
 include/rdma/ib_umem.h         |  1 +
 2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 1b6c28f090e3..786fa1aa8e55 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -55,8 +55,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
 	if (dirty)
 		ib_dma_unmap_sgtable_attrs(dev, &umem->sgt_append.sgt,
-					   DMA_BIDIRECTIONAL,
-					   DMA_ATTR_REQUIRE_COHERENT);
+					   DMA_BIDIRECTIONAL, umem->dma_attrs);

 	for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i) {
 		unpin_user_page_range_dirty_lock(sg_page(sg),
@@ -170,7 +169,6 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
 	unsigned long lock_limit;
 	unsigned long new_pinned;
 	unsigned long cur_base;
-	unsigned long dma_attr = DMA_ATTR_REQUIRE_COHERENT;
 	struct mm_struct *mm;
 	unsigned long npages;
 	int pinned, ret;
@@ -203,6 +201,10 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
 	umem->iova = addr;
 	umem->writable = ib_access_writable(access);
 	umem->owning_mm = mm = current->mm;
+	umem->dma_attrs = DMA_ATTR_REQUIRE_COHERENT;
+	if (access & IB_ACCESS_RELAXED_ORDERING)
+		umem->dma_attrs |= DMA_ATTR_WEAK_ORDERING;
+
 	mmgrab(mm);

 	page_list = (struct page **) __get_free_page(GFP_KERNEL);
@@ -255,11 +257,8 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
 		}
 	}

-	if (access & IB_ACCESS_RELAXED_ORDERING)
-		dma_attr |= DMA_ATTR_WEAK_ORDERING;
-
 	ret = ib_dma_map_sgtable_attrs(device, &umem->sgt_append.sgt,
-				       DMA_BIDIRECTIONAL, dma_attr);
+				       DMA_BIDIRECTIONAL, umem->dma_attrs);
 	if (ret)
 		goto umem_release;
 	goto out;
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 38414281a686..2ad52cc1d52b 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -18,6 +18,7 @@ struct ib_umem {
 	u64 iova;
 	size_t length;
 	unsigned long address;
+	unsigned long dma_attrs;
 	u32 writable : 1;
 	u32 is_odp : 1;
 	u32 is_dmabuf : 1;
