summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLeon Romanovsky <leonro@nvidia.com>2026-01-31 08:34:18 +0300
committerChristian König <christian.koenig@amd.com>2026-02-23 21:51:41 +0300
commit8c5f9645c3893f0db679d9affe4fe4e665b990dd (patch)
tree559864b5593de3697db87bd902482827b9255544
parent6ffe939bf94fbbeaaa4050a0e9c22820e959b15f (diff)
downloadlinux-8c5f9645c3893f0db679d9affe4fe4e665b990dd.tar.xz
iommufd: Add dma_buf_pin()
IOMMUFD relies on a private protocol with VFIO, and this always operated in pinned mode. Now that VFIO can support pinned importers, update IOMMUFD to invoke the normal dma-buf flow to request pin. This isn't enough to allow IOMMUFD to work with other exporters; it still needs a way to get the physical address list, which is another series. IOMMUFD supports the defined revoke semantics. It immediately stops and fences access to the memory inside its invalidate_mappings() callback, and it currently doesn't use scatterlists so doesn't call map/unmap at all. It is expected that a future revision can synchronously call unmap from the move_notify callback as well. Reviewed-by: Kevin Tian <kevin.tian@intel.com> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com> Acked-by: Christian König <christian.koenig@amd.com> Signed-off-by: Leon Romanovsky <leonro@nvidia.com> Signed-off-by: Christian König <christian.koenig@amd.com> Link: https://lore.kernel.org/r/20260131-dmabuf-revoke-v7-8-463d956bd527@nvidia.com
-rw-r--r--drivers/iommu/iommufd/pages.c9
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c
index 1d41ac625c98..8124c554f2cc 100644
--- a/drivers/iommu/iommufd/pages.c
+++ b/drivers/iommu/iommufd/pages.c
@@ -1502,16 +1502,22 @@ static int iopt_map_dmabuf(struct iommufd_ctx *ictx, struct iopt_pages *pages,
mutex_unlock(&pages->mutex);
}
- rc = sym_vfio_pci_dma_buf_iommufd_map(attach, &pages->dmabuf.phys);
+ rc = dma_buf_pin(attach);
if (rc)
goto err_detach;
+ rc = sym_vfio_pci_dma_buf_iommufd_map(attach, &pages->dmabuf.phys);
+ if (rc)
+ goto err_unpin;
+
dma_resv_unlock(dmabuf->resv);
/* On success iopt_release_pages() will detach and put the dmabuf. */
pages->dmabuf.attach = attach;
return 0;
+err_unpin:
+ dma_buf_unpin(attach);
err_detach:
dma_resv_unlock(dmabuf->resv);
dma_buf_detach(dmabuf, attach);
@@ -1657,6 +1663,7 @@ void iopt_release_pages(struct kref *kref)
if (iopt_is_dmabuf(pages) && pages->dmabuf.attach) {
struct dma_buf *dmabuf = pages->dmabuf.attach->dmabuf;
+ dma_buf_unpin(pages->dmabuf.attach);
dma_buf_detach(dmabuf, pages->dmabuf.attach);
dma_buf_put(dmabuf);
WARN_ON(!list_empty(&pages->dmabuf.tracker));