summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLeon Romanovsky <leonro@nvidia.com>2026-01-31 08:34:14 +0300
committerChristian König <christian.koenig@amd.com>2026-02-23 18:30:16 +0300
commit1a8a5227f22996d3e503c60569b1813a404da033 (patch)
tree2965b2eafd3b8ed0875fd0e6d37df5a6eaac2a1a
parenta408c0ca0c411ca1ead995bdae3112a806c87556 (diff)
downloadlinux-1a8a5227f22996d3e503c60569b1813a404da033.tar.xz
vfio: Wait for dma-buf invalidation to complete
dma-buf invalidation is handled asynchronously by the hardware, so VFIO must wait until all affected objects have been fully invalidated. In addition, the dma-buf exporter is expecting that all importers unmap any buffers they previously mapped.

Fixes: 5d74781ebc86 ("vfio/pci: Add dma-buf export support for MMIO regions")
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Alex Williamson <alex@shazbot.org>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Link: https://lore.kernel.org/r/20260131-dmabuf-revoke-v7-4-463d956bd527@nvidia.com
-rw-r--r--drivers/vfio/pci/vfio_pci_dmabuf.c61
1 files changed, 57 insertions, 4 deletions
diff --git a/drivers/vfio/pci/vfio_pci_dmabuf.c b/drivers/vfio/pci/vfio_pci_dmabuf.c
index 6633c2f86173..d7712d68ff2b 100644
--- a/drivers/vfio/pci/vfio_pci_dmabuf.c
+++ b/drivers/vfio/pci/vfio_pci_dmabuf.c
@@ -17,6 +17,8 @@ struct vfio_pci_dma_buf {
struct phys_vec *phys_vec;
struct p2pdma_provider *provider;
u32 nr_ranges;
+ struct kref kref;
+ struct completion comp;
u8 revoked : 1;
};
@@ -44,27 +46,46 @@ static int vfio_pci_dma_buf_attach(struct dma_buf *dmabuf,
return 0;
}
+static void vfio_pci_dma_buf_done(struct kref *kref)
+{
+ struct vfio_pci_dma_buf *priv =
+ container_of(kref, struct vfio_pci_dma_buf, kref);
+
+ complete(&priv->comp);
+}
+
static struct sg_table *
vfio_pci_dma_buf_map(struct dma_buf_attachment *attachment,
enum dma_data_direction dir)
{
struct vfio_pci_dma_buf *priv = attachment->dmabuf->priv;
+ struct sg_table *ret;
dma_resv_assert_held(priv->dmabuf->resv);
if (priv->revoked)
return ERR_PTR(-ENODEV);
- return dma_buf_phys_vec_to_sgt(attachment, priv->provider,
- priv->phys_vec, priv->nr_ranges,
- priv->size, dir);
+ ret = dma_buf_phys_vec_to_sgt(attachment, priv->provider,
+ priv->phys_vec, priv->nr_ranges,
+ priv->size, dir);
+ if (IS_ERR(ret))
+ return ret;
+
+ kref_get(&priv->kref);
+ return ret;
}
static void vfio_pci_dma_buf_unmap(struct dma_buf_attachment *attachment,
struct sg_table *sgt,
enum dma_data_direction dir)
{
+ struct vfio_pci_dma_buf *priv = attachment->dmabuf->priv;
+
+ dma_resv_assert_held(priv->dmabuf->resv);
+
dma_buf_free_sgt(attachment, sgt, dir);
+ kref_put(&priv->kref, vfio_pci_dma_buf_done);
}
static void vfio_pci_dma_buf_release(struct dma_buf *dmabuf)
@@ -286,6 +307,9 @@ int vfio_pci_core_feature_dma_buf(struct vfio_pci_core_device *vdev, u32 flags,
goto err_dev_put;
}
+ kref_init(&priv->kref);
+ init_completion(&priv->comp);
+
/* dma_buf_put() now frees priv */
INIT_LIST_HEAD(&priv->dmabufs_elm);
down_write(&vdev->memory_lock);
@@ -330,9 +354,33 @@ void vfio_pci_dma_buf_move(struct vfio_pci_core_device *vdev, bool revoked)
if (priv->revoked != revoked) {
dma_resv_lock(priv->dmabuf->resv, NULL);
- priv->revoked = revoked;
+ if (revoked)
+ priv->revoked = true;
dma_buf_invalidate_mappings(priv->dmabuf);
+ dma_resv_wait_timeout(priv->dmabuf->resv,
+ DMA_RESV_USAGE_BOOKKEEP, false,
+ MAX_SCHEDULE_TIMEOUT);
dma_resv_unlock(priv->dmabuf->resv);
+ if (revoked) {
+ kref_put(&priv->kref, vfio_pci_dma_buf_done);
+ wait_for_completion(&priv->comp);
+ } else {
+ /*
+ * Kref is initialized again, because when the revoke
+ * was performed, the reference counter was decreased
+ * to zero to trigger completion.
+ */
+ kref_init(&priv->kref);
+ /*
+ * There is no need to wait as no mapping was
+ * performed when the previous status was
+ * priv->revoked == true.
+ */
+ reinit_completion(&priv->comp);
+ dma_resv_lock(priv->dmabuf->resv, NULL);
+ priv->revoked = false;
+ dma_resv_unlock(priv->dmabuf->resv);
+ }
}
fput(priv->dmabuf->file);
}
@@ -353,7 +401,12 @@ void vfio_pci_dma_buf_cleanup(struct vfio_pci_core_device *vdev)
priv->vdev = NULL;
priv->revoked = true;
dma_buf_invalidate_mappings(priv->dmabuf);
+ dma_resv_wait_timeout(priv->dmabuf->resv,
+ DMA_RESV_USAGE_BOOKKEEP, false,
+ MAX_SCHEDULE_TIMEOUT);
dma_resv_unlock(priv->dmabuf->resv);
+ kref_put(&priv->kref, vfio_pci_dma_buf_done);
+ wait_for_completion(&priv->comp);
vfio_device_put_registration(&vdev->vdev);
fput(priv->dmabuf->file);
}