author | Steve Sistare <steven.sistare@oracle.com> | 2023-01-31 19:58:03 +0300
---|---|---
committer | Alex Williamson <alex.williamson@redhat.com> | 2023-02-09 21:39:14 +0300
commit | ef3a3f6a294ba65fd906a291553935881796f8a5 (patch) |
tree | d6b30055cb984403a0b05847271bba9d7dbfb045 /drivers/vfio |
parent | 168a9c91fe0a1180959b6394f4566de7080244b6 (diff) |
download | linux-ef3a3f6a294ba65fd906a291553935881796f8a5.tar.xz |
vfio/type1: exclude mdevs from VFIO_UPDATE_VADDR
Disable the VFIO_UPDATE_VADDR capability if mediated devices are present.
Their kernel threads could be blocked indefinitely by a misbehaving
userland if they try to pin or unpin pages while vaddrs are being updated.

Do not allow groups to be added to the container while vaddrs are invalid,
so we never need to block user threads from pinning and can delete the
vaddr-waiting code in a subsequent patch.
Fixes: c3cbab24db38 ("vfio/type1: implement interfaces to update vaddr")
Cc: stable@vger.kernel.org
Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/1675184289-267876-2-git-send-email-steven.sistare@oracle.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
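
For readers of the patch, a hedged userspace sketch follows (the container handling and message strings are illustrative, not from the patch). It probes VFIO_UPDATE_VADDR with VFIO_CHECK_EXTENSION, which after this change succeeds only on a container that has an iommu backend set and no emulated (mdev) group attached.

```c
/*
 * Minimal userspace sketch (not part of the patch): probe whether
 * VFIO_UPDATE_VADDR is usable on a given container.  After this change
 * the extension is reported only when no emulated (mdev) group is
 * attached, so the result depends on the container's configuration.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vfio.h>

/* 'container' is assumed to be a configured VFIO container fd. */
static int vaddr_update_supported(int container)
{
	/* VFIO_CHECK_EXTENSION returns > 0 when the capability is present. */
	return ioctl(container, VFIO_CHECK_EXTENSION, VFIO_UPDATE_VADDR) > 0;
}

int main(void)
{
	int container = open("/dev/vfio/vfio", O_RDWR);

	if (container < 0) {
		perror("open /dev/vfio/vfio");
		return 1;
	}

	/*
	 * A real tool would probe after attaching its groups and calling
	 * VFIO_SET_IOMMU; on a bare container the kernel now reports the
	 * extension as unavailable because no iommu backend is set yet.
	 */
	printf("VFIO_UPDATE_VADDR %s\n",
	       vaddr_update_supported(container) ? "available" : "unavailable");

	close(container);
	return 0;
}
```
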
Diffstat (limited to 'drivers/vfio')
-rw-r--r-- | drivers/vfio/vfio_iommu_type1.c | 44 |
1 file changed, 42 insertions(+), 2 deletions(-)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 23c24fe98c00..144f5bb20fb8 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -861,6 +861,12 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
 
 	mutex_lock(&iommu->lock);
 
+	if (WARN_ONCE(iommu->vaddr_invalid_count,
+		      "vfio_pin_pages not allowed with VFIO_UPDATE_VADDR\n")) {
+		ret = -EBUSY;
+		goto pin_done;
+	}
+
 	/*
 	 * Wait for all necessary vaddr's to be valid so they can be used in
 	 * the main loop without dropping the lock, to avoid racing vs unmap.
@@ -1343,6 +1349,12 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
 
 	mutex_lock(&iommu->lock);
 
+	/* Cannot update vaddr if mdev is present. */
+	if (invalidate_vaddr && !list_empty(&iommu->emulated_iommu_groups)) {
+		ret = -EBUSY;
+		goto unlock;
+	}
+
 	pgshift = __ffs(iommu->pgsize_bitmap);
 	pgsize = (size_t)1 << pgshift;
 
@@ -2185,11 +2197,16 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	struct iommu_domain_geometry *geo;
 	LIST_HEAD(iova_copy);
 	LIST_HEAD(group_resv_regions);
-	int ret = -EINVAL;
+	int ret = -EBUSY;
 
 	mutex_lock(&iommu->lock);
 
+	/* Attach could require pinning, so disallow while vaddr is invalid. */
+	if (iommu->vaddr_invalid_count)
+		goto out_unlock;
+
 	/* Check for duplicates */
+	ret = -EINVAL;
 	if (vfio_iommu_find_iommu_group(iommu, iommu_group))
 		goto out_unlock;
 
@@ -2660,6 +2677,16 @@ static int vfio_domains_have_enforce_cache_coherency(struct vfio_iommu *iommu)
 	return ret;
 }
 
+static bool vfio_iommu_has_emulated(struct vfio_iommu *iommu)
+{
+	bool ret;
+
+	mutex_lock(&iommu->lock);
+	ret = !list_empty(&iommu->emulated_iommu_groups);
+	mutex_unlock(&iommu->lock);
+	return ret;
+}
+
 static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu,
 					    unsigned long arg)
 {
@@ -2668,8 +2695,13 @@ static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu,
 	case VFIO_TYPE1v2_IOMMU:
 	case VFIO_TYPE1_NESTING_IOMMU:
 	case VFIO_UNMAP_ALL:
-	case VFIO_UPDATE_VADDR:
 		return 1;
+	case VFIO_UPDATE_VADDR:
+		/*
+		 * Disable this feature if mdevs are present. They cannot
+		 * safely pin/unpin/rw while vaddrs are being updated.
+		 */
+		return iommu && !vfio_iommu_has_emulated(iommu);
 	case VFIO_DMA_CC_IOMMU:
 		if (!iommu)
 			return 0;
@@ -3138,6 +3170,13 @@ static int vfio_iommu_type1_dma_rw(void *iommu_data, dma_addr_t user_iova,
 	size_t done;
 
 	mutex_lock(&iommu->lock);
+
+	if (WARN_ONCE(iommu->vaddr_invalid_count,
+		      "vfio_dma_rw not allowed with VFIO_UPDATE_VADDR\n")) {
+		ret = -EBUSY;
+		goto out;
+	}
+
 	while (count > 0) {
 		ret = vfio_iommu_type1_dma_rw_chunk(iommu, user_iova, data,
 						    count, write, &done);
@@ -3149,6 +3188,7 @@ static int vfio_iommu_type1_dma_rw(void *iommu_data, dma_addr_t user_iova,
 		user_iova += done;
 	}
 
+out:
 	mutex_unlock(&iommu->lock);
 	return ret;
 }
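
To show what the new -EBUSY check in vfio_dma_do_unmap() means for callers, here is a hedged sketch of the vaddr-invalidation step a live-update tool performs before exec(); the helper name and error handling are illustrative, not from the patch. With an mdev group attached, a VFIO_DMA_UNMAP_FLAG_VADDR request now fails with EBUSY instead of leaving mdev kernel threads waiting on the stale vaddrs.

```c
/*
 * Sketch only: invalidate the vaddrs of all existing DMA mappings so the
 * process can exec() a new image and later re-register the new vaddrs.
 * With this patch, the ioctl fails with EBUSY if an emulated (mdev) group
 * is attached to the container.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int invalidate_all_vaddrs(int container)
{
	struct vfio_iommu_type1_dma_unmap unmap = {
		.argsz = sizeof(unmap),
		/* Invalidate vaddrs of all mappings; pages stay pinned/mapped. */
		.flags = VFIO_DMA_UNMAP_FLAG_VADDR | VFIO_DMA_UNMAP_FLAG_ALL,
	};

	if (ioctl(container, VFIO_IOMMU_UNMAP_DMA, &unmap) < 0) {
		int err = errno;

		fprintf(stderr, "vaddr invalidate: %s\n", strerror(err));
		return -err;	/* -EBUSY when an mdev group is attached */
	}
	return 0;
}
```

If the ioctl fails with EBUSY, the tool should fall back to a path that does not rely on VFIO_UPDATE_VADDR, which is consistent with probing the capability via VFIO_CHECK_EXTENSION as sketched above.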