author     Robin Murphy <robin.murphy@arm.com>  2016-08-09 19:31:35 +0300
committer  Joerg Roedel <jroedel@suse.de>       2016-08-10 13:02:02 +0300
commit     c987ff0d3cb37d7fe1ddaa370811dfd9f73643fa (patch)
tree       fe17488dd0ef70dd4d7c20e854ff6617280320af
parent     3ec60043f7c02e1f79e4a90045ff2d2e80042941 (diff)
download   linux-c987ff0d3cb37d7fe1ddaa370811dfd9f73643fa.tar.xz
iommu/dma: Respect IOMMU aperture when allocating
Where a device driver has set a 64-bit DMA mask to indicate the absence
of addressing limitations, we still need to ensure that we don't
allocate IOVAs beyond the actual input size of the IOMMU. The reported
aperture is the most reliable way we have of inferring that input
address size, so use that to enforce a hard upper limit where available.

Fixes: 0db2e5d18f76 ("iommu: Implement common IOMMU ops for DMA mapping")
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
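For illustration, the essence of the change is to clamp the caller-supplied DMA limit to the domain's reported aperture before handing it to the IOVA allocator. Below is a minimal standalone sketch of that clamping step in plain C; the names clamp_dma_limit, dev_dma_limit, force_aperture and aperture_end are illustrative stand-ins for the kernel structures, not the kernel API itself.

    #include <stdint.h>

    /*
     * Hypothetical sketch, not the kernel code: when the IOMMU reports a
     * hard aperture, cap the device's DMA limit at the aperture end so a
     * 64-bit mask cannot yield IOVAs the IOMMU is unable to translate.
     */
    static uint64_t clamp_dma_limit(uint64_t dev_dma_limit, int force_aperture,
                                    uint64_t aperture_end)
    {
            if (force_aperture && aperture_end < dev_dma_limit)
                    return aperture_end;
            return dev_dma_limit;
    }

In the patch itself this is expressed as min(dma_limit, domain->geometry.aperture_end), guarded by domain->geometry.force_aperture, inside __alloc_iova().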
-rw-r--r--   drivers/iommu/dma-iommu.c   11
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 7d991c81c4fa..00c8a08d56e7 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -152,12 +152,15 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
 	}
 }
 
-static struct iova *__alloc_iova(struct iova_domain *iovad, size_t size,
+static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
 		dma_addr_t dma_limit)
 {
+	struct iova_domain *iovad = domain->iova_cookie;
 	unsigned long shift = iova_shift(iovad);
 	unsigned long length = iova_align(iovad, size) >> shift;
 
+	if (domain->geometry.force_aperture)
+		dma_limit = min(dma_limit, domain->geometry.aperture_end);
 	/*
 	 * Enforce size-alignment to be safe - there could perhaps be an
 	 * attribute to control this per-device, or at least per-domain...
@@ -315,7 +318,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
 	if (!pages)
 		return NULL;
 
-	iova = __alloc_iova(iovad, size, dev->coherent_dma_mask);
+	iova = __alloc_iova(domain, size, dev->coherent_dma_mask);
 	if (!iova)
 		goto out_free_pages;
 
@@ -387,7 +390,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 	phys_addr_t phys = page_to_phys(page) + offset;
 	size_t iova_off = iova_offset(iovad, phys);
 	size_t len = iova_align(iovad, size + iova_off);
-	struct iova *iova = __alloc_iova(iovad, len, dma_get_mask(dev));
+	struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev));
 
 	if (!iova)
 		return DMA_ERROR_CODE;
@@ -539,7 +542,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		prev = s;
 	}
 
-	iova = __alloc_iova(iovad, iova_len, dma_get_mask(dev));
+	iova = __alloc_iova(domain, iova_len, dma_get_mask(dev));
 	if (!iova)
 		goto out_restore_sg;
 