author     David Stevens <stevensd@chromium.org>  2021-09-29 05:32:54 +0300
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2021-11-18 21:16:49 +0300
commit     d39f447f259aea9310e606ab40ae90eb4fde43d9 (patch)
tree       68050a6b5d34328a86ceb1c01b4ae8dba5bc8590
parent     e9d8cb8dad2da04a1278bf2a4e75d4a17ceb4b7f (diff)
iommu/dma: Fix sync_sg with swiotlb
[ Upstream commit 08ae5d4a1ae96b72222e7b02d072bb997ff29dac ]

The is_swiotlb_buffer function takes the physical address of the swiotlb
buffer, not the physical address of the original buffer. The sglist
contains the physical addresses of the original buffer, so for the
sync_sg functions to work properly when a bounce buffer might have been
used, we need to use iommu_iova_to_phys to look up the physical address.
This is what sync_single does, so call that function on each sglist
segment.

The previous code mostly worked because swiotlb does the transfer on map
and unmap. However, any callers which use DMA_ATTR_SKIP_CPU_SYNC with
sglists or which call sync_sg would not have had anything copied to the
bounce buffer.

Fixes: 82612d66d51d ("iommu: Allow the iommu/dma api to use bounce buffers")
Signed-off-by: David Stevens <stevensd@chromium.org>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20210929023300.335969-2-stevensd@google.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Sasha Levin <sashal@kernel.org>
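The address mismatch the log describes can be sketched in a few lines. The fragment below is illustrative only, not code from the patch; sync_sg_segment_sketch is a hypothetical function name, while the helpers it calls are the real kernel APIs named in the log:

	#include <linux/dma-mapping.h>
	#include <linux/iommu.h>
	#include <linux/scatterlist.h>
	#include <linux/swiotlb.h>

	/* Hypothetical illustration of the root cause -- not patch code. */
	static void sync_sg_segment_sketch(struct device *dev,
					   struct scatterlist *sg,
					   enum dma_data_direction dir)
	{
		/* The sglist records the original buffer's physical address... */
		phys_addr_t orig_phys = sg_phys(sg);

		/*
		 * ...but for an untrusted device mapped through swiotlb, the
		 * DMA address resolves to the bounce slot, and that slot is
		 * the only address is_swiotlb_buffer() knows about.
		 */
		phys_addr_t phys = iommu_iova_to_phys(iommu_get_dma_domain(dev),
						      sg_dma_address(sg));

		/*
		 * The old sync_sg code passed orig_phys here, so the check
		 * never fired and no bounce-buffer copy ever happened on the
		 * sync_sg paths.
		 */
		if (is_swiotlb_buffer(dev, phys))
			swiotlb_sync_single_for_cpu(dev, phys, sg->length, dir);
	}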
 drivers/iommu/dma-iommu.c | 33 +++++++++++++--------------------
 1 file changed, 13 insertions(+), 20 deletions(-)
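As the log notes, the bug was only visible to callers that defer the CPU sync. A hypothetical driver pattern that exercises the fixed path (fill_and_sync and count are invented names for illustration; the DMA API calls are real):

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	/* Hypothetical driver helper, for illustration only. */
	static int fill_and_sync(struct device *dev, struct scatterlist *sgl,
				 int count)
	{
		/* Skip the copy at map time; we sync explicitly below. */
		int nents = dma_map_sg_attrs(dev, sgl, count, DMA_TO_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
		if (nents <= 0)
			return -EIO;

		/* ... CPU fills the original buffers here ... */

		/*
		 * With bounce buffering, this is the only point where data can
		 * be copied into the bounce buffer. Before this fix, the sg
		 * sync path checked the wrong physical address and copied
		 * nothing for untrusted devices.
		 */
		dma_sync_sg_for_device(dev, sgl, count, DMA_TO_DEVICE);
		return nents;
	}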
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 896bea04c347..c4d205b63c58 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -828,17 +828,13 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg;
int i;
- if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
- return;
-
- for_each_sg(sgl, sg, nelems, i) {
- if (!dev_is_dma_coherent(dev))
+ if (dev_is_untrusted(dev))
+ for_each_sg(sgl, sg, nelems, i)
+ iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
+ sg->length, dir);
+ else if (!dev_is_dma_coherent(dev))
+ for_each_sg(sgl, sg, nelems, i)
arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
-
- if (is_swiotlb_buffer(dev, sg_phys(sg)))
- swiotlb_sync_single_for_cpu(dev, sg_phys(sg),
- sg->length, dir);
- }
}
static void iommu_dma_sync_sg_for_device(struct device *dev,
@@ -848,17 +844,14 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
struct scatterlist *sg;
int i;
- if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
- return;
-
- for_each_sg(sgl, sg, nelems, i) {
- if (is_swiotlb_buffer(dev, sg_phys(sg)))
- swiotlb_sync_single_for_device(dev, sg_phys(sg),
- sg->length, dir);
-
- if (!dev_is_dma_coherent(dev))
+ if (dev_is_untrusted(dev))
+ for_each_sg(sgl, sg, nelems, i)
+ iommu_dma_sync_single_for_device(dev,
+ sg_dma_address(sg),
+ sg->length, dir);
+ else if (!dev_is_dma_coherent(dev))
+ for_each_sg(sgl, sg, nelems, i)
arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
- }
}
static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
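For reference, the single-segment helper that the sg paths now reuse resolves the physical address through the IOMMU before consulting swiotlb. This is a close paraphrase of iommu_dma_sync_single_for_cpu() as it appears in drivers/iommu/dma-iommu.c around this kernel version, not a verbatim copy:

	static void iommu_dma_sync_single_for_cpu(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir)
	{
		phys_addr_t phys;

		if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
			return;

		/*
		 * Translate the IOVA back to the address DMA actually
		 * targeted (the bounce slot, if one was used).
		 */
		phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(phys, size, dir);

		/* Unlike sg_phys(), this address can match a swiotlb slot. */
		if (is_swiotlb_buffer(dev, phys))
			swiotlb_sync_single_for_cpu(dev, phys, size, dir);
	}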