author | Takeshi Kihara <takeshi.kihara.df@renesas.com> | 2017-01-11 13:11:17 +0300
committer | Will Deacon <will.deacon@arm.com> | 2017-01-12 18:34:15 +0300
commit | 7f332fc1f0f053799aeda587cefbc67b5ecd72c5 (patch)
tree | 4669589f4875bb121bc1f5dff4b2d41502b3aa82 /arch/arm64
parent | ec6d06efb0bac6cd92846e42d1afe5b98b57e7c2 (diff)
download | linux-7f332fc1f0f053799aeda587cefbc67b5ecd72c5.tar.xz
arm64: Add support for DMA_ATTR_SKIP_CPU_SYNC attribute to swiotlb
This patch adds support for the DMA_ATTR_SKIP_CPU_SYNC attribute in
swiotlb's dma_{un}map_{page,sg} family of functions.
DMA_ATTR_SKIP_CPU_SYNC allows platform code to skip synchronization of
the CPU cache for the given buffer, on the assumption that it has
already been transferred to the 'device' domain.
Ported from IOMMU .{un}map_{sg,page} ops.
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Takeshi Kihara <takeshi.kihara.df@renesas.com>
Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Will Deacon <will.deacon@arm.com>
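For illustration, a minimal driver-side sketch of how the attribute is meant to be used (hypothetical code, not part of this patch; the helper name and sizes are made up): the buffer is mapped with DMA_ATTR_SKIP_CPU_SYNC so the whole-buffer cache clean is skipped, and the driver then syncs only the region the CPU actually wrote.

#include <linux/dma-mapping.h>

/* Hypothetical helper: map a large buffer but flush only 'used' bytes. */
static dma_addr_t example_map_tx_buf(struct device *dev, void *buf,
				     size_t buf_size, size_t used)
{
	dma_addr_t dma;

	/* Skip the implicit cache clean over the whole buffer. */
	dma = dma_map_single_attrs(dev, buf, buf_size, DMA_TO_DEVICE,
				   DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, dma))
		return dma;	/* caller checks with dma_mapping_error() */

	/* Explicitly push only the bytes the CPU touched to memory. */
	dma_sync_single_for_device(dev, dma, used, DMA_TO_DEVICE);

	return dma;
}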
Diffstat (limited to 'arch/arm64')
-rw-r--r-- | arch/arm64/mm/dma-mapping.c | 12
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index e04082700bb1..1d7d5d2881db 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -211,7 +211,8 @@ static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
 	dma_addr_t dev_addr;
 
 	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
-	if (!is_device_dma_coherent(dev))
+	if (!is_device_dma_coherent(dev) &&
+	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
 		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
 
 	return dev_addr;
@@ -222,7 +223,8 @@ static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
 				 size_t size, enum dma_data_direction dir,
 				 unsigned long attrs)
 {
-	if (!is_device_dma_coherent(dev))
+	if (!is_device_dma_coherent(dev) &&
+	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
 		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
 	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
 }
@@ -235,7 +237,8 @@ static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
 	int i, ret;
 
 	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
-	if (!is_device_dma_coherent(dev))
+	if (!is_device_dma_coherent(dev) &&
+	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
 		for_each_sg(sgl, sg, ret, i)
 			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
 				       sg->length, dir);
@@ -251,7 +254,8 @@ static void __swiotlb_unmap_sg_attrs(struct device *dev,
 	struct scatterlist *sg;
 	int i;
 
-	if (!is_device_dma_coherent(dev))
+	if (!is_device_dma_coherent(dev) &&
+	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
 		for_each_sg(sgl, sg, nelems, i)
 			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
 					 sg->length, dir);
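The unmap path mirrors this: with DMA_ATTR_SKIP_CPU_SYNC the cache maintenance in __swiotlb_unmap_page above is skipped, so a driver that reuses a mapping is expected to sync only the bytes the device actually wrote before reading them. A hypothetical sketch, with names and lengths chosen purely for illustration:

/* Hypothetical RX completion: the device wrote 'len' bytes into the buffer. */
static void example_rx_complete(struct device *dev, dma_addr_t dma,
				void *buf, size_t buf_size, size_t len)
{
	/* Hand only the valid bytes back to the CPU domain. */
	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);

	/* ... process buf[0..len) ... */

	/* Tear down the mapping without another full-buffer cache operation. */
	dma_unmap_single_attrs(dev, dma, buf_size, DMA_FROM_DEVICE,
			       DMA_ATTR_SKIP_CPU_SYNC);
}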