author    Linus Torvalds <torvalds@linux-foundation.org>  2013-10-16 04:13:34 +0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-10-16 04:13:34 +0400
commit    ba0a062ef55a6f8e8361a63ee48e654879ba00bf (patch)
tree      6d7cdb70158ffd2be9b5165074c74c3333bc6210 /arch
parent    b83aea88d3ab24a427351b496e64f0725ab09bc3 (diff)
parent    c9b24996d5da1bf7d2bebab5770dfcc7834c53b7 (diff)
download  linux-ba0a062ef55a6f8e8361a63ee48e654879ba00bf.tar.xz
Merge branch 'fixes-for-v3.12' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping
Pull DMA-mapping fix from Marek Szyprowski:
 "A bugfix for the IOMMU-based implementation of dma-mapping subsystem for
  ARM architecture"

* 'fixes-for-v3.12' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping:
  ARM: dma-mapping: Always pass proper prot flags to iommu_map()
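For context, the fix amounts to deriving the IOMMU page-protection bits from the DMA transfer direction instead of passing 0 to iommu_map(). The sketch below mirrors the __dma_direction_to_prot() helper added in the diff; it is a standalone illustration rather than the exact kernel code, and the helper name dma_dir_to_iommu_prot() is chosen here only for illustration. It assumes the IOMMU_READ/IOMMU_WRITE flags from <linux/iommu.h> and enum dma_data_direction from <linux/dma-direction.h>.

        #include <linux/dma-direction.h>        /* enum dma_data_direction */
        #include <linux/iommu.h>                /* IOMMU_READ, IOMMU_WRITE */

        /*
         * Translate a DMA transfer direction into IOMMU protection bits.
         * DMA_TO_DEVICE means the device only reads the buffer, so it needs
         * IOMMU_READ; DMA_FROM_DEVICE means the device only writes into it,
         * so it needs IOMMU_WRITE; bidirectional buffers need both.
         */
        static int dma_dir_to_iommu_prot(enum dma_data_direction dir)
        {
                switch (dir) {
                case DMA_BIDIRECTIONAL:
                        return IOMMU_READ | IOMMU_WRITE;
                case DMA_TO_DEVICE:
                        return IOMMU_READ;
                case DMA_FROM_DEVICE:
                        return IOMMU_WRITE;
                default:
                        return 0;
                }
        }

The actual patch adds an equivalent helper to arch/arm/mm/dma-mapping.c and uses it in both __map_sg_chunk() and arm_coherent_iommu_map_page(), as shown in the diff below.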
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/mm/dma-mapping.c  43
1 file changed, 28 insertions, 15 deletions
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f5e1a8471714..1272ed202dde 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1232,7 +1232,8 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
break;
len = (j - i) << PAGE_SHIFT;
- ret = iommu_map(mapping->domain, iova, phys, len, 0);
+ ret = iommu_map(mapping->domain, iova, phys, len,
+ IOMMU_READ|IOMMU_WRITE);
if (ret < 0)
goto fail;
iova += len;
@@ -1431,6 +1432,27 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
GFP_KERNEL);
}
+static int __dma_direction_to_prot(enum dma_data_direction dir)
+{
+ int prot;
+
+ switch (dir) {
+ case DMA_BIDIRECTIONAL:
+ prot = IOMMU_READ | IOMMU_WRITE;
+ break;
+ case DMA_TO_DEVICE:
+ prot = IOMMU_READ;
+ break;
+ case DMA_FROM_DEVICE:
+ prot = IOMMU_WRITE;
+ break;
+ default:
+ prot = 0;
+ }
+
+ return prot;
+}
+
/*
* Map a part of the scatter-gather list into contiguous io address space
*/
@@ -1444,6 +1466,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
int ret = 0;
unsigned int count;
struct scatterlist *s;
+ int prot;
size = PAGE_ALIGN(size);
*handle = DMA_ERROR_CODE;
@@ -1460,7 +1483,9 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
- ret = iommu_map(mapping->domain, iova, phys, len, 0);
+ prot = __dma_direction_to_prot(dir);
+
+ ret = iommu_map(mapping->domain, iova, phys, len, prot);
if (ret < 0)
goto fail;
count += len >> PAGE_SHIFT;
@@ -1665,19 +1690,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
if (dma_addr == DMA_ERROR_CODE)
return dma_addr;
- switch (dir) {
- case DMA_BIDIRECTIONAL:
- prot = IOMMU_READ | IOMMU_WRITE;
- break;
- case DMA_TO_DEVICE:
- prot = IOMMU_READ;
- break;
- case DMA_FROM_DEVICE:
- prot = IOMMU_WRITE;
- break;
- default:
- prot = 0;
- }
+ prot = __dma_direction_to_prot(dir);
ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
if (ret < 0)