Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/dma-mapping.c | 43
-rw-r--r--  arch/arm/mm/init.c        |  3
-rw-r--r--  arch/arm/mm/mmap.c        |  6
3 files changed, 30 insertions, 22 deletions
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f5e1a8471714..1272ed202dde 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1232,7 +1232,8 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
 			break;
 
 		len = (j - i) << PAGE_SHIFT;
-		ret = iommu_map(mapping->domain, iova, phys, len, 0);
+		ret = iommu_map(mapping->domain, iova, phys, len,
+				IOMMU_READ|IOMMU_WRITE);
 		if (ret < 0)
 			goto fail;
 		iova += len;
@@ -1431,6 +1432,27 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
 					 GFP_KERNEL);
 }
 
+static int __dma_direction_to_prot(enum dma_data_direction dir)
+{
+	int prot;
+
+	switch (dir) {
+	case DMA_BIDIRECTIONAL:
+		prot = IOMMU_READ | IOMMU_WRITE;
+		break;
+	case DMA_TO_DEVICE:
+		prot = IOMMU_READ;
+		break;
+	case DMA_FROM_DEVICE:
+		prot = IOMMU_WRITE;
+		break;
+	default:
+		prot = 0;
+	}
+
+	return prot;
+}
+
 /*
  * Map a part of the scatter-gather list into contiguous io address space
  */
@@ -1444,6 +1466,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 	int ret = 0;
 	unsigned int count;
 	struct scatterlist *s;
+	int prot;
 
 	size = PAGE_ALIGN(size);
 	*handle = DMA_ERROR_CODE;
@@ -1460,7 +1483,9 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 
-		ret = iommu_map(mapping->domain, iova, phys, len, 0);
+		prot = __dma_direction_to_prot(dir);
+
+		ret = iommu_map(mapping->domain, iova, phys, len, prot);
 		if (ret < 0)
 			goto fail;
 		count += len >> PAGE_SHIFT;
@@ -1665,19 +1690,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 	if (dma_addr == DMA_ERROR_CODE)
 		return dma_addr;
 
-	switch (dir) {
-	case DMA_BIDIRECTIONAL:
-		prot = IOMMU_READ | IOMMU_WRITE;
-		break;
-	case DMA_TO_DEVICE:
-		prot = IOMMU_READ;
-		break;
-	case DMA_FROM_DEVICE:
-		prot = IOMMU_WRITE;
-		break;
-	default:
-		prot = 0;
-	}
+	prot = __dma_direction_to_prot(dir);
 
 	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
 	if (ret < 0)
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index febaee7ca57b..18ec4c504abf 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -17,7 +17,6 @@
 #include <linux/nodemask.h>
 #include <linux/initrd.h>
 #include <linux/of_fdt.h>
-#include <linux/of_reserved_mem.h>
 #include <linux/highmem.h>
 #include <linux/gfp.h>
 #include <linux/memblock.h>
@@ -379,8 +378,6 @@ void __init arm_memblock_init(struct meminfo *mi,
 	if (mdesc->reserve)
 		mdesc->reserve();
 
-	early_init_dt_scan_reserved_mem();
-
 	/*
 	 * reserve memory for DMA contigouos allocations,
 	 * must come from DMA area inside low memory
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 0c6356255fe3..d27158c38eb0 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -202,13 +202,11 @@ int valid_phys_addr_range(phys_addr_t addr, size_t size)
 }
 
 /*
- * We don't use supersection mappings for mmap() on /dev/mem, which
- * means that we can't map the memory area above the 4G barrier into
- * userspace.
+ * Do not allow /dev/mem mappings beyond the supported physical range.
  */
 int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
 {
-	return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
+	return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
 }
 
 #ifdef CONFIG_STRICT_DEVMEM
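
For context, the core of the dma-mapping.c change is the new __dma_direction_to_prot() helper, which converts a DMA transfer direction into IOMMU protection bits so that mappings previously created with prot 0 get read/write permissions matching the transfer. The standalone sketch below only illustrates that mapping; the enum values and flag bits are stand-ins for the kernel's <linux/dma-direction.h> and <linux/iommu.h> definitions, so it is a minimal illustration of the logic, not the kernel API itself.

/* Standalone sketch of the direction -> IOMMU prot mapping factored out
 * by the patch.  Enum values and flag bits are placeholders mirroring
 * the kernel headers, used here only so the example compiles on its own. */
#include <stdio.h>

enum dma_data_direction {          /* stand-in for <linux/dma-direction.h> */
	DMA_BIDIRECTIONAL = 0,
	DMA_TO_DEVICE     = 1,
	DMA_FROM_DEVICE   = 2,
	DMA_NONE          = 3,
};

#define IOMMU_READ  (1 << 0)       /* placeholder flag values */
#define IOMMU_WRITE (1 << 1)

static int dma_direction_to_prot(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return IOMMU_READ;   /* device only reads the buffer */
	case DMA_FROM_DEVICE:
		return IOMMU_WRITE;  /* device only writes the buffer */
	default:
		return 0;            /* DMA_NONE and anything unexpected */
	}
}

int main(void)
{
	printf("DMA_TO_DEVICE   -> prot 0x%x\n", dma_direction_to_prot(DMA_TO_DEVICE));
	printf("DMA_FROM_DEVICE -> prot 0x%x\n", dma_direction_to_prot(DMA_FROM_DEVICE));
	printf("DMA_BIDIRECTIONAL -> prot 0x%x\n", dma_direction_to_prot(DMA_BIDIRECTIONAL));
	return 0;
}

Running the sketch prints the protection bits chosen for each direction, mirroring the switch statement the patch moves out of arm_coherent_iommu_map_page() so that __iommu_create_mapping() and __map_sg_chunk() can reuse it.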