author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-01-31 22:32:27 +0300 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-01-31 22:32:27 +0300 |
commit | 2382dc9a3eca644147be83dd2cd0dd64dc9e3e8c (patch) | |
tree | 71a152721a9b9b11875bf5ea053edb72c9b3a94e /arch/arm64 | |
parent | 28bc6fb9596fe1e577d09fc17ee6e1bb051c6ba3 (diff) | |
parent | 04f56534786c885f578c24461bcd782fe9a787cf (diff) | |
download | linux-2382dc9a3eca644147be83dd2cd0dd64dc9e3e8c.tar.xz | |
Merge tag 'dma-mapping-4.16' of git://git.infradead.org/users/hch/dma-mapping
Pull dma mapping updates from Christoph Hellwig:
"Except for a runtime warning fix from Christian this is all about
consolidation of the generic no-IOMMU code, a well as the glue code
for swiotlb.
All the code is based on the x86 implementation with hooks to allow
all architectures that aren't cache coherent to use it.
The x86 conversion itself has been deferred because the x86
maintainers were a little busy in recent months"
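As background on what that consolidation buys: the helpers that each architecture used to duplicate (and that the arm64 diff below deletes from asm/dma-mapping.h) boil down to a fixed pfn-offset translation between CPU physical and bus addresses, plus a mask check. The following is a minimal user-space sketch of that arithmetic, not the kernel implementation; the struct and the *_model names are invented here for illustration.

/*
 * Minimal user-space sketch of the dma-direct address translation that
 * the generic code now provides; all *_model names are illustrative,
 * not kernel API.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct device_model {
	uint64_t dma_pfn_offset;	/* CPU-to-bus offset, in pages */
	uint64_t dma_mask;		/* highest bus address the device reaches */
};

/* CPU physical address -> bus address as seen by the device. */
static uint64_t phys_to_dma_model(const struct device_model *dev, uint64_t paddr)
{
	return paddr - (dev->dma_pfn_offset << PAGE_SHIFT);
}

/* Bus address -> CPU physical address (inverse of the above). */
static uint64_t dma_to_phys_model(const struct device_model *dev, uint64_t daddr)
{
	return daddr + (dev->dma_pfn_offset << PAGE_SHIFT);
}

/* A buffer is DMA-capable iff its last bus address fits under the mask. */
static bool dma_capable_model(const struct device_model *dev,
			      uint64_t addr, size_t size)
{
	return addr + size - 1 <= dev->dma_mask;
}

int main(void)
{
	/* Assume RAM starts at 0x80000000 but the device sees it at 0. */
	struct device_model dev = {
		.dma_pfn_offset = 0x80000000ULL >> PAGE_SHIFT,
		.dma_mask	= (1ULL << 32) - 1,
	};
	uint64_t bus = phys_to_dma_model(&dev, 0x80001000ULL);

	printf("bus 0x%llx capable %d phys 0x%llx\n",
	       (unsigned long long)bus,
	       dma_capable_model(&dev, bus, 4096),
	       (unsigned long long)dma_to_phys_model(&dev, bus));
	return 0;
}

The removed arm64 inlines in the diff below perform exactly this shift-by-dma_pfn_offset arithmetic; after this series the same helpers come from the generic <linux/dma-direct.h> instead.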
* tag 'dma-mapping-4.16' of git://git.infradead.org/users/hch/dma-mapping: (57 commits)
MAINTAINERS: add the iommu list for swiotlb and xen-swiotlb
arm64: use swiotlb_alloc and swiotlb_free
arm64: replace ZONE_DMA with ZONE_DMA32
mips: use swiotlb_{alloc,free}
mips/netlogic: remove swiotlb support
tile: use generic swiotlb_ops
tile: replace ZONE_DMA with ZONE_DMA32
unicore32: use generic swiotlb_ops
ia64: remove an ifdef around the content of pci-dma.c
ia64: clean up swiotlb support
ia64: use generic swiotlb_ops
ia64: replace ZONE_DMA with ZONE_DMA32
swiotlb: remove various exports
swiotlb: refactor coherent buffer allocation
swiotlb: refactor coherent buffer freeing
swiotlb: wire up ->dma_supported in swiotlb_dma_ops
swiotlb: add common swiotlb_map_ops
swiotlb: rename swiotlb_free to swiotlb_exit
x86: rename swiotlb_dma_ops
powerpc: rename swiotlb_dma_ops
...
Diffstat (limited to 'arch/arm64')
-rw-r--r-- | arch/arm64/Kconfig                   |  3
-rw-r--r-- | arch/arm64/include/asm/dma-mapping.h | 35
-rw-r--r-- | arch/arm64/mm/dma-mapping.c          | 54
-rw-r--r-- | arch/arm64/mm/init.c                 | 16
4 files changed, 17 insertions(+), 91 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b488076d63c2..b2b95f79c746 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -59,6 +59,7 @@ config ARM64
 	select COMMON_CLK
 	select CPU_PM if (SUSPEND || CPU_IDLE)
 	select DCACHE_WORD_ACCESS
+	select DMA_DIRECT_OPS
 	select EDAC_SUPPORT
 	select FRAME_POINTER
 	select GENERIC_ALLOCATOR
@@ -227,7 +228,7 @@ config GENERIC_CSUM
 config GENERIC_CALIBRATE_DELAY
 	def_bool y
 
-config ZONE_DMA
+config ZONE_DMA32
 	def_bool y
 
 config HAVE_GENERIC_GUP
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 0df756b24863..b7847eb8a7bb 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -50,40 +50,5 @@ static inline bool is_device_dma_coherent(struct device *dev)
 	return dev->archdata.dma_coherent;
 }
 
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
-	dma_addr_t dev_addr = (dma_addr_t)paddr;
-
-	return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
-{
-	phys_addr_t paddr = (phys_addr_t)dev_addr;
-
-	return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
-}
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
-	if (!dev->dma_mask)
-		return false;
-
-	return addr + size - 1 <= *dev->dma_mask;
-}
-
-static inline void dma_mark_clean(void *addr, size_t size)
-{
-}
-
-/* Override for dma_max_pfn() */
-static inline unsigned long dma_max_pfn(struct device *dev)
-{
-	dma_addr_t dma_max = (dma_addr_t)*dev->dma_mask;
-
-	return (ulong)dma_to_phys(dev, dma_max) >> PAGE_SHIFT;
-}
-#define dma_max_pfn(dev) dma_max_pfn(dev)
-
 #endif	/* __KERNEL__ */
 #endif	/* __ASM_DMA_MAPPING_H */
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index b45c5bcaeccb..a96ec0181818 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -24,7 +24,7 @@
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/genalloc.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
 #include <linux/dma-contiguous.h>
 #include <linux/vmalloc.h>
 #include <linux/swiotlb.h>
@@ -91,46 +91,6 @@ static int __free_from_pool(void *start, size_t size)
 	return 1;
 }
 
-static void *__dma_alloc_coherent(struct device *dev, size_t size,
-				  dma_addr_t *dma_handle, gfp_t flags,
-				  unsigned long attrs)
-{
-	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
-	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
-		flags |= GFP_DMA;
-	if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
-		struct page *page;
-		void *addr;
-
-		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
-						 get_order(size), flags);
-		if (!page)
-			return NULL;
-
-		*dma_handle = phys_to_dma(dev, page_to_phys(page));
-		addr = page_address(page);
-		memset(addr, 0, size);
-		return addr;
-	} else {
-		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
-	}
-}
-
-static void __dma_free_coherent(struct device *dev, size_t size,
-				void *vaddr, dma_addr_t dma_handle,
-				unsigned long attrs)
-{
-	bool freed;
-	phys_addr_t paddr = dma_to_phys(dev, dma_handle);
-
-	freed = dma_release_from_contiguous(dev,
-					phys_to_page(paddr),
-					size >> PAGE_SHIFT);
-	if (!freed)
-		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-}
-
 static void *__dma_alloc(struct device *dev, size_t size,
 			 dma_addr_t *dma_handle, gfp_t flags,
 			 unsigned long attrs)
@@ -152,7 +112,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
 		return addr;
 	}
 
-	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
+	ptr = swiotlb_alloc(dev, size, dma_handle, flags, attrs);
 	if (!ptr)
 		goto no_mem;
 
@@ -173,7 +133,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
 	return coherent_ptr;
 
 no_map:
-	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
+	swiotlb_free(dev, size, ptr, *dma_handle, attrs);
 no_mem:
 	return NULL;
 }
@@ -191,7 +151,7 @@ static void __dma_free(struct device *dev, size_t size,
 			return;
 		vunmap(vaddr);
 	}
 
-	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
+	swiotlb_free(dev, size, swiotlb_addr, dma_handle, attrs);
 }
 
 static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
@@ -368,7 +328,7 @@ static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
 	return 0;
 }
 
-static const struct dma_map_ops swiotlb_dma_ops = {
+static const struct dma_map_ops arm64_swiotlb_dma_ops = {
 	.alloc = __dma_alloc,
 	.free = __dma_free,
 	.mmap = __swiotlb_mmap,
@@ -397,7 +357,7 @@ static int __init atomic_pool_init(void)
 		page = dma_alloc_from_contiguous(NULL, nr_pages,
 						 pool_size_order, GFP_KERNEL);
 	else
-		page = alloc_pages(GFP_DMA, pool_size_order);
+		page = alloc_pages(GFP_DMA32, pool_size_order);
 
 	if (page) {
 		int ret;
@@ -923,7 +883,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			const struct iommu_ops *iommu, bool coherent)
 {
 	if (!dev->dma_ops)
-		dev->dma_ops = &swiotlb_dma_ops;
+		dev->dma_ops = &arm64_swiotlb_dma_ops;
 
 	dev->archdata.dma_coherent = coherent;
 	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index c903f7ccbdd2..9f3c47acf8ff 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -217,7 +217,7 @@ static void __init reserve_elfcorehdr(void)
 }
 #endif /* CONFIG_CRASH_DUMP */
 
 /*
- * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
+ * Return the maximum physical address for ZONE_DMA32 (DMA_BIT_MASK(32)). It
  * currently assumes that for memory starting above 4G, 32-bit devices will
  * use a DMA offset.
  */
@@ -233,8 +233,8 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES]  = {0};
 
-	if (IS_ENABLED(CONFIG_ZONE_DMA))
-		max_zone_pfns[ZONE_DMA] = PFN_DOWN(max_zone_dma_phys());
+	if (IS_ENABLED(CONFIG_ZONE_DMA32))
+		max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
 	max_zone_pfns[ZONE_NORMAL] = max;
 
 	free_area_init_nodes(max_zone_pfns);
@@ -251,9 +251,9 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 	memset(zone_size, 0, sizeof(zone_size));
 
 	/* 4GB maximum for 32-bit only capable devices */
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
 	max_dma = PFN_DOWN(arm64_dma_phys_limit);
-	zone_size[ZONE_DMA] = max_dma - min;
+	zone_size[ZONE_DMA32] = max_dma - min;
 #endif
 	zone_size[ZONE_NORMAL] = max - max_dma;
 
@@ -266,10 +266,10 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 		if (start >= max)
 			continue;
 
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
 		if (start < max_dma) {
 			unsigned long dma_end = min(end, max_dma);
-			zhole_size[ZONE_DMA] -= dma_end - start;
+			zhole_size[ZONE_DMA32] -= dma_end - start;
 		}
 #endif
 		if (end > max_dma) {
@@ -470,7 +470,7 @@ void __init arm64_memblock_init(void)
 	early_init_fdt_scan_reserved_mem();
 
 	/* 4GB maximum for 32-bit only capable devices */
-	if (IS_ENABLED(CONFIG_ZONE_DMA))
+	if (IS_ENABLED(CONFIG_ZONE_DMA32))
 		arm64_dma_phys_limit = max_zone_dma_phys();
 	else
 		arm64_dma_phys_limit = PHYS_MASK + 1;
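To make the arch_setup_dma_ops() change above concrete, here is a self-contained sketch of the per-device ops-table dispatch it implements. This is a model, not kernel code; every name suffixed _model is invented for this example.

/*
 * Self-contained model of per-device DMA ops dispatch; *_model names
 * are illustrative only.
 */
#include <stddef.h>
#include <stdio.h>

struct dma_map_ops_model {
	const char *name;
	void *(*alloc)(size_t size);	/* stands in for .alloc = __dma_alloc */
};

static void *swiotlb_alloc_model(size_t size)
{
	(void)size;
	return NULL;	/* placeholder; the real path ends in swiotlb_alloc() */
}

/* Mirrors the renamed arm64_swiotlb_dma_ops table in the diff above. */
static const struct dma_map_ops_model arm64_swiotlb_ops_model = {
	.name	= "arm64_swiotlb_dma_ops",
	.alloc	= swiotlb_alloc_model,
};

struct device_model {
	const struct dma_map_ops_model *dma_ops;
};

/*
 * Mirrors arch_setup_dma_ops(): keep whatever ops an IOMMU driver
 * already installed, otherwise fall back to the swiotlb-backed default.
 */
static void setup_dma_ops_model(struct device_model *dev)
{
	if (!dev->dma_ops)
		dev->dma_ops = &arm64_swiotlb_ops_model;
}

int main(void)
{
	struct device_model dev = { .dma_ops = NULL };

	setup_dma_ops_model(&dev);
	printf("default ops: %s\n", dev.dma_ops->name);
	return 0;
}

The rename itself is presumably needed because the series adds a common ops table in the swiotlb core (see "swiotlb: add common swiotlb_map_ops" in the shortlog), so arm64's private table can no longer share the swiotlb_dma_ops symbol name.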