author     Linus Torvalds <torvalds@linux-foundation.org>   2021-09-02 20:32:06 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2021-09-02 20:32:06 +0300
commit     4a3bb4200a5958d76cc26ebe4db4257efa56812b (patch)
tree       183c76b93f48d1edb9f9df654ebcd05a08eb6ee7
parent     eceae1e7acaefc0a71e4dd4b8cd49270172b4731 (diff)
parent     c1dec343d7abdf8e71aab2a289ab45ce8b1afb7e (diff)
download   linux-4a3bb4200a5958d76cc26ebe4db4257efa56812b.tar.xz
Merge tag 'dma-mapping-5.15' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:
- fix debugfs initialization order (Anthony Iliopoulos)
- use memory_intersects() directly (Kefeng Wang)
- allow returning specific errors from ->map_sg (Logan Gunthorpe,
  Martin Oliveira)
- turn the dma_map_sg return value into an unsigned int (me)
- provide a common global coherent pool implementation (me)
* tag 'dma-mapping-5.15' of git://git.infradead.org/users/hch/dma-mapping: (31 commits)
hexagon: use the generic global coherent pool
dma-mapping: make the global coherent pool conditional
dma-mapping: add a dma_init_global_coherent helper
dma-mapping: simplify dma_init_coherent_memory
dma-mapping: allow using the global coherent pool for !ARM
ARM/nommu: use the generic dma-direct code for non-coherent devices
dma-direct: add support for dma_coherent_default_memory
dma-mapping: return an unsigned int from dma_map_sg{,_attrs}
dma-mapping: disallow .map_sg operations from returning zero on error
dma-mapping: return error code from dma_dummy_map_sg()
x86/amd_gart: don't set failed sg dma_address to DMA_MAPPING_ERROR
x86/amd_gart: return error code from gart_map_sg()
xen: swiotlb: return error code from xen_swiotlb_map_sg()
parisc: return error code from .map_sg() ops
sparc/iommu: don't set failed sg dma_address to DMA_MAPPING_ERROR
sparc/iommu: return error codes from .map_sg() ops
s390/pci: don't set failed sg dma_address to DMA_MAPPING_ERROR
s390/pci: return error code from s390_dma_map_sg()
powerpc/iommu: don't set failed sg dma_address to DMA_MAPPING_ERROR
powerpc/iommu: return error code from .map_sg() ops
...
30 files changed, 310 insertions, 443 deletions
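Two of the API changes above are directly visible to drivers: dma_map_sg()/dma_map_sg_attrs() now return an unsigned int and signal failure only by returning 0, while dma_map_sgtable() (moved out of line by this series) reports the specific -EINVAL/-ENOMEM/-EIO codes that ->map_sg implementations may now return. A minimal driver-side sketch of the new contract follows; example_do_dma() and its retry policy are illustrative, not part of this merge.

	/* Hypothetical consumer of the post-5.15 scatterlist mapping API. */
	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	static int example_do_dma(struct device *dev, struct sg_table *sgt)
	{
		int ret;

		/* Returns 0 or a negative errno; never a positive count. */
		ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
		if (ret) {
			/* -ENOMEM is the only code worth retrying later. */
			return ret == -ENOMEM ? -EAGAIN : ret;
		}

		/* ... program the hardware using sgt->sgl / sgt->nents ... */

		dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
		return 0;
	}

dma_map_sg() itself, by contrast, still returns 0 on failure so the large body of existing callers keeps working unchanged; only dma_map_sgtable() exposes the new error codes.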
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index 35d7b3096d6e..21f9ac101324 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -649,7 +649,9 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
 		sg->dma_address = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
 						   sg->length, dac_allowed);
-		return sg->dma_address != DMA_MAPPING_ERROR;
+		if (sg->dma_address == DMA_MAPPING_ERROR)
+			return -EIO;
+		return 1;
 	}
 
 	start = sg;
@@ -685,8 +687,10 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
 	if (out < end)
 		out->dma_length = 0;
 
-	if (out - start == 0)
+	if (out - start == 0) {
 		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
+		return -ENOMEM;
+	}
 	DBGA("pci_map_sg: %ld entries\n", out - start);
 
 	return out - start;
@@ -699,7 +703,7 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
 	   entries.  Unmap them now.  */
 	if (out > start)
 		pci_unmap_sg(pdev, start, out - start, dir);
-	return 0;
+	return -ENOMEM;
 }
 
 /* Unmap a set of streaming mode DMA translations.  Again, cpu read
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 1a48c17a5909..f1d6531b5ce5 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -18,8 +18,8 @@ config ARM
 	select ARCH_HAS_SET_MEMORY
 	select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
 	select ARCH_HAS_STRICT_MODULE_RWX if MMU
-	select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB
-	select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB
+	select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB || !MMU
+	select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB || !MMU
 	select ARCH_HAS_TEARDOWN_DMA_OPS if MMU
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAVE_CUSTOM_GPIO_H
@@ -44,6 +44,7 @@ config ARM
 	select CPU_PM if SUSPEND || CPU_IDLE
 	select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS
 	select DMA_DECLARE_COHERENT
+	select DMA_GLOBAL_POOL if !MMU
 	select DMA_OPS
 	select DMA_REMAP if MMU
 	select EDAC_SUPPORT
diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c
index 6bfd2b884505..cfd9c933d2f0 100644
--- a/arch/arm/mm/dma-mapping-nommu.c
+++ b/arch/arm/mm/dma-mapping-nommu.c
@@ -5,12 +5,7 @@
  *  Copyright (C) 2000-2004 Russell King
  */
 
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-direct.h>
 #include <linux/dma-map-ops.h>
-#include <linux/scatterlist.h>
-
 #include <asm/cachetype.h>
 #include <asm/cacheflush.h>
 #include <asm/outercache.h>
@@ -18,65 +13,8 @@
 
 #include "dma.h"
 
-/*
- *  The generic direct mapping code is used if
- *   - MMU/MPU is off
- *   - cpu is v7m w/o cache support
- *   - device is coherent
- *  otherwise arm_nommu_dma_ops is used.
- *
- *  arm_nommu_dma_ops rely on consistent DMA memory (please, refer to
- *  [1] on how to declare such memory).
- *
- *  [1] Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
- */
-
-static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
-				 dma_addr_t *dma_handle, gfp_t gfp,
-				 unsigned long attrs)
-
-{
-	void *ret = dma_alloc_from_global_coherent(dev, size, dma_handle);
-
-	/*
-	 * dma_alloc_from_global_coherent() may fail because:
-	 *
-	 * - no consistent DMA region has been defined, so we can't
-	 *   continue.
-	 * - there is no space left in consistent DMA region, so we
-	 *   only can fallback to generic allocator if we are
-	 *   advertised that consistency is not required.
-	 */
-
-	WARN_ON_ONCE(ret == NULL);
-	return ret;
-}
-
-static void arm_nommu_dma_free(struct device *dev, size_t size,
-			       void *cpu_addr, dma_addr_t dma_addr,
-			       unsigned long attrs)
-{
-	int ret = dma_release_from_global_coherent(get_order(size), cpu_addr);
-
-	WARN_ON_ONCE(ret == 0);
-}
-
-static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
-			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
-			      unsigned long attrs)
-{
-	int ret;
-
-	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
-		return ret;
-	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
-		return ret;
-	return -ENXIO;
-}
-
-
-static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
-				  enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	dmac_map_area(__va(paddr), size, dir);
 
@@ -86,8 +24,8 @@ static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
 		outer_clean_range(paddr, paddr + size);
 }
 
-static void __dma_page_dev_to_cpu(phys_addr_t paddr, size_t size,
-				  enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	if (dir != DMA_TO_DEVICE) {
 		outer_inv_range(paddr, paddr + size);
@@ -95,102 +33,6 @@ static void __dma_page_dev_to_cpu(phys_addr_t paddr, size_t size,
 	}
 }
 
-static dma_addr_t arm_nommu_dma_map_page(struct device *dev, struct page *page,
-					 unsigned long offset, size_t size,
-					 enum dma_data_direction dir,
-					 unsigned long attrs)
-{
-	dma_addr_t handle = page_to_phys(page) + offset;
-
-	__dma_page_cpu_to_dev(handle, size, dir);
-
-	return handle;
-}
-
-static void arm_nommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
-				     size_t size, enum dma_data_direction dir,
-				     unsigned long attrs)
-{
-	__dma_page_dev_to_cpu(handle, size, dir);
-}
-
-
-static int arm_nommu_dma_map_sg(struct device *dev, struct scatterlist *sgl,
-				int nents, enum dma_data_direction dir,
-				unsigned long attrs)
-{
-	int i;
-	struct scatterlist *sg;
-
-	for_each_sg(sgl, sg, nents, i) {
-		sg_dma_address(sg) = sg_phys(sg);
-		sg_dma_len(sg) = sg->length;
-		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
-	}
-
-	return nents;
-}
-
-static void arm_nommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
-				   int nents, enum dma_data_direction dir,
-				   unsigned long attrs)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sgl, sg, nents, i)
-		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
-}
-
-static void arm_nommu_dma_sync_single_for_device(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(handle, size, dir);
-}
-
-static void arm_nommu_dma_sync_single_for_cpu(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(handle, size, dir);
-}
-
-static void arm_nommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
-					     int nents, enum dma_data_direction dir)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sgl, sg, nents, i)
-		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
-}
-
-static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
-					  int nents, enum dma_data_direction dir)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sgl, sg, nents, i)
-		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
-}
-
-const struct dma_map_ops arm_nommu_dma_ops = {
-	.alloc			= arm_nommu_dma_alloc,
-	.free			= arm_nommu_dma_free,
-	.alloc_pages		= dma_direct_alloc_pages,
-	.free_pages		= dma_direct_free_pages,
-	.mmap			= arm_nommu_dma_mmap,
-	.map_page		= arm_nommu_dma_map_page,
-	.unmap_page		= arm_nommu_dma_unmap_page,
-	.map_sg			= arm_nommu_dma_map_sg,
-	.unmap_sg		= arm_nommu_dma_unmap_sg,
-	.sync_single_for_device	= arm_nommu_dma_sync_single_for_device,
-	.sync_single_for_cpu	= arm_nommu_dma_sync_single_for_cpu,
-	.sync_sg_for_device	= arm_nommu_dma_sync_sg_for_device,
-	.sync_sg_for_cpu	= arm_nommu_dma_sync_sg_for_cpu,
-};
-EXPORT_SYMBOL(arm_nommu_dma_ops);
-
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			const struct iommu_ops *iommu, bool coherent)
 {
@@ -201,14 +43,11 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		 * enough to check if MPU is in use or not since in absense of
 		 * MPU system memory map is used.
 		 */
-		dev->archdata.dma_coherent = (cacheid) ? coherent : true;
+		dev->dma_coherent = cacheid ? coherent : true;
 	} else {
 		/*
 		 * Assume coherent DMA in case MMU/MPU has not been set up.
 		 */
-		dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true;
+		dev->dma_coherent = (get_cr() & CR_M) ? coherent : true;
 	}
-
-	if (!dev->archdata.dma_coherent)
-		set_dma_ops(dev, &arm_nommu_dma_ops);
 }
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c4b8df2ad328..4b61541853ea 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -980,7 +980,7 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
-	int i, j;
+	int i, j, ret;
 
 	for_each_sg(sg, s, nents, i) {
 #ifdef CONFIG_NEED_SG_DMA_LENGTH
@@ -988,15 +988,17 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 #endif
 		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
 						s->length, dir, attrs);
-		if (dma_mapping_error(dev, s->dma_address))
+		if (dma_mapping_error(dev, s->dma_address)) {
+			ret = -EIO;
 			goto bad_mapping;
+		}
 	}
 	return nents;
 
 bad_mapping:
 	for_each_sg(sg, s, i, j)
 		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
-	return 0;
+	return ret;
 }
 
 /**
@@ -1622,7 +1624,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg,
 		int nents, bool is_coherent)
 {
 	struct scatterlist *s = sg, *dma = sg, *start = sg;
-	int i, count = 0;
+	int i, count = 0, ret;
 	unsigned int offset = s->offset;
 	unsigned int size = s->offset + s->length;
 	unsigned int max = dma_get_max_seg_size(dev);
@@ -1630,12 +1632,13 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg,
 	for (i = 1; i < nents; i++) {
 		s = sg_next(s);
 
-		s->dma_address = DMA_MAPPING_ERROR;
 		s->dma_length = 0;
 
 		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
-			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
-			    dir, attrs, is_coherent) < 0)
+			ret = __map_sg_chunk(dev, start, size,
+					     &dma->dma_address, dir, attrs,
+					     is_coherent);
+			if (ret < 0)
 				goto bad_mapping;
 
 			dma->dma_address += offset;
@@ -1648,8 +1651,9 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg,
 		}
 		size += s->length;
 	}
-	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
-		is_coherent) < 0)
+	ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
+			     is_coherent);
+	if (ret < 0)
 		goto bad_mapping;
 
 	dma->dma_address += offset;
@@ -1660,7 +1664,9 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg,
 bad_mapping:
 	for_each_sg(sg, s, count, i)
 		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
-	return 0;
+	if (ret == -ENOMEM)
+		return ret;
+	return -EINVAL;
 }
 
 /**
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index e5a852080730..aab1a40eb653 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -7,6 +7,7 @@ config HEXAGON
 	select ARCH_32BIT_OFF_T
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
 	select ARCH_NO_PREEMPT
+	select DMA_GLOBAL_POOL
 	# Other pending projects/to-do items.
 	# select HAVE_REGS_AND_STACK_ACCESS_API
 	# select HAVE_HW_BREAKPOINT if PERF_EVENTS
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index 00b9a81075dd..882680e81a30 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -7,54 +7,8 @@
 
 #include <linux/dma-map-ops.h>
 #include <linux/memblock.h>
-#include <linux/genalloc.h>
-#include <linux/module.h>
 #include <asm/page.h>
 
-static struct gen_pool *coherent_pool;
-
-
-/* Allocates from a pool of uncached memory that was reserved at boot time */
-
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_addr,
-		gfp_t flag, unsigned long attrs)
-{
-	void *ret;
-
-	/*
-	 * Our max_low_pfn should have been backed off by 16MB in
-	 * mm/init.c to create DMA coherent space.  Use that as the VA
-	 * for the pool.
-	 */
-
-	if (coherent_pool == NULL) {
-		coherent_pool = gen_pool_create(PAGE_SHIFT, -1);
-
-		if (coherent_pool == NULL)
-			panic("Can't create %s() memory pool!", __func__);
-		else
-			gen_pool_add(coherent_pool,
-				(unsigned long)pfn_to_virt(max_low_pfn),
-				hexagon_coherent_pool_size, -1);
-	}
-
-	ret = (void *) gen_pool_alloc(coherent_pool, size);
-
-	if (ret) {
-		memset(ret, 0, size);
-		*dma_addr = (dma_addr_t) virt_to_phys(ret);
-	} else
-		*dma_addr = ~0;
-
-	return ret;
-}
-
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
-		dma_addr_t dma_addr, unsigned long attrs)
-{
-	gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
-}
-
 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
 		enum dma_data_direction dir)
 {
@@ -77,3 +31,14 @@ void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
 		BUG();
 	}
 }
+
+/*
+ * Our max_low_pfn should have been backed off by 16MB in mm/init.c to create
+ * DMA coherent space.  Use that for the pool.
+ */
+static int __init hexagon_dma_init(void)
+{
+	return dma_init_global_coherent(PFN_PHYS(max_low_pfn),
+					hexagon_coherent_pool_size);
+}
+core_initcall(hexagon_dma_init);
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 9148ddbf02e5..8ad6946521d8 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1459,7 +1459,7 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
 		sglist->dma_address = sba_map_page(dev, sg_page(sglist),
 				sglist->offset, sglist->length, dir, attrs);
 		if (dma_mapping_error(dev, sglist->dma_address))
-			return 0;
+			return -EIO;
 		return 1;
 	}
 
@@ -1486,7 +1486,7 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
 	coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
 	if (coalesced < 0) {
 		sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
-		return 0;
+		return -ENOMEM;
 	}
 
 	/*
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index 461457b28982..eabddb89d221 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -552,7 +552,7 @@ static int jazz_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 				dir);
 		sg->dma_address = vdma_alloc(sg_phys(sg), sg->length);
 		if (sg->dma_address == DMA_MAPPING_ERROR)
-			return 0;
+			return -EIO;
 		sg_dma_len(sg) = sg->length;
 	}
 
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 2af89a5e379f..30b7736f0896 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -473,7 +473,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 	BUG_ON(direction == DMA_NONE);
 
 	if ((nelems == 0) || !tbl)
-		return 0;
+		return -EINVAL;
 
 	outs = s = segstart = &sglist[0];
 	outcount = 1;
@@ -575,7 +575,6 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 	 */
 	if (outcount < incount) {
 		outs = sg_next(outs);
-		outs->dma_address = DMA_MAPPING_ERROR;
 		outs->dma_length = 0;
 	}
 
@@ -593,13 +592,12 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 			npages = iommu_num_pages(s->dma_address, s->dma_length,
 						 IOMMU_PAGE_SIZE(tbl));
 			__iommu_free(tbl, vaddr, npages);
-			s->dma_address = DMA_MAPPING_ERROR;
 			s->dma_length = 0;
 		}
 		if (s == outs)
 			break;
 	}
-	return 0;
+	return -EIO;
 }
 
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index cc5774c64fae..c8b50fec56bf 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -662,7 +662,7 @@ static int ps3_ioc0_map_sg(struct device *_dev, struct scatterlist *sg,
 			   unsigned long attrs)
 {
 	BUG();
-	return 0;
+	return -EINVAL;
 }
 
 static void ps3_sb_unmap_sg(struct device *_dev, struct scatterlist *sg,
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index 58283cecbd52..feafcb582e1b 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -560,7 +560,8 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
 	for_each_sg(sglist, sgl, nelems, count)
 		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));
 
-	if (vio_cmo_alloc(viodev, alloc_size))
+	ret = vio_cmo_alloc(viodev, alloc_size);
+	if (ret)
 		goto out_fail;
 	ret = ppc_iommu_map_sg(dev, tbl, sglist, nelems, dma_get_mask(dev),
 			direction, attrs);
@@ -577,7 +578,7 @@ out_deallocate:
 	vio_cmo_dealloc(viodev, alloc_size);
 out_fail:
 	atomic_inc(&viodev->cmo.allocs_failed);
-	return 0;
+	return ret;
 }
 
 static void vio_dma_iommu_unmap_sg(struct device *dev,
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 58f2f7abea96..93223bd110c3 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -487,18 +487,18 @@ static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	unsigned int max = dma_get_max_seg_size(dev);
 	unsigned int size = s->offset + s->length;
 	unsigned int offset = s->offset;
-	int count = 0, i;
+	int count = 0, i, ret;
 
 	for (i = 1; i < nr_elements; i++) {
 		s = sg_next(s);
 
-		s->dma_address = DMA_MAPPING_ERROR;
 		s->dma_length = 0;
 
 		if (s->offset || (size & ~PAGE_MASK) ||
 		    size + s->length > max) {
-			if (__s390_dma_map_sg(dev, start, size,
-					      &dma->dma_address, dir))
+			ret = __s390_dma_map_sg(dev, start, size,
+						&dma->dma_address, dir);
+			if (ret)
 				goto unmap;
 
 			dma->dma_address += offset;
@@ -511,7 +511,8 @@ static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		}
 		size += s->length;
 	}
-	if (__s390_dma_map_sg(dev, start, size, &dma->dma_address, dir))
+	ret = __s390_dma_map_sg(dev, start, size, &dma->dma_address, dir);
+	if (ret)
 		goto unmap;
 
 	dma->dma_address += offset;
@@ -523,7 +524,7 @@ unmap:
 		s390_dma_unmap_pages(dev, sg_dma_address(s), sg_dma_len(s),
 				     dir, attrs);
 
-	return 0;
+	return ret;
 }
 
 static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index a034f571d869..da0363692528 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -448,7 +448,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 	iommu = dev->archdata.iommu;
 	strbuf = dev->archdata.stc;
 	if (nelems == 0 || !iommu)
-		return 0;
+		return -EINVAL;
 
 	spin_lock_irqsave(&iommu->lock, flags);
 
@@ -546,7 +546,6 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 
 	if (outcount < incount) {
 		outs = sg_next(outs);
-		outs->dma_address = DMA_MAPPING_ERROR;
 		outs->dma_length = 0;
 	}
 
@@ -572,7 +571,6 @@ iommu_map_failed:
 			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
 					     IOMMU_ERROR_CODE);
 
-			s->dma_address = DMA_MAPPING_ERROR;
 			s->dma_length = 0;
 		}
 		if (s == outs)
@@ -580,7 +578,7 @@ iommu_map_failed:
 	}
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
-	return 0;
+	return -EINVAL;
 }
 
 /* If contexts are being used, they are the same in all of the mappings
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 9de57e88f7a1..384480971805 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -486,7 +486,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 
 	iommu = dev->archdata.iommu;
 	if (nelems == 0 || !iommu)
-		return 0;
+		return -EINVAL;
 	atu = iommu->atu;
 
 	prot = HV_PCI_MAP_ATTR_READ;
@@ -594,7 +594,6 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 
 	if (outcount < incount) {
 		outs = sg_next(outs);
-		outs->dma_address = DMA_MAPPING_ERROR;
 		outs->dma_length = 0;
 	}
 
@@ -611,7 +610,6 @@ iommu_map_failed:
 			iommu_tbl_range_free(tbl, vaddr, npages,
 					     IOMMU_ERROR_CODE);
 			/* XXX demap? XXX */
-			s->dma_address = DMA_MAPPING_ERROR;
 			s->dma_length = 0;
 		}
 		if (s == outs)
@@ -619,7 +617,7 @@ iommu_map_failed:
 	}
 	local_irq_restore(flags);
 
-	return 0;
+	return -EINVAL;
 }
 
 static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 0c0342e5b10d..9e3f6933ca13 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -256,7 +256,7 @@ static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl,
 		sg->dma_address =__sbus_iommu_map_page(dev, sg_page(sg),
 				sg->offset, sg->length, per_page_flush);
 		if (sg->dma_address == DMA_MAPPING_ERROR)
-			return 0;
+			return -EIO;
 		sg->dma_length = sg->length;
 	}
 
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index 9ac696487b13..ed837383de5c 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -331,7 +331,7 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start,
 	int i;
 
 	if (iommu_start == -1)
-		return -1;
+		return -ENOMEM;
 
 	for_each_sg(start, s, nelems, i) {
 		unsigned long pages, addr;
@@ -380,13 +380,13 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		       enum dma_data_direction dir, unsigned long attrs)
 {
 	struct scatterlist *s, *ps, *start_sg, *sgmap;
-	int need = 0, nextneed, i, out, start;
+	int need = 0, nextneed, i, out, start, ret;
 	unsigned long pages = 0;
 	unsigned int seg_size;
 	unsigned int max_seg_size;
 
 	if (nents == 0)
-		return 0;
+		return -EINVAL;
 
 	out		= 0;
 	start		= 0;
@@ -414,8 +414,9 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		if (!iommu_merge || !nextneed || !need || s->offset ||
 		    (s->length + seg_size > max_seg_size) ||
 		    (ps->offset + ps->length) % PAGE_SIZE) {
-			if (dma_map_cont(dev, start_sg, i - start,
-					 sgmap, pages, need) < 0)
+			ret = dma_map_cont(dev, start_sg, i - start,
+					   sgmap, pages, need);
+			if (ret < 0)
 				goto error;
 			out++;
 
@@ -432,7 +433,8 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
 		ps = s;
 	}
-	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
+	ret = dma_map_cont(dev, start_sg, i - start, sgmap, pages, need);
+	if (ret < 0)
 		goto error;
 	out++;
 	flush_gart();
@@ -456,9 +458,7 @@ error:
 		panic("dma_map_sg: overflow on %lu pages\n", pages);
 
 	iommu_full(dev, pages << PAGE_SHIFT, dir);
-	for_each_sg(sg, s, nents, i)
-		s->dma_address = DMA_MAPPING_ERROR;
-	return 0;
+	return ret;
 }
 
 /* allocate and map a coherent mapping */
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 6f0df629353f..9b4c025b8871 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -973,7 +973,7 @@ static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
 
 out_unmap:
 	iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
-	return 0;
+	return -EIO;
 }
 
 /*
@@ -994,11 +994,13 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	dma_addr_t iova;
 	size_t iova_len = 0;
 	unsigned long mask = dma_get_seg_boundary(dev);
+	ssize_t ret;
 	int i;
 
-	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
-	    iommu_deferred_attach(dev, domain))
-		return 0;
+	if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
+		ret = iommu_deferred_attach(dev, domain);
+		goto out;
+	}
 
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
@@ -1046,14 +1048,17 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	}
 
 	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
-	if (!iova)
+	if (!iova) {
+		ret = -ENOMEM;
 		goto out_restore_sg;
+	}
 
 	/*
 	 * We'll leave any physical concatenation to the IOMMU driver's
 	 * implementation - it knows better than we do.
 	 */
-	if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
+	ret = iommu_map_sg_atomic(domain, iova, sg, nents, prot);
+	if (ret < iova_len)
 		goto out_free_iova;
 
 	return __finalise_sg(dev, sg, nents, iova);
@@ -1062,7 +1067,10 @@ out_free_iova:
 	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
 out_restore_sg:
 	__invalidate_sg(sg, nents);
-	return 0;
+out:
+	if (ret != -ENOMEM)
+		return -EINVAL;
+	return ret;
 }
 
 static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 63f0af10c403..5a570d495f8d 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -2570,9 +2570,9 @@ size_t iommu_unmap_fast(struct iommu_domain *domain,
 }
 EXPORT_SYMBOL_GPL(iommu_unmap_fast);
 
-static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
-			     struct scatterlist *sg, unsigned int nents, int prot,
-			     gfp_t gfp)
+static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+		struct scatterlist *sg, unsigned int nents, int prot,
+		gfp_t gfp)
 {
 	const struct iommu_ops *ops = domain->ops;
 	size_t len = 0, mapped = 0;
@@ -2613,19 +2613,18 @@ out_err:
 	/* undo mappings already done */
 	iommu_unmap(domain, iova, mapped);
 
-	return 0;
-
+	return ret;
 }
 
-size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
-		    struct scatterlist *sg, unsigned int nents, int prot)
+ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+		     struct scatterlist *sg, unsigned int nents, int prot)
 {
 	might_sleep();
 	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
 }
 EXPORT_SYMBOL_GPL(iommu_map_sg);
 
-size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
+ssize_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
 		struct scatterlist *sg, unsigned int nents, int prot)
 {
 	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index b5f9ee81a46c..452e72b7bd01 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -918,7 +918,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	BUG_ON(!dev);
 	ioc = GET_IOC(dev);
 	if (!ioc)
-		return 0;
+		return -EINVAL;
 	
 	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
 
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index dce4cdf786cd..e60690d38d67 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -947,7 +947,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 
 	ioc = GET_IOC(dev);
 	if (!ioc)
-		return 0;
+		return -EINVAL;
 
 	/* Fast path single entry scatterlists. */
 	if (nents == 1) {
 
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 24d11861ac7d..85d58b720a24 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -509,7 +509,7 @@ xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
 out_unmap:
 	xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
 	sg_dma_len(sgl) = 0;
-	return 0;
+	return -EIO;
 }
 
 static void
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index 0d53a96a3d64..0d5b06b3a4a6 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -41,8 +41,9 @@ struct dma_map_ops {
 			size_t size, enum dma_data_direction dir,
 			unsigned long attrs);
 	/*
-	 * map_sg returns 0 on error and a value > 0 on success.
-	 * It should never return a value < 0.
+	 * map_sg should return a negative error code on error. See
+	 * dma_map_sgtable() for a list of appropriate error codes
+	 * and their meanings.
 	 */
 	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
 			enum dma_data_direction dir, unsigned long attrs);
@@ -170,13 +171,6 @@ int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
 int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
 int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, size_t size, int *ret);
-
-void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
-		dma_addr_t *dma_handle);
-int dma_release_from_global_coherent(int order, void *vaddr);
-int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
-		size_t size, int *ret);
-
 #else
 static inline int dma_declare_coherent_memory(struct device *dev,
 		phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
@@ -186,7 +180,16 @@ static inline int dma_declare_coherent_memory(struct device *dev,
 #define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
 #define dma_release_from_dev_coherent(dev, order, vaddr) (0)
 #define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
+#endif /* CONFIG_DMA_DECLARE_COHERENT */
 
+#ifdef CONFIG_DMA_GLOBAL_POOL
+void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
+		dma_addr_t *dma_handle);
+int dma_release_from_global_coherent(int order, void *vaddr);
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
+		size_t size, int *ret);
+int dma_init_global_coherent(phys_addr_t phys_addr, size_t size);
+#else
 static inline void *dma_alloc_from_global_coherent(struct device *dev,
 		ssize_t size, dma_addr_t *dma_handle)
 {
@@ -201,7 +204,7 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
 {
 	return 0;
 }
-#endif /* CONFIG_DMA_DECLARE_COHERENT */
+#endif /* CONFIG_DMA_GLOBAL_POOL */
 
 /*
  * This is the actual return value from the ->alloc_noncontiguous method.
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 183e7103a66d..dca2b1355bb1 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -105,11 +105,13 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
 		unsigned long attrs);
 void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
 		enum dma_data_direction dir, unsigned long attrs);
-int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir, unsigned long attrs);
+unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, unsigned long attrs);
 void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
 				      int nents, enum dma_data_direction dir,
 				      unsigned long attrs);
+int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
+		enum dma_data_direction dir, unsigned long attrs);
 dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs);
 void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
@@ -164,8 +166,9 @@ static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 }
-static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir, unsigned long attrs)
+static inline unsigned int dma_map_sg_attrs(struct device *dev,
+		struct scatterlist *sg, int nents, enum dma_data_direction dir,
+		unsigned long attrs)
 {
 	return 0;
 }
@@ -174,6 +177,11 @@ static inline void dma_unmap_sg_attrs(struct device *dev,
 		unsigned long attrs)
 {
 }
+static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
+		enum dma_data_direction dir, unsigned long attrs)
+{
+	return -EOPNOTSUPP;
+}
 static inline dma_addr_t dma_map_resource(struct device *dev,
 		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
 		unsigned long attrs)
@@ -344,34 +352,6 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 }
 
 /**
- * dma_map_sgtable - Map the given buffer for DMA
- * @dev:	The device for which to perform the DMA operation
- * @sgt:	The sg_table object describing the buffer
- * @dir:	DMA direction
- * @attrs:	Optional DMA attributes for the map operation
- *
- * Maps a buffer described by a scatterlist stored in the given sg_table
- * object for the @dir DMA operation by the @dev device. After success the
- * ownership for the buffer is transferred to the DMA domain.  One has to
- * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
- * ownership of the buffer back to the CPU domain before touching the
- * buffer by the CPU.
- *
- * Returns 0 on success or -EINVAL on error during mapping the buffer.
- */
-static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
-		enum dma_data_direction dir, unsigned long attrs)
-{
-	int nents;
-
-	nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
-	if (nents <= 0)
-		return -EINVAL;
-	sgt->nents = nents;
-	return 0;
-}
-
-/**
  * dma_unmap_sgtable - Unmap the given buffer for DMA
  * @dev:	The device for which to perform the DMA operation
  * @sgt:	The sg_table object describing the buffer
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 32d448050bf7..9369458ba1bd 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -414,11 +414,11 @@ extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 extern size_t iommu_unmap_fast(struct iommu_domain *domain,
 			       unsigned long iova, size_t size,
 			       struct iommu_iotlb_gather *iotlb_gather);
-extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
-			   struct scatterlist *sg,unsigned int nents, int prot);
-extern size_t iommu_map_sg_atomic(struct iommu_domain *domain,
-				  unsigned long iova, struct scatterlist *sg,
-				  unsigned int nents, int prot);
+extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+		struct scatterlist *sg, unsigned int nents, int prot);
+extern ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
+		unsigned long iova, struct scatterlist *sg,
+		unsigned int nents, int prot);
 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
 extern void iommu_set_fault_handler(struct iommu_domain *domain,
 			iommu_fault_handler_t handler, void *token);
@@ -679,18 +679,18 @@ static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
 	return 0;
 }
 
-static inline size_t iommu_map_sg(struct iommu_domain *domain,
-				  unsigned long iova, struct scatterlist *sg,
-				  unsigned int nents, int prot)
+static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
+				   unsigned long iova, struct scatterlist *sg,
+				   unsigned int nents, int prot)
 {
-	return 0;
+	return -ENODEV;
 }
 
-static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain,
+static inline ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
 		unsigned long iova, struct scatterlist *sg,
 		unsigned int nents, int prot)
 {
-	return 0;
+	return -ENODEV;
 }
 
 static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index 77b405508743..725cfd51762b 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -93,6 +93,10 @@ config DMA_COHERENT_POOL
 	select GENERIC_ALLOCATOR
 	bool
 
+config DMA_GLOBAL_POOL
+	select DMA_DECLARE_COHERENT
+	bool
+
 config DMA_REMAP
 	bool
 	depends on MMU
diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
index 794e76b03b34..25fc85a7aebe 100644
--- a/kernel/dma/coherent.c
+++ b/kernel/dma/coherent.c
@@ -20,8 +20,6 @@ struct dma_coherent_mem {
 	bool		use_dev_dma_pfn_offset;
 };
 
-static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;
-
 static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
 {
 	if (dev && dev->dma_mem)
@@ -37,51 +35,44 @@ static inline dma_addr_t dma_get_device_base(struct device *dev,
 	return mem->device_base;
 }
 
-static int dma_init_coherent_memory(phys_addr_t phys_addr,
-		dma_addr_t device_addr, size_t size,
-		struct dma_coherent_mem **mem)
+static struct dma_coherent_mem *dma_init_coherent_memory(phys_addr_t phys_addr,
+		dma_addr_t device_addr, size_t size, bool use_dma_pfn_offset)
 {
-	struct dma_coherent_mem *dma_mem = NULL;
-	void *mem_base = NULL;
+	struct dma_coherent_mem *dma_mem;
 	int pages = size >> PAGE_SHIFT;
 	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
-	int ret;
+	void *mem_base;
 
-	if (!size) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (!size)
+		return ERR_PTR(-EINVAL);
 
 	mem_base = memremap(phys_addr, size, MEMREMAP_WC);
-	if (!mem_base) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (!mem_base)
+		return ERR_PTR(-EINVAL);
+
 	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
-	if (!dma_mem) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!dma_mem)
+		goto out_unmap_membase;
 	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-	if (!dma_mem->bitmap) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!dma_mem->bitmap)
+		goto out_free_dma_mem;
 
 	dma_mem->virt_base = mem_base;
 	dma_mem->device_base = device_addr;
 	dma_mem->pfn_base = PFN_DOWN(phys_addr);
 	dma_mem->size = pages;
+	dma_mem->use_dev_dma_pfn_offset = use_dma_pfn_offset;
 	spin_lock_init(&dma_mem->spinlock);
 
-	*mem = dma_mem;
-	return 0;
+	return dma_mem;
 
-out:
+out_free_dma_mem:
 	kfree(dma_mem);
-	if (mem_base)
-		memunmap(mem_base);
-	return ret;
+out_unmap_membase:
+	memunmap(mem_base);
+	pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %zd MiB\n",
+		&phys_addr, size / SZ_1M);
+	return ERR_PTR(-ENOMEM);
 }
 
 static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
@@ -130,9 +121,9 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
 	struct dma_coherent_mem *mem;
 	int ret;
 
-	ret = dma_init_coherent_memory(phys_addr, device_addr, size, &mem);
-	if (ret)
-		return ret;
+	mem = dma_init_coherent_memory(phys_addr, device_addr, size, false);
+	if (IS_ERR(mem))
+		return PTR_ERR(mem);
 
 	ret = dma_assign_coherent_memory(dev, mem);
 	if (ret)
@@ -198,16 +189,6 @@ int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
 	return 1;
 }
 
-void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
-		dma_addr_t *dma_handle)
-{
-	if (!dma_coherent_default_memory)
-		return NULL;
-
-	return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
-			dma_handle);
-}
-
 static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
 				       int order, void *vaddr)
 {
@@ -243,15 +224,6 @@ int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
 	return __dma_release_from_coherent(mem, order, vaddr);
 }
 
-int dma_release_from_global_coherent(int order, void *vaddr)
-{
-	if (!dma_coherent_default_memory)
-		return 0;
-
-	return __dma_release_from_coherent(dma_coherent_default_memory, order,
-			vaddr);
-}
-
 static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
 		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
 {
@@ -297,6 +269,28 @@ int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
 	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
 }
 
+#ifdef CONFIG_DMA_GLOBAL_POOL
+static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;
+
+void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
+		dma_addr_t *dma_handle)
+{
+	if (!dma_coherent_default_memory)
+		return NULL;
+
+	return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
+			dma_handle);
+}
+
+int dma_release_from_global_coherent(int order, void *vaddr)
+{
+	if (!dma_coherent_default_memory)
+		return 0;
+
+	return __dma_release_from_coherent(dma_coherent_default_memory, order,
+			vaddr);
+}
+
 int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
 				   size_t size, int *ret)
 {
@@ -307,6 +301,19 @@ int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
 			vaddr, size, ret);
 }
 
+int dma_init_global_coherent(phys_addr_t phys_addr, size_t size)
+{
+	struct dma_coherent_mem *mem;
+
+	mem = dma_init_coherent_memory(phys_addr, phys_addr, size, true);
+	if (IS_ERR(mem))
+		return PTR_ERR(mem);
+	dma_coherent_default_memory = mem;
+	pr_info("DMA: default coherent area is set\n");
+	return 0;
+}
+#endif /* CONFIG_DMA_GLOBAL_POOL */
+
 /*
  * Support for reserved memory regions defined in device tree
  */
@@ -315,25 +322,22 @@ int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
 #include <linux/of_fdt.h>
 #include <linux/of_reserved_mem.h>
 
+#ifdef CONFIG_DMA_GLOBAL_POOL
 static struct reserved_mem *dma_reserved_default_memory __initdata;
+#endif
 
 static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
 {
-	struct dma_coherent_mem *mem = rmem->priv;
-	int ret;
-
-	if (!mem) {
-		ret = dma_init_coherent_memory(rmem->base, rmem->base,
-					       rmem->size, &mem);
-		if (ret) {
-			pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
-				&rmem->base, (unsigned long)rmem->size / SZ_1M);
-			return ret;
-		}
+	if (!rmem->priv) {
+		struct dma_coherent_mem *mem;
+
+		mem = dma_init_coherent_memory(rmem->base, rmem->base,
+					       rmem->size, true);
+		if (IS_ERR(mem))
+			return PTR_ERR(mem);
+		rmem->priv = mem;
 	}
-	mem->use_dev_dma_pfn_offset = true;
-	rmem->priv = mem;
-	dma_assign_coherent_memory(dev, mem);
+	dma_assign_coherent_memory(dev, rmem->priv);
 	return 0;
 }
 
@@ -361,7 +365,9 @@ static int __init rmem_dma_setup(struct reserved_mem *rmem)
 		pr_err("Reserved memory: regions without no-map are not yet supported\n");
 		return -EINVAL;
 	}
+#endif
 
+#ifdef CONFIG_DMA_GLOBAL_POOL
 	if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
 		WARN(dma_reserved_default_memory,
 		     "Reserved memory: region for default DMA coherent area is redefined\n");
@@ -375,31 +381,16 @@ static int __init rmem_dma_setup(struct reserved_mem *rmem)
 	return 0;
 }
 
+#ifdef CONFIG_DMA_GLOBAL_POOL
 static int __init dma_init_reserved_memory(void)
 {
-	const struct reserved_mem_ops *ops;
-	int ret;
-
 	if (!dma_reserved_default_memory)
 		return -ENOMEM;
-
-	ops = dma_reserved_default_memory->ops;
-
-	/*
-	 * We rely on rmem_dma_device_init() does not propagate error of
-	 * dma_assign_coherent_memory() for "NULL" device.
-	 */
-	ret = ops->device_init(dma_reserved_default_memory, NULL);
-
-	if (!ret) {
-		dma_coherent_default_memory = dma_reserved_default_memory->priv;
-		pr_info("DMA: default coherent area is set\n");
-	}
-
-	return ret;
+	return dma_init_global_coherent(dma_reserved_default_memory->base,
+					dma_reserved_default_memory->size);
 }
-
 core_initcall(dma_init_reserved_memory);
+#endif /* CONFIG_DMA_GLOBAL_POOL */
 
 RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
 #endif
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index dadae6255d05..6c90c69e5311 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -792,7 +792,7 @@ static int dump_show(struct seq_file *seq, void *v)
 }
 DEFINE_SHOW_ATTRIBUTE(dump);
 
-static void dma_debug_fs_init(void)
+static int __init dma_debug_fs_init(void)
 {
 	struct dentry *dentry = debugfs_create_dir("dma-api", NULL);
 
@@ -805,7 +805,10 @@ static void dma_debug_fs_init(void)
 	debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
 	debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
 	debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);
+
+	return 0;
 }
+core_initcall_sync(dma_debug_fs_init);
 
 static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
 {
@@ -890,8 +893,6 @@ static int dma_debug_init(void)
 		spin_lock_init(&dma_entry_hash[i].lock);
 	}
 
-	dma_debug_fs_init();
-
 	nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
 	for (i = 0; i < nr_pages; ++i)
 		dma_debug_create_entries(GFP_KERNEL);
@@ -1064,20 +1065,10 @@ static void check_for_stack(struct device *dev,
 	}
 }
 
-static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
-{
-	unsigned long a1 = (unsigned long)addr;
-	unsigned long b1 = a1 + len;
-	unsigned long a2 = (unsigned long)start;
-	unsigned long b2 = (unsigned long)end;
-
-	return !(b1 <= a2 || a1 >= b2);
-}
-
 static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
 {
-	if (overlap(addr, len, _stext, _etext) ||
-	    overlap(addr, len, __start_rodata, __end_rodata))
+	if (memory_intersects(_stext, _etext, addr, len) ||
+	    memory_intersects(__start_rodata, __end_rodata, addr, len))
 		err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n",
 				addr, len);
 }
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index f737e3347059..8dca4f97d12d 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -156,9 +156,14 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
 	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
 	    !dev_is_dma_coherent(dev))
 		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
 
+	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
+	    !dev_is_dma_coherent(dev))
+		return dma_alloc_from_global_coherent(dev, size, dma_handle);
+
 	/*
 	 * Remapping or decrypting memory may block. If either is required and
 	 * we can't block, allocate the memory from the atomic pools.
@@ -255,11 +260,19 @@ void dma_direct_free(struct device *dev, size_t size,
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
 	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
 	    !dev_is_dma_coherent(dev)) {
 		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
 		return;
 	}
 
+	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
+	    !dev_is_dma_coherent(dev)) {
+		if (!dma_release_from_global_coherent(page_order, cpu_addr))
+			WARN_ON_ONCE(1);
+		return;
+	}
+
 	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
 	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
 	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
@@ -411,7 +424,7 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 
 out_unmap:
 	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
-	return 0;
+	return -EIO;
 }
 
 dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
@@ -462,6 +475,8 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
 
 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
+	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
+		return ret;
 
 	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
 		return -ENXIO;
diff --git a/kernel/dma/dummy.c b/kernel/dma/dummy.c
index eacd4c5b10bf..b492d59ac77e 100644
--- a/kernel/dma/dummy.c
+++ b/kernel/dma/dummy.c
@@ -22,7 +22,7 @@ static int dma_dummy_map_sg(struct device *dev, struct scatterlist *sgl,
 		int nelems, enum dma_data_direction dir,
 		unsigned long attrs)
 {
-	return 0;
+	return -EINVAL;
 }
 
 static int dma_dummy_supported(struct device *hwdev, u64 mask)
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 2b06a809d0b9..7ee5284bff58 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -177,12 +177,8 @@ void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
 }
 EXPORT_SYMBOL(dma_unmap_page_attrs);
 
-/*
- * dma_maps_sg_attrs returns 0 on error and > 0 on success.
- * It should never return a value < 0.
- */
-int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir, unsigned long attrs)
+static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+	 int nents, enum dma_data_direction dir, unsigned long attrs)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	int ents;
@@ -197,13 +193,81 @@ int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
 		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
 	else
 		ents = ops->map_sg(dev, sg, nents, dir, attrs);
-	BUG_ON(ents < 0);
-	debug_dma_map_sg(dev, sg, nents, ents, dir);
+
+	if (ents > 0)
+		debug_dma_map_sg(dev, sg, nents, ents, dir);
+	else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
+			      ents != -EIO))
+		return -EIO;
 
 	return ents;
 }
+
+/**
+ * dma_map_sg_attrs - Map the given buffer for DMA
+ * @dev:	The device for which to perform the DMA operation
+ * @sg:		The sg_table object describing the buffer
+ * @dir:	DMA direction
+ * @attrs:	Optional DMA attributes for the map operation
+ *
+ * Maps a buffer described by a scatterlist passed in the sg argument with
+ * nents segments for the @dir DMA operation by the @dev device.
+ *
+ * Returns the number of mapped entries (which can be less than nents)
+ * on success. Zero is returned for any error.
+ *
+ * dma_unmap_sg_attrs() should be used to unmap the buffer with the
+ * original sg and original nents (not the value returned by this funciton).
+ */
+unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+	int ret;
+
+	ret = __dma_map_sg_attrs(dev, sg, nents, dir, attrs);
+	if (ret < 0)
+		return 0;
+	return ret;
+}
 EXPORT_SYMBOL(dma_map_sg_attrs);
 
+/**
+ * dma_map_sgtable - Map the given buffer for DMA
+ * @dev:	The device for which to perform the DMA operation
+ * @sgt:	The sg_table object describing the buffer
+ * @dir:	DMA direction
+ * @attrs:	Optional DMA attributes for the map operation
+ *
+ * Maps a buffer described by a scatterlist stored in the given sg_table
+ * object for the @dir DMA operation by the @dev device. After success, the
+ * ownership for the buffer is transferred to the DMA domain.  One has to
+ * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
+ * ownership of the buffer back to the CPU domain before touching the
+ * buffer by the CPU.
+ *
+ * Returns 0 on success or a negative error code on error. The following
+ * error codes are supported with the given meaning:
+ *
+ *   -EINVAL - An invalid argument, unaligned access or other error
+ *	       in usage. Will not succeed if retried.
+ *   -ENOMEM - Insufficient resources (like memory or IOVA space) to
+ *	       complete the mapping. Should succeed if retried later.
+ *   -EIO    - Legacy error code with an unknown meaning. eg. this is
+ *	       returned if a lower level call returned DMA_MAPPING_ERROR.
+ */
+int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
+		enum dma_data_direction dir, unsigned long attrs)
+{
+	int nents;
+
+	nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
+	if (nents < 0)
+		return nents;
+	sgt->nents = nents;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dma_map_sgtable);
+
 void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
 				      int nents, enum dma_data_direction dir,
 				      unsigned long attrs)
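For implementers, every ->map_sg conversion in this tree follows the same shape: propagate a specific errno instead of returning 0, unwind partial mappings, and stop poisoning failed entries with DMA_MAPPING_ERROR. A schematic not taken from any single file above; example_map_page()/example_unmap_page() are placeholder helpers standing in for an arch or IOMMU backend:

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	/* Placeholder per-page helpers (hypothetical). */
	static dma_addr_t example_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir);
	static void example_unmap_page(struct device *dev, dma_addr_t addr,
			size_t size, enum dma_data_direction dir);

	static int example_map_sg(struct device *dev, struct scatterlist *sgl,
			int nents, enum dma_data_direction dir,
			unsigned long attrs)
	{
		struct scatterlist *sg;
		int i, j;

		for_each_sg(sgl, sg, nents, i) {
			sg->dma_address = example_map_page(dev, sg_page(sg),
					sg->offset, sg->length, dir);
			if (sg->dma_address == DMA_MAPPING_ERROR)
				goto out_unmap;
			sg_dma_len(sg) = sg->length;
		}
		return nents;

	out_unmap:
		/* Unwind only the i entries mapped before the failure ... */
		for_each_sg(sgl, sg, i, j)
			example_unmap_page(dev, sg_dma_address(sg),
					sg_dma_len(sg), dir);
		/*
		 * ... and, instead of the old "return 0", pick one of the
		 * three errnos dma_map_sgtable() documents: -EIO here, since
		 * the failure came back as DMA_MAPPING_ERROR from a helper.
		 */
		return -EIO;
	}

Leaving sg->dma_address untouched on failure is part of the same contract; as the shortlog's "don't set failed sg dma_address to DMA_MAPPING_ERROR" commits show, callers may no longer inspect failed entries.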